/** * Initializes a new <tt>SimulcastEngine</tt> instance. * * @param videoChannel The <tt>VideoChannel</tt> associated with this <tt>SimulcastEngine</tt>. */ public SimulcastEngine(VideoChannel videoChannel) { this.videoChannel = videoChannel; simulcastReceiver = new SimulcastReceiver(this); this.logger = Logger.getLogger(classLogger, videoChannel.getContent().getConference().getLogger()); }
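/*
 * A minimal usage sketch (not part of the original source): how another component might observe
 * the layer-change events fired by the SimulcastReceiver defined below. It assumes that
 * PropertyChangeNotifier exposes the standard addPropertyChangeListener(PropertyChangeListener)
 * method; the class and method names introduced here are illustrative only.
 */
import java.beans.PropertyChangeEvent;
import java.beans.PropertyChangeListener;

class SimulcastLayersObserverSketch {
  static void observe(final SimulcastReceiver receiver) {
    receiver.addPropertyChangeListener(new PropertyChangeListener() {
      @Override
      public void propertyChange(PropertyChangeEvent ev) {
        if (SimulcastReceiver.SIMULCAST_LAYERS_PNAME.equals(ev.getPropertyName())) {
          // The event intentionally carries null old/new values (see setSimulcastLayers
          // below), so re-query the receiver for the current layers.
          SimulcastLayer[] layers = receiver.getSimulcastLayers();
          System.err.println("Simulcast layers changed: " + (layers == null ? 0 : layers.length));
        }
      }
    });
  }
}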
/** * The <tt>SimulcastReceiver</tt> of a <tt>SimulcastEngine</tt> receives the simulcast streams from * a simulcast-enabled participant and manages one or more <tt>SimulcastLayer</tt>s. It fires a * property change event whenever the simulcast layers that it manages change. * * <p>This class is thread safe. * * @author George Politis * @author Lyubomir Marinov */ public class SimulcastReceiver extends PropertyChangeNotifier { /** * The <tt>Logger</tt> used by the <tt>SimulcastReceiver</tt> class and its instances to print * debug information. */ private static final Logger logger = Logger.getLogger(SimulcastReceiver.class); /** * The name of the property that gets fired when there's a change in the simulcast layers that * this receiver manages. */ public static final String SIMULCAST_LAYERS_PNAME = SimulcastReceiver.class.getName() + ".simulcastLayers"; /** * The number of (video) frames which defines the interval of time (indirectly) during which a * {@code SimulcastLayer} needs to receive data from its remote peer or it will be declared * paused/stopped/not streaming by its {@code SimulcastReceiver}. */ static final int TIMEOUT_ON_FRAME_COUNT = 5; /** The pool of threads utilized by this class. */ private static final ExecutorService executorService = ExecutorUtils.newCachedThreadPool(true, SimulcastReceiver.class.getName()); /** Helper object used by this class to build JSON messages. */ private static final SimulcastMessagesMapper mapper = new SimulcastMessagesMapper(); /** The <tt>SimulcastEngine</tt> that owns this receiver. */ private final SimulcastEngine simulcastEngine; /** The simulcast layers of this receiver. */ private SimulcastLayer[] simulcastLayers; /** * Indicates whether we're receiving native or non-native simulcast from the associated endpoint. * It determines whether the bridge should send messages over the data channels to manage the * non-native simulcast. In the case of native simulcast, there's nothing to do for the bridge. * * <p>NOTE that at the time of this writing we only support native simulcast. Last time we tried * non-native simulcast there was no way to limit the bitrate of lower layer streams and thus * there was no point in implementing non-native simulcast. * * <p>NOTE^2 This has changed recently with the webrtc stack automatically limiting the stream * bitrate based on its resolution (see commit 1c7d48d431e098ba42fa6bd9f1cfe69a703edee5 in the * webrtc git repository). So it might be something that we will want to implement in the future * for browsers that don't support native simulcast (Temasys). */ private boolean nativeSimulcast = true; /** * The history of the order/sequence of receipt of (video) frames by {@link #simulcastLayers}. * Used in an attempt to speed up the detection of paused/stopped {@code SimulcastLayer}s by * counting (video) frames. */ private final List<SimulcastLayer> simulcastLayerFrameHistory = new LinkedList<SimulcastLayer>(); /** * Initializes a new <tt>SimulcastReceiver</tt> instance. * * @param simulcastEngine the <tt>SimulcastEngine</tt> that owns this receiver. */ public SimulcastReceiver(SimulcastEngine simulcastEngine) { this.simulcastEngine = simulcastEngine; } /** * Gets the <tt>SimulcastEngine</tt> that owns this receiver. * * @return the <tt>SimulcastEngine</tt> that owns this receiver. */ public SimulcastEngine getSimulcastEngine() { return this.simulcastEngine; } /** * Returns true if the endpoint has signaled one or more simulcast layers.
* * @return true if the endpoint has signaled one or more simulcast layers, false otherwise. */ public boolean hasLayers() { SimulcastLayer[] sl = simulcastLayers; return sl != null && sl.length != 0; } /** * Returns a <tt>SimulcastLayer</tt> that is the closest match to the target order, or null if * simulcast hasn't been configured for this receiver. * * @param targetOrder the simulcast layer target order. * @return a <tt>SimulcastLayer</tt> that is the closest match to the target order, or null. */ public SimulcastLayer getSimulcastLayer(int targetOrder) { SimulcastLayer[] layers = getSimulcastLayers(); if (layers == null || layers.length == 0) { return null; } // Iterate through the simulcast layers that we own and return the one // that best matches the targetOrder parameter. SimulcastLayer next = layers[0]; for (int i = 1; i < Math.min(targetOrder + 1, layers.length); i++) { if (!layers[i].isStreaming()) { break; } next = layers[i]; } return next; } /** * Gets the simulcast layers of this receiver. Note that the returned array is the internal * array and not a copy, so callers must not modify it. * * @return the simulcast layers of this receiver if simulcast is signaled, or null. */ public SimulcastLayer[] getSimulcastLayers() { return simulcastLayers; } /** * Sets the simulcast layers for this receiver and fires an event about it. * * @param simulcastLayers the simulcast layers for this receiver. */ public void setSimulcastLayers(SimulcastLayer[] simulcastLayers) { this.simulcastLayers = simulcastLayers; if (logger.isInfoEnabled()) { if (simulcastLayers == null) { logInfo("Simulcast disabled."); } else { for (SimulcastLayer l : simulcastLayers) { logInfo(l.getOrder() + ": " + l.getPrimarySSRC()); } } } executorService.execute( new Runnable() { public void run() { firePropertyChange(SIMULCAST_LAYERS_PNAME, null, null); } }); // TODO If simulcastLayers has changed, then simulcastLayerFrameHistory // has very likely become irrelevant. In other words, clear // simulcastLayerFrameHistory. } /** * Notifies this instance that a <tt>DatagramPacket</tt> received on the data * <tt>DatagramSocket</tt> of this <tt>Channel</tt> has been accepted for further processing * within Jitsi Videobridge. * * @param pkt the accepted <tt>RawPacket</tt>. */ public void accepted(RawPacket pkt) { // With native simulcast we don't have a notification when a stream // has started/stopped. The simulcast manager implements a timeout // for the high quality stream and it needs to be notified when // the channel has accepted a datagram packet for the timeout to // function correctly. if (!hasLayers() || pkt == null) { return; } // Find the layer that corresponds to this packet. int acceptedSSRC = pkt.getSSRC(); SimulcastLayer[] layers = getSimulcastLayers(); SimulcastLayer acceptedLayer = null; for (SimulcastLayer layer : layers) { // We only care about the primary SSRC and not the RTX ssrc (or // future FEC ssrc). if ((int) layer.getPrimarySSRC() == acceptedSSRC) { acceptedLayer = layer; break; } } // If we can't find an accepted layer, return, as it makes no sense to // continue in this situation. if (acceptedLayer == null) { return; } // There are sequences of packets with increasing timestamps but without // the marker bit set. Supposedly, they are probes to detect whether the // bandwidth may increase. We think that they should cause neither the // start nor the stop of any SimulcastLayer.
// XXX There's RawPacket#getPayloadLength() but the implementation // includes pkt.paddingSize at the time of this writing and we do not // know whether that's going to stay that way. int pktPayloadLength = pkt.getLength() - pkt.getHeaderLength(); int pktPaddingSize = pkt.getPaddingSize(); if (pktPayloadLength <= pktPaddingSize) { if (logger.isTraceEnabled()) { logger.trace( "pkt.payloadLength= " + pktPayloadLength + " <= pkt.paddingSize= " + pktPaddingSize + "(" + pkt.getSequenceNumber() + ")"); } return; } // NOTE(gp) we expect the base layer to be always on, so we never touch // it or starve it. // XXX Refer to the implementation of // SimulcastLayer#touch(boolean, RawPacket) for an explanation of why we // chose to use a return value. boolean frameStarted = acceptedLayer.touch(pkt); if (frameStarted) simulcastLayerFrameStarted(acceptedLayer, pkt, layers); } /** * Maybe send a data channel command to the associated <tt>Endpoint</tt> to make it start * streaming its hq stream, if it's being watched by some receiver. */ public void maybeSendStartHighQualityStreamCommand() { if (nativeSimulcast || !hasLayers()) { // In native simulcast the client adjusts its layers autonomously so // we neither need nor are able to control it with data channel // messages. return; } Endpoint newEndpoint = getSimulcastEngine().getVideoChannel().getEndpoint(); SimulcastLayer[] newSimulcastLayers = getSimulcastLayers(); SctpConnection sctpConnection; if (newSimulcastLayers == null || newSimulcastLayers.length <= 1 /* newEndpoint != null is implied */ || (sctpConnection = newEndpoint.getSctpConnection()) == null || !sctpConnection.isReady() || sctpConnection.isExpired()) { return; } // we have a new endpoint and it has an SCTP connection that is // ready and not expired. if somebody else is watching the new // endpoint, start its hq stream. boolean startHighQualityStream = false; for (Endpoint e : getSimulcastEngine().getVideoChannel().getContent().getConference().getEndpoints()) { // TODO(gp) need some synchronization here. What if the // selected endpoint changes while we're in the loop? if (e == newEndpoint) continue; Endpoint eSelectedEndpoint = e.getEffectivelySelectedEndpoint(); if (newEndpoint == eSelectedEndpoint || eSelectedEndpoint == null) { // somebody is watching the new endpoint or somebody has not // yet signaled its selected endpoint to the bridge, start // the hq stream. if (logger.isDebugEnabled()) { Map<String, Object> map = new HashMap<String, Object>(3); map.put("e", e); map.put("newEndpoint", newEndpoint); map.put("maybe", eSelectedEndpoint == null ? "(maybe) " : ""); StringCompiler sc = new StringCompiler(map).c("{e.id} is {maybe} watching {newEndpoint.id}."); logDebug(sc.toString().replaceAll("\\s+", " ")); } startHighQualityStream = true; break; } } if (startHighQualityStream) { // TODO(gp) this assumes only a single hq stream. logDebug( getSimulcastEngine().getVideoChannel().getEndpoint().getID() + " notifies " + newEndpoint.getID() + " to start its HQ stream."); SimulcastLayer hqLayer = newSimulcastLayers[newSimulcastLayers.length - 1]; StartSimulcastLayerCommand command = new StartSimulcastLayerCommand(hqLayer); String json = mapper.toJson(command); try { newEndpoint.sendMessageOnDataChannel(json); } catch (IOException e) { logError(newEndpoint.getID() + " failed to send message on data channel.", e); } } } /** * Maybe send a data channel command to the associated simulcast sender to make it stop streaming * its hq stream, if it's not being watched by any participant.
*/ public void maybeSendStopHighQualityStreamCommand() { if (nativeSimulcast || !hasLayers()) { // In native simulcast the client adjusts its layers autonomously so // we neither need nor are able to control it with data channel // messages. return; } Endpoint oldEndpoint = getSimulcastEngine().getVideoChannel().getEndpoint(); SimulcastLayer[] oldSimulcastLayers = getSimulcastLayers(); SctpConnection sctpConnection; if (oldSimulcastLayers != null && oldSimulcastLayers.length > 1 /* oldEndpoint != null is implied */ && (sctpConnection = oldEndpoint.getSctpConnection()) != null && sctpConnection.isReady() && !sctpConnection.isExpired()) { // we have an old endpoint and it has an SCTP connection that is // ready and not expired. if nobody else is watching the old // endpoint, stop its hq stream. boolean stopHighQualityStream = true; for (Endpoint e : getSimulcastEngine().getVideoChannel().getContent().getConference().getEndpoints()) { // TODO(gp) need some synchronization here. What if the selected // endpoint changes while we're in the loop? if (oldEndpoint != e && (oldEndpoint == e.getEffectivelySelectedEndpoint() || e.getEffectivelySelectedEndpoint() == null)) { // somebody is watching the old endpoint or somebody has not // yet signaled its selected endpoint to the bridge, don't // stop the hq stream. stopHighQualityStream = false; break; } } if (stopHighQualityStream) { // TODO(gp) this assumes only a single hq stream. logDebug( getSimulcastEngine().getVideoChannel().getEndpoint().getID() + " notifies " + oldEndpoint.getID() + " to stop its HQ stream."); SimulcastLayer hqLayer = oldSimulcastLayers[oldSimulcastLayers.length - 1]; StopSimulcastLayerCommand command = new StopSimulcastLayerCommand(hqLayer); String json = mapper.toJson(command); try { oldEndpoint.sendMessageOnDataChannel(json); } catch (IOException e1) { logError(oldEndpoint.getID() + " failed to send message on data channel.", e1); } } } } private void logDebug(String msg) { if (logger.isDebugEnabled()) { msg = getSimulcastEngine().getVideoChannel().getEndpoint().getID() + ": " + msg; logger.debug(msg); } } private void logWarn(String msg) { if (logger.isWarnEnabled()) { msg = getSimulcastEngine().getVideoChannel().getEndpoint().getID() + ": " + msg; logger.warn(msg); } } private void logError(String msg, Throwable e) { msg = getSimulcastEngine().getVideoChannel().getEndpoint().getID() + ": " + msg; logger.error(msg, e); } private void logInfo(String msg) { if (logger.isInfoEnabled()) { msg = getSimulcastEngine().getVideoChannel().getEndpoint().getID() + ": " + msg; logger.info(msg); } } /** * Notifies this {@code SimulcastReceiver} that a specific {@code SimulcastLayer} has detected * the start of a new video frame in the RTP stream that it represents. Determines whether any of * {@link #simulcastLayers} other than {@code source} have been paused/stopped by the remote peer. * The determination is based on counting (video) frames. * * @param source the {@code SimulcastLayer} which is the source of the event i.e. which has * detected the start of a new video frame in the RTP stream that it represents * @param pkt the {@code RawPacket} which was received by {@code source} and possibly influenced * the decision that a new video frame was started in the RTP stream represented by {@code * source} * @param layers the set of {@code SimulcastLayer}s managed by this {@code SimulcastReceiver}.
* Explicitly provided to the method in order to avoid repeated invocations of {@link * #getSimulcastLayers()}. */ private void simulcastLayerFrameStarted( SimulcastLayer source, RawPacket pkt, SimulcastLayer[] layers) { // Allow the value of the constant TIMEOUT_ON_FRAME_COUNT to disable (at // compile time) the frame-based approach to the detection of layer // drops. if (TIMEOUT_ON_FRAME_COUNT <= 1) return; // Timeouts in layers caused by source may occur only based on the span // (of time or received frames) during which source has received // TIMEOUT_ON_FRAME_COUNT number of frames. The current method // invocation signals the receipt of 1 frame by source. int indexOfLastSourceOccurrenceInHistory = -1; int sourceFrameCount = 0; int ix = 0; for (Iterator<SimulcastLayer> it = simulcastLayerFrameHistory.iterator(); it.hasNext(); ++ix) { if (it.next() == source) { if (indexOfLastSourceOccurrenceInHistory != -1) { // Prune simulcastLayerFrameHistory so that it does not // become unnecessarily long. it.remove(); } else if (++sourceFrameCount >= TIMEOUT_ON_FRAME_COUNT - 1) { // The span of TIMEOUT_ON_FRAME_COUNT number of frames // received by source only is to be examined for the // purposes of timeouts. The current method invocation // signals the receipt of 1 frame by source so // TIMEOUT_ON_FRAME_COUNT - 1 occurrences of source in // simulcastLayerFrameHistory are enough. indexOfLastSourceOccurrenceInHistory = ix; } } } if (indexOfLastSourceOccurrenceInHistory != -1) { // Presumably, if a SimulcastLayer is active, all SimulcastLayers // before it (according to SimulcastLayer's order) are active as // well. Consequently, timeouts may occur in SimulcastLayers which // are after source. boolean maybeTimeout = false; for (SimulcastLayer layer : layers) { if (maybeTimeout) { // There's no point in timing layer out if it's timed out // already. if (layer.isStreaming()) { maybeTimeout(source, pkt, layer, indexOfLastSourceOccurrenceInHistory); } } else if (layer == source) { maybeTimeout = true; } } } // As previously stated, the current method invocation signals the // receipt of 1 frame by source. simulcastLayerFrameHistory.add(0, source); // TODO Prune simulcastLayerFrameHistory by forgetting so that it does // not become too long. } /** * Determines whether {@code effect} has been paused/stopped by the remote peer. The determination * is based on counting frames and is triggered by the receipt of (a piece of) a new (video) frame * by {@code cause}.
* * @param cause the {@code SimulcastLayer} which has received (a piece of) a new (video) frame and * has thus triggered a check on {@code effect} * @param pkt the {@code RawPacket} which was received by {@code cause} and possibly influenced * the decision to trigger a check on {@code effect} * @param effect the {@code SimulcastLayer} which is to be checked whether it looks like it has * been paused/stopped by the remote peer * @param endIndexInSimulcastLayerFrameHistory the index in {@link #simulcastLayerFrameHistory} * (exclusive) up to which to search for frames received by {@code effect} */ private void maybeTimeout( SimulcastLayer cause, RawPacket pkt, SimulcastLayer effect, int endIndexInSimulcastLayerFrameHistory) { Iterator<SimulcastLayer> it = simulcastLayerFrameHistory.iterator(); boolean timeout = true; for (int ix = 0; it.hasNext() && ix < endIndexInSimulcastLayerFrameHistory; ++ix) { if (it.next() == effect) { timeout = false; break; } } if (timeout) { effect.maybeTimeout(pkt); if (!effect.isStreaming()) { // Since effect has been determined to have been paused/stopped // by the remote peer, its possible presence in // simulcastLayerFrameHistory is irrelevant now. In other words, // remove effect from simulcastLayerFrameHistory. while (it.hasNext()) { if (it.next() == effect) it.remove(); } } } } }
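/*
 * An illustrative, self-contained sketch (not part of the original source) of the frame-history
 * timeout idea implemented by simulcastLayerFrameStarted() and maybeTimeout() above, reduced to
 * plain integers: a layer is declared paused if, within the window in which another layer
 * received TIMEOUT_ON_FRAME_COUNT frames, it received none. All names here are hypothetical.
 */
import java.util.LinkedList;
import java.util.List;

class FrameHistoryTimeoutSketch {
  static final int TIMEOUT_ON_FRAME_COUNT = 5;

  /** The history of frame receipts, most recent first (cf. simulcastLayerFrameHistory). */
  private final List<Integer> history = new LinkedList<Integer>();

  /** Records the start of a frame on {@code layer} and reports whether {@code other} looks paused. */
  boolean frameStartedAndCheck(int layer, int other) {
    // Find the index of the (TIMEOUT_ON_FRAME_COUNT - 1)-th most recent frame of layer; the
    // current invocation itself counts as the remaining frame of the window.
    int sourceFrames = 0;
    int endIndex = -1;
    int ix = 0;
    for (int l : history) {
      if (l == layer && ++sourceFrames >= TIMEOUT_ON_FRAME_COUNT - 1) {
        endIndex = ix;
        break;
      }
      ix++;
    }
    boolean timedOut = false;
    if (endIndex != -1) {
      // other times out iff it received no frame within the window.
      timedOut = true;
      ix = 0;
      for (int l : history) {
        if (ix >= endIndex) break;
        if (l == other) {
          timedOut = false;
          break;
        }
        ix++;
      }
    }
    history.add(0, layer); // record the current frame
    return timedOut;
  }

  public static void main(String[] args) {
    FrameHistoryTimeoutSketch s = new FrameHistoryTimeoutSketch();
    s.frameStartedAndCheck(1, 0); // layer 1 sends one frame, then goes silent
    boolean paused = false;
    for (int i = 0; i < TIMEOUT_ON_FRAME_COUNT; i++) {
      paused = s.frameStartedAndCheck(0, 1); // the base layer keeps sending
    }
    System.out.println("layer 1 paused: " + paused); // expected: true
  }
}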
/** * Implements {@link PacketTransformer} for DTLS-SRTP. It is capable of working in pure DTLS mode * if the appropriate flag is set in <tt>DtlsControlImpl</tt>. * * @author Lyubomir Marinov */ public class DtlsPacketTransformer extends SinglePacketTransformer { /** The interval in milliseconds between successive tries to establish a DTLS connection. */ private static final long CONNECT_RETRY_INTERVAL = 500; /** * The maximum number of times that {@link #runInConnectThread(DTLSProtocol, TlsPeer, * DatagramTransport)} is to retry the invocations of {@link DTLSClientProtocol#connect(TlsClient, * DatagramTransport)} and {@link DTLSServerProtocol#accept(TlsServer, DatagramTransport)} in * anticipation of a successful connection. */ private static final int CONNECT_TRIES = 3; /** * The indicator which determines whether unencrypted packets sent or received through * <tt>DtlsPacketTransformer</tt> are to be dropped. The default value is <tt>false</tt>. * * @see #DROP_UNENCRYPTED_PKTS_PNAME */ private static final boolean DROP_UNENCRYPTED_PKTS; /** * The name of the <tt>ConfigurationService</tt> and/or <tt>System</tt> property which indicates * whether unencrypted packets sent or received through <tt>DtlsPacketTransformer</tt> are to be * dropped. The default value is <tt>false</tt>. */ private static final String DROP_UNENCRYPTED_PKTS_PNAME = DtlsPacketTransformer.class.getName() + ".dropUnencryptedPkts"; /** The length of the header of a DTLS record. */ static final int DTLS_RECORD_HEADER_LENGTH = 13; /** * The number of milliseconds a <tt>DtlsPacketTransformer</tt> is to wait on its {@link * #dtlsTransport} in order to receive a packet. */ private static final int DTLS_TRANSPORT_RECEIVE_WAITMILLIS = -1; /** * The <tt>Logger</tt> used by the <tt>DtlsPacketTransformer</tt> class and its instances to print * debug information. */ private static final Logger logger = Logger.getLogger(DtlsPacketTransformer.class); static { ConfigurationService cfg = LibJitsi.getConfigurationService(); boolean dropUnencryptedPkts = false; if (cfg == null) { String s = System.getProperty(DROP_UNENCRYPTED_PKTS_PNAME); if (s != null) dropUnencryptedPkts = Boolean.parseBoolean(s); } else { dropUnencryptedPkts = cfg.getBoolean(DROP_UNENCRYPTED_PKTS_PNAME, dropUnencryptedPkts); } DROP_UNENCRYPTED_PKTS = dropUnencryptedPkts; } /** * Determines whether a specific array of <tt>byte</tt>s appears to contain a DTLS record.
* * @param buf the array of <tt>byte</tt>s to be analyzed * @param off the offset within <tt>buf</tt> at which the analysis is to start * @param len the number of bytes within <tt>buf</tt> starting at <tt>off</tt> to be analyzed * @return <tt>true</tt> if the specified <tt>buf</tt> appears to contain a DTLS record */ public static boolean isDtlsRecord(byte[] buf, int off, int len) { boolean b = false; if (len >= DTLS_RECORD_HEADER_LENGTH) { short type = TlsUtils.readUint8(buf, off); switch (type) { case ContentType.alert: case ContentType.application_data: case ContentType.change_cipher_spec: case ContentType.handshake: int major = buf[off + 1] & 0xff; int minor = buf[off + 2] & 0xff; ProtocolVersion version = null; if ((major == ProtocolVersion.DTLSv10.getMajorVersion()) && (minor == ProtocolVersion.DTLSv10.getMinorVersion())) { version = ProtocolVersion.DTLSv10; } if ((version == null) && (major == ProtocolVersion.DTLSv12.getMajorVersion()) && (minor == ProtocolVersion.DTLSv12.getMinorVersion())) { version = ProtocolVersion.DTLSv12; } if (version != null) { int length = TlsUtils.readUint16(buf, off + 11); if (DTLS_RECORD_HEADER_LENGTH + length <= len) b = true; } break; default: // Unless a new ContentType has been defined by the Bouncy // Castle Crypto APIs, the specified buf does not represent a // DTLS record. break; } } return b; } /** The ID of the component which this instance works for/is associated with. */ private final int componentID; /** The <tt>RTPConnector</tt> which uses this <tt>PacketTransformer</tt>. */ private AbstractRTPConnector connector; /** The background <tt>Thread</tt> which initializes {@link #dtlsTransport}. */ private Thread connectThread; /** * The <tt>DatagramTransport</tt> implementation which adapts {@link #connector} and this * <tt>PacketTransformer</tt> to the terms of the Bouncy Castle Crypto APIs. */ private DatagramTransportImpl datagramTransport; /** * The <tt>DTLSTransport</tt> through which the actual packet transformations are being performed * by this instance. */ private DTLSTransport dtlsTransport; /** The <tt>MediaType</tt> of the stream which this instance works for/is associated with. */ private MediaType mediaType; /** * Whether rtcp-mux is in use. * * <p>If enabled, and this is the transformer for RTCP, it will not establish a DTLS session on * its own, but rather wait for the RTP transformer to do so, and reuse it to initialize the SRTP * transformer. */ private boolean rtcpmux = false; /** * The value of the <tt>setup</tt> SDP attribute defined by RFC 4145 "TCP-Based Media * Transport in the Session Description Protocol (SDP)" which determines whether this * instance acts as a DTLS client or a DTLS server. */ private DtlsControl.Setup setup; /** The {@code SRTPTransformer} (to be) used by this instance. */ private SinglePacketTransformer _srtpTransformer; /** * The indicator which determines whether the <tt>TlsPeer</tt> employed by this * <tt>PacketTransformer</tt> has raised an <tt>AlertDescription.close_notify</tt> * <tt>AlertLevel.warning</tt> i.e. the remote DTLS peer has closed the write side of the * connection. */ private boolean tlsPeerHasRaisedCloseNotifyWarning; /** The <tt>TransformEngine</tt> which has initialized this instance. */ private final DtlsTransformEngine transformEngine; /** * Initializes a new <tt>DtlsPacketTransformer</tt> instance. 
* * @param transformEngine the <tt>TransformEngine</tt> which is initializing the new instance * @param componentID the ID of the component for which the new instance is to work */ public DtlsPacketTransformer(DtlsTransformEngine transformEngine, int componentID) { this.transformEngine = transformEngine; this.componentID = componentID; } /** {@inheritDoc} */ @Override public synchronized void close() { // SrtpControl.start(MediaType) starts its associated TransformEngine. // We will use that mediaType to signal the normal stop then as well // i.e. we will call setMediaType(null) first. setMediaType(null); setConnector(null); } /** * Closes {@link #datagramTransport} if it is non-<tt>null</tt> and logs and swallows any * <tt>IOException</tt>. */ private void closeDatagramTransport() { if (datagramTransport != null) { try { datagramTransport.close(); } catch (IOException ioe) { // DatagramTransportImpl has no reason to fail because it is // merely an adapter of #connector and this PacketTransformer to // the terms of the Bouncy Castle Crypto API. logger.error("Failed to (properly) close " + datagramTransport.getClass(), ioe); } datagramTransport = null; } } /** * Determines whether {@link #runInConnectThread(DTLSProtocol, TlsPeer, DatagramTransport)} is to * try to establish a DTLS connection. * * @param i the number of tries remaining after the current one * @param datagramTransport * @return <tt>true</tt> to try to establish a DTLS connection; otherwise, <tt>false</tt> */ private boolean enterRunInConnectThreadLoop(int i, DatagramTransport datagramTransport) { if (i < 0 || i > CONNECT_TRIES) { return false; } else { Thread currentThread = Thread.currentThread(); synchronized (this) { if (i > 0 && i < CONNECT_TRIES - 1) { boolean interrupted = false; try { wait(CONNECT_RETRY_INTERVAL); } catch (InterruptedException ie) { interrupted = true; } if (interrupted) currentThread.interrupt(); } return currentThread.equals(this.connectThread) && datagramTransport.equals(this.datagramTransport); } } } /** * Gets the <tt>DtlsControl</tt> implementation associated with this instance. * * @return the <tt>DtlsControl</tt> implementation associated with this instance */ DtlsControlImpl getDtlsControl() { return getTransformEngine().getDtlsControl(); } /** * Gets the <tt>TransformEngine</tt> which has initialized this instance. * * @return the <tt>TransformEngine</tt> which has initialized this instance */ DtlsTransformEngine getTransformEngine() { return transformEngine; } /** * Handles a specific <tt>IOException</tt> which was thrown during the execution of {@link * #runInConnectThread(DTLSProtocol, TlsPeer, DatagramTransport)} while trying to establish a DTLS * connection * * @param ioe the <tt>IOException</tt> to handle * @param msg the human-readable message to log about the specified <tt>ioe</tt> * @param i the number of tries remaining after the current one * @return <tt>true</tt> if the specified <tt>ioe</tt> was successfully handled; <tt>false</tt>, * otherwise */ private boolean handleRunInConnectThreadException(IOException ioe, String msg, int i) { // SrtpControl.start(MediaType) starts its associated TransformEngine. // We will use that mediaType to signal the normal stop then as well // i.e. we will ignore exception after the procedure to stop this // PacketTransformer has begun. 
if (mediaType == null) return false; if (ioe instanceof TlsFatalAlert) { TlsFatalAlert tfa = (TlsFatalAlert) ioe; short alertDescription = tfa.getAlertDescription(); if (alertDescription == AlertDescription.unexpected_message) { msg += " Received fatal unexpected message."; if (i == 0 || !Thread.currentThread().equals(connectThread) || connector == null || mediaType == null) { msg += " Giving up after " + (CONNECT_TRIES - i) + " retries."; } else { msg += " Will retry."; logger.error(msg, ioe); return true; } } else { msg += " Received fatal alert " + alertDescription + "."; } } logger.error(msg, ioe); return false; } /** * Tries to initialize {@link #_srtpTransformer} by using the <tt>DtlsPacketTransformer</tt> for * RTP. * * @return the (possibly updated) value of {@link #_srtpTransformer}. */ private SinglePacketTransformer initializeSRTCPTransformerFromRtp() { DtlsPacketTransformer rtpTransformer = (DtlsPacketTransformer) getTransformEngine().getRTPTransformer(); // Guard against recursion (which should be practically impossible). if (rtpTransformer != this) { PacketTransformer srtpTransformer = rtpTransformer.waitInitializeAndGetSRTPTransformer(); if (srtpTransformer instanceof SRTPTransformer) { synchronized (this) { if (_srtpTransformer == null) { _srtpTransformer = new SRTCPTransformer((SRTPTransformer) srtpTransformer); // For the sake of completeness, we notify whenever we // assign to _srtpTransformer. notifyAll(); } } } } return _srtpTransformer; } /** * Initializes a new <tt>SRTPTransformer</tt> instance with a specific (negotiated) * <tt>SRTPProtectionProfile</tt> and the keying material specified by a specific * <tt>TlsContext</tt>. * * @param srtpProtectionProfile the (negotiated) <tt>SRTPProtectionProfile</tt> to initialize the * new instance with * @param tlsContext the <tt>TlsContext</tt> which represents the keying material * @return a new <tt>SRTPTransformer</tt> instance initialized with <tt>srtpProtectionProfile</tt> * and <tt>tlsContext</tt> */ private SinglePacketTransformer initializeSRTPTransformer( int srtpProtectionProfile, TlsContext tlsContext) { boolean rtcp; switch (componentID) { case Component.RTCP: rtcp = true; break; case Component.RTP: rtcp = false; break; default: throw new IllegalStateException("componentID"); } int cipher_key_length; int cipher_salt_length; int cipher; int auth_function; int auth_key_length; int RTCP_auth_tag_length, RTP_auth_tag_length; switch (srtpProtectionProfile) { case SRTPProtectionProfile.SRTP_AES128_CM_HMAC_SHA1_32: cipher_key_length = 128 / 8; cipher_salt_length = 112 / 8; cipher = SRTPPolicy.AESCM_ENCRYPTION; auth_function = SRTPPolicy.HMACSHA1_AUTHENTICATION; auth_key_length = 160 / 8; RTCP_auth_tag_length = 80 / 8; RTP_auth_tag_length = 32 / 8; break; case SRTPProtectionProfile.SRTP_AES128_CM_HMAC_SHA1_80: cipher_key_length = 128 / 8; cipher_salt_length = 112 / 8; cipher = SRTPPolicy.AESCM_ENCRYPTION; auth_function = SRTPPolicy.HMACSHA1_AUTHENTICATION; auth_key_length = 160 / 8; RTCP_auth_tag_length = RTP_auth_tag_length = 80 / 8; break; case SRTPProtectionProfile.SRTP_NULL_HMAC_SHA1_32: cipher_key_length = 0; cipher_salt_length = 0; cipher = SRTPPolicy.NULL_ENCRYPTION; auth_function = SRTPPolicy.HMACSHA1_AUTHENTICATION; auth_key_length = 160 / 8; RTCP_auth_tag_length = 80 / 8; RTP_auth_tag_length = 32 / 8; break; case SRTPProtectionProfile.SRTP_NULL_HMAC_SHA1_80: cipher_key_length = 0; cipher_salt_length = 0; cipher = SRTPPolicy.NULL_ENCRYPTION; auth_function =
SRTPPolicy.HMACSHA1_AUTHENTICATION; auth_key_length = 160 / 8; RTCP_auth_tag_length = RTP_auth_tag_length = 80 / 8; break; default: throw new IllegalArgumentException("srtpProtectionProfile"); } byte[] keyingMaterial = tlsContext.exportKeyingMaterial( ExporterLabel.dtls_srtp, null, 2 * (cipher_key_length + cipher_salt_length)); byte[] client_write_SRTP_master_key = new byte[cipher_key_length]; byte[] server_write_SRTP_master_key = new byte[cipher_key_length]; byte[] client_write_SRTP_master_salt = new byte[cipher_salt_length]; byte[] server_write_SRTP_master_salt = new byte[cipher_salt_length]; byte[][] keyingMaterialValues = { client_write_SRTP_master_key, server_write_SRTP_master_key, client_write_SRTP_master_salt, server_write_SRTP_master_salt }; for (int i = 0, keyingMaterialOffset = 0; i < keyingMaterialValues.length; i++) { byte[] keyingMaterialValue = keyingMaterialValues[i]; System.arraycopy( keyingMaterial, keyingMaterialOffset, keyingMaterialValue, 0, keyingMaterialValue.length); keyingMaterialOffset += keyingMaterialValue.length; } SRTPPolicy srtcpPolicy = new SRTPPolicy( cipher, cipher_key_length, auth_function, auth_key_length, RTCP_auth_tag_length, cipher_salt_length); SRTPPolicy srtpPolicy = new SRTPPolicy( cipher, cipher_key_length, auth_function, auth_key_length, RTP_auth_tag_length, cipher_salt_length); SRTPContextFactory clientSRTPContextFactory = new SRTPContextFactory( /* sender */ tlsContext instanceof TlsClientContext, client_write_SRTP_master_key, client_write_SRTP_master_salt, srtpPolicy, srtcpPolicy); SRTPContextFactory serverSRTPContextFactory = new SRTPContextFactory( /* sender */ tlsContext instanceof TlsServerContext, server_write_SRTP_master_key, server_write_SRTP_master_salt, srtpPolicy, srtcpPolicy); SRTPContextFactory forwardSRTPContextFactory; SRTPContextFactory reverseSRTPContextFactory; if (tlsContext instanceof TlsClientContext) { forwardSRTPContextFactory = clientSRTPContextFactory; reverseSRTPContextFactory = serverSRTPContextFactory; } else if (tlsContext instanceof TlsServerContext) { forwardSRTPContextFactory = serverSRTPContextFactory; reverseSRTPContextFactory = clientSRTPContextFactory; } else { throw new IllegalArgumentException("tlsContext"); } SinglePacketTransformer srtpTransformer; if (rtcp) { srtpTransformer = new SRTCPTransformer(forwardSRTPContextFactory, reverseSRTPContextFactory); } else { srtpTransformer = new SRTPTransformer(forwardSRTPContextFactory, reverseSRTPContextFactory); } return srtpTransformer; } /** * Notifies this instance that the DTLS record layer associated with a specific <tt>TlsPeer</tt> * has raised an alert. * * @param tlsPeer the <tt>TlsPeer</tt> whose associated DTLS record layer has raised an alert * @param alertLevel {@link AlertLevel} * @param alertDescription {@link AlertDescription} * @param message a human-readable message explaining what caused the alert. May be <tt>null</tt>. * @param cause the exception that caused the alert to be raised. May be <tt>null</tt>. */ void notifyAlertRaised( TlsPeer tlsPeer, short alertLevel, short alertDescription, String message, Exception cause) { if (AlertLevel.warning == alertLevel && AlertDescription.close_notify == alertDescription) { tlsPeerHasRaisedCloseNotifyWarning = true; } } /** {@inheritDoc} */ @Override public RawPacket reverseTransform(RawPacket pkt) { byte[] buf = pkt.getBuffer(); int off = pkt.getOffset(); int len = pkt.getLength(); if (isDtlsRecord(buf, off, len)) { if (rtcpmux && Component.RTCP == componentID) { // This should never happen. 
logger.warn( "Dropping a DTLS record, because it was received on the" + " RTCP channel while rtcpmux is in use."); return null; } boolean receive; synchronized (this) { if (datagramTransport == null) { receive = false; } else { datagramTransport.queueReceive(buf, off, len); receive = true; } } if (receive) { DTLSTransport dtlsTransport = this.dtlsTransport; if (dtlsTransport == null) { // The specified pkt looks like a DTLS record and it has // been consumed for the purposes of the secure channel // represented by this PacketTransformer. pkt = null; } else { try { int receiveLimit = dtlsTransport.getReceiveLimit(); int delta = receiveLimit - len; if (delta > 0) { pkt.grow(delta); buf = pkt.getBuffer(); off = pkt.getOffset(); len = pkt.getLength(); } else if (delta < 0) { pkt.shrink(-delta); buf = pkt.getBuffer(); off = pkt.getOffset(); len = pkt.getLength(); } int received = dtlsTransport.receive(buf, off, len, DTLS_TRANSPORT_RECEIVE_WAITMILLIS); if (received <= 0) { // No application data was decoded. pkt = null; } else { delta = len - received; if (delta > 0) pkt.shrink(delta); } } catch (IOException ioe) { pkt = null; // SrtpControl.start(MediaType) starts its associated // TransformEngine. We will use that mediaType to signal // the normal stop then as well i.e. we will ignore // exception after the procedure to stop this // PacketTransformer has begun. if (mediaType != null && !tlsPeerHasRaisedCloseNotifyWarning) { logger.error("Failed to decode a DTLS record!", ioe); } } } } else { // The specified pkt looks like a DTLS record but it is // unexpected in the current state of the secure channel // represented by this PacketTransformer. This PacketTransformer // has not been started (successfully) or has been closed. pkt = null; } } else if (transformEngine.isSrtpDisabled()) { // In pure DTLS mode only DTLS records pass through. pkt = null; } else { // DTLS-SRTP has not been initialized yet or has failed to // initialize. SinglePacketTransformer srtpTransformer = waitInitializeAndGetSRTPTransformer(); if (srtpTransformer != null) pkt = srtpTransformer.reverseTransform(pkt); else if (DROP_UNENCRYPTED_PKTS) pkt = null; // XXX Else, it is our explicit policy to let the received packet // pass through and rely on the SrtpListener to notify the user that // the session is not secured. } return pkt; } /** * Runs in {@link #connectThread} to initialize {@link #dtlsTransport}. 
* * @param dtlsProtocol * @param tlsPeer * @param datagramTransport */ private void runInConnectThread( DTLSProtocol dtlsProtocol, TlsPeer tlsPeer, DatagramTransport datagramTransport) { DTLSTransport dtlsTransport = null; final boolean srtp = !transformEngine.isSrtpDisabled(); int srtpProtectionProfile = 0; TlsContext tlsContext = null; // DTLS client if (dtlsProtocol instanceof DTLSClientProtocol) { DTLSClientProtocol dtlsClientProtocol = (DTLSClientProtocol) dtlsProtocol; TlsClientImpl tlsClient = (TlsClientImpl) tlsPeer; for (int i = CONNECT_TRIES - 1; i >= 0; i--) { if (!enterRunInConnectThreadLoop(i, datagramTransport)) break; try { dtlsTransport = dtlsClientProtocol.connect(tlsClient, datagramTransport); break; } catch (IOException ioe) { if (!handleRunInConnectThreadException( ioe, "Failed to connect this DTLS client to a DTLS" + " server!", i)) { break; } } } if (dtlsTransport != null && srtp) { srtpProtectionProfile = tlsClient.getChosenProtectionProfile(); tlsContext = tlsClient.getContext(); } } // DTLS server else if (dtlsProtocol instanceof DTLSServerProtocol) { DTLSServerProtocol dtlsServerProtocol = (DTLSServerProtocol) dtlsProtocol; TlsServerImpl tlsServer = (TlsServerImpl) tlsPeer; for (int i = CONNECT_TRIES - 1; i >= 0; i--) { if (!enterRunInConnectThreadLoop(i, datagramTransport)) break; try { dtlsTransport = dtlsServerProtocol.accept(tlsServer, datagramTransport); break; } catch (IOException ioe) { if (!handleRunInConnectThreadException( ioe, "Failed to accept a connection from a DTLS client!", i)) { break; } } } if (dtlsTransport != null && srtp) { srtpProtectionProfile = tlsServer.getChosenProtectionProfile(); tlsContext = tlsServer.getContext(); } } else { // It MUST be either a DTLS client or a DTLS server. throw new IllegalStateException("dtlsProtocol"); } SinglePacketTransformer srtpTransformer = (dtlsTransport == null || !srtp) ? null : initializeSRTPTransformer(srtpProtectionProfile, tlsContext); boolean closeSRTPTransformer; synchronized (this) { if (Thread.currentThread().equals(this.connectThread) && datagramTransport.equals(this.datagramTransport)) { this.dtlsTransport = dtlsTransport; _srtpTransformer = srtpTransformer; notifyAll(); } closeSRTPTransformer = (_srtpTransformer != srtpTransformer); } if (closeSRTPTransformer && srtpTransformer != null) srtpTransformer.close(); } /** * Sends the data contained in a specific byte array as application data through the DTLS * connection of this <tt>DtlsPacketTransformer</tt>. * * @param buf the byte array containing data to send. * @param off the offset in <tt>buf</tt> where the data begins. * @param len the length of data to send. */ public void sendApplicationData(byte[] buf, int off, int len) { DTLSTransport dtlsTransport = this.dtlsTransport; Throwable throwable = null; if (dtlsTransport != null) { try { dtlsTransport.send(buf, off, len); } catch (IOException ioe) { throwable = ioe; } } else { throwable = new NullPointerException("dtlsTransport"); } if (throwable != null) { // SrtpControl.start(MediaType) starts its associated // TransformEngine. We will use that mediaType to signal the normal // stop then as well i.e. we will ignore exception after the // procedure to stop this PacketTransformer has begun. if (mediaType != null && !tlsPeerHasRaisedCloseNotifyWarning) { logger.error("Failed to send application data over DTLS transport: ", throwable); } } } /** * Sets the <tt>RTPConnector</tt> which is to use or uses this <tt>PacketTransformer</tt>. 
* * @param connector the <tt>RTPConnector</tt> which is to use or uses this * <tt>PacketTransformer</tt> */ void setConnector(AbstractRTPConnector connector) { if (this.connector != connector) { this.connector = connector; DatagramTransportImpl datagramTransport = this.datagramTransport; if (datagramTransport != null) datagramTransport.setConnector(connector); } } /** * Sets the <tt>MediaType</tt> of the stream which this instance is to work for/be associated * with. * * @param mediaType the <tt>MediaType</tt> of the stream which this instance is to work for/be * associated with */ synchronized void setMediaType(MediaType mediaType) { if (this.mediaType != mediaType) { MediaType oldValue = this.mediaType; this.mediaType = mediaType; if (oldValue != null) stop(); if (this.mediaType != null) start(); } } /** * Enables/disables rtcp-mux. * * @param rtcpmux whether to enable or disable. */ void setRtcpmux(boolean rtcpmux) { this.rtcpmux = rtcpmux; } /** * Sets the DTLS protocol according to which this <tt>DtlsPacketTransformer</tt> is to act either * as a DTLS server or a DTLS client. * * @param setup the value of the <tt>setup</tt> SDP attribute to set on this instance in order to * determine whether this instance is to act as a DTLS client or a DTLS server */ void setSetup(DtlsControl.Setup setup) { if (this.setup != setup) this.setup = setup; } /** Starts this <tt>PacketTransformer</tt>. */ private synchronized void start() { if (this.datagramTransport != null) { if (this.connectThread == null && dtlsTransport == null) { logger.warn( getClass().getName() + " has been started but has failed to establish" + " the DTLS connection!"); } return; } if (rtcpmux && Component.RTCP == componentID) { // In the case of rtcp-mux, the RTCP transformer does not create // a DTLS session. The SRTP context (_srtpTransformer) will be // initialized on demand using initializeSRTCPTransformerFromRtp(). return; } AbstractRTPConnector connector = this.connector; if (connector == null) throw new NullPointerException("connector"); DtlsControl.Setup setup = this.setup; SecureRandom secureRandom = DtlsControlImpl.createSecureRandom(); final DTLSProtocol dtlsProtocolObj; final TlsPeer tlsPeer; if (DtlsControl.Setup.ACTIVE.equals(setup)) { dtlsProtocolObj = new DTLSClientProtocol(secureRandom); tlsPeer = new TlsClientImpl(this); } else { dtlsProtocolObj = new DTLSServerProtocol(secureRandom); tlsPeer = new TlsServerImpl(this); } tlsPeerHasRaisedCloseNotifyWarning = false; final DatagramTransportImpl datagramTransport = new DatagramTransportImpl(componentID); datagramTransport.setConnector(connector); Thread connectThread = new Thread() { @Override public void run() { try { runInConnectThread(dtlsProtocolObj, tlsPeer, datagramTransport); } finally { if (Thread.currentThread().equals(DtlsPacketTransformer.this.connectThread)) { DtlsPacketTransformer.this.connectThread = null; } } } }; connectThread.setDaemon(true); connectThread.setName(DtlsPacketTransformer.class.getName() + ".connectThread"); this.connectThread = connectThread; this.datagramTransport = datagramTransport; boolean started = false; try { connectThread.start(); started = true; } finally { if (!started) { if (connectThread.equals(this.connectThread)) this.connectThread = null; if (datagramTransport.equals(this.datagramTransport)) this.datagramTransport = null; } } notifyAll(); } /** Stops this <tt>PacketTransformer</tt>. 
*/ private synchronized void stop() { connectThread = null; try { // The dtlsTransport and _srtpTransformer SHOULD be closed, of // course. The datagramTransport MUST be closed. if (dtlsTransport != null) { try { dtlsTransport.close(); } catch (IOException ioe) { logger.error("Failed to (properly) close " + dtlsTransport.getClass(), ioe); } dtlsTransport = null; } if (_srtpTransformer != null) { _srtpTransformer.close(); _srtpTransformer = null; } } finally { try { closeDatagramTransport(); } finally { notifyAll(); } } } /** {@inheritDoc} */ @Override public RawPacket transform(RawPacket pkt) { byte[] buf = pkt.getBuffer(); int off = pkt.getOffset(); int len = pkt.getLength(); // If the specified pkt represents a DTLS record, then it should pass // through this PacketTransformer (e.g. it has been sent through // DatagramTransportImpl). if (isDtlsRecord(buf, off, len)) return pkt; // SRTP if (!transformEngine.isSrtpDisabled()) { // DTLS-SRTP has not been initialized yet or has failed to // initialize. SinglePacketTransformer srtpTransformer = waitInitializeAndGetSRTPTransformer(); if (srtpTransformer != null) pkt = srtpTransformer.transform(pkt); else if (DROP_UNENCRYPTED_PKTS) pkt = null; // XXX Else, it is our explicit policy to let the received packet // pass through and rely on the SrtpListener to notify the user that // the session is not secured. } // Pure/non-SRTP DTLS else { // The specified pkt will pass through this PacketTransformer only // if it gets transformed into a DTLS record. pkt = null; sendApplicationData(buf, off, len); } return pkt; } /** * Gets the {@code SRTPTransformer} used by this instance. If {@link #_srtpTransformer} does not * exist (yet) and the state of this instance indicates that its initialization is in progress, * then blocks until {@code _srtpTransformer} is initialized and returns it. * * @return the {@code SRTPTransformer} used by this instance */ private SinglePacketTransformer waitInitializeAndGetSRTPTransformer() { SinglePacketTransformer srtpTransformer = _srtpTransformer; if (srtpTransformer != null) return srtpTransformer; if (rtcpmux && Component.RTCP == componentID) return initializeSRTCPTransformerFromRtp(); // XXX It is our explicit policy to rely on the SrtpListener to notify // the user that the session is not secure. Unfortunately, (1) the // SrtpListener is not supported by this DTLS SrtpControl implementation // and (2) encrypted packets may arrive soon enough to be let through // while _srtpTransformer is still initializing. Consequently, we will // block and wait for _srtpTransformer to initialize. boolean interrupted = false; try { synchronized (this) { do { srtpTransformer = _srtpTransformer; if (srtpTransformer != null) break; // _srtpTransformer is initialized if (connectThread == null) { // Though _srtpTransformer is NOT initialized, there is // no point in waiting because there is no one to // initialize it. break; } try { // It does not really matter (enough) how much we wait // here because we wait in a loop. long timeout = CONNECT_TRIES * CONNECT_RETRY_INTERVAL; wait(timeout); } catch (InterruptedException ie) { interrupted = true; } } while (true); } } finally { if (interrupted) Thread.currentThread().interrupt(); } return srtpTransformer; } }
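/*
 * An illustrative sketch (not part of the original source) exercising isDtlsRecord() above with a
 * hand-built record. It assumes the 13-byte DTLS record header layout: type(1), version(2),
 * epoch(2), sequence number(6), length(2); 0xFEFD is the on-the-wire version of DTLS 1.2.
 */
class IsDtlsRecordSketch {
  public static void main(String[] args) {
    byte[] buf = new byte[13 + 4]; // header plus a 4-byte payload
    buf[0] = 22;                   // ContentType.handshake
    buf[1] = (byte) 0xFE;          // DTLS 1.2, major version byte
    buf[2] = (byte) 0xFD;          // DTLS 1.2, minor version byte
    // bytes 3..10 (epoch and sequence number) are left zero
    buf[11] = 0;                   // declared length, high byte
    buf[12] = 4;                   // declared length, low byte: the 4-byte payload
    System.out.println(DtlsPacketTransformer.isDtlsRecord(buf, 0, buf.length)); // true
    buf[1] = 3;                    // a TLS (not DTLS) version byte
    System.out.println(DtlsPacketTransformer.isDtlsRecord(buf, 0, buf.length)); // false
  }
}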
/** * Manages the set of <tt>Endpoint</tt>s whose video streams are being forwarded to a specific * <tt>VideoChannel</tt> (i.e. the <tt>VideoChannel</tt>'s <tt>LastN</tt> set). * * @author Lyubomir Marinov * @author George Politis * @author Boris Grozev */ public class LastNController { /** * The <tt>Logger</tt> used by the <tt>LastNController</tt> class and its instances to print debug * information. */ private static final Logger logger = Logger.getLogger(LastNController.class); /** An empty list instance. */ private static final List<String> INITIAL_EMPTY_LIST = Collections.unmodifiableList(new LinkedList<String>()); /** The set of <tt>Endpoints</tt> whose video streams are currently being forwarded. */ private List<String> forwardedEndpoints = INITIAL_EMPTY_LIST; /** * The list of all <tt>Endpoint</tt>s in the conference, ordered by the last time they were * elected dominant speaker. */ private List<String> conferenceSpeechActivityEndpoints = INITIAL_EMPTY_LIST; /** * The list of endpoints which have been explicitly marked as 'pinned' and whose video streams * should always be forwarded. */ private List<String> pinnedEndpoints = INITIAL_EMPTY_LIST; /** * The maximum number of endpoints whose video streams will be forwarded to the endpoint, as * externally configured (by the client, by the focus agent, or by default configuration). A value * of {@code -1} means that there is no limit, and all endpoints' video streams will be forwarded. */ private int lastN = -1; /** * The current limit to the number of endpoints whose video streams will be forwarded to the * endpoint. This value can be changed by videobridge (i.e. when Adaptive Last N is used), but it * must not exceed the value of {@link #lastN}. A value of {@code -1} means that there is no * limit, and all endpoints' video streams will be forwarded. */ private int currentLastN = -1; /** Whether or not adaptive lastN is in use. */ private boolean adaptiveLastN = false; /** Whether or not adaptive simulcast is in use. */ private boolean adaptiveSimulcast = false; /** * The instance which implements <tt>Adaptive LastN</tt> or <tt>Adaptive Simulcast</tt> on our * behalf. */ private BitrateController bitrateController = null; /** The {@link VideoChannel} which owns this {@link LastNController}. */ private final VideoChannel channel; /** The ID of the endpoint of {@link #channel}. */ private String endpointId; /** * Initializes a new {@link LastNController} instance which is to belong to a particular {@link * VideoChannel}. * * @param channel the owning {@link VideoChannel}. */ public LastNController(VideoChannel channel) { this.channel = channel; } /** * @return the maximum number of endpoints whose video streams will be forwarded to the endpoint. * A value of {@code -1} means that there is no limit. */ public int getLastN() { return lastN; } /** @return the set of <tt>Endpoints</tt> whose video streams are currently being forwarded. */ public List<String> getForwardedEndpoints() { return forwardedEndpoints; } /** * Sets the value of {@code lastN}, that is, the maximum number of endpoints whose video streams * will be forwarded to the endpoint. A value of {@code -1} means that there is no limit. * * @param lastN the value to set. */ public void setLastN(int lastN) { if (logger.isDebugEnabled()) { logger.debug("Setting lastN=" + lastN); } List<String> endpointsToAskForKeyframe = null; synchronized (this) { // Since we have the lock anyway, call update() inside, so it // doesn't have to obtain it again.
// But keep the call to askForKeyframes() outside. if (this.lastN != lastN) { // If we're just now enabling lastN, we don't need to ask for // keyframes as all streams were being forwarded already. boolean update = this.lastN != -1; this.lastN = lastN; if (lastN >= 0 && (currentLastN < 0 || currentLastN > lastN)) { currentLastN = lastN; } if (update) { endpointsToAskForKeyframe = update(); } } } askForKeyframes(endpointsToAskForKeyframe); } /** Closes this {@link LastNController}. */ public void close() { if (bitrateController != null) { try { bitrateController.close(); } finally { bitrateController = null; } } } /** * Sets the list of "pinned" endpoints (i.e. endpoints for which video should always be forwarded, * regardless of {@code lastN}). * * @param newPinnedEndpointIds the list of endpoint IDs to set. */ public void setPinnedEndpointIds(List<String> newPinnedEndpointIds) { if (logger.isDebugEnabled()) { logger.debug("Setting pinned endpoints: " + newPinnedEndpointIds.toString()); } List<String> endpointsToAskForKeyframe = null; synchronized (this) { // Since we have the lock anyway, call update() inside, so it // doesn't have to obtain it again. But keep the call to // askForKeyframes() outside. if (!pinnedEndpoints.equals(newPinnedEndpointIds)) { pinnedEndpoints = Collections.unmodifiableList(newPinnedEndpointIds); endpointsToAskForKeyframe = update(); } } askForKeyframes(endpointsToAskForKeyframe); } /** * Checks whether RTP packets from {@code sourceChannel} should be forwarded to {@link #channel}. * * @param sourceChannel the channel. * @return {@code true} iff RTP packets from {@code sourceChannel} should be forwarded to {@link * #channel}. */ public boolean isForwarded(Channel sourceChannel) { if (lastN < 0 && currentLastN < 0) { // If Last-N is disabled, we forward everything. return true; } if (sourceChannel == null) { logger.warn("Invalid sourceChannel: null."); return false; } Endpoint channelEndpoint = sourceChannel.getEndpoint(); if (channelEndpoint == null) { logger.warn("sourceChannel has no endpoint."); return false; } if (forwardedEndpoints == INITIAL_EMPTY_LIST) { // LastN is enabled, but we haven't yet initialized the list of // endpoints in the conference. initializeConferenceEndpoints(); } // This may look like a place to optimize, because we query an unordered // list (in O(n)) and it executes on each video packet if lastN is // enabled. However, the size of forwardedEndpoints is restricted to // lastN and so small enough that it is not worth optimizing. return forwardedEndpoints.contains(channelEndpoint.getID()); } /** @return the number of streams currently being forwarded. */ public int getN() { return forwardedEndpoints.size(); } /** @return the list of "pinned" endpoints. */ public List<String> getPinnedEndpoints() { return pinnedEndpoints; } /** * Notifies this instance that the ordered list of endpoints in the conference has changed. * * @param endpoints the new ordered list of endpoints in the conference. * @return the list of endpoints which were added to the list of forwarded endpoints as a result * of the call, or {@code null} if none were added. */ public List<Endpoint> speechActivityEndpointsChanged(List<Endpoint> endpoints) { List<String> newEndpointIdList = getIDs(endpoints); List<String> enteringEndpointIds = speechActivityEndpointIdsChanged(newEndpointIdList); if (logger.isDebugEnabled()) { logger.debug( "New list of conference endpoints: " + newEndpointIdList.toString() + "; entering endpoints: " + (enteringEndpointIds == null ?
"none" : enteringEndpointIds.toString())); } List<Endpoint> ret = new LinkedList<>(); if (enteringEndpointIds != null) { for (Endpoint endpoint : endpoints) { if (enteringEndpointIds.contains(endpoint.getID())) { ret.add(endpoint); } } } return ret; } /** * Notifies this instance that the ordered list of endpoints (specified as a list of endpoint IDs) * in the conference has changed. * * @param endpointIds the new ordered list of endpoints (specified as a list of endpoint IDs) in * the conference. * @return the list of IDs of endpoints which were added to the list of forwarded endpoints as a * result of the call. */ private synchronized List<String> speechActivityEndpointIdsChanged(List<String> endpointIds) { if (conferenceSpeechActivityEndpoints.equals(endpointIds)) { if (logger.isDebugEnabled()) { logger.debug("Conference endpoints have not changed."); } return null; } else { List<String> newEndpoints = new LinkedList<>(endpointIds); newEndpoints.removeAll(conferenceSpeechActivityEndpoints); conferenceSpeechActivityEndpoints = endpointIds; return update(newEndpoints); } } /** * Enables or disables the "adaptive last-n" mode, depending on the value of {@code * adaptiveLastN}. * * @param adaptiveLastN {@code true} to enable, {@code false} to disable */ public void setAdaptiveLastN(boolean adaptiveLastN) { if (this.adaptiveLastN != adaptiveLastN) { if (adaptiveLastN && bitrateController == null) { bitrateController = new BitrateController(this, channel); } this.adaptiveLastN = adaptiveLastN; } } /** * Enables or disables the "adaptive simulcast" mod, depending on the value of {@code * adaptiveLastN}. * * @param adaptiveSimulcast {@code true} to enable, {@code false} to disable. */ public void setAdaptiveSimulcast(boolean adaptiveSimulcast) { if (this.adaptiveSimulcast != adaptiveSimulcast) { if (adaptiveSimulcast && bitrateController == null) { bitrateController = new BitrateController(this, channel); } this.adaptiveSimulcast = adaptiveSimulcast; } } /** @return {@code true} iff the "adaptive last-n" mode is enabled. */ public boolean getAdaptiveLastN() { return adaptiveLastN; } /** @return {@code true} iff the "adaptive simulcast" mode is enabled. */ public boolean getAdaptiveSimulcast() { return adaptiveSimulcast; } /** * Recalculates the list of forwarded endpoints based on the current values of the various * parameters of this instance ({@link #lastN}, {@link #conferenceSpeechActivityEndpoints}, {@link * #pinnedEndpoints}). * * @return the list of IDs of endpoints which were added to {@link #forwardedEndpoints} (i.e. of * endpoints * "entering last-n") as a result of this call. Returns {@code null} if no * endpoints were added. */ private synchronized List<String> update() { return update(null); } /** * Recalculates the list of forwarded endpoints based on the current values of the various * parameters of this instance ({@link #lastN}, {@link #conferenceSpeechActivityEndpoints}, {@link * #pinnedEndpoints}). * * @param newConferenceEndpoints A list of endpoints which entered the conference since the last * call to this method. They need not be asked for keyframes, because they were never filtered * by this {@link #LastNController(VideoChannel)}. * @return the list of IDs of endpoints which were added to {@link #forwardedEndpoints} (i.e. of * endpoints * "entering last-n") as a result of this call. Returns {@code null} if no * endpoints were added. 
*/ private synchronized List<String> update(List<String> newConferenceEndpoints) { List<String> newForwardedEndpoints = new LinkedList<>(); String ourEndpointId = getEndpointId(); if (conferenceSpeechActivityEndpoints == INITIAL_EMPTY_LIST) { conferenceSpeechActivityEndpoints = getIDs(channel.getConferenceSpeechActivity().getEndpoints()); newConferenceEndpoints = conferenceSpeechActivityEndpoints; } if (lastN < 0 && currentLastN < 0) { // Last-N is disabled, we forward everything. newForwardedEndpoints.addAll(conferenceSpeechActivityEndpoints); if (ourEndpointId != null) { newForwardedEndpoints.remove(ourEndpointId); } } else { // Here we have lastN >= 0 || currentLastN >= 0 which implies // currentLastN >= 0. // Pinned endpoints are always forwarded. newForwardedEndpoints.addAll(getPinnedEndpoints()); // As long as they are still endpoints in the conference. newForwardedEndpoints.retainAll(conferenceSpeechActivityEndpoints); if (newForwardedEndpoints.size() > currentLastN) { // What do we want in this case? It looks like a contradictory // request from the client, but maybe it makes for a good API // on the client to allow the pinned to override last-n. // Unfortunately, this will not play well with Adaptive-Last-N // or changes to Last-N for other reasons. } else if (newForwardedEndpoints.size() < currentLastN) { for (String endpointId : conferenceSpeechActivityEndpoints) { if (newForwardedEndpoints.size() < currentLastN) { if (!endpointId.equals(ourEndpointId) && !newForwardedEndpoints.contains(endpointId)) { newForwardedEndpoints.add(endpointId); } } else { break; } } } } List<String> enteringEndpoints; if (forwardedEndpoints.equals(newForwardedEndpoints)) { // We want forwardedEndpoints != INITIAL_EMPTY_LIST forwardedEndpoints = newForwardedEndpoints; enteringEndpoints = null; } else { enteringEndpoints = new ArrayList<>(newForwardedEndpoints); enteringEndpoints.removeAll(forwardedEndpoints); if (logger.isDebugEnabled()) { logger.debug( "Forwarded endpoints changed: " + forwardedEndpoints.toString() + " -> " + newForwardedEndpoints.toString() + ". Entering: " + enteringEndpoints.toString()); } forwardedEndpoints = Collections.unmodifiableList(newForwardedEndpoints); if (lastN >= 0 || currentLastN >= 0) { // TODO: we may want to do this asynchronously. channel.sendLastNEndpointsChangeEventOnDataChannel(forwardedEndpoints, enteringEndpoints); } } // If lastN is disabled, the endpoints entering forwardedEndpoints were // never filtered, so they don't need to be asked for keyframes. if (lastN < 0 && currentLastN < 0) { enteringEndpoints = null; } if (enteringEndpoints != null && newConferenceEndpoints != null) { // Endpoints just entering the conference need not be asked for // keyframes. enteringEndpoints.removeAll(newConferenceEndpoints); } return enteringEndpoints; } /** * Sends a keyframe request to the endpoints specified in {@code endpointIds} * * @param endpointIds the list of IDs of endpoints to which to send a request for a keyframe. */ private void askForKeyframes(List<String> endpointIds) { // TODO: Execute asynchronously. if (endpointIds != null && !endpointIds.isEmpty()) { channel.getContent().askForKeyframesById(endpointIds); } } /** @return the ID of the endpoint of our channel. 
*/ private String getEndpointId() { if (endpointId == null) { Endpoint endpoint = channel.getEndpoint(); if (endpoint != null) { endpointId = endpoint.getID(); } } return endpointId; } /** * Initializes the local list of endpoints ({@link #speechActivityEndpointsChanged(List)}) with * the current endpoints from the conference. */ public synchronized void initializeConferenceEndpoints() { speechActivityEndpointsChanged(channel.getConferenceSpeechActivity().getEndpoints()); if (logger.isDebugEnabled()) { logger.debug( "Initialized the list of endpoints: " + conferenceSpeechActivityEndpoints.toString()); } } /** * Extracts a list of endpoint IDs from a list of {@link Endpoint}s. * * @param endpoints the list of {@link Endpoint}s. * @return the list of IDs of endpoints in {@code endpoints}. */ private List<String> getIDs(List<Endpoint> endpoints) { if (endpoints != null && !endpoints.isEmpty()) { List<String> endpointIds = new LinkedList<>(); for (Endpoint endpoint : endpoints) { endpointIds.add(endpoint.getID()); } return endpointIds; } return null; } public int getCurrentLastN() { return currentLastN; } public int setCurrentLastN(int currentLastN) { List<String> endpointsToAskForKeyframe; synchronized (this) { // Since we have the lock anyway, call update() inside, so it // doesn't have to obtain it again. But keep the call to // askForKeyframes() outside. if (lastN >= 0 && lastN < currentLastN) { currentLastN = lastN; } this.currentLastN = currentLastN; endpointsToAskForKeyframe = update(); } askForKeyframes(endpointsToAskForKeyframe); return currentLastN; } }
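To make the selection policy concrete, the following is a minimal, standalone sketch of what update() computes: pinned endpoints are kept first (for as long as they remain in the conference), then endpoints are added in speech-activity order until currentLastN is reached, always excluding our own endpoint. The class and method names below are illustrative stand-ins, not part of the bridge's API.

import java.util.ArrayList;
import java.util.List;

public class LastNSelectionSketch {
  static List<String> select(
      List<String> speechActivityOrder, List<String> pinned, String ourId, int currentLastN) {
    // Pinned endpoints are always forwarded, as long as they are still
    // endpoints in the conference.
    List<String> forwarded = new ArrayList<>(pinned);
    forwarded.retainAll(speechActivityOrder);
    // Fill the remaining slots in speech-activity order, skipping ourselves.
    for (String id : speechActivityOrder) {
      if (forwarded.size() >= currentLastN) break;
      if (!id.equals(ourId) && !forwarded.contains(id)) forwarded.add(id);
    }
    return forwarded;
  }

  public static void main(String[] args) {
    // Pinned "C" plus the loudest non-self endpoint "A" -> [C, A]
    System.out.println(select(List.of("A", "B", "C", "D"), List.of("C"), "B", 2));
  }
}

Note that, as in update() itself, a pinned set larger than currentLastN is left intact; the sketch simply adds nothing further.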
/** * Implements {@link BundleActivator} for the <tt>msofficecomm</tt> bundle. * * @author Lyubomir Marinov */ public class MsOfficeCommActivator implements BundleActivator { /** * The <tt>Logger</tt> used by the <tt>MsOfficeCommActivator</tt> class and its instances for * logging output. */ private static final Logger logger = Logger.getLogger(MsOfficeCommActivator.class); /** * Starts the <tt>msofficecomm</tt> bundle in a specific {@link BundleContext}. * * @param bundleContext the <tt>BundleContext</tt> in which the <tt>msofficecomm</tt> bundle is to * be started * @throws Exception if anything goes wrong while starting the <tt>msofficecomm</tt> bundle in the * specified <tt>BundleContext</tt> */ public void start(BundleContext bundleContext) throws Exception { // The msofficecomm bundle is available on Windows only. if (!OSUtils.IS_WINDOWS) return; if (logger.isInfoEnabled()) logger.info("MsOfficeComm plugin ... [STARTED]"); Messenger.start(bundleContext); boolean stopMessenger = true; try { int hresult = OutOfProcessServer.start(); if (logger.isInfoEnabled()) logger.info("MsOfficeComm started OutOfProcessServer HRESULT:" + hresult); if (hresult < 0) throw new RuntimeException("HRESULT " + hresult); else stopMessenger = false; } finally { if (stopMessenger) Messenger.stop(bundleContext); } } /** * Stops the <tt>msofficecomm</tt> bundle in a specific {@link BundleContext}. * * @param bundleContext the <tt>BundleContext</tt> in which the <tt>msofficecomm</tt> bundle is to * be stopped * @throws Exception if anything goes wrong while stopping the <tt>msofficecomm</tt> bundle in the * specified <tt>BundleContext</tt> */ public void stop(BundleContext bundleContext) throws Exception { // The msofficecomm bundle is available on Windows only. if (!OSUtils.IS_WINDOWS) return; try { int hresult = OutOfProcessServer.stop(); if (hresult < 0) throw new RuntimeException("HRESULT " + hresult); } finally { Messenger.stop(bundleContext); } if (logger.isInfoEnabled()) logger.info("MsOfficeComm plugin ... [UNREGISTERED]"); } }
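The start() method above relies on a small rollback idiom worth calling out: Messenger is started first and is stopped again only if the subsequent OutOfProcessServer start fails, failure being signalled by a negative HRESULT per the COM convention. A distilled sketch, with hypothetical stand-in methods in place of the bundle's real API:

public class RollbackSketch {
  static void startBoth() {
    startMessenger();
    boolean rollback = true;
    try {
      int hresult = startOutOfProcessServer(); // stand-in; negative means failure
      if (hresult < 0) throw new RuntimeException("HRESULT " + hresult);
      rollback = false; // both services are up; nothing to undo
    } finally {
      if (rollback) stopMessenger(); // undo the partial start
    }
  }

  static void startMessenger() {}
  static int startOutOfProcessServer() { return 0; }
  static void stopMessenger() {}
}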
/** * Intercepts and handles outgoing RTX (RFC-4588) packets for an <tt>RtpChannel</tt>. Depending on * whether the destination supports the RTX format (RFC-4588), it either removes the RTX * encapsulation (thus effectively retransmitting packets bit-by-bit) or updates the sequence * number and SSRC fields taking into account the data sent to the particular <tt>RtpChannel</tt>. * * @author Boris Grozev */ public class RtxTransformer extends SinglePacketTransformerAdapter implements TransformEngine { /** * The <tt>Logger</tt> used by the <tt>RtxTransformer</tt> class and its instances to print debug * information. */ private static final Logger logger = Logger.getLogger(RtxTransformer.class); /** The <tt>RtpChannel</tt> for the transformer. */ private RtpChannel channel; /** Maps an RTX SSRC to the last RTP sequence number sent with that SSRC. */ private final Map<Long, Integer> rtxSequenceNumbers = new HashMap<>(); /** * Initializes a new <tt>RtxTransformer</tt> with a specific <tt>RtpChannel</tt>. * * @param channel the <tt>RtpChannel</tt> for the transformer. */ RtxTransformer(RtpChannel channel) { this.channel = channel; } /** Implements {@link PacketTransformer#transform(RawPacket[])}. {@inheritDoc} */ @Override public RawPacket transform(RawPacket pkt) { byte rtxPt; if (pkt != null && (rtxPt = channel.getRtxPayloadType()) != -1 && pkt.getPayloadType() == rtxPt) { pkt = handleRtxPacket(pkt); } return pkt; } /** * Handles an RTX packet and returns it. * * @param pkt the packet to handle. * @return the packet */ private RawPacket handleRtxPacket(RawPacket pkt) { boolean destinationSupportsRtx = channel.getRtxPayloadType() != -1; RawPacket mediaPacket = createMediaPacket(pkt); if (mediaPacket != null) { RawPacketCache cache = channel.getStream().getPacketCache(); if (cache != null) { cache.cachePacket(mediaPacket); } } if (destinationSupportsRtx) { pkt.setSequenceNumber( getNextRtxSequenceNumber(pkt.getSSRC() & 0xffffffffL, pkt.getSequenceNumber())); } else { // If the media packet was not reconstructed, drop the RTX packet // (by returning null). return mediaPacket; } return pkt; } /** * Creates a {@code RawPacket} which represents the original packet encapsulated in {@code pkt} * using the RTX format. * * @param pkt the packet from which to extract a media packet. * @return the extracted media packet. */ private RawPacket createMediaPacket(RawPacket pkt) { RawPacket mediaPacket = null; long rtxSsrc = pkt.getSSRC() & 0xffffffffL; // We need to know the SSRC paired with rtxSsrc *as seen by the // receiver (i.e. this.channel)*. However, we only store SSRCs // that endpoints *send* with. // We therefore assume that SSRC re-writing has not introduced any // new SSRCs and therefore the FID mappings known to the senders // also apply to receivers. RtpChannel sourceChannel = channel.getContent().findChannelByFidSsrc(rtxSsrc); if (sourceChannel != null) { long mediaSsrc = sourceChannel.getFidPairedSsrc(rtxSsrc); if (mediaSsrc != -1) { byte apt = sourceChannel.getRtxAssociatedPayloadType(); if (apt != -1) { mediaPacket = new RawPacket(pkt.getBuffer().clone(), pkt.getOffset(), pkt.getLength()); // Remove the RTX header by moving the RTP header two bytes // right.
byte[] buf = mediaPacket.getBuffer(); int off = mediaPacket.getOffset(); System.arraycopy(buf, off, buf, off + 2, mediaPacket.getHeaderLength()); mediaPacket.setOffset(off + 2); mediaPacket.setLength(pkt.getLength() - 2); mediaPacket.setSSRC((int) mediaSsrc); mediaPacket.setSequenceNumber(pkt.getOriginalSequenceNumber()); mediaPacket.setPayloadType(apt); } } } return mediaPacket; } /** Implements {@link TransformEngine#getRTPTransformer()}. */ @Override public PacketTransformer getRTPTransformer() { return this; } /** Implements {@link TransformEngine#getRTCPTransformer()}. */ @Override public PacketTransformer getRTCPTransformer() { return null; } /** * Returns the sequence number to use for a specific RTX packet, which is based on the packet's * original sequence number. * * <p>Because we terminate the RTX format, and with simulcast we might translate RTX packets from * multiple SSRCs into the same SSRC, we keep count of the RTX packets (and their sequence * numbers) which we sent for each SSRC. * * @param ssrc the SSRC of the RTX stream for the packet. * @param defaultSeq the default sequence number to use in case we don't (yet) have any * information about <tt>ssrc</tt>. * @return the sequence number which should be used for the next RTX packet sent using SSRC * <tt>ssrc</tt>. */ private int getNextRtxSequenceNumber(long ssrc, int defaultSeq) { Integer seq; synchronized (rtxSequenceNumbers) { seq = rtxSequenceNumbers.get(ssrc); if (seq == null) seq = defaultSeq; else seq++; rtxSequenceNumbers.put(ssrc, seq); } return seq; } /** * Returns the next RTP sequence number to use for the RTX stream for a particular SSRC. * * @param ssrc the SSRC. * @return the next sequence number to use for SSRC <tt>ssrc</tt>. */ private int getNextRtxSequenceNumber(long ssrc) { return getNextRtxSequenceNumber(ssrc, new Random().nextInt(1 << 16)); } /** * Tries to find an SSRC paired with {@code ssrc} in an FID group in one of the channels from * {@link #channel}'s {@code Content}. Returns -1 on failure. * * @param ssrc the SSRC for which to find a paired SSRC. * @return An SSRC paired with {@code ssrc} in an FID group, or -1. */ private long getPairedSsrc(long ssrc) { RtpChannel sourceChannel = channel.getContent().findChannelByFidSsrc(ssrc); if (sourceChannel != null) { return sourceChannel.getFidPairedSsrc(ssrc); } return -1; } /** * Retransmits a packet to {@link #channel}. If the destination supports the RTX format, the * packet will be encapsulated in RTX, otherwise, the packet will be retransmitted as-is. * * @param pkt the packet to retransmit. * @param after the {@code TransformEngine} in the chain of {@code TransformEngine}s of the * associated {@code MediaStream} after which the injection of {@code pkt} is to begin * @return {@code true} if the packet was successfully retransmitted, {@code false} otherwise. 
*/ public boolean retransmit(RawPacket pkt, TransformEngine after) { boolean destinationSupportsRtx = channel.getRtxPayloadType() != -1; boolean retransmitPlain; if (destinationSupportsRtx) { long rtxSsrc = getPairedSsrc(pkt.getSSRC()); if (rtxSsrc == -1) { logger.warn("Cannot find SSRC for RTX, retransmitting plain."); retransmitPlain = true; } else { retransmitPlain = !encapsulateInRtxAndTransmit(pkt, rtxSsrc); } } else { retransmitPlain = true; } if (retransmitPlain) { MediaStream mediaStream = channel.getStream(); if (mediaStream != null) { try { mediaStream.injectPacket(pkt, /* data */ true, after); } catch (TransmissionFailedException tfe) { logger.warn("Failed to retransmit a packet."); return false; } } } return true; } /** * Encapsulates {@code pkt} in the RTX format, using {@code rtxSsrc} as its SSRC, and transmits it * to {@link #channel} by injecting it in the {@code MediaStream}. * * @param pkt the packet to transmit. * @param rtxSsrc the SSRC for the RTX stream. * @return {@code true} if the packet was successfully retransmitted, {@code false} otherwise. */ private boolean encapsulateInRtxAndTransmit(RawPacket pkt, long rtxSsrc) { byte[] buf = pkt.getBuffer(); int len = pkt.getLength(); int off = pkt.getOffset(); byte[] newBuf = buf; if (buf.length < len + 2) { // FIXME The byte array newly allocated and assigned to newBuf must // be made known to pkt eventually. newBuf = new byte[len + 2]; } int osn = pkt.getSequenceNumber(); int headerLength = pkt.getHeaderLength(); int payloadLength = len - headerLength; System.arraycopy(buf, off, newBuf, 0, headerLength); // FIXME If newBuf is actually buf, then we will overwrite the first two // bytes of the payload below. newBuf[headerLength] = (byte) ((osn >> 8) & 0xff); newBuf[headerLength + 1] = (byte) (osn & 0xff); System.arraycopy(buf, off + headerLength, newBuf, headerLength + 2, payloadLength); // FIXME We tried to extend the payload of pkt by two bytes above but // we never told pkt that its length has increased by these two bytes. MediaStream mediaStream = channel.getStream(); if (mediaStream != null) { pkt.setSSRC((int) rtxSsrc); // Only call getNextRtxSequenceNumber() when we're sure we're going // to transmit a packet, because it consumes a sequence number. pkt.setSequenceNumber(getNextRtxSequenceNumber(rtxSsrc)); try { mediaStream.injectPacket(pkt, /* data */ true, /* after */ null); } catch (TransmissionFailedException tfe) { logger.warn("Failed to transmit an RTX packet."); return false; } } return true; } }
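For reference, the RTX (RFC 4588) framing manipulated above is: the RTP header, a two-byte Original Sequence Number (OSN) in network byte order, then the original payload. The following standalone round-trip, under simplified assumptions (a bare byte array instead of RawPacket), illustrates the arithmetic used by encapsulateInRtxAndTransmit() and createMediaPacket():

public class RtxFramingSketch {
  static byte[] encapsulate(byte[] rtp, int headerLength, int osn) {
    byte[] rtx = new byte[rtp.length + 2];
    System.arraycopy(rtp, 0, rtx, 0, headerLength); // copy the RTP header
    rtx[headerLength] = (byte) ((osn >> 8) & 0xff); // OSN, high byte
    rtx[headerLength + 1] = (byte) (osn & 0xff); // OSN, low byte
    System.arraycopy(rtp, headerLength, rtx, headerLength + 2, rtp.length - headerLength);
    return rtx;
  }

  static int readOsn(byte[] rtx, int headerLength) {
    return ((rtx[headerLength] & 0xff) << 8) | (rtx[headerLength + 1] & 0xff);
  }

  public static void main(String[] args) {
    byte[] rtp = {1, 2, 3, 4, /* payload */ 9, 9}; // pretend 4-byte header
    byte[] rtx = encapsulate(rtp, 4, 0xABCD);
    System.out.println(rtx.length + " " + Integer.toHexString(readOsn(rtx, 4))); // 8 abcd
  }
}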
/** * This class is a transport layer for WebRTC data channels. It consists of an SCTP connection running on * top of the ICE/DTLS layer. It manages WebRTC data channels. See * http://tools.ietf.org/html/draft-ietf-rtcweb-data-channel-08 for more info on WebRTC data * channels. * * <p>Control protocol: http://tools.ietf.org/html/draft-ietf-rtcweb-data-protocol-03 FIXME handle * closing of data channels (SCTP stream reset) * * @author Pawel Domas * @author Lyubomir Marinov * @author Boris Grozev */ public class SctpConnection extends Channel implements SctpDataCallback, SctpSocket.NotificationListener { /** Generator used to track debug IDs. */ private static int debugIdGen = -1; /** DTLS transport buffer size. Note: randomly chosen. */ private static final int DTLS_BUFFER_SIZE = 2048; /** Switch used for SCTP traffic debugging purposes. FIXME to be removed */ private static final boolean LOG_SCTP_PACKETS = false; /** The logger */ private static final Logger logger = Logger.getLogger(SctpConnection.class); /** * Message type used to acknowledge WebRTC data channel allocation on the SCTP stream ID on which * the <tt>MSG_OPEN_CHANNEL</tt> message arrives. */ private static final int MSG_CHANNEL_ACK = 0x2; private static final byte[] MSG_CHANNEL_ACK_BYTES = new byte[] {MSG_CHANNEL_ACK}; /** * A message of this type is sent over the control PPID in order to open a new WebRTC data channel * on the SCTP stream ID on which it is sent. */ private static final int MSG_OPEN_CHANNEL = 0x3; /** SCTP transport buffer size. */ private static final int SCTP_BUFFER_SIZE = DTLS_BUFFER_SIZE - 13; /** The pool of <tt>Thread</tt>s which run <tt>SctpConnection</tt>s. */ private static final ExecutorService threadPool = ExecutorUtils.newCachedThreadPool(true, SctpConnection.class.getName()); /** Payload protocol id that identifies binary data in WebRTC data channel. */ static final int WEB_RTC_PPID_BIN = 53; /** Payload protocol id for control data. Used for <tt>WebRtcDataStream</tt> allocation. */ static final int WEB_RTC_PPID_CTRL = 50; /** Payload protocol id that identifies UTF-8 encoded text data in WebRTC data channels. */ static final int WEB_RTC_PPID_STRING = 51; /** * The <tt>String</tt> value of the <tt>Protocol</tt> field of the <tt>DATA_CHANNEL_OPEN</tt> * message. */ private static final String WEBRTC_DATA_CHANNEL_PROTOCOL = "http://jitsi.org/protocols/colibri"; private static synchronized int generateDebugId() { debugIdGen += 2; return debugIdGen; } /** * Indicates whether the SCTP association is ready and has not been ended by a subsequent state * change. */ private boolean assocIsUp; /** Indicates if we have accepted the incoming connection. */ private boolean acceptedIncomingConnection; /** Data channels mapped by SCTP stream identifier (sid). */ private final Map<Integer, WebRtcDataStream> channels = new HashMap<Integer, WebRtcDataStream>(); /** Debug ID used to distinguish SCTP sockets in packet logs. */ private final int debugId; /** * The <tt>AsyncExecutor</tt> which is to asynchronously dispatch the events fired by this * instance in order to prevent possible listeners from blocking this <tt>SctpConnection</tt> in * general and {@link #sctpSocket} in particular for too long. The timeout of <tt>15</tt> is * chosen to be in accord with the time it takes to expire a <tt>Channel</tt>. */ private final AsyncExecutor<Runnable> eventDispatcher = new AsyncExecutor<Runnable>(15, TimeUnit.MILLISECONDS); /** Datagram socket for ICE/UDP layer.
*/ private IceSocketWrapper iceSocket; /** * List of <tt>WebRtcDataStreamListener</tt>s that will be notified whenever a new WebRTC data * channel is opened. */ private final List<WebRtcDataStreamListener> listeners = new ArrayList<WebRtcDataStreamListener>(); /** Remote SCTP port. */ private final int remoteSctpPort; /** <tt>SctpSocket</tt> used for SCTP transport. */ private SctpSocket sctpSocket; /** * Flag that prevents starting this connection multiple times from {@link #maybeStartStream()}. */ private boolean started; /** * Initializes a new <tt>SctpConnection</tt> instance. * * @param id the string identifier of this connection instance * @param content the <tt>Content</tt> which is initializing the new instance * @param endpoint the <tt>Endpoint</tt> of the newly created instance * @param remoteSctpPort the SCTP port used by the remote peer * @param channelBundleId the ID of the channel-bundle this <tt>SctpConnection</tt> is to be a * part of (or <tt>null</tt> if it is not to be a part of a channel-bundle). * @throws Exception if an error occurs while initializing the new instance */ public SctpConnection( String id, Content content, Endpoint endpoint, int remoteSctpPort, String channelBundleId) throws Exception { super(content, id, channelBundleId); setEndpoint(endpoint.getID()); this.remoteSctpPort = remoteSctpPort; this.debugId = generateDebugId(); } /** * Adds a <tt>WebRtcDataStreamListener</tt> to the list of listeners. * * @param listener the <tt>WebRtcDataStreamListener</tt> to be added to the listeners list. */ public void addChannelListener(WebRtcDataStreamListener listener) { if (listener == null) { throw new NullPointerException("listener"); } else { synchronized (listeners) { if (!listeners.contains(listener)) { listeners.add(listener); } } } } /** {@inheritDoc} */ @Override protected void closeStream() throws IOException { try { synchronized (this) { assocIsUp = false; acceptedIncomingConnection = false; if (sctpSocket != null) { sctpSocket.close(); sctpSocket = null; } } } finally { if (iceSocket != null) { // It is now the responsibility of the transport manager to // close the socket. // iceUdpSocket.close(); } } } /** {@inheritDoc} */ @Override public void expire() { try { eventDispatcher.shutdown(); } finally { super.expire(); } } /** * Gets the <tt>WebRtcDataStreamListener</tt>s added to this instance. * * @return the <tt>WebRtcDataStreamListener</tt>s added to this instance or <tt>null</tt> if there * are no <tt>WebRtcDataStreamListener</tt>s added to this instance */ private WebRtcDataStreamListener[] getChannelListeners() { WebRtcDataStreamListener[] ls; synchronized (listeners) { if (listeners.isEmpty()) { ls = null; } else { ls = listeners.toArray(new WebRtcDataStreamListener[listeners.size()]); } } return ls; } /** * Returns the default <tt>WebRtcDataStream</tt> if it's ready or <tt>null</tt> otherwise. * * @return <tt>WebRtcDataStream</tt> if it's ready or <tt>null</tt> otherwise. * @throws IOException if an IO error occurs. */ public WebRtcDataStream getDefaultDataStream() throws IOException { WebRtcDataStream def; synchronized (this) { if (sctpSocket == null) { def = null; } else { // Channel that runs on sid 0 def = channels.get(0); if (def == null) { def = openChannel(0, 0, 0, 0, "default"); } // Pawel Domas: Must be acknowledged before use /* * XXX Lyubomir Marinov: We're always sending ordered.
According * to "WebRTC Data Channel Establishment Protocol", we can start * sending messages containing user data after the * DATA_CHANNEL_OPEN message has been sent without waiting for * the reception of the corresponding DATA_CHANNEL_ACK message. */ // if (!def.isAcknowledged()) // def = null; } } return def; } /** * Returns <tt>true</tt> if this <tt>SctpConnection</tt> is connected to the remote peer and * operational. * * @return <tt>true</tt> if this <tt>SctpConnection</tt> is connected to the remote peer and * operational */ public boolean isReady() { return assocIsUp && acceptedIncomingConnection; } /** {@inheritDoc} */ @Override protected void maybeStartStream() throws IOException { // connector final StreamConnector connector = getStreamConnector(); if (connector == null) return; synchronized (this) { if (started) return; threadPool.execute( new Runnable() { @Override public void run() { try { Sctp.init(); runOnDtlsTransport(connector); } catch (IOException e) { logger.error(e, e); } finally { try { Sctp.finish(); } catch (IOException e) { logger.error("Failed to shutdown SCTP stack", e); } } } }); started = true; } } /** * Submits {@link #notifyChannelOpenedInEventDispatcher(WebRtcDataStream)} to {@link * #eventDispatcher} for asynchronous execution. * * @param dataChannel the <tt>WebRtcDataStream</tt> that was opened. */ private void notifyChannelOpened(final WebRtcDataStream dataChannel) { if (!isExpired()) { eventDispatcher.execute( new Runnable() { @Override public void run() { notifyChannelOpenedInEventDispatcher(dataChannel); } }); } } private void notifyChannelOpenedInEventDispatcher(WebRtcDataStream dataChannel) { /* * When executing asynchronously in eventDispatcher, it is technically * possible that this SctpConnection may have expired by now. */ if (!isExpired()) { WebRtcDataStreamListener[] ls = getChannelListeners(); if (ls != null) { for (WebRtcDataStreamListener l : ls) { l.onChannelOpened(this, dataChannel); } } } } /** * Submits {@link #notifySctpConnectionReadyInEventDispatcher()} to {@link #eventDispatcher} for * asynchronous execution. */ private void notifySctpConnectionReady() { if (!isExpired()) { eventDispatcher.execute( new Runnable() { @Override public void run() { notifySctpConnectionReadyInEventDispatcher(); } }); } } /** * Notifies the <tt>WebRtcDataStreamListener</tt>s added to this instance that this * <tt>SctpConnection</tt> is ready, i.e. it is connected to the remote peer and operational. */ private void notifySctpConnectionReadyInEventDispatcher() { /* * When executing asynchronously in eventDispatcher, it is technically * possible that this SctpConnection may have expired by now. */ if (!isExpired() && isReady()) { WebRtcDataStreamListener[] ls = getChannelListeners(); if (ls != null) { for (WebRtcDataStreamListener l : ls) { l.onSctpConnectionReady(this); } } } } /** * Handles a control packet. * * @param data raw packet data that arrived on the control PPID. * @param sid SCTP stream id on which the data has arrived. */ private synchronized void onCtrlPacket(byte[] data, int sid) throws IOException { ByteBuffer buffer = ByteBuffer.wrap(data); int messageType = /* 1 byte unsigned integer */ 0xFF & buffer.get(); if (messageType == MSG_CHANNEL_ACK) { if (logger.isDebugEnabled()) { logger.debug(getEndpoint().getID() + " ACK received SID: " + sid); } // Open channel ACK WebRtcDataStream channel = channels.get(sid); if (channel != null) { // The ACK check prevents firing multiple notifications // if we get more than one ACK (by mistake/bug).
if (!channel.isAcknowledged()) { channel.ackReceived(); notifyChannelOpened(channel); } else { logger.warn("Redundant ACK received for SID: " + sid); } } else { logger.error("No channel exists on sid: " + sid); } } else if (messageType == MSG_OPEN_CHANNEL) { int channelType = /* 1 byte unsigned integer */ 0xFF & buffer.get(); int priority = /* 2 bytes unsigned integer */ 0xFFFF & buffer.getShort(); long reliability = /* 4 bytes unsigned integer */ 0xFFFFFFFFL & buffer.getInt(); int labelLength = /* 2 bytes unsigned integer */ 0xFFFF & buffer.getShort(); int protocolLength = /* 2 bytes unsigned integer */ 0xFFFF & buffer.getShort(); String label; String protocol; if (labelLength == 0) { label = ""; } else { byte[] labelBytes = new byte[labelLength]; buffer.get(labelBytes); label = new String(labelBytes, "UTF-8"); } if (protocolLength == 0) { protocol = ""; } else { byte[] protocolBytes = new byte[protocolLength]; buffer.get(protocolBytes); protocol = new String(protocolBytes, "UTF-8"); } if (logger.isDebugEnabled()) { logger.debug( "!!! " + getEndpoint().getID() + " data channel open request on SID: " + sid + " type: " + channelType + " prio: " + priority + " reliab: " + reliability + " label: " + label + " proto: " + protocol); } if (channels.containsKey(sid)) { logger.error("Channel on sid: " + sid + " already exists"); } WebRtcDataStream newChannel = new WebRtcDataStream(sctpSocket, sid, label, true); channels.put(sid, newChannel); sendOpenChannelAck(sid); notifyChannelOpened(newChannel); } else { logger.error("Unexpected ctrl msg type: " + messageType); } } /** {@inheritDoc} */ @Override protected void onEndpointChanged(Endpoint oldValue, Endpoint newValue) { if (oldValue != null) oldValue.setSctpConnection(null); if (newValue != null) newValue.setSctpConnection(this); } /** Implements notification in order to track socket state. */ @Override public synchronized void onSctpNotification(SctpSocket socket, SctpNotification notification) { if (logger.isDebugEnabled()) { logger.debug("socket=" + socket + "; notification=" + notification); } switch (notification.sn_type) { case SctpNotification.SCTP_ASSOC_CHANGE: SctpNotification.AssociationChange assocChange = (SctpNotification.AssociationChange) notification; switch (assocChange.state) { case SctpNotification.AssociationChange.SCTP_COMM_UP: if (!assocIsUp) { boolean wasReady = isReady(); assocIsUp = true; if (isReady() && !wasReady) notifySctpConnectionReady(); } break; case SctpNotification.AssociationChange.SCTP_COMM_LOST: case SctpNotification.AssociationChange.SCTP_SHUTDOWN_COMP: case SctpNotification.AssociationChange.SCTP_CANT_STR_ASSOC: try { closeStream(); } catch (IOException e) { logger.error("Error closing SCTP socket", e); } break; } break; } } /** * {@inheritDoc} * * <p>SCTP input data callback. 
*/ @Override public void onSctpPacket( byte[] data, int sid, int ssn, int tsn, long ppid, int context, int flags) { if (ppid == WEB_RTC_PPID_CTRL) { // Channel control PPID try { onCtrlPacket(data, sid); } catch (IOException e) { logger.error("IOException when processing ctrl packet", e); } } else if (ppid == WEB_RTC_PPID_STRING || ppid == WEB_RTC_PPID_BIN) { WebRtcDataStream channel; synchronized (this) { channel = channels.get(sid); } if (channel == null) { logger.error("No channel found for sid: " + sid); return; } if (ppid == WEB_RTC_PPID_STRING) { // WebRTC String String str; String charsetName = "UTF-8"; try { str = new String(data, charsetName); } catch (UnsupportedEncodingException uee) { logger.error("Unsupported charset encoding/name " + charsetName, uee); str = null; } channel.onStringMsg(str); } else { // WebRTC Binary channel.onBinaryMsg(data); } } else { logger.warn("Got message on unsupported PPID: " + ppid); } } /** * Opens a new WebRTC data channel using the specified parameters. * * @param type channel type as defined in the control protocol description. Use 0 for "reliable". * @param prio channel priority. The higher the number, the lower the priority. * @param reliab the Reliability Parameter. This field is ignored if a reliable channel is used. * If a partial reliable channel with a limited number of retransmissions is used, this field * specifies the number of retransmissions. If a partial reliable channel with limited lifetime * is used, this field specifies the maximum lifetime in milliseconds. In summary: * DATA_CHANNEL_RELIABLE and DATA_CHANNEL_RELIABLE_UNORDERED ignore it; * DATA_CHANNEL_PARTIAL_RELIABLE_REXMIT and DATA_CHANNEL_PARTIAL_RELIABLE_REXMIT_UNORDERED * treat it as the number of retransmissions; DATA_CHANNEL_PARTIAL_RELIABLE_TIMED and * DATA_CHANNEL_PARTIAL_RELIABLE_TIMED_UNORDERED treat it as the lifetime in milliseconds. * @param sid SCTP stream id that will be used by the new channel (it must not already be in use). * @param label text label for the channel. * @return a new instance of <tt>WebRtcDataStream</tt> that represents the opened WebRTC data channel. * @throws IOException if an IO error occurs.
*/ public synchronized WebRtcDataStream openChannel( int type, int prio, long reliab, int sid, String label) throws IOException { if (channels.containsKey(sid)) { throw new IOException("Channel on sid: " + sid + " already exists"); } // Label Length & Label byte[] labelBytes; int labelByteLength; if (label == null) { labelBytes = null; labelByteLength = 0; } else { labelBytes = label.getBytes("UTF-8"); labelByteLength = labelBytes.length; if (labelByteLength > 0xFFFF) labelByteLength = 0xFFFF; } // Protocol Length & Protocol String protocol = WEBRTC_DATA_CHANNEL_PROTOCOL; byte[] protocolBytes; int protocolByteLength; if (protocol == null) { protocolBytes = null; protocolByteLength = 0; } else { protocolBytes = protocol.getBytes("UTF-8"); protocolByteLength = protocolBytes.length; if (protocolByteLength > 0xFFFF) protocolByteLength = 0xFFFF; } ByteBuffer packet = ByteBuffer.allocate(12 + labelByteLength + protocolByteLength); // Message to open a new channel on the current sid // Message Type packet.put((byte) MSG_OPEN_CHANNEL); // Channel Type packet.put((byte) type); // Priority packet.putShort((short) prio); // Reliability Parameter packet.putInt((int) reliab); // Label Length packet.putShort((short) labelByteLength); // Protocol Length packet.putShort((short) protocolByteLength); // Label if (labelByteLength != 0) { packet.put(labelBytes, 0, labelByteLength); } // Protocol if (protocolByteLength != 0) { packet.put(protocolBytes, 0, protocolByteLength); } int sentCount = sctpSocket.send(packet.array(), true, sid, WEB_RTC_PPID_CTRL); if (sentCount != packet.capacity()) { throw new IOException("Failed to open new channel on sid: " + sid); } WebRtcDataStream channel = new WebRtcDataStream(sctpSocket, sid, label, false); channels.put(sid, channel); return channel; } /** * Removes a <tt>WebRtcDataStreamListener</tt> from the list of listeners. * * @param listener the <tt>WebRtcDataStreamListener</tt> to be removed from the listeners list.
*/ public void removeChannelListener(WebRtcDataStreamListener listener) { if (listener != null) { synchronized (listeners) { listeners.remove(listener); } } } private void runOnDtlsTransport(StreamConnector connector) throws IOException { DtlsControlImpl dtlsControl = (DtlsControlImpl) getTransportManager().getDtlsControl(this); DtlsTransformEngine engine = dtlsControl.getTransformEngine(); final DtlsPacketTransformer transformer = (DtlsPacketTransformer) engine.getRTPTransformer(); byte[] receiveBuffer = new byte[SCTP_BUFFER_SIZE]; if (LOG_SCTP_PACKETS) { System.setProperty( ConfigurationService.PNAME_SC_HOME_DIR_LOCATION, System.getProperty("java.io.tmpdir")); System.setProperty( ConfigurationService.PNAME_SC_HOME_DIR_NAME, SctpConnection.class.getName()); } synchronized (this) { // FIXME local SCTP port is hardcoded in bridge offer SDP (Jitsi // Meet) sctpSocket = Sctp.createSocket(5000); assocIsUp = false; acceptedIncomingConnection = false; } // Implement output network link for SCTP stack on DTLS transport sctpSocket.setLink( new NetworkLink() { @Override public void onConnOut(SctpSocket s, byte[] packet) throws IOException { if (LOG_SCTP_PACKETS) { LibJitsi.getPacketLoggingService() .logPacket( PacketLoggingService.ProtocolName.ICE4J, new byte[] {0, 0, 0, (byte) debugId}, 5000, new byte[] {0, 0, 0, (byte) (debugId + 1)}, remoteSctpPort, PacketLoggingService.TransportName.UDP, true, packet); } // Send through DTLS transport transformer.sendApplicationData(packet, 0, packet.length); } }); if (logger.isDebugEnabled()) { logger.debug("Connecting SCTP to port: " + remoteSctpPort + " to " + getEndpoint().getID()); } sctpSocket.setNotificationListener(this); sctpSocket.listen(); // FIXME manage threads threadPool.execute( new Runnable() { @Override public void run() { SctpSocket sctpSocket = null; try { // sctpSocket is set to null on close sctpSocket = SctpConnection.this.sctpSocket; while (sctpSocket != null) { if (sctpSocket.accept()) { acceptedIncomingConnection = true; break; } Thread.sleep(100); sctpSocket = SctpConnection.this.sctpSocket; } if (isReady()) { notifySctpConnectionReady(); } } catch (Exception e) { logger.error("Error accepting SCTP connection", e); } if (sctpSocket == null && logger.isInfoEnabled()) { logger.info( "SctpConnection " + getID() + " closed" + " before SctpSocket accept()-ed."); } } }); // Notify that from now on SCTP connection is considered functional sctpSocket.setDataCallback(this); // Setup iceSocket DatagramSocket datagramSocket = connector.getDataSocket(); if (datagramSocket != null) { this.iceSocket = new IceUdpSocketWrapper(datagramSocket); } else { this.iceSocket = new IceTcpSocketWrapper(connector.getDataTCPSocket()); } DatagramPacket rcvPacket = new DatagramPacket(receiveBuffer, 0, receiveBuffer.length); // Receive loop, breaks when SCTP socket is closed try { do { iceSocket.receive(rcvPacket); RawPacket raw = new RawPacket(rcvPacket.getData(), rcvPacket.getOffset(), rcvPacket.getLength()); raw = transformer.reverseTransform(raw); // Check for app data if (raw == null) continue; if (LOG_SCTP_PACKETS) { LibJitsi.getPacketLoggingService() .logPacket( PacketLoggingService.ProtocolName.ICE4J, new byte[] {0, 0, 0, (byte) (debugId + 1)}, remoteSctpPort, new byte[] {0, 0, 0, (byte) debugId}, 5000, PacketLoggingService.TransportName.UDP, false, raw.getBuffer(), raw.getOffset(), raw.getLength()); } // Pass network packet to SCTP stack sctpSocket.onConnIn(raw.getBuffer(), raw.getOffset(), raw.getLength()); } while (true); } finally { // Eventually, close 
the socket although it should happen from // expire(). synchronized (this) { assocIsUp = false; acceptedIncomingConnection = false; if (sctpSocket != null) { sctpSocket.close(); sctpSocket = null; } } } } /** * Sends acknowledgment for open channel request on given SCTP stream ID. * * @param sid SCTP stream identifier to be used for sending ack. */ private void sendOpenChannelAck(int sid) throws IOException { // Send ACK byte[] ack = MSG_CHANNEL_ACK_BYTES; int sendAck = sctpSocket.send(ack, true, sid, WEB_RTC_PPID_CTRL); if (sendAck != ack.length) { logger.error("Failed to send open channel confirmation"); } } /** * {@inheritDoc} * * <p>Creates a <tt>TransportManager</tt> instance suitable for an <tt>SctpConnection</tt> (e.g. * with 1 component only). */ protected TransportManager createTransportManager(String xmlNamespace) throws IOException { if (IceUdpTransportPacketExtension.NAMESPACE.equals(xmlNamespace)) { Content content = getContent(); return new IceUdpTransportManager( content.getConference(), isInitiator(), 1 /* num components */, content.getName()); } else if (RawUdpTransportPacketExtension.NAMESPACE.equals(xmlNamespace)) { // TODO: support RawUdp once RawUdpTransportManager is updated // return new RawUdpTransportManager(this); throw new IllegalArgumentException("Unsupported Jingle transport " + xmlNamespace); } else { throw new IllegalArgumentException("Unsupported Jingle transport " + xmlNamespace); } } }
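The wire format built by openChannel() above is the DATA_CHANNEL_OPEN message of draft-ietf-rtcweb-data-protocol: a one-byte message type, a one-byte channel type, a 16-bit priority, a 32-bit reliability parameter, and length-prefixed label and protocol strings. A standalone sketch of the same layout (the hard-coded values mirror the constants above; this is an illustration, not the class's API):

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;

public class DataChannelOpenSketch {
  static byte[] build(int type, int prio, long reliab, String label, String protocol) {
    byte[] labelBytes = label.getBytes(StandardCharsets.UTF_8);
    byte[] protocolBytes = protocol.getBytes(StandardCharsets.UTF_8);
    ByteBuffer packet = ByteBuffer.allocate(12 + labelBytes.length + protocolBytes.length);
    packet.put((byte) 0x03); // MSG_OPEN_CHANNEL
    packet.put((byte) type); // channel type; 0 is "reliable"
    packet.putShort((short) prio); // priority
    packet.putInt((int) reliab); // reliability parameter
    packet.putShort((short) labelBytes.length); // label length
    packet.putShort((short) protocolBytes.length); // protocol length
    packet.put(labelBytes);
    packet.put(protocolBytes);
    return packet.array();
  }

  public static void main(String[] args) {
    byte[] msg = build(0, 0, 0, "default", "http://jitsi.org/protocols/colibri");
    System.out.println(msg.length); // 12 fixed bytes plus label and protocol
  }
}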
/** * Discovers and registers DirectShow video capture devices with JMF. * * @author Sebastien Vincent */ public class DirectShowSystem extends DeviceSystem { /** * The <tt>Logger</tt> used by the <tt>DirectShowSystem</tt> class and its instances for logging * output. */ private static final Logger logger = Logger.getLogger(DirectShowSystem.class); /** The protocol of the <tt>MediaLocator</tt>s identifying DirectShow capture devices. */ private static final String LOCATOR_PROTOCOL = LOCATOR_PROTOCOL_DIRECTSHOW; /** * Constructor. Discovers and registers DirectShow capture devices with JMF. * * @throws Exception if anything goes wrong while discovering and registering DirectShow capture * devices with JMF */ public DirectShowSystem() throws Exception { super(MediaType.VIDEO, LOCATOR_PROTOCOL); } protected void doInitialize() throws Exception { DSCaptureDevice devices[] = DSManager.getInstance().getCaptureDevices(); boolean captureDeviceInfoIsAdded = false; for (int i = 0, count = (devices == null) ? 0 : devices.length; i < count; i++) { long pixelFormat = devices[i].getFormat().getPixelFormat(); int ffmpegPixFmt = (int) DataSource.getFFmpegPixFmt(pixelFormat); Format format = null; if (ffmpegPixFmt != FFmpeg.PIX_FMT_NONE) { format = new AVFrameFormat(ffmpegPixFmt, (int) pixelFormat); } else { logger.warn( "No support for this webcam: " + devices[i].getName() + " (format " + pixelFormat + " not supported)"); continue; } if (logger.isInfoEnabled()) { for (DSFormat f : devices[i].getSupportedFormats()) { if (f.getWidth() != 0 && f.getHeight() != 0) logger.info( "Webcam available resolution for " + devices[i].getName() + ":" + f.getWidth() + "x" + f.getHeight()); } } CaptureDeviceInfo device = new CaptureDeviceInfo( devices[i].getName(), new MediaLocator(LOCATOR_PROTOCOL + ':' + devices[i].getName()), new Format[] {format}); if (logger.isInfoEnabled()) logger.info("Found[" + i + "]: " + device.getName()); CaptureDeviceManager.addDevice(device); captureDeviceInfoIsAdded = true; } if (captureDeviceInfoIsAdded && !MediaServiceImpl.isJmfRegistryDisableLoad()) CaptureDeviceManager.commit(); DSManager.dispose(); } }
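The doInitialize() loop above follows a common discovery shape: enumerate devices, skip those whose native pixel format has no FFmpeg mapping, register the rest, and commit the registry once if anything was added. A distilled sketch with hypothetical stand-ins for the JMF/DirectShow calls:

import java.util.List;
import java.util.Map;

public class CaptureDiscoverySketch {
  static int register(Map<String, Integer> devicesByPixelFormat, List<Integer> supported) {
    int added = 0;
    for (Map.Entry<String, Integer> device : devicesByPixelFormat.entrySet()) {
      if (!supported.contains(device.getValue())) {
        continue; // no FFmpeg mapping for this pixel format; skip the device
      }
      added++; // stand-in for CaptureDeviceManager.addDevice(...)
    }
    if (added > 0) {
      commit(); // stand-in for CaptureDeviceManager.commit()
    }
    return added;
  }

  static void commit() {}

  public static void main(String[] args) {
    // Only camA's pixel format (1) has a mapping -> prints 1
    System.out.println(register(Map.of("camA", 1, "camB", 7), List.of(1)));
  }
}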
/** * Intercepts RTX (RFC-4588) packets coming from an {@link RtpChannel}, and removes their RTX * encapsulation. Allows packets to be retransmitted to a channel (using the RTX format if the * destination supports it). * * @author Boris Grozev * @author George Politis */ public class RtxTransformer extends SinglePacketTransformerAdapter implements TransformEngine { /** * The {@link Logger} used by the {@link RtxTransformer} class to print debug information. Note * that {@link RtxTransformer} instances should use {@link #logger} instead. */ private static final Logger classLogger = Logger.getLogger(RtxTransformer.class); /** The <tt>RtpChannel</tt> for the transformer. */ private RtpChannel channel; /** Maps an RTX SSRC to the last RTP sequence number sent with that SSRC. */ private final Map<Long, Integer> rtxSequenceNumbers = new HashMap<>(); /** The {@link Logger} to be used by this instance to print debug information. */ private final Logger logger; /** * The payload type number configured for RTX (RFC-4588), or -1 if none is configured (the other * end does not support RTX). */ private byte rtxPayloadType = -1; /** The "associated payload type" number for RTX. */ private byte rtxAssociatedPayloadType = -1; /** * Initializes a new <tt>RtxTransformer</tt> with a specific <tt>RtpChannel</tt>. * * @param channel the <tt>RtpChannel</tt> for the transformer. */ RtxTransformer(RtpChannel channel) { super(RTPPacketPredicate.INSTANCE); this.channel = channel; this.logger = Logger.getLogger(classLogger, channel.getContent().getConference().getLogger()); } /** Implements {@link PacketTransformer#reverseTransform(RawPacket[])}. {@inheritDoc} */ @Override public RawPacket reverseTransform(RawPacket pkt) { if (isRtx(pkt)) { pkt = deRtx(pkt); } return pkt; } /** * Determines whether {@code pkt} is an RTX packet. * * @param pkt the packet to check. * @return {@code true} iff {@code pkt} is an RTX packet. */ private boolean isRtx(RawPacket pkt) { byte rtxPt = rtxPayloadType; return rtxPt != -1 && rtxPt == pkt.getPayloadType(); } /** * Removes the RTX encapsulation from a packet. * * @param pkt the packet to remove the RTX encapsulation from. * @return the original media packet represented by {@code pkt}, or null if we couldn't * reconstruct the original packet. */ private RawPacket deRtx(RawPacket pkt) { boolean success = false; if (pkt.getPayloadLength() - pkt.getPaddingSize() < 2) { // We need at least 2 bytes to read the OSN field. if (logger.isDebugEnabled()) { logger.debug("Dropping an incoming RTX packet with padding only: " + pkt); } return null; } long mediaSsrc = getPrimarySsrc(pkt); if (mediaSsrc != -1) { if (rtxAssociatedPayloadType != -1) { int osn = pkt.getOriginalSequenceNumber(); // Remove the RTX header by moving the RTP header two bytes // right. byte[] buf = pkt.getBuffer(); int off = pkt.getOffset(); System.arraycopy(buf, off, buf, off + 2, pkt.getHeaderLength()); pkt.setOffset(off + 2); pkt.setLength(pkt.getLength() - 2); pkt.setSSRC((int) mediaSsrc); pkt.setSequenceNumber(osn); pkt.setPayloadType(rtxAssociatedPayloadType); success = true; } else { logger.warn( "RTX packet received, but no APT is defined. Packet " + "SSRC " + pkt.getSSRCAsLong() + ", associated media" + " SSRC " + mediaSsrc); } } // If we failed to handle the RTX packet, drop it. return success ? pkt : null; } /** Implements {@link TransformEngine#getRTPTransformer()}. */ @Override public PacketTransformer getRTPTransformer() { return this; } /** Implements {@link TransformEngine#getRTCPTransformer()}.
*/ @Override public PacketTransformer getRTCPTransformer() { return null; } /** * Returns the sequence number to use for a specific RTX packet, which is based on the packet's * original sequence number. * * <p>Because we terminate the RTX format, and with simulcast we might translate RTX packets from * multiple SSRCs into the same SSRC, we keep count of the RTX packets (and their sequence * numbers) which we sent for each SSRC. * * @param ssrc the SSRC of the RTX stream for the packet. * @return the sequence number which should be used for the next RTX packet sent using SSRC * <tt>ssrc</tt>. */ private int getNextRtxSequenceNumber(long ssrc) { Integer seq; synchronized (rtxSequenceNumbers) { seq = rtxSequenceNumbers.get(ssrc); if (seq == null) seq = new Random().nextInt(0xffff); else seq++; rtxSequenceNumbers.put(ssrc, seq); } return seq; } /** * Tries to find an SSRC paired with {@code ssrc} in an FID group in one of the channels from * {@link #channel}'s {@code Content}. Returns -1 on failure. * * @param pkt the {@code RawPacket} that holds the RTP packet for which to find a paired SSRC. * @return An SSRC paired with {@code ssrc} in an FID group, or -1. */ private long getRtxSsrc(RawPacket pkt) { StreamRTPManager receiveRTPManager = channel.getStream().getRTPTranslator().findStreamRTPManagerByReceiveSSRC(pkt.getSSRC()); MediaStreamTrackReceiver receiver = null; if (receiveRTPManager != null) { MediaStream receiveStream = receiveRTPManager.getMediaStream(); if (receiveStream != null) { receiver = receiveStream.getMediaStreamTrackReceiver(); } } if (receiver == null) { return -1; } RTPEncoding encoding = receiver.resolveRTPEncoding(pkt); if (encoding == null) { logger.warn( "encoding_not_found" + ",stream_hash=" + channel.getStream().hashCode() + " ssrc=" + pkt.getSSRCAsLong()); return -1; } return encoding.getRTXSSRC(); } /** * Retransmits a packet to {@link #channel}. If the destination supports the RTX format, the * packet will be encapsulated in RTX, otherwise, the packet will be retransmitted as-is. * * @param pkt the packet to retransmit. * @param after the {@code TransformEngine} in the chain of {@code TransformEngine}s of the * associated {@code MediaStream} after which the injection of {@code pkt} is to begin * @return {@code true} if the packet was successfully retransmitted, {@code false} otherwise. */ public boolean retransmit(RawPacket pkt, TransformEngine after) { boolean destinationSupportsRtx = rtxPayloadType != -1; boolean retransmitPlain; if (destinationSupportsRtx) { long rtxSsrc = getRtxSsrc(pkt); if (rtxSsrc == -1) { logger.warn( "Cannot find SSRC for RTX, retransmitting plain. " + "SSRC=" + pkt.getSSRCAsLong()); retransmitPlain = true; } else { retransmitPlain = !encapsulateInRtxAndTransmit(pkt, rtxSsrc, after); } } else { retransmitPlain = true; } if (retransmitPlain) { MediaStream mediaStream = channel.getStream(); if (mediaStream != null) { try { mediaStream.injectPacket(pkt, /* data */ true, after); } catch (TransmissionFailedException tfe) { logger.warn("Failed to retransmit a packet."); return false; } } } return true; } /** * Notifies this instance that the dynamic payload types of the associated {@link MediaStream} * have changed. 
*/ public void onDynamicPayloadTypesChanged() { rtxPayloadType = -1; rtxAssociatedPayloadType = -1; MediaStream mediaStream = channel.getStream(); Map<Byte, MediaFormat> mediaFormatMap = mediaStream.getDynamicRTPPayloadTypes(); Iterator<Map.Entry<Byte, MediaFormat>> it = mediaFormatMap.entrySet().iterator(); while (it.hasNext() && rtxPayloadType == -1) { Map.Entry<Byte, MediaFormat> entry = it.next(); MediaFormat format = entry.getValue(); if (!Constants.RTX.equalsIgnoreCase(format.getEncoding())) { continue; } // XXX(gp) we freak out if multiple codecs with RTX support are // present. rtxPayloadType = entry.getKey(); rtxAssociatedPayloadType = Byte.parseByte(format.getFormatParameters().get("apt")); } } /** * Encapsulates {@code pkt} in the RTX format, using {@code rtxSsrc} as its SSRC, and transmits it * to {@link #channel} by injecting it in the {@code MediaStream}. * * @param pkt the packet to transmit. * @param rtxSsrc the SSRC for the RTX stream. * @param after the {@code TransformEngine} in the chain of {@code TransformEngine}s of the * associated {@code MediaStream} after which the injection of {@code pkt} is to begin * @return {@code true} if the packet was successfully retransmitted, {@code false} otherwise. */ private boolean encapsulateInRtxAndTransmit(RawPacket pkt, long rtxSsrc, TransformEngine after) { byte[] buf = pkt.getBuffer(); int len = pkt.getLength(); int off = pkt.getOffset(); byte[] newBuf = new byte[len + 2]; RawPacket rtxPkt = new RawPacket(newBuf, 0, len + 2); int osn = pkt.getSequenceNumber(); int headerLength = pkt.getHeaderLength(); int payloadLength = pkt.getPayloadLength(); // Copy the header. System.arraycopy(buf, off, newBuf, 0, headerLength); // Set the OSN field. newBuf[headerLength] = (byte) ((osn >> 8) & 0xff); newBuf[headerLength + 1] = (byte) (osn & 0xff); // Copy the payload. System.arraycopy(buf, off + headerLength, newBuf, headerLength + 2, payloadLength); MediaStream mediaStream = channel.getStream(); if (mediaStream != null) { rtxPkt.setSSRC((int) rtxSsrc); rtxPkt.setPayloadType(rtxPayloadType); // Only call getNextRtxSequenceNumber() when we're sure we're going // to transmit a packet, because it consumes a sequence number. rtxPkt.setSequenceNumber(getNextRtxSequenceNumber(rtxSsrc)); try { mediaStream.injectPacket(rtxPkt, /* data */ true, after); } catch (TransmissionFailedException tfe) { logger.warn("Failed to transmit an RTX packet."); return false; } } return true; } /** * Returns the SSRC paired with <tt>ssrc</tt> in an FID source-group, if any. If none is found, * returns -1. * * @return the SSRC paired with <tt>ssrc</tt> in an FID source-group, if any. If none is found, * returns -1. */ private long getPrimarySsrc(RawPacket pkt) { MediaStreamTrackReceiver receiver = channel.getStream().getMediaStreamTrackReceiver(); if (receiver == null) { if (logger.isDebugEnabled()) { logger.debug("Dropping an incoming RTX packet from an unknown source."); } return -1; } RTPEncoding encoding = receiver.resolveRTPEncoding(pkt); if (encoding == null) { if (logger.isDebugEnabled()) { logger.debug("Dropping an incoming RTX packet from an unknown source."); } return -1; } return encoding.getPrimarySSRC(); } }
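Note the trick deRtx() uses above: instead of copying the payload left over the OSN, it copies the (shorter) RTP header two bytes to the right and advances the packet's offset, after which the visible packet starts two bytes later and is two bytes shorter. A standalone sketch of just that shift, on a bare byte array rather than a RawPacket:

public class DeRtxSketch {
  // Shifts the header right by two bytes and returns the new packet offset;
  // the packet's length must separately be reduced by two.
  static int stripOsn(byte[] buf, int off, int headerLength) {
    System.arraycopy(buf, off, buf, off + 2, headerLength);
    return off + 2;
  }

  public static void main(String[] args) {
    byte[] pkt = {1, 2, 3, 4, 0x0A, 0x0B, 9}; // 4-byte header, OSN 0x0A0B, payload 9
    int newOff = stripOsn(pkt, 0, 4);
    System.out.println(newOff + " " + pkt[newOff]); // 2 1: header now starts at offset 2
  }
}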
/** * Implements <tt>MediaFormatFactory</tt> for the JMF <tt>Format</tt> types. * * @author Lyubomir Marinov */ public class MediaFormatFactoryImpl implements MediaFormatFactory { /** * The <tt>Logger</tt> used by the <tt>MediaFormatFactoryImpl</tt> class and its instances for * logging output. */ private static final Logger logger = Logger.getLogger(MediaFormatFactoryImpl.class); /** * Creates an unknown <tt>MediaFormat</tt>. * * @param type <tt>MediaType</tt> * @return unknown <tt>MediaFormat</tt> */ public MediaFormat createUnknownMediaFormat(MediaType type) { Format unknown = null; /* * FIXME Why is a VideoFormat instance created for MediaType.AUDIO and * an AudioFormat instance for MediaType.VIDEO? */ if (type.equals(MediaType.AUDIO)) unknown = new VideoFormat("unknown"); else if (type.equals(MediaType.VIDEO)) unknown = new AudioFormat("unknown"); return MediaFormatImpl.createInstance(unknown); } /** * Creates a <tt>MediaFormat</tt> for the specified <tt>encoding</tt> with default clock rate and * set of format parameters. If <tt>encoding</tt> is known to this <tt>MediaFormatFactory</tt>, * returns a <tt>MediaFormat</tt> which is either an <tt>AudioMediaFormat</tt> or a * <tt>VideoMediaFormat</tt> instance. Otherwise, returns <tt>null</tt>. * * @param encoding the well-known encoding (name) to create a <tt>MediaFormat</tt> for * @return a <tt>MediaFormat</tt> with the specified <tt>encoding</tt> which is either an * <tt>AudioMediaFormat</tt> or a <tt>VideoMediaFormat</tt> instance if <tt>encoding</tt> is * known to this <tt>MediaFormatFactory</tt>; otherwise, <tt>null</tt> * @see MediaFormatFactory#createMediaFormat(String) */ public MediaFormat createMediaFormat(String encoding) { return createMediaFormat(encoding, CLOCK_RATE_NOT_SPECIFIED); } /** * Creates a <tt>MediaFormat</tt> for the specified RTP payload type with default clock rate and * set of format parameters. If <tt>rtpPayloadType</tt> is known to this * <tt>MediaFormatFactory</tt>, returns a <tt>MediaFormat</tt> which is either an * <tt>AudioMediaFormat</tt> or a <tt>VideoMediaFormat</tt> instance. Otherwise, returns * <tt>null</tt>. * * @param rtpPayloadType the RTP payload type of the <tt>MediaFormat</tt> to create * @return a <tt>MediaFormat</tt> with the specified <tt>rtpPayloadType</tt> which is either an * <tt>AudioMediaFormat</tt> or a <tt>VideoMediaFormat</tt> instance if * <tt>rtpPayloadType</tt> is known to this <tt>MediaFormatFactory</tt>; otherwise, * <tt>null</tt> * @see MediaFormatFactory#createMediaFormat(byte) */ public MediaFormat createMediaFormat(byte rtpPayloadType) { /* * We know which are the MediaFormat instances with the specified * rtpPayloadType but we cannot directly return them because they do not * reflect the user's configuration with respect to being enabled and * disabled. */ for (MediaFormat rtpPayloadTypeMediaFormat : MediaUtils.getMediaFormats(rtpPayloadType)) { MediaFormat mediaFormat = createMediaFormat( rtpPayloadTypeMediaFormat.getEncoding(), rtpPayloadTypeMediaFormat.getClockRate()); if (mediaFormat != null) return mediaFormat; } return null; } /** * Creates a <tt>MediaFormat</tt> for the specified <tt>encoding</tt> with the specified * <tt>clockRate</tt> and a default set of format parameters. If <tt>encoding</tt> is known to * this <tt>MediaFormatFactory</tt>, returns a <tt>MediaFormat</tt> which is either an * <tt>AudioMediaFormat</tt> or a <tt>VideoMediaFormat</tt> instance. Otherwise, returns * <tt>null</tt>. 
* * @param encoding the well-known encoding (name) to create a <tt>MediaFormat</tt> for * @param clockRate the clock rate in Hz to create a <tt>MediaFormat</tt> for * @return a <tt>MediaFormat</tt> with the specified <tt>encoding</tt> and <tt>clockRate</tt> * which is either an <tt>AudioMediaFormat</tt> or a <tt>VideoMediaFormat</tt> instance if * <tt>encoding</tt> is known to this <tt>MediaFormatFactory</tt>; otherwise, <tt>null</tt> * @see MediaFormatFactory#createMediaFormat(String, double) */ public MediaFormat createMediaFormat(String encoding, double clockRate) { return createMediaFormat(encoding, clockRate, 1); } /** * Creates a <tt>MediaFormat</tt> for the specified <tt>encoding</tt>, <tt>clockRate</tt> and * <tt>channels</tt> and a default set of format parameters. If <tt>encoding</tt> is known to this * <tt>MediaFormatFactory</tt>, returns a <tt>MediaFormat</tt> which is either an * <tt>AudioMediaFormat</tt> or a <tt>VideoMediaFormat</tt> instance. Otherwise, returns * <tt>null</tt>. * * @param encoding the well-known encoding (name) to create a <tt>MediaFormat</tt> for * @param clockRate the clock rate in Hz to create a <tt>MediaFormat</tt> for * @param channels the number of available channels (1 for mono, 2 for stereo) if it makes sense * for the <tt>MediaFormat</tt> with the specified <tt>encoding</tt>; otherwise, ignored * @return a <tt>MediaFormat</tt> with the specified <tt>encoding</tt>, <tt>clockRate</tt> and * <tt>channels</tt> and a default set of format parameters which is either an * <tt>AudioMediaFormat</tt> or a <tt>VideoMediaFormat</tt> instance if <tt>encoding</tt> is * known to this <tt>MediaFormatFactory</tt>; otherwise, <tt>null</tt> * @see MediaFormatFactory#createMediaFormat(String, double, int) */ public MediaFormat createMediaFormat(String encoding, double clockRate, int channels) { return createMediaFormat(encoding, clockRate, channels, null); } private MediaFormat createMediaFormat( String encoding, double clockRate, int channels, Map<String, String> fmtps) { for (MediaFormat format : getSupportedMediaFormats(encoding, clockRate)) { /* * The mediaType, encoding and clockRate properties are sure to * match because format is the result of the search for encoding and * clockRate. We just want to make sure that the channels and the * format parameters match. */ if (format.matches( format.getMediaType(), format.getEncoding(), format.getClockRate(), channels, fmtps)) return format; } return null; } /** * Creates a <tt>MediaFormat</tt> for the specified <tt>encoding</tt>, <tt>clockRate</tt> and set * of format parameters. If <tt>encoding</tt> is known to this <tt>MediaFormatFactory</tt>, * returns a <tt>MediaFormat</tt> which is either an <tt>AudioMediaFormat</tt> or a * <tt>VideoMediaFormat</tt> instance. Otherwise, returns <tt>null</tt>. 
* * @param encoding the well-known encoding (name) to create a <tt>MediaFormat</tt> for * @param clockRate the clock rate in Hz to create a <tt>MediaFormat</tt> for * @param formatParams any codec specific parameters which have been received via SIP/SDP or * XMPP/Jingle * @return a <tt>MediaFormat</tt> with the specified <tt>encoding</tt>, <tt>clockRate</tt> and set * of format parameters which is either an <tt>AudioMediaFormat</tt> or a * <tt>VideoMediaFormat</tt> instance if <tt>encoding</tt> is known to this * <tt>MediaFormatFactory</tt>; otherwise, <tt>null</tt> * @see MediaFormatFactory#createMediaFormat(String, double, Map, Map) */ public MediaFormat createMediaFormat( String encoding, double clockRate, Map<String, String> formatParams, Map<String, String> advancedParams) { return createMediaFormat(encoding, clockRate, 1, -1, formatParams, advancedParams); } /** * Creates a <tt>MediaFormat</tt> for the specified <tt>encoding</tt>, <tt>clockRate</tt>, * <tt>channels</tt> and set of format parameters. If <tt>encoding</tt> is known to this * <tt>MediaFormatFactory</tt>, returns a <tt>MediaFormat</tt> which is either an * <tt>AudioMediaFormat</tt> or a <tt>VideoMediaFormat</tt> instance. Otherwise, returns * <tt>null</tt>. * * @param encoding the well-known encoding (name) to create a <tt>MediaFormat</tt> for * @param clockRate the clock rate in Hz to create a <tt>MediaFormat</tt> for * @param frameRate the frame rate in number of frames per second to create a <tt>MediaFormat</tt> * for * @param channels the number of available channels (1 for mono, 2 for stereo) if it makes sense * for the <tt>MediaFormat</tt> with the specified <tt>encoding</tt>; otherwise, ignored * @param formatParams any codec specific parameters which have been received via SIP/SDP or * XMPP/Jingle * @param advancedParams any parameters which have been received via SIP/SDP or XMPP/Jingle * @return a <tt>MediaFormat</tt> with the specified <tt>encoding</tt>, <tt>clockRate</tt>, * <tt>channels</tt> and set of format parameters which is either an <tt>AudioMediaFormat</tt> * or a <tt>VideoMediaFormat</tt> instance if <tt>encoding</tt> is known to this * <tt>MediaFormatFactory</tt>; otherwise, <tt>null</tt> * @see MediaFormatFactory#createMediaFormat(String, double, int, float, Map, Map) */ public MediaFormat createMediaFormat( String encoding, double clockRate, int channels, float frameRate, Map<String, String> formatParams, Map<String, String> advancedParams) { MediaFormat mediaFormat = createMediaFormat(encoding, clockRate, channels, formatParams); if (mediaFormat == null) return null; /* * MediaFormatImpl is immutable so if the caller wants to change the * format parameters and/or the advanced attributes, we'll have to * create a new MediaFormatImpl. 
*/ Map<String, String> formatParameters = null; Map<String, String> advancedParameters = null; if ((formatParams != null) && !formatParams.isEmpty()) formatParameters = formatParams; if ((advancedParams != null) && !advancedParams.isEmpty()) advancedParameters = advancedParams; if ((formatParameters != null) || (advancedParameters != null)) { switch (mediaFormat.getMediaType()) { case AUDIO: mediaFormat = new AudioMediaFormatImpl( ((AudioMediaFormatImpl) mediaFormat).getFormat(), formatParameters, advancedParameters); break; case VIDEO: VideoMediaFormatImpl videoMediaFormatImpl = (VideoMediaFormatImpl) mediaFormat; /* * If the format of VideoMediaFormatImpl is * a ParameterizedVideoFormat, it's possible for the format * parameters of that ParameterizedVideoFormat and of the new * VideoMediaFormatImpl (to be created) to be out of sync. While * it's not technically perfect, it should be practically safe * for the format parameters which distinguish VideoFormats with * the same encoding and clock rate because mediaFormat has * already been created in sync with formatParams (with respect * to the format parameters which distinguish VideoFormats with * the same encoding and clock rate). */ mediaFormat = new VideoMediaFormatImpl( videoMediaFormatImpl.getFormat(), videoMediaFormatImpl.getClockRate(), frameRate, formatParameters, advancedParameters); break; default: mediaFormat = null; } } return mediaFormat; } /** * Creates a <tt>MediaFormat</tt> either for the specified <tt>rtpPayloadType</tt> or for the * specified <tt>encoding</tt>, <tt>clockRate</tt>, <tt>channels</tt> and set of format * parameters. If <tt>encoding</tt> is known to this <tt>MediaFormatFactory</tt>, ignores * <tt>rtpPayloadType</tt> and returns a <tt>MediaFormat</tt> which is either an * <tt>AudioMediaFormat</tt> or a <tt>VideoMediaFormat</tt> instance. If <tt>rtpPayloadType</tt> * is not {@link MediaFormat#RTP_PAYLOAD_TYPE_UNKNOWN} and <tt>encoding</tt> is <tt>null</tt>, * uses the encoding associated with <tt>rtpPayloadType</tt>. * * @param rtpPayloadType the RTP payload type to create a <tt>MediaFormat</tt> for; {@link * MediaFormat#RTP_PAYLOAD_TYPE_UNKNOWN} if <tt>encoding</tt> is not <tt>null</tt>. 
If * <tt>rtpPayloadType</tt> is not <tt>MediaFormat#RTP_PAYLOAD_TYPE_UNKNOWN</tt> and * <tt>encoding</tt> is not <tt>null</tt>, <tt>rtpPayloadType</tt> is ignored * @param encoding the well-known encoding (name) to create a <tt>MediaFormat</tt> for; * <tt>null</tt> to use the encoding associated with <tt>rtpPayloadType</tt> * @param clockRate the clock rate in Hz to create a <tt>MediaFormat</tt> for * @param channels the number of available channels (1 for mono, 2 for stereo) if it makes sense * for the <tt>MediaFormat</tt> with the specified <tt>encoding</tt>; otherwise, ignored * @param frameRate the frame rate in number of frames per second to create a <tt>MediaFormat</tt> * for * @param formatParams any codec specific parameters which have been received via SIP/SDP or * XMPP/Jingle * @param advancedParams any parameters which have been received via SIP/SDP or XMPP/Jingle * @return a <tt>MediaFormat</tt> with the specified <tt>encoding</tt>, <tt>clockRate</tt>, * <tt>channels</tt> and set of format parameters which is either an <tt>AudioMediaFormat</tt> * or a <tt>VideoMediaFormat</tt> instance if <tt>encoding</tt> is known to this * <tt>MediaFormatFactory</tt>; otherwise, <tt>null</tt> */ public MediaFormat createMediaFormat( byte rtpPayloadType, String encoding, double clockRate, int channels, float frameRate, Map<String, String> formatParams, Map<String, String> advancedParams) { /* * If rtpPayloadType is specified, use it only to figure out encoding * and/or clockRate in case either one of them is unknown. */ if ((MediaFormat.RTP_PAYLOAD_TYPE_UNKNOWN != rtpPayloadType) && ((encoding == null) || (CLOCK_RATE_NOT_SPECIFIED == clockRate))) { MediaFormat[] rtpPayloadTypeMediaFormats = MediaUtils.getMediaFormats(rtpPayloadType); if (rtpPayloadTypeMediaFormats.length > 0) { if (encoding == null) encoding = rtpPayloadTypeMediaFormats[0].getEncoding(); // Assign or check the clock rate. if (CLOCK_RATE_NOT_SPECIFIED == clockRate) clockRate = rtpPayloadTypeMediaFormats[0].getClockRate(); else { boolean clockRateIsValid = false; for (MediaFormat rtpPayloadTypeMediaFormat : rtpPayloadTypeMediaFormats) if (rtpPayloadTypeMediaFormat.getEncoding().equals(encoding) && (rtpPayloadTypeMediaFormat.getClockRate() == clockRate)) { clockRateIsValid = true; break; } if (!clockRateIsValid) return null; } } } return createMediaFormat( encoding, clockRate, channels, frameRate, formatParams, advancedParams); } /** * Gets the <tt>MediaFormat</tt>s among the specified <tt>mediaFormats</tt> which have the * specified <tt>encoding</tt> and, optionally, <tt>clockRate</tt>. * * @param mediaFormats the <tt>MediaFormat</tt>s from which to filter out only the ones which have * the specified <tt>encoding</tt> and, optionally, <tt>clockRate</tt> * @param encoding the well-known encoding (name) of the <tt>MediaFormat</tt>s to be retrieved * @param clockRate the clock rate of the <tt>MediaFormat</tt>s to be retrieved; {@link * #CLOCK_RATE_NOT_SPECIFIED} if any clock rate is acceptable * @return a <tt>List</tt> of the <tt>MediaFormat</tt>s among <tt>mediaFormats</tt> which have the * specified <tt>encoding</tt> and, optionally, <tt>clockRate</tt> */ private List<MediaFormat> getMatchingMediaFormats( MediaFormat[] mediaFormats, String encoding, double clockRate) { /* * XXX Use String#equalsIgnoreCase(String) because some clients transmit * some of the codecs starting with capital letters. */ /* * As per RFC 3551, Section 4.5.2, because of a mistake in RFC 1890 and for * backward compatibility, G.722 should always be announced as 8000 even * though it is wideband.
So, if someone is looking for G722/16000, * then: Forgive them, for they know not what they do! */ if ("G722".equalsIgnoreCase(encoding) && (16000 == clockRate)) { clockRate = 8000; if (logger.isInfoEnabled()) logger.info("Suppressing erroneous 16000 announcement for G.722"); } List<MediaFormat> supportedMediaFormats = new ArrayList<MediaFormat>(); for (MediaFormat mediaFormat : mediaFormats) { if (mediaFormat.getEncoding().equalsIgnoreCase(encoding) && ((CLOCK_RATE_NOT_SPECIFIED == clockRate) || (mediaFormat.getClockRate() == clockRate))) { supportedMediaFormats.add(mediaFormat); } } return supportedMediaFormats; } /** * Gets the <tt>MediaFormat</tt>s supported by this <tt>MediaFormatFactory</tt> and the * <tt>MediaService</tt> associated with it and having the specified <tt>encoding</tt> and, * optionally, <tt>clockRate</tt>. * * @param encoding the well-known encoding (name) of the <tt>MediaFormat</tt>s to be retrieved * @param clockRate the clock rate of the <tt>MediaFormat</tt>s to be retrieved; {@link * #CLOCK_RATE_NOT_SPECIFIED} if any clock rate is acceptable * @return a <tt>List</tt> of the <tt>MediaFormat</tt>s supported by the <tt>MediaService</tt> * associated with this <tt>MediaFormatFactory</tt> and having the specified encoding and, * optionally, clock rate */ private List<MediaFormat> getSupportedMediaFormats(String encoding, double clockRate) { EncodingConfiguration encodingConfiguration = NeomediaServiceUtils.getMediaServiceImpl().getCurrentEncodingConfiguration(); List<MediaFormat> supportedMediaFormats = getMatchingMediaFormats( encodingConfiguration.getAllEncodings(MediaType.AUDIO), encoding, clockRate); if (supportedMediaFormats.isEmpty()) supportedMediaFormats = getMatchingMediaFormats( encodingConfiguration.getAllEncodings(MediaType.VIDEO), encoding, clockRate); return supportedMediaFormats; } }
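To make the factory contract above concrete, here is a minimal usage sketch. It assumes the usual libjitsi bootstrap via LibJitsi and that the MediaService exposes this factory through getFormatFactory(); the encoding names and clock rates are illustrative only, and unknown encodings simply yield null rather than throwing.

// Hedged usage sketch for the createMediaFormat(...) overloads above.
import org.jitsi.service.libjitsi.LibJitsi;
import org.jitsi.service.neomedia.MediaService;
import org.jitsi.service.neomedia.format.MediaFormat;
import org.jitsi.service.neomedia.format.MediaFormatFactory;

public class MediaFormatFactoryDemo {
    public static void main(String[] args) {
        LibJitsi.start();
        try {
            MediaService mediaService = LibJitsi.getMediaService();
            MediaFormatFactory factory = mediaService.getFormatFactory();

            // Stereo opus at 48 kHz via the (encoding, clockRate, channels) overload.
            MediaFormat opus = factory.createMediaFormat("opus", 48000, 2);

            // An unknown encoding yields null rather than an exception.
            MediaFormat unknown = factory.createMediaFormat("no-such-codec", 8000);

            System.out.println("opus -> " + opus);
            System.out.println("unknown -> " + unknown);
        } finally {
            LibJitsi.stop();
        }
    }
}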
/** * An <tt>Iterator</tt> that iterates RED blocks (primary and non-primary). * * @author George Politis */ public class REDBlockIterator implements Iterator<REDBlock> { /** * The <tt>Logger</tt> used by the <tt>REDBlockIterator</tt> class and its instances to print * debug information. */ private static final Logger logger = Logger.getLogger(REDBlockIterator.class); /** The byte buffer that holds the RED payload that this instance is dissecting. */ private final byte[] buffer; /** The offset in the buffer where the RED payload begins. */ private final int offset; /** The length of the RED payload in the buffer. */ private final int length; /** The number of RED blocks inside the RED payload. */ private int cntRemainingBlocks = -1; /** The offset of the next RED block header inside the RED payload. */ private int offNextBlockHeader = -1; /** The offset of the next RED block payload inside the RED payload. */ private int offNextBlockPayload = -1; /** * Matches the first RED block in the RED payload that satisfies a given predicate. * * @param predicate the predicate that is used to match the RED block. * @param buffer the byte buffer that contains the RED payload. * @param offset the offset in the buffer where the RED payload begins. * @param length the length of the RED payload. * @return the first RED block that matches the given predicate, or null if there is none. */ public static REDBlock matchFirst( Predicate<REDBlock> predicate, byte[] buffer, int offset, int length) { if (isMultiBlock(buffer, offset, length)) { REDBlockIterator it = new REDBlockIterator(buffer, offset, length); while (it.hasNext()) { REDBlock b = it.next(); if (b != null && predicate.test(b)) { return b; } } return null; } else { REDBlock b = getPrimaryBlock(buffer, offset, length); if (b != null && predicate.test(b)) { return b; } else { return null; } } } /** * Gets the primary RED block, i.e. the last block, in the RED payload. * * @param buffer the byte buffer that contains the RED payload. * @param offset the offset in the buffer where the RED payload begins. * @param length the length of the RED payload. * @return the primary RED block if it exists, null otherwise. */ public static REDBlock getPrimaryBlock(byte[] buffer, int offset, int length) { // Chrome is typically sending RED packets with a single block carrying // either VP8 or FEC. This is unusual, and probably wrong as it messes // up the sequence numbers and packet loss computations but it's just // the way it is. Here we detect this situation and avoid looping // through the blocks if there is a single block. if (isMultiBlock(buffer, offset, length)) { REDBlock block = null; REDBlockIterator redBlockIterator = new REDBlockIterator(buffer, offset, length); while (redBlockIterator.hasNext()) { block = redBlockIterator.next(); } if (block == null) { logger.warn("No primary block found."); } return block; } else { if (buffer == null || offset < 0 || length < 0 || buffer.length < offset + length) { // Do not dereference buffer in the log message; it may be null here. logger.warn( "Prevented an array out of bounds exception: " + "buffer length: " + (buffer == null ? "null" : String.valueOf(buffer.length)) + ", offset: " + offset + ", len: " + length); return null; } byte blockPT = (byte) (buffer[offset] & 0x7f); int blockOff = offset + 1; // + 1 for the primary block header. int blockLen = length - 1; // the 1-byte header is not part of the payload. if (buffer.length < blockOff + blockLen) { logger.warn("Primary block doesn't fit in RED packet."); return null; } return new REDBlock(buffer, blockOff, blockLen, blockPT); } } /** * Returns {@code true} if a specific RED packet contains multiple blocks; * {@code false}, otherwise.
* * @param buffer the byte buffer that contains the RED payload. * @param offset the offset in the buffer where the RED payload begins. * @param length the length of the RED payload. * @return {@code true} if the RED payload contains multiple RED blocks; otherwise, * {@code false} */ public static boolean isMultiBlock(byte[] buffer, int offset, int length) { if (buffer == null || buffer.length == 0) { logger.warn("The buffer appears to be empty."); return false; } if (offset < 0 || buffer.length <= offset) { logger.warn("Prevented array out of bounds exception."); return false; } return (buffer[offset] & 0x80) != 0; } /** * Ctor. * * @param buffer the byte buffer that contains the RED payload. * @param offset the offset in the buffer where the RED payload begins. * @param length the length of the RED payload. */ public REDBlockIterator(byte[] buffer, int offset, int length) { this.buffer = buffer; this.offset = offset; this.length = length; this.initialize(); } @Override public boolean hasNext() { return cntRemainingBlocks > 0; } @Override public REDBlock next() { if (!hasNext()) { throw new NoSuchElementException(); } cntRemainingBlocks--; if (buffer == null || buffer.length <= offNextBlockHeader) { logger.warn("Prevented an array out of bounds exception."); return null; } byte blockPT = (byte) (buffer[offNextBlockHeader] & 0x7f); // Remember where the current block's payload starts before the iterator // state is advanced; the returned REDBlock must point at this block's // payload, not at the next one's. int blockOff = offNextBlockPayload; int blockLen; if (hasNext()) { if (buffer.length < offNextBlockHeader + 4) { logger.warn("Prevented an array out of bounds exception."); return null; } // 0 1 2 3 // 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 // +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ // |F| block PT | timestamp offset | block length | // +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ blockLen = (buffer[offNextBlockHeader + 2] & 0x03) << 8 | (buffer[offNextBlockHeader + 3] & 0xFF); offNextBlockHeader += 4; // next RED header offNextBlockPayload += blockLen; } else { // 0 1 2 3 4 5 6 7 // +-+-+-+-+-+-+-+-+ // |0| Block PT | // +-+-+-+-+-+-+-+-+ // The last block's payload extends to the end of the RED payload. blockLen = offset + length - offNextBlockPayload; offNextBlockHeader = -1; offNextBlockPayload = -1; } return new REDBlock(buffer, blockOff, blockLen, blockPT); } @Override public void remove() { throw new UnsupportedOperationException(); } /** Initializes this instance. */ private void initialize() { if (buffer == null || buffer.length == 0) { return; } // beginning of RTP payload offNextBlockHeader = offset; // Number of packets inside RED. cntRemainingBlocks = 0; // 0 1 2 3 // 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 // +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ // |F| block PT | timestamp offset | block length | // +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ while ((buffer[offNextBlockHeader] & 0x80) != 0) { cntRemainingBlocks++; offNextBlockHeader += 4; } // 0 1 2 3 4 5 6 7 // +-+-+-+-+-+-+-+-+ // |0| Block PT | // +-+-+-+-+-+-+-+-+ // The final block has a 1-byte header; count it if it is present. if (buffer.length >= offNextBlockHeader + 1) { cntRemainingBlocks++; } // back to beginning of RTP payload offNextBlockHeader = offset; if (cntRemainingBlocks > 0) { offNextBlockPayload = offNextBlockHeader + (cntRemainingBlocks - 1) * 4 + 1; } } }
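Because the header arithmetic above is easy to get wrong, the following self-contained sketch feeds the iterator a tiny synthetic RED payload. The payload types (111 for the redundant block, 100 for the primary) and the payload bytes are made-up values chosen for the demo.

public class REDBlockIteratorDemo {
    public static void main(String[] args) {
        // Synthetic RED payload: one redundant block (F=1, PT=111, ts-offset=0,
        // length=2) followed by the primary block (F=0, PT=100); both payload
        // types are arbitrary picks for this sketch.
        byte[] red = {
            (byte) 0xEF, 0x00, 0x00, 0x02, // 4-byte header of the redundant block
            0x64,                          // 1-byte header of the primary block
            0x01, 0x02,                    // redundant payload (2 bytes)
            0x0A, 0x0B, 0x0C               // primary payload (3 bytes)
        };

        System.out.println(
            "multi-block: " + REDBlockIterator.isMultiBlock(red, 0, red.length));

        REDBlockIterator it = new REDBlockIterator(red, 0, red.length);
        int blocks = 0;
        while (it.hasNext()) {
            if (it.next() != null) {
                blocks++;
            }
        }
        System.out.println("blocks: " + blocks); // expected: 2
    }
}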
/** * Captures the desktop screen either via native code (JNI), if available, or by using * <tt>java.awt.Robot</tt>. * * @see java.awt.Robot * @author Sebastien Vincent */ public class DesktopInteractImpl implements DesktopInteract { /** * The <tt>Logger</tt> used by the <tt>DesktopInteractImpl</tt> class and its instances for * logging output. */ private static final Logger logger = Logger.getLogger(DesktopInteractImpl.class); /** Screen capture robot. */ private Robot robot = null; /** * Constructor. * * @throws AWTException if platform configuration does not allow low-level input control * @throws SecurityException if Robot creation is not permitted */ public DesktopInteractImpl() throws AWTException, SecurityException { robot = new Robot(); } /** * Captures the full desktop screen using the native grabber. * * <p>Contrary to the other captureScreen methods, it only returns raw bytes and not a * <tt>BufferedImage</tt>. This is done in order to limit slow operations such as converting ARGB * images (uint32_t) to bytes, especially for big screens. For example, a 1920x1200 desktop * consumes 9 MB of memory for grabbing and another 9 MB array for the conversion operation. * * @param display index of display * @param output output buffer to store bytes in. Be sure that the output length is sufficient * @return true if success, false if JNI error or output length too short */ public boolean captureScreen(int display, byte[] output) { Dimension dim = Toolkit.getDefaultToolkit().getScreenSize(); return captureScreen(display, 0, 0, dim.width, dim.height, output); } /** * Captures the full desktop screen using the native grabber. * * <p>Contrary to the other captureScreen methods, it only returns raw bytes and not a * <tt>BufferedImage</tt>. This is done in order to limit slow operations such as converting ARGB * images (uint32_t) to bytes, especially for big screens. For example, a 1920x1200 desktop * consumes 9 MB of memory for grabbing and another 9 MB array for the conversion operation. * * @param display index of display * @param buffer native output buffer to store bytes in. Be sure that the buffer length is sufficient * @param bufferLength length of native buffer * @return true if success, false if JNI error or output length too short */ public boolean captureScreen(int display, long buffer, int bufferLength) { Dimension dim = Toolkit.getDefaultToolkit().getScreenSize(); return captureScreen(display, 0, 0, dim.width, dim.height, buffer, bufferLength); } /** * Captures a part of the desktop screen using the native grabber. * * <p>Contrary to the other captureScreen methods, it only returns raw bytes and not a * <tt>BufferedImage</tt>. This is done in order to limit slow operations such as converting ARGB * images (uint32_t) to bytes, especially for big screens. For example, a 1920x1200 desktop * consumes 9 MB of memory for grabbing and another 9 MB array for the conversion operation. * * @param display index of display * @param x x position to start capture * @param y y position to start capture * @param width capture width * @param height capture height * @param output output buffer to store bytes in. Be sure that the output length is sufficient * @return true if success, false if JNI error or output length too short */ public boolean captureScreen(int display, int x, int y, int width, int height, byte[] output) { return (OSUtils.IS_LINUX || OSUtils.IS_MAC || OSUtils.IS_WINDOWS) && ScreenCapture.grabScreen(display, x, y, width, height, output); } /** * Captures a part of the desktop screen using the native grabber.
* * <p>Contrary to the other captureScreen methods, it only returns raw bytes and not a * <tt>BufferedImage</tt>. This is done in order to limit slow operations such as converting ARGB * images (uint32_t) to bytes, especially for big screens. For example, a 1920x1200 desktop * consumes 9 MB of memory for grabbing and another 9 MB array for the conversion operation. * * @param display index of display * @param x x position to start capture * @param y y position to start capture * @param width capture width * @param height capture height * @param buffer native output buffer to store bytes in. Be sure that the buffer length is sufficient * @param bufferLength length of native buffer * @return true if success, false if JNI error or output length too short */ public boolean captureScreen( int display, int x, int y, int width, int height, long buffer, int bufferLength) { return (OSUtils.IS_LINUX || OSUtils.IS_MAC || OSUtils.IS_WINDOWS) && ScreenCapture.grabScreen(display, x, y, width, height, buffer, bufferLength); } /** * Captures the full desktop screen using <tt>java.awt.Robot</tt>. * * @return <tt>BufferedImage</tt> of the desktop screen */ public BufferedImage captureScreen() { Dimension dim = Toolkit.getDefaultToolkit().getScreenSize(); return captureScreen(0, 0, dim.width, dim.height); } /** * Captures a part of the desktop screen using <tt>java.awt.Robot</tt>. * * @param x x position to start capture * @param y y position to start capture * @param width capture width * @param height capture height * @return <tt>BufferedImage</tt> of a part of the desktop screen, or null if a Robot problem * occurs */ public BufferedImage captureScreen(int x, int y, int width, int height) { BufferedImage img = null; Rectangle rect = null; if (robot == null) { /* Robot has not been created so abort */ return null; } if (logger.isInfoEnabled()) logger.info("Begin capture: " + System.nanoTime()); rect = new Rectangle(x, y, width, height); img = robot.createScreenCapture(rect); if (logger.isInfoEnabled()) logger.info("End capture: " + System.nanoTime()); return img; } }
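A minimal sketch of driving the Robot-backed path above. It assumes a graphical session (Robot creation throws AWTException when headless); the output file name is an arbitrary choice for this demo.

import java.awt.image.BufferedImage;
import java.io.File;
import javax.imageio.ImageIO;

public class ScreenCaptureDemo {
    public static void main(String[] args) throws Exception {
        // DesktopInteractImpl falls back to java.awt.Robot; this throws
        // AWTException in a headless environment.
        DesktopInteract desktop = new DesktopInteractImpl();

        BufferedImage img = desktop.captureScreen();
        if (img != null) {
            // "screenshot.png" is an arbitrary file name for this sketch.
            ImageIO.write(img, "png", new File("screenshot.png"));
        }
    }
}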
/** * The <tt>BasicRTCPTerminationStrategy</tt> "gateways" PLIs, FIRs, NACKs, etc., in the sense that it * replaces the packet sender information in the PLIs, FIRs, NACKs, etc., and it generates its own * SRs/RRs/REMBs based on information that it collects and on information found in FMJ. * * @author George Politis */ public class BasicRTCPTerminationStrategy extends MediaStreamRTCPTerminationStrategy { /** * The <tt>Logger</tt> used by the <tt>BasicRTCPTerminationStrategy</tt> class and its instances * to print debug information. */ private static final Logger logger = Logger.getLogger(BasicRTCPTerminationStrategy.class); /** The maximum number of RTCP report blocks that an RR or an SR can contain. */ private static final int MAX_RTCP_REPORT_BLOCKS = 31; /** The minimum number of RTCP report blocks that an RR or an SR can contain. */ private static final int MIN_RTCP_REPORT_BLOCKS = 0; /** * A reusable array that can be used to hold up to <tt>MAX_RTCP_REPORT_BLOCKS</tt> * <tt>RTCPReportBlock</tt>s. It is assumed that a single thread is accessing this field at a * given time. */ private final RTCPReportBlock[] MAX_RTCP_REPORT_BLOCKS_ARRAY = new RTCPReportBlock[MAX_RTCP_REPORT_BLOCKS]; /** A reusable array that holds 0 <tt>RTCPReportBlock</tt>s. */ private static final RTCPReportBlock[] MIN_RTCP_REPORTS_BLOCKS_ARRAY = new RTCPReportBlock[MIN_RTCP_REPORT_BLOCKS]; /** * The RTP stats map that holds RTP statistics about all the streams that this * <tt>BasicRTCPTerminationStrategy</tt> (as a <tt>TransformEngine</tt>) has observed. */ private final RTPStatsMap rtpStatsMap = new RTPStatsMap(); /** * The remote clock estimator that holds the RTP/NTP timestamp mappings of all the streams that * this <tt>BasicRTCPTerminationStrategy</tt> (as a <tt>TransformEngine</tt>) has observed. */ private final RemoteClockEstimator remoteClockEstimator = new RemoteClockEstimator(); /** * The <tt>CNAMERegistry</tt> holds the CNAMEs that this RTCP termination (seen as a * TransformEngine) has encountered. */ private final CNAMERegistry cnameRegistry = new CNAMERegistry(); /** The parser that parses <tt>RawPacket</tt>s to <tt>RTCPCompoundPacket</tt>s. */ private final RTCPPacketParserEx parser = new RTCPPacketParserEx(); /** The generator that generates <tt>RawPacket</tt>s from <tt>RTCPCompoundPacket</tt>s. */ private final RTCPGenerator generator = new RTCPGenerator(); /** * The RTCP feedback gateway responsible for dropping all the feedback that this RTCP * termination strategy generates itself (SRs, RRs and REMBs). */ private final FeedbackGateway feedbackGateway = new FeedbackGateway(); /** The garbage collector that cleans up the state of this RTCP termination strategy. */ private final GarbageCollector garbageCollector = new GarbageCollector(); /** The RTP <tt>PacketTransformer</tt> of this <tt>BasicRTCPTerminationStrategy</tt>. */ private final PacketTransformer rtpTransformer = new SinglePacketTransformer() { /** {@inheritDoc} */ @Override public RawPacket transform(RawPacket pkt) { // Update our RTP stats map (packets/octet sent). rtpStatsMap.apply(pkt); return pkt; } /** {@inheritDoc} */ @Override public RawPacket reverseTransform(RawPacket pkt) { // Let everything pass through. return pkt; } }; /** The RTCP <tt>PacketTransformer</tt> of this <tt>BasicRTCPTerminationStrategy</tt>.
*/ private final PacketTransformer rtcpTransformer = new SinglePacketTransformer() { /** {@inheritDoc} */ @Override public RawPacket transform(RawPacket pkt) { if (pkt == null) { return pkt; } RTCPCompoundPacket inPacket; try { inPacket = (RTCPCompoundPacket) parser.parse(pkt.getBuffer(), pkt.getOffset(), pkt.getLength()); } catch (BadFormatException e) { logger.warn("Failed to terminate an RTCP packet. " + "Dropping packet."); return null; } // Update our RTCP stats map (timestamps). This operation is // read-only. remoteClockEstimator.apply(inPacket); cnameRegistry.update(inPacket); // Remove SRs and RRs from the RTCP packet. pkt = feedbackGateway.gateway(inPacket); return pkt; } /** {@inheritDoc} */ @Override public RawPacket reverseTransform(RawPacket pkt) { // Let everything pass through. return pkt; } }; /** A counter that counts the number of times we've sent "full-blown" SDES. */ private int sdesCounter = 0; /** {@inheritDoc} */ @Override public PacketTransformer getRTPTransformer() { return rtpTransformer; } /** {@inheritDoc} */ @Override public PacketTransformer getRTCPTransformer() { return rtcpTransformer; } /** {@inheritDoc} */ @Override public RawPacket report() { garbageCollector.cleanup(); // TODO Compound RTCP packets should not exceed the MTU of the network // path. // // An individual RTP participant should send only one compound RTCP // packet per report interval in order for the RTCP bandwidth per // participant to be estimated correctly, except when the compound // RTCP packet is split for partial encryption. // // If there are too many sources to fit all the necessary RR packets // into one compound RTCP packet without exceeding the maximum // transmission unit (MTU) of the network path, then only the subset // that will fit into one MTU should be included in each interval. The // subsets should be selected round-robin across multiple intervals so // that all sources are reported. // // It is impossible to know in advance what the MTU of path will be. // There are various algorithms for experimenting to find out, but many // devices do not properly implement (or deliberately ignore) the // necessary standards so it all comes down to trial and error. For that // reason, we can just guess 1200 or 1500 bytes per message. long time = System.currentTimeMillis(); Collection<RTCPPacket> packets = new ArrayList<RTCPPacket>(); // First, we build the RRs. Collection<RTCPRRPacket> rrPackets = makeRTCPRRPackets(time); if (rrPackets != null && rrPackets.size() != 0) { packets.addAll(rrPackets); } // Next, we build the SRs. Collection<RTCPSRPacket> srPackets = makeRTCPSRPackets(time); if (srPackets != null && srPackets.size() != 0) { packets.addAll(srPackets); } // Bail out if we have nothing to report. if (packets.size() == 0) { return null; } // Next, we build the REMB. RTCPREMBPacket rembPacket = makeRTCPREMBPacket(); if (rembPacket != null) { packets.add(rembPacket); } // Finally, we add an SDES packet. RTCPSDESPacket sdesPacket = makeSDESPacket(); if (sdesPacket != null) { packets.add(sdesPacket); } // Prepare the <tt>RTCPCompoundPacket</tt> to return. RTCPPacket rtcpPackets[] = packets.toArray(new RTCPPacket[packets.size()]); RTCPCompoundPacket cp = new RTCPCompoundPacket(rtcpPackets); // Build the <tt>RTCPCompoundPacket</tt> and return the // <tt>RawPacket</tt> to inject to the <tt>MediaStream</tt>. return generator.apply(cp); } /** * (attempts) to get the local SSRC that will be used in the media sender SSRC field of the RTCP * reports. 
TAG(cat4-local-ssrc-hurricane) * * @return the local SSRC that will be used in the media sender SSRC field of the RTCP reports. */ private long getLocalSSRC() { return getStream().getStreamRTPManager().getLocalSSRC(); } /** * Makes <tt>RTCPRRPacket</tt>s using information in FMJ. * * @param time the local time of the report in milliseconds. * @return A <tt>Collection</tt> of <tt>RTCPRRPacket</tt>s to inject into the <tt>MediaStream</tt>. */ private Collection<RTCPRRPacket> makeRTCPRRPackets(long time) { RTCPReportBlock[] reportBlocks = makeRTCPReportBlocks(time); if (reportBlocks == null || reportBlocks.length == 0) { return null; } Collection<RTCPRRPacket> rrPackets = new ArrayList<RTCPRRPacket>(); // We use the stream's local source ID (SSRC) as the SSRC of packet // sender. long streamSSRC = getLocalSSRC(); // Since a maximum of 31 reception report blocks will fit in an SR // or RR packet, additional RR packets SHOULD be stacked after the // initial SR or RR packet as needed to contain the reception // reports for all sources heard during the interval since the last // report. if (reportBlocks.length > MAX_RTCP_REPORT_BLOCKS) { for (int offset = 0; offset < reportBlocks.length; offset += MAX_RTCP_REPORT_BLOCKS) { RTCPReportBlock[] blocks = (reportBlocks.length - offset < MAX_RTCP_REPORT_BLOCKS) ? new RTCPReportBlock[reportBlocks.length - offset] : MAX_RTCP_REPORT_BLOCKS_ARRAY; System.arraycopy(reportBlocks, offset, blocks, 0, blocks.length); RTCPRRPacket rr = new RTCPRRPacket((int) streamSSRC, blocks); rrPackets.add(rr); } } else { RTCPRRPacket rr = new RTCPRRPacket((int) streamSSRC, reportBlocks); rrPackets.add(rr); } return rrPackets; } /** * Iterates through all the <tt>ReceiveStream</tt>s that this <tt>MediaStream</tt> has and makes * <tt>RTCPReportBlock</tt>s for all of them. * * @param time the local time of the report in milliseconds. * @return the <tt>RTCPReportBlock</tt>s that were made. */ private RTCPReportBlock[] makeRTCPReportBlocks(long time) { MediaStream stream = getStream(); // State validation. if (stream == null) { logger.warn("stream is null."); return MIN_RTCP_REPORTS_BLOCKS_ARRAY; } StreamRTPManager streamRTPManager = stream.getStreamRTPManager(); if (streamRTPManager == null) { logger.warn("streamRTPManager is null."); return MIN_RTCP_REPORTS_BLOCKS_ARRAY; } Collection<ReceiveStream> receiveStreams = streamRTPManager.getReceiveStreams(); if (receiveStreams == null || receiveStreams.size() == 0) { logger.info("There are no receive streams to build report " + "blocks for."); return MIN_RTCP_REPORTS_BLOCKS_ARRAY; } SSRCCache cache = streamRTPManager.getSSRCCache(); if (cache == null) { logger.info("cache is null."); return MIN_RTCP_REPORTS_BLOCKS_ARRAY; } // Create the return object. Collection<RTCPReportBlock> rtcpReportBlocks = new ArrayList<RTCPReportBlock>(); // Populate the return object. for (ReceiveStream receiveStream : receiveStreams) { // Dig into the guts of FMJ and get the stats for the current // receiveStream. The cache might not (yet) contain an entry for // this SSRC, so guard against null. SSRCInfo info = cache.cache.get((int) receiveStream.getSSRC()); if (info != null && !info.ours && info.sender) { RTCPReportBlock rtcpReportBlock = info.makeReceiverReport(time); rtcpReportBlocks.add(rtcpReportBlock); } } return rtcpReportBlocks.toArray(new RTCPReportBlock[rtcpReportBlocks.size()]); } /** * Makes an <tt>RTCPREMBPacket</tt> that provides receiver feedback to the endpoint from which we * receive. * * @return an <tt>RTCPREMBPacket</tt> that provides receiver feedback to the endpoint from which * we receive. */ private RTCPREMBPacket makeRTCPREMBPacket() { // TODO we should only make REMBs if REMB support has been advertised.
// Destination RemoteBitrateEstimator remoteBitrateEstimator = ((VideoMediaStream) getStream()).getRemoteBitrateEstimator(); Collection<Integer> ssrcs = remoteBitrateEstimator.getSsrcs(); // TODO(gp) intersect with SSRCs from signaled simulcast layers // NOTE(gp) The Google Congestion Control algorithm (sender side) // doesn't seem to care about the SSRCs in the dest field. long[] dest = new long[ssrcs.size()]; int i = 0; for (Integer ssrc : ssrcs) dest[i++] = ssrc & 0xFFFFFFFFL; // The latest bitrate estimate (RTCPREMBPacket encodes it as exp & mantissa). long bitrate = remoteBitrateEstimator.getLatestEstimate(); if (bitrate == -1) return null; if (logger.isDebugEnabled()) logger.debug("Estimated bitrate: " + bitrate); // Create and return the packet. // We use the stream's local source ID (SSRC) as the SSRC of packet // sender. long streamSSRC = getLocalSSRC(); return new RTCPREMBPacket(streamSSRC, /* mediaSSRC */ 0L, bitrate, dest); } /** * Makes <tt>RTCPSRPacket</tt>s for all the RTP streams that we're sending. * * @param time the local time of the report in milliseconds. * @return a <tt>Collection</tt> of <tt>RTCPSRPacket</tt>s for all the RTP streams that we're * sending. */ private Collection<RTCPSRPacket> makeRTCPSRPackets(long time) { Collection<RTCPSRPacket> srPackets = new ArrayList<RTCPSRPacket>(); for (RTPStatsEntry rtpStatsEntry : rtpStatsMap.values()) { int ssrc = rtpStatsEntry.getSsrc(); RemoteClock estimate = remoteClockEstimator.estimate(ssrc, time); if (estimate == null) { // We're not going to go far without an estimate. continue; } RTCPSRPacket srPacket = new RTCPSRPacket(ssrc, MIN_RTCP_REPORTS_BLOCKS_ARRAY); // Set the NTP timestamp for this SR. The estimated remote time is // kept as Java time (millis since 1970), so convert it back to an // NTP timestamp using the 1-Jan-1900 era base (the inverse of the // conversion performed in RemoteClockEstimator#apply). long estimatedRemoteTime = estimate.getRemoteTime(); long ntpMillis = estimatedRemoteTime - RemoteClockEstimator.msb1baseTime; long secs = ntpMillis / 1000L; double fraction = (ntpMillis - secs * 1000L) / 1000D; // Cast to long (not int) to avoid overflow for fractions >= 0.5. srPacket.ntptimestamplsw = (long) (fraction * 4294967296D); srPacket.ntptimestampmsw = secs; // Set the RTP timestamp. srPacket.rtptimestamp = estimate.getRtpTimestamp(); // Fill in the packet and octet send counts. srPacket.packetcount = rtpStatsEntry.getPacketsSent(); srPacket.octetcount = rtpStatsEntry.getBytesSent(); srPackets.add(srPacket); } return srPackets; } /** * Makes an <tt>RTCPSDESPacket</tt> with SDES chunks for all the RTP streams that we're aware of. * * @return an <tt>RTCPSDESPacket</tt> with SDES chunks for all the RTP streams that we're aware * of. */ private RTCPSDESPacket makeSDESPacket() { Collection<RTCPSDES> sdesChunks = new ArrayList<RTCPSDES>(); // Create an SDES for our own SSRC. RTCPSDES ownSDES = new RTCPSDES(); SSRCInfo ourinfo = getStream().getStreamRTPManager().getSSRCCache().ourssrc; ownSDES.ssrc = (int) getLocalSSRC(); Collection<RTCPSDESItem> ownItems = new ArrayList<RTCPSDESItem>(); ownItems.add(new RTCPSDESItem(RTCPSDESItem.CNAME, ourinfo.sourceInfo.getCNAME())); // Throttle the source description bandwidth. See RFC3550#6.3.9 // Allocation of Source Description Bandwidth.
if (sdesCounter % 3 == 0) { if (ourinfo.name != null && ourinfo.name.getDescription() != null) ownItems.add(new RTCPSDESItem(RTCPSDESItem.NAME, ourinfo.name.getDescription())); if (ourinfo.email != null && ourinfo.email.getDescription() != null) ownItems.add(new RTCPSDESItem(RTCPSDESItem.EMAIL, ourinfo.email.getDescription())); if (ourinfo.phone != null && ourinfo.phone.getDescription() != null) ownItems.add(new RTCPSDESItem(RTCPSDESItem.PHONE, ourinfo.phone.getDescription())); if (ourinfo.loc != null && ourinfo.loc.getDescription() != null) ownItems.add(new RTCPSDESItem(RTCPSDESItem.LOC, ourinfo.loc.getDescription())); if (ourinfo.tool != null && ourinfo.tool.getDescription() != null) ownItems.add(new RTCPSDESItem(RTCPSDESItem.TOOL, ourinfo.tool.getDescription())); if (ourinfo.note != null && ourinfo.note.getDescription() != null) ownItems.add(new RTCPSDESItem(RTCPSDESItem.NOTE, ourinfo.note.getDescription())); } sdesCounter++; ownSDES.items = ownItems.toArray(new RTCPSDESItem[ownItems.size()]); sdesChunks.add(ownSDES); for (Map.Entry<Integer, byte[]> entry : cnameRegistry.entrySet()) { RTCPSDES sdes = new RTCPSDES(); sdes.ssrc = entry.getKey(); sdes.items = new RTCPSDESItem[] {new RTCPSDESItem(RTCPSDESItem.CNAME, entry.getValue())}; // Don't forget to include the chunk in the packet. sdesChunks.add(sdes); } RTCPSDES[] sps = sdesChunks.toArray(new RTCPSDES[sdesChunks.size()]); RTCPSDESPacket sp = new RTCPSDESPacket(sps); return sp; } /** * The garbage collector runs at each reporting interval and cleans up the data structures of this * RTCP termination strategy based on the SSRCs that the owner <tt>MediaStream</tt> is still * sending. */ class GarbageCollector { public void cleanup() { // TODO We need to fix TAG(cat4-local-ssrc-hurricane) and // TAG(cat4-remote-ssrc-hurricane) first. The idea is to remove // from our data structures everything that is not listed as // a remote SSRC. } } /** * Removes receiver and sender feedback from RTCP packets. Typically this means dropping SRs, RR * report blocks and REMBs. It needs to pass through PLIs, FIRs, NACKs, etc. */ class FeedbackGateway { /** * Removes receiver and sender feedback from RTCP packets. * * @param inPacket the <tt>RTCPCompoundPacket</tt> to filter. * @return the filtered <tt>RawPacket</tt>. */ public RawPacket gateway(RTCPCompoundPacket inPacket) { if (inPacket == null || inPacket.packets == null || inPacket.packets.length == 0) { logger.info("Ignoring empty RTCP packet."); return null; } ArrayList<RTCPPacket> outPackets = new ArrayList<RTCPPacket>(inPacket.packets.length); for (RTCPPacket p : inPacket.packets) { switch (p.type) { case RTCPPacket.RR: case RTCPPacket.SR: case RTCPPacket.SDES: // We generate our own RR/SR/SDES packets. We only want // to forward NACKs/PLIs/etc. break; case RTCPFBPacket.PSFB: RTCPFBPacket psfb = (RTCPFBPacket) p; switch (psfb.fmt) { case RTCPREMBPacket.FMT: // We generate our own REMB packets. break; default: // We let through everything else, like NACK // packets. outPackets.add(psfb); break; } break; default: // We let through everything else, like BYE and APP // packets. outPackets.add(p); break; } } if (outPackets.size() == 0) { return null; } // We have feedback messages to send. Pack them in a compound // RR and send them. TODO Use RFC5506 Reduced-Size RTCP, if the // receiver supports it.
Collection<RTCPRRPacket> rrPackets = makeRTCPRRPackets(System.currentTimeMillis()); if (rrPackets != null && rrPackets.size() != 0) { outPackets.addAll(0, rrPackets); } else { logger.warn("We might be sending invalid RTCPs."); } RTCPPacket[] pkts = outPackets.toArray(new RTCPPacket[outPackets.size()]); RTCPCompoundPacket outPacket = new RTCPCompoundPacket(pkts); return generator.apply(outPacket); } } /** Holds the NTP timestamp and the associated RTP timestamp for a given RTP stream. */ class RemoteClock { /** * Ctor. * * @param remoteTime the remote (wall clock) time in milliseconds. * @param rtpTimestamp the RTP timestamp associated with the remote time. */ public RemoteClock(long remoteTime, int rtpTimestamp) { this.remoteTime = remoteTime; this.rtpTimestamp = rtpTimestamp; } /** * The last NTP timestamp that we received for the associated SSRC, expressed in millis. Should * be treated as a signed long. */ private final long remoteTime; /** * The RTP timestamp associated with {@link #remoteTime}. The RTP timestamp is an unsigned int. */ private final int rtpTimestamp; /** @return the RTP timestamp. */ public int getRtpTimestamp() { return rtpTimestamp; } /** @return the remote time in milliseconds. */ public long getRemoteTime() { return remoteTime; } } /** * Associates a received <tt>RemoteClock</tt> with the local time of receipt and the estimated * clock frequency of the sender. */ class ReceivedRemoteClock { /** The SSRC. */ private final int ssrc; /** * The <tt>RemoteClock</tt> which was received at {@link #receivedTime} for this RTP stream. */ private final RemoteClock remoteClock; /** * The local time in millis when we received the RTCP report with the RTP/NTP timestamps. It's a * signed long. */ private final long receivedTime; /** * The clock rate for {@link #ssrc}. We need to have received at least two SRs in order to be * able to calculate this. Should be treated as unsigned. */ private final int frequencyHz; /** * Ctor. * * @param ssrc the SSRC of the RTP stream. * @param remoteTime the remote (wall clock) time in milliseconds. * @param rtpTimestamp the RTP timestamp associated with the remote time. * @param frequencyHz the estimated clock rate of the sender. */ ReceivedRemoteClock(int ssrc, long remoteTime, int rtpTimestamp, int frequencyHz) { this.ssrc = ssrc; this.remoteClock = new RemoteClock(remoteTime, rtpTimestamp); this.frequencyHz = frequencyHz; this.receivedTime = System.currentTimeMillis(); } /** @return the received <tt>RemoteClock</tt>. */ public RemoteClock getRemoteClock() { return remoteClock; } /** @return the local time of receipt in milliseconds. */ public long getReceivedTime() { return receivedTime; } /** @return the SSRC. */ public int getSsrc() { return ssrc; } /** @return the estimated clock rate in Hz. */ public int getFrequencyHz() { return frequencyHz; } } /** The <tt>RTPStatsEntry</tt> class contains information about an outgoing SSRC. */ class RTPStatsEntry { /** The SSRC of the stream that this instance tracks. */ private final int ssrc; /** * The total number of _payload_ octets (i.e., not including header or padding) transmitted in * RTP data packets by the sender since starting transmission up until the time this SR packet * was generated. This should be treated as an unsigned int. */ private final int bytesSent; /** * The total number of RTP data packets transmitted by the sender (including re-transmissions) * since starting transmission up until the time this SR packet was generated. Re-transmissions * using an RTX stream are tracked in the RTX SSRC. This should be treated as an unsigned int. */ private final int packetsSent; /** @return the SSRC that this instance tracks. */ public int getSsrc() { return ssrc; } /** @return the number of payload octets sent. */ public int getBytesSent() { return bytesSent; } /** @return the number of packets sent. */ public int getPacketsSent() { return packetsSent; } /** * Ctor.
* * @param ssrc the SSRC of the stream that this instance tracks. * @param bytesSent the total number of payload octets sent. * @param packetsSent the total number of packets sent. */ RTPStatsEntry(int ssrc, int bytesSent, int packetsSent) { this.ssrc = ssrc; this.bytesSent = bytesSent; this.packetsSent = packetsSent; } } /** * The <tt>RTPStatsMap</tt> gathers stats from RTP packets that the <tt>RTCPReportBuilder</tt> * uses to build its reports. */ class RTPStatsMap extends ConcurrentHashMap<Integer, RTPStatsEntry> { /** * Updates this <tt>RTPStatsMap</tt> with information it gets from the <tt>RawPacket</tt>. * * @param pkt the <tt>RawPacket</tt> that is being transmitted. */ public void apply(RawPacket pkt) { int ssrc = pkt.getSSRC(); if (this.containsKey(ssrc)) { RTPStatsEntry oldRtpStatsEntry = this.get(ssrc); // Replace whatever was in there before. A feature of the two's // complement encoding (which is used by Java integers) is that // the bitwise results for add, subtract, and multiply are the // same if both inputs are interpreted as signed values or both // inputs are interpreted as unsigned values. (Other encodings // like one's complement and signed magnitude don't have this // property.) this.put( ssrc, new RTPStatsEntry( ssrc, oldRtpStatsEntry.getBytesSent() + pkt.getLength() - pkt.getHeaderLength() - pkt.getPaddingSize(), oldRtpStatsEntry.getPacketsSent() + 1)); } else { // Add a new <tt>RTPStatsEntry</tt> in this map. this.put( ssrc, new RTPStatsEntry( ssrc, pkt.getLength() - pkt.getHeaderLength() - pkt.getPaddingSize(), 1)); } } } /** A class that can be used to estimate the remote time at a given local time. */ class RemoteClockEstimator { /** base: 7-Feb-2036 @ 06:28:16 UTC */ private static final long msb0baseTime = 2085978496000L; /** base: 1-Jan-1900 @ 00:00:00 UTC */ private static final long msb1baseTime = -2208988800000L; /** A map holding the received remote clocks. */ private Map<Integer, ReceivedRemoteClock> receivedClocks = new ConcurrentHashMap<Integer, ReceivedRemoteClock>(); /** * Inspects an <tt>RTCPCompoundPacket</tt> and builds up the state for future estimations. * * @param pkt the <tt>RTCPCompoundPacket</tt> to inspect. */ public void apply(RTCPCompoundPacket pkt) { if (pkt == null || pkt.packets == null || pkt.packets.length == 0) { return; } for (RTCPPacket rtcpPacket : pkt.packets) { switch (rtcpPacket.type) { case RTCPPacket.SR: RTCPSRPacket srPacket = (RTCPSRPacket) rtcpPacket; // The media sender SSRC. int ssrc = srPacket.ssrc; // Convert the 64-bit NTP timestamp to Java standard time. // Note that java time (milliseconds) by definition has // less precision than NTP time (picoseconds) so // converting an NTP timestamp to java time and back to an // NTP timestamp loses precision. For example, Tue, Dec 17 // 2002 09:07:24.810 EST is represented by a single // Java-based time value of f22cd1fc8a, but its NTP // equivalents are all values ranging from // c1a9ae1c.cf5c28f5 to c1a9ae1c.cf9db22c. // Round off the fractional part to limit the loss when // going to the lower precision. long fraction = Math.round(1000D * srPacket.ntptimestamplsw / 0x100000000L); /* * If the most significant bit (MSB) on the seconds * field is set we use a different time base. The * following text is a quote from RFC-2030 (SNTP v4): * * If bit 0 is set, the UTC time is in the range * 1968-2036 and UTC time is reckoned from 0h 0m 0s UTC * on 1 January 1900. If bit 0 is not set, the time is * in the range 2036-2104 and UTC time is reckoned from * 6h 28m 16s UTC on 7 February 2036. */ long msb = srPacket.ntptimestampmsw & 0x80000000L; long remoteTime = (msb == 0) // use base: 7-Feb-2036 @ 06:28:16 UTC ? 
msb0baseTime + (srPacket.ntptimestampmsw * 1000) + fraction // use base: 1-Jan-1900 @ 00:00:00 UTC : msb1baseTime + (srPacket.ntptimestampmsw * 1000) + fraction; // Estimate the clock rate of the sender. int frequencyHz = -1; if (receivedClocks.containsKey(ssrc)) { // Calculate the clock rate: the RTP tick delta divided by // the elapsed time in millis gives ticks per millisecond; // multiplying by 1000 gives the rate in Hz. ReceivedRemoteClock oldStats = receivedClocks.get(ssrc); RemoteClock oldRemoteClock = oldStats.getRemoteClock(); frequencyHz = Math.round( 1000F * (((int) srPacket.rtptimestamp - oldRemoteClock.getRtpTimestamp()) & 0xffffffffL) / (remoteTime - oldRemoteClock.getRemoteTime())); } // Replace whatever was in there before. receivedClocks.put( ssrc, new ReceivedRemoteClock( ssrc, remoteTime, (int) srPacket.rtptimestamp, frequencyHz)); break; case RTCPPacket.SDES: break; } } } /** * Estimates the <tt>RemoteClock</tt> of a given RTP stream (identified by its SSRC) at a given * time. * * @param ssrc the SSRC of the RTP stream whose <tt>RemoteClock</tt> we want to estimate. * @param time the local time that will be mapped to a remote time. * @return An estimation of the <tt>RemoteClock</tt> at time "time". */ public RemoteClock estimate(int ssrc, long time) { ReceivedRemoteClock receivedRemoteClock = receivedClocks.get(ssrc); if (receivedRemoteClock == null || receivedRemoteClock.getFrequencyHz() == -1) { // We can't continue if we don't have NTP and RTP timestamps // and/or the original sender frequency, so we bail out. return null; } long delayMillis = time - receivedRemoteClock.getReceivedTime(); // Estimate the remote wall clock. long remoteTime = receivedRemoteClock.getRemoteClock().getRemoteTime(); long estimatedRemoteTime = remoteTime + delayMillis; // Drift the RTP timestamp; frequencyHz / 1000 is the clock rate in RTP // ticks per millisecond. int rtpTimestamp = receivedRemoteClock.getRemoteClock().getRtpTimestamp() + ((int) delayMillis) * (receivedRemoteClock.getFrequencyHz() / 1000); return new RemoteClock(estimatedRemoteTime, rtpTimestamp); } } /** Keeps track of the CNAMEs of the RTP streams that we've seen. */ class CNAMERegistry extends ConcurrentHashMap<Integer, byte[]> { /** * @param inPacket the <tt>RTCPCompoundPacket</tt> to scan for SDES CNAME items. */ public void update(RTCPCompoundPacket inPacket) { // Update CNAMEs. if (inPacket == null || inPacket.packets == null || inPacket.packets.length == 0) { return; } for (RTCPPacket p : inPacket.packets) { switch (p.type) { case RTCPPacket.SDES: RTCPSDESPacket sdesPacket = (RTCPSDESPacket) p; if (sdesPacket.sdes == null || sdesPacket.sdes.length == 0) { continue; } for (RTCPSDES chunk : sdesPacket.sdes) { if (chunk.items == null || chunk.items.length == 0) { continue; } for (RTCPSDESItem sdesItm : chunk.items) { if (sdesItm.type != RTCPSDESItem.CNAME) { continue; } this.put(chunk.ssrc, sdesItm.data); } } break; } } } } }
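The NTP-to-Java-time conversion buried in RemoteClockEstimator.apply() is worth seeing in isolation. The sketch below reproduces just that conversion, with the same era bases, and checks it against the RFC-style example quoted in the comments above; it is a standalone illustration, not part of the strategy itself.

import java.util.Date;

public class NtpToJavaTimeDemo {
    // The same era bases as RemoteClockEstimator: 7-Feb-2036 and 1-Jan-1900.
    private static final long MSB0_BASE_TIME = 2085978496000L;
    private static final long MSB1_BASE_TIME = -2208988800000L;

    /** Converts a 64-bit NTP timestamp (seconds + 2^-32 fraction) to Java millis. */
    static long ntpToMillis(long seconds, long fraction) {
        // Round the fractional part to milliseconds, as apply() does.
        long fractionMillis = Math.round(1000D * fraction / 0x100000000L);
        return ((seconds & 0x80000000L) == 0)
            ? MSB0_BASE_TIME + seconds * 1000 + fractionMillis  // 2036-2104 era
            : MSB1_BASE_TIME + seconds * 1000 + fractionMillis; // 1968-2036 era
    }

    public static void main(String[] args) {
        // c1a9ae1c.cf5c28f5 is the example quoted in the comments above:
        // Tue, Dec 17 2002 09:07:24.810 EST (14:07:24.810 UTC).
        long millis = ntpToMillis(0xc1a9ae1cL, 0xcf5c28f5L);
        System.out.println(new Date(millis));
    }
}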
/** * The <tt>SimulcastEngine</tt> of a <tt>VideoChannel</tt> makes sure to only forward one simulcast * stream at any given point in time to the owner endpoint, viewed as a receiver. * * <p>This class also takes care of "gatewaying" the RTCP SRs that sending endpoints are sending. In * this context "gatewaying" means updating the octet and packet count information in the SRs. Such * a change is necessary because of the pausing/resuming of the simulcast streams that this class * performs. * * @author George Politis */ public class SimulcastEngine implements TransformEngine { /** * The {@link Logger} used by the {@link SimulcastEngine} class to print debug information. Note * that {@link Conference} instances should use {@link #logger} instead. */ private static final Logger classLogger = Logger.getLogger(SimulcastEngine.class); /** The owner of this <tt>SimulcastEngine</tt>. */ private final VideoChannel videoChannel; /** * If the owning endpoint (viewed as a sender) has signaled simulcast, this object receives it. */ private final SimulcastReceiver simulcastReceiver; /** * For each <tt>SimulcastReceiver</tt> we have a <tt>SimulcastSender</tt>. This object manages * those <tt>SimulcastSender</tt>s. */ private final SimulcastSenderManager simulcastSenderManager = new SimulcastSenderManager(this); /** * The RTP stats map that holds RTP statistics about all the simulcast streams that this * <tt>SimulcastEngine</tt> is sending. It makes it possible to modify the packet count and * octet count in the RTCP SRs, taking into account the pausing of the simulcast streams. The * stats are updated in the RTP transform direction and they are used by the * <tt>SenderReportGateway</tt> that is defined below. */ private final RTPStatsMap rtpStatsMap = new RTPStatsMap(); /** * The <tt>SenderReportGateway</tt> responsible for gatewaying sender reports, i.e. modifying * their octet count and packet count to reflect the pausing/resuming of the simulcast streams. * * <p>RTCP termination, which needs to be activated for simulcast, nullifies the effects of the * SenderReportGateway because it generates SRs from scratch. * * <p>The original idea behind having the SenderReportGateway inside the SimulcastEngine was that * the two could be (de)activated independently. This is not currently possible. */ private final SenderReportGateway srGateway = new SenderReportGateway(); /** The RTP <tt>PacketTransformer</tt> of this <tt>SimulcastEngine</tt>. */ private final PacketTransformer rtpTransformer = new MyRTPTransformer(); /** The RTCP <tt>PacketTransformer</tt> of this <tt>SimulcastEngine</tt>. */ private final PacketTransformer rtcpTransformer = new MyRTCPTransformer(); /** The {@link Logger} to be used by this instance to print debug information. */ private final Logger logger; /** * Ctor. * * @param videoChannel The <tt>VideoChannel</tt> associated with this <tt>SimulcastEngine</tt>. */ public SimulcastEngine(VideoChannel videoChannel) { this.videoChannel = videoChannel; simulcastReceiver = new SimulcastReceiver( this, ServiceUtils.getService(videoChannel.getBundleContext(), ConfigurationService.class)); this.logger = Logger.getLogger(classLogger, videoChannel.getContent().getConference().getLogger()); } /** * Gets the <tt>SimulcastReceiver</tt> of this <tt>SimulcastEngine</tt>. * * @return the <tt>SimulcastReceiver</tt> of this <tt>SimulcastEngine</tt>. */ public SimulcastReceiver getSimulcastReceiver() { return simulcastReceiver; } /** * Gets the <tt>SimulcastSenderManager</tt> of this <tt>SimulcastEngine</tt>.
* * @return the <tt>SimulcastSenderManager</tt> of this <tt>SimulcastEngine</tt>. */ public SimulcastSenderManager getSimulcastSenderManager() { return simulcastSenderManager; } /** * Gets the <tt>VideoChannel</tt> that owns this <tt>SimulcastEngine</tt>. * * @return the <tt>VideoChannel</tt> that owns this <tt>SimulcastEngine</tt>. */ public VideoChannel getVideoChannel() { return videoChannel; } /** {@inheritDoc} */ public PacketTransformer getRTPTransformer() { return rtpTransformer; } /** {@inheritDoc} */ public PacketTransformer getRTCPTransformer() { return rtcpTransformer; } /** Updates octet count and packet count in sender reports. */ private class SenderReportGateway { /** * Updates octet count and packet count in sender reports found in the * <tt>RTCPCompoundPacket</tt>. * * @param pkt * @return {@code true} if the specified {@code pkt} was modified; otherwise, {@code false} */ public boolean gateway(RTCPCompoundPacket pkt) { RTCPPacket[] pkts; boolean modified = false; if (pkt != null && (pkts = pkt.packets) != null && pkts.length != 0) { for (RTCPPacket p : pkts) { switch (p.type) { case RTCPPacket.SR: RTCPSRPacket sr = (RTCPSRPacket) p; int ssrc = sr.ssrc; RTPStatsEntry rtpStats = rtpStatsMap.get(ssrc); if (rtpStats != null) { // Mark the packet as modified and update the octet // and packet count using the information gathered // by rtpStatsMap. sr.octetcount = rtpStats.getBytesSent(); sr.packetcount = rtpStats.getPacketsSent(); modified = true; } break; } } } return modified; } } /** The RTP <tt>PacketTransformer</tt> of this <tt>SimulcastEngine</tt>. */ private class MyRTPTransformer extends SinglePacketTransformer { /** Initializes a new {@code MyRTPTransformer} instance. */ public MyRTPTransformer() { super(RTPPacketPredicate.INSTANCE); } @Override public RawPacket transform(RawPacket p) { // Accepts or drops RTP packets (to be sent from the local peer to // the remote peer) depending on which SimulcastStream is currently // being sent, which SendMode is in use, etc. if (simulcastSenderManager.accept(p)) { // Update rtpStatsMap with the information that we've accepted // to send. rtpStatsMap.apply(p); return p; } else { return null; } } @Override public RawPacket reverseTransform(RawPacket p) { // Forward the received RawPacket (from the remote peer to the local // peer) to the SimulcastReceiver. The latter will, for example, // update the received SimulcastStreams. simulcastReceiver.accepted(p); return p; } } /** The RTCP <tt>PacketTransformer</tt> of this <tt>SimulcastEngine</tt>. */ private class MyRTCPTransformer extends SinglePacketTransformerAdapter { /** The RTCP packet parser that parses RTCP packets from <tt>RawPacket</tt>s. */ private final RTCPPacketParserEx parser = new RTCPPacketParserEx(); /** The RTCP generator that generates <tt>RTCPCompoundPacket</tt>s from <tt>RawPacket</tt>s. */ private final RTCPGenerator generator = new RTCPGenerator(); /** Initializes a new {@code MyRTCPTransformer} instance. */ public MyRTCPTransformer() { super(RTCPPacketPredicate.INSTANCE); } @Override public RawPacket transform(RawPacket p) { // Update octets and packets sent in SRs. RTCPCompoundPacket compound; try { compound = (RTCPCompoundPacket) parser.parse(p.getBuffer(), p.getOffset(), p.getLength()); } catch (BadFormatException e) { logger.warn("Failed to terminate an RTCP packet. 
Dropping it."); return null; } if (srGateway.gateway(compound)) { return generator.apply(compound); } else { // If the RTCP packet hasn't been modified, send the input // without regenerating it (i.e. optimize). return p; } } } /** @return the {@link Logger} used by this instance. */ public Logger getLogger() { return logger; } }
/** * The source contact service. It will show the most recent messages. * * @author Damian Minkov */ public class MessageSourceService extends MetaContactListAdapter implements ContactSourceService, ContactPresenceStatusListener, ContactCapabilitiesListener, ProviderPresenceStatusListener, SubscriptionListener, LocalUserChatRoomPresenceListener, MessageListener, ChatRoomMessageListener, AdHocChatRoomMessageListener { /** The logger for this class. */ private static Logger logger = Logger.getLogger(MessageSourceService.class); /** The display name of this contact source. */ private final String MESSAGE_HISTORY_NAME; /** The type of the source service, i.e. the place where it is to be shown in the UI. */ private int sourceServiceType = CONTACT_LIST_TYPE; /** * Whether to show recent messages in the history or in the contact list. By default we show * them in the contact list. */ private static final String IN_HISTORY_PROPERTY = "net.java.sip.communicator.impl.msghistory.contactsrc.IN_HISTORY"; /** Property to control the number of recent messages. */ private static final String NUMBER_OF_RECENT_MSGS_PROP = "net.java.sip.communicator.impl.msghistory.contactsrc.MSG_NUMBER"; /** Property to control the version of recent messages. */ private static final String VER_OF_RECENT_MSGS_PROP = "net.java.sip.communicator.impl.msghistory.contactsrc.MSG_VER"; /** Property to control the message type. Can query for a message subtype. */ private static final String IS_MESSAGE_SUBTYPE_SMS_PROP = "net.java.sip.communicator.impl.msghistory.contactsrc.IS_SMS_ENABLED"; /** * The number of recent messages to store in the history; we will retrieve just * <tt>numberOfMessages</tt> of them. */ private static final int NUMBER_OF_MSGS_IN_HISTORY = 100; /** Number of messages to show. */ private int numberOfMessages = 10; /** The structure to save the recent messages list. */ private static final String[] STRUCTURE_NAMES = new String[] {"provider", "contact", "timestamp", "ver"}; /** The current version of recent messages. When it changes, the recent messages are recreated. */ private static String RECENT_MSGS_VER = "2"; /** The structure. */ private static final HistoryRecordStructure recordStructure = new HistoryRecordStructure(STRUCTURE_NAMES); /** Recent messages history ID. */ private static final HistoryID historyID = HistoryID.createFromRawID(new String[] {"recent_messages"}); /** The cache for recent messages. */ private History history = null; /** List of recent messages. */ private final List<ComparableEvtObj> recentMessages = new LinkedList<ComparableEvtObj>(); /** Date of the oldest shown message. */ private Date oldestRecentMessage = null; /** The last query created. */ private MessageSourceContactQuery recentQuery = null; /** The message subtype, if any. */ private boolean isSMSEnabled = false; /** Message history service that has created us. */ private MessageHistoryServiceImpl messageHistoryService; /** Constructs a MessageSourceService.
*/ MessageSourceService(MessageHistoryServiceImpl messageHistoryService) { this.messageHistoryService = messageHistoryService; ConfigurationService conf = MessageHistoryActivator.getConfigurationService(); if (conf.getBoolean(IN_HISTORY_PROPERTY, false)) { sourceServiceType = HISTORY_TYPE; } MESSAGE_HISTORY_NAME = MessageHistoryActivator.getResources().getI18NString("service.gui.RECENT_MESSAGES"); numberOfMessages = conf.getInt(NUMBER_OF_RECENT_MSGS_PROP, numberOfMessages); isSMSEnabled = conf.getBoolean(IS_MESSAGE_SUBTYPE_SMS_PROP, isSMSEnabled); RECENT_MSGS_VER = conf.getString(VER_OF_RECENT_MSGS_PROP, RECENT_MSGS_VER); MessageSourceContactPresenceStatus.MSG_SRC_CONTACT_ONLINE.setStatusIcon( MessageHistoryActivator.getResources() .getImageInBytes("service.gui.icons.SMS_STATUS_ICON")); } /** * Returns the display name of this contact source. * * @return the display name of this contact source */ @Override public String getDisplayName() { return MESSAGE_HISTORY_NAME; } /** * Returns default type to indicate that this contact source can be queried by default filters. * * @return the type of this contact source */ @Override public int getType() { return sourceServiceType; } /** * Returns the index of the contact source in the result list. * * @return the index of the contact source in the result list */ @Override public int getIndex() { return 0; } /** * Creates a query for the given <tt>queryString</tt>. * * @param queryString the string to search for * @return the created query */ @Override public ContactQuery createContactQuery(String queryString) { recentQuery = (MessageSourceContactQuery) createContactQuery(queryString, numberOfMessages); return recentQuery; } /** * Updates the contact sources in the recent query, if any. Done here in order to stay in sync * with the recentMessages instance and to check for already existing instances of contact * sources. Normally called from the query. */ public void updateRecentMessages() { if (recentQuery == null) return; synchronized (recentMessages) { List<SourceContact> currentContactsInQuery = recentQuery.getQueryResults(); for (ComparableEvtObj evtObj : recentMessages) { // contains() will use the equals method of evtObj if (!currentContactsInQuery.contains(evtObj)) { MessageSourceContact newSourceContact = new MessageSourceContact(evtObj.getEventObject(), MessageSourceService.this); newSourceContact.initDetails(evtObj.getEventObject()); recentQuery.addQueryResult(newSourceContact); } } } } /** * Searches for entries in the cached recent messages in the history. * * @param provider the provider whose contact messages we will search * @param isStatusChanged whether the search was triggered by a status change * @return entries in the cached recent messages in the history. */ private List<ComparableEvtObj> getCachedRecentMessages( ProtocolProviderService provider, boolean isStatusChanged) { String providerID = provider.getAccountID().getAccountUniqueID(); List<String> recentMessagesContactIDs = getRecentContactIDs( providerID, recentMessages.size() < numberOfMessages ? null : oldestRecentMessage); List<ComparableEvtObj> cachedRecentMessages = new ArrayList<ComparableEvtObj>(); for (String contactID : recentMessagesContactIDs) { Collection<EventObject> res = messageHistoryService.findRecentMessagesPerContact( numberOfMessages, providerID, contactID, isSMSEnabled); processEventObjects(res, cachedRecentMessages, isStatusChanged); } return cachedRecentMessages; } /** * Processes a list of event objects.
Checks whether a message source contact already exists for this * event object; if yes, it is just updated with the new values (not sure whether we should do this, as * it may bring old messages) and, if the status of the provider has changed, its details are initialized * and its capabilities are updated. The found message source contact is still added to the list of new * contacts, as we will detect this later and fire an update event. If nothing is found, a new contact is created. * * @param res the list of event objects to process * @param cachedRecentMessages the list of newly created source contacts, or of existing ones * updated with the corresponding event object * @param isStatusChanged whether we are processing because the provider status changed */ private void processEventObjects( Collection<EventObject> res, List<ComparableEvtObj> cachedRecentMessages, boolean isStatusChanged) { for (EventObject obj : res) { ComparableEvtObj oldMsg = findRecentMessage(obj, recentMessages); if (oldMsg != null) { oldMsg.update(obj); // update if (isStatusChanged && recentQuery != null) recentQuery.updateCapabilities(oldMsg, obj); // we still add it to cachedRecentMessages // later we will find it is a duplicate and will fire an // update event if (!cachedRecentMessages.contains(oldMsg)) cachedRecentMessages.add(oldMsg); continue; } oldMsg = findRecentMessage(obj, cachedRecentMessages); if (oldMsg == null) { oldMsg = new ComparableEvtObj(obj); if (isStatusChanged && recentQuery != null) recentQuery.updateCapabilities(oldMsg, obj); cachedRecentMessages.add(oldMsg); } } } /** * Accessor for the source contacts implementation. * * @return whether the SMS message sub type is enabled. */ boolean isSMSEnabled() { return isSMSEnabled; } /** * Adds the given ComparableEvtObj instances: newly added ones fire a new-contact event, existing * ones fire an update event, and those dropped when trimming the list to the desired length fire * a remove event. * * @param contactsToAdd the instances to add. */ private void addNewRecentMessages(List<ComparableEvtObj> contactsToAdd) { // now find objects to fire new for, and objects to fire remove for // let us find duplicates and fire update List<ComparableEvtObj> duplicates = new ArrayList<ComparableEvtObj>(); for (ComparableEvtObj msgToAdd : contactsToAdd) { if (recentMessages.contains(msgToAdd)) { duplicates.add(msgToAdd); // save update updateRecentMessageToHistory(msgToAdd); } } recentMessages.removeAll(duplicates); // now contactsToAdd has no duplicates, add them all boolean changed = recentMessages.addAll(contactsToAdd); if (changed) { Collections.sort(recentMessages); if (recentQuery != null) { for (ComparableEvtObj obj : duplicates) recentQuery.updateContact(obj, obj.getEventObject()); } } if (!recentMessages.isEmpty()) oldestRecentMessage = recentMessages.get(recentMessages.size() - 1).getTimestamp(); // trim List<ComparableEvtObj> removedItems = null; if (recentMessages.size() > numberOfMessages) { removedItems = new ArrayList<ComparableEvtObj>( recentMessages.subList(numberOfMessages, recentMessages.size())); recentMessages.removeAll(removedItems); } if (recentQuery != null) { // now fire removed for all that were in the list // and are now removed after the trim if (removedItems != null) { for (ComparableEvtObj msc : removedItems) { if (!contactsToAdd.contains(msc)) recentQuery.fireContactRemoved(msc); } } // fire new for all that were added, and not removed after the trim for (ComparableEvtObj msc : contactsToAdd) { if ((removedItems == null || !removedItems.contains(msc)) && !duplicates.contains(msc)) { MessageSourceContact newSourceContact = new MessageSourceContact(msc.getEventObject(), MessageSourceService.this); newSourceContact.initDetails(msc.getEventObject());
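// publish the freshly built source contact to the active query so its
// listeners see it as a new result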
recentQuery.addQueryResult(newSourceContact); } } // if the recent messages were changed, indexes have changed; let's // fire an event for the last element, which will reorder the whole // group if needed. if (changed) recentQuery.fireContactChanged(recentMessages.get(recentMessages.size() - 1)); } } /** * When a provider is added, do not block; start executing in a new thread. * * @param provider ProtocolProviderService */ void handleProviderAdded(final ProtocolProviderService provider, final boolean isStatusChanged) { new Thread( new Runnable() { @Override public void run() { handleProviderAddedInSeparateThread(provider, isStatusChanged); } }) .start(); } /** * Handles a provider being added. As searching can be slow, especially when handling a special * type of messages (with a subType), this needs to run in a new Thread. * * @param provider ProtocolProviderService */ private void handleProviderAddedInSeparateThread( ProtocolProviderService provider, boolean isStatusChanged) { // let's check whether we have cached recent messages for this provider, and // fire events if they are found and are newer synchronized (recentMessages) { List<ComparableEvtObj> cachedRecentMessages = getCachedRecentMessages(provider, isStatusChanged); if (cachedRecentMessages.isEmpty()) { // maybe there is no cached history for this // let's check // load it not from the cache, but do a local search Collection<EventObject> res = messageHistoryService.findRecentMessagesPerContact( numberOfMessages, provider.getAccountID().getAccountUniqueID(), null, isSMSEnabled); List<ComparableEvtObj> newMsc = new ArrayList<ComparableEvtObj>(); processEventObjects(res, newMsc, isStatusChanged); addNewRecentMessages(newMsc); for (ComparableEvtObj msc : newMsc) { saveRecentMessageToHistory(msc); } } else addNewRecentMessages(cachedRecentMessages); } } /** * Tries to match the event object to an already existing ComparableEvtObj in the supplied list. * * @param obj the object that we will try to match. * @param list the list we will search in. * @return the found ComparableEvtObj */ private static ComparableEvtObj findRecentMessage(EventObject obj, List<ComparableEvtObj> list) { Contact contact = null; ChatRoom chatRoom = null; if (obj instanceof MessageDeliveredEvent) { contact = ((MessageDeliveredEvent) obj).getDestinationContact(); } else if (obj instanceof MessageReceivedEvent) { contact = ((MessageReceivedEvent) obj).getSourceContact(); } else if (obj instanceof ChatRoomMessageDeliveredEvent) { chatRoom = ((ChatRoomMessageDeliveredEvent) obj).getSourceChatRoom(); } else if (obj instanceof ChatRoomMessageReceivedEvent) { chatRoom = ((ChatRoomMessageReceivedEvent) obj).getSourceChatRoom(); } for (ComparableEvtObj evt : list) { if ((contact != null && contact.equals(evt.getContact())) || (chatRoom != null && chatRoom.equals(evt.getRoom()))) return evt; } return null; } /** * A provider has been removed. * * @param provider the ProtocolProviderService that has been unregistered.
*/ void handleProviderRemoved(ProtocolProviderService provider) { // lets remove the recent messages for this provider, and update // with recent messages for the available providers synchronized (recentMessages) { if (provider != null) { List<ComparableEvtObj> removedItems = new ArrayList<ComparableEvtObj>(); for (ComparableEvtObj msc : recentMessages) { if (msc.getProtocolProviderService().equals(provider)) removedItems.add(msc); } recentMessages.removeAll(removedItems); if (!recentMessages.isEmpty()) oldestRecentMessage = recentMessages.get(recentMessages.size() - 1).getTimestamp(); else oldestRecentMessage = null; if (recentQuery != null) { for (ComparableEvtObj msc : removedItems) { recentQuery.fireContactRemoved(msc); } } } // handleProviderRemoved can be invoked due to stopped // history service, if this is the case we do not want to // update messages if (!this.messageHistoryService.isHistoryLoggingEnabled()) return; // lets do the same as we enable provider // for all registered providers and finally fire events List<ComparableEvtObj> contactsToAdd = new ArrayList<ComparableEvtObj>(); for (ProtocolProviderService pps : messageHistoryService.getCurrentlyAvailableProviders()) { contactsToAdd.addAll(getCachedRecentMessages(pps, true)); } addNewRecentMessages(contactsToAdd); } } /** * Searches for contact ids in history of recent messages. * * @param provider * @param after * @return */ List<String> getRecentContactIDs(String provider, Date after) { List<String> res = new ArrayList<String>(); try { History history = getHistory(); if (history != null) { Iterator<HistoryRecord> recs = history.getReader().findLast(NUMBER_OF_MSGS_IN_HISTORY); SimpleDateFormat sdf = new SimpleDateFormat(HistoryService.DATE_FORMAT); while (recs.hasNext()) { HistoryRecord hr = recs.next(); String contact = null; String recordProvider = null; Date timestamp = null; for (int i = 0; i < hr.getPropertyNames().length; i++) { String propName = hr.getPropertyNames()[i]; if (propName.equals(STRUCTURE_NAMES[0])) recordProvider = hr.getPropertyValues()[i]; else if (propName.equals(STRUCTURE_NAMES[1])) contact = hr.getPropertyValues()[i]; else if (propName.equals(STRUCTURE_NAMES[2])) { try { timestamp = sdf.parse(hr.getPropertyValues()[i]); } catch (ParseException e) { timestamp = new Date(Long.parseLong(hr.getPropertyValues()[i])); } } } if (recordProvider == null || contact == null) continue; if (after != null && timestamp != null && timestamp.before(after)) continue; if (recordProvider.equals(provider)) res.add(contact); } } } catch (IOException ex) { logger.error("cannot create recent_messages history", ex); } return res; } /** * Returns the cached recent messages history. 
* * @return * @throws IOException */ private History getHistory() throws IOException { synchronized (historyID) { HistoryService historyService = MessageHistoryActivator.getMessageHistoryService().getHistoryService(); if (history == null) { history = historyService.createHistory(historyID, recordStructure); // lets check the version if not our version, re-create // history (delete it) HistoryReader reader = history.getReader(); boolean delete = false; QueryResultSet<HistoryRecord> res = reader.findLast(1); if (res != null && res.hasNext()) { HistoryRecord hr = res.next(); if (hr.getPropertyValues().length >= 4) { if (!hr.getPropertyValues()[3].equals(RECENT_MSGS_VER)) delete = true; } else delete = true; } if (delete) { // delete it try { historyService.purgeLocallyStoredHistory(historyID); history = historyService.createHistory(historyID, recordStructure); } catch (IOException ex) { logger.error("Cannot delete recent_messages history", ex); } } } return history; } } /** * Returns the index of the source contact, in the list of recent messages. * * @param messageSourceContact * @return */ int getIndex(MessageSourceContact messageSourceContact) { synchronized (recentMessages) { for (int i = 0; i < recentMessages.size(); i++) if (recentMessages.get(i).equals(messageSourceContact)) return i; return -1; } } /** * Creates query for the given <tt>searchString</tt>. * * @param queryString the string to search for * @param contactCount the maximum count of result contacts * @return the created query */ @Override public ContactQuery createContactQuery(String queryString, int contactCount) { if (!StringUtils.isNullOrEmpty(queryString)) return null; recentQuery = new MessageSourceContactQuery(MessageSourceService.this); return recentQuery; } /** * Updates contact source contacts with status. * * @param evt the ContactPresenceStatusChangeEvent describing the status */ @Override public void contactPresenceStatusChanged(ContactPresenceStatusChangeEvent evt) { if (recentQuery == null) return; synchronized (recentMessages) { for (ComparableEvtObj msg : recentMessages) { if (msg.getContact() != null && msg.getContact().equals(evt.getSourceContact())) { recentQuery.updateContactStatus(msg, evt.getNewStatus()); } } } } @Override public void providerStatusChanged(ProviderPresenceStatusChangeEvent evt) { if (!evt.getNewStatus().isOnline() || evt.getOldStatus().isOnline()) return; handleProviderAdded(evt.getProvider(), true); } @Override public void providerStatusMessageChanged(PropertyChangeEvent evt) {} @Override public void localUserPresenceChanged(LocalUserChatRoomPresenceChangeEvent evt) { if (recentQuery == null) return; ComparableEvtObj srcContact = null; synchronized (recentMessages) { for (ComparableEvtObj msg : recentMessages) { if (msg.getRoom() != null && msg.getRoom().equals(evt.getChatRoom())) { srcContact = msg; break; } } } if (srcContact == null) return; String eventType = evt.getEventType(); if (LocalUserChatRoomPresenceChangeEvent.LOCAL_USER_JOINED.equals(eventType)) { recentQuery.updateContactStatus(srcContact, ChatRoomPresenceStatus.CHAT_ROOM_ONLINE); } else if ((LocalUserChatRoomPresenceChangeEvent.LOCAL_USER_LEFT.equals(eventType) || LocalUserChatRoomPresenceChangeEvent.LOCAL_USER_KICKED.equals(eventType) || LocalUserChatRoomPresenceChangeEvent.LOCAL_USER_DROPPED.equals(eventType))) { recentQuery.updateContactStatus(srcContact, ChatRoomPresenceStatus.CHAT_ROOM_OFFLINE); } } /** * Handles new events. 
* * @param obj the event object * @param provider the provider * @param id the id of the source of the event */ private void handle(EventObject obj, ProtocolProviderService provider, String id) { // check if provider - contact exist update message content synchronized (recentMessages) { ComparableEvtObj existingMsc = null; for (ComparableEvtObj msc : recentMessages) { if (msc.getProtocolProviderService().equals(provider) && msc.getContactAddress().equals(id)) { // update msc.update(obj); updateRecentMessageToHistory(msc); existingMsc = msc; } } if (existingMsc != null) { Collections.sort(recentMessages); oldestRecentMessage = recentMessages.get(recentMessages.size() - 1).getTimestamp(); if (recentQuery != null) { recentQuery.updateContact(existingMsc, existingMsc.getEventObject()); recentQuery.fireContactChanged(existingMsc); } return; } // if missing create source contact // and update recent messages, trim and sort MessageSourceContact newSourceContact = new MessageSourceContact(obj, MessageSourceService.this); newSourceContact.initDetails(obj); // we have already checked for duplicate ComparableEvtObj newMsg = new ComparableEvtObj(obj); recentMessages.add(newMsg); Collections.sort(recentMessages); oldestRecentMessage = recentMessages.get(recentMessages.size() - 1).getTimestamp(); // trim List<ComparableEvtObj> removedItems = null; if (recentMessages.size() > numberOfMessages) { removedItems = new ArrayList<ComparableEvtObj>( recentMessages.subList(numberOfMessages, recentMessages.size())); recentMessages.removeAll(removedItems); } // save saveRecentMessageToHistory(newMsg); // no query nothing to fire if (recentQuery == null) return; // now fire if (removedItems != null) { for (ComparableEvtObj msc : removedItems) { recentQuery.fireContactRemoved(msc); } } recentQuery.addQueryResult(newSourceContact); } } /** Adds recent message in history. */ private void saveRecentMessageToHistory(ComparableEvtObj msc) { synchronized (historyID) { // and create it try { History history = getHistory(); HistoryWriter writer = history.getWriter(); SimpleDateFormat sdf = new SimpleDateFormat(HistoryService.DATE_FORMAT); writer.addRecord( new String[] { msc.getProtocolProviderService().getAccountID().getAccountUniqueID(), msc.getContactAddress(), sdf.format(msc.getTimestamp()), RECENT_MSGS_VER }, NUMBER_OF_MSGS_IN_HISTORY); } catch (IOException ex) { logger.error("cannot create recent_messages history", ex); return; } } } /** Updates recent message in history. 
*/ private void updateRecentMessageToHistory(final ComparableEvtObj msg) { synchronized (historyID) { // and create it try { History history = getHistory(); HistoryWriter writer = history.getWriter(); writer.updateRecord( new HistoryWriter.HistoryRecordUpdater() { HistoryRecord hr; @Override public void setHistoryRecord(HistoryRecord historyRecord) { this.hr = historyRecord; } @Override public boolean isMatching() { boolean providerFound = false; boolean contactFound = false; for (int i = 0; i < hr.getPropertyNames().length; i++) { String propName = hr.getPropertyNames()[i]; if (propName.equals(STRUCTURE_NAMES[0])) { if (msg.getProtocolProviderService() .getAccountID() .getAccountUniqueID() .equals(hr.getPropertyValues()[i])) { providerFound = true; } } else if (propName.equals(STRUCTURE_NAMES[1])) { if (msg.getContactAddress().equals(hr.getPropertyValues()[i])) { contactFound = true; } } } return contactFound && providerFound; } @Override public Map<String, String> getUpdateChanges() { HashMap<String, String> map = new HashMap<String, String>(); SimpleDateFormat sdf = new SimpleDateFormat(HistoryService.DATE_FORMAT); for (int i = 0; i < hr.getPropertyNames().length; i++) { String propName = hr.getPropertyNames()[i]; if (propName.equals(STRUCTURE_NAMES[0])) { map.put( propName, msg.getProtocolProviderService().getAccountID().getAccountUniqueID()); } else if (propName.equals(STRUCTURE_NAMES[1])) { map.put(propName, msg.getContactAddress()); } else if (propName.equals(STRUCTURE_NAMES[2])) { map.put(propName, sdf.format(msg.getTimestamp())); } else if (propName.equals(STRUCTURE_NAMES[3])) map.put(propName, RECENT_MSGS_VER); } return map; } }); } catch (IOException ex) { logger.error("cannot create recent_messages history", ex); return; } } } @Override public void messageReceived(MessageReceivedEvent evt) { if (isSMSEnabled && evt.getEventType() != MessageReceivedEvent.SMS_MESSAGE_RECEIVED) { return; } handle(evt, evt.getSourceContact().getProtocolProvider(), evt.getSourceContact().getAddress()); } @Override public void messageDelivered(MessageDeliveredEvent evt) { if (isSMSEnabled && !evt.isSmsMessage()) return; handle( evt, evt.getDestinationContact().getProtocolProvider(), evt.getDestinationContact().getAddress()); } /** * Not used. * * @param evt the <tt>MessageFailedEvent</tt> containing the ID of the */ @Override public void messageDeliveryFailed(MessageDeliveryFailedEvent evt) {} @Override public void messageReceived(ChatRoomMessageReceivedEvent evt) { if (isSMSEnabled) return; // ignore non conversation messages if (evt.getEventType() != ChatRoomMessageReceivedEvent.CONVERSATION_MESSAGE_RECEIVED) return; handle( evt, evt.getSourceChatRoom().getParentProvider(), evt.getSourceChatRoom().getIdentifier()); } @Override public void messageDelivered(ChatRoomMessageDeliveredEvent evt) { if (isSMSEnabled) return; handle( evt, evt.getSourceChatRoom().getParentProvider(), evt.getSourceChatRoom().getIdentifier()); } /** * Not used. * * @param evt the <tt>ChatroomMessageDeliveryFailedEvent</tt> containing */ @Override public void messageDeliveryFailed(ChatRoomMessageDeliveryFailedEvent evt) {} @Override public void messageReceived(AdHocChatRoomMessageReceivedEvent evt) { // TODO } @Override public void messageDelivered(AdHocChatRoomMessageDeliveredEvent evt) { // TODO } /** * Not used. 
* * @param evt the <tt>AdHocChatroomMessageDeliveryFailedEvent</tt> */ @Override public void messageDeliveryFailed(AdHocChatRoomMessageDeliveryFailedEvent evt) {} @Override public void subscriptionCreated(SubscriptionEvent evt) {} @Override public void subscriptionFailed(SubscriptionEvent evt) {} @Override public void subscriptionRemoved(SubscriptionEvent evt) {} @Override public void subscriptionMoved(SubscriptionMovedEvent evt) {} @Override public void subscriptionResolved(SubscriptionEvent evt) {} /** * If a contact is renamed update the locally stored message if any. * * @param evt the <tt>ContactPropertyChangeEvent</tt> containing the source */ @Override public void contactModified(ContactPropertyChangeEvent evt) { if (!evt.getPropertyName().equals(ContactPropertyChangeEvent.PROPERTY_DISPLAY_NAME)) return; Contact contact = evt.getSourceContact(); if (contact == null) return; for (ComparableEvtObj msc : recentMessages) { if (contact.equals(msc.getContact())) { if (recentQuery != null) recentQuery.updateContactDisplayName(msc, contact.getDisplayName()); return; } } } /** * Indicates that a MetaContact has been modified. * * @param evt the MetaContactListEvent containing the corresponding contact */ public void metaContactRenamed(MetaContactRenamedEvent evt) { for (ComparableEvtObj msc : recentMessages) { if (evt.getSourceMetaContact().containsContact(msc.getContact())) { if (recentQuery != null) recentQuery.updateContactDisplayName(msc, evt.getNewDisplayName()); } } } @Override public void supportedOperationSetsChanged(ContactCapabilitiesEvent event) { Contact contact = event.getSourceContact(); if (contact == null) return; for (ComparableEvtObj msc : recentMessages) { if (contact.equals(msc.getContact())) { if (recentQuery != null) recentQuery.updateCapabilities(msc, contact); return; } } } /** Permanently removes all locally stored message history, remove recent contacts. */ public void eraseLocallyStoredHistory() throws IOException { List<ComparableEvtObj> toRemove = null; synchronized (recentMessages) { toRemove = new ArrayList<ComparableEvtObj>(recentMessages); recentMessages.clear(); } if (recentQuery != null) { for (ComparableEvtObj msc : toRemove) { recentQuery.fireContactRemoved(msc); } } } /** * Permanently removes locally stored message history for the metacontact, remove any recent * contacts if any. */ public void eraseLocallyStoredHistory(MetaContact contact) throws IOException { List<ComparableEvtObj> toRemove = null; synchronized (recentMessages) { toRemove = new ArrayList<ComparableEvtObj>(); Iterator<Contact> iter = contact.getContacts(); while (iter.hasNext()) { Contact item = iter.next(); String id = item.getAddress(); ProtocolProviderService provider = item.getProtocolProvider(); for (ComparableEvtObj msc : recentMessages) { if (msc.getProtocolProviderService().equals(provider) && msc.getContactAddress().equals(id)) { toRemove.add(msc); } } } recentMessages.removeAll(toRemove); } if (recentQuery != null) { for (ComparableEvtObj msc : toRemove) { recentQuery.fireContactRemoved(msc); } } } /** * Permanently removes locally stored message history for the chatroom, remove any recent contacts * if any. 
*/ public void eraseLocallyStoredHistory(ChatRoom room) { ComparableEvtObj toRemove = null; synchronized (recentMessages) { for (ComparableEvtObj msg : recentMessages) { if (msg.getRoom() != null && msg.getRoom().equals(room)) { toRemove = msg; break; } } if (toRemove == null) return; recentMessages.remove(toRemove); } if (recentQuery != null) recentQuery.fireContactRemoved(toRemove); } /** Object used to cache recent messages. */ private class ComparableEvtObj implements Comparable<ComparableEvtObj> { private EventObject eventObject; /** The protocol provider. */ private ProtocolProviderService ppService = null; /** The address. */ private String address = null; /** The timestamp. */ private Date timestamp = null; /** The contact instance. */ private Contact contact = null; /** The room instance. */ private ChatRoom room = null; /** * Constructs. * * @param source used to extract initial values. */ ComparableEvtObj(EventObject source) { update(source); } /** * Extract values from <tt>EventObject</tt>. * * @param source */ public void update(EventObject source) { this.eventObject = source; if (source instanceof MessageDeliveredEvent) { MessageDeliveredEvent e = (MessageDeliveredEvent) source; this.contact = e.getDestinationContact(); this.address = contact.getAddress(); this.ppService = contact.getProtocolProvider(); this.timestamp = e.getTimestamp(); } else if (source instanceof MessageReceivedEvent) { MessageReceivedEvent e = (MessageReceivedEvent) source; this.contact = e.getSourceContact(); this.address = contact.getAddress(); this.ppService = contact.getProtocolProvider(); this.timestamp = e.getTimestamp(); } else if (source instanceof ChatRoomMessageDeliveredEvent) { ChatRoomMessageDeliveredEvent e = (ChatRoomMessageDeliveredEvent) source; this.room = e.getSourceChatRoom(); this.address = room.getIdentifier(); this.ppService = room.getParentProvider(); this.timestamp = e.getTimestamp(); } else if (source instanceof ChatRoomMessageReceivedEvent) { ChatRoomMessageReceivedEvent e = (ChatRoomMessageReceivedEvent) source; this.room = e.getSourceChatRoom(); this.address = room.getIdentifier(); this.ppService = room.getParentProvider(); this.timestamp = e.getTimestamp(); } } @Override public String toString() { return "ComparableEvtObj{" + "address='" + address + '\'' + ", ppService=" + ppService + '}'; } /** * The timestamp of the message. * * @return the timestamp of the message. */ public Date getTimestamp() { return timestamp; } /** * The contact. * * @return the contact. */ public Contact getContact() { return contact; } /** * The room. * * @return the room. */ public ChatRoom getRoom() { return room; } /** * The protocol provider. * * @return the protocol provider. */ public ProtocolProviderService getProtocolProviderService() { return ppService; } /** * The address. * * @return the address. */ public String getContactAddress() { if (this.address != null) return this.address; return null; } /** * The event object. * * @return the event object. */ public EventObject getEventObject() { return eventObject; } /** * Compares two ComparableEvtObj. * * @param o the object to compare with * @return 0, less than zero, greater than zero, if equals, less or greater. */ @Override public int compareTo(ComparableEvtObj o) { if (o == null || o.getTimestamp() == null) return 1; return o.getTimestamp().compareTo(getTimestamp()); } /** * Checks if equals, and if this event object is used to create a MessageSourceContact, if the * supplied <tt>Object</tt> is instance of MessageSourceContact. 
* * @param o the object to check. * @return <tt>true</tt> if equals. */ @Override public boolean equals(Object o) { if (this == o) return true; if (o == null || (!(o instanceof MessageSourceContact) && getClass() != o.getClass())) return false; if (o instanceof ComparableEvtObj) { ComparableEvtObj that = (ComparableEvtObj) o; if (!address.equals(that.address)) return false; if (!ppService.equals(that.ppService)) return false; } else if (o instanceof MessageSourceContact) { MessageSourceContact that = (MessageSourceContact) o; if (!address.equals(that.getContactAddress())) return false; if (!ppService.equals(that.getProtocolProviderService())) return false; } else return false; return true; } @Override public int hashCode() { int result = address.hashCode(); result = 31 * result + ppService.hashCode(); return result; } } }
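/**
 * A hypothetical usage sketch, not part of the original sources: it illustrates how a client of
 * the <tt>ContactSourceService</tt> contract implemented above might read the current
 * recent-messages results out of a <tt>MessageSourceService</tt>. The class and method names here
 * are illustrative, and it is assumed that <tt>getQueryResults()</tt> and
 * <tt>getDisplayName()</tt> are exposed by the <tt>ContactQuery</tt> and <tt>SourceContact</tt>
 * contracts, as the calls in the listing above suggest.
 */
class RecentMessagesUsageSketch {
    static void dumpRecentMessages(MessageSourceService source) {
        // The query string must be empty: createContactQuery(String, int) returns null for
        // non-empty strings, since recent messages are not searchable by text.
        ContactQuery query = source.createContactQuery("");
        if (query == null) return;
        for (SourceContact contact : query.getQueryResults())
            System.out.println(contact.getDisplayName());
    }
}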
/** * A <tt>Recorder</tt> implementation which attaches to an <tt>RTPTranslator</tt>. * * @author Vladimir Marinov * @author Boris Grozev */ public class RecorderRtpImpl implements Recorder, ReceiveStreamListener, ActiveSpeakerChangedListener, ControllerListener { /** * The <tt>Logger</tt> used by the <tt>RecorderRtpImpl</tt> class and its instances for logging * output. */ private static final Logger logger = Logger.getLogger(RecorderRtpImpl.class); // values hard-coded to match chrome // TODO: allow to set them dynamically private static final byte redPayloadType = 116; private static final byte ulpfecPayloadType = 117; private static final byte vp8PayloadType = 100; private static final byte opusPayloadType = 111; private static final Format redFormat = new VideoFormat(Constants.RED); private static final Format ulpfecFormat = new VideoFormat(Constants.ULPFEC); private static final Format vp8RtpFormat = new VideoFormat(Constants.VP8_RTP); private static final Format vp8Format = new VideoFormat(Constants.VP8); private static final Format opusFormat = new AudioFormat(Constants.OPUS_RTP, 48000, Format.NOT_SPECIFIED, Format.NOT_SPECIFIED); private static final int FMJ_VIDEO_JITTER_BUFFER_MIN_SIZE = 300; /** The <tt>ContentDescriptor</tt> to use when saving audio. */ private static final ContentDescriptor AUDIO_CONTENT_DESCRIPTOR = new ContentDescriptor(FileTypeDescriptor.MPEG_AUDIO); /** The suffix for audio file names. */ private static final String AUDIO_FILENAME_SUFFIX = ".mp3"; /** The suffix for video file names. */ private static final String VIDEO_FILENAME_SUFFIX = ".webm"; static { Registry.set("video_jitter_buffer_MIN_SIZE", FMJ_VIDEO_JITTER_BUFFER_MIN_SIZE); } /** The <tt>RTPTranslator</tt> that this recorder is/will be attached to. */ private RTPTranslatorImpl translator; /** * The custom <tt>RTPConnector</tt> that this instance uses to read from {@link #translator} and * write to {@link #rtpManager}. */ private RTPConnectorImpl rtpConnector; /** Path to the directory where the output files will be stored. */ private String path; /** The <tt>RTCPFeedbackMessageSender</tt> that we use to send RTCP FIR messages. */ private RTCPFeedbackMessageSender rtcpFeedbackSender; /** * The {@link RTPManager} instance we use to handle the packets coming from * <tt>RTPTranslator</tt>. */ private RTPManager rtpManager; /** * The instance which should be notified when events related to recordings (such as the start or * end of a recording) occur. */ private RecorderEventHandlerImpl eventHandler; /** * Holds the <tt>ReceiveStreams</tt> added to this instance by {@link #rtpManager} and additional * information associated with each one (e.g. the <tt>Processor</tt>, if any, used for it). */ private final HashSet<ReceiveStreamDesc> receiveStreams = new HashSet<ReceiveStreamDesc>(); private final Set<Long> activeVideoSsrcs = new HashSet<Long>(); /** * The <tt>ActiveSpeakerDetector</tt> which will listen to the audio receive streams of this * <tt>RecorderRtpImpl</tt> and notify it about changes to the active speaker via calls to {@link * #activeSpeakerChanged(long)} */ private ActiveSpeakerDetector activeSpeakerDetector = null; StreamRTPManager streamRTPManager; private SynchronizerImpl synchronizer; private boolean started = false; /** * Constructor. * * @param translator the <tt>RTPTranslator</tt> to which this instance will attach in order to * record media. 
*/ public RecorderRtpImpl(RTPTranslator translator) { this.translator = (RTPTranslatorImpl) translator; activeSpeakerDetector = new ActiveSpeakerDetectorImpl(); activeSpeakerDetector.addActiveSpeakerChangedListener(this); } /** Implements {@link Recorder#addListener(Recorder.Listener)}. */ @Override public void addListener(Listener listener) {} /** Implements {@link Recorder#removeListener(Recorder.Listener)}. */ @Override public void removeListener(Listener listener) {} /** Implements {@link Recorder#getSupportedFormats()}. */ @Override public List<String> getSupportedFormats() { return null; } /** Implements {@link Recorder#setMute(boolean)}. */ @Override public void setMute(boolean mute) {} /** * Implements {@link Recorder#getFilename()}. Returns null, since we don't have a (single) * associated filename. */ @Override public String getFilename() { return null; } /** * Sets the instance which should be notified when events related to recordings (such as the start * or end of a recording) occur. */ public void setEventHandler(RecorderEventHandler eventHandler) { if (this.eventHandler == null || (this.eventHandler != eventHandler && this.eventHandler.handler != eventHandler)) { if (this.eventHandler == null) this.eventHandler = new RecorderEventHandlerImpl(eventHandler); else this.eventHandler.handler = eventHandler; } } /** * {@inheritDoc} * * @param format unused, since this implementation records multiple streams using potentially * different formats. * @param dirname the path to the directory into which this <tt>Recorder</tt> will store the * recorded media files. */ @Override public void start(String format, String dirname) throws IOException, MediaException { if (logger.isInfoEnabled()) logger.info("Starting, format=" + format + " " + hashCode()); path = dirname; MediaService mediaService = LibJitsi.getMediaService(); /* * Note that we use only one RTPConnector for both the RTPTranslator * and the RTPManager instances. The this.translator will write to its * output streams, and this.rtpManager will read from its input streams. */ rtpConnector = new RTPConnectorImpl(redPayloadType, ulpfecPayloadType); rtpManager = RTPManager.newInstance(); /* * Add the formats that we know about. */ rtpManager.addFormat(vp8RtpFormat, vp8PayloadType); rtpManager.addFormat(opusFormat, opusPayloadType); rtpManager.addReceiveStreamListener(this); /* * Note: When this.rtpManager sends RTCP sender/receiver reports, they * will end up being written to its own input stream. This is not * expected to cause problems, but might be something to keep an eye on. */ rtpManager.initialize(rtpConnector); /* * Register a fake call participant. * TODO: can we use a more generic MediaStream here? */ streamRTPManager = new StreamRTPManager( mediaService.createMediaStream( new MediaDeviceImpl(new CaptureDeviceInfo(), MediaType.VIDEO)), translator); streamRTPManager.initialize(rtpConnector); rtcpFeedbackSender = translator.getRtcpFeedbackMessageSender(); translator.addFormat(streamRTPManager, opusFormat, opusPayloadType); // ((RTPTranslatorImpl)videoRTPTranslator).addFormat(streamRTPManager, redFormat, // redPayloadType); // ((RTPTranslatorImpl)videoRTPTranslator).addFormat(streamRTPManager, ulpfecFormat, // ulpfecPayloadType); // ((RTPTranslatorImpl)videoRTPTranslator).addFormat(streamRTPManager, // mediaFormatImpl.getFormat(), vp8PayloadType); started = true; } @Override public void stop() { if (started) { if (logger.isInfoEnabled()) logger.info("Stopping " + hashCode()); // remove the recorder from the translator (e.g. 
stop new packets from // being written to rtpConnector if (streamRTPManager != null) streamRTPManager.dispose(); HashSet<ReceiveStreamDesc> streamsToRemove = new HashSet<ReceiveStreamDesc>(); synchronized (receiveStreams) { streamsToRemove.addAll(receiveStreams); } for (ReceiveStreamDesc r : streamsToRemove) removeReceiveStream(r, false); rtpConnector.rtcpPacketTransformer.close(); rtpConnector.rtpPacketTransformer.close(); rtpManager.dispose(); started = false; } } /** * Implements {@link ReceiveStreamListener#update(ReceiveStreamEvent)}. * * <p>{@link #rtpManager} will use this to notify us of <tt>ReceiveStreamEvent</tt>s. */ @Override public void update(ReceiveStreamEvent event) { if (event == null) return; ReceiveStream receiveStream = event.getReceiveStream(); if (event instanceof NewReceiveStreamEvent) { if (receiveStream == null) { logger.warn("NewReceiveStreamEvent: null"); return; } final long ssrc = getReceiveStreamSSRC(receiveStream); ReceiveStreamDesc receiveStreamDesc = findReceiveStream(ssrc); if (receiveStreamDesc != null) { String s = "NewReceiveStreamEvent for an existing SSRC. "; if (receiveStream != receiveStreamDesc.receiveStream) s += "(but different ReceiveStream object)"; logger.warn(s); return; } else receiveStreamDesc = new ReceiveStreamDesc(receiveStream); if (logger.isInfoEnabled()) logger.info("New ReceiveStream, ssrc=" + ssrc); // Find the format of the ReceiveStream DataSource dataSource = receiveStream.getDataSource(); if (dataSource instanceof PushBufferDataSource) { Format format = null; PushBufferDataSource pbds = (PushBufferDataSource) dataSource; for (PushBufferStream pbs : pbds.getStreams()) { if ((format = pbs.getFormat()) != null) break; } if (format == null) { logger.error("Failed to handle new ReceiveStream: " + "Failed to determine format"); return; } receiveStreamDesc.format = format; } else { logger.error("Failed to handle new ReceiveStream: " + "Unsupported DataSource"); return; } int rtpClockRate = -1; if (receiveStreamDesc.format instanceof AudioFormat) rtpClockRate = (int) ((AudioFormat) receiveStreamDesc.format).getSampleRate(); else if (receiveStreamDesc.format instanceof VideoFormat) rtpClockRate = 90000; getSynchronizer().setRtpClockRate(ssrc, rtpClockRate); // create a Processor and configure it Processor processor = null; try { processor = Manager.createProcessor(receiveStream.getDataSource()); } catch (NoProcessorException npe) { logger.error("Failed to create Processor: ", npe); return; } catch (IOException ioe) { logger.error("Failed to create Processor: ", ioe); return; } if (logger.isInfoEnabled()) logger.info("Created processor for SSRC=" + ssrc); processor.addControllerListener(this); receiveStreamDesc.processor = processor; final int streamCount; synchronized (receiveStreams) { receiveStreams.add(receiveStreamDesc); streamCount = receiveStreams.size(); } /* * XXX TODO IRBABOON * This is a terrible hack which works around a failure to realize() * some of the Processor-s for audio streams, when multiple streams * start nearly simultaneously. The cause of the problem is currently * unknown (and synchronizing all FMJ calls in RecorderRtpImpl * does not help). * XXX TODO NOOBABRI */ if (receiveStreamDesc.format instanceof AudioFormat) { final Processor p = processor; new Thread() { @Override public void run() { // delay configuring the processors for the different // audio streams to decrease the probability that they // run together. 
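// Each additional audio stream waits an extra 450 ms (see below), so the
// configure() calls end up staggered rather than simultaneous.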
try { int ms = 450 * (streamCount - 1); logger.warn( "Sleeping for " + ms + "ms before" + " configuring processor for SSRC=" + ssrc + " " + System.currentTimeMillis()); Thread.sleep(ms); } catch (Exception e) { } p.configure(); } }.start(); } else { processor.configure(); } } else if (event instanceof TimeoutEvent) { if (receiveStream == null) { // TODO: we might want to get the list of ReceiveStream-s from // rtpManager and compare it to our list, to see if we should // remove a stream. logger.warn("TimeoutEvent: null."); return; } // FMJ silently creates new ReceiveStream instances, so we have to // recognize them by the SSRC. ReceiveStreamDesc receiveStreamDesc = findReceiveStream(getReceiveStreamSSRC(receiveStream)); if (receiveStreamDesc != null) { if (logger.isInfoEnabled()) { logger.info("ReceiveStream timeout, ssrc=" + receiveStreamDesc.ssrc); } removeReceiveStream(receiveStreamDesc, true); } } else if (event != null && logger.isInfoEnabled()) { logger.info("Unhandled ReceiveStreamEvent (" + event.getClass().getName() + "): " + event); } } private void removeReceiveStream(ReceiveStreamDesc receiveStream, boolean emptyJB) { if (receiveStream.format instanceof VideoFormat) { rtpConnector.packetBuffer.disable(receiveStream.ssrc); emptyPacketBuffer(receiveStream.ssrc); } if (receiveStream.dataSink != null) { try { receiveStream.dataSink.stop(); } catch (IOException e) { logger.error("Failed to stop DataSink " + e); } receiveStream.dataSink.close(); } if (receiveStream.processor != null) { receiveStream.processor.stop(); receiveStream.processor.close(); } DataSource dataSource = receiveStream.receiveStream.getDataSource(); if (dataSource != null) { try { dataSource.stop(); } catch (IOException ioe) { logger.warn("Failed to stop DataSource"); } dataSource.disconnect(); } synchronized (receiveStreams) { receiveStreams.remove(receiveStream); } } /** * Implements {@link ControllerListener#controllerUpdate(ControllerEvent)}. Handles events from * the <tt>Processor</tt>s that this instance uses to transcode media. * * @param ev the event to handle. */ public void controllerUpdate(ControllerEvent ev) { if (ev == null || ev.getSourceController() == null) { return; } Processor processor = (Processor) ev.getSourceController(); ReceiveStreamDesc desc = findReceiveStream(processor); if (desc == null) { logger.warn("Event from an orphaned processor, ignoring: " + ev); return; } if (ev instanceof ConfigureCompleteEvent) { if (logger.isInfoEnabled()) { logger.info( "Configured processor for ReceiveStream ssrc=" + desc.ssrc + " (" + desc.format + ")" + " " + System.currentTimeMillis()); } boolean audio = desc.format instanceof AudioFormat; if (audio) { ContentDescriptor cd = processor.setContentDescriptor(AUDIO_CONTENT_DESCRIPTOR); if (!AUDIO_CONTENT_DESCRIPTOR.equals(cd)) { logger.error( "Failed to set the Processor content " + "descriptor to " + AUDIO_CONTENT_DESCRIPTOR + ". Actual result: " + cd); removeReceiveStream(desc, false); return; } } for (TrackControl track : processor.getTrackControls()) { Format trackFormat = track.getFormat(); if (audio) { final long ssrc = desc.ssrc; SilenceEffect silenceEffect; if (Constants.OPUS_RTP.equals(desc.format.getEncoding())) { silenceEffect = new SilenceEffect(48000); } else { // We haven't tested that the RTP timestamps survive // the journey through the chain when codecs other than // opus are in use, so for the moment we rely on FMJ's // timestamps for non-opus formats.
silenceEffect = new SilenceEffect(); } silenceEffect.setListener( new SilenceEffect.Listener() { boolean first = true; @Override public void onSilenceNotInserted(long timestamp) { if (first) { first = false; // send event only audioRecordingStarted(ssrc, timestamp); } else { // change file and send event resetRecording(ssrc, timestamp); } } }); desc.silenceEffect = silenceEffect; AudioLevelEffect audioLevelEffect = new AudioLevelEffect(); audioLevelEffect.setAudioLevelListener( new SimpleAudioLevelListener() { @Override public void audioLevelChanged(int level) { activeSpeakerDetector.levelChanged(ssrc, level); } }); try { // We add an effect, which will insert "silence" in // place of lost packets. track.setCodecChain(new Codec[] {silenceEffect, audioLevelEffect}); } catch (UnsupportedPlugInException upie) { logger.warn("Failed to insert silence effect: " + upie); // But do go on, a recording without extra silence is // better than nothing ;) } } else { // transcode vp8/rtp to vp8 (i.e. depacketize vp8) if (trackFormat.matches(vp8RtpFormat)) track.setFormat(vp8Format); else { logger.error("Unsupported track format: " + trackFormat + " for ssrc=" + desc.ssrc); // we currently only support vp8 removeReceiveStream(desc, false); return; } } } processor.realize(); } else if (ev instanceof RealizeCompleteEvent) { desc.dataSource = processor.getDataOutput(); long ssrc = desc.ssrc; boolean audio = desc.format instanceof AudioFormat; String suffix = audio ? AUDIO_FILENAME_SUFFIX : VIDEO_FILENAME_SUFFIX; // XXX '\' on windows? String filename = getNextFilename(path + "/" + ssrc, suffix); desc.filename = filename; DataSink dataSink; if (audio) { try { dataSink = Manager.createDataSink(desc.dataSource, new MediaLocator("file:" + filename)); } catch (NoDataSinkException ndse) { logger.error("Could not create DataSink: " + ndse); removeReceiveStream(desc, false); return; } } else { dataSink = new WebmDataSink(filename, desc.dataSource); } if (logger.isInfoEnabled()) logger.info( "Created DataSink (" + dataSink + ") for SSRC=" + ssrc + ". Output filename: " + filename); try { dataSink.open(); } catch (IOException e) { logger.error("Failed to open DataSink (" + dataSink + ") for" + " SSRC=" + ssrc + ": " + e); removeReceiveStream(desc, false); return; } if (!audio) { final WebmDataSink webmDataSink = (WebmDataSink) dataSink; webmDataSink.setSsrc(ssrc); webmDataSink.setEventHandler(eventHandler); webmDataSink.setKeyFrameControl( new KeyFrameControlAdapter() { @Override public boolean requestKeyFrame(boolean urgent) { return requestFIR(webmDataSink); } }); } try { dataSink.start(); } catch (IOException e) { logger.error( "Failed to start DataSink (" + dataSink + ") for" + " SSRC=" + ssrc + ". " + e); removeReceiveStream(desc, false); return; } if (logger.isInfoEnabled()) logger.info("Started DataSink for SSRC=" + ssrc); desc.dataSink = dataSink; processor.start(); } else if (logger.isDebugEnabled()) { logger.debug( "Unhandled ControllerEvent from the Processor for ssrc=" + desc.ssrc + ": " + ev); } } /** * Restarts the recording for a specific SSRC. * * @param ssrc the SSRC for which to restart recording. * @param timestamp the RTP timestamp of the first RTP packet of the new recording.
*/ private void resetRecording(long ssrc, long timestamp) { ReceiveStreamDesc receiveStream = findReceiveStream(ssrc); // we only restart audio recordings if (receiveStream != null && receiveStream.format instanceof AudioFormat) { String newFilename = getNextFilename(path + "/" + ssrc, AUDIO_FILENAME_SUFFIX); // flush the buffer contained in the MP3 encoder String s = "trying to flush ssrc=" + ssrc; Processor p = receiveStream.processor; if (p != null) { s += " p!=null"; for (TrackControl tc : p.getTrackControls()) { Object o = tc.getControl(FlushableControl.class.getName()); if (o != null) ((FlushableControl) o).flush(); } } if (logger.isInfoEnabled()) { logger.info("Restarting recording for SSRC=" + ssrc + ". New filename: " + newFilename); } receiveStream.dataSink.close(); receiveStream.dataSink = null; // flush the FMJ jitter buffer // DataSource ds = receiveStream.receiveStream.getDataSource(); // if (ds instanceof net.sf.fmj.media.protocol.rtp.DataSource) // ((net.sf.fmj.media.protocol.rtp.DataSource)ds).flush(); receiveStream.filename = newFilename; try { receiveStream.dataSink = Manager.createDataSink( receiveStream.dataSource, new MediaLocator("file:" + newFilename)); } catch (NoDataSinkException ndse) { logger.warn("Could not reset recording for SSRC=" + ssrc + ": " + ndse); removeReceiveStream(receiveStream, false); return; } try { receiveStream.dataSink.open(); receiveStream.dataSink.start(); } catch (IOException ioe) { logger.warn("Could not reset recording for SSRC=" + ssrc + ": " + ioe); removeReceiveStream(receiveStream, false); return; } audioRecordingStarted(ssrc, timestamp); } } private void audioRecordingStarted(long ssrc, long timestamp) { ReceiveStreamDesc desc = findReceiveStream(ssrc); if (desc == null) return; RecorderEvent event = new RecorderEvent(); event.setType(RecorderEvent.Type.RECORDING_STARTED); event.setMediaType(MediaType.AUDIO); event.setSsrc(ssrc); event.setRtpTimestamp(timestamp); event.setFilename(desc.filename); if (eventHandler != null) eventHandler.handleEvent(event); } /** * Handles a request from a specific <tt>DataSink</tt> for a keyframe, by sending an RTCP * FIR feedback message to the media source. * * @param dataSink the <tt>DataSink</tt> on whose behalf a keyframe is to be requested with a FIR * message. * @return <tt>true</tt> if a keyframe was successfully requested, <tt>false</tt> otherwise */ private boolean requestFIR(WebmDataSink dataSink) { ReceiveStreamDesc desc = findReceiveStream(dataSink); if (desc != null && rtcpFeedbackSender != null) { return rtcpFeedbackSender.sendFIR((int) desc.ssrc); } return false; } /** * Returns "prefix"+"suffix" if the file with this name does not exist. Otherwise, returns the * first nonexistent filename of the form "prefix-"+i+"suffix", for an integer i. i is bounded by * 1000 to prevent hanging, and on failure to find a nonexistent filename the method will return * null. * * @param prefix the filename prefix. * @param suffix the filename suffix. * @return an unused filename, or <tt>null</tt> on failure to find one. */ private String getNextFilename(String prefix, String suffix) { if (!new File(prefix + suffix).exists()) return prefix + suffix; int i = 1; String s; do { s = prefix + "-" + i + suffix; if (!new File(s).exists()) return s; i++; } while (i < 1000); // don't hang indefinitely... return null; } /** * Finds the <tt>ReceiveStreamDesc</tt> with a particular <tt>Processor</tt>. * * @param processor The <tt>Processor</tt> to match. * @return the <tt>ReceiveStreamDesc</tt> with a particular <tt>Processor</tt>, or <tt>null</tt>.
*/ private ReceiveStreamDesc findReceiveStream(Processor processor) { if (processor == null) return null; synchronized (receiveStreams) { for (ReceiveStreamDesc r : receiveStreams) if (processor.equals(r.processor)) return r; } return null; } /** * Finds the <tt>ReceiveStreamDesc</tt> with a particular <tt>DataSink</tt>. * * @param dataSink The <tt>DataSink</tt> to match. * @return the <tt>ReceiveStreamDesc</tt> with a particular <tt>DataSink</tt>, or <tt>null</tt>. */ private ReceiveStreamDesc findReceiveStream(DataSink dataSink) { if (dataSink == null) return null; synchronized (receiveStreams) { for (ReceiveStreamDesc r : receiveStreams) if (dataSink.equals(r.dataSink)) return r; } return null; } /** * Finds the <tt>ReceiveStreamDesc</tt> with a particular SSRC. * * @param ssrc The SSRC to match. * @return the <tt>ReceiveStreamDesc</tt> with a particular SSRC, or <tt>null</tt>. */ private ReceiveStreamDesc findReceiveStream(long ssrc) { synchronized (receiveStreams) { for (ReceiveStreamDesc r : receiveStreams) if (ssrc == r.ssrc) return r; } return null; } /** * Gets the SSRC of a <tt>ReceiveStream</tt> as a (non-negative) <tt>long</tt>. * * <p>FMJ stores the 32-bit SSRC values in <tt>int</tt>s, and the <tt>ReceiveStream.getSSRC()</tt> * implementation(s) don't take care of converting the negative <tt>int</tt> values sometimes * resulting from reading of a 32-bit field into the correct unsigned <tt>long</tt> value. So do * the conversion here. * * @param receiveStream the <tt>ReceiveStream</tt> for which to get the SSRC. * @return the SSRC of <tt>receiveStream</tt> as a (non-negative) <tt>long</tt>. */ private long getReceiveStreamSSRC(ReceiveStream receiveStream) { return 0xffffffffL & receiveStream.getSSRC(); } /** * Implements {@link ActiveSpeakerChangedListener#activeSpeakerChanged(long)}. Notifies this * <tt>RecorderRtpImpl</tt> that the audio <tt>ReceiveStream</tt> considered active has changed, * and that the new active stream has SSRC <tt>ssrc</tt>. * * @param ssrc the SSRC of the new active stream. */ @Override public void activeSpeakerChanged(long ssrc) { if (eventHandler != null) { RecorderEvent e = new RecorderEvent(); e.setAudioSsrc(ssrc); // TODO: how do we time this?
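// No RTP timestamp accompanies the active-speaker notification, so the local
// wall clock is used as the event instant.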
e.setInstant(System.currentTimeMillis()); e.setType(RecorderEvent.Type.SPEAKER_CHANGED); e.setMediaType(MediaType.VIDEO); eventHandler.handleEvent(e); } } private void handleRtpPacket(RawPacket pkt) { if (pkt != null && pkt.getPayloadType() == vp8PayloadType) { int ssrc = pkt.getSSRC(); if (!activeVideoSsrcs.contains(ssrc & 0xffffffffL)) { synchronized (activeVideoSsrcs) { if (!activeVideoSsrcs.contains(ssrc & 0xffffffffL)) { activeVideoSsrcs.add(ssrc & 0xffffffffL); rtcpFeedbackSender.sendFIR(ssrc); } } } } } private void handleRtcpPacket(RawPacket pkt) { getSynchronizer().addRTCPPacket(pkt); eventHandler.nudge(); } public SynchronizerImpl getSynchronizer() { if (synchronizer == null) synchronizer = new SynchronizerImpl(); return synchronizer; } public void setSynchronizer(Synchronizer synchronizer) { if (synchronizer instanceof SynchronizerImpl) { this.synchronizer = (SynchronizerImpl) synchronizer; } } public void connect(Recorder recorder) { if (!(recorder instanceof RecorderRtpImpl)) return; ((RecorderRtpImpl) recorder).setSynchronizer(getSynchronizer()); } private void emptyPacketBuffer(long ssrc) { RawPacket[] pkts = rtpConnector.packetBuffer.emptyBuffer(ssrc); RTPConnectorImpl.OutputDataStreamImpl dataStream; try { dataStream = rtpConnector.getDataOutputStream(); } catch (IOException ioe) { logger.error("Failed to empty packet buffer for SSRC=" + ssrc + ": " + ioe); return; } for (RawPacket pkt : pkts) dataStream.write( pkt.getBuffer(), pkt.getOffset(), pkt.getLength(), false /* already transformed */); } /** The <tt>RTPConnector</tt> implementation used by this <tt>RecorderRtpImpl</tt>. */ private class RTPConnectorImpl implements RTPConnector { private PushSourceStreamImpl controlInputStream; private OutputDataStreamImpl controlOutputStream; private PushSourceStreamImpl dataInputStream; private OutputDataStreamImpl dataOutputStream; private SourceTransferHandler dataTransferHandler; private SourceTransferHandler controlTransferHandler; private RawPacket pendingDataPacket = new RawPacket(); private RawPacket pendingControlPacket = new RawPacket(); private PacketTransformer rtpPacketTransformer = null; private PacketTransformer rtcpPacketTransformer = null; /** The PacketBuffer instance which we use as a jitter buffer. */ private PacketBuffer packetBuffer; private RTPConnectorImpl(byte redPT, byte ulpfecPT) { packetBuffer = new PacketBuffer(); // The chain of transformers will be applied in reverse order for // incoming packets. 
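// For an incoming packet this means: RED decapsulation first, then FEC recovery, then (on the
// RTCP path) splitting of compound packets, then interception by TransformEngineImpl
// (handleRtpPacket/handleRtcpPacket), and finally the packetBuffer jitter buffer.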
TransformEngine transformEngine = new TransformEngineChain( new TransformEngine[] { packetBuffer, new TransformEngineImpl(), new CompoundPacketEngine(), new FECTransformEngine(ulpfecPT, (byte) -1), new REDTransformEngine(redPT, (byte) -1) }); rtpPacketTransformer = transformEngine.getRTPTransformer(); rtcpPacketTransformer = transformEngine.getRTCPTransformer(); } private RTPConnectorImpl() {} @Override public void close() { try { if (dataOutputStream != null) dataOutputStream.close(); if (controlOutputStream != null) controlOutputStream.close(); } catch (IOException ioe) { throw new UndeclaredThrowableException(ioe); } } @Override public PushSourceStream getControlInputStream() throws IOException { if (controlInputStream == null) { controlInputStream = new PushSourceStreamImpl(true); } return controlInputStream; } @Override public OutputDataStream getControlOutputStream() throws IOException { if (controlOutputStream == null) { controlOutputStream = new OutputDataStreamImpl(true); } return controlOutputStream; } @Override public PushSourceStream getDataInputStream() throws IOException { if (dataInputStream == null) { dataInputStream = new PushSourceStreamImpl(false); } return dataInputStream; } @Override public OutputDataStreamImpl getDataOutputStream() throws IOException { if (dataOutputStream == null) { dataOutputStream = new OutputDataStreamImpl(false); } return dataOutputStream; } @Override public double getRTCPBandwidthFraction() { return -1; } @Override public double getRTCPSenderBandwidthFraction() { return -1; } @Override public int getReceiveBufferSize() { // TODO Auto-generated method stub return 0; } @Override public int getSendBufferSize() { // TODO Auto-generated method stub return 0; } @Override public void setReceiveBufferSize(int arg0) throws IOException { // TODO Auto-generated method stub } @Override public void setSendBufferSize(int arg0) throws IOException { // TODO Auto-generated method stub } private class OutputDataStreamImpl implements OutputDataStream { boolean isControlStream; private RawPacket[] rawPacketArray = new RawPacket[1]; public OutputDataStreamImpl(boolean isControlStream) { this.isControlStream = isControlStream; } public int write(byte[] buffer, int offset, int length) { return write(buffer, offset, length, true); } public int write(byte[] buffer, int offset, int length, boolean transform) { RawPacket pkt = rawPacketArray[0]; if (pkt == null) pkt = new RawPacket(); rawPacketArray[0] = pkt; byte[] pktBuf = pkt.getBuffer(); if (pktBuf == null || pktBuf.length < length) { pktBuf = new byte[length]; pkt.setBuffer(pktBuf); } System.arraycopy(buffer, offset, pktBuf, 0, length); pkt.setOffset(0); pkt.setLength(length); if (transform) { PacketTransformer packetTransformer = isControlStream ? 
rtcpPacketTransformer : rtpPacketTransformer; if (packetTransformer != null) rawPacketArray = packetTransformer.reverseTransform(rawPacketArray); } SourceTransferHandler transferHandler; PushSourceStream pushSourceStream; try { if (isControlStream) { transferHandler = controlTransferHandler; pushSourceStream = getControlInputStream(); } else { transferHandler = dataTransferHandler; pushSourceStream = getDataInputStream(); } } catch (IOException ioe) { throw new UndeclaredThrowableException(ioe); } for (int i = 0; i < rawPacketArray.length; i++) { RawPacket packet = rawPacketArray[i]; // keep the first element for reuse if (i != 0) rawPacketArray[i] = null; if (packet != null) { if (isControlStream) pendingControlPacket = packet; else pendingDataPacket = packet; if (transferHandler != null) { transferHandler.transferData(pushSourceStream); } } } return length; } public void close() throws IOException {} } /** * A dummy implementation of {@link PushSourceStream}. * * @author Vladimir Marinov */ private class PushSourceStreamImpl implements PushSourceStream { private boolean isControlStream = false; public PushSourceStreamImpl(boolean isControlStream) { this.isControlStream = isControlStream; } /** Not implemented because there are currently no uses of the underlying functionality. */ @Override public boolean endOfStream() { return false; } /** Not implemented because there are currently no uses of the underlying functionality. */ @Override public ContentDescriptor getContentDescriptor() { return null; } /** Not implemented because there are currently no uses of the underlying functionality. */ @Override public long getContentLength() { return 0; } /** Not implemented because there are currently no uses of the underlying functionality. */ @Override public Object getControl(String arg0) { return null; } /** Not implemented because there are currently no uses of the underlying functionality. */ @Override public Object[] getControls() { return null; } /** Not implemented because there are currently no uses of the underlying functionality. */ @Override public int getMinimumTransferSize() { if (isControlStream) { if (pendingControlPacket.getBuffer() != null) { return pendingControlPacket.getLength(); } } else { if (pendingDataPacket.getBuffer() != null) { return pendingDataPacket.getLength(); } } return 0; } @Override public int read(byte[] buffer, int offset, int length) throws IOException { RawPacket pendingPacket; if (isControlStream) { pendingPacket = pendingControlPacket; } else { pendingPacket = pendingDataPacket; } int bytesToRead = 0; byte[] pendingPacketBuffer = pendingPacket.getBuffer(); if (pendingPacketBuffer != null) { int pendingPacketLength = pendingPacket.getLength(); bytesToRead = length > pendingPacketLength ? pendingPacketLength : length; System.arraycopy( pendingPacketBuffer, pendingPacket.getOffset(), buffer, offset, bytesToRead); } return bytesToRead; } /** * {@inheritDoc} * * <p>We keep the first non-null <tt>SourceTransferHandler</tt> that was set, because we don't * want it to be overwritten when we initialize a second <tt>RTPManager</tt> with this * <tt>RTPConnector</tt>. 
* * <p>See {@link RecorderRtpImpl#start(String, String)} */ @Override public void setTransferHandler(SourceTransferHandler transferHandler) { if (isControlStream) { if (RTPConnectorImpl.this.controlTransferHandler == null) { RTPConnectorImpl.this.controlTransferHandler = transferHandler; } } else { if (RTPConnectorImpl.this.dataTransferHandler == null) { RTPConnectorImpl.this.dataTransferHandler = transferHandler; } } } } /** * A transform engine implementation which allows <tt>RecorderRtpImpl</tt> to intercept incoming * RTP and RTCP packets. */ private class TransformEngineImpl implements TransformEngine { SinglePacketTransformer rtpTransformer = new SinglePacketTransformer() { @Override public RawPacket transform(RawPacket pkt) { return pkt; } @Override public RawPacket reverseTransform(RawPacket pkt) { RecorderRtpImpl.this.handleRtpPacket(pkt); return pkt; } @Override public void close() {} }; SinglePacketTransformer rtcpTransformer = new SinglePacketTransformer() { @Override public RawPacket transform(RawPacket pkt) { return pkt; } @Override public RawPacket reverseTransform(RawPacket pkt) { RecorderRtpImpl.this.handleRtcpPacket(pkt); if (pkt != null && pkt.getRTCPPayloadType() == 203) { // An RTCP BYE packet. Remove the receive stream before // it gets to FMJ, because we want to, for example, // flush the packet buffer before that. long ssrc = pkt.getRTCPSSRC() & 0xffffffffL; if (logger.isInfoEnabled()) logger.info("RTCP BYE for SSRC=" + ssrc); ReceiveStreamDesc receiveStream = findReceiveStream(ssrc); if (receiveStream != null) removeReceiveStream(receiveStream, false); } return pkt; } @Override public void close() {} }; @Override public PacketTransformer getRTPTransformer() { return rtpTransformer; } @Override public PacketTransformer getRTCPTransformer() { return rtcpTransformer; } } } private class RecorderEventHandlerImpl implements RecorderEventHandler { private RecorderEventHandler handler; private final Set<RecorderEvent> pendingEvents = new HashSet<RecorderEvent>(); private RecorderEventHandlerImpl(RecorderEventHandler handler) { this.handler = handler; } @Override public boolean handleEvent(RecorderEvent ev) { if (ev == null) return true; if (RecorderEvent.Type.RECORDING_STARTED.equals(ev.getType())) { long instant = getSynchronizer().getLocalTime(ev.getSsrc(), ev.getRtpTimestamp()); if (instant != -1) { ev.setInstant(instant); return handler.handleEvent(ev); } else { pendingEvents.add(ev); return true; } } return handler.handleEvent(ev); } private void nudge() { for (Iterator<RecorderEvent> iter = pendingEvents.iterator(); iter.hasNext(); ) { RecorderEvent ev = iter.next(); long instant = getSynchronizer().getLocalTime(ev.getSsrc(), ev.getRtpTimestamp()); if (instant != -1) { iter.remove(); ev.setInstant(instant); handler.handleEvent(ev); } } } @Override public void close() { for (RecorderEvent ev : pendingEvents) handler.handleEvent(ev); } } /** Represents a <tt>ReceiveStream</tt> for the purposes of this <tt>RecorderRtpImpl</tt>. */ private class ReceiveStreamDesc { /** * The actual <tt>ReceiveStream</tt> which is represented by this <tt>ReceiveStreamDesc</tt>. */ private ReceiveStream receiveStream; /** The SSRC of the stream. */ long ssrc; /** * The <tt>Processor</tt> used to transcode this receive stream into a format appropriate for * saving to a file. */ private Processor processor; /** The <tt>DataSink</tt> which saves <tt>this.dataSource</tt> to a file.
*/ private DataSink dataSink; /** * The <tt>DataSource</tt> for this receive stream which is to be saved using a * <tt>DataSink</tt> (i.e. the <tt>DataSource</tt> "after" all needed transcoding is done). */ private DataSource dataSource; /** The name of the file into which this stream is being saved. */ private String filename; /** The (original) format of this receive stream. */ private Format format; /** The <tt>SilenceEffect</tt> used for this stream (for audio streams only). */ private SilenceEffect silenceEffect; private ReceiveStreamDesc(ReceiveStream receiveStream) { this.receiveStream = receiveStream; this.ssrc = getReceiveStreamSSRC(receiveStream); } } }
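/*
 * A minimal, self-contained sketch of the deferral pattern used by
 * RecorderEventHandlerImpl above: an event whose RTP timestamp cannot yet be
 * mapped to local time is parked in a pending set and re-tried later, once the
 * mapping becomes available. The Event and Clock types below are hypothetical
 * stand-ins introduced for this sketch; they are not libjitsi API.
 */
import java.util.HashSet;
import java.util.Iterator;
import java.util.Set;

class DeferredEventDemo {
    /** Maps an RTP timestamp to local time; returns -1 while the mapping is unknown. */
    interface Clock { long getLocalTime(long rtpTimestamp); }

    /** A hypothetical event carrying an RTP timestamp and, once known, a local instant. */
    static class Event {
        final long rtpTimestamp;
        long instant = -1;
        Event(long rtpTimestamp) { this.rtpTimestamp = rtpTimestamp; }
    }

    private final Clock clock;
    private final Set<Event> pendingEvents = new HashSet<>();

    DeferredEventDemo(Clock clock) { this.clock = clock; }

    /** Delivers the event now if its timestamp can be mapped, otherwise defers it. */
    void handleEvent(Event ev) {
        long instant = clock.getLocalTime(ev.rtpTimestamp);
        if (instant != -1) {
            ev.instant = instant;
            deliver(ev);
        } else {
            pendingEvents.add(ev);
        }
    }

    /** Re-tries the deferred events, delivering those that can now be mapped. */
    void nudge() {
        for (Iterator<Event> iter = pendingEvents.iterator(); iter.hasNext(); ) {
            Event ev = iter.next();
            long instant = clock.getLocalTime(ev.rtpTimestamp);
            if (instant != -1) {
                iter.remove();
                ev.instant = instant;
                deliver(ev);
            }
        }
    }

    private void deliver(Event ev) {
        System.out.println("Delivering event with local instant " + ev.instant);
    }
}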
/** * This class is used to keep track of the stats of all the streams of all the fake users * (<tt>FakeUser</tt>), generate new stats, write stats to files, print them, etc. */ public class HammerStats implements Runnable { /** * The <tt>Logger</tt> used by the <tt>HammerStats</tt> class and its instances for logging * output. */ private static final Logger logger = Logger.getLogger(HammerStats.class); /** A boolean used to stop the run method of this <tt>HammerStats</tt>. */ private boolean threadStop = false; /** The name (not the path or location) of the directory where the stats files will be written. */ private static final String STATS_DIR_NAME = "stats"; /** * The path to the stats directory. All stats will be written in files located in this directory. */ private final String statsDirectoryPath; /** The file that will contain the overall stats. */ private final File overallStatsFile; /** The file that will contain all the stats recorded by run(). */ private final File allStatsFile; /** * A <tt>List</tt> of <tt>FakeUserStats</tt> that contains the <tt>MediaStreamStats</tt>s of the * <tt>FakeUser</tt>s. It is used to keep track of the streams' stats. */ private final ArrayList<FakeUserStats> fakeUserStatsList = new ArrayList<FakeUserStats>(); /** The time (in seconds) the HammerStats waits between two updates. */ private int timeBetweenUpdate = 5; /** The boolean used to know if the logging of all the stats in the run method is enabled. */ private boolean allStatsLogging = false; /** * The boolean used to know if the logging of the summary stats (like mean, standard deviation, * min, max...) computed at each polling from all the streams' stats is enabled or not. */ private boolean summaryStatsLogging = false; /** * The boolean used to know if the logging of the overall stats (like mean, standard deviation, * min, max...) computed from all the streams' stats collected is enabled or not. */ private boolean overallStatsLogging; /** The HammerSummaryStats used to compute summary stats from the audio streams' stats. */ HammerSummaryStats audioSummaryStats = new HammerSummaryStats(); /** The HammerSummaryStats used to compute summary stats from the video streams' stats. */ HammerSummaryStats videoSummaryStats = new HammerSummaryStats(); /** Initialize an instance of a <tt>HammerStats</tt> with the default stats directory path. */ public HammerStats() { /* this( System.getProperty(Main.PNAME_SC_HOME_DIR_LOCATION) + File.separator + System.getProperty(Main.PNAME_SC_HOME_DIR_NAME) + File.separator + HammerStats.STATS_DIR_NAME); */ this(System.getProperty("user.home") + "/Desktop/stats"); } /** * Initialize an instance of a <tt>HammerStats</tt> with a custom stats directory path. * * @param statsDirectoryPath the path to the stats directory, where the stats files will be saved. */ public HammerStats(String statsDirectoryPath) { this.statsDirectoryPath = statsDirectoryPath + File.separator + new SimpleDateFormat("yyyy-MM-dd' 'HH'h'mm'm'ss's'").format(new Date()); System.out.println("Stats directory: " + statsDirectoryPath); this.overallStatsFile = new File(this.statsDirectoryPath + File.separator + "overallStats.json"); this.allStatsFile = new File(this.statsDirectoryPath + File.separator + "AllAndSummaryStats.json"); logger.info("Stats directory : " + this.statsDirectoryPath); } /** * Add a <tt>FakeUserStats</tt> to the list this <tt>HammerStats</tt> is watching. * * @param fakeUserStats the <tt>FakeUserStats</tt> that will be added.
*/ public synchronized void addFakeUsersStats(FakeUserStats fakeUserStats) { if (fakeUserStats == null) { throw new NullPointerException("FakeUserStats can't be null"); } fakeUserStatsList.add(fakeUserStats); } /** * Keep track of, collect and update the stats of all the <tt>MediaStreamStats</tt> this * <tt>HammerStats</tt> handles. * * <p>Also write the results in the stats files. */ public void run() { PrintWriter writer = null; StringBuilder allBldr = new StringBuilder(); String delim; String delim_ = ""; synchronized (this) { threadStop = false; } logger.info("Running the main loop"); System.out.println("Inside HammerStats run method.\n"); while (!threadStop) { synchronized (this) { if (overallStatsLogging || allStatsLogging || summaryStatsLogging) { if (allStatsLogging || summaryStatsLogging) { if (writer == null) { try { writer = new PrintWriter(allStatsFile, "UTF-8"); writer.print("[\n"); } catch (FileNotFoundException e) { logger.fatal("HammerStats stopping due to FileNotFound", e); stop(); } catch (UnsupportedEncodingException e) { logger.fatal("HammerStats stopping due to " + "UnsupportedEncoding", e); stop(); // The stats file is unusable, so stop as the message says. } } if (writer == null) { // The stats file could not be opened and stop() has already been // called; exit the loop instead of dereferencing a null writer. break; } // Clear the StringBuilder allBldr.setLength(0); writer.print(delim_ + '\n'); delim_ = ","; writer.print("{\n"); writer.print(" \"timestamp\":" + System.currentTimeMillis() + ",\n"); } delim = ""; logger.info("Updating the MediaStreamStats"); for (FakeUserStats stats : fakeUserStatsList) { // We update the stats before using/reading them. stats.updateStats(); } for (FakeUserStats stats : fakeUserStatsList) { if (allStatsLogging) { allBldr.append(delim + stats.getStatsJSON(2) + '\n'); delim = ","; } if (summaryStatsLogging || overallStatsLogging) { logger.info( "Adding stats values from the" + " MediaStreamStats to their" + " HammerSummaryStats objects"); audioSummaryStats.add(stats.getMediaStreamStats(MediaType.AUDIO)); videoSummaryStats.add(stats.getMediaStreamStats(MediaType.VIDEO)); } } if (allStatsLogging) { logger.info("Writing all stats to file"); writer.print(" \"users\":\n"); writer.print(" [\n"); writer.print(allBldr.toString()); writer.print(" ]"); if (summaryStatsLogging) writer.print(','); writer.print('\n'); } if (summaryStatsLogging) { logger.info("Writing summary stats to file"); writer.print(" \"summary\":\n"); writer.print(" {\n"); writer.print(" \"max\":\n"); writer.print(" {\n"); writer.print(" \"audio\":"); writer.print(audioSummaryStats.getMaxJSON() + ",\n"); writer.print(" \"video\":"); writer.print(videoSummaryStats.getMaxJSON() + '\n'); writer.print(" },\n"); writer.print(" \"mean\":\n"); writer.print(" {\n"); writer.print(" \"audio\":"); writer.print(audioSummaryStats.getMeanJSON() + ",\n"); writer.print(" \"video\":"); writer.print(videoSummaryStats.getMeanJSON() + '\n'); writer.print(" },\n"); writer.print(" \"min\":\n"); writer.print(" {\n"); writer.print(" \"audio\":"); writer.print(audioSummaryStats.getMinJSON() + ",\n"); writer.print(" \"video\":"); writer.print(videoSummaryStats.getMinJSON() + '\n'); writer.print(" },\n"); writer.print(" \"standard_deviation\":\n"); writer.print(" {\n"); writer.print(" \"audio\":"); writer.print(audioSummaryStats.getStandardDeviationJSON() + ",\n"); writer.print(" \"video\":"); writer.print(videoSummaryStats.getStandardDeviationJSON() + '\n'); writer.print(" }\n"); writer.print(" }\n"); } if (allStatsLogging || summaryStatsLogging) { writer.append("}"); writer.flush(); } } if (summaryStatsLogging || overallStatsLogging) { logger.info( "Clearing the HammerSummaryStats by creating new" + " SummaryStats objects for each watched stream");
audioSummaryStats.clear(); videoSummaryStats.clear(); } } try { Thread.sleep(timeBetweenUpdate * 1000); } catch (InterruptedException e) { logger.fatal("Error during sleep in main loop : " + e); stop(); } } logger.info("Exiting the main loop"); if (writer != null) { writer.print("]\n"); writer.close(); } if (overallStatsLogging) writeOverallStats(); } /** * Provoke the stop of the method run(). The method run() won't be stopped right away: the loop * will be broken at its next iteration. * * <p>If the method run() is not running, calling this method won't do anything. */ public synchronized void stop() { if (!threadStop) { logger.info("Stopping the main loop"); System.out.println("Stopping the HammerStats.\n"); threadStop = true; } } /** * Write the overall stats of the <tt>MediaStream</tt>s this <tt>HammerStats</tt> keeps track of * to its file. */ public void writeOverallStats() { try { logger.info("Writing overall stats to file"); PrintWriter writer = new PrintWriter(overallStatsFile, "UTF-8"); writer.print(getOverallStatsJSON() + '\n'); writer.close(); } catch (FileNotFoundException e) { logger.fatal("Overall stats file opening error", e); } catch (UnsupportedEncodingException e) { logger.fatal("Overall stats file opening error", e); } } /** * Print the overall stats of the <tt>MediaStream</tt>s this <tt>HammerStats</tt> keeps track of * to the <tt>PrintStream</tt> given as argument. * * @param ps the <tt>PrintStream</tt> used to print the stats */ public void printOverallStats(PrintStream ps) { ps.println(getOverallStatsJSON()); } /** * Create and return the String that contains the overall stats (in JSON). * * @return the String that contains the overall stats. */ protected String getOverallStatsJSON() { StringBuilder bldr = new StringBuilder(); bldr.append("{\n"); bldr.append(" \"max\":\n"); bldr.append(" {\n"); bldr.append(" \"audio\":"); bldr.append(audioSummaryStats.getAggregateMaxJSON() + ",\n"); bldr.append(" \"video\":"); bldr.append(videoSummaryStats.getAggregateMaxJSON() + '\n'); bldr.append(" },\n"); bldr.append(" \"mean\":\n"); bldr.append(" {\n"); bldr.append(" \"audio\":"); bldr.append(audioSummaryStats.getAggregateMeanJSON() + ",\n"); bldr.append(" \"video\":"); bldr.append(videoSummaryStats.getAggregateMeanJSON() + '\n'); bldr.append(" },\n"); bldr.append(" \"min\":\n"); bldr.append(" {\n"); bldr.append(" \"audio\":"); bldr.append(audioSummaryStats.getAggregateMinJSON() + ",\n"); bldr.append(" \"video\":"); bldr.append(videoSummaryStats.getAggregateMinJSON() + '\n'); bldr.append(" },\n"); bldr.append(" \"standard_deviation\":\n"); bldr.append(" {\n"); bldr.append(" \"audio\":"); bldr.append(audioSummaryStats.getAggregateStandardDeviationJSON() + ",\n"); bldr.append(" \"video\":"); bldr.append(videoSummaryStats.getAggregateStandardDeviationJSON() + '\n'); bldr.append(" },\n"); bldr.append(" \"sum\":\n"); bldr.append(" {\n"); bldr.append(" \"audio\":"); bldr.append(audioSummaryStats.getAggregateSumJSON() + ",\n"); bldr.append(" \"video\":"); bldr.append(videoSummaryStats.getAggregateSumJSON() + '\n'); bldr.append(" }\n"); bldr.append("}\n"); return bldr.toString(); } /** * Set the time this <tt>HammerStats</tt> will wait between 2 updates of stats. * * @param timeval the time of the wait, in seconds */ public void setTimeBetweenUpdate(int timeval) { if (timeval <= 0) timeval = 1; this.timeBetweenUpdate = timeval; } /** Get the time (in seconds) this <tt>HammerStats</tt> will wait between 2 updates of stats.
*/ public int getTimeBetweenUpdate() { return this.timeBetweenUpdate; } /** * Enable or disable the logging of all the stats collected by this <tt>HammerStats</tt>. * * @param allStats the boolean that enables or disables the logging. */ public void setAllStatsLogging(boolean allStats) { this.allStatsLogging = allStats; if (allStats) { File saveDir = new File(this.statsDirectoryPath); if (!saveDir.exists()) { logger.info("Creating stats directory at : " + this.statsDirectoryPath); saveDir.mkdirs(); } } } /** * Enable or disable the logging of the summary stats computed with all the stats collected by * this <tt>HammerStats</tt>. * * @param summaryStats the boolean that enables or disables the logging. */ public void setSummaryStatsLogging(boolean summaryStats) { this.summaryStatsLogging = summaryStats; if (summaryStats) { File saveDir = new File(this.statsDirectoryPath); if (!saveDir.exists()) { logger.info("Creating stats directory at : " + this.statsDirectoryPath); saveDir.mkdirs(); } } } /** * Enable or disable the logging of the overall stats computed from all the stats collected by * this <tt>HammerStats</tt>. * * @param overallStats the boolean that enables or disables the logging. */ public void setOverallStatsLogging(boolean overallStats) { this.overallStatsLogging = overallStats; if (overallStats) { File saveDir = new File(this.statsDirectoryPath); if (!saveDir.exists()) { logger.info("Creating stats directory at : " + this.statsDirectoryPath); saveDir.mkdirs(); } } } /** * A private class used to keep track of and compute the summary stats and the aggregate summary * stats from all the watched <tt>MediaStreamStats</tt>. * * @author Thomas Kuntz */ private class HammerSummaryStats { /* * All the AggregateSummaryStatistics will be used to create new * SummaryStatistics (for the option "-summarystats") that will * be used to compute summary stats like max/min/std dev..., * and also be used to compute overall stats. * * At each iteration of the loop in run(), all stats are added to their * corresponding SummaryStatistics, which will automatically add them * to their related AggregateSummaryStatistics. * After that, the SummaryStatistics are used to get the summary stats * (in JSON), and are replaced by SummaryStatistics newly created by * the AggregateSummaryStatistics (for the next iteration).
*/ AggregateSummaryStatistics aggregateDownloadJitterMs = new AggregateSummaryStatistics(); AggregateSummaryStatistics aggregateDownloadPercentLoss = new AggregateSummaryStatistics(); AggregateSummaryStatistics aggregateDownloadRateKiloBitPerSec = new AggregateSummaryStatistics(); AggregateSummaryStatistics aggregateJitterBufferDelayMs = new AggregateSummaryStatistics(); AggregateSummaryStatistics aggregateJitterBufferDelayPackets = new AggregateSummaryStatistics(); AggregateSummaryStatistics aggregateNbDiscarded = new AggregateSummaryStatistics(); AggregateSummaryStatistics aggregateNbDiscardedFull = new AggregateSummaryStatistics(); AggregateSummaryStatistics aggregateNbDiscardedLate = new AggregateSummaryStatistics(); AggregateSummaryStatistics aggregateNbDiscardedReset = new AggregateSummaryStatistics(); AggregateSummaryStatistics aggregateNbDiscardedShrink = new AggregateSummaryStatistics(); AggregateSummaryStatistics aggregateNbFec = new AggregateSummaryStatistics(); AggregateSummaryStatistics aggregateNbPackets = new AggregateSummaryStatistics(); AggregateSummaryStatistics aggregateNbPacketsLost = new AggregateSummaryStatistics(); AggregateSummaryStatistics aggregateNbReceivedBytes = new AggregateSummaryStatistics(); AggregateSummaryStatistics aggregateNbSentBytes = new AggregateSummaryStatistics(); AggregateSummaryStatistics aggregatePacketQueueCountPackets = new AggregateSummaryStatistics(); AggregateSummaryStatistics aggregatePacketQueueSize = new AggregateSummaryStatistics(); AggregateSummaryStatistics aggregatePercentDiscarded = new AggregateSummaryStatistics(); AggregateSummaryStatistics aggregateRttMs = new AggregateSummaryStatistics(); AggregateSummaryStatistics aggregateUploadJitterMs = new AggregateSummaryStatistics(); AggregateSummaryStatistics aggregateUploadPercentLoss = new AggregateSummaryStatistics(); AggregateSummaryStatistics aggregateUploadRateKiloBitPerSec = new AggregateSummaryStatistics(); SummaryStatistics downloadJitterMs; SummaryStatistics downloadPercentLoss; SummaryStatistics downloadRateKiloBitPerSec; SummaryStatistics jitterBufferDelayMs; SummaryStatistics jitterBufferDelayPackets; SummaryStatistics nbDiscarded; SummaryStatistics nbDiscardedFull; SummaryStatistics nbDiscardedLate; SummaryStatistics nbDiscardedReset; SummaryStatistics nbDiscardedShrink; SummaryStatistics nbFec; SummaryStatistics nbPackets; SummaryStatistics nbPacketsLost; SummaryStatistics nbReceivedBytes; SummaryStatistics nbSentBytes; SummaryStatistics packetQueueCountPackets; SummaryStatistics packetQueueSize; SummaryStatistics percentDiscarded; SummaryStatistics rttMs; SummaryStatistics uploadJitterMs; SummaryStatistics uploadPercentLoss; SummaryStatistics uploadRateKiloBitPerSec; /** Create a new HammerSummaryStats. */ public HammerSummaryStats() { clear(); } /** * Add the stats contained by <tt>stats</tt> to their corresponding <tt>SummaryStats</tt> * objects. * * @param stats the stats of a stream that will be added.
*/ public void add(MediaStreamStats stats) { downloadJitterMs.addValue(stats.getDownloadJitterMs()); downloadPercentLoss.addValue(stats.getDownloadPercentLoss()); downloadRateKiloBitPerSec.addValue(stats.getDownloadRateKiloBitPerSec()); jitterBufferDelayMs.addValue(stats.getJitterBufferDelayMs()); jitterBufferDelayPackets.addValue(stats.getJitterBufferDelayPackets()); nbDiscarded.addValue(stats.getNbDiscarded()); nbDiscardedFull.addValue(stats.getNbDiscardedFull()); nbDiscardedLate.addValue(stats.getNbDiscardedLate()); nbDiscardedReset.addValue(stats.getNbDiscardedReset()); nbDiscardedShrink.addValue(stats.getNbDiscardedShrink()); nbFec.addValue(stats.getNbFec()); nbPackets.addValue(stats.getNbPackets()); nbPacketsLost.addValue(stats.getNbPacketsLost()); nbReceivedBytes.addValue(stats.getNbReceivedBytes()); nbSentBytes.addValue(stats.getNbSentBytes()); packetQueueCountPackets.addValue(stats.getPacketQueueCountPackets()); packetQueueSize.addValue(stats.getPacketQueueSize()); percentDiscarded.addValue(stats.getPercentDiscarded()); rttMs.addValue(stats.getRttMs()); uploadJitterMs.addValue(stats.getUploadJitterMs()); uploadPercentLoss.addValue(stats.getUploadPercentLoss()); uploadRateKiloBitPerSec.addValue(stats.getUploadRateKiloBitPerSec()); } /** * Create new <tt>SummaryStatistics</tt> from the <tt>AggregateSummaryStatistics</tt> for all * the stream's stats that are watched. */ public void clear() { downloadJitterMs = aggregateDownloadJitterMs.createContributingStatistics(); downloadPercentLoss = aggregateDownloadPercentLoss.createContributingStatistics(); downloadRateKiloBitPerSec = aggregateDownloadRateKiloBitPerSec.createContributingStatistics(); jitterBufferDelayMs = aggregateJitterBufferDelayMs.createContributingStatistics(); jitterBufferDelayPackets = aggregateJitterBufferDelayPackets.createContributingStatistics(); nbDiscarded = aggregateNbDiscarded.createContributingStatistics(); nbDiscardedFull = aggregateNbDiscardedFull.createContributingStatistics(); nbDiscardedLate = aggregateNbDiscardedLate.createContributingStatistics(); nbDiscardedReset = aggregateNbDiscardedReset.createContributingStatistics(); nbDiscardedShrink = aggregateNbDiscardedShrink.createContributingStatistics(); nbFec = aggregateNbFec.createContributingStatistics(); nbPackets = aggregateNbPackets.createContributingStatistics(); nbPacketsLost = aggregateNbPacketsLost.createContributingStatistics(); nbReceivedBytes = aggregateNbReceivedBytes.createContributingStatistics(); nbSentBytes = aggregateNbSentBytes.createContributingStatistics(); packetQueueCountPackets = aggregatePacketQueueCountPackets.createContributingStatistics(); packetQueueSize = aggregatePacketQueueSize.createContributingStatistics(); percentDiscarded = aggregatePercentDiscarded.createContributingStatistics(); rttMs = aggregateRttMs.createContributingStatistics(); uploadJitterMs = aggregateUploadJitterMs.createContributingStatistics(); uploadPercentLoss = aggregateUploadPercentLoss.createContributingStatistics(); uploadRateKiloBitPerSec = aggregateUploadRateKiloBitPerSec.createContributingStatistics(); } /** * Get the Max of all the stats that are watched, for all the stats added with add() since the * last call to clear() in JSON. * * @return The Max of all the stats since last clear() in JSON. 
*/ public String getMaxJSON() { String str = String.format( FakeUserStats.jsonMediaStreamStatsTemplate, -1, // ssrc not needed here downloadJitterMs.getMax(), downloadPercentLoss.getMax(), downloadRateKiloBitPerSec.getMax(), jitterBufferDelayMs.getMax(), jitterBufferDelayPackets.getMax(), nbDiscarded.getMax(), nbDiscardedFull.getMax(), nbDiscardedLate.getMax(), nbDiscardedReset.getMax(), nbDiscardedShrink.getMax(), nbFec.getMax(), nbPackets.getMax(), nbPacketsLost.getMax(), nbReceivedBytes.getMax(), nbSentBytes.getMax(), packetQueueCountPackets.getMax(), packetQueueSize.getMax(), percentDiscarded.getMax(), rttMs.getMax(), uploadJitterMs.getMax(), uploadPercentLoss.getMax(), uploadRateKiloBitPerSec.getMax()); return str; } /** * Get the Mean of all the stats that are watched, for all the stats added with add() between * the latest call to clear() and now (in JSON). * * @return The Mean of all the stats since last clear() in JSON. */ public String getMeanJSON() { String str = String.format( FakeUserStats.jsonMediaStreamStatsTemplate, -1, // ssrc not needed here downloadJitterMs.getMean(), downloadPercentLoss.getMean(), downloadRateKiloBitPerSec.getMean(), jitterBufferDelayMs.getMean(), jitterBufferDelayPackets.getMean(), nbDiscarded.getMean(), nbDiscardedFull.getMean(), nbDiscardedLate.getMean(), nbDiscardedReset.getMean(), nbDiscardedShrink.getMean(), nbFec.getMean(), nbPackets.getMean(), nbPacketsLost.getMean(), nbReceivedBytes.getMean(), nbSentBytes.getMean(), packetQueueCountPackets.getMean(), packetQueueSize.getMean(), percentDiscarded.getMean(), rttMs.getMean(), uploadJitterMs.getMean(), uploadPercentLoss.getMean(), uploadRateKiloBitPerSec.getMean()); return str; } /** * Get the Min of all the stats that are watched, for all the stats added with add() since the * last call to clear() in JSON. * * @return The Min of all the stats since last clear() in JSON. */ public String getMinJSON() { String str = String.format( FakeUserStats.jsonMediaStreamStatsTemplate, -1, // ssrc not needed here downloadJitterMs.getMin(), downloadPercentLoss.getMin(), downloadRateKiloBitPerSec.getMin(), jitterBufferDelayMs.getMin(), jitterBufferDelayPackets.getMin(), nbDiscarded.getMin(), nbDiscardedFull.getMin(), nbDiscardedLate.getMin(), nbDiscardedReset.getMin(), nbDiscardedShrink.getMin(), nbFec.getMin(), nbPackets.getMin(), nbPacketsLost.getMin(), nbReceivedBytes.getMin(), nbSentBytes.getMin(), packetQueueCountPackets.getMin(), packetQueueSize.getMin(), percentDiscarded.getMin(), rttMs.getMin(), uploadJitterMs.getMin(), uploadPercentLoss.getMin(), uploadRateKiloBitPerSec.getMin()); return str; } /** * Get the Standard Deviation of all the stats that are watched, for all the stats added with * add() since the last call to clear() in JSON. * * @return The Standard Deviation of all the stats since last clear() in JSON. 
*/ public String getStandardDeviationJSON() { String str = String.format( FakeUserStats.jsonMediaStreamStatsTemplate, -1, // ssrc not needed here downloadJitterMs.getStandardDeviation(), downloadPercentLoss.getStandardDeviation(), downloadRateKiloBitPerSec.getStandardDeviation(), jitterBufferDelayMs.getStandardDeviation(), jitterBufferDelayPackets.getStandardDeviation(), nbDiscarded.getStandardDeviation(), nbDiscardedFull.getStandardDeviation(), nbDiscardedLate.getStandardDeviation(), nbDiscardedReset.getStandardDeviation(), nbDiscardedShrink.getStandardDeviation(), nbFec.getStandardDeviation(), nbPackets.getStandardDeviation(), nbPacketsLost.getStandardDeviation(), nbReceivedBytes.getStandardDeviation(), nbSentBytes.getStandardDeviation(), packetQueueCountPackets.getStandardDeviation(), packetQueueSize.getStandardDeviation(), percentDiscarded.getStandardDeviation(), rttMs.getStandardDeviation(), uploadJitterMs.getStandardDeviation(), uploadPercentLoss.getStandardDeviation(), uploadRateKiloBitPerSec.getStandardDeviation()); return str; } /** * Get the Sum of all the stats that are watched, for all the stats added with add() since the * last call to clear() in JSON. * * @return The Sum of all the stats since last clear() in JSON. */ public String getSumJSON() { String str = String.format( FakeUserStats.jsonMediaStreamStatsTemplate, -1, // ssrc not needed here downloadJitterMs.getSum(), downloadPercentLoss.getSum(), downloadRateKiloBitPerSec.getSum(), jitterBufferDelayMs.getSum(), jitterBufferDelayPackets.getSum(), nbDiscarded.getSum(), nbDiscardedFull.getSum(), nbDiscardedLate.getSum(), nbDiscardedReset.getSum(), nbDiscardedShrink.getSum(), nbFec.getSum(), nbPackets.getSum(), nbPacketsLost.getSum(), nbReceivedBytes.getSum(), nbSentBytes.getSum(), packetQueueCountPackets.getSum(), packetQueueSize.getSum(), percentDiscarded.getSum(), rttMs.getSum(), uploadJitterMs.getSum(), uploadPercentLoss.getSum(), uploadRateKiloBitPerSec.getSum()); return str; } /** * Get the Variance of all the stats that are watched, for all the stats added with add() since * the last call to clear() (in JSON). * * @return The Variance of all the stats since last clear() in JSON. */ public String getVarianceJSON() { String str = String.format( FakeUserStats.jsonMediaStreamStatsTemplate, -1, // ssrc not needed here downloadJitterMs.getVariance(), downloadPercentLoss.getVariance(), downloadRateKiloBitPerSec.getVariance(), jitterBufferDelayMs.getVariance(), jitterBufferDelayPackets.getVariance(), nbDiscarded.getVariance(), nbDiscardedFull.getVariance(), nbDiscardedLate.getVariance(), nbDiscardedReset.getVariance(), nbDiscardedShrink.getVariance(), nbFec.getVariance(), nbPackets.getVariance(), nbPacketsLost.getVariance(), nbReceivedBytes.getVariance(), nbSentBytes.getVariance(), packetQueueCountPackets.getVariance(), packetQueueSize.getVariance(), percentDiscarded.getVariance(), rttMs.getVariance(), uploadJitterMs.getVariance(), uploadPercentLoss.getVariance(), uploadRateKiloBitPerSec.getVariance()); return str; } /** * Get the Max of all the stats that are watched, for all the stats added with add() since the * creation of this <tt>HammerSummaryStats</tt> * * @return The Max of all the stats in JSON. 
*/ public String getAggregateMaxJSON() { String str = String.format( FakeUserStats.jsonMediaStreamStatsTemplate, -1, // ssrc not needed here aggregateDownloadJitterMs.getMax(), aggregateDownloadPercentLoss.getMax(), aggregateDownloadRateKiloBitPerSec.getMax(), aggregateJitterBufferDelayMs.getMax(), aggregateJitterBufferDelayPackets.getMax(), aggregateNbDiscarded.getMax(), aggregateNbDiscardedFull.getMax(), aggregateNbDiscardedLate.getMax(), aggregateNbDiscardedReset.getMax(), aggregateNbDiscardedShrink.getMax(), aggregateNbFec.getMax(), aggregateNbPackets.getMax(), aggregateNbPacketsLost.getMax(), aggregateNbReceivedBytes.getMax(), aggregateNbSentBytes.getMax(), aggregatePacketQueueCountPackets.getMax(), aggregatePacketQueueSize.getMax(), aggregatePercentDiscarded.getMax(), aggregateRttMs.getMax(), aggregateUploadJitterMs.getMax(), aggregateUploadPercentLoss.getMax(), aggregateUploadRateKiloBitPerSec.getMax()); return str; } /** * Get the Mean of all the stats that are watched, for all the stats added with add() since the * creation of this <tt>HammerSummaryStats</tt> * * @return The Mean of all the stats in JSON. */ public String getAggregateMeanJSON() { String str = String.format( FakeUserStats.jsonMediaStreamStatsTemplate, -1, // ssrc not needed here aggregateDownloadJitterMs.getMean(), aggregateDownloadPercentLoss.getMean(), aggregateDownloadRateKiloBitPerSec.getMean(), aggregateJitterBufferDelayMs.getMean(), aggregateJitterBufferDelayPackets.getMean(), aggregateNbDiscarded.getMean(), aggregateNbDiscardedFull.getMean(), aggregateNbDiscardedLate.getMean(), aggregateNbDiscardedReset.getMean(), aggregateNbDiscardedShrink.getMean(), aggregateNbFec.getMean(), aggregateNbPackets.getMean(), aggregateNbPacketsLost.getMean(), aggregateNbReceivedBytes.getMean(), aggregateNbSentBytes.getMean(), aggregatePacketQueueCountPackets.getMean(), aggregatePacketQueueSize.getMean(), aggregatePercentDiscarded.getMean(), aggregateRttMs.getMean(), aggregateUploadJitterMs.getMean(), aggregateUploadPercentLoss.getMean(), aggregateUploadRateKiloBitPerSec.getMean()); return str; } /** * Get the Min of all the stats that are watched, for all the stats added with add() since the * creation of this <tt>HammerSummaryStats</tt> * * @return The Min of all the stats in JSON. */ public String getAggregateMinJSON() { String str = String.format( FakeUserStats.jsonMediaStreamStatsTemplate, -1, // ssrc not needed here aggregateDownloadJitterMs.getMin(), aggregateDownloadPercentLoss.getMin(), aggregateDownloadRateKiloBitPerSec.getMin(), aggregateJitterBufferDelayMs.getMin(), aggregateJitterBufferDelayPackets.getMin(), aggregateNbDiscarded.getMin(), aggregateNbDiscardedFull.getMin(), aggregateNbDiscardedLate.getMin(), aggregateNbDiscardedReset.getMin(), aggregateNbDiscardedShrink.getMin(), aggregateNbFec.getMin(), aggregateNbPackets.getMin(), aggregateNbPacketsLost.getMin(), aggregateNbReceivedBytes.getMin(), aggregateNbSentBytes.getMin(), aggregatePacketQueueCountPackets.getMin(), aggregatePacketQueueSize.getMin(), aggregatePercentDiscarded.getMin(), aggregateRttMs.getMin(), aggregateUploadJitterMs.getMin(), aggregateUploadPercentLoss.getMin(), aggregateUploadRateKiloBitPerSec.getMin()); return str; } /** * Get the Standard Deviation of all the stats that are watched, for all the stats added with * add() since the creation of this <tt>HammerSummaryStats</tt> * * @return The Standard Deviation of all the stats in JSON.
*/ public String getAggregateStandardDeviationJSON() { String str = String.format( FakeUserStats.jsonMediaStreamStatsTemplate, -1, // ssrc not needed here aggregateDownloadJitterMs.getStandardDeviation(), aggregateDownloadPercentLoss.getStandardDeviation(), aggregateDownloadRateKiloBitPerSec.getStandardDeviation(), aggregateJitterBufferDelayMs.getStandardDeviation(), aggregateJitterBufferDelayPackets.getStandardDeviation(), aggregateNbDiscarded.getStandardDeviation(), aggregateNbDiscardedFull.getStandardDeviation(), aggregateNbDiscardedLate.getStandardDeviation(), aggregateNbDiscardedReset.getStandardDeviation(), aggregateNbDiscardedShrink.getStandardDeviation(), aggregateNbFec.getStandardDeviation(), aggregateNbPackets.getStandardDeviation(), aggregateNbPacketsLost.getStandardDeviation(), aggregateNbReceivedBytes.getStandardDeviation(), aggregateNbSentBytes.getStandardDeviation(), aggregatePacketQueueCountPackets.getStandardDeviation(), aggregatePacketQueueSize.getStandardDeviation(), aggregatePercentDiscarded.getStandardDeviation(), aggregateRttMs.getStandardDeviation(), aggregateUploadJitterMs.getStandardDeviation(), aggregateUploadPercentLoss.getStandardDeviation(), aggregateUploadRateKiloBitPerSec.getStandardDeviation()); return str; } /** * Get the Sum of all the stats that are watched, for all the stats added with add() since the * creation of this <tt>HammerSummaryStats</tt> * * @return The Sum of all the stats in JSON. */ public String getAggregateSumJSON() { String str = String.format( FakeUserStats.jsonMediaStreamStatsTemplate, -1, // ssrc not needed here aggregateDownloadJitterMs.getSum(), aggregateDownloadPercentLoss.getSum(), aggregateDownloadRateKiloBitPerSec.getSum(), aggregateJitterBufferDelayMs.getSum(), aggregateJitterBufferDelayPackets.getSum(), aggregateNbDiscarded.getSum(), aggregateNbDiscardedFull.getSum(), aggregateNbDiscardedLate.getSum(), aggregateNbDiscardedReset.getSum(), aggregateNbDiscardedShrink.getSum(), aggregateNbFec.getSum(), aggregateNbPackets.getSum(), aggregateNbPacketsLost.getSum(), aggregateNbReceivedBytes.getSum(), aggregateNbSentBytes.getSum(), aggregatePacketQueueCountPackets.getSum(), aggregatePacketQueueSize.getSum(), aggregatePercentDiscarded.getSum(), aggregateRttMs.getSum(), aggregateUploadJitterMs.getSum(), aggregateUploadPercentLoss.getSum(), aggregateUploadRateKiloBitPerSec.getSum()); return str; } /** * Get the Variance of all the stats that are watched, for all the stats added with add() since * the creation of this <tt>HammerSummaryStats</tt> * * @return The Variance of all the stats in JSON.
*/ public String getAggregateVarianceJSON() { String str = String.format( FakeUserStats.jsonMediaStreamStatsTemplate, -1, // ssrc not needed here aggregateDownloadJitterMs.getVariance(), aggregateDownloadPercentLoss.getVariance(), aggregateDownloadRateKiloBitPerSec.getVariance(), aggregateJitterBufferDelayMs.getVariance(), aggregateJitterBufferDelayPackets.getVariance(), aggregateNbDiscarded.getVariance(), aggregateNbDiscardedFull.getVariance(), aggregateNbDiscardedLate.getVariance(), aggregateNbDiscardedReset.getVariance(), aggregateNbDiscardedShrink.getVariance(), aggregateNbFec.getVariance(), aggregateNbPackets.getVariance(), aggregateNbPacketsLost.getVariance(), aggregateNbReceivedBytes.getVariance(), aggregateNbSentBytes.getVariance(), aggregatePacketQueueCountPackets.getVariance(), aggregatePacketQueueSize.getVariance(), aggregatePercentDiscarded.getVariance(), aggregateRttMs.getVariance(), aggregateUploadJitterMs.getVariance(), aggregateUploadPercentLoss.getVariance(), aggregateUploadRateKiloBitPerSec.getVariance()); return str; } } }
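/*
 * A minimal, runnable sketch of the Apache Commons Math pattern that
 * HammerSummaryStats relies on, assuming commons-math3 is on the classpath:
 * every SummaryStatistics obtained from
 * AggregateSummaryStatistics.createContributingStatistics() automatically
 * feeds the aggregate, so per-interval summaries (getMaxJSON() etc.) and
 * overall summaries (getAggregateMaxJSON() etc.) can be produced from the
 * same add() calls. The demo class name is illustrative.
 */
import org.apache.commons.math3.stat.descriptive.AggregateSummaryStatistics;
import org.apache.commons.math3.stat.descriptive.SummaryStatistics;

public class AggregatePatternDemo {
    public static void main(String[] args) {
        AggregateSummaryStatistics overall = new AggregateSummaryStatistics();

        // First polling interval: values land in both the interval and the aggregate.
        SummaryStatistics interval = overall.createContributingStatistics();
        interval.addValue(10.0);
        interval.addValue(20.0);
        System.out.println("interval 1 mean = " + interval.getMean()); // 15.0

        // The equivalent of HammerSummaryStats.clear(): start a fresh interval,
        // keeping everything recorded so far in the aggregate.
        interval = overall.createContributingStatistics();
        interval.addValue(30.0);
        System.out.println("interval 2 mean = " + interval.getMean()); // 30.0

        // The aggregate has seen the values of every contributing interval.
        System.out.println("overall mean = " + overall.getMean()); // 20.0
    }
}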
/** * Represents a conference in the terms of Jitsi Videobridge. * * @author Lyubomir Marinov * @author Boris Grozev * @author Hristo Terezov * @author George Politis */ public class Conference extends PropertyChangeNotifier implements PropertyChangeListener { /** * The name of the <tt>Conference</tt> property <tt>endpoints</tt> which lists the * <tt>Endpoint</tt>s participating in/contributing to the <tt>Conference</tt>. */ public static final String ENDPOINTS_PROPERTY_NAME = Conference.class.getName() + ".endpoints"; /** * The <tt>Logger</tt> used by the <tt>Conference</tt> class and its instances to print debug * information. */ private static final Logger logger = Logger.getLogger(Conference.class); /** The <tt>Content</tt>s of this <tt>Conference</tt>. */ private final List<Content> contents = new LinkedList<>(); /** * An instance used to save information about the endpoints of this <tt>Conference</tt>, when * media recording is enabled. */ private EndpointRecorder endpointRecorder = null; /** The <tt>Endpoint</tt>s participating in this <tt>Conference</tt>. */ private final List<WeakReference<Endpoint>> endpoints = new LinkedList<>(); /** * The indicator which determines whether {@link #expire()} has been called on this * <tt>Conference</tt>. */ private boolean expired = false; /** * The JID of the conference focus who has initialized this instance and from whom requests to * manage this instance must come or they will be ignored. If a <tt>null</tt> value is assigned, * we don't care who modifies the conference. */ private final String focus; /** The (unique) identifier/ID of this instance. */ private final String id; /** The world-readable name of this instance, if any. */ private String name; /** * The time in milliseconds of the last activity related to this <tt>Conference</tt>. In the time * interval between the last activity and now, this <tt>Conference</tt> is considered inactive. */ private long lastActivityTime; /** * If {@link #focus} is <tt>null</tt> the value of the last known focus is stored in this member. */ private String lastKnownFocus; /** * The <tt>PropertyChangeListener</tt> which listens to <tt>PropertyChangeEvent</tt>s on behalf of * this instance while referencing it by a <tt>WeakReference</tt>. */ private final PropertyChangeListener propertyChangeListener = new WeakReferencePropertyChangeListener(this); /** * The <tt>RecorderEventHandler</tt> which is used to handle recording events for this * <tt>Conference</tt>. */ private RecorderEventHandlerImpl recorderEventHandler = null; /** Whether media recording is currently enabled for this <tt>Conference</tt>. */ private boolean recording = false; /** * The directory into which files associated with media recordings for this <tt>Conference</tt> * will be stored. */ private String recordingDirectory = null; /** * The path to the directory into which files associated with media recordings for this * <tt>Conference</tt> will be stored. */ private String recordingPath = null; /** The speech activity (representation) of the <tt>Endpoint</tt>s of this <tt>Conference</tt>. */ private final ConferenceSpeechActivity speechActivity; /** * Maps an ID of a channel-bundle to the <tt>TransportManager</tt> instance responsible for its * transport. */ private final Map<String, IceUdpTransportManager> transportManagers = new HashMap<>(); /** The <tt>Videobridge</tt> which has initialized this <tt>Conference</tt>.
*/ private final Videobridge videobridge; /** * The <tt>WebRtcDataStreamListener</tt> which listens to the <tt>SctpConnection</tt>s of the * <tt>Endpoint</tt>s participating in this multipoint conference in order to detect when they are * ready (to fire initial events such as the current dominant speaker in this multipoint * conference). */ private final WebRtcDataStreamListener webRtcDataStreamListener = new WebRtcDataStreamAdapter() { /** {@inheritDoc} */ @Override public void onSctpConnectionReady(SctpConnection source) { Conference.this.sctpConnectionReady(source); } }; /** * Initializes a new <tt>Conference</tt> instance which is to represent a conference in the terms * of Jitsi Videobridge which has a specific (unique) ID and is managed by a conference focus with * a specific JID. * * @param videobridge the <tt>Videobridge</tt> on which the new <tt>Conference</tt> instance is to * be initialized * @param id the (unique) ID of the new instance to be initialized * @param focus the JID of the conference focus who has requested the initialization of the new * instance and from whom further/future requests to manage the new instance must come or they * will be ignored. Pass <tt>null</tt> to override this safety check. */ public Conference(Videobridge videobridge, String id, String focus) { if (videobridge == null) throw new NullPointerException("videobridge"); if (id == null) throw new NullPointerException("id"); this.videobridge = videobridge; this.id = id; this.focus = focus; this.lastKnownFocus = focus; speechActivity = new ConferenceSpeechActivity(this); speechActivity.addPropertyChangeListener(propertyChangeListener); EventAdmin eventAdmin = videobridge.getEventAdmin(); if (eventAdmin != null) eventAdmin.sendEvent(EventFactory.conferenceCreated(this)); } /** * Used to send a message to a subset of endpoints in the call, the primary use case being a * message that has originated from an endpoint (as opposed to a message originating from the * bridge and being sent to all endpoints in the call; for that, see * broadcastMessageOnDataChannels below). * * @param msg the message to send * @param endpoints the <tt>Endpoint</tt>s to which the message will be sent */ public void sendMessageOnDataChannels(String msg, List<Endpoint> endpoints) { for (Endpoint endpoint : endpoints) { try { endpoint.sendMessageOnDataChannel(msg); } catch (IOException e) { logger.error("Failed to send message on data channel.", e); } } } /** * Broadcasts a string message to all participants over the default data channel. * * @param msg the message to be advertised across conference peers. */ private void broadcastMessageOnDataChannels(String msg) { sendMessageOnDataChannels(msg, getEndpoints()); } /** * Checks whether <tt>path</tt> is a valid directory for recording (creates it if necessary). * * @param path the path to the directory to check. * @return <tt>true</tt> if the directory <tt>path</tt> can be used for media recording, * <tt>false</tt> otherwise. */ private boolean checkRecordingDirectory(String path) { if (path == null || "".equals(path)) return false; File dir = new File(path); if (!dir.exists()) { dir.mkdir(); if (!dir.exists()) return false; } if (!dir.isDirectory() || !dir.canWrite()) return false; return true; } /** * Closes the given <tt>TransportManager</tt>, removes it from {@link #transportManagers} of this * <tt>Conference</tt> and removes the corresponding channel bundle.
*/ void closeTransportManager(TransportManager transportManager) { synchronized (transportManagers) { for (Iterator<IceUdpTransportManager> i = transportManagers.values().iterator(); i.hasNext(); ) { if (i.next() == transportManager) { i.remove(); // Presumably, we have a single association for // transportManager. break; } } // Close manager try { transportManager.close(); } catch (Throwable t) { logger.warn( "Failed to close an IceUdpTransportManager of" + " conference " + getID() + "!", t); // The whole point of explicitly closing the // transportManagers of this Conference is to prevent memory // leaks. Hence, it does not make sense to possibly leave // TransportManagers open because a TransportManager has // failed to close. if (t instanceof InterruptedException) Thread.currentThread().interrupt(); else if (t instanceof ThreadDeath) throw (ThreadDeath) t; } } } /** Closes the {@link #transportManagers} of this <tt>Conference</tt>. */ private void closeTransportManagers() { synchronized (transportManagers) { for (Iterator<IceUdpTransportManager> i = transportManagers.values().iterator(); i.hasNext(); ) { IceUdpTransportManager transportManager = i.next(); i.remove(); closeTransportManager(transportManager); } } } /** * Initializes a new <tt>String</tt> to be sent over an <tt>SctpConnection</tt> in order to notify * an <tt>Endpoint</tt> that the dominant speaker in this multipoint conference has changed to a * specific <tt>Endpoint</tt>. * * @param dominantSpeaker the dominant speaker in this multipoint conference * @return a new <tt>String</tt> to be sent over an <tt>SctpConnection</tt> in order to notify an * <tt>Endpoint</tt> that the dominant speaker in this multipoint conference has changed to * <tt>dominantSpeaker</tt> */ private String createDominantSpeakerEndpointChangeEvent(Endpoint dominantSpeaker) { return "{\"colibriClass\":\"DominantSpeakerEndpointChangeEvent\"," + "\"dominantSpeakerEndpoint\":\"" + JSONValue.escape(dominantSpeaker.getID()) + "\"}"; } /** * Adds the channel-bundles of this <tt>Conference</tt> as * <tt>ColibriConferenceIQ.ChannelBundle</tt> instances in <tt>iq</tt>. * * @param iq the <tt>ColibriConferenceIQ</tt> in which to describe the channel-bundles. */ void describeChannelBundles(ColibriConferenceIQ iq) { synchronized (transportManagers) { for (Map.Entry<String, IceUdpTransportManager> entry : transportManagers.entrySet()) { ColibriConferenceIQ.ChannelBundle responseBundleIQ = new ColibriConferenceIQ.ChannelBundle(entry.getKey()); entry.getValue().describe(responseBundleIQ); iq.addChannelBundle(responseBundleIQ); } } } /** * Sets the values of the properties of a specific <tt>ColibriConferenceIQ</tt> to the values of * the respective properties of this instance. Thus, the specified <tt>iq</tt> may be thought of * as a description of this instance. * * <p><b>Note</b>: The copying of the values is deep i.e. the <tt>Content</tt>s of this instance * are described in the specified <tt>iq</tt>.
* * @param iq the <tt>ColibriConferenceIQ</tt> to set the values of the properties of this instance * on */ public void describeDeep(ColibriConferenceIQ iq) { describeShallow(iq); if (isRecording()) { ColibriConferenceIQ.Recording recordingIQ = new ColibriConferenceIQ.Recording(State.ON.toString()); recordingIQ.setDirectory(getRecordingDirectory()); iq.setRecording(recordingIQ); } for (Content content : getContents()) { ColibriConferenceIQ.Content contentIQ = iq.getOrCreateContent(content.getName()); for (Channel channel : content.getChannels()) { if (channel instanceof SctpConnection) { ColibriConferenceIQ.SctpConnection sctpConnectionIQ = new ColibriConferenceIQ.SctpConnection(); channel.describe(sctpConnectionIQ); contentIQ.addSctpConnection(sctpConnectionIQ); } else { ColibriConferenceIQ.Channel channelIQ = new ColibriConferenceIQ.Channel(); channel.describe(channelIQ); contentIQ.addChannel(channelIQ); } } } } /** * Sets the values of the properties of a specific <tt>ColibriConferenceIQ</tt> to the values of * the respective properties of this instance. Thus, the specified <tt>iq</tt> may be thought of * as a description of this instance. * * <p><b>Note</b>: The copying of the values is shallow i.e. the <tt>Content</tt>s of this * instance are not described in the specified <tt>iq</tt>. * * @param iq the <tt>ColibriConferenceIQ</tt> to set the values of the properties of this instance * on */ public void describeShallow(ColibriConferenceIQ iq) { iq.setID(getID()); iq.setName(getName()); } /** * Notifies this instance that {@link #speechActivity} has identified a speaker switch event in * this multipoint conference and there is now a new dominant speaker. */ private void dominantSpeakerChanged() { Endpoint dominantSpeaker = speechActivity.getDominantEndpoint(); if (logger.isTraceEnabled()) { logger.trace( "The dominant speaker in conference " + getID() + " is now the endpoint " + ((dominantSpeaker == null) ? "(null)" : dominantSpeaker.getID()) + "."); } if (dominantSpeaker != null) { broadcastMessageOnDataChannels(createDominantSpeakerEndpointChangeEvent(dominantSpeaker)); if (isRecording() && (recorderEventHandler != null)) recorderEventHandler.dominantSpeakerChanged(dominantSpeaker); } } /** * Notifies this instance that there was a change in the value of a property of an * <tt>Endpoint</tt> participating in this multipoint conference. * * @param endpoint the <tt>Endpoint</tt> which is the source of the event/notification and is * participating in this multipoint conference * @param ev a <tt>PropertyChangeEvent</tt> which specifies the source of the event/notification, * the name of the property and the old and new values of that property */ private void endpointPropertyChange(Endpoint endpoint, PropertyChangeEvent ev) { String propertyName = ev.getPropertyName(); boolean maybeRemoveEndpoint; if (Endpoint.SCTP_CONNECTION_PROPERTY_NAME.equals(propertyName)) { // The SctpConnection of/associated with an Endpoint has changed. We // may want to fire initial events over that SctpConnection (as soon // as it is ready). SctpConnection oldValue = (SctpConnection) ev.getOldValue(); SctpConnection newValue = (SctpConnection) ev.getNewValue(); endpointSctpConnectionChanged(endpoint, oldValue, newValue); // The SctpConnection may have expired. maybeRemoveEndpoint = (newValue == null); } else if (Endpoint.CHANNELS_PROPERTY_NAME.equals(propertyName)) { // An RtpChannel may have expired. 
maybeRemoveEndpoint = true; } else { maybeRemoveEndpoint = false; } if (maybeRemoveEndpoint) { // It looks like there is a chance that the Endpoint may have // expired. Endpoints are held by this Conference via WeakReferences // but WeakReferences are unpredictable. We have functionality // though which could benefit from discovering that an Endpoint has // expired as quickly as possible (e.g. ConferenceSpeechActivity). // Consequently, try to expedite the removal of expired Endpoints. if (endpoint.getSctpConnection() == null && endpoint.getChannelCount(null) == 0) { removeEndpoint(endpoint); } } } /** * Notifies this instance that the <tt>SctpConnection</tt> of/associated with a specific * <tt>Endpoint</tt> participating in this <tt>Conference</tt> has changed. * * @param endpoint the <tt>Endpoint</tt> participating in this <tt>Conference</tt> which has had * its (associated) <tt>SctpConnection</tt> changed */ private void endpointSctpConnectionChanged( Endpoint endpoint, SctpConnection oldValue, SctpConnection newValue) { // We want to fire initial events (e.g. dominant speaker) over the // SctpConnection as soon as it is ready. if (oldValue != null) { oldValue.removeChannelListener(webRtcDataStreamListener); } if (newValue != null) { newValue.addChannelListener(webRtcDataStreamListener); // The SctpConnection may itself be ready already. If this is the // case, then it has now become ready for this Conference. if (newValue.isReady()) sctpConnectionReady(newValue); } } /** * Expires this <tt>Conference</tt>, its <tt>Content</tt>s and their respective <tt>Channel</tt>s. * Releases the resources acquired by this instance throughout its life time and prepares it to be * garbage collected. */ public void expire() { synchronized (this) { if (expired) return; else expired = true; } EventAdmin eventAdmin = videobridge.getEventAdmin(); if (eventAdmin != null) eventAdmin.sendEvent(EventFactory.conferenceExpired(this)); setRecording(false); if (recorderEventHandler != null) { recorderEventHandler.close(); recorderEventHandler = null; } Videobridge videobridge = getVideobridge(); try { videobridge.expireConference(this); } finally { // Expire the Contents of this Conference. for (Content content : getContents()) { try { content.expire(); } catch (Throwable t) { logger.warn( "Failed to expire content " + content.getName() + " of conference " + getID() + "!", t); if (t instanceof InterruptedException) Thread.currentThread().interrupt(); else if (t instanceof ThreadDeath) throw (ThreadDeath) t; } } // Close the transportManagers of this Conference. Normally, there // will be no TransportManager left to close at this point because // all Channels have expired and the last Channel to be removed from // a TransportManager closes the TransportManager. However, a // Channel may have expired before it has learned of its // TransportManager and then the TransportManager will not close. closeTransportManagers(); if (logger.isInfoEnabled()) { logger.info( "Expired conference " + getID() + ". " + videobridge.getConferenceCountString()); } } } /** * Expires a specific <tt>Content</tt> of this <tt>Conference</tt> (i.e. if the specified * <tt>content</tt> is not in the list of <tt>Content</tt>s of this <tt>Conference</tt>, does * nothing). 
* * @param content the <tt>Content</tt> to be expired by this <tt>Conference</tt> */ public void expireContent(Content content) { boolean expireContent; synchronized (contents) { if (contents.contains(content)) { contents.remove(content); expireContent = true; } else expireContent = false; } if (expireContent) content.expire(); } /** * Finds a <tt>Channel</tt> of this <tt>Conference</tt> which receives a specific SSRC and has * a specific <tt>MediaType</tt>. * * @param receiveSSRC the SSRC of a received RTP stream whose receiving <tt>Channel</tt> in this * <tt>Conference</tt> is to be found * @param mediaType the <tt>MediaType</tt> of the <tt>Channel</tt> to be found * @return the <tt>Channel</tt> in this <tt>Conference</tt> which receives the specified * <tt>ssrc</tt> and has the specified <tt>mediaType</tt>; otherwise, <tt>null</tt> */ public Channel findChannelByReceiveSSRC(long receiveSSRC, MediaType mediaType) { for (Content content : getContents()) { if (mediaType.equals(content.getMediaType())) { Channel channel = content.findChannelByReceiveSSRC(receiveSSRC); if (channel != null) return channel; } } return null; } /** * Finds an <tt>Endpoint</tt> of this <tt>Conference</tt> which sends an RTP stream with a * specific SSRC and with a specific <tt>MediaType</tt>. * * @param receiveSSRC the SSRC of an RTP stream received by this <tt>Conference</tt> whose sending * <tt>Endpoint</tt> is to be found * @param mediaType the <tt>MediaType</tt> of the RTP stream identified by the specified * <tt>ssrc</tt> * @return the <tt>Endpoint</tt> of this <tt>Conference</tt> which sends an RTP stream with the * specified <tt>ssrc</tt> and with the specified <tt>mediaType</tt>; otherwise, <tt>null</tt> */ Endpoint findEndpointByReceiveSSRC(long receiveSSRC, MediaType mediaType) { Channel channel = findChannelByReceiveSSRC(receiveSSRC, mediaType); return (channel == null) ? null : channel.getEndpoint(); } /** * Returns the OSGi <tt>BundleContext</tt> in which this Conference is executing. * * @return the OSGi <tt>BundleContext</tt> in which the Conference is executing. */ public BundleContext getBundleContext() { return getVideobridge().getBundleContext(); } /** * Gets the <tt>Content</tt>s of this <tt>Conference</tt>. * * @return the <tt>Content</tt>s of this <tt>Conference</tt> */ public Content[] getContents() { synchronized (contents) { return contents.toArray(new Content[contents.size()]); } } /** * Gets an <tt>Endpoint</tt> participating in this <tt>Conference</tt> which has a specific * identifier/ID. * * @param id the identifier/ID of the <tt>Endpoint</tt> which is to be returned * @return an <tt>Endpoint</tt> participating in this <tt>Conference</tt> which has the specified * <tt>id</tt> or <tt>null</tt> */ public Endpoint getEndpoint(String id) { return getEndpoint(id, /* create */ false); } /** * Gets an <tt>Endpoint</tt> participating in this <tt>Conference</tt> which has a specific * identifier/ID. If an <tt>Endpoint</tt> participating in this <tt>Conference</tt> with the * specified <tt>id</tt> does not exist at the time the method is invoked, the method optionally * initializes a new <tt>Endpoint</tt> instance with the specified <tt>id</tt> and adds it to the * list of <tt>Endpoint</tt>s participating in this <tt>Conference</tt>.
* * @param id the identifier/ID of the <tt>Endpoint</tt> which is to be returned * @return an <tt>Endpoint</tt> participating in this <tt>Conference</tt> which has the specified * <tt>id</tt> or <tt>null</tt> if there is no such <tt>Endpoint</tt> and <tt>create</tt> * equals <tt>false</tt> */ private Endpoint getEndpoint(String id, boolean create) { Endpoint endpoint = null; boolean changed = false; synchronized (endpoints) { for (Iterator<WeakReference<Endpoint>> i = endpoints.iterator(); i.hasNext(); ) { Endpoint e = i.next().get(); if (e == null) { i.remove(); changed = true; } else if (e.getID().equals(id)) { endpoint = e; } } if (create && endpoint == null) { endpoint = new Endpoint(id, this); // The propertyChangeListener will weakly reference this // Conference and will unregister itself from the endpoint // sooner or later. endpoint.addPropertyChangeListener(propertyChangeListener); endpoints.add(new WeakReference<>(endpoint)); changed = true; EventAdmin eventAdmin = videobridge.getEventAdmin(); if (eventAdmin != null) eventAdmin.sendEvent(EventFactory.endpointCreated(endpoint)); } } if (changed) firePropertyChange(ENDPOINTS_PROPERTY_NAME, null, null); return endpoint; } /** * Returns the number of <tt>Endpoint</tt>s in this <tt>Conference</tt>. * * @return the number of <tt>Endpoint</tt>s in this <tt>Conference</tt>. */ public int getEndpointCount() { return getEndpoints().size(); } /** * Returns the <tt>EndpointRecorder</tt> instance used to save the endpoints information for this * <tt>Conference</tt>. Creates an instance if none exists. * * @return the <tt>EndpointRecorder</tt> instance used to save the endpoints information for this * <tt>Conference</tt>. */ private EndpointRecorder getEndpointRecorder() { if (endpointRecorder == null) { try { endpointRecorder = new EndpointRecorder(getRecordingPath() + "/endpoints.json"); } catch (IOException ioe) { logger.warn("Could not create EndpointRecorder. " + ioe); } } return endpointRecorder; } /** * Gets the <tt>Endpoint</tt>s participating in/contributing to this <tt>Conference</tt>. * * @return the <tt>Endpoint</tt>s participating in/contributing to this <tt>Conference</tt> */ public List<Endpoint> getEndpoints() { List<Endpoint> endpoints; boolean changed = false; synchronized (this.endpoints) { endpoints = new ArrayList<>(this.endpoints.size()); for (Iterator<WeakReference<Endpoint>> i = this.endpoints.iterator(); i.hasNext(); ) { Endpoint endpoint = i.next().get(); if (endpoint == null) { i.remove(); changed = true; } else { endpoints.add(endpoint); } } } if (changed) firePropertyChange(ENDPOINTS_PROPERTY_NAME, null, null); return endpoints; } /** * Gets the JID of the conference focus who has initialized this instance and from whom requests * to manage this instance must come or they will be ignored. * * @return the JID of the conference focus who has initialized this instance and from whom * requests to manage this instance must come or they will be ignored */ public final String getFocus() { return focus; } /** * Gets the (unique) identifier/ID of this instance. * * @return the (unique) identifier/ID of this instance */ public final String getID() { return id; } /** * Gets the time in milliseconds of the last activity related to this <tt>Conference</tt>. * * @return the time in milliseconds of the last activity related to this <tt>Conference</tt> */ public long getLastActivityTime() { synchronized (this) { return lastActivityTime; } } /** * Returns the JID of the last known focus. * * @return the JID of the last known focus. 
*/ public String getLastKnownFocus() { return lastKnownFocus; } /** * Returns a <tt>MediaService</tt> implementation (if any). * * @return a <tt>MediaService</tt> implementation (if any) */ MediaService getMediaService() { MediaService mediaService = ServiceUtils.getService(getBundleContext(), MediaService.class); // TODO For an unknown reason, ServiceUtils.getService fails to // retrieve the MediaService implementation. In the form of a temporary // workaround, get it through LibJitsi. if (mediaService == null) mediaService = LibJitsi.getMediaService(); return mediaService; } /** * Gets a <tt>Content</tt> of this <tt>Conference</tt> which has a specific name. If a * <tt>Content</tt> of this <tt>Conference</tt> with the specified <tt>name</tt> does not exist at * the time the method is invoked, the method initializes a new <tt>Content</tt> instance with the * specified <tt>name</tt> and adds it to the list of <tt>Content</tt>s of this * <tt>Conference</tt>. * * @param name the name of the <tt>Content</tt> which is to be returned * @return a <tt>Content</tt> of this <tt>Conference</tt> which has the specified <tt>name</tt> */ public Content getOrCreateContent(String name) { Content content; synchronized (contents) { for (Content aContent : contents) { if (aContent.getName().equals(name)) { aContent.touch(); // It seems the content is still active. return aContent; } } content = new Content(this, name); if (isRecording()) { content.setRecording(true, getRecordingPath()); } contents.add(content); } if (logger.isInfoEnabled()) { /* * The method Videobridge.getConferenceCountString() is better * executed outside synchronized blocks in order to reduce the risk * of causing deadlocks. */ Videobridge videobridge = getVideobridge(); logger.info( "Created content " + name + " of conference " + getID() + ". " + videobridge.getConferenceCountString()); } return content; } /** * Gets an <tt>Endpoint</tt> participating in this <tt>Conference</tt> which has a specific * identifier/ID. If an <tt>Endpoint</tt> participating in this <tt>Conference</tt> with the * specified <tt>id</tt> does not exist at the time the method is invoked, the method initializes * a new <tt>Endpoint</tt> instance with the specified <tt>id</tt> and adds it to the list of * <tt>Endpoint</tt>s participating in this <tt>Conference</tt>. * * @param id the identifier/ID of the <tt>Endpoint</tt> which is to be returned * @return an <tt>Endpoint</tt> participating in this <tt>Conference</tt> which has the specified * <tt>id</tt> */ public Endpoint getOrCreateEndpoint(String id) { return getEndpoint(id, /* create */ true); } RecorderEventHandler getRecorderEventHandler() { if (recorderEventHandler == null) { Throwable t; try { recorderEventHandler = new RecorderEventHandlerImpl( this, getMediaService() .createRecorderEventHandlerJson(getRecordingPath() + "/metadata.json")); t = null; } catch (IOException ioe) { t = ioe; } catch (IllegalArgumentException iae) { t = iae; } if (t != null) logger.warn("Could not create RecorderEventHandler. " + t); } return recorderEventHandler; } /** * Returns the directory where the recording should be stored. * * @return the directory of the new recording */ String getRecordingDirectory() { if (this.recordingDirectory == null) { SimpleDateFormat dateFormat = new SimpleDateFormat("yyyy-MM-dd.HH-mm-ss."); this.recordingDirectory = dateFormat.format(new Date()) + getID() + ((name != null) ?
"_" + name : ""); } return this.recordingDirectory; } /** * Returns the path to the directory where the media recording related files should be saved, or * <tt>null</tt> if recording is not enabled in the configuration, or a recording path has not * been configured. * * @return the path to the directory where the media recording related files should be saved, or * <tt>null</tt> if recording is not enabled in the configuration, or a recording path has not * been configured. */ String getRecordingPath() { if (recordingPath == null) { ConfigurationService cfg = getVideobridge().getConfigurationService(); if (cfg != null) { boolean recordingIsEnabled = cfg.getBoolean(Videobridge.ENABLE_MEDIA_RECORDING_PNAME, false); if (recordingIsEnabled) { String path = cfg.getString(Videobridge.MEDIA_RECORDING_PATH_PNAME, null); if (path != null) { this.recordingPath = path + "/" + this.getRecordingDirectory(); } } } } return recordingPath; } /** * Gets the speech activity (representation) of the <tt>Endpoint</tt>s of this * <tt>Conference</tt>. * * @return the speech activity (representation) of the <tt>Endpoint</tt>s of this * <tt>Conference</tt> */ public ConferenceSpeechActivity getSpeechActivity() { return speechActivity; } /** * Returns, the <tt>TransportManager</tt> instance for the channel-bundle with ID * <tt>channelBundleId</tt>, or <tt>null</tt> if one doesn't exist. * * @param channelBundleId the ID of the channel-bundle for which to return the * <tt>TransportManager</tt>. * @return the <tt>TransportManager</tt> instance for the channel-bundle with ID * <tt>channelBundleId</tt>, or <tt>null</tt> if one doesn't exist. */ TransportManager getTransportManager(String channelBundleId) { return getTransportManager(channelBundleId, false); } /** * Returns, the <tt>TransportManager</tt> instance for the channel-bundle with ID * <tt>channelBundleId</tt>. If no instance exists and <tt>create</tt> is <tt>true</tt>, one will * be created. * * @param channelBundleId the ID of the channel-bundle for which to return the * <tt>TransportManager</tt>. * @param create whether to create a new instance, if one doesn't exist. * @return the <tt>TransportManager</tt> instance for the channel-bundle with ID * <tt>channelBundleId</tt>. */ IceUdpTransportManager getTransportManager(String channelBundleId, boolean create) { IceUdpTransportManager transportManager; synchronized (transportManagers) { transportManager = transportManagers.get(channelBundleId); if (transportManager == null && create && !isExpired()) { try { // FIXME: the initiator is hard-coded // We assume rtcp-mux when bundle is used, so we make only // one component. transportManager = new IceUdpTransportManager(this, true, 1); } catch (IOException ioe) { throw new UndeclaredThrowableException(ioe); } transportManagers.put(channelBundleId, transportManager); } } return transportManager; } /** * Gets the <tt>Videobridge</tt> which has initialized this <tt>Conference</tt>. * * @return the <tt>Videobridge</tt> which has initialized this <tt>Conference</tt> */ public final Videobridge getVideobridge() { return videobridge; } /** * Gets the indicator which determines whether this <tt>Conference</tt> has expired. * * @return <tt>true</tt> if this <tt>Conference</tt> has expired; otherwise, <tt>false</tt> */ public boolean isExpired() { // Conference starts with expired equal to false and the only assignment // to expired is to set it to true so there is no need to synchronize // the reading of expired. 
return expired; } /** * Checks whether media recording is currently enabled for this <tt>Conference</tt>. * * @return <tt>true</tt> if media recording is currently enabled for this <tt>Conference</tt>, * <tt>false</tt> otherwise. */ public boolean isRecording() { boolean recording = this.recording; // if one of the contents is not recording, stop all recording if (recording) { synchronized (contents) { for (Content content : contents) { MediaType mediaType = content.getMediaType(); if (!MediaType.VIDEO.equals(mediaType) && !MediaType.AUDIO.equals(mediaType)) continue; if (!content.isRecording()) recording = false; } } } if (this.recording != recording) setRecording(recording); return this.recording; } /** * Notifies this instance that there was a change in the value of a property of an object in which * this instance is interested. * * @param ev a <tt>PropertyChangeEvent</tt> which specifies the object of interest, the name of * the property and the old and new values of that property */ @Override public void propertyChange(PropertyChangeEvent ev) { Object source = ev.getSource(); if (isExpired()) { // An expired Conference is to be treated like a null Conference // i.e. it does not handle any PropertyChangeEvents. If possible, // make sure that no further PropertyChangeEvents will be delivered // to this Conference. if (source instanceof PropertyChangeNotifier) { ((PropertyChangeNotifier) source).removePropertyChangeListener(propertyChangeListener); } } else if (source == speechActivity) { speechActivityPropertyChange(ev); } else if (source instanceof Endpoint) { // We care about PropertyChangeEvents from Endpoint but only if the // Endpoint in question is still participating in this Conference. Endpoint endpoint = getEndpoint(((Endpoint) source).getID()); if (endpoint != null) endpointPropertyChange(endpoint, ev); } } /** * Removes a specific <tt>Endpoint</tt> instance from this list of <tt>Endpoint</tt>s * participating in this multipoint conference. * * @param endpoint the <tt>Endpoint</tt> to remove * @return <tt>true</tt> if the list of <tt>Endpoint</tt>s participating in this multipoint * conference changed as a result of the execution of the method; otherwise, <tt>false</tt> */ private boolean removeEndpoint(Endpoint endpoint) { boolean removed = false; synchronized (endpoints) { for (Iterator<WeakReference<Endpoint>> i = endpoints.iterator(); i.hasNext(); ) { Endpoint e = i.next().get(); if (e == null || e == endpoint) { i.remove(); removed = true; } } if (endpoint != null) { endpoint.expire(); } } if (removed) firePropertyChange(ENDPOINTS_PROPERTY_NAME, null, null); return removed; } /** * Notifies this instance that a specific <tt>SctpConnection</tt> has become ready, i.e. connected * to a/the remote peer and operational. * * @param sctpConnection the <tt>SctpConnection</tt> which has become ready and is the cause of * the method invocation */ private void sctpConnectionReady(SctpConnection sctpConnection) { /* * We want to fire initial events over the SctpConnection as soon as it * is ready, but we do not want to fire them multiple times, i.e. every * time the SctpConnection becomes ready. 
*/ sctpConnection.removeChannelListener(webRtcDataStreamListener); if (!isExpired() && !sctpConnection.isExpired() && sctpConnection.isReady()) { Endpoint endpoint = sctpConnection.getEndpoint(); if (endpoint != null) endpoint = getEndpoint(endpoint.getID()); if (endpoint != null) { /* * It appears that this Conference, the SctpConnection and the * Endpoint are in states which allow them to fire the initial * events. */ Endpoint dominantSpeaker = speechActivity.getDominantEndpoint(); if (dominantSpeaker != null) { try { endpoint.sendMessageOnDataChannel( createDominantSpeakerEndpointChangeEvent(dominantSpeaker)); } catch (IOException e) { logger.error("Failed to send message on data channel.", e); } } /* * Determining the instant at which an SctpConnection associated * with an Endpoint becomes ready (i.e. connected to the remote * peer and operational) is a multi-step ordeal. The Conference * class implements the procedure so do not make other classes * implement it as well. */ endpoint.sctpConnectionReady(sctpConnection); } } } /** * Sets the JID of the last known focus. * * @param jid the JID of the last known focus. */ public void setLastKnownFocus(String jid) { lastKnownFocus = jid; } /** * Attempts to enable or disable media recording for this <tt>Conference</tt>. * * @param recording whether to enable or disable recording. * @return the state of the media recording for this <tt>Conference</tt> after the attempt to * enable (or disable). */ public boolean setRecording(boolean recording) { if (recording != this.recording) { if (recording) { // try to enable recording if (logger.isDebugEnabled()) { logger.debug("Starting recording for conference with id=" + getID()); } String path = getRecordingPath(); boolean failedToStart = !checkRecordingDirectory(path); if (!failedToStart) { RecorderEventHandler handler = getRecorderEventHandler(); if (handler == null) failedToStart = true; } if (!failedToStart) { EndpointRecorder endpointRecorder = getEndpointRecorder(); if (endpointRecorder == null) { failedToStart = true; } else { for (Endpoint endpoint : getEndpoints()) endpointRecorder.updateEndpoint(endpoint); } } /* * The Recorders of the Contents need to share a single * Synchronizer; we take it from the first Recorder. 
*/ boolean first = true; Synchronizer synchronizer = null; for (Content content : contents) { MediaType mediaType = content.getMediaType(); if (!MediaType.VIDEO.equals(mediaType) && !MediaType.AUDIO.equals(mediaType)) { continue; } if (!failedToStart) failedToStart = !content.setRecording(true, path); if (failedToStart) break; if (first) { first = false; synchronizer = content.getRecorder().getSynchronizer(); } else { Recorder recorder = content.getRecorder(); if (recorder != null) recorder.setSynchronizer(synchronizer); } content.feedKnownSsrcsToSynchronizer(); } if (failedToStart) { recording = false; logger.warn("Failed to start media recording for conference " + getID()); } } // either we were asked to disable recording, or we failed to // enable it if (!recording) { if (logger.isDebugEnabled()) { logger.debug("Stopping recording for conference with id=" + getID()); } for (Content content : contents) { MediaType mediaType = content.getMediaType(); if (MediaType.AUDIO.equals(mediaType) || MediaType.VIDEO.equals(mediaType)) { content.setRecording(false, null); } } if (recorderEventHandler != null) recorderEventHandler.close(); recorderEventHandler = null; recordingPath = null; recordingDirectory = null; if (endpointRecorder != null) endpointRecorder.close(); endpointRecorder = null; } this.recording = recording; } return this.recording; } /** * Notifies this <tt>Conference</tt> that the ordered list of <tt>Endpoint</tt>s of {@link * #speechActivity} i.e. the dominant speaker history has changed. * * <p>This instance notifies the video <tt>Channel</tt>s about the change so that they may update * their last-n lists and report to this instance which <tt>Endpoint</tt>s are to be asked for * video keyframes. */ private void speechActivityEndpointsChanged() { List<Endpoint> endpoints = null; for (Content content : getContents()) { if (MediaType.VIDEO.equals(content.getMediaType())) { Set<Endpoint> endpointsToAskForKeyframes = null; endpoints = speechActivity.getEndpoints(); for (Channel channel : content.getChannels()) { if (!(channel instanceof RtpChannel)) continue; RtpChannel rtpChannel = (RtpChannel) channel; List<Endpoint> channelEndpointsToAskForKeyframes = rtpChannel.speechActivityEndpointsChanged(endpoints); if ((channelEndpointsToAskForKeyframes != null) && !channelEndpointsToAskForKeyframes.isEmpty()) { if (endpointsToAskForKeyframes == null) { endpointsToAskForKeyframes = new HashSet<>(); } endpointsToAskForKeyframes.addAll(channelEndpointsToAskForKeyframes); } } if ((endpointsToAskForKeyframes != null) && !endpointsToAskForKeyframes.isEmpty()) { content.askForKeyframes(endpointsToAskForKeyframes); } } } } /** * Notifies this instance that there was a change in the value of a property of {@link * #speechActivity}. * * @param ev a <tt>PropertyChangeEvent</tt> which specifies the source of the event/notification, * the name of the property and the old and new values of that property */ private void speechActivityPropertyChange(PropertyChangeEvent ev) { String propertyName = ev.getPropertyName(); if (ConferenceSpeechActivity.DOMINANT_ENDPOINT_PROPERTY_NAME.equals(propertyName)) { // The dominant speaker in this Conference has changed. We will // likely want to notify the Endpoints participating in this // Conference. dominantSpeakerChanged(); } else if (ConferenceSpeechActivity.ENDPOINTS_PROPERTY_NAME.equals(propertyName)) { speechActivityEndpointsChanged(); } } /** * Sets the time in milliseconds of the last activity related to this <tt>Conference</tt> to the * current system time. 
*/ public void touch() { long now = System.currentTimeMillis(); synchronized (this) { if (getLastActivityTime() < now) lastActivityTime = now; } } /** * Updates an <tt>Endpoint</tt> of this <tt>Conference</tt> with the information contained in * <tt>colibriEndpoint</tt>. The ID of <tt>colibriEndpoint</tt> is used to select the * <tt>Endpoint</tt> to update. * * @param colibriEndpoint a <tt>ColibriConferenceIQ.Endpoint</tt> instance that contains * information to be set on an <tt>Endpoint</tt> instance of this <tt>Conference</tt>. */ void updateEndpoint(ColibriConferenceIQ.Endpoint colibriEndpoint) { String id = colibriEndpoint.getId(); if (id != null) { Endpoint endpoint = getEndpoint(id); if (endpoint != null) { String oldDisplayName = endpoint.getDisplayName(); String newDisplayName = colibriEndpoint.getDisplayName(); if ((oldDisplayName == null && newDisplayName != null) || (oldDisplayName != null && !oldDisplayName.equals(newDisplayName))) { endpoint.setDisplayName(newDisplayName); if (isRecording() && endpointRecorder != null) endpointRecorder.updateEndpoint(endpoint); EventAdmin eventAdmin = getVideobridge().getEventAdmin(); if (eventAdmin != null) { eventAdmin.sendEvent(EventFactory.endpointDisplayNameChanged(endpoint)); } } } } } /** * Sets the conference name. * * @param name the new name. */ public void setName(String name) { this.name = name; } /** * Gets the conference name. * * @return the conference name */ public String getName() { return name; } }
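/**
 * Illustrative usage sketch, not part of the original sources: shows how a caller might obtain an
 * <tt>Endpoint</tt> from a <tt>Conference</tt> and watch for changes to the endpoint list. The
 * class name and the endpoint ID are hypothetical, and the sketch assumes
 * <tt>Conference.ENDPOINTS_PROPERTY_NAME</tt> is publicly accessible.
 */
class ConferenceEndpointsExample {
  static void observeEndpoints(final Conference conference) {
    // getOrCreateEndpoint(id) delegates to getEndpoint(id, /* create */ true), which also purges
    // dead weak references and fires ENDPOINTS_PROPERTY_NAME when the endpoint list changes.
    Endpoint endpoint = conference.getOrCreateEndpoint("endpoint-1");
    System.err.println("Created or found endpoint: " + endpoint.getID());

    conference.addPropertyChangeListener(
        new PropertyChangeListener() {
          @Override
          public void propertyChange(PropertyChangeEvent ev) {
            if (Conference.ENDPOINTS_PROPERTY_NAME.equals(ev.getPropertyName()))
              System.err.println("Endpoint count: " + conference.getEndpointCount());
          }
        });
  }
}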
/** * A depacketizer for VP8. See {@link "http://tools.ietf.org/html/draft-ietf-payload-vp8-11"} * * @author Boris Grozev * @author George Politis */ public class DePacketizer extends AbstractCodec2 { /** * The <tt>Logger</tt> used by the <tt>DePacketizer</tt> class and its instances for logging * output. */ private static final Logger logger = Logger.getLogger(DePacketizer.class); /** Whether trace logging is enabled. */ private static final boolean TRACE = logger.isTraceEnabled(); /** * A <tt>Comparator</tt> implementation for RTP sequence numbers. Compares <tt>a</tt> and * <tt>b</tt>, taking into account the wrap at 2^16. * * <p>IMPORTANT: This is a valid <tt>Comparator</tt> implementation only if used for subsets of * [0, 2^16) which don't span more than 2^15 elements. * * <p>E.g. it works for [0, 2^15-1] and ([50000, 2^16) u [0, 10000]). It doesn't work for [0, 2^15] * and ([0, 2^15-1] u {2^16-1}) and [0, 2^16). * * <p>NOTE: An identical implementation for Integers can be found in the class SeqNumComparator. * Sequence numbers are 16 bits and unsigned, so an Integer should be sufficient to hold that. */ private static final Comparator<? super Long> seqNumComparator = new Comparator<Long>() { @Override public int compare(Long a, Long b) { if (a.equals(b)) return 0; else if (a > b) { if (a - b < 32768) return 1; else return -1; } else // a < b { if (b - a < 32768) return -1; else return 1; } } }; /** * Stores the RTP payloads (VP8 payload descriptor stripped) from RTP packets belonging to a * single VP8 compressed frame. */ private SortedMap<Long, Container> data = new TreeMap<Long, Container>(seqNumComparator); /** Stores unused <tt>Container</tt>s. */ private Queue<Container> free = new ArrayBlockingQueue<Container>(100); /** * Stores the first (earliest) sequence number stored in <tt>data</tt>, or -1 if <tt>data</tt> is * empty. */ private long firstSeq = -1; /** * Stores the last (latest) sequence number stored in <tt>data</tt>, or -1 if <tt>data</tt> is * empty. */ private long lastSeq = -1; /** * Stores the value of the <tt>PictureID</tt> field for the VP8 compressed frame, parts of which * are currently stored in <tt>data</tt>, or -1 if the <tt>PictureID</tt> field is not in use or * <tt>data</tt> is empty. */ private int pictureId = -1; /** * Stores the RTP timestamp of the packets stored in <tt>data</tt>, or -1 if they don't have a * timestamp set. */ private long timestamp = -1; /** Whether <tt>data</tt> is empty (no packets stored); equivalent to <tt>data.isEmpty()</tt>. */ private boolean empty = true; /** * Whether we have stored in <tt>data</tt> the last RTP packet of the VP8 compressed frame, parts * of which are currently stored in <tt>data</tt>. */ private boolean haveEnd = false; /** * Whether we have stored in <tt>data</tt> the first RTP packet of the VP8 compressed frame, parts * of which are currently stored in <tt>data</tt>. */ private boolean haveStart = false; /** * Stores the sum of the lengths of the data stored in <tt>data</tt>, that is the total length of * the VP8 compressed frame to be constructed. */ private int frameLength = 0; /** The sequence number of the last RTP packet, which was included in the output. */ private long lastSentSeq = -1; /** Initializes a new <tt>DePacketizer</tt> instance. 
*/ public DePacketizer() { super( "VP8 RTP DePacketizer", VideoFormat.class, new VideoFormat[] {new VideoFormat(Constants.VP8)}); inputFormats = new VideoFormat[] {new VideoFormat(Constants.VP8_RTP)}; } /** {@inheritDoc} */ @Override protected void doClose() {} /** {@inheritDoc} */ @Override protected void doOpen() throws ResourceUnavailableException { if (logger.isInfoEnabled()) logger.info("Opened VP8 depacketizer"); } /** * Re-initializes the fields which store information about the currently held data. Empties * <tt>data</tt>. */ private void reinit() { firstSeq = lastSeq = timestamp = -1; pictureId = -1; empty = true; haveEnd = haveStart = false; frameLength = 0; Iterator<Map.Entry<Long, Container>> it = data.entrySet().iterator(); Map.Entry<Long, Container> e; while (it.hasNext()) { e = it.next(); free.offer(e.getValue()); it.remove(); } } /** * Checks whether the currently held VP8 compressed frame is complete (i.e. all its packets are * stored in <tt>data</tt>). * * @return <tt>true</tt> if the currently held VP8 compressed frame is complete, <tt>false</tt> * otherwise. */ private boolean frameComplete() { return haveStart && haveEnd && !haveMissing(); } /** * Checks whether there are packets with sequence numbers between <tt>firstSeq</tt> and * <tt>lastSeq</tt> which are *not* stored in <tt>data</tt>. * * @return <tt>true</tt> if there are packets with sequence numbers between <tt>firstSeq</tt> and * <tt>lastSeq</tt> which are *not* stored in <tt>data</tt>. */ private boolean haveMissing() { Set<Long> seqs = data.keySet(); long s = firstSeq; while (s != lastSeq) { if (!seqs.contains(s)) return true; s = (s + 1) % (1 << 16); } return false; } /** {@inheritDoc} */ @Override protected int doProcess(Buffer inBuffer, Buffer outBuffer) { byte[] inData = (byte[]) inBuffer.getData(); int inOffset = inBuffer.getOffset(); if (!VP8PayloadDescriptor.isValid(inData, inOffset)) { logger.warn("Invalid RTP/VP8 packet discarded."); outBuffer.setDiscard(true); return BUFFER_PROCESSED_FAILED; // XXX: FAILED or OK? } long inSeq = inBuffer.getSequenceNumber(); long inRtpTimestamp = inBuffer.getRtpTimeStamp(); int inPictureId = VP8PayloadDescriptor.getPictureId(inData, inOffset); boolean inMarker = (inBuffer.getFlags() & Buffer.FLAG_RTP_MARKER) != 0; boolean inIsStartOfFrame = VP8PayloadDescriptor.isStartOfFrame(inData, inOffset); int inLength = inBuffer.getLength(); int inPdSize = VP8PayloadDescriptor.getSize(inData, inOffset); int inPayloadLength = inLength - inPdSize; if (empty && lastSentSeq != -1 && seqNumComparator.compare(inSeq, lastSentSeq) != 1) { if (logger.isInfoEnabled()) logger.info("Discarding old packet (while empty) " + inSeq); outBuffer.setDiscard(true); return BUFFER_PROCESSED_OK; } if (!empty) { // if the incoming packet has a different PictureID or timestamp // than those of the current frame, then it belongs to a different // frame. if ((inPictureId != -1 && pictureId != -1 && inPictureId != pictureId) || (timestamp != -1 && inRtpTimestamp != -1 && inRtpTimestamp != timestamp)) { if (seqNumComparator.compare(inSeq, firstSeq) != 1) // inSeq <= firstSeq { // the packet belongs to a previous frame. discard it if (logger.isInfoEnabled()) logger.info("Discarding old packet " + inSeq); outBuffer.setDiscard(true); return BUFFER_PROCESSED_OK; } else // inSeq > firstSeq (and also presumably inSeq > lastSeq) { // the packet belongs to a subsequent frame (to the one // currently being held). Drop the current frame. 
if (logger.isInfoEnabled()) logger.info( "Discarding saved packets on arrival of" + " a packet for a subsequent frame: " + inSeq); // TODO: this would be the place to complain about the // not-well-received PictureID by sending an RTCP SLI or NACK. reinit(); } } } // A whole frame in a single packet. Avoid the extra copy to // this.data and output it immediately. if (empty && inMarker && inIsStartOfFrame) { byte[] outData = validateByteArraySize(outBuffer, inPayloadLength, false); System.arraycopy(inData, inOffset + inPdSize, outData, 0, inPayloadLength); outBuffer.setOffset(0); outBuffer.setLength(inPayloadLength); outBuffer.setRtpTimeStamp(inBuffer.getRtpTimeStamp()); if (TRACE) logger.trace("Out PictureID=" + inPictureId); lastSentSeq = inSeq; return BUFFER_PROCESSED_OK; } // add to this.data Container container = free.poll(); if (container == null) container = new Container(); if (container.buf == null || container.buf.length < inPayloadLength) container.buf = new byte[inPayloadLength]; if (data.get(inSeq) != null) { if (logger.isInfoEnabled()) logger.info("(Probable) duplicate packet detected, discarding " + inSeq); outBuffer.setDiscard(true); return BUFFER_PROCESSED_OK; } System.arraycopy(inData, inOffset + inPdSize, container.buf, 0, inPayloadLength); container.len = inPayloadLength; data.put(inSeq, container); // update fields frameLength += inPayloadLength; if (firstSeq == -1 || (seqNumComparator.compare(firstSeq, inSeq) == 1)) firstSeq = inSeq; if (lastSeq == -1 || (seqNumComparator.compare(inSeq, lastSeq) == 1)) lastSeq = inSeq; if (empty) { // the first received packet for the current frame was just added empty = false; timestamp = inRtpTimestamp; pictureId = inPictureId; } if (inMarker) haveEnd = true; if (inIsStartOfFrame) haveStart = true; // check if we have a full frame if (frameComplete()) { byte[] outData = validateByteArraySize(outBuffer, frameLength, false); int ptr = 0; Container b; for (Map.Entry<Long, Container> entry : data.entrySet()) { b = entry.getValue(); System.arraycopy(b.buf, 0, outData, ptr, b.len); ptr += b.len; } outBuffer.setOffset(0); outBuffer.setLength(frameLength); outBuffer.setRtpTimeStamp(inBuffer.getRtpTimeStamp()); if (TRACE) logger.trace("Out PictureID=" + inPictureId); lastSentSeq = lastSeq; // prepare for the next frame reinit(); return BUFFER_PROCESSED_OK; } else { // frame not complete yet outBuffer.setDiscard(true); return OUTPUT_BUFFER_NOT_FILLED; } } /** * Returns true if the buffer contains a VP8 key frame at offset <tt>off</tt>. * * @param buff the byte buffer to check * @param off the offset in the byte buffer where the actual data starts * @param len the length of the data in the byte buffer * @return true if the buffer contains a VP8 key frame at offset <tt>off</tt>. */ public static boolean isKeyFrame(byte[] buff, int off, int len) { if (buff == null || buff.length < off + len || len < RawPacket.FIXED_HEADER_SIZE) { return false; } // Check if this is the start of a VP8 partition in the payload // descriptor. 
if (!DePacketizer.VP8PayloadDescriptor.isValid(buff, off)) { return false; } if (!DePacketizer.VP8PayloadDescriptor.isStartOfFrame(buff, off)) { return false; } int szVP8PayloadDescriptor = DePacketizer.VP8PayloadDescriptor.getSize(buff, off); return DePacketizer.VP8PayloadHeader.isKeyFrame(buff, off + szVP8PayloadDescriptor); } /** * A class that represents the VP8 Payload Descriptor structure defined in {@link * "http://tools.ietf.org/html/draft-ietf-payload-vp8-10"} */ public static class VP8PayloadDescriptor { /** I bit from the X byte of the Payload Descriptor. */ private static final byte I_BIT = (byte) 0x80; /** K bit from the X byte of the Payload Descriptor. */ private static final byte K_BIT = (byte) 0x10; /** L bit from the X byte of the Payload Descriptor. */ private static final byte L_BIT = (byte) 0x40; /** M bit from the I byte of the Payload Descriptor. */ private static final byte M_BIT = (byte) 0x80; /** Maximum length of a VP8 Payload Descriptor. */ public static final int MAX_LENGTH = 6; /** S bit from the first byte of the Payload Descriptor. */ private static final byte S_BIT = (byte) 0x10; /** T bit from the X byte of the Payload Descriptor. */ private static final byte T_BIT = (byte) 0x20; /** X bit from the first byte of the Payload Descriptor. */ private static final byte X_BIT = (byte) 0x80; /** * Gets the temporal layer index (TID), if that's set. * * @param buf the byte buffer that holds the VP8 packet. * @param off the offset in the byte buffer where the VP8 packet starts. * @param len the length of the VP8 packet. * @return the temporal layer index (TID), if that's set, -1 otherwise. */ public static int getTemporalLayerIndex(byte[] buf, int off, int len) { if (buf == null || buf.length < off + len || len < 2) { return -1; } if ((buf[off] & X_BIT) == 0 || (buf[off + 1] & T_BIT) == 0) { return -1; } int sz = getSize(buf, off); if (buf.length < off + sz || sz < 1) { return -1; } return (buf[off + sz - 1] & 0xc0) >> 6; } /** * Returns a simple Payload Descriptor, with PartID = 0, the 'start of partition' bit set * according to <tt>startOfPartition</tt>, and all other bits set to 0. * * @param startOfPartition whether the 'start of partition' bit should be set * @return a simple Payload Descriptor, with PartID = 0, the 'start of partition' bit set * according to <tt>startOfPartition</tt>, and all other bits set to 0. */ public static byte[] create(boolean startOfPartition) { byte[] pd = new byte[1]; pd[0] = startOfPartition ? (byte) 0x10 : 0; return pd; } /** * The size in bytes of the Payload Descriptor at offset <tt>offset</tt> in <tt>input</tt>. The * size is between 1 and 6. * * @param input the byte buffer that holds the Payload Descriptor * @param offset the offset in <tt>input</tt> at which the Payload Descriptor starts * @return The size in bytes of the Payload Descriptor at offset <tt>offset</tt> in * <tt>input</tt>, or -1 if the input is not a valid VP8 Payload Descriptor. The size is * between 1 and 6. */ public static int getSize(byte[] input, int offset) { if (!isValid(input, offset)) return -1; if ((input[offset] & X_BIT) == 0) return 1; int size = 2; if ((input[offset + 1] & I_BIT) != 0) { size++; if ((input[offset + 2] & M_BIT) != 0) size++; } if ((input[offset + 1] & L_BIT) != 0) size++; if ((input[offset + 1] & (T_BIT | K_BIT)) != 0) size++; return size; } /** * Gets the value of the PictureID field of a VP8 Payload Descriptor. * * @param input the byte buffer that holds the Payload Descriptor * @param offset the offset in <tt>input</tt> at which the Payload Descriptor starts * @return the value of the PictureID field of a VP8 Payload Descriptor, or -1 if the field is * not present. 
*/ private static int getPictureId(byte[] input, int offset) { if (!isValid(input, offset)) return -1; if ((input[offset] & X_BIT) == 0 || (input[offset + 1] & I_BIT) == 0) return -1; boolean isLong = (input[offset + 2] & M_BIT) != 0; if (isLong) return (input[offset + 2] & 0x7f) << 8 | (input[offset + 3] & 0xff); else return input[offset + 2] & 0x7f; } /** * Checks whether the buffer at offset <tt>offset</tt> could hold a valid VP8 Payload * Descriptor. * * @param input the byte buffer that holds the Payload Descriptor * @param offset the offset in <tt>input</tt> at which the Payload Descriptor starts * @return <tt>true</tt> if the input could be a valid VP8 Payload Descriptor. */ public static boolean isValid(byte[] input, int offset) { // TODO: perform an actual validity check; currently any input is // accepted. return true; } /** * Checks whether the '<tt>start of partition</tt>' bit is set in the VP8 Payload Descriptor at * offset <tt>offset</tt> in <tt>input</tt>. * * @param input the byte buffer that holds the Payload Descriptor * @param offset the offset in <tt>input</tt> at which the Payload Descriptor starts * @return <tt>true</tt> if the '<tt>start of partition</tt>' bit is set, <tt>false</tt> * otherwise. */ public static boolean isStartOfPartition(byte[] input, int offset) { return (input[offset] & S_BIT) != 0; } /** * Returns <tt>true</tt> if both the '<tt>start of partition</tt>' bit is set and the * <tt>PID</tt> field has value 0 in the VP8 Payload Descriptor at offset <tt>offset</tt> in * <tt>input</tt>. * * @param input the byte buffer that holds the Payload Descriptor * @param offset the offset in <tt>input</tt> at which the Payload Descriptor starts * @return <tt>true</tt> if both the '<tt>start of partition</tt>' bit is set and the * <tt>PID</tt> field has value 0 in the VP8 Payload Descriptor at offset <tt>offset</tt> * in <tt>input</tt>. */ public static boolean isStartOfFrame(byte[] input, int offset) { return isStartOfPartition(input, offset) && getPartitionId(input, offset) == 0; } /** * Returns the value of the <tt>PID</tt> (partition ID) field of the VP8 Payload Descriptor at * offset <tt>offset</tt> in <tt>input</tt>. * * @param input the byte buffer that holds the Payload Descriptor * @param offset the offset in <tt>input</tt> at which the Payload Descriptor starts * @return the value of the <tt>PID</tt> (partition ID) field of the VP8 Payload Descriptor at * offset <tt>offset</tt> in <tt>input</tt>. */ public static int getPartitionId(byte[] input, int offset) { return input[offset] & 0x07; } } /** * A class that represents the VP8 Payload Header structure defined in {@link * "http://tools.ietf.org/html/draft-ietf-payload-vp8-10"} */ public static class VP8PayloadHeader { /** P bit (inverse key frame flag) of the VP8 Payload Header. */ private static final byte P_BIT = (byte) 0x01; /** * Returns true if the <tt>P</tt> (inverse key frame flag) field of the VP8 Payload Header at * offset <tt>offset</tt> in <tt>input</tt> is 0. * * @return true if the <tt>P</tt> (inverse key frame flag) field of the VP8 Payload Header at * offset <tt>offset</tt> in <tt>input</tt> is 0, false otherwise. */ public static boolean isKeyFrame(byte[] input, int offset) { // When set to 0 the current frame is a key frame. When set to 1 // the current frame is an interframe. Defined in [RFC6386] return (input[offset] & P_BIT) == 0; } } /** A simple container for a <tt>byte[]</tt> and an integer. */ private static class Container { /** This <tt>Container</tt>'s data. */ private byte[] buf; /** Length used. */ private int len = 0; } }
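/**
 * Illustrative sketch, not part of the original sources: restates the wrap-aware RTP sequence
 * number comparison used by <tt>DePacketizer.seqNumComparator</tt> above (which is private) and
 * demonstrates its behaviour around the 2^16 wrap.
 */
class SeqNumWrapExample {
  /** Same logic as <tt>DePacketizer.seqNumComparator</tt>, restated for the example. */
  static int compareSeqNums(long a, long b) {
    if (a == b) return 0;
    else if (a > b) return (a - b < 32768) ? 1 : -1;
    else return (b - a < 32768) ? -1 : 1;
  }

  public static void main(String[] args) {
    System.out.println(compareSeqNums(10, 5)); // 1: 10 comes after 5
    System.out.println(compareSeqNums(65535, 0)); // -1: 65535 comes just before the wrap to 0
    System.out.println(compareSeqNums(0, 65535)); // 1: 0 comes after 65535 across the wrap
  }
}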
/** * @author Emil Ivov * @author Lyubomir Marinov */ public class ConfigurationActivator implements BundleActivator { /** The <tt>Logger</tt> used by the <tt>ConfigurationActivator</tt> class for logging output. */ private static final Logger logger = Logger.getLogger(ConfigurationActivator.class); /** The currently registered {@link ConfigurationService} instance. */ private ConfigurationService cs; /** * Starts the configuration service. * * @param bundleContext the <tt>BundleContext</tt> as provided by the OSGi framework. * @throws Exception if anything goes wrong */ public void start(BundleContext bundleContext) throws Exception { FileAccessService fas = ServiceUtils.getService(bundleContext, FileAccessService.class); if (fas != null) { File usePropFileConfig; try { usePropFileConfig = fas.getPrivatePersistentFile(".usepropfileconfig", FileCategory.PROFILE); } catch (Exception ise) { // There is somewhat of a chicken-and-egg dependency between // FileConfigurationServiceImpl and ConfigurationServiceImpl: // FileConfigurationServiceImpl throws IllegalStateException if // certain System properties are not set, // ConfigurationServiceImpl will make sure that these properties // are set but it will do that later. // A SecurityException is thrown when the destination // is not writable or we do not have access to that folder usePropFileConfig = null; } if (usePropFileConfig != null && usePropFileConfig.exists()) { logger.info("Using properties file configuration store."); this.cs = LibJitsi.getConfigurationService(); } } if (this.cs == null) { this.cs = new JdbcConfigService(fas); } bundleContext.registerService(ConfigurationService.class.getName(), this.cs, null); fixPermissions(this.cs); } /** * Causes the configuration service to store the properties object and unregisters the * configuration service. * * @param bundleContext <tt>BundleContext</tt> * @throws Exception if anything goes wrong while storing the properties managed by the * <tt>ConfigurationService</tt> implementation provided by this bundle and while * unregistering the service in question */ public void stop(BundleContext bundleContext) throws Exception { this.cs.storeConfiguration(); this.cs = null; } /** * Makes the home folder and the configuration file readable and writable only by the owner. * * @param cs the <tt>ConfigurationService</tt> instance to check for home folder and configuration * file. */ private static void fixPermissions(ConfigurationService cs) { if (!OSUtils.IS_LINUX && !OSUtils.IS_MAC) return; try { // let's check config file and config folder File homeFolder = new File(cs.getScHomeDirLocation(), cs.getScHomeDirName()); Set<PosixFilePermission> perms = new HashSet<PosixFilePermission>() { { add(PosixFilePermission.OWNER_READ); add(PosixFilePermission.OWNER_WRITE); add(PosixFilePermission.OWNER_EXECUTE); } }; Files.setPosixFilePermissions(Paths.get(homeFolder.getAbsolutePath()), perms); String fileName = cs.getConfigurationFilename(); if (fileName != null) { File cf = new File(homeFolder, fileName); if (cf.exists()) { perms = new HashSet<PosixFilePermission>() { { add(PosixFilePermission.OWNER_READ); add(PosixFilePermission.OWNER_WRITE); } }; Files.setPosixFilePermissions(Paths.get(cf.getAbsolutePath()), perms); } } } catch (Throwable t) { logger.error("Error fixing home folder and configuration file permissions", t); if (t instanceof InterruptedException) Thread.currentThread().interrupt(); else if (t instanceof ThreadDeath) throw (ThreadDeath) t; } } }
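/**
 * Illustrative sketch, not part of the original sources: the double-brace <tt>HashSet</tt>
 * initializers in <tt>fixPermissions</tt> above can equivalently be written with
 * <tt>EnumSet</tt>, which avoids allocating anonymous subclasses. The file path is hypothetical.
 */
class PosixPermissionsExample {
  public static void main(String[] args) throws java.io.IOException {
    java.util.Set<java.nio.file.attribute.PosixFilePermission> perms =
        java.util.EnumSet.of(
            java.nio.file.attribute.PosixFilePermission.OWNER_READ,
            java.nio.file.attribute.PosixFilePermission.OWNER_WRITE);
    // Restrict the file to owner read/write, as fixPermissions does for the configuration file.
    java.nio.file.Files.setPosixFilePermissions(
        java.nio.file.Paths.get("/tmp/example.properties"), perms);
  }
}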
/** * The stream used by JMF for our image streaming. * * @author Sebastien Vincent * @author Lyubomir Marinov * @author Damian Minkov */ public class ImageStream extends AbstractVideoPullBufferStream<DataSource> { /** * The <tt>Logger</tt> used by the <tt>ImageStream</tt> class and its instances for logging * output. */ private static final Logger logger = Logger.getLogger(ImageStream.class); /** * The pool of <tt>ByteBuffer</tt>s this instance is using to optimize the allocations and * deallocations of <tt>ByteBuffer</tt>s. */ private final ByteBufferPool byteBufferPool = new ByteBufferPool(); /** Desktop interaction (screen capture, key press, ...). */ private DesktopInteract desktopInteract = null; /** Index of display that we will capture from. */ private int displayIndex = -1; /** Sequence number. */ private long seqNo = 0; /** X origin. */ private int x = 0; /** Y origin. */ private int y = 0; /** * Initializes a new <tt>ImageStream</tt> instance which is to have a specific * <tt>FormatControl</tt>. * * @param dataSource the <tt>DataSource</tt> which is creating the new instance so that it becomes * one of its <tt>streams</tt> * @param formatControl the <tt>FormatControl</tt> of the new instance which is to specify the * format in which it is to provide its media data */ ImageStream(DataSource dataSource, FormatControl formatControl) { super(dataSource, formatControl); } /** * Blocks and reads into a <tt>Buffer</tt> from this <tt>PullBufferStream</tt>. * * @param buffer the <tt>Buffer</tt> this <tt>PullBufferStream</tt> is to read into * @throws IOException if an I/O error occurs while this <tt>PullBufferStream</tt> reads into the * specified <tt>Buffer</tt> * @see AbstractVideoPullBufferStream#doRead(Buffer) */ @Override protected void doRead(Buffer buffer) throws IOException { /* * Determine the Format in which we're expected to output. We cannot * rely on the Format always being specified in the Buffer because it is * not its responsibility; the DataSource of this ImageStream knows the * output Format. */ Format format = buffer.getFormat(); if (format == null) { format = getFormat(); if (format != null) buffer.setFormat(format); } if (format instanceof AVFrameFormat) { Object o = buffer.getData(); AVFrame frame; if (o instanceof AVFrame) frame = (AVFrame) o; else { frame = new AVFrame(); buffer.setData(frame); } AVFrameFormat avFrameFormat = (AVFrameFormat) format; Dimension size = avFrameFormat.getSize(); ByteBuffer data = readScreenNative(size); if (data != null) { if (frame.avpicture_fill(data, avFrameFormat) < 0) { data.free(); throw new IOException("avpicture_fill"); } } else { /* * This can happen when we disconnect a monitor from the computer * before or during grabbing. */ throw new IOException("Failed to grab screen."); } } else { byte[] bytes = (byte[]) buffer.getData(); Dimension size = ((VideoFormat) format).getSize(); bytes = readScreen(bytes, size); buffer.setData(bytes); buffer.setOffset(0); buffer.setLength(bytes.length); } buffer.setHeader(null); buffer.setTimeStamp(System.nanoTime()); buffer.setSequenceNumber(seqNo); buffer.setFlags(Buffer.FLAG_SYSTEM_TIME | Buffer.FLAG_LIVE_DATA); seqNo++; } /** * Read screen. * * @param output output buffer for screen bytes * @param dim dimension of the screen * @return the raw bytes; this may or may not be the same array as <tt>output</tt>, so the * caller must use the returned value. 
*/ public byte[] readScreen(byte[] output, Dimension dim) { VideoFormat format = (VideoFormat) getFormat(); Dimension formatSize = format.getSize(); int width = formatSize.width; int height = formatSize.height; BufferedImage scaledScreen = null; BufferedImage screen = null; byte[] data = null; int size = width * height * 4; // If output is not large enough, enlarge it. if ((output == null) || (output.length < size)) output = new byte[size]; /* get desktop screen via native grabber if available */ if (desktopInteract.captureScreen(displayIndex, x, y, dim.width, dim.height, output)) { return output; } logger.warn("failed to grab with native! " + output.length); /* OK, the native grabber failed or is not available, so try with AWT * Robot and convert the capture to the right format. * * Note that this is very memory consuming, since memory is allocated * to capture the screen (via Robot) and then again to convert it to * raw bytes. Moreover, support for multiple displays has not yet been * investigated. * * Normally, none of our supported platforms (Windows (x86, x64), * Linux (x86, x86-64), Mac OS X (i386, x86-64, ppc) and * FreeBSD (x86, x86-64)) should go here. */ screen = desktopInteract.captureScreen(); if (screen != null) { /* convert to ARGB BufferedImage */ scaledScreen = ImgStreamingUtils.getScaledImage(screen, width, height, BufferedImage.TYPE_INT_ARGB); /* get raw bytes */ data = ImgStreamingUtils.getImageBytes(scaledScreen, output); } screen = null; scaledScreen = null; return data; } /** * Reads the screen and stores the result in a native buffer. * * @param dim dimension of the video * @return a <tt>ByteBuffer</tt> containing the captured screen, or <tt>null</tt> on failure */ private ByteBuffer readScreenNative(Dimension dim) { int size = dim.width * dim.height * 4 + FFmpeg.FF_INPUT_BUFFER_PADDING_SIZE; ByteBuffer data = byteBufferPool.getBuffer(size); data.setLength(size); /* get desktop screen via native grabber */ boolean b; try { b = desktopInteract.captureScreen( displayIndex, x, y, dim.width, dim.height, data.getPtr(), data.getLength()); } catch (Throwable t) { if (t instanceof ThreadDeath) { throw (ThreadDeath) t; } else { b = false; // logger.error("Failed to grab screen!", t); } } if (!b) { data.free(); data = null; } return data; } /** * Sets the index of the display to be used by this <tt>ImageStream</tt>. * * @param displayIndex the index of the display to be used by this <tt>ImageStream</tt> */ public void setDisplayIndex(int displayIndex) { this.displayIndex = displayIndex; } /** * Sets the origin to be captured by this <tt>ImageStream</tt>. * * @param x the x coordinate of the origin to be set on this instance * @param y the y coordinate of the origin to be set on this instance */ public void setOrigin(int x, int y) { this.x = x; this.y = y; } /** * Start desktop capture stream. * * @see AbstractPullBufferStream#start() */ @Override public void start() throws IOException { super.start(); if (desktopInteract == null) { try { desktopInteract = new DesktopInteractImpl(); } catch (Exception e) { logger.warn("Cannot create DesktopInteract object!"); } } } /** * Stop desktop capture stream. * * @see AbstractPullBufferStream#stop() */ @Override public void stop() throws IOException { try { if (logger.isInfoEnabled()) logger.info("Stop stream"); } finally { super.stop(); byteBufferPool.drain(); } } }
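/**
 * Illustrative sketch, not part of the original sources: both capture paths in
 * <tt>ImageStream</tt> size their buffers as width * height * 4 because the screen is captured
 * as 32-bit (A)RGB; the native path additionally reserves FFmpeg's input buffer padding. The
 * resolution below is an arbitrary example.
 */
class CaptureBufferSizeExample {
  public static void main(String[] args) {
    int width = 1920, height = 1080;
    // Matches the arithmetic in readScreen: 8,294,400 bytes (roughly 7.9 MiB) per frame.
    int frameBytes = width * height * 4;
    // readScreenNative would add FFmpeg.FF_INPUT_BUFFER_PADDING_SIZE on top of this.
    System.out.println("ARGB frame buffer: " + frameBytes + " bytes");
  }
}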
/** * The <tt>FileElement</tt> extends the smackx <tt>StreamInitiation.File</tt> in order to provide a * file that supports thumbnails. * * @author Yana Stamcheva */ public class FileElement extends File implements IQProvider { private static final Logger logger = Logger.getLogger(FileElement.class); private static final List<DateFormat> DATE_FORMATS = new ArrayList<DateFormat>(); /** The element name of this <tt>IQProvider</tt>. */ public static final String ELEMENT_NAME = "si"; /** The namespace of this <tt>IQProvider</tt>. */ public static final String NAMESPACE = "http://jabber.org/protocol/si"; static { // DATE_FORMATS DateFormat fmt; // XEP-0091 DATE_FORMATS.add(DelayInformation.XEP_0091_UTC_FORMAT); fmt = new SimpleDateFormat("yyyyMd'T'HH:mm:ss'Z'"); fmt.setTimeZone(TimeZone.getTimeZone("UTC")); DATE_FORMATS.add(fmt); // XEP-0082 fmt = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss'Z'"); fmt.setTimeZone(TimeZone.getTimeZone("UTC")); DATE_FORMATS.add(fmt); fmt = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss.SSS'Z'"); fmt.setTimeZone(TimeZone.getTimeZone("UTC")); DATE_FORMATS.add(fmt); DATE_FORMATS.add(new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ssZ")); DATE_FORMATS.add(new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss.SSSZ")); } private ThumbnailElement thumbnail; /** An empty constructor used to initialize this class as an <tt>IQProvider</tt>. */ public FileElement() { this("", 0); } /** * Creates a <tt>FileElement</tt> by specifying a base file and a thumbnail to extend it with. * * @param baseFile the file used as a base * @param thumbnail the thumbnail to add */ public FileElement(File baseFile, ThumbnailElement thumbnail) { this(baseFile.getName(), baseFile.getSize()); this.thumbnail = thumbnail; } /** * Creates a <tt>FileElement</tt> by specifying the name and the size of the file. * * @param name the name of the file * @param size the size of the file */ public FileElement(String name, long size) { super(name, size); } /** * Represents this <tt>FileElement</tt> as XML. * * @see File#toXML() */ @Override public String toXML() { StringBuilder buffer = new StringBuilder(); buffer .append("<") .append(getElementName()) .append(" xmlns=\"") .append(getNamespace()) .append("\" "); if (getName() != null) { buffer.append("name=\"").append(StringUtils.escapeForXML(getName())).append("\" "); } if (getSize() > 0) { buffer.append("size=\"").append(getSize()).append("\" "); } if (getDate() != null) { buffer.append("date=\"").append(StringUtils.formatXEP0082Date(this.getDate())).append("\" "); } if (getHash() != null) { buffer.append("hash=\"").append(getHash()).append("\" "); } if ((this.getDesc() != null && getDesc().length() > 0) || isRanged() || thumbnail != null) { buffer.append(">"); if (getDesc() != null && getDesc().length() > 0) { buffer.append("<desc>").append(StringUtils.escapeForXML(getDesc())).append("</desc>"); } if (isRanged()) { buffer.append("<range/>"); } if (thumbnail != null) { buffer.append(thumbnail.toXML()); } buffer.append("</").append(getElementName()).append(">"); } else { buffer.append("/>"); } return buffer.toString(); } /** * Returns the <tt>ThumbnailElement</tt> contained in this <tt>FileElement</tt>. * * @return the <tt>ThumbnailElement</tt> contained in this <tt>FileElement</tt> */ public ThumbnailElement getThumbnailElement() { return thumbnail; } /** * Sets the given <tt>thumbnail</tt> to this <tt>FileElement</tt>. 
* * @param thumbnail the <tt>ThumbnailElement</tt> to set */ public void setThumbnailElement(ThumbnailElement thumbnail) { this.thumbnail = thumbnail; } /** * Parses the given <tt>parser</tt> in order to create a <tt>FileElement</tt> from it. * * @param parser the parser to parse * @see IQProvider#parseIQ(XmlPullParser) */ public IQ parseIQ(final XmlPullParser parser) throws Exception { boolean done = false; // si String id = parser.getAttributeValue("", "id"); String mimeType = parser.getAttributeValue("", "mime-type"); StreamInitiation initiation = new StreamInitiation(); // file String name = null; String size = null; String hash = null; String date = null; String desc = null; ThumbnailElement thumbnail = null; boolean isRanged = false; // feature DataForm form = null; DataFormProvider dataFormProvider = new DataFormProvider(); int eventType; String elementName; String namespace; while (!done) { eventType = parser.next(); elementName = parser.getName(); namespace = parser.getNamespace(); if (eventType == XmlPullParser.START_TAG) { if (elementName.equals("file")) { name = parser.getAttributeValue("", "name"); size = parser.getAttributeValue("", "size"); hash = parser.getAttributeValue("", "hash"); date = parser.getAttributeValue("", "date"); } else if (elementName.equals("desc")) { desc = parser.nextText(); } else if (elementName.equals("range")) { isRanged = true; } else if (elementName.equals("x") && namespace.equals("jabber:x:data")) { form = (DataForm) dataFormProvider.parseExtension(parser); } else if (elementName.equals("thumbnail")) { thumbnail = new ThumbnailElement(parser.getText()); } } else if (eventType == XmlPullParser.END_TAG) { if (elementName.equals("si")) { done = true; } // The name-attribute is required per XEP-0096, so ignore the // IQ if the name is not set to avoid exceptions. Particularly, // the SI response of Empathy contains an invalid, empty // file-tag. else if (elementName.equals("file") && name != null) { long fileSize = 0; if (size != null && size.trim().length() != 0) { try { fileSize = Long.parseLong(size); } catch (NumberFormatException e) { logger.warn( "Received an invalid file size," + " continuing with fileSize set to 0", e); } } FileElement file = new FileElement(name, fileSize); file.setHash(hash); if (date != null) { // try all known date formats boolean found = false; if (date.matches(".*?T\\d+:\\d+:\\d+(\\.\\d+)?(\\+|-)\\d+:\\d+")) { int timeZoneColon = date.lastIndexOf(":"); date = date.substring(0, timeZoneColon) + date.substring(timeZoneColon + 1, date.length()); } for (DateFormat fmt : DATE_FORMATS) { try { file.setDate(fmt.parse(date)); found = true; break; } catch (ParseException ex) { // Ignore and try the next known format. } } if (!found) { logger.warn("Unknown date format on incoming file transfer: " + date); } } if (thumbnail != null) file.setThumbnailElement(thumbnail); file.setDesc(desc); file.setRanged(isRanged); initiation.setFile(file); } } } // The triple-s in setSesssionID below is the Smack API's own spelling. initiation.setSesssionID(id); initiation.setMimeType(mimeType); initiation.setFeatureNegotiationForm(form); return initiation; } }
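/**
 * Illustrative sketch, not part of the original sources: shows the time-zone normalization
 * performed in <tt>FileElement.parseIQ</tt> above. XEP-0082 dates carry a colon in the zone
 * offset ("+02:00"), which <tt>SimpleDateFormat</tt>'s <tt>Z</tt> pattern cannot parse, so the
 * last colon is stripped first. The date value is an arbitrary example.
 */
class Xep0082DateExample {
  public static void main(String[] args) throws java.text.ParseException {
    String date = "2015-07-10T14:30:00+02:00";
    if (date.matches(".*?T\\d+:\\d+:\\d+(\\.\\d+)?(\\+|-)\\d+:\\d+")) {
      int timeZoneColon = date.lastIndexOf(":");
      date = date.substring(0, timeZoneColon) + date.substring(timeZoneColon + 1);
    }
    // "2015-07-10T14:30:00+0200" now matches the "yyyy-MM-dd'T'HH:mm:ssZ" format.
    System.out.println(new java.text.SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ssZ").parse(date));
  }
}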
/** * Class implements {@link Recorder} using the Jirecon recorder container. * * @author Pawel Domas */ public class JireconRecorder extends Recorder { /** The logger. */ private static final Logger logger = Logger.getLogger(JireconRecorder.class); /** * The name of the property which specifies the token used to authenticate requests to enable * media recording. */ static final String MEDIA_RECORDING_TOKEN_PNAME = "org.jitsi.videobridge.MEDIA_RECORDING_TOKEN"; /** FIXME: not sure about that. Our room JID, in the form room_name@muc_component/focus_nickname. */ private final String mucRoomJid; /** Recording authentication token. */ private final String token; /** Recording status of the Jirecon component. */ private JireconIq.Status status = JireconIq.Status.UNDEFINED; /** Recording session identifier assigned by Jirecon. */ private String recordingId; /** * Creates a new instance of <tt>JireconRecorder</tt>. * * @param mucRoomJid focus room jid in form of "room_name@muc_component/focus_nickname". * @param recorderComponentJid recorder component address. * @param xmpp {@link OperationSetDirectSmackXmpp} instance for the current XMPP connection. */ public JireconRecorder( String mucRoomJid, String recorderComponentJid, OperationSetDirectSmackXmpp xmpp) { super(recorderComponentJid, xmpp); this.mucRoomJid = mucRoomJid; this.token = FocusBundleActivator.getConfigService().getString(MEDIA_RECORDING_TOKEN_PNAME); } /** {@inheritDoc} */ @Override public boolean isRecording() { return JireconIq.Status.INITIATING == status || JireconIq.Status.STARTED == status; } /** {@inheritDoc} */ @Override public boolean setRecording(String from, String token, State doRecord, String path) { if (!StringUtils.isNullOrEmpty(this.token) && !this.token.equals(token)) { return false; } if (!isRecording() && doRecord.equals(State.ON)) { // Send start recording IQ JireconIq recording = new JireconIq(); recording.setTo(recorderComponentJid); recording.setType(IQ.Type.SET); recording.setFrom(from); recording.setMucJid(mucRoomJid); recording.setAction(JireconIq.Action.START); recording.setOutput(path); Packet reply = xmpp.getXmppConnection().sendPacketAndGetReply(recording); if (reply instanceof JireconIq) { JireconIq recResponse = (JireconIq) reply; if (JireconIq.Status.INITIATING.equals(recResponse.getStatus())) { recordingId = recResponse.getRid(); logger.info("Received recording ID: " + recordingId); status = JireconIq.Status.INITIATING; } else { logger.error("Unexpected status received: " + recResponse.toXML()); } } else { logger.error("Unexpected response: " + IQUtils.responseToXML(reply)); } } else if (isRecording() && doRecord.equals(State.OFF)) { // Send stop recording IQ JireconIq recording = new JireconIq(); recording.setTo(recorderComponentJid); recording.setType(IQ.Type.SET); recording.setFrom(from); recording.setRid(recordingId); recording.setMucJid(mucRoomJid); recording.setAction(JireconIq.Action.STOP); xmpp.getXmppConnection().sendPacket(recording); status = JireconIq.Status.STOPPING; } return true; } /** * Accepts Jirecon packets. * * <p>{@inheritDoc} */ @Override public boolean accept(Packet packet) { return packet instanceof JireconIq; } /** * Jirecon packet processing logic. 
* * <p>{@inheritDoc} */ @Override public void processPacket(Packet packet) { JireconIq recording = (JireconIq) packet; if ((JireconIq.Action.INFO != recording.getAction() && IQ.Type.RESULT == recording.getType()) || StringUtils.isNullOrEmpty(recording.getRid())) { logger.warn("Discarded: " + recording.toXML()); return; } if (!recording.getRid().equals(recordingId)) { logger.warn("Received IQ for unknown session: " + recording.toXML()); return; } if (status != recording.getStatus()) { status = recording.getStatus(); logger.info("Recording " + recordingId + " status: " + status); if (status == JireconIq.Status.STOPPED) { logger.info("Recording STOPPED: " + recordingId); recordingId = null; } } else { logger.info("Ignored duplicate status update: " + recording.toXML()); } } }
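/**
 * Illustrative sketch, not part of the original sources: how a caller might drive
 * <tt>JireconRecorder</tt>. The JIDs, token and path are hypothetical, and <tt>State</tt> stands
 * for the enum referenced by <tt>setRecording</tt>'s signature above (its fully qualified name is
 * outside this excerpt).
 */
class JireconRecorderExample {
  static void startRecording(OperationSetDirectSmackXmpp xmpp, String token) {
    JireconRecorder recorder =
        new JireconRecorder("room@muc.example.com/focus", "jirecon.example.com", xmpp);
    // Sends a JireconIq with Action.START; on an INITIATING reply the recorder stores the
    // session ID and subsequently reports isRecording() == true.
    recorder.setRecording("room@muc.example.com/focus", token, State.ON, "/tmp/recordings/room1");
  }
}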
/** * @author Bing SU ([email protected]) * @author Lyubomir Marinov * @author Boris Grozev */ public abstract class RTPConnectorInputStream implements PushSourceStream, Runnable { /** * The value of the property <tt>controls</tt> of <tt>RTPConnectorInputStream</tt> when there are * no controls. Explicitly defined in order to reduce unnecessary allocations. */ private static final Object[] EMPTY_CONTROLS = new Object[0]; /** * The length in bytes of the buffers of <tt>RTPConnectorInputStream</tt> receiving packets from * the network. */ public static final int PACKET_RECEIVE_BUFFER_LENGTH = 4 * 1024; /** * The <tt>Logger</tt> used by the <tt>RTPConnectorInputStream</tt> class and its instances to * print debug information. */ private static final Logger logger = Logger.getLogger(RTPConnectorInputStream.class); /** Packet receive buffer */ private final byte[] buffer = new byte[PACKET_RECEIVE_BUFFER_LENGTH]; /** Whether this stream is closed. Used to control the termination of worker thread. */ protected boolean closed; public Participant videoRecorder; /** * The <tt>DatagramPacketFilter</tt>s which allow dropping <tt>DatagramPacket</tt>s before they * are converted into <tt>RawPacket</tt>s. */ private DatagramPacketFilter[] datagramPacketFilters; /** Caught an IO exception during read from socket */ protected boolean ioError = false; /** * The packet data to be read out of this instance through its {@link #read(byte[], int, int)} * method. */ private RawPacket pkt; /** The <tt>Object</tt> which synchronizes the access to {@link #pkt}. */ private final Object pktSyncRoot = new Object(); /** The adapter of this <tt>PushSourceStream</tt> to the <tt>PushBufferStream</tt> interface. */ private final PushBufferStream pushBufferStream; /** * The pool of <tt>RawPacket[]</tt> instances to reduce their allocations and garbage collection. * Contains arrays full of <tt>null</tt>. */ private final Queue<RawPacket[]> rawPacketArrayPool = new LinkedBlockingQueue<RawPacket[]>(); /** * The pool of <tt>RawPacket</tt> instances to reduce their allocations and garbage collection. */ private final Queue<RawPacket> rawPacketPool = new LinkedBlockingQueue<RawPacket>(); /** The Thread receiving packets. */ protected Thread receiverThread = null; /** SourceTransferHandler object which is used to read packets. */ private SourceTransferHandler transferHandler; /** * Whether this <tt>RTPConnectorInputStream</tt> is enabled or disabled. While disabled, the * stream does not accept any packets. */ private boolean enabled = true; /** * Initializes a new <tt>RTPConnectorInputStream</tt> which is to receive packet data from a * specific UDP socket. */ public RTPConnectorInputStream() { // PacketLoggingService addDatagramPacketFilter( new DatagramPacketFilter() { /** * Used for debugging. As we don't log every packet, we must count them and decide which * to log. */ private long numberOfPackets = 0; public boolean accept(DatagramPacket p) { numberOfPackets++; if (RTPConnectorOutputStream.logPacket(numberOfPackets)) { PacketLoggingService packetLogging = LibJitsi.getPacketLoggingService(); if ((packetLogging != null) && packetLogging.isLoggingEnabled(PacketLoggingService.ProtocolName.RTP)) doLogPacket(p); } return true; } }); /* * Adapt this PushSourceStream to the PushBufferStream interface in * order to make it possible to read the Buffer flags of RawPacket. 
        /*
         * Adapt this PushSourceStream to the PushBufferStream interface in
         * order to make it possible to read the Buffer flags of RawPacket.
         */
        pushBufferStream
            = new PushBufferStreamAdapter(this, null)
            {
                @Override
                protected int doRead(
                        Buffer buffer,
                        byte[] data, int offset, int length)
                    throws IOException
                {
                    return
                        RTPConnectorInputStream.this.read(
                                buffer, data, offset, length);
                }
            };
    }

    /** Closes this stream and stops the worker thread. */
    public synchronized void close()
    {
        // Causes run() to exit its receive loop, which terminates the worker
        // thread.
        closed = true;
    }

    /**
     * Creates a new <tt>RawPacket</tt> from a specific <tt>DatagramPacket</tt>
     * in order to have this instance receive its packet data through its
     * {@link #read(byte[], int, int)} method. Returns an array of
     * <tt>RawPacket</tt> with the created packet as its first element (and
     * <tt>null</tt> for the other elements).
     *
     * <p>Allows extenders to intercept the packet data and possibly filter
     * and/or modify it.
     *
     * @param datagramPacket the <tt>DatagramPacket</tt> containing the packet
     *     data
     * @return an array of <tt>RawPacket</tt> containing the <tt>RawPacket</tt>
     *     which contains the packet data of the specified
     *     <tt>DatagramPacket</tt> as its first element.
     */
    protected RawPacket[] createRawPacket(DatagramPacket datagramPacket)
    {
        RawPacket[] pkts = rawPacketArrayPool.poll();

        if (pkts == null)
            pkts = new RawPacket[1];

        RawPacket pkt = rawPacketPool.poll();

        if (pkt == null)
            pkt = new RawPacket();

        pkt.setBuffer(datagramPacket.getData());
        pkt.setFlags(0);
        pkt.setLength(datagramPacket.getLength());
        pkt.setOffset(datagramPacket.getOffset());

        pkts[0] = pkt;
        return pkts;
    }

    /**
     * Provides a dummy implementation of
     * {@link RTPConnectorInputStream#endOfStream()} that always returns
     * <tt>false</tt>.
     *
     * @return <tt>false</tt>, no matter what.
     */
    public boolean endOfStream()
    {
        return false;
    }

    /**
     * Provides a dummy implementation of
     * {@link RTPConnectorInputStream#getContentDescriptor()} that always
     * returns <tt>null</tt>.
     *
     * @return <tt>null</tt>, no matter what.
     */
    public ContentDescriptor getContentDescriptor()
    {
        return null;
    }

    /**
     * Provides a dummy implementation of
     * {@link RTPConnectorInputStream#getContentLength()} that always returns
     * <tt>LENGTH_UNKNOWN</tt>.
     *
     * @return <tt>LENGTH_UNKNOWN</tt>, no matter what.
     */
    public long getContentLength()
    {
        return LENGTH_UNKNOWN;
    }

    /**
     * Implements {@link RTPConnectorInputStream#getControl(String)}. Returns
     * the <tt>PushBufferStream</tt> adapter of this instance when
     * <tt>controlType</tt> names the <tt>PushBufferStream</tt> interface and
     * <tt>null</tt> for any other control type.
     *
     * @param controlType the type of the control to be retrieved
     * @return the <tt>PushBufferStream</tt> adapter of this instance, or
     *     <tt>null</tt>.
     */
    public Object getControl(String controlType)
    {
        if (PushBufferStream.class.getName().equals(controlType))
            return pushBufferStream;
        else
            return null;
    }

    /**
     * Provides a dummy implementation of
     * {@link RTPConnectorInputStream#getControls()} that always returns
     * <tt>EMPTY_CONTROLS</tt>.
     *
     * @return <tt>EMPTY_CONTROLS</tt>, no matter what.
     */
    public Object[] getControls()
    {
        return EMPTY_CONTROLS;
    }

    /**
     * Provides a dummy implementation of
     * {@link RTPConnectorInputStream#getMinimumTransferSize()} that always
     * returns <tt>2 * 1024</tt>.
     *
     * @return <tt>2 * 1024</tt>, no matter what.
     */
    public int getMinimumTransferSize()
    {
        // 2048 bytes: comfortably larger than the typical 1500-byte MTU and
        // thus than any single received datagram.
        return 2 * 1024;
    }

    /**
     * Pools the specified <tt>RawPacket</tt> in order to avoid future
     * allocations and to reduce the effects of garbage collection.
     *
     * @param pkt the <tt>RawPacket</tt> to be offered to
     *     {@link #rawPacketPool}
     */
    private void poolRawPacket(RawPacket pkt)
    {
        pkt.setBuffer(null);
        pkt.setFlags(0);
        pkt.setLength(0);
        pkt.setOffset(0);
        rawPacketPool.offer(pkt);
    }
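    /*
     * Design note (editorial sketch, not part of the original source): a
     * RawPacket travels a closed loop. createRawPacket(DatagramPacket)
     * borrows an instance from rawPacketPool and fills it from the datagram;
     * run() publishes it through the pkt field; read(...) then either copies
     * it out, or a newer packet displaces it first. In every case
     * poolRawPacket(RawPacket) eventually clears the instance and returns it
     * to the pool, so steady-state reception allocates no new objects per
     * packet.
     */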
    /**
     * Copies the content of the most recently received packet into
     * <tt>buffer</tt>.
     *
     * @param buffer the <tt>byte[]</tt> that we'd like to copy the content of
     *     the packet to.
     * @param offset the position where we are supposed to start writing in
     *     <tt>buffer</tt>.
     * @param length the number of <tt>byte</tt>s available for writing in
     *     <tt>buffer</tt>.
     * @return the number of bytes read
     * @throws IOException if <tt>length</tt> is less than the size of the
     *     packet.
     */
    public int read(byte[] buffer, int offset, int length)
        throws IOException
    {
        return read(null, buffer, offset, length);
    }

    /**
     * Copies the content of the most recently received packet into
     * <tt>data</tt>.
     *
     * @param buffer an optional <tt>Buffer</tt> instance associated with the
     *     specified <tt>data</tt>, <tt>offset</tt> and <tt>length</tt> and
     *     provided to the method in case the implementation would like to
     *     provide additional <tt>Buffer</tt> properties such as <tt>flags</tt>
     * @param data the <tt>byte[]</tt> that we'd like to copy the content of
     *     the packet to.
     * @param offset the position where we are supposed to start writing in
     *     <tt>data</tt>.
     * @param length the number of <tt>byte</tt>s available for writing in
     *     <tt>data</tt>.
     * @return the number of bytes read
     * @throws IOException if <tt>length</tt> is less than the size of the
     *     packet.
     */
    protected int read(Buffer buffer, byte[] data, int offset, int length)
        throws IOException
    {
        if (data == null)
            throw new NullPointerException("data");

        if (ioError)
            return -1;

        RawPacket pkt;

        synchronized (pktSyncRoot)
        {
            pkt = this.pkt;
            this.pkt = null;
        }

        int pktLength;

        if (pkt == null)
        {
            pktLength = 0;
        }
        else
        {
            // By default, pkt will be returned to the pool after it was read.
            boolean poolPkt = true;

            try
            {
                pktLength = pkt.getLength();
                if (length < pktLength)
                {
                    /*
                     * If pkt is still the latest RawPacket made available to
                     * reading, reinstate it for the next invocation of read;
                     * otherwise, return it to the pool.
                     */
                    poolPkt = false;
                    throw new IOException(
                            "Input buffer not big enough for " + pktLength);
                }
                else
                {
                    byte[] pktBuffer = pkt.getBuffer();

                    if (pktBuffer == null)
                    {
                        throw new NullPointerException(
                                "pkt.buffer null, pkt.length " + pktLength
                                    + ", pkt.offset " + pkt.getOffset());
                    }
                    else
                    {
                        System.arraycopy(
                                pkt.getBuffer(), pkt.getOffset(),
                                data, offset,
                                pktLength);
                        if (buffer != null)
                            buffer.setFlags(pkt.getFlags());
                    }
                }
            }
            finally
            {
                if (!poolPkt)
                {
                    synchronized (pktSyncRoot)
                    {
                        if (this.pkt == null)
                            this.pkt = pkt;
                        else
                            poolPkt = true;
                    }
                }
                if (poolPkt)
                {
                    // Return pkt to the pool because it was successfully read.
                    poolRawPacket(pkt);
                }
            }
        }
        return pktLength;
    }

    /**
     * Logs the specified packet, e.g. to the <tt>PacketLoggingService</tt>.
     *
     * @param packet the packet to log
     */
    protected abstract void doLogPacket(DatagramPacket packet);

    /**
     * Receives a packet, e.g. by blocking on a socket.
     *
     * @param p the <tt>DatagramPacket</tt> into which the received data is to
     *     be stored
     * @throws IOException if something goes wrong during receiving
     */
    protected abstract void receivePacket(DatagramPacket p)
        throws IOException;

    /**
     * Listens for incoming datagrams, stores them for reading by the
     * <tt>read</tt> method and notifies the local <tt>transferHandler</tt>
     * that there's data to be read.
     */
    public void run()
    {
        DatagramPacket p
            = new DatagramPacket(buffer, 0, PACKET_RECEIVE_BUFFER_LENGTH);

        while (!closed)
        {
            try
            {
                // http://code.google.com/p/android/issues/detail?id=24765
                if (OSUtils.IS_ANDROID)
                    p.setLength(PACKET_RECEIVE_BUFFER_LENGTH);

                receivePacket(p);
            }
            catch (IOException e)
            {
                ioError = true;
                break;
            }
            /*
             * Do the DatagramPacketFilters accept the received
             * DatagramPacket?
             */
            DatagramPacketFilter[] datagramPacketFilters
                = getDatagramPacketFilters();
            boolean accept;

            if (!enabled)
                accept = false;
            else if (datagramPacketFilters == null)
                accept = true;
            else
            {
                accept = true;
                for (int i = 0; i < datagramPacketFilters.length; i++)
                {
                    try
                    {
                        if (!datagramPacketFilters[i].accept(p))
                        {
                            accept = false;
                            break;
                        }
                    }
                    catch (Throwable t)
                    {
                        // A misbehaving filter must not kill the receive
                        // thread; the packet is treated as accepted by the
                        // failing filter.
                        if (t instanceof ThreadDeath)
                            throw (ThreadDeath) t;
                    }
                }
            }

            if (accept)
            {
                RawPacket[] pkts = createRawPacket(p);

                for (int i = 0; i < pkts.length; i++)
                {
                    RawPacket pkt = pkts[i];

                    pkts[i] = null;

                    if (pkt != null)
                    {
                        if (pkt.isInvalid())
                        {
                            /*
                             * Return pkt to the pool because it is invalid
                             * and, consequently, will not be made available
                             * to reading.
                             */
                            poolRawPacket(pkt);
                        }
                        else
                        {
                            RawPacket oldPkt;

                            synchronized (pktSyncRoot)
                            {
                                oldPkt = this.pkt;
                                this.pkt = pkt;
                            }
                            if (oldPkt != null)
                            {
                                /*
                                 * Return oldPkt to the pool because it was
                                 * made available to reading and it was not
                                 * read.
                                 */
                                poolRawPacket(oldPkt);
                            }

                            if (videoRecorder != null)
                                videoRecorder.recordData(pkt);

                            if ((transferHandler != null) && !closed)
                            {
                                try
                                {
                                    transferHandler.transferData(this);
                                }
                                catch (Throwable t)
                                {
                                    /*
                                     * XXX We cannot allow transferHandler to
                                     * kill us.
                                     */
                                    if (t instanceof ThreadDeath)
                                    {
                                        throw (ThreadDeath) t;
                                    }
                                    else
                                    {
                                        logger.warn(
                                                "An RTP packet may not have"
                                                    + " been fully handled.",
                                                t);
                                    }
                                }
                            }
                        }
                    }
                }
                rawPacketArrayPool.offer(pkts);
            }
        }
    }

    /**
     * Sets the <tt>transferHandler</tt> that this connector should be
     * notifying when new data is available for reading.
     *
     * @param transferHandler the <tt>transferHandler</tt> that this connector
     *     should be notifying when new data is available for reading.
     */
    public void setTransferHandler(SourceTransferHandler transferHandler)
    {
        if (!closed)
            this.transferHandler = transferHandler;
    }

    /**
     * Changes the priority of the receiver thread. Currently a no-op.
     *
     * @param priority the new priority.
     */
    public void setPriority(int priority)
    {
        // currently no priority is set
        // if (receiverThread != null)
        //     receiverThread.setPriority(priority);
    }

    /**
     * Gets the <tt>DatagramPacketFilter</tt>s which allow dropping
     * <tt>DatagramPacket</tt>s before they are converted into
     * <tt>RawPacket</tt>s.
     *
     * @return the <tt>DatagramPacketFilter</tt>s which allow dropping
     *     <tt>DatagramPacket</tt>s before they are converted into
     *     <tt>RawPacket</tt>s.
     */
    public synchronized DatagramPacketFilter[] getDatagramPacketFilters()
    {
        return datagramPacketFilters;
    }

    /**
     * Adds a <tt>DatagramPacketFilter</tt> which allows dropping
     * <tt>DatagramPacket</tt>s before they are converted into
     * <tt>RawPacket</tt>s.
     *
     * @param datagramPacketFilter the <tt>DatagramPacketFilter</tt> which
     *     allows dropping <tt>DatagramPacket</tt>s before they are converted
     *     into <tt>RawPacket</tt>s
     */
    public synchronized void addDatagramPacketFilter(
            DatagramPacketFilter datagramPacketFilter)
    {
        if (datagramPacketFilter == null)
            throw new NullPointerException("datagramPacketFilter");

        if (datagramPacketFilters == null)
        {
            datagramPacketFilters
                = new DatagramPacketFilter[] { datagramPacketFilter };
        }
        else
        {
            final int length = datagramPacketFilters.length;

            // Do not add the same filter more than once.
            for (int i = 0; i < length; i++)
                if (datagramPacketFilter.equals(datagramPacketFilters[i]))
                    return;

            DatagramPacketFilter[] newDatagramPacketFilters
                = new DatagramPacketFilter[length + 1];

            System.arraycopy(
                    datagramPacketFilters, 0,
                    newDatagramPacketFilters, 0,
                    length);
            newDatagramPacketFilters[length] = datagramPacketFilter;
            datagramPacketFilters = newDatagramPacketFilters;
        }
    }
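    /*
     * Illustrative sketch (not part of the original source): a minimal
     * DatagramPacketFilter that accepts only datagrams which look like RTP
     * version 2 packets. An application would install it with
     * addDatagramPacketFilter(RTP_VERSION_2_FILTER_SKETCH); the constant
     * name is hypothetical.
     */
    private static final DatagramPacketFilter RTP_VERSION_2_FILTER_SKETCH
        = new DatagramPacketFilter()
        {
            public boolean accept(DatagramPacket p)
            {
                // The RTP version lives in the two most significant bits of
                // the first octet; RFC 3550 fixes it at 2.
                return
                    (p.getLength() > 0)
                        && (((p.getData()[p.getOffset()] & 0xC0) >>> 6) == 2);
            }
        };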
    /**
     * Enables or disables this <tt>RTPConnectorInputStream</tt>. While the
     * stream is disabled, it does not accept any packets.
     *
     * @param enabled <tt>true</tt> to enable, <tt>false</tt> to disable.
     */
    public void setEnabled(boolean enabled)
    {
        if (logger.isDebugEnabled())
            logger.debug("setEnabled: " + enabled);

        this.enabled = enabled;
    }
}
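/*
 * Usage sketch (an assumption, not part of the original source): a minimal
 * concrete subclass that receives from a plain DatagramSocket, roughly what
 * libjitsi's RTPConnectorUDPInputStream does. The UdpInputStreamSketch class
 * name and the FLAG_SKETCH constant are hypothetical. The createRawPacket
 * override merely demonstrates the intercept-and-modify hook documented
 * above; the RawPacket[] return type of that hook also leaves room for
 * mapping one datagram to several packets.
 */
import java.io.IOException;
import java.net.DatagramPacket;
import java.net.DatagramSocket;

public class UdpInputStreamSketch
    extends RTPConnectorInputStream
{
    /** Hypothetical application-defined Buffer flag used for illustration. */
    private static final int FLAG_SKETCH = 1 << 30;

    private final DatagramSocket socket;

    public UdpInputStreamSketch(DatagramSocket socket)
    {
        this.socket = socket;

        // Start the worker thread; run() loops on
        // receivePacket(DatagramPacket) until close() raises the closed flag.
        receiverThread = new Thread(this, getClass().getName());
        receiverThread.setDaemon(true);
        receiverThread.start();
    }

    @Override
    public synchronized void close()
    {
        super.close();
        // Unblocks a receive(DatagramPacket) that is in progress.
        socket.close();
    }

    @Override
    protected RawPacket[] createRawPacket(DatagramPacket datagramPacket)
    {
        // Reuse the pooling of the base class and tag each packet with a
        // hypothetical flag, illustrating the extension hook.
        RawPacket[] pkts = super.createRawPacket(datagramPacket);

        for (RawPacket pkt : pkts)
        {
            if (pkt != null)
                pkt.setFlags(pkt.getFlags() | FLAG_SKETCH);
        }
        return pkts;
    }

    @Override
    protected void doLogPacket(DatagramPacket packet)
    {
        // Packet logging is out of scope for this sketch; a real
        // implementation would hand the payload to the PacketLoggingService.
    }

    @Override
    protected void receivePacket(DatagramPacket p)
        throws IOException
    {
        // Blocks until a datagram arrives; run() then filters, pools and
        // publishes it for read(...).
        socket.receive(p);
    }
}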