/**
 * Makes an <tt>RTCPREMBPacket</tt> that provides receiver feedback to the
 * endpoint from which we receive.
 *
 * @return an <tt>RTCPREMBPacket</tt> that provides receiver feedback to the
 * endpoint from which we receive, or {@code null} if no bitrate estimate is
 * available yet.
 */
private RTCPREMBPacket makeRTCPREMBPacket() {
    // TODO we should only make REMBs if REMB support has been advertised.

    // Destination
    RemoteBitrateEstimator remoteBitrateEstimator
        = ((VideoMediaStream) getStream()).getRemoteBitrateEstimator();

    Collection<Integer> ssrcs = remoteBitrateEstimator.getSsrcs();

    // TODO(gp) intersect with SSRCs from signaled simulcast layers
    // NOTE(gp) The Google Congestion Control algorithm (sender side)
    // doesn't seem to care about the SSRCs in the dest field.
    long[] dest = new long[ssrcs.size()];
    int i = 0;
    for (Integer ssrc : ssrcs)
        dest[i++] = ssrc & 0xFFFFFFFFL;

    // Exp & mantissa
    long bitrate = remoteBitrateEstimator.getLatestEstimate();
    if (bitrate == -1)
        return null;

    if (logger.isDebugEnabled())
        logger.debug("Estimated bitrate: " + bitrate);

    // Create and return the packet. We use the stream's local source ID
    // (SSRC) as the SSRC of the packet sender.
    long streamSSRC = getLocalSSRC();

    return new RTCPREMBPacket(streamSSRC, /* mediaSSRC */ 0L, bitrate, dest);
}
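/*
 * Illustrative sketch only (not part of the original class): REMB (see
 * draft-alvestrand-rmcat-remb) carries the estimated bitrate as a 6-bit
 * exponent and an 18-bit mantissa, i.e. bitrate = mantissa * 2^exp.
 * RTCPREMBPacket performs this encoding internally; the hypothetical helper
 * below only demonstrates the "Exp & mantissa" step referenced above.
 */
private static long[] rembEncode(long bitrateBps) {
    int exp = 0;
    long mantissa = bitrateBps;
    // Shift right until the mantissa fits into its 18-bit field.
    while (mantissa > 0x3FFFF) {
        mantissa >>= 1;
        exp++;
    }
    return new long[] { exp, mantissa };
}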
/**
 * Sets the value of {@code lastN}, that is, the maximum number of endpoints
 * whose video streams will be forwarded to the endpoint. A value of
 * {@code -1} means that there is no limit.
 *
 * @param lastN the value to set.
 */
public void setLastN(int lastN) {
    if (logger.isDebugEnabled()) {
        logger.debug("Setting lastN=" + lastN);
    }

    List<String> endpointsToAskForKeyframe = null;
    synchronized (this) {
        // Since we have the lock anyway, call update() inside, so it
        // doesn't have to obtain it again. But keep the call to
        // askForKeyframes() outside.
        if (this.lastN != lastN) {
            // If we're just now enabling lastN, we don't need to ask for
            // keyframes as all streams were being forwarded already.
            boolean update = this.lastN != -1;

            this.lastN = lastN;

            if (lastN >= 0 && (currentLastN < 0 || currentLastN > lastN)) {
                currentLastN = lastN;
            }

            if (update) {
                endpointsToAskForKeyframe = update();
            }
        }
    }

    askForKeyframes(endpointsToAskForKeyframe);
}
/**
 * Implements notification in order to track socket state.
 */
@Override
public synchronized void onSctpNotification(SctpSocket socket,
                                            SctpNotification notification) {
    if (logger.isDebugEnabled()) {
        logger.debug("socket=" + socket + "; notification=" + notification);
    }

    switch (notification.sn_type) {
    case SctpNotification.SCTP_ASSOC_CHANGE:
        SctpNotification.AssociationChange assocChange
            = (SctpNotification.AssociationChange) notification;

        switch (assocChange.state) {
        case SctpNotification.AssociationChange.SCTP_COMM_UP:
            if (!assocIsUp) {
                boolean wasReady = isReady();

                assocIsUp = true;
                if (isReady() && !wasReady)
                    notifySctpConnectionReady();
            }
            break;

        case SctpNotification.AssociationChange.SCTP_COMM_LOST:
        case SctpNotification.AssociationChange.SCTP_SHUTDOWN_COMP:
        case SctpNotification.AssociationChange.SCTP_CANT_STR_ASSOC:
            try {
                closeStream();
            } catch (IOException e) {
                logger.error("Error closing SCTP socket", e);
            }
            break;
        }
        break;
    }
}
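/*
 * For context: isReady(), used above, is defined elsewhere in this class.
 * A plausible shape (an assumption, not the verified implementation) is
 * that the connection becomes ready once the SCTP association is up and
 * the incoming connection has been accepted:
 *
 *     public boolean isReady() {
 *         return assocIsUp && acceptedIncomingConnection;
 *     }
 *
 * which is why SCTP_COMM_UP re-checks isReady() and fires
 * notifySctpConnectionReady() only on the false -> true transition.
 */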
/**
 * Initializes the local list of endpoints
 * ({@link #conferenceSpeechActivityEndpoints}) with the current endpoints
 * from the conference.
 */
public synchronized void initializeConferenceEndpoints() {
    speechActivityEndpointsChanged(
        channel.getConferenceSpeechActivity().getEndpoints());

    if (logger.isDebugEnabled()) {
        logger.debug("Initialized the list of endpoints: "
            + conferenceSpeechActivityEndpoints.toString());
    }
}
/**
 * Returns the primary SSRC paired, in an FID source-group, with the SSRC of
 * the RTX packet {@code pkt}. If no pairing is found, returns -1.
 *
 * @param pkt the received RTX packet whose primary SSRC to resolve.
 * @return the primary SSRC paired with {@code pkt}'s SSRC in an FID
 * source-group, or -1 if none is found.
 */
private long getPrimarySsrc(RawPacket pkt) {
    MediaStreamTrackReceiver receiver
        = channel.getStream().getMediaStreamTrackReceiver();

    if (receiver == null) {
        if (logger.isDebugEnabled()) {
            logger.debug(
                "Dropping an incoming RTX packet from an unknown source.");
        }
        return -1;
    }

    RTPEncoding encoding = receiver.resolveRTPEncoding(pkt);
    if (encoding == null) {
        if (logger.isDebugEnabled()) {
            logger.debug(
                "Dropping an incoming RTX packet from an unknown source.");
        }
        return -1;
    }

    return encoding.getPrimarySSRC();
}
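/*
 * Background, as an illustrative sketch (hypothetical names; the real
 * lookup is performed by MediaStreamTrackReceiver.resolveRTPEncoding): an
 * FID source-group from the signaled SDP, e.g.
 *
 *     a=ssrc-group:FID 1234 5678
 *
 * pairs primary SSRC 1234 with RTX SSRC 5678. A minimal mapping built from
 * such groups could look like this:
 */
private final Map<Long, Long> rtxSsrcToPrimarySsrc = new HashMap<>();

/** Illustrative only: resolves an RTX SSRC to its FID-paired primary SSRC. */
private long lookupPrimarySsrc(long rtxSsrc) {
    Long primary = rtxSsrcToPrimarySsrc.get(rtxSsrc);
    return (primary == null) ? -1 : primary;
}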
/**
 * Notifies this instance that the ordered list of endpoints (specified as a
 * list of endpoint IDs) in the conference has changed.
 *
 * @param endpointIds the new ordered list of endpoints (specified as a list
 * of endpoint IDs) in the conference.
 * @return the list of IDs of endpoints which were added to the list of
 * forwarded endpoints as a result of the call, or {@code null} if the list
 * of conference endpoints has not changed.
 */
private synchronized List<String> speechActivityEndpointIdsChanged(
        List<String> endpointIds) {
    if (conferenceSpeechActivityEndpoints.equals(endpointIds)) {
        if (logger.isDebugEnabled()) {
            logger.debug("Conference endpoints have not changed.");
        }
        return null;
    } else {
        List<String> newEndpoints = new LinkedList<>(endpointIds);
        newEndpoints.removeAll(conferenceSpeechActivityEndpoints);

        conferenceSpeechActivityEndpoints = endpointIds;

        return update(newEndpoints);
    }
}
/**
 * Sets the list of "pinned" endpoints (i.e. endpoints for which video should
 * always be forwarded, regardless of {@code lastN}).
 *
 * @param newPinnedEndpointIds the list of endpoint IDs to set.
 */
public void setPinnedEndpointIds(List<String> newPinnedEndpointIds) {
    if (logger.isDebugEnabled()) {
        logger.debug("Setting pinned endpoints: "
            + newPinnedEndpointIds.toString());
    }

    List<String> endpointsToAskForKeyframe = null;
    synchronized (this) {
        // Since we have the lock anyway, call update() inside, so it
        // doesn't have to obtain it again. But keep the call to
        // askForKeyframes() outside.
        if (!pinnedEndpoints.equals(newPinnedEndpointIds)) {
            pinnedEndpoints
                = Collections.unmodifiableList(newPinnedEndpointIds);

            endpointsToAskForKeyframe = update();
        }
    }

    askForKeyframes(endpointsToAskForKeyframe);
}
/**
 * Removes the RTX encapsulation from a packet.
 *
 * @param pkt the packet to remove the RTX encapsulation from.
 * @return the original media packet represented by {@code pkt}, or
 * {@code null} if we couldn't reconstruct the original packet.
 */
private RawPacket deRtx(RawPacket pkt) {
    boolean success = false;

    if (pkt.getPayloadLength() - pkt.getPaddingSize() < 2) {
        // We need at least 2 bytes to read the OSN field.
        if (logger.isDebugEnabled()) {
            logger.debug(
                "Dropping an incoming RTX packet with padding only: " + pkt);
        }
        return null;
    }

    long mediaSsrc = getPrimarySsrc(pkt);
    if (mediaSsrc != -1) {
        if (rtxAssociatedPayloadType != -1) {
            int osn = pkt.getOriginalSequenceNumber();

            // Remove the RTX header by moving the RTP header two bytes
            // right.
            byte[] buf = pkt.getBuffer();
            int off = pkt.getOffset();
            System.arraycopy(buf, off, buf, off + 2, pkt.getHeaderLength());

            pkt.setOffset(off + 2);
            pkt.setLength(pkt.getLength() - 2);

            pkt.setSSRC((int) mediaSsrc);
            pkt.setSequenceNumber(osn);
            pkt.setPayloadType(rtxAssociatedPayloadType);

            success = true;
        } else {
            logger.warn("RTX packet received, but no APT is defined. Packet"
                + " SSRC " + pkt.getSSRCAsLong() + ", associated media SSRC "
                + mediaSsrc);
        }
    }

    // If we failed to handle the RTX packet, drop it.
    return success ? pkt : null;
}
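/*
 * For reference, the RTX payload layout that deRtx() undoes (RFC 4588):
 * the first two bytes of the RTX payload hold the original sequence number
 * (OSN), followed by the original media payload:
 *
 *     0                   1                   2
 *     0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 ...
 *    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 *    |             OSN               | original payload ...
 *    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 *
 * RawPacket.getOriginalSequenceNumber() reads that field; an equivalent
 * stand-alone sketch (hypothetical helper, big-endian read):
 */
private static int readOsn(byte[] buf, int payloadOffset) {
    return ((buf[payloadOffset] & 0xFF) << 8)
        | (buf[payloadOffset + 1] & 0xFF);
}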
/**
 * Notifies this instance that the ordered list of endpoints in the
 * conference has changed.
 *
 * @param endpoints the new ordered list of endpoints in the conference.
 * @return the list of endpoints which were added to the list of forwarded
 * endpoints as a result of the call, or an empty list if none were added.
 */
public List<Endpoint> speechActivityEndpointsChanged(List<Endpoint> endpoints) {
    List<String> newEndpointIdList = getIDs(endpoints);
    List<String> enteringEndpointIds
        = speechActivityEndpointIdsChanged(newEndpointIdList);

    if (logger.isDebugEnabled()) {
        logger.debug("New list of conference endpoints: "
            + newEndpointIdList.toString() + "; entering endpoints: "
            + (enteringEndpointIds == null
                ? "none" : enteringEndpointIds.toString()));
    }

    List<Endpoint> ret = new LinkedList<>();
    if (enteringEndpointIds != null) {
        for (Endpoint endpoint : endpoints) {
            if (enteringEndpointIds.contains(endpoint.getID())) {
                ret.add(endpoint);
            }
        }
    }

    return ret;
}
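/*
 * For context: getIDs(), used above, simply projects a list of Endpoints
 * onto their string IDs. A sketch of its assumed shape (the actual helper
 * is defined elsewhere in this class; the name below is chosen to avoid a
 * clash and is hypothetical):
 */
private static List<String> getEndpointIds(List<Endpoint> endpoints) {
    List<String> ids = new LinkedList<>();
    if (endpoints != null) {
        for (Endpoint endpoint : endpoints) {
            ids.add(endpoint.getID());
        }
    }
    return ids;
}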
/**
 * Handles a control packet.
 *
 * @param data raw packet data that arrived on the control PPID.
 * @param sid SCTP stream ID on which the data has arrived.
 */
private synchronized void onCtrlPacket(byte[] data, int sid)
    throws IOException {
    ByteBuffer buffer = ByteBuffer.wrap(data);
    int messageType = /* 1 byte unsigned integer */ 0xFF & buffer.get();

    if (messageType == MSG_CHANNEL_ACK) {
        if (logger.isDebugEnabled()) {
            logger.debug(getEndpoint().getID() + " ACK received SID: " + sid);
        }

        // Open channel ACK
        WebRtcDataStream channel = channels.get(sid);
        if (channel != null) {
            // The acknowledged check prevents us from firing multiple
            // notifications if we get more than one ACK (by mistake/bug).
            if (!channel.isAcknowledged()) {
                channel.ackReceived();
                notifyChannelOpened(channel);
            } else {
                logger.warn("Redundant ACK received for SID: " + sid);
            }
        } else {
            logger.error("No channel exists on sid: " + sid);
        }
    } else if (messageType == MSG_OPEN_CHANNEL) {
        int channelType = /* 1 byte unsigned integer */ 0xFF & buffer.get();
        int priority
            = /* 2 bytes unsigned integer */ 0xFFFF & buffer.getShort();
        long reliability
            = /* 4 bytes unsigned integer */ 0xFFFFFFFFL & buffer.getInt();
        int labelLength
            = /* 2 bytes unsigned integer */ 0xFFFF & buffer.getShort();
        int protocolLength
            = /* 2 bytes unsigned integer */ 0xFFFF & buffer.getShort();
        String label;
        String protocol;

        if (labelLength == 0) {
            label = "";
        } else {
            byte[] labelBytes = new byte[labelLength];
            buffer.get(labelBytes);
            label = new String(labelBytes, "UTF-8");
        }

        if (protocolLength == 0) {
            protocol = "";
        } else {
            byte[] protocolBytes = new byte[protocolLength];
            buffer.get(protocolBytes);
            protocol = new String(protocolBytes, "UTF-8");
        }

        if (logger.isDebugEnabled()) {
            logger.debug(getEndpoint().getID()
                + " data channel open request on SID: " + sid + " type: "
                + channelType + " prio: " + priority + " reliab: "
                + reliability + " label: " + label + " proto: " + protocol);
        }

        if (channels.containsKey(sid)) {
            logger.error("Channel on sid: " + sid + " already exists");
        }

        WebRtcDataStream newChannel
            = new WebRtcDataStream(sctpSocket, sid, label, true);
        channels.put(sid, newChannel);

        sendOpenChannelAck(sid);

        notifyChannelOpened(newChannel);
    } else {
        logger.error("Unexpected ctrl msg type: " + messageType);
    }
}
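/*
 * Sketch of the inverse operation, for reference: building a
 * DATA_CHANNEL_OPEN message with the same wire layout that onCtrlPacket()
 * parses (the WebRTC Data Channel Establishment Protocol, RFC 8832). This
 * is a hypothetical helper, not the method this class actually uses to
 * send such messages.
 */
private static byte[] buildOpenChannelMessage(
        int channelType, int priority, long reliability,
        String label, String protocol)
    throws IOException {
    byte[] labelBytes = label.getBytes("UTF-8");
    byte[] protocolBytes = protocol.getBytes("UTF-8");
    ByteBuffer buffer
        = ByteBuffer.allocate(12 + labelBytes.length + protocolBytes.length);

    buffer.put((byte) MSG_OPEN_CHANNEL);           // message type
    buffer.put((byte) channelType);                // channel type
    buffer.putShort((short) priority);             // priority
    buffer.putInt((int) reliability);              // reliability parameter
    buffer.putShort((short) labelBytes.length);    // label length
    buffer.putShort((short) protocolBytes.length); // protocol length
    buffer.put(labelBytes);
    buffer.put(protocolBytes);

    return buffer.array();
}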
private void logDebug(String msg) {
    if (logger.isDebugEnabled()) {
        msg = getSimulcastEngine().getVideoChannel().getEndpoint().getID()
            + ": " + msg;
        logger.debug(msg);
    }
}
/**
 * Maybe sends a data channel command to the associated <tt>Endpoint</tt> to
 * make it start streaming its hq stream, if it's being watched by some
 * receiver.
 */
public void maybeSendStartHighQualityStreamCommand() {
    if (nativeSimulcast || !hasLayers()) {
        // In native simulcast the client adjusts its layers autonomously,
        // so we don't need to (nor can we) control it with data channel
        // messages.
        return;
    }

    Endpoint newEndpoint
        = getSimulcastEngine().getVideoChannel().getEndpoint();
    SimulcastLayer[] newSimulcastLayers = getSimulcastLayers();

    SctpConnection sctpConnection;
    if (newSimulcastLayers == null
        || newSimulcastLayers.length <= 1
        /* newEndpoint != null is implied */
        || (sctpConnection = newEndpoint.getSctpConnection()) == null
        || !sctpConnection.isReady()
        || sctpConnection.isExpired()) {
        return;
    }

    // We have a new endpoint and it has an SCTP connection that is ready
    // and not expired. If somebody else is watching the new endpoint,
    // start its hq stream.
    boolean startHighQualityStream = false;

    for (Endpoint e
        : getSimulcastEngine().getVideoChannel().getContent()
            .getConference().getEndpoints()) {
        // TODO(gp) we need some synchronization here. What if the
        // selected endpoint changes while we're in the loop?

        if (e == newEndpoint)
            continue;

        Endpoint eSelectedEndpoint = e.getEffectivelySelectedEndpoint();

        if (newEndpoint == eSelectedEndpoint) {
            // Somebody is watching the new endpoint, or somebody has not
            // yet signaled its selected endpoint to the bridge; start the
            // hq stream.

            if (logger.isDebugEnabled()) {
                Map<String, Object> map = new HashMap<String, Object>(3);
                map.put("e", e);
                map.put("newEndpoint", newEndpoint);
                map.put("maybe", eSelectedEndpoint == null ? "(maybe) " : "");

                StringCompiler sc = new StringCompiler(map)
                    .c("{e.id} is {maybe} watching {newEndpoint.id}.");

                logDebug(sc.toString().replaceAll("\\s+", " "));
            }

            startHighQualityStream = true;
            break;
        }
    }

    if (startHighQualityStream) {
        // TODO(gp) this assumes only a single hq stream.

        logDebug(getSimulcastEngine().getVideoChannel().getEndpoint().getID()
            + " notifies " + newEndpoint.getID()
            + " to start its HQ stream.");

        SimulcastLayer hqLayer
            = newSimulcastLayers[newSimulcastLayers.length - 1];
        StartSimulcastLayerCommand command
            = new StartSimulcastLayerCommand(hqLayer);
        String json = mapper.toJson(command);

        try {
            newEndpoint.sendMessageOnDataChannel(json);
        } catch (IOException e) {
            logError(newEndpoint.getID()
                + " failed to send message on data channel.", e);
        }
    }
}
/**
 * Attempts to enable or disable media recording for this <tt>Conference</tt>.
 *
 * @param recording whether to enable or disable recording.
 * @return the state of the media recording for this <tt>Conference</tt>
 * after the attempt to enable (or disable).
 */
public boolean setRecording(boolean recording) {
    if (recording != this.recording) {
        if (recording) {
            // Try to enable recording.
            if (logger.isDebugEnabled()) {
                logger.debug(
                    "Starting recording for conference with id=" + getID());
            }

            String path = getRecordingPath();
            boolean failedToStart = !checkRecordingDirectory(path);

            if (!failedToStart) {
                RecorderEventHandler handler = getRecorderEventHandler();
                if (handler == null)
                    failedToStart = true;
            }
            if (!failedToStart) {
                EndpointRecorder endpointRecorder = getEndpointRecorder();
                if (endpointRecorder == null) {
                    failedToStart = true;
                } else {
                    for (Endpoint endpoint : getEndpoints())
                        endpointRecorder.updateEndpoint(endpoint);
                }
            }

            /*
             * The Recorders of the Contents need to share a single
             * Synchronizer; we take it from the first Recorder.
             */
            boolean first = true;
            Synchronizer synchronizer = null;
            for (Content content : contents) {
                MediaType mediaType = content.getMediaType();
                if (!MediaType.VIDEO.equals(mediaType)
                    && !MediaType.AUDIO.equals(mediaType)) {
                    continue;
                }

                if (!failedToStart)
                    failedToStart = !content.setRecording(true, path);
                if (failedToStart)
                    break;

                if (first) {
                    first = false;
                    synchronizer = content.getRecorder().getSynchronizer();
                } else {
                    Recorder recorder = content.getRecorder();
                    if (recorder != null)
                        recorder.setSynchronizer(synchronizer);
                }

                content.feedKnownSsrcsToSynchronizer();
            }

            if (failedToStart) {
                recording = false;
                logger.warn("Failed to start media recording for conference "
                    + getID());
            }
        }

        // Either we were asked to disable recording, or we failed to
        // enable it.
        if (!recording) {
            if (logger.isDebugEnabled()) {
                logger.debug(
                    "Stopping recording for conference with id=" + getID());
            }

            for (Content content : contents) {
                MediaType mediaType = content.getMediaType();
                if (MediaType.AUDIO.equals(mediaType)
                    || MediaType.VIDEO.equals(mediaType)) {
                    content.setRecording(false, null);
                }
            }

            if (recorderEventHandler != null)
                recorderEventHandler.close();
            recorderEventHandler = null;
            recordingPath = null;
            recordingDirectory = null;

            if (endpointRecorder != null)
                endpointRecorder.close();
            endpointRecorder = null;
        }

        this.recording = recording;
    }

    return this.recording;
}
/**
 * Implements {@link ControllerListener#controllerUpdate(ControllerEvent)}.
 * Handles events from the <tt>Processor</tt>s that this instance uses to
 * transcode media.
 *
 * @param ev the event to handle.
 */
public void controllerUpdate(ControllerEvent ev) {
    if (ev == null || ev.getSourceController() == null) {
        return;
    }

    Processor processor = (Processor) ev.getSourceController();
    ReceiveStreamDesc desc = findReceiveStream(processor);

    if (desc == null) {
        logger.warn("Event from an orphaned processor, ignoring: " + ev);
        return;
    }

    if (ev instanceof ConfigureCompleteEvent) {
        if (logger.isInfoEnabled()) {
            logger.info("Configured processor for ReceiveStream ssrc="
                + desc.ssrc + " (" + desc.format + ") "
                + System.currentTimeMillis());
        }

        boolean audio = desc.format instanceof AudioFormat;

        if (audio) {
            ContentDescriptor cd
                = processor.setContentDescriptor(AUDIO_CONTENT_DESCRIPTOR);
            if (!AUDIO_CONTENT_DESCRIPTOR.equals(cd)) {
                logger.error("Failed to set the Processor content descriptor"
                    + " to " + AUDIO_CONTENT_DESCRIPTOR
                    + ". Actual result: " + cd);
                removeReceiveStream(desc, false);
                return;
            }
        }

        for (TrackControl track : processor.getTrackControls()) {
            Format trackFormat = track.getFormat();

            if (audio) {
                final long ssrc = desc.ssrc;

                SilenceEffect silenceEffect;
                if (Constants.OPUS_RTP.equals(desc.format.getEncoding())) {
                    silenceEffect = new SilenceEffect(48000);
                } else {
                    // We haven't tested that the RTP timestamps survive
                    // the journey through the chain when codecs other than
                    // opus are in use, so for the moment we rely on FMJ's
                    // timestamps for non-opus formats.
                    silenceEffect = new SilenceEffect();
                }

                silenceEffect.setListener(new SilenceEffect.Listener() {
                    boolean first = true;

                    @Override
                    public void onSilenceNotInserted(long timestamp) {
                        if (first) {
                            first = false;
                            // Send an event only.
                            audioRecordingStarted(ssrc, timestamp);
                        } else {
                            // Change the file and send an event.
                            resetRecording(ssrc, timestamp);
                        }
                    }
                });

                desc.silenceEffect = silenceEffect;

                AudioLevelEffect audioLevelEffect = new AudioLevelEffect();
                audioLevelEffect.setAudioLevelListener(
                    new SimpleAudioLevelListener() {
                        @Override
                        public void audioLevelChanged(int level) {
                            activeSpeakerDetector.levelChanged(ssrc, level);
                        }
                    });

                try {
                    // We add an effect, which will insert "silence" in
                    // place of lost packets.
                    track.setCodecChain(
                        new Codec[] { silenceEffect, audioLevelEffect });
                } catch (UnsupportedPlugInException upie) {
                    logger.warn("Failed to insert silence effect: " + upie);
                    // But do go on, a recording without extra silence is
                    // better than nothing ;)
                }
            } else {
                // Transcode vp8/rtp to vp8 (i.e. depacketize vp8).
                if (trackFormat.matches(vp8RtpFormat)) {
                    track.setFormat(vp8Format);
                } else {
                    logger.error("Unsupported track format: " + trackFormat
                        + " for ssrc=" + desc.ssrc);
                    // We currently only support vp8.
                    removeReceiveStream(desc, false);
                    return;
                }
            }
        }

        processor.realize();
    } else if (ev instanceof RealizeCompleteEvent) {
        desc.dataSource = processor.getDataOutput();

        long ssrc = desc.ssrc;
        boolean audio = desc.format instanceof AudioFormat;
        String suffix = audio ? AUDIO_FILENAME_SUFFIX : VIDEO_FILENAME_SUFFIX;

        // XXX '\' on windows?
        String filename = getNextFilename(path + "/" + ssrc, suffix);
        desc.filename = filename;

        DataSink dataSink;
        if (audio) {
            try {
                dataSink = Manager.createDataSink(desc.dataSource,
                    new MediaLocator("file:" + filename));
            } catch (NoDataSinkException ndse) {
                logger.error("Could not create DataSink: " + ndse);
                removeReceiveStream(desc, false);
                return;
            }
        } else {
            dataSink = new WebmDataSink(filename, desc.dataSource);
        }

        if (logger.isInfoEnabled()) {
            logger.info("Created DataSink (" + dataSink + ") for SSRC="
                + ssrc + ". Output filename: " + filename);
        }

        try {
            dataSink.open();
        } catch (IOException e) {
            logger.error("Failed to open DataSink (" + dataSink + ") for"
                + " SSRC=" + ssrc + ": " + e);
            removeReceiveStream(desc, false);
            return;
        }

        if (!audio) {
            final WebmDataSink webmDataSink = (WebmDataSink) dataSink;
            webmDataSink.setSsrc(ssrc);
            webmDataSink.setEventHandler(eventHandler);
            webmDataSink.setKeyFrameControl(new KeyFrameControlAdapter() {
                @Override
                public boolean requestKeyFrame(boolean urgent) {
                    return requestFIR(webmDataSink);
                }
            });
        }

        try {
            dataSink.start();
        } catch (IOException e) {
            logger.error("Failed to start DataSink (" + dataSink + ") for"
                + " SSRC=" + ssrc + ". " + e);
            removeReceiveStream(desc, false);
            return;
        }

        if (logger.isInfoEnabled()) {
            logger.info("Started DataSink for SSRC=" + ssrc);
        }

        desc.dataSink = dataSink;

        processor.start();
    } else if (logger.isDebugEnabled()) {
        logger.debug("Unhandled ControllerEvent from the Processor for ssrc="
            + desc.ssrc + ": " + ev);
    }
}
/**
 * Recalculates the list of forwarded endpoints based on the current values
 * of the various parameters of this instance ({@link #lastN},
 * {@link #conferenceSpeechActivityEndpoints}, {@link #pinnedEndpoints}).
 *
 * @param newConferenceEndpoints A list of endpoints which entered the
 * conference since the last call to this method. They need not be asked for
 * keyframes, because they were never filtered by this
 * {@code LastNController}.
 * @return the list of IDs of endpoints which were added to
 * {@link #forwardedEndpoints} (i.e. of endpoints "entering last-n") as a
 * result of this call. Returns {@code null} if no endpoints were added.
 */
private synchronized List<String> update(List<String> newConferenceEndpoints) {
    List<String> newForwardedEndpoints = new LinkedList<>();
    String ourEndpointId = getEndpointId();

    if (conferenceSpeechActivityEndpoints == INITIAL_EMPTY_LIST) {
        conferenceSpeechActivityEndpoints
            = getIDs(channel.getConferenceSpeechActivity().getEndpoints());
        newConferenceEndpoints = conferenceSpeechActivityEndpoints;
    }

    if (lastN < 0 && currentLastN < 0) {
        // Last-N is disabled, we forward everything.
        newForwardedEndpoints.addAll(conferenceSpeechActivityEndpoints);
        if (ourEndpointId != null) {
            newForwardedEndpoints.remove(ourEndpointId);
        }
    } else {
        // Here we have lastN >= 0 || currentLastN >= 0, which implies
        // currentLastN >= 0.

        // Pinned endpoints are always forwarded...
        newForwardedEndpoints.addAll(getPinnedEndpoints());
        // ...as long as they are still in the conference.
        newForwardedEndpoints.retainAll(conferenceSpeechActivityEndpoints);

        if (newForwardedEndpoints.size() > currentLastN) {
            // What do we want in this case? It looks like a contradictory
            // request from the client, but maybe it makes for a good API
            // on the client to allow the pinned to override last-n.
            // Unfortunately, this will not play well with Adaptive-Last-N
            // or changes to Last-N for other reasons.
        } else if (newForwardedEndpoints.size() < currentLastN) {
            for (String endpointId : conferenceSpeechActivityEndpoints) {
                if (newForwardedEndpoints.size() < currentLastN) {
                    if (!endpointId.equals(ourEndpointId)
                        && !newForwardedEndpoints.contains(endpointId)) {
                        newForwardedEndpoints.add(endpointId);
                    }
                } else {
                    break;
                }
            }
        }
    }

    List<String> enteringEndpoints;
    if (forwardedEndpoints.equals(newForwardedEndpoints)) {
        // We want forwardedEndpoints != INITIAL_EMPTY_LIST
        forwardedEndpoints = newForwardedEndpoints;

        enteringEndpoints = null;
    } else {
        enteringEndpoints = new ArrayList<>(newForwardedEndpoints);
        enteringEndpoints.removeAll(forwardedEndpoints);

        if (logger.isDebugEnabled()) {
            logger.debug("Forwarded endpoints changed: "
                + forwardedEndpoints.toString() + " -> "
                + newForwardedEndpoints.toString() + ". Entering: "
                + enteringEndpoints.toString());
        }

        forwardedEndpoints
            = Collections.unmodifiableList(newForwardedEndpoints);

        if (lastN >= 0 || currentLastN >= 0) {
            // TODO: we may want to do this asynchronously.
            channel.sendLastNEndpointsChangeEventOnDataChannel(
                forwardedEndpoints, enteringEndpoints);
        }
    }

    // If lastN is disabled, the endpoints entering forwardedEndpoints were
    // never filtered, so they don't need to be asked for keyframes.
    if (lastN < 0 && currentLastN < 0) {
        enteringEndpoints = null;
    }

    if (enteringEndpoints != null && newConferenceEndpoints != null) {
        // Endpoints just entering the conference need not be asked for
        // keyframes.
        enteringEndpoints.removeAll(newConferenceEndpoints);
    }

    return enteringEndpoints;
}
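/*
 * Worked example of update() (illustrative): suppose currentLastN = 2, the
 * speech-activity order is [A, B, C, D], pinnedEndpoints = [C], and our
 * own endpoint is D. Then:
 *
 *   1. Pinned endpoints first:        newForwardedEndpoints = [C]
 *   2. Fill from the activity order:  newForwardedEndpoints = [C, A]
 *      (B is skipped because the list already holds currentLastN entries
 *      after A; D is skipped because it is our own endpoint.)
 *
 * If forwardedEndpoints was previously [A, B], the entering list is [C],
 * and C is the endpoint that may need to be asked for a keyframe.
 */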
private void runOnDtlsTransport(StreamConnector connector)
    throws IOException {
    DtlsControlImpl dtlsControl
        = (DtlsControlImpl) getTransportManager().getDtlsControl(this);
    DtlsTransformEngine engine = dtlsControl.getTransformEngine();
    final DtlsPacketTransformer transformer
        = (DtlsPacketTransformer) engine.getRTPTransformer();

    byte[] receiveBuffer = new byte[SCTP_BUFFER_SIZE];

    if (LOG_SCTP_PACKETS) {
        System.setProperty(
            ConfigurationService.PNAME_SC_HOME_DIR_LOCATION,
            System.getProperty("java.io.tmpdir"));
        System.setProperty(
            ConfigurationService.PNAME_SC_HOME_DIR_NAME,
            SctpConnection.class.getName());
    }

    synchronized (this) {
        // FIXME The local SCTP port is hardcoded in the bridge's offer SDP
        // (Jitsi Meet).
        sctpSocket = Sctp.createSocket(5000);
        assocIsUp = false;
        acceptedIncomingConnection = false;
    }

    // Implement the output network link for the SCTP stack on top of the
    // DTLS transport.
    sctpSocket.setLink(new NetworkLink() {
        @Override
        public void onConnOut(SctpSocket s, byte[] packet)
            throws IOException {
            if (LOG_SCTP_PACKETS) {
                LibJitsi.getPacketLoggingService().logPacket(
                    PacketLoggingService.ProtocolName.ICE4J,
                    new byte[] { 0, 0, 0, (byte) debugId },
                    5000,
                    new byte[] { 0, 0, 0, (byte) (debugId + 1) },
                    remoteSctpPort,
                    PacketLoggingService.TransportName.UDP,
                    true,
                    packet);
            }

            // Send through the DTLS transport.
            transformer.sendApplicationData(packet, 0, packet.length);
        }
    });

    if (logger.isDebugEnabled()) {
        logger.debug("Connecting SCTP to port: " + remoteSctpPort + " to "
            + getEndpoint().getID());
    }

    sctpSocket.setNotificationListener(this);
    sctpSocket.listen();

    // FIXME manage threads
    threadPool.execute(new Runnable() {
        @Override
        public void run() {
            SctpSocket sctpSocket = null;
            try {
                // sctpSocket is set to null on close.
                sctpSocket = SctpConnection.this.sctpSocket;
                while (sctpSocket != null) {
                    if (sctpSocket.accept()) {
                        acceptedIncomingConnection = true;
                        break;
                    }
                    Thread.sleep(100);
                    sctpSocket = SctpConnection.this.sctpSocket;
                }
                if (isReady()) {
                    notifySctpConnectionReady();
                }
            } catch (Exception e) {
                logger.error("Error accepting SCTP connection", e);
            }

            if (sctpSocket == null && logger.isInfoEnabled()) {
                logger.info("SctpConnection " + getID() + " closed before"
                    + " SctpSocket accept()-ed.");
            }
        }
    });

    // Notify that from now on the SCTP connection is considered functional.
    sctpSocket.setDataCallback(this);

    // Set up iceSocket.
    DatagramSocket datagramSocket = connector.getDataSocket();
    if (datagramSocket != null) {
        this.iceSocket = new IceUdpSocketWrapper(datagramSocket);
    } else {
        this.iceSocket
            = new IceTcpSocketWrapper(connector.getDataTCPSocket());
    }

    DatagramPacket rcvPacket
        = new DatagramPacket(receiveBuffer, 0, receiveBuffer.length);

    // Receive loop; breaks when the SCTP socket is closed.
    try {
        do {
            iceSocket.receive(rcvPacket);

            RawPacket raw = new RawPacket(rcvPacket.getData(),
                rcvPacket.getOffset(), rcvPacket.getLength());

            raw = transformer.reverseTransform(raw);
            // Check for app data.
            if (raw == null)
                continue;

            if (LOG_SCTP_PACKETS) {
                LibJitsi.getPacketLoggingService().logPacket(
                    PacketLoggingService.ProtocolName.ICE4J,
                    new byte[] { 0, 0, 0, (byte) (debugId + 1) },
                    remoteSctpPort,
                    new byte[] { 0, 0, 0, (byte) debugId },
                    5000,
                    PacketLoggingService.TransportName.UDP,
                    false,
                    raw.getBuffer(), raw.getOffset(), raw.getLength());
            }

            // Pass the network packet to the SCTP stack.
            sctpSocket.onConnIn(raw.getBuffer(), raw.getOffset(),
                raw.getLength());
        } while (true);
    } finally {
        // Eventually close the socket, although it should happen from
        // expire().
        synchronized (this) {
            assocIsUp = false;
            acceptedIncomingConnection = false;
            if (sctpSocket != null) {
                sctpSocket.close();
                sctpSocket = null;
            }
        }
    }
}
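/*
 * For orientation, the stack that runOnDtlsTransport() assembles, from the
 * application down to the network (a summary of the code above):
 *
 *     WebRtcDataStream / onCtrlPacket()   application data and DCEP control
 *     SctpSocket (usrsctp, port 5000)     SCTP association
 *     DtlsPacketTransformer               DTLS encryption/decryption
 *     iceSocket (UDP or TCP via ICE)      network I/O
 *
 * Outgoing SCTP packets leave through NetworkLink.onConnOut() ->
 * transformer.sendApplicationData(); incoming packets travel through
 * iceSocket.receive() -> transformer.reverseTransform() ->
 * sctpSocket.onConnIn().
 */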
/**
 * Enables or disables this <tt>RTPConnectorInputStream</tt>. While the
 * stream is disabled, it does not accept any packets.
 *
 * @param enabled <tt>true</tt> to enable, <tt>false</tt> to disable.
 */
public void setEnabled(boolean enabled) {
    if (logger.isDebugEnabled())
        logger.debug("setEnabled: " + enabled);

    this.enabled = enabled;
}