/**
 * {@inheritDoc}
 *
 * @param format unused, since this implementation records multiple streams using potentially
 *     different formats.
 * @param dirname the path to the directory into which this <tt>Recorder</tt> will store the
 *     recorded media files.
 */
@Override
public void start(String format, String dirname) throws IOException, MediaException {
  if (logger.isInfoEnabled()) logger.info("Starting, format=" + format + " " + hashCode());

  path = dirname;

  MediaService mediaService = LibJitsi.getMediaService();

  /*
   * Note that we use only one RTPConnector for both the RTPTranslator and the RTPManager
   * instances. this.translator will write to its output streams, and this.rtpManager will read
   * from its input streams.
   */
  rtpConnector = new RTPConnectorImpl(redPayloadType, ulpfecPayloadType);

  rtpManager = RTPManager.newInstance();

  // Add the formats that we know about.
  rtpManager.addFormat(vp8RtpFormat, vp8PayloadType);
  rtpManager.addFormat(opusFormat, opusPayloadType);
  rtpManager.addReceiveStreamListener(this);

  /*
   * Note: When this.rtpManager sends RTCP sender/receiver reports, they will end up being
   * written to its own input stream. This is not expected to cause problems, but might be
   * something to keep an eye on.
   */
  rtpManager.initialize(rtpConnector);

  /*
   * Register a fake call participant.
   * TODO: can we use a more generic MediaStream here?
   */
  streamRTPManager =
      new StreamRTPManager(
          mediaService.createMediaStream(
              new MediaDeviceImpl(new CaptureDeviceInfo(), MediaType.VIDEO)),
          translator);
  streamRTPManager.initialize(rtpConnector);

  rtcpFeedbackSender = translator.getRtcpFeedbackMessageSender();

  translator.addFormat(streamRTPManager, opusFormat, opusPayloadType);

  // ((RTPTranslatorImpl) videoRTPTranslator).addFormat(streamRTPManager, redFormat,
  //     redPayloadType);
  // ((RTPTranslatorImpl) videoRTPTranslator).addFormat(streamRTPManager, ulpfecFormat,
  //     ulpfecPayloadType);
  // ((RTPTranslatorImpl) videoRTPTranslator).addFormat(streamRTPManager,
  //     mediaFormatImpl.getFormat(), vp8PayloadType);

  started = true;
}
/**
 * Checks whether RTP packets from {@code sourceChannel} should be forwarded to {@link #channel}.
 *
 * @param sourceChannel the channel.
 * @return {@code true} iff RTP packets from {@code sourceChannel} should be forwarded to {@link
 *     #channel}.
 */
public boolean isForwarded(Channel sourceChannel) {
  if (lastN < 0 && currentLastN < 0) {
    // If Last-N is disabled, we forward everything.
    return true;
  }

  if (sourceChannel == null) {
    logger.warn("Invalid sourceChannel: null.");
    return false;
  }

  Endpoint channelEndpoint = sourceChannel.getEndpoint();
  if (channelEndpoint == null) {
    logger.warn("sourceChannel has no endpoint.");
    return false;
  }

  if (forwardedEndpoints == INITIAL_EMPTY_LIST) {
    // LastN is enabled, but we haven't yet initialized the list of endpoints in the conference.
    initializeConferenceEndpoints();
  }

  // This may look like a place to optimize, because we query an unordered list (in O(n)) and it
  // executes on each video packet if lastN is enabled. However, the size of forwardedEndpoints
  // is restricted to lastN and so small enough that it is not worth optimizing.
  return forwardedEndpoints.contains(channelEndpoint.getID());
}
/**
 * Removes {@code receiveStream} from the list of active streams and tears down its processing
 * chain: stops and closes its <tt>DataSink</tt>, <tt>Processor</tt> and <tt>DataSource</tt>.
 *
 * @param receiveStream the stream to remove.
 * @param emptyJB whether to empty the jitter buffer (currently unused; the packet buffer is
 *     always emptied for video streams).
 */
private void removeReceiveStream(ReceiveStreamDesc receiveStream, boolean emptyJB) {
  if (receiveStream.format instanceof VideoFormat) {
    rtpConnector.packetBuffer.disable(receiveStream.ssrc);
    emptyPacketBuffer(receiveStream.ssrc);
  }

  if (receiveStream.dataSink != null) {
    try {
      receiveStream.dataSink.stop();
    } catch (IOException e) {
      logger.error("Failed to stop DataSink", e);
    }
    receiveStream.dataSink.close();
  }

  if (receiveStream.processor != null) {
    receiveStream.processor.stop();
    receiveStream.processor.close();
  }

  DataSource dataSource = receiveStream.receiveStream.getDataSource();
  if (dataSource != null) {
    try {
      dataSource.stop();
    } catch (IOException ioe) {
      logger.warn("Failed to stop DataSource");
    }
    dataSource.disconnect();
  }

  synchronized (receiveStreams) {
    receiveStreams.remove(receiveStream);
  }
}
/** Implements notification in order to track socket state. */
@Override
public synchronized void onSctpNotification(SctpSocket socket, SctpNotification notification) {
  if (logger.isDebugEnabled()) {
    logger.debug("socket=" + socket + "; notification=" + notification);
  }
  switch (notification.sn_type) {
    case SctpNotification.SCTP_ASSOC_CHANGE:
      SctpNotification.AssociationChange assocChange =
          (SctpNotification.AssociationChange) notification;
      switch (assocChange.state) {
        case SctpNotification.AssociationChange.SCTP_COMM_UP:
          if (!assocIsUp) {
            boolean wasReady = isReady();
            assocIsUp = true;
            if (isReady() && !wasReady) notifySctpConnectionReady();
          }
          break;
        case SctpNotification.AssociationChange.SCTP_COMM_LOST:
        case SctpNotification.AssociationChange.SCTP_SHUTDOWN_COMP:
        case SctpNotification.AssociationChange.SCTP_CANT_STR_ASSOC:
          try {
            closeStream();
          } catch (IOException e) {
            logger.error("Error closing SCTP socket", e);
          }
          break;
      }
      break;
  }
}
/**
 * Makes an <tt>RTCPREMBPacket</tt> that provides receiver feedback to the endpoint from which we
 * receive.
 *
 * @return an <tt>RTCPREMBPacket</tt> that provides receiver feedback to the endpoint from which
 *     we receive.
 */
private RTCPREMBPacket makeRTCPREMBPacket() {
  // TODO we should only make REMBs if REMB support has been advertised.

  // Destination
  RemoteBitrateEstimator remoteBitrateEstimator =
      ((VideoMediaStream) getStream()).getRemoteBitrateEstimator();

  Collection<Integer> ssrcs = remoteBitrateEstimator.getSsrcs();

  // TODO(gp) intersect with SSRCs from signaled simulcast layers
  // NOTE(gp) The Google Congestion Control algorithm (sender side) doesn't seem to care about
  // the SSRCs in the dest field.
  long[] dest = new long[ssrcs.size()];
  int i = 0;
  for (Integer ssrc : ssrcs) dest[i++] = ssrc & 0xFFFFFFFFL;

  // Exp & mantissa
  long bitrate = remoteBitrateEstimator.getLatestEstimate();
  if (bitrate == -1) return null;

  if (logger.isDebugEnabled()) logger.debug("Estimated bitrate: " + bitrate);

  // Create and return the packet. We use the stream's local source ID (SSRC) as the SSRC of the
  // packet sender.
  long streamSSRC = getLocalSSRC();
  return new RTCPREMBPacket(streamSSRC, /* mediaSSRC */ 0L, bitrate, dest);
}
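// Illustrative sketch, not part of the original class: the "Exp & mantissa" comment above refers
// to the on-the-wire encoding of the REMB bitrate as a 6-bit exponent and an 18-bit mantissa
// (bitrate ~= mantissa << exp), per draft-alvestrand-rmcat-remb. RTCPREMBPacket is assumed to
// perform the equivalent of the following, hypothetical encoder internally.
private static int[] encodeRembBitrate(long bitrateBps) {
  int exp = 0;
  // Shift right until the mantissa fits into 18 bits.
  while ((bitrateBps >> exp) >= (1 << 18)) exp++;
  return new int[] {exp, (int) (bitrateBps >> exp)}; // {6-bit exponent, 18-bit mantissa}
}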
/**
 * Gets the <tt>MediaFormat</tt>s among the specified <tt>mediaFormats</tt> which have the
 * specified <tt>encoding</tt> and, optionally, <tt>clockRate</tt>.
 *
 * @param mediaFormats the <tt>MediaFormat</tt>s from which to filter out only the ones which
 *     have the specified <tt>encoding</tt> and, optionally, <tt>clockRate</tt>
 * @param encoding the well-known encoding (name) of the <tt>MediaFormat</tt>s to be retrieved
 * @param clockRate the clock rate of the <tt>MediaFormat</tt>s to be retrieved; {@link
 *     #CLOCK_RATE_NOT_SPECIFIED} if any clock rate is acceptable
 * @return a <tt>List</tt> of the <tt>MediaFormat</tt>s among <tt>mediaFormats</tt> which have
 *     the specified <tt>encoding</tt> and, optionally, <tt>clockRate</tt>
 */
private List<MediaFormat> getMatchingMediaFormats(
    MediaFormat[] mediaFormats, String encoding, double clockRate) {
  /*
   * XXX Use String#equalsIgnoreCase(String) because some clients transmit some of the codecs
   * starting with capital letters.
   */

  /*
   * As per RFC 3551, Section 4.5.2, because of a mistake in RFC 1890 and for backward
   * compatibility, G.722 should always be announced as 8000 even though it is wideband. So, if
   * someone is looking for G722/16000, then: Forgive them, for they know not what they do!
   */
  if ("G722".equalsIgnoreCase(encoding) && (16000 == clockRate)) {
    clockRate = 8000;
    if (logger.isInfoEnabled())
      logger.info("Suppressing erroneous 16000 announcement for G.722");
  }

  List<MediaFormat> supportedMediaFormats = new ArrayList<>();
  for (MediaFormat mediaFormat : mediaFormats) {
    if (mediaFormat.getEncoding().equalsIgnoreCase(encoding)
        && ((CLOCK_RATE_NOT_SPECIFIED == clockRate)
            || (mediaFormat.getClockRate() == clockRate))) {
      supportedMediaFormats.add(mediaFormat);
    }
  }
  return supportedMediaFormats;
}
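// Worked example (hypothetical formats array): a caller asking for "g722" at 16000.0 will have
// the clock rate rewritten to 8000 by the special case above, so a G722/8000 entry in
// mediaFormats matches even though the request used the technically correct, but non-signaled,
// 16000 Hz sampling rate.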
/**
 * Retransmits a packet to {@link #channel}. If the destination supports the RTX format, the
 * packet will be encapsulated in RTX; otherwise, the packet will be retransmitted as-is.
 *
 * @param pkt the packet to retransmit.
 * @param after the {@code TransformEngine} in the chain of {@code TransformEngine}s of the
 *     associated {@code MediaStream} after which the injection of {@code pkt} is to begin
 * @return {@code true} if the packet was successfully retransmitted, {@code false} otherwise.
 */
public boolean retransmit(RawPacket pkt, TransformEngine after) {
  boolean destinationSupportsRtx = channel.getRtxPayloadType() != -1;
  boolean retransmitPlain;

  if (destinationSupportsRtx) {
    long rtxSsrc = getPairedSsrc(pkt.getSSRC());

    if (rtxSsrc == -1) {
      logger.warn("Cannot find SSRC for RTX, retransmitting plain.");
      retransmitPlain = true;
    } else {
      retransmitPlain = !encapsulateInRtxAndTransmit(pkt, rtxSsrc);
    }
  } else {
    retransmitPlain = true;
  }

  if (retransmitPlain) {
    MediaStream mediaStream = channel.getStream();

    if (mediaStream != null) {
      try {
        mediaStream.injectPacket(pkt, /* data */ true, after);
      } catch (TransmissionFailedException tfe) {
        logger.warn("Failed to retransmit a packet.", tfe);
        return false;
      }
    }
  }
  return true;
}
/**
 * Jirecon packets processing logic.
 *
 * <p>{@inheritDoc}
 */
@Override
public void processPacket(Packet packet) {
  JireconIq recording = (JireconIq) packet;

  // Discard RESULT packets for actions other than INFO, as well as any packet without a
  // recording ID.
  if ((JireconIq.Action.INFO != recording.getAction() && IQ.Type.RESULT == recording.getType())
      || StringUtils.isNullOrEmpty(recording.getRid())) {
    logger.warn("Discarded: " + recording.toXML());
    return;
  }

  if (!recording.getRid().equals(recordingId)) {
    logger.warn("Received IQ for unknown session: " + recording.toXML());
    return;
  }

  if (status != recording.getStatus()) {
    status = recording.getStatus();

    logger.info("Recording " + recordingId + " status: " + status);

    if (status == JireconIq.Status.STOPPED) {
      logger.info("Recording STOPPED: " + recordingId);
      recordingId = null;
    }
  } else {
    logger.info("Ignored status change: " + recording.toXML());
  }
}
/**
 * Handles a specific <tt>IOException</tt> which was thrown during the execution of {@link
 * #runInConnectThread(DTLSProtocol, TlsPeer, DatagramTransport)} while trying to establish a
 * DTLS connection.
 *
 * @param ioe the <tt>IOException</tt> to handle
 * @param msg the human-readable message to log about the specified <tt>ioe</tt>
 * @param i the number of tries remaining after the current one
 * @return <tt>true</tt> if the specified <tt>ioe</tt> was successfully handled; <tt>false</tt>,
 *     otherwise
 */
private boolean handleRunInConnectThreadException(IOException ioe, String msg, int i) {
  // SrtpControl.start(MediaType) starts its associated TransformEngine. We use mediaType to
  // signal the normal stop as well, i.e. we ignore exceptions once the procedure to stop this
  // PacketTransformer has begun.
  if (mediaType == null) return false;

  if (ioe instanceof TlsFatalAlert) {
    TlsFatalAlert tfa = (TlsFatalAlert) ioe;
    short alertDescription = tfa.getAlertDescription();

    if (alertDescription == AlertDescription.unexpected_message) {
      msg += " Received fatal unexpected message.";
      if (i == 0
          || !Thread.currentThread().equals(connectThread)
          || connector == null
          || mediaType == null) {
        msg += " Giving up after " + (CONNECT_TRIES - i) + " retries.";
      } else {
        msg += " Will retry.";
        logger.error(msg, ioe);
        return true;
      }
    } else {
      msg += " Received fatal alert " + alertDescription + ".";
    }
  }

  logger.error(msg, ioe);
  return false;
}
/**
 * Sets the value of {@code lastN}, that is, the maximum number of endpoints whose video streams
 * will be forwarded to the endpoint. A value of {@code -1} means that there is no limit.
 *
 * @param lastN the value to set.
 */
public void setLastN(int lastN) {
  if (logger.isDebugEnabled()) {
    logger.debug("Setting lastN=" + lastN);
  }

  List<String> endpointsToAskForKeyframe = null;
  synchronized (this) {
    // Since we have the lock anyway, call update() inside, so it doesn't have to obtain it
    // again. But keep the call to askForKeyframes() outside.
    if (this.lastN != lastN) {
      // If we're just now enabling lastN, we don't need to ask for keyframes as all streams
      // were being forwarded already.
      boolean update = this.lastN != -1;

      this.lastN = lastN;

      if (lastN >= 0 && (currentLastN < 0 || currentLastN > lastN)) {
        currentLastN = lastN;
      }

      if (update) {
        endpointsToAskForKeyframe = update();
      }
    }
  }

  askForKeyframes(endpointsToAskForKeyframe);
}
/**
 * Initializes the local list of endpoints ({@link #speechActivityEndpointsChanged(List)}) with
 * the current endpoints from the conference.
 */
public synchronized void initializeConferenceEndpoints() {
  speechActivityEndpointsChanged(channel.getConferenceSpeechActivity().getEndpoints());

  if (logger.isDebugEnabled()) {
    logger.debug("Initialized the list of endpoints: " + conferenceSpeechActivityEndpoints);
  }
}
/**
 * Stops the desktop capture stream.
 *
 * @see AbstractPullBufferStream#stop()
 */
@Override
public void stop() throws IOException {
  try {
    if (logger.isInfoEnabled()) logger.info("Stop stream");
  } finally {
    super.stop();
    byteBufferPool.drain();
  }
}
/**
 * Writes the overall stats of the <tt>MediaStream</tt> this <tt>MediaStreamStats</tt> keeps
 * track of to its file.
 */
public void writeOverallStats() {
  try {
    logger.info("Writing overall stats to file");
    PrintWriter writer = new PrintWriter(overallStatsFile, "UTF-8");
    writer.print(getOverallStatsJSON() + '\n');
    writer.close();
  } catch (FileNotFoundException | UnsupportedEncodingException e) {
    logger.fatal("Overall stats file opening error", e);
  }
}
/**
 * Returns {@code true} if a specific RED packet contains multiple blocks; {@code false},
 * otherwise.
 *
 * @param buffer the byte buffer that contains the RED payload.
 * @param offset the offset in the buffer where the RED payload begins.
 * @param length the length of the RED payload.
 * @return {@code true} if the RED payload in {@code buffer} contains multiple blocks;
 *     otherwise, {@code false}.
 */
public static boolean isMultiBlock(byte[] buffer, int offset, int length) {
  if (buffer == null || buffer.length == 0) {
    logger.warn("The buffer appears to be empty.");
    return false;
  }

  if (offset < 0 || buffer.length <= offset) {
    logger.warn("Prevented array out of bounds exception.");
    return false;
  }

  // The high bit (F) of the first RED header octet indicates whether more blocks follow.
  return (buffer[offset] & 0x80) != 0;
}
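// Illustrative sketch, not part of the original class: isMultiBlock(...) tests the F bit (0x80)
// of the first RED header octet which, per RFC 2198, indicates that further header blocks
// follow. Each non-final RED header is 4 bytes (F + payload type, 14-bit timestamp offset,
// 10-bit block length); the final header is a single byte with F = 0. A hypothetical counter of
// the blocks in a RED payload could therefore look like this:
public static int countRedBlocks(byte[] buffer, int offset, int length) {
  int off = offset;
  int end = offset + length;
  int count = 0;
  // Walk the 4-byte headers while the F bit is set.
  while (off < end && (buffer[off] & 0x80) != 0) {
    count++;
    off += 4;
  }
  // The final 1-byte header (F = 0) accounts for one more block, if present.
  return (off < end) ? count + 1 : count;
}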
/** Discovers DirectShow capture devices and registers them with the JMF CaptureDeviceManager. */
protected void doInitialize() throws Exception {
  DSCaptureDevice[] devices = DSManager.getInstance().getCaptureDevices();
  boolean captureDeviceInfoIsAdded = false;

  for (int i = 0, count = (devices == null) ? 0 : devices.length; i < count; i++) {
    long pixelFormat = devices[i].getFormat().getPixelFormat();
    int ffmpegPixFmt = (int) DataSource.getFFmpegPixFmt(pixelFormat);
    Format format = null;

    if (ffmpegPixFmt != FFmpeg.PIX_FMT_NONE) {
      format = new AVFrameFormat(ffmpegPixFmt, (int) pixelFormat);
    } else {
      logger.warn(
          "No support for this webcam: "
              + devices[i].getName()
              + " (format "
              + pixelFormat
              + " not supported)");
      continue;
    }

    if (logger.isInfoEnabled()) {
      for (DSFormat f : devices[i].getSupportedFormats()) {
        if (f.getWidth() != 0 && f.getHeight() != 0)
          logger.info(
              "Webcam available resolution for "
                  + devices[i].getName()
                  + ": "
                  + f.getWidth()
                  + "x"
                  + f.getHeight());
      }
    }

    CaptureDeviceInfo device =
        new CaptureDeviceInfo(
            devices[i].getName(),
            new MediaLocator(LOCATOR_PROTOCOL + ':' + devices[i].getName()),
            new Format[] {format});

    if (logger.isInfoEnabled()) logger.info("Found[" + i + "]: " + device.getName());

    CaptureDeviceManager.addDevice(device);
    captureDeviceInfoIsAdded = true;
  }

  if (captureDeviceInfoIsAdded && !MediaServiceImpl.isJmfRegistryDisableLoad())
    CaptureDeviceManager.commit();

  DSManager.dispose();
}
/**
 * Restarts the recording for a specific SSRC.
 *
 * @param ssrc the SSRC for which to restart recording.
 * @param timestamp the timestamp of the first RTP packet of the new recording.
 */
private void resetRecording(long ssrc, long timestamp) {
  ReceiveStreamDesc receiveStream = findReceiveStream(ssrc);

  // we only restart audio recordings
  if (receiveStream != null && receiveStream.format instanceof AudioFormat) {
    String newFilename = getNextFilename(path + "/" + ssrc, AUDIO_FILENAME_SUFFIX);

    // flush the buffer contained in the MP3 encoder
    String s = "trying to flush ssrc=" + ssrc;
    Processor p = receiveStream.processor;
    if (p != null) {
      s += " p!=null";
      for (TrackControl tc : p.getTrackControls()) {
        Object o = tc.getControl(FlushableControl.class.getName());
        if (o != null) ((FlushableControl) o).flush();
      }
    }

    if (logger.isInfoEnabled()) {
      logger.info("Restarting recording for SSRC=" + ssrc + ". New filename: " + newFilename);
    }

    receiveStream.dataSink.close();
    receiveStream.dataSink = null;

    // flush the FMJ jitter buffer
    // DataSource ds = receiveStream.receiveStream.getDataSource();
    // if (ds instanceof net.sf.fmj.media.protocol.rtp.DataSource)
    //   ((net.sf.fmj.media.protocol.rtp.DataSource) ds).flush();

    receiveStream.filename = newFilename;
    try {
      receiveStream.dataSink =
          Manager.createDataSink(
              receiveStream.dataSource, new MediaLocator("file:" + newFilename));
    } catch (NoDataSinkException ndse) {
      logger.warn("Could not reset recording for SSRC=" + ssrc + ": " + ndse);
      removeReceiveStream(receiveStream, false);
      // Without a DataSink, the open()/start() calls below would throw a NullPointerException.
      return;
    }

    try {
      receiveStream.dataSink.open();
      receiveStream.dataSink.start();
    } catch (IOException ioe) {
      logger.warn("Could not reset recording for SSRC=" + ssrc + ": " + ioe);
      removeReceiveStream(receiveStream, false);
      return;
    }

    audioRecordingStarted(ssrc, timestamp);
  }
}
/** {@inheritDoc} */
@Override
public boolean setRecording(String from, String token, State doRecord, String path) {
  if (!StringUtils.isNullOrEmpty(this.token) && !this.token.equals(token)) {
    return false;
  }

  if (!isRecording() && doRecord.equals(State.ON)) {
    // Send start recording IQ
    JireconIq recording = new JireconIq();

    recording.setTo(recorderComponentJid);
    recording.setType(IQ.Type.SET);
    recording.setFrom(from);
    recording.setMucJid(mucRoomJid);
    recording.setAction(JireconIq.Action.START);
    recording.setOutput(path);

    Packet reply = xmpp.getXmppConnection().sendPacketAndGetReply(recording);
    if (reply instanceof JireconIq) {
      JireconIq recResponse = (JireconIq) reply;

      if (JireconIq.Status.INITIATING.equals(recResponse.getStatus())) {
        recordingId = recResponse.getRid();
        logger.info("Received recording ID: " + recordingId);
        status = JireconIq.Status.INITIATING;
      } else {
        logger.error("Unexpected status received: " + recResponse.toXML());
      }
    } else {
      logger.error("Unexpected response: " + IQUtils.responseToXML(reply));
    }
  } else if (isRecording() && doRecord.equals(State.OFF)) {
    // Send stop recording IQ
    JireconIq recording = new JireconIq();

    recording.setTo(recorderComponentJid);
    recording.setType(IQ.Type.SET);
    recording.setFrom(from);
    recording.setRid(recordingId);
    recording.setMucJid(mucRoomJid);
    recording.setAction(JireconIq.Action.STOP);

    xmpp.getXmppConnection().sendPacket(recording);

    status = JireconIq.Status.STOPPING;
  }

  return true;
}
/**
 * Stops the <tt>msofficecomm</tt> bundle in a specific {@link BundleContext}.
 *
 * @param bundleContext the <tt>BundleContext</tt> in which the <tt>msofficecomm</tt> bundle is
 *     to be stopped
 * @throws Exception if anything goes wrong while stopping the <tt>msofficecomm</tt> bundle in
 *     the specified <tt>BundleContext</tt>
 */
public void stop(BundleContext bundleContext) throws Exception {
  // The msofficecomm bundle is available on Windows only.
  if (!OSUtils.IS_WINDOWS) return;

  try {
    int hresult = OutOfProcessServer.stop();
    if (hresult < 0) throw new RuntimeException("HRESULT " + hresult);
  } finally {
    Messenger.stop(bundleContext);
  }

  if (logger.isInfoEnabled()) logger.info("MsOfficeComm plugin ... [UNREGISTERED]");
}
/**
 * Notifies this instance that the ordered list of endpoints (specified as a list of endpoint
 * IDs) in the conference has changed.
 *
 * @param endpointIds the new ordered list of endpoints (specified as a list of endpoint IDs) in
 *     the conference.
 * @return the list of IDs of endpoints which were added to the list of forwarded endpoints as a
 *     result of the call.
 */
private synchronized List<String> speechActivityEndpointIdsChanged(List<String> endpointIds) {
  if (conferenceSpeechActivityEndpoints.equals(endpointIds)) {
    if (logger.isDebugEnabled()) {
      logger.debug("Conference endpoints have not changed.");
    }
    return null;
  } else {
    List<String> newEndpoints = new LinkedList<>(endpointIds);
    newEndpoints.removeAll(conferenceSpeechActivityEndpoints);

    conferenceSpeechActivityEndpoints = endpointIds;

    return update(newEndpoints);
  }
}
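// Worked example (hypothetical IDs): if conferenceSpeechActivityEndpoints is ["A", "B"] and
// endpointIds is ["B", "C", "A"], then newEndpoints is ["C"]; only endpoints not previously in
// the list are passed to update(), which decides whether they enter the forwarded set.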
/** {@inheritDoc} */
@Override
public RawPacket transform(RawPacket pkt) {
  if (pkt == null) {
    return pkt;
  }

  RTCPCompoundPacket inPacket;
  try {
    inPacket =
        (RTCPCompoundPacket) parser.parse(pkt.getBuffer(), pkt.getOffset(), pkt.getLength());
  } catch (BadFormatException e) {
    logger.warn("Failed to terminate an RTCP packet. Dropping packet.");
    return null;
  }

  // Update our RTCP stats map (timestamps). This operation is read-only.
  remoteClockEstimator.apply(inPacket);

  cnameRegistry.update(inPacket);

  // Remove SRs and RRs from the RTCP packet.
  pkt = feedbackGateway.gateway(inPacket);

  return pkt;
}
/**
 * Returns the cached recent messages history.
 *
 * @return the <tt>History</tt> instance holding the recent messages, creating it if necessary.
 * @throws IOException if the history cannot be created.
 */
private History getHistory() throws IOException {
  synchronized (historyID) {
    HistoryService historyService =
        MessageHistoryActivator.getMessageHistoryService().getHistoryService();

    if (history == null) {
      history = historyService.createHistory(historyID, recordStructure);

      // Check the version; if it is not our version, re-create the history (delete it).
      HistoryReader reader = history.getReader();
      boolean delete = false;
      QueryResultSet<HistoryRecord> res = reader.findLast(1);
      if (res != null && res.hasNext()) {
        HistoryRecord hr = res.next();
        if (hr.getPropertyValues().length >= 4) {
          if (!hr.getPropertyValues()[3].equals(RECENT_MSGS_VER)) delete = true;
        } else delete = true;
      }

      if (delete) {
        // delete it
        try {
          historyService.purgeLocallyStoredHistory(historyID);

          history = historyService.createHistory(historyID, recordStructure);
        } catch (IOException ex) {
          logger.error("Cannot delete recent_messages history", ex);
        }
      }
    }

    return history;
  }
}
/**
 * Captures a part of the desktop screen using <tt>java.awt.Robot</tt>.
 *
 * @param x x position to start capture
 * @param y y position to start capture
 * @param width capture width
 * @param height capture height
 * @return a <tt>BufferedImage</tt> containing the captured part of the desktop screen, or
 *     <tt>null</tt> if the <tt>Robot</tt> is not available
 */
public BufferedImage captureScreen(int x, int y, int width, int height) {
  if (robot == null) {
    // Robot has not been created, so abort.
    return null;
  }

  if (logger.isInfoEnabled()) logger.info("Begin capture: " + System.nanoTime());
  Rectangle rect = new Rectangle(x, y, width, height);
  BufferedImage img = robot.createScreenCapture(rect);
  if (logger.isInfoEnabled()) logger.info("End capture: " + System.nanoTime());

  return img;
}
/**
 * Tries to find an SSRC paired with the SSRC of {@code pkt} in an FID group in one of the
 * channels from {@link #channel}'s {@code Content}. Returns -1 on failure.
 *
 * @param pkt the {@code RawPacket} that holds the RTP packet for which to find a paired SSRC.
 * @return an SSRC paired with the SSRC of {@code pkt} in an FID group, or -1.
 */
private long getRtxSsrc(RawPacket pkt) {
  StreamRTPManager receiveRTPManager =
      channel.getStream().getRTPTranslator().findStreamRTPManagerByReceiveSSRC(pkt.getSSRC());

  MediaStreamTrackReceiver receiver = null;
  if (receiveRTPManager != null) {
    MediaStream receiveStream = receiveRTPManager.getMediaStream();
    if (receiveStream != null) {
      receiver = receiveStream.getMediaStreamTrackReceiver();
    }
  }

  if (receiver == null) {
    return -1;
  }

  RTPEncoding encoding = receiver.resolveRTPEncoding(pkt);
  if (encoding == null) {
    logger.warn(
        "encoding_not_found"
            + ",stream_hash=" + channel.getStream().hashCode()
            + " ssrc=" + pkt.getSSRCAsLong());
    return -1;
  }

  return encoding.getRTXSSRC();
}
/**
 * Closes the given <tt>TransportManager</tt> of this <tt>Conference</tt> and removes it from
 * {@link #transportManagers} (i.e. removes the corresponding channel bundle).
 *
 * @param transportManager the <tt>TransportManager</tt> to close.
 */
void closeTransportManager(TransportManager transportManager) {
  synchronized (transportManagers) {
    for (Iterator<IceUdpTransportManager> i = transportManagers.values().iterator();
        i.hasNext(); ) {
      if (i.next() == transportManager) {
        i.remove();
        // Presumably, we have a single association for transportManager.
        break;
      }
    }

    // Close manager
    try {
      transportManager.close();
    } catch (Throwable t) {
      logger.warn(
          "Failed to close an IceUdpTransportManager of conference " + getID() + "!", t);

      // The whole point of explicitly closing the transportManagers of this Conference is to
      // prevent memory leaks. Hence, it does not make sense to possibly leave
      // TransportManagers open because a TransportManager has failed to close.
      if (t instanceof InterruptedException) Thread.currentThread().interrupt();
      else if (t instanceof ThreadDeath) throw (ThreadDeath) t;
    }
  }
}
/** Stops this <tt>PacketTransformer</tt>. */
private synchronized void stop() {
  connectThread = null;

  try {
    // The dtlsTransport and _srtpTransformer SHOULD be closed, of course. The
    // datagramTransport MUST be closed.
    if (dtlsTransport != null) {
      try {
        dtlsTransport.close();
      } catch (IOException ioe) {
        logger.error("Failed to (properly) close " + dtlsTransport.getClass(), ioe);
      }
      dtlsTransport = null;
    }
    if (_srtpTransformer != null) {
      _srtpTransformer.close();
      _srtpTransformer = null;
    }
  } finally {
    try {
      closeDatagramTransport();
    } finally {
      notifyAll();
    }
  }
}
/**
 * Starts the configuration service.
 *
 * @param bundleContext the <tt>BundleContext</tt> as provided by the OSGi framework.
 * @throws Exception if anything goes wrong
 */
public void start(BundleContext bundleContext) throws Exception {
  FileAccessService fas = ServiceUtils.getService(bundleContext, FileAccessService.class);

  if (fas != null) {
    File usePropFileConfig;
    try {
      usePropFileConfig =
          fas.getPrivatePersistentFile(".usepropfileconfig", FileCategory.PROFILE);
    } catch (Exception ise) {
      // There is somewhat of a chicken-and-egg dependency between
      // FileConfigurationServiceImpl and ConfigurationServiceImpl:
      // FileConfigurationServiceImpl throws IllegalStateException if certain System properties
      // are not set, ConfigurationServiceImpl will make sure that these properties are set but
      // it will do that later. A SecurityException is thrown when the destination is not
      // writable or we do not have access to that folder.
      usePropFileConfig = null;
    }

    if (usePropFileConfig != null && usePropFileConfig.exists()) {
      logger.info("Using properties file configuration store.");
      this.cs = LibJitsi.getConfigurationService();
    }
  }

  if (this.cs == null) {
    this.cs = new JdbcConfigService(fas);
  }

  bundleContext.registerService(ConfigurationService.class.getName(), this.cs, null);
  fixPermissions(this.cs);
}
/**
 * Expires this <tt>Conference</tt>, its <tt>Content</tt>s and their respective
 * <tt>Channel</tt>s. Releases the resources acquired by this instance throughout its life time
 * and prepares it to be garbage collected.
 */
public void expire() {
  synchronized (this) {
    if (expired) return;
    else expired = true;
  }

  EventAdmin eventAdmin = videobridge.getEventAdmin();
  if (eventAdmin != null) eventAdmin.sendEvent(EventFactory.conferenceExpired(this));

  setRecording(false);
  if (recorderEventHandler != null) {
    recorderEventHandler.close();
    recorderEventHandler = null;
  }

  Videobridge videobridge = getVideobridge();

  try {
    videobridge.expireConference(this);
  } finally {
    // Expire the Contents of this Conference.
    for (Content content : getContents()) {
      try {
        content.expire();
      } catch (Throwable t) {
        logger.warn(
            "Failed to expire content " + content.getName() + " of conference " + getID() + "!",
            t);
        if (t instanceof InterruptedException) Thread.currentThread().interrupt();
        else if (t instanceof ThreadDeath) throw (ThreadDeath) t;
      }
    }

    // Close the transportManagers of this Conference. Normally, there will be no
    // TransportManager left to close at this point because all Channels have expired and the
    // last Channel to be removed from a TransportManager closes the TransportManager. However,
    // a Channel may have expired before it has learned of its TransportManager and then the
    // TransportManager will not close.
    closeTransportManagers();

    if (logger.isInfoEnabled()) {
      logger.info(
          "Expired conference " + getID() + ". " + videobridge.getConferenceCountString());
    }
  }
}
/**
 * Requests that the method run() stop. The method run() won't stop right away: the loop will be
 * broken at its next iteration.
 *
 * <p>If the method run() is not running, calling this method does nothing.
 */
public synchronized void stop() {
  if (!threadStop) {
    logger.info("Stopping the main loop");
    threadStop = true;
  }
}
/**
 * Sends an acknowledgment for an open channel request on the given SCTP stream ID.
 *
 * @param sid SCTP stream identifier to be used for sending the ack.
 */
private void sendOpenChannelAck(int sid) throws IOException {
  // Send ACK
  byte[] ack = MSG_CHANNEL_ACK_BYTES;
  int sendAck = sctpSocket.send(ack, true, sid, WEB_RTC_PPID_CTRL);

  if (sendAck != ack.length) {
    logger.error("Failed to send open channel confirmation");
  }
}
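// Context note (assumption, not visible in this snippet): per the WebRTC Data Channel
// Establishment Protocol, the ACK message consists of a single byte, the message type
// DATA_CHANNEL_ACK (0x02), sent on the control PPID. MSG_CHANNEL_ACK_BYTES is presumably
// defined along the lines of:
//   private static final byte[] MSG_CHANNEL_ACK_BYTES = {(byte) 0x02 /* DATA_CHANNEL_ACK */};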
/**
 * Initializes a new <tt>SimulcastEngine</tt> instance.
 *
 * @param videoChannel the <tt>VideoChannel</tt> associated with this <tt>SimulcastEngine</tt>.
 */
public SimulcastEngine(VideoChannel videoChannel) {
  this.videoChannel = videoChannel;
  simulcastReceiver =
      new SimulcastReceiver(
          this,
          ServiceUtils.getService(videoChannel.getBundleContext(), ConfigurationService.class));
  this.logger =
      Logger.getLogger(classLogger, videoChannel.getContent().getConference().getLogger());
}