/** @throws Exception If failed. */
  public void testClientAffinity() throws Exception {
    GridClientData partitioned = client.data(PARTITIONED_CACHE_NAME);

    Collection<Object> keys = new ArrayList<>();

    keys.addAll(Arrays.asList(Boolean.TRUE, Boolean.FALSE, 1, Integer.MAX_VALUE));

    Random rnd = new Random();
    StringBuilder sb = new StringBuilder();

    // Generate some random strings.
    for (int i = 0; i < 100; i++) {
      sb.setLength(0);

      for (int j = 0; j < 255; j++)
        // Only printable ASCII symbols for test.
        sb.append((char) (rnd.nextInt(0x7f - 0x20) + 0x20));

      keys.add(sb.toString());
    }

    // Generate some more keys to achieve better coverage.
    for (int i = 0; i < 100; i++) keys.add(UUID.randomUUID());

    for (Object key : keys) {
      UUID nodeId = grid(0).mapKeyToNode(PARTITIONED_CACHE_NAME, key).id();

      UUID clientNodeId = partitioned.affinity(key);

      assertEquals(
          "Invalid affinity mapping for REST response for key: " + key, nodeId, clientNodeId);
    }
  }
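
The random-key generation above can be factored into a small standalone helper; a minimal sketch (the class and method names are illustrative, not part of the original test):

import java.util.ArrayList;
import java.util.Collection;
import java.util.Random;

// Hypothetical helper mirroring the loop above: random strings of printable ASCII (0x20..0x7E).
final class RandomKeys {
  static Collection<String> printableAscii(int count, int length) {
    Random rnd = new Random();
    Collection<String> keys = new ArrayList<>(count);
    StringBuilder sb = new StringBuilder(length);

    for (int i = 0; i < count; i++) {
      sb.setLength(0);

      for (int j = 0; j < length; j++)
        sb.append((char) (rnd.nextInt(0x7f - 0x20) + 0x20));

      keys.add(sb.toString());
    }

    return keys;
  }
}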
Example #2
  /**
   * Schedules runnable task for execution.
   *
   * @param w Runnable task.
   * @throws GridException Thrown if any exception occurred.
   */
  @SuppressWarnings({"CatchGenericClass", "ProhibitedExceptionThrown"})
  public void execute(final GridWorker w) throws GridException {
    workers.add(w);

    try {
      exec.execute(
          new Runnable() {
            @Override
            public void run() {
              try {
                w.run();
              } finally {
                workers.remove(w);
              }
            }
          });
    } catch (RejectedExecutionException e) {
      workers.remove(w);

      throw new GridComputeExecutionRejectedException(
          "Failed to execute worker due to execution rejection.", e);
    } catch (RuntimeException e) {
      workers.remove(w);

      throw new GridException("Failed to execute worker due to runtime exception.", e);
    } catch (Error e) {
      workers.remove(w);

      throw e;
    }
  }
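
The same bookkeeping pattern — register the worker before submitting, unregister it when it finishes or is rejected — can be sketched with plain java.util.concurrent types (names below are illustrative, not GridGain API):

import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.RejectedExecutionException;

// Hypothetical sketch of the register/submit/unregister pattern used above.
final class TrackingExecutor {
  private final Set<Runnable> active = ConcurrentHashMap.newKeySet();
  private final ExecutorService exec;

  TrackingExecutor(ExecutorService exec) {
    this.exec = exec;
  }

  void execute(final Runnable task) {
    active.add(task);

    try {
      exec.execute(
          new Runnable() {
            @Override
            public void run() {
              try {
                task.run();
              } finally {
                active.remove(task); // Always unregister, even if the task throws.
              }
            }
          });
    } catch (RejectedExecutionException e) {
      active.remove(task); // The task never ran, so unregister it here.

      throw e;
    }
  }
}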
    /** {@inheritDoc} */
    @Override
    protected Collection<? extends GridComputeJob> split(int gridSize, Object arg)
        throws GridException {
      Collection<GridComputeJobAdapter> jobs = new ArrayList<>(gridSize);

      this.gridSize = gridSize;

      final String locNodeId = grid.localNode().id().toString();

      for (int i = 0; i < gridSize; i++) {
        jobs.add(
            new GridComputeJobAdapter() {
              @SuppressWarnings("OverlyStrongTypeCast")
              @Override
              public Object execute() {
                try {
                  Thread.sleep(1000);
                } catch (InterruptedException ignored) {
                  Thread.currentThread().interrupt();
                }

                return new GridBiTuple<>(locNodeId, 1);
              }
            });
      }

      return jobs;
    }
  /**
   * Makes <tt>RTCPSRPacket</tt>s for all the RTP streams that we're sending.
   *
   * @param time the current system time in milliseconds.
   * @return a <tt>Collection</tt> of <tt>RTCPSRPacket</tt>s for all the RTP streams that we're
   *     sending.
   */
  private Collection<RTCPSRPacket> makeRTCPSRPackets(long time) {
    Collection<RTCPSRPacket> srPackets = new ArrayList<RTCPSRPacket>();

    for (RTPStatsEntry rtpStatsEntry : rtpStatsMap.values()) {
      int ssrc = rtpStatsEntry.getSsrc();
      RemoteClock estimate = remoteClockEstimator.estimate(ssrc, time);
      if (estimate == null) {
        // We're not going to go far without an estimate.
        continue;
      }

      RTCPSRPacket srPacket = new RTCPSRPacket(ssrc, MIN_RTCP_REPORTS_BLOCKS_ARRAY);

      // Set the NTP timestamp for this SR.
      long estimatedRemoteTime = estimate.getRemoteTime();
      long secs = estimatedRemoteTime / 1000L;
      double fraction = (estimatedRemoteTime - secs * 1000L) / 1000D;
      srPacket.ntptimestamplsw = (int) (fraction * 4294967296D);
      srPacket.ntptimestampmsw = secs;

      // Set the RTP timestamp.
      srPacket.rtptimestamp = estimate.getRtpTimestamp();

      // Fill-in packet and octet send count.
      srPacket.packetcount = rtpStatsEntry.getPacketsSent();
      srPacket.octetcount = rtpStatsEntry.getBytesSent();

      srPackets.add(srPacket);
    }

    return srPackets;
  }
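
The NTP timestamp arithmetic above splits a millisecond clock value into 32-bit whole seconds and a fraction scaled by 2^32; a minimal sketch of that conversion as a hypothetical standalone helper:

  // Hypothetical helper: split a millisecond timestamp into the MSW (whole seconds) and
  // LSW (fraction of a second scaled by 2^32) halves used for the SR's NTP timestamp above.
  static long[] toNtpHalves(long timeMs) {
    long secs = timeMs / 1000L; // Most significant word: whole seconds.
    double fraction = (timeMs - secs * 1000L) / 1000D; // Remaining fraction of a second.
    long lsw = (long) (fraction * 4294967296D); // Scale by 2^32 for the least significant word.

    return new long[] {secs, lsw};
  }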
  /**
   * Sends query request.
   *
   * @param fut Distributed future.
   * @param req Request.
   * @param nodes Nodes.
   * @throws IgniteCheckedException In case of error.
   */
  @SuppressWarnings("unchecked")
  private void sendRequest(
      final GridCacheDistributedQueryFuture<?, ?, ?> fut,
      final GridCacheQueryRequest req,
      Collection<ClusterNode> nodes)
      throws IgniteCheckedException {
    assert fut != null;
    assert req != null;
    assert nodes != null;

    final UUID locNodeId = cctx.localNodeId();

    ClusterNode locNode = null;

    Collection<ClusterNode> rmtNodes = null;

    for (ClusterNode n : nodes) {
      if (n.id().equals(locNodeId)) locNode = n;
      else {
        if (rmtNodes == null) rmtNodes = new ArrayList<>(nodes.size());

        rmtNodes.add(n);
      }
    }

    // The request should be sent to remote nodes before the query is processed on the local node.
    // For example, a remote reducer has state, so we should not serialize and send a reducer
    // that has already been changed by the local node.
    if (!F.isEmpty(rmtNodes)) {
      cctx.io()
          .safeSend(
              rmtNodes,
              req,
              cctx.ioPolicy(),
              new P1<ClusterNode>() {
                @Override
                public boolean apply(ClusterNode node) {
                  fut.onNodeLeft(node.id());

                  return !fut.isDone();
                }
              });
    }

    if (locNode != null) {
      cctx.closures()
          .callLocalSafe(
              new Callable<Object>() {
                @Override
                public Object call() throws Exception {
                  req.beforeLocalExecution(cctx);

                  processQueryRequest(locNodeId, req);

                  return null;
                }
              });
    }
  }
Example #6
  private static void testImplementation(Class<? extends Collection> implClazz) throws Throwable {
    testPotato(implClazz, Vector.class);
    testPotato(implClazz, CopyOnWriteArrayList.class);

    final Constructor<? extends Collection> constr = implClazz.getConstructor(Collection.class);
    final Collection<Object> coll = constr.newInstance(Arrays.asList(new String[] {}));
    coll.add(1);
    equal(coll.toString(), "[1]");
  }
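
The equal(...) helper used above is defined elsewhere; its assumed behavior is a simple equality assertion, roughly:

  // Assumed behavior of equal(...) (not the original helper): fail when the two values differ.
  private static void equal(Object actual, Object expected) {
    if (actual == null ? expected != null : !actual.equals(expected))
      throw new AssertionError("Expected: " + expected + ", got: " + actual);
  }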
    /** {@inheritDoc} */
    @Override
    protected Collection<? extends GridComputeJob> split(int gridSize, String arg)
        throws GridException {
      Collection<GridComputeJobAdapter> jobs = new ArrayList<>(jobCnt);

      for (int i = 0; i < jobCnt; i++) jobs.add(new TestJob());

      return jobs;
    }
Example #8
  /** {@inheritDoc} */
  @Override
  public Collection<UUID> nodeIds() {
    Collection<UUID> ids = new GridLeanSet<UUID>();

    ids.add(cctx.nodeId());
    ids.addAll(mappings.keySet());

    return ids;
  }
 private static Collection<Node> getNodeMatching(Node body, String regexp) {
   final Collection<Node> nodes = new ArrayList<>();
   if (body.getNodeName().matches(regexp)) nodes.add(body);
   if (body.getChildNodes().getLength() == 0) return nodes;
   NodeList returnList = body.getChildNodes();
   for (int k = 0; k < returnList.getLength(); k++) {
     final Node node = returnList.item(k);
     nodes.addAll(getNodeMatching(node, regexp));
   }
   return nodes;
 }
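
For illustration, a hedged usage sketch of getNodeMatching on a freshly parsed document (the XML payload and the "XAddrs" pattern are assumptions, not taken from this code):

 import java.io.ByteArrayInputStream;
 import java.nio.charset.StandardCharsets;
 import java.util.Collection;
 import javax.xml.parsers.DocumentBuilderFactory;
 import org.w3c.dom.Document;
 import org.w3c.dom.Node;

 // Hypothetical usage: collect every element whose name matches ".*XAddrs.*" from a response.
 static void printMatchingNodes() throws Exception {
   String xml = "<root><XAddrs>http://192.168.0.10/onvif/device_service</XAddrs></root>";

   Document doc =
       DocumentBuilderFactory.newInstance()
           .newDocumentBuilder()
           .parse(new ByteArrayInputStream(xml.getBytes(StandardCharsets.UTF_8)));

   Collection<Node> matches = getNodeMatching(doc.getDocumentElement(), ".*XAddrs.*");

   for (Node n : matches) System.out.println(n.getTextContent());
 }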
    /** {@inheritDoc} */
    @Override
    protected Collection<? extends GridComputeJob> split(int gridSize, Object arg)
        throws GridException {
      if (log.isInfoEnabled())
        log.info("Splitting job [job=" + this + ", gridSize=" + gridSize + ", arg=" + arg + ']');

      Collection<GridComputeJob> jobs = new ArrayList<>(SPLIT_COUNT);

      for (int i = 1; i <= SPLIT_COUNT; i++) jobs.add(new GridCancelTestJob(i));

      return jobs;
    }
Example #11
 public static void load(Collection<FileDesc> files, Path root, int blocSize, Pattern pattern)
     throws IOException {
   root = root.toAbsolutePath().normalize();
   Visitor visitor = new Visitor(root, blocSize, pattern);
   Files.walkFileTree(root, visitor);
   for (Future<FileDesc> future : visitor.futures()) {
     try {
       files.add(future.get());
     } catch (Exception e) {
       log.error("", e);
     }
   }
 }
  /**
   * Makes an <tt>RTCPSDESPacket</tt> containing SDES chunks for all the RTP streams that we're
   * sending.
   *
   * @return an <tt>RTCPSDESPacket</tt> with SDES chunks for all the RTP streams that we're
   *     sending.
   */
  private RTCPSDESPacket makeSDESPacket() {
    Collection<RTCPSDES> sdesChunks = new ArrayList<RTCPSDES>();

    // Create an SDES for our own SSRC.
    RTCPSDES ownSDES = new RTCPSDES();

    SSRCInfo ourinfo = getStream().getStreamRTPManager().getSSRCCache().ourssrc;
    ownSDES.ssrc = (int) getLocalSSRC();
    Collection<RTCPSDESItem> ownItems = new ArrayList<RTCPSDESItem>();
    ownItems.add(new RTCPSDESItem(RTCPSDESItem.CNAME, ourinfo.sourceInfo.getCNAME()));

    // Throttle the source description bandwidth. See RFC3550#6.3.9
    // Allocation of Source Description Bandwidth.

    if (sdesCounter % 3 == 0) {
      if (ourinfo.name != null && ourinfo.name.getDescription() != null)
        ownItems.add(new RTCPSDESItem(RTCPSDESItem.NAME, ourinfo.name.getDescription()));
      if (ourinfo.email != null && ourinfo.email.getDescription() != null)
        ownItems.add(new RTCPSDESItem(RTCPSDESItem.EMAIL, ourinfo.email.getDescription()));
      if (ourinfo.phone != null && ourinfo.phone.getDescription() != null)
        ownItems.add(new RTCPSDESItem(RTCPSDESItem.PHONE, ourinfo.phone.getDescription()));
      if (ourinfo.loc != null && ourinfo.loc.getDescription() != null)
        ownItems.add(new RTCPSDESItem(RTCPSDESItem.LOC, ourinfo.loc.getDescription()));
      if (ourinfo.tool != null && ourinfo.tool.getDescription() != null)
        ownItems.add(new RTCPSDESItem(RTCPSDESItem.TOOL, ourinfo.tool.getDescription()));
      if (ourinfo.note != null && ourinfo.note.getDescription() != null)
        ownItems.add(new RTCPSDESItem(RTCPSDESItem.NOTE, ourinfo.note.getDescription()));
    }

    sdesCounter++;

    ownSDES.items = ownItems.toArray(new RTCPSDESItem[ownItems.size()]);

    sdesChunks.add(ownSDES);

    for (Map.Entry<Integer, byte[]> entry : cnameRegistry.entrySet()) {
      RTCPSDES sdes = new RTCPSDES();
      sdes.ssrc = entry.getKey();
      sdes.items = new RTCPSDESItem[] {new RTCPSDESItem(RTCPSDESItem.CNAME, entry.getValue())};

      // Register the chunk; otherwise it would be built and silently discarded.
      sdesChunks.add(sdes);
    }

    RTCPSDES[] sps = sdesChunks.toArray(new RTCPSDES[sdesChunks.size()]);
    RTCPSDESPacket sp = new RTCPSDESPacket(sps);

    return sp;
  }
  /**
   * Makes <tt>RTCPRRPacket</tt>s using information in FMJ.
   *
   * @param time the current system time in milliseconds.
   * @return A <tt>Collection</tt> of <tt>RTCPRRPacket</tt>s to inject to the <tt>MediaStream</tt>.
   */
  private Collection<RTCPRRPacket> makeRTCPRRPackets(long time) {
    RTCPReportBlock[] reportBlocks = makeRTCPReportBlocks(time);
    if (reportBlocks == null || reportBlocks.length == 0) {
      return null;
    }

    Collection<RTCPRRPacket> rrPackets = new ArrayList<RTCPRRPacket>();

    // We use the stream's local source ID (SSRC) as the SSRC of packet
    // sender.
    long streamSSRC = getLocalSSRC();

    // Since a maximum of 31 reception report blocks will fit in an SR
    // or RR packet, additional RR packets SHOULD be stacked after the
    // initial SR or RR packet as needed to contain the reception
    // reports for all sources heard during the interval since the last
    // report.
    if (reportBlocks.length > MAX_RTCP_REPORT_BLOCKS) {
      for (int offset = 0; offset < reportBlocks.length; offset += MAX_RTCP_REPORT_BLOCKS) {
        RTCPReportBlock[] blocks =
            (reportBlocks.length - offset < MAX_RTCP_REPORT_BLOCKS)
                ? new RTCPReportBlock[reportBlocks.length - offset]
                : MAX_RTCP_REPORT_BLOCKS_ARRAY;

        System.arraycopy(reportBlocks, offset, blocks, 0, blocks.length);

        RTCPRRPacket rr = new RTCPRRPacket((int) streamSSRC, blocks);
        rrPackets.add(rr);
      }
    } else {
      RTCPRRPacket rr = new RTCPRRPacket((int) streamSSRC, reportBlocks);
      rrPackets.add(rr);
    }

    return rrPackets;
  }
  /** {@inheritDoc} */
  @Override
  public Collection<ClusterNode> nodes(int p, AffinityTopologyVersion topVer) {
    Collection<ClusterNode> affNodes = cctx.affinity().nodes(p, topVer);

    lock.readLock().lock();

    try {
      assert node2part != null && node2part.valid()
          : "Invalid node-to-partitions map [topVer1="
              + topVer
              + ", topVer2="
              + this.topVer
              + ", cache="
              + cctx.name()
              + ", node2part="
              + node2part
              + ']';

      Collection<ClusterNode> nodes = null;

      Collection<UUID> nodeIds = part2node.get(p);

      if (!F.isEmpty(nodeIds)) {
        Collection<UUID> affIds = new HashSet<>(F.viewReadOnly(affNodes, F.node2id()));

        for (UUID nodeId : nodeIds) {
          if (!affIds.contains(nodeId) && hasState(p, nodeId, OWNING, MOVING, RENTING)) {
            ClusterNode n = cctx.discovery().node(nodeId);

            if (n != null
                && (topVer.topologyVersion() < 0 || n.order() <= topVer.topologyVersion())) {
              if (nodes == null) {
                nodes = new ArrayList<>(affNodes.size() + 2);

                nodes.addAll(affNodes);
              }

              nodes.add(n);
            }
          }
        }
      }

      return nodes != null ? nodes : affNodes;
    } finally {
      lock.readLock().unlock();
    }
  }
  /** {@inheritDoc} */
  @Override
  public void loadCache(GridBiInClosure<K, V> c, @Nullable Object... args) throws GridException {
    ExecutorService exec =
        new ThreadPoolExecutor(
            threadsCnt,
            threadsCnt,
            0L,
            MILLISECONDS,
            new ArrayBlockingQueue<Runnable>(batchQueueSize),
            new BlockingRejectedExecutionHandler());

    Iterator<I> iter = inputIterator(args);

    Collection<I> buf = new ArrayList<>(batchSize);

    try {
      while (iter.hasNext()) {
        if (Thread.currentThread().isInterrupted()) {
          U.warn(log, "Working thread was interrupted while loading data.");

          break;
        }

        buf.add(iter.next());

        if (buf.size() == batchSize) {
          exec.submit(new Worker(c, buf, args));

          buf = new ArrayList<>(batchSize);
        }
      }

      if (!buf.isEmpty()) exec.submit(new Worker(c, buf, args));
    } catch (RejectedExecutionException ignored) {
      // Cannot happen because of the custom blocking RejectedExecutionHandler.
      assert false : "RejectedExecutionException was thrown while it shouldn't be.";
    } finally {
      exec.shutdown();

      try {
        exec.awaitTermination(Long.MAX_VALUE, MILLISECONDS);
      } catch (InterruptedException ignored) {
        U.warn(log, "Working thread was interrupted while waiting for put operations to complete.");

        Thread.currentThread().interrupt();
      }
    }
  }
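
The assert in the catch block above relies on BlockingRejectedExecutionHandler making rejection effectively impossible; a minimal sketch of what such a handler presumably looks like (an assumption, not the original class):

import java.util.concurrent.RejectedExecutionException;
import java.util.concurrent.RejectedExecutionHandler;
import java.util.concurrent.ThreadPoolExecutor;

// Hypothetical sketch: block the submitting thread until the bounded queue has free capacity.
class BlockingRejectedExecutionHandler implements RejectedExecutionHandler {
  @Override
  public void rejectedExecution(Runnable r, ThreadPoolExecutor pool) {
    try {
      if (!pool.isShutdown()) pool.getQueue().put(r); // Wait for space instead of rejecting.
    } catch (InterruptedException e) {
      Thread.currentThread().interrupt();

      throw new RejectedExecutionException("Interrupted while waiting for queue space.", e);
    }
  }
}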
  /**
   * Processes cache query request.
   *
   * @param sndId Sender node id.
   * @param req Query request.
   */
  @SuppressWarnings("unchecked")
  @Override
  void processQueryRequest(UUID sndId, GridCacheQueryRequest req) {
    if (req.cancel()) {
      cancelIds.add(new CancelMessageId(req.id(), sndId));

      if (req.fields()) removeFieldsQueryResult(sndId, req.id());
      else removeQueryResult(sndId, req.id());
    } else {
      if (!cancelIds.contains(new CancelMessageId(req.id(), sndId))) {
        if (!F.eq(req.cacheName(), cctx.name())) {
          GridCacheQueryResponse res =
              new GridCacheQueryResponse(
                  cctx.cacheId(),
                  req.id(),
                  new IgniteCheckedException(
                      "Received request for incorrect cache [expected="
                          + cctx.name()
                          + ", actual="
                          + req.cacheName()));

          sendQueryResponse(sndId, res, 0);
        } else {
          threads.put(req.id(), Thread.currentThread());

          try {
            GridCacheQueryInfo info = distributedQueryInfo(sndId, req);

            if (info == null) return;

            if (req.fields()) runFieldsQuery(info);
            else runQuery(info);
          } catch (Throwable e) {
            U.error(log(), "Failed to run query.", e);

            sendQueryResponse(
                sndId, new GridCacheQueryResponse(cctx.cacheId(), req.id(), e.getCause()), 0);

            if (e instanceof Error) throw (Error) e;
          } finally {
            threads.remove(req.id());
          }
        }
      }
    }
  }
  /**
   * Iterate through all the <tt>ReceiveStream</tt>s that this <tt>MediaStream</tt> has and make
   * <tt>RTCPReportBlock</tt>s for all of them.
   *
   * @param time the current system time in milliseconds.
   * @return an array of <tt>RTCPReportBlock</tt>s, one for each remote sender we receive from.
   */
  private RTCPReportBlock[] makeRTCPReportBlocks(long time) {
    MediaStream stream = getStream();
    // State validation.
    if (stream == null) {
      logger.warn("stream is null.");
      return MIN_RTCP_REPORTS_BLOCKS_ARRAY;
    }

    StreamRTPManager streamRTPManager = stream.getStreamRTPManager();
    if (streamRTPManager == null) {
      logger.warn("streamRTPManager is null.");
      return MIN_RTCP_REPORTS_BLOCKS_ARRAY;
    }

    Collection<ReceiveStream> receiveStreams = streamRTPManager.getReceiveStreams();

    if (receiveStreams == null || receiveStreams.size() == 0) {
      logger.info("There are no receive streams to build report " + "blocks for.");
      return MIN_RTCP_REPORTS_BLOCKS_ARRAY;
    }

    SSRCCache cache = streamRTPManager.getSSRCCache();
    if (cache == null) {
      logger.info("cache is null.");
      return MIN_RTCP_REPORTS_BLOCKS_ARRAY;
    }

    // Create the return object.
    Collection<RTCPReportBlock> rtcpReportBlocks = new ArrayList<RTCPReportBlock>();

    // Populate the return object.
    for (ReceiveStream receiveStream : receiveStreams) {
      // Dig into the guts of FMJ and get the stats for the current
      // receiveStream.
      SSRCInfo info = cache.cache.get((int) receiveStream.getSSRC());

      if (!info.ours && info.sender) {
        RTCPReportBlock rtcpReportBlock = info.makeReceiverReport(time);
        rtcpReportBlocks.add(rtcpReportBlock);
      }
    }

    return rtcpReportBlocks.toArray(new RTCPReportBlock[rtcpReportBlocks.size()]);
  }
Example #18
 /**
  * Discovers WS devices on the local network, filtered by the given regular expressions.
  *
  * @param regexpProtocol URL protocol matching regexp such as "^http$"; may be empty ("")
  * @param regexpPath URL path matching regexp such as "onvif"; may be empty ("")
  * @return list of unique, filtered device URLs
  */
 public static Collection<URL> discoverWsDevicesAsUrls(String regexpProtocol, String regexpPath) {
   final Collection<URL> urls =
       new TreeSet<>(
           new Comparator<URL>() {
             public int compare(URL o1, URL o2) {
               return o1.toString().compareTo(o2.toString());
             }
           });
   for (String key : discoverWsDevices()) {
     try {
       final URL url = new URL(key);
       boolean ok = true;
       if (regexpProtocol.length() > 0 && !url.getProtocol().matches(regexpProtocol)) ok = false;
       if (regexpPath.length() > 0 && !url.getPath().matches(regexpPath)) ok = false;
       if (ok) urls.add(url);
     } catch (MalformedURLException e) {
       e.printStackTrace();
     }
   }
   return urls;
 }
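
A short usage sketch for the method above (the filter values echo the examples in its javadoc; results naturally depend on what is present on the local network):

 // Hypothetical usage: keep only http devices whose path mentions "onvif".
 public static void main(String[] args) {
   for (URL url : discoverWsDevicesAsUrls("^http$", "onvif"))
     System.out.println("Discovered device: " + url);
 }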
Example #19
  /**
   * Decode file charset.
   *
   * @param f File to process.
   * @return File charset.
   * @throws IOException in case of error.
   */
  public static Charset decode(File f) throws IOException {
    SortedMap<String, Charset> charsets = Charset.availableCharsets();

    String[] firstCharsets = {
      Charset.defaultCharset().name(), "US-ASCII", "UTF-8", "UTF-16BE", "UTF-16LE"
    };

    Collection<Charset> orderedCharsets = U.newLinkedHashSet(charsets.size());

    for (String c : firstCharsets)
      if (charsets.containsKey(c)) orderedCharsets.add(charsets.get(c));

    orderedCharsets.addAll(charsets.values());

    try (RandomAccessFile raf = new RandomAccessFile(f, "r")) {
      FileChannel ch = raf.getChannel();

      ByteBuffer buf = ByteBuffer.allocate(4096);

      ch.read(buf);

      buf.flip();

      for (Charset charset : orderedCharsets) {
        CharsetDecoder decoder = charset.newDecoder();

        decoder.reset();

        try {
          decoder.decode(buf);

          return charset;
        } catch (CharacterCodingException ignored) {
        }
      }
    }

    return Charset.defaultCharset();
  }
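
A short usage sketch for the charset detection above (the helper below is hypothetical):

  import java.io.BufferedReader;
  import java.io.File;
  import java.io.FileInputStream;
  import java.io.IOException;
  import java.io.InputStreamReader;
  import java.nio.charset.Charset;

  // Hypothetical usage: detect the charset first, then read the file with it.
  static String readFirstLine(File f) throws IOException {
    Charset cs = decode(f);

    try (BufferedReader rdr =
        new BufferedReader(new InputStreamReader(new FileInputStream(f), cs))) {
      return rdr.readLine();
    }
  }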
Example #20
  /** {@inheritDoc} */
  @Override
  public GridFuture<?> addData(Collection<? extends Map.Entry<K, V>> entries) {
    A.notEmpty(entries, "entries");

    enterBusy();

    try {
      GridFutureAdapter<Object> resFut = new GridFutureAdapter<>(ctx);

      activeFuts.add(resFut);

      resFut.listenAsync(rmvActiveFut);

      Collection<K> keys = new GridConcurrentHashSet<>(entries.size(), 1.0f, 16);

      for (Map.Entry<K, V> entry : entries) keys.add(entry.getKey());

      load0(entries, resFut, keys, 0);

      return resFut;
    } finally {
      leaveBusy();
    }
  }
Example #21
  /**
   * See <a href="http://e-docs.bea.com/wls/docs100/javadocs/weblogic/common/T3StartupDef.html">
   * http://e-docs.bea.com/wls/docs100/javadocs/weblogic/common/T3StartupDef.html</a> for more
   * information.
   *
   * @param str Virtual name by which the class is registered as a {@code startupClass} in the
   *     {@code config.xml} file
   * @param params A hashtable that is made up of the name-value pairs supplied from the {@code
   *     startupArgs} property
   * @return Result string (log message).
   * @throws Exception Thrown if error occurred.
   */
  @SuppressWarnings({"unchecked", "CatchGenericClass"})
  @Override
  public String startup(String str, Hashtable params) throws Exception {
    GridLogger log = new GridJavaLogger(LoggingHelper.getServerLogger());

    cfgFile = (String) params.get(cfgFilePathParam);

    if (cfgFile == null) {
      throw new IllegalArgumentException("Failed to read property: " + cfgFilePathParam);
    }

    String workMgrName = (String) params.get(workMgrParam);

    URL cfgUrl = U.resolveGridGainUrl(cfgFile);

    if (cfgUrl == null)
      throw new ServerLifecycleException(
          "Failed to find Spring configuration file (path provided should be "
              + "either absolute, relative to GRIDGAIN_HOME, or relative to META-INF folder): "
              + cfgFile);

    GenericApplicationContext springCtx;

    try {
      springCtx = new GenericApplicationContext();

      XmlBeanDefinitionReader xmlReader = new XmlBeanDefinitionReader(springCtx);

      xmlReader.loadBeanDefinitions(new UrlResource(cfgUrl));

      springCtx.refresh();
    } catch (BeansException e) {
      throw new ServerLifecycleException(
          "Failed to instantiate Spring XML application context: " + e.getMessage(), e);
    }

    Map cfgMap;

    try {
      // Note: Spring is not generics-friendly.
      cfgMap = springCtx.getBeansOfType(GridConfiguration.class);
    } catch (BeansException e) {
      throw new ServerLifecycleException(
          "Failed to instantiate bean [type="
              + GridConfiguration.class
              + ", err="
              + e.getMessage()
              + ']',
          e);
    }

    if (cfgMap == null)
      throw new ServerLifecycleException(
          "Failed to find a single grid factory configuration in: " + cfgUrl);

    if (cfgMap.isEmpty())
      throw new ServerLifecycleException("Can't find grid factory configuration in: " + cfgUrl);

    try {
      ExecutorService execSvc = null;

      MBeanServer mbeanSrv = null;

      for (GridConfiguration cfg : (Collection<GridConfiguration>) cfgMap.values()) {
        assert cfg != null;

        GridConfigurationAdapter adapter = new GridConfigurationAdapter(cfg);

        // Set logger.
        if (cfg.getGridLogger() == null) adapter.setGridLogger(log);

        if (cfg.getExecutorService() == null) {
          if (execSvc == null)
            execSvc =
                workMgrName != null
                    ? new GridThreadWorkManagerExecutor(workMgrName)
                    : new GridThreadWorkManagerExecutor(J2EEWorkManager.getDefault());

          adapter.setExecutorService(execSvc);
        }

        if (cfg.getMBeanServer() == null) {
          if (mbeanSrv == null) {
            InitialContext ctx = null;

            try {
              ctx = new InitialContext();

              mbeanSrv = (MBeanServer) ctx.lookup("java:comp/jmx/runtime");
            } catch (Exception e) {
              throw new IllegalArgumentException(
                  "MBean server was not provided and failed to obtain " + "Weblogic MBean server.",
                  e);
            } finally {
              if (ctx != null) ctx.close();
            }
          }

          adapter.setMBeanServer(mbeanSrv);
        }

        Grid grid = G.start(adapter, springCtx);

        // If the grid is not null, it started properly.
        if (grid != null) gridNames.add(grid.name());
      }

      return getClass().getSimpleName() + " started successfully.";
    } catch (GridException e) {
      // Stop started grids only.
      for (String name : gridNames) G.stop(name, true);

      throw new ServerLifecycleException("Failed to start GridGain.", e);
    }
  }
  /** {@inheritDoc} */
  @Override
  public RawPacket report() {
    garbageCollector.cleanup();

    // TODO Compound RTCP packets should not exceed the MTU of the network
    // path.
    //
    // An individual RTP participant should send only one compound RTCP
    // packet per report interval in order for the RTCP bandwidth per
    // participant to be estimated correctly, except when the compound
    // RTCP packet is split for partial encryption.
    //
    // If there are too many sources to fit all the necessary RR packets
    // into one compound RTCP packet without exceeding the maximum
    // transmission unit (MTU) of the network path, then only the subset
    // that will fit into one MTU should be included in each interval. The
    // subsets should be selected round-robin across multiple intervals so
    // that all sources are reported.
    //
    // It is impossible to know in advance what the MTU of the path will be.
    // There are various algorithms for experimenting to find out, but many
    // devices do not properly implement (or deliberately ignore) the
    // necessary standards so it all comes down to trial and error. For that
    // reason, we can just guess 1200 or 1500 bytes per message.
    long time = System.currentTimeMillis();

    Collection<RTCPPacket> packets = new ArrayList<RTCPPacket>();

    // First, we build the RRs.
    Collection<RTCPRRPacket> rrPackets = makeRTCPRRPackets(time);
    if (rrPackets != null && rrPackets.size() != 0) {
      packets.addAll(rrPackets);
    }

    // Next, we build the SRs.
    Collection<RTCPSRPacket> srPackets = makeRTCPSRPackets(time);
    if (srPackets != null && srPackets.size() != 0) {
      packets.addAll(srPackets);
    }

    // Bail out if we have nothing to report.
    if (packets.size() == 0) {
      return null;
    }

    // Next, we build the REMB.
    RTCPREMBPacket rembPacket = makeRTCPREMBPacket();
    if (rembPacket != null) {
      packets.add(rembPacket);
    }

    // Finally, we add an SDES packet.
    RTCPSDESPacket sdesPacket = makeSDESPacket();
    if (sdesPacket != null) {
      packets.add(sdesPacket);
    }

    // Prepare the <tt>RTCPCompoundPacket</tt> to return.
    RTCPPacket[] rtcpPackets = packets.toArray(new RTCPPacket[packets.size()]);

    RTCPCompoundPacket cp = new RTCPCompoundPacket(rtcpPackets);

    // Build the <tt>RTCPCompoundPacket</tt> and return the
    // <tt>RawPacket</tt> to inject to the <tt>MediaStream</tt>.
    return generator.apply(cp);
  }
Example #23
 /**
  * Discovers WS devices on the local network.
  *
  * @return list of unique device access strings, which are URLs in most cases
  */
 public static Collection<String> discoverWsDevices() {
   final Collection<String> addresses = new ConcurrentSkipListSet<>();
   final CountDownLatch serverStarted = new CountDownLatch(1);
   final CountDownLatch serverFinished = new CountDownLatch(1);
   final Collection<InetAddress> addressList = new ArrayList<>();
   try {
     final Enumeration<NetworkInterface> interfaces = NetworkInterface.getNetworkInterfaces();
     if (interfaces != null) {
       while (interfaces.hasMoreElements()) {
         NetworkInterface anInterface = interfaces.nextElement();
         if (!anInterface.isLoopback()) {
           final List<InterfaceAddress> interfaceAddresses = anInterface.getInterfaceAddresses();
           for (InterfaceAddress address : interfaceAddresses) {
             addressList.add(address.getAddress());
           }
         }
       }
     }
   } catch (SocketException e) {
     e.printStackTrace();
   }
   ExecutorService executorService = Executors.newCachedThreadPool();
   for (final InetAddress address : addressList) {
     Runnable runnable =
         new Runnable() {
           public void run() {
             try {
               final String uuid = UUID.randomUUID().toString();
               final String probe =
                   WS_DISCOVERY_PROBE_MESSAGE.replaceAll(
                       "<wsa:MessageID>urn:uuid:.*</wsa:MessageID>",
                       "<wsa:MessageID>urn:uuid:" + uuid + "</wsa:MessageID>");
               final int port = random.nextInt(20000) + 40000;
               @SuppressWarnings("SocketOpenedButNotSafelyClosed")
               final DatagramSocket server = new DatagramSocket(port, address);
               new Thread() {
                 public void run() {
                   try {
                     final DatagramPacket packet = new DatagramPacket(new byte[4096], 4096);
                     server.setSoTimeout(WS_DISCOVERY_TIMEOUT);
                     long timerStarted = System.currentTimeMillis();
                     while (System.currentTimeMillis() - timerStarted < (WS_DISCOVERY_TIMEOUT)) {
                       serverStarted.countDown();
                       server.receive(packet);
                       final Collection<String> collection =
                           parseSoapResponseForUrls(
                               Arrays.copyOf(packet.getData(), packet.getLength()));
                       for (String key : collection) {
                         addresses.add(key);
                       }
                     }
                   } catch (SocketTimeoutException ignored) {
                   } catch (Exception e) {
                     e.printStackTrace();
                   } finally {
                     serverFinished.countDown();
                     server.close();
                   }
                 }
               }.start();
               try {
                 serverStarted.await(1000, TimeUnit.MILLISECONDS);
               } catch (InterruptedException e) {
                 e.printStackTrace();
               }
               if (address instanceof Inet4Address) {
                 server.send(
                     new DatagramPacket(
                         probe.getBytes(),
                         probe.length(),
                         InetAddress.getByName(WS_DISCOVERY_ADDRESS_IPv4),
                         WS_DISCOVERY_PORT));
               } else {
                 server.send(
                     new DatagramPacket(
                         probe.getBytes(),
                         probe.length(),
                         InetAddress.getByName(WS_DISCOVERY_ADDRESS_IPv6),
                         WS_DISCOVERY_PORT));
               }
             } catch (Exception e) {
               e.printStackTrace();
             }
             try {
               serverFinished.await((WS_DISCOVERY_TIMEOUT), TimeUnit.MILLISECONDS);
             } catch (InterruptedException e) {
               e.printStackTrace();
             }
           }
         };
     executorService.submit(runnable);
   }
   try {
     executorService.shutdown();
     executorService.awaitTermination(WS_DISCOVERY_TIMEOUT + 2000, TimeUnit.MILLISECONDS);
   } catch (InterruptedException ignored) {
   }
   return addresses;
 }
Example #24
  /**
   * Grabs local events and detects whether events were lost since the last poll.
   *
   * @param ignite Target grid.
   * @param evtOrderKey Unique key to take last order key from node local map.
   * @param evtThrottleCntrKey Unique key to take throttle count from node local map.
   * @param evtTypes Event types to collect.
   * @param evtMapper Closure to map grid events to Visor data transfer objects.
   * @return Collection of node events.
   */
  public static Collection<VisorGridEvent> collectEvents(
      Ignite ignite,
      String evtOrderKey,
      String evtThrottleCntrKey,
      final int[] evtTypes,
      IgniteClosure<Event, VisorGridEvent> evtMapper) {
    assert ignite != null;
    assert evtTypes != null && evtTypes.length > 0;

    ConcurrentMap<String, Long> nl = ignite.cluster().nodeLocalMap();

    final long lastOrder = getOrElse(nl, evtOrderKey, -1L);
    final long throttle = getOrElse(nl, evtThrottleCntrKey, 0L);

    // The first time we arrive on a node to collect its local events, we only grab events
    // that are not older than the given period, to make sure we are not accidentally
    // grabbing GBs of data.
    final long notOlderThan = System.currentTimeMillis() - EVENTS_COLLECT_TIME_WINDOW;

    // Flag for detecting gaps between events.
    final AtomicBoolean lastFound = new AtomicBoolean(lastOrder < 0);

    IgnitePredicate<Event> p =
        new IgnitePredicate<Event>() {
          /** */
          private static final long serialVersionUID = 0L;

          @Override
          public boolean apply(Event e) {
            // Detects that events were lost.
            if (!lastFound.get() && (lastOrder == e.localOrder())) lastFound.set(true);

            // Retains events by lastOrder, period and type.
            return e.localOrder() > lastOrder
                && e.timestamp() > notOlderThan
                && F.contains(evtTypes, e.type());
          }
        };

    Collection<Event> evts = ignite.events().localQuery(p);

    // Update latest order in node local, if not empty.
    if (!evts.isEmpty()) {
      Event maxEvt = Collections.max(evts, EVTS_ORDER_COMPARATOR);

      nl.put(evtOrderKey, maxEvt.localOrder());
    }

    // Update throttle counter.
    if (!lastFound.get())
      nl.put(evtThrottleCntrKey, throttle == 0 ? EVENTS_LOST_THROTTLE : throttle - 1);

    boolean lost = !lastFound.get() && throttle == 0;

    Collection<VisorGridEvent> res = new ArrayList<>(evts.size() + (lost ? 1 : 0));

    if (lost) res.add(new VisorGridEventsLost(ignite.cluster().localNode().id()));

    for (Event e : evts) {
      VisorGridEvent visorEvt = evtMapper.apply(e);

      if (visorEvt != null) res.add(visorEvt);
    }

    return res;
  }
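
The getOrElse helper used above is not shown here; its assumed semantics are a plain read-with-default from the node-local map, roughly:

  // Assumed semantics of getOrElse (not the original implementation): return the mapped value,
  // or the supplied default when the key is absent.
  private static <K, V> V getOrElse(ConcurrentMap<K, V> map, K key, V dflt) {
    V val = map.get(key);

    return val != null ? val : dflt;
  }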
 /** {@inheritDoc} */
 @Override
 void onQueryFutureCanceled(long reqId) {
   cancelled.add(reqId);
 }
Example #26
  /**
   * Checks all known service infos for matches to the given class and returns all matching
   * entries.
   *
   * @param plugin The plugin class to resolve.
   * @param options Discovery options used to filter the results (e.g., nearest, youngest, oldest).
   * @return All matching discovered plugins.
   */
  @SuppressWarnings("boxing")
  private Collection<DiscoveredPlugin> resolve(
      final Class<? extends Plugin> plugin, final DiscoverOption[] options) {

    // Refreshes all known remote discovery managers.
    refreshKnownDiscoveryManagers();

    // The result we intend to return
    final Collection<DiscoveredPlugin> result = new ArrayList<DiscoveredPlugin>();
    final Collection<RemoteManagerEndpoint> endpoints = this.remoteManagersEndpoints;

    // Query all managers
    for (final RemoteManagerEndpoint endpoint : endpoints) {

      // The manager to use ...
      final DiscoveryManager mgr =
          getRemoteProxyToDiscoveryManager(endpoint.address.getHostAddress(), endpoint.port);

      // No matter what happens, close the manager
      try {
        if (mgr == null) {
          this.logger.status(
              "resolve/vanished",
              "address",
              endpoint.address.getHostAddress(),
              "port",
              endpoint.port);
          continue;
        }

        //
        final AtomicInteger pingTime = new AtomicInteger(Integer.MAX_VALUE);

        // Get ping time
        AccessController.doPrivileged(
            new PrivilegedAction<Object>() {
              public Object run() {
                try {
                  final long start = System.nanoTime();

                  // Perform the ping
                  mgr.ping(new Random().nextInt());

                  // Obtain the time
                  final long stop = System.nanoTime();
                  final long time = (stop - start) / 1000;

                  // And set it
                  NetworkProbe.this.logger.status("resolve/ping", "time", time);
                  pingTime.set((int) time);
                } catch (final Exception e) {
                  NetworkProbe.this.logger.status(
                      "resolve/exception/ping", "message", e.getMessage());
                }
                return null;
              }
            });

        // Query the information (needs to be inside a privileged block, otherwise applets might
        // complain).
        final ExportInfo exportInfo =
            AccessController.doPrivileged(
                new PrivilegedAction<ExportInfo>() {
                  public ExportInfo run() {
                    return mgr.getExportInfoFor(plugin.getCanonicalName());
                  }
                });

        // If the plugin is not exported, do nothing
        if (!exportInfo.isExported) {
          continue;
        }

        // If it is, construct required data.
        for (final ExportedPlugin p : exportInfo.allExported) {
          final PublishMethod method = PublishMethod.valueOf(p.exportMethod);
          final URI uri = p.exportURI;

          String _newURI = "";

          _newURI += uri.getScheme();
          _newURI += "://";

          if (endpoint.address != null) {
            _newURI += endpoint.address.getHostAddress();
          } else {
            _newURI += "127.0.0.1";
          }
          _newURI += ":";
          _newURI += uri.getPort();
          _newURI += uri.getPath();

          try {
            // TODO: Compute distance properly.
            result.add(
                new DiscoveredPluginImpl(
                    method, new URI(_newURI), pingTime.get(), p.timeSinceExport));
          } catch (final URISyntaxException e) {
            e.printStackTrace();
          }
        }
      } catch (final Exception e) {
        this.logger.status(
            "resolve/exception/versionmess", "address", endpoint.address, "port", endpoint.port);
        e.printStackTrace();
      } finally {
        // In case the manager is of the type simple resource (which it should be), try to close it.
        if (mgr instanceof SimpleResource) {
          try {

            // Try to close the manager
            AccessController.doPrivileged(
                new PrivilegedAction<Object>() {
                  public Object run() {
                    ((SimpleResource) mgr).close();
                    return null;
                  }
                });

          } catch (final Exception e) {
            e.printStackTrace();
            this.logger.status(
                "resolve/exception/close", "address", endpoint.address, "port", endpoint.port);
          }
        }
      }
    }

    // If we have no result there is nothing to do
    if (result.size() == 0) return result;

    // Prepare filter options ...
    final OptionUtils<DiscoverOption> ou = new OptionUtils<DiscoverOption>(options);

    DiscoveredPlugin best = result.iterator().next();

    if (ou.contains(OptionNearest.class)) {
      // Check all plugins
      for (final DiscoveredPlugin p : result) {
        // If this one is closer, replace it
        if (p.getDistance() < best.getDistance()) {
          best = p;
        }
      }

      // Remove all other plugins
      result.clear();
      result.add(best);
    }

    if (ou.contains(OptionYoungest.class)) {
      // Check all plugins
      for (final DiscoveredPlugin p : result) {
        // If this one is younger, replace it
        if (p.getTimeSinceExport() < best.getTimeSinceExport()) {
          best = p;
        }
      }

      // Remove all other plugins
      result.clear();
      result.add(best);
    }

    if (ou.contains(OptionOldest.class)) {
      // Check all plugins
      for (final DiscoveredPlugin p : result) {
        // If this one is older, replace it
        if (p.getTimeSinceExport() > best.getTimeSinceExport()) {
          best = p;
        }
      }

      // Remove all other plugins
      result.clear();
      result.add(best);
    }

    // Debug plugins
    for (final DiscoveredPlugin p : result) {
      this.logger.status("resolve/return/", "plugin", p.getPublishURI());
    }

    return result;
  }
 @Override
 public void onUnknownFile(Document document, Header header) {
   warn("Unknown file extension: %s", document.getFilePath());
   unknownFiles.add(document.getFile());
 }
  /** @throws Exception Thrown if test failed. */
  public void testA() throws Exception {
    Collection<Integer> set = new GridConcurrentWeakHashSet<>();

    Integer i = 1;

    assert set.add(i);
    assert !set.add(i);

    assert set.contains(i);

    assert set.size() == 1;

    Collection<Integer> c = F.asList(2, 3, 4, 5);

    assert set.addAll(c);
    assert !set.addAll(c);

    assert set.containsAll(c);

    assert set.size() == 1 + c.size();

    assert set.remove(i);
    assert !set.remove(i);

    assert !set.contains(i);

    assert set.size() == c.size();

    assert set.removeAll(c);
    assert !set.removeAll(c);

    assert !set.containsAll(c);

    assert set.isEmpty();

    Collection<Integer> c1 = Arrays.asList(1, 3, 5, 7, 9);

    int cnt = 0;

    for (Iterator<Integer> iter = set.iterator(); iter.hasNext(); cnt++) c1.contains(iter.next());

    assert set.size() == cnt;

    assert set.size() == set.toArray().length;

    assert set.addAll(c1);

    assert set.retainAll(c);
    assert !set.retainAll(c);

    Collection<Integer> c2 = F.retain(c1, true, c);

    assert set.containsAll(c2);
    assert !set.containsAll(c1);
    assert !set.containsAll(c);

    assert set.size() == c2.size();

    set.clear();

    assert set.isEmpty();

    try {
      set.iterator().next();

      assert false;
    } catch (NoSuchElementException ignored) {
      assert true;
    }

    try {
      set.add(null);

      assert false;
    } catch (NullPointerException ignored) {
      assert true;
    }
  }
Example #29
 public <T> void sequentialRecursive(List<Node<T>> nodes, Collection<T> results) {
   for (Node<T> n : nodes) {
     results.add(n.compute());
     sequentialRecursive(n.getChildren(), results);
   }
 }
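
To make the traversal above concrete, a hypothetical Node implementation (the original Node type is not shown; compute() and getChildren() are the only members the traversal relies on, and treating Node as an interface is an assumption):

import java.util.Collections;
import java.util.List;

// Hypothetical node: returns a fixed value and a fixed list of children.
final class ConstantNode implements Node<Integer> {
  private final int value;
  private final List<Node<Integer>> children;

  ConstantNode(int value, List<Node<Integer>> children) {
    this.value = value;
    this.children = children;
  }

  static ConstantNode leaf(int value) {
    return new ConstantNode(value, Collections.<Node<Integer>>emptyList());
  }

  @Override
  public Integer compute() {
    return value;
  }

  @Override
  public List<Node<Integer>> getChildren() {
    return children;
  }
}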
  /** @throws Exception Thrown if test failed. */
  @SuppressWarnings({"UnusedAssignment"})
  public void testB() throws Exception {
    Collection<SampleBean> set = new GridConcurrentWeakHashSet<>();

    SampleBean bean1 = new SampleBean(1);

    assert set.add(bean1);
    assert !set.add(bean1);

    assert set.size() == 1;

    assert set.contains(bean1);

    bean1 = null;

    gc();

    assert set.isEmpty();

    Collection<SampleBean> c =
        F.asList(new SampleBean(1), new SampleBean(2), new SampleBean(3), new SampleBean(4));

    assert set.addAll(c);
    assert !set.addAll(c);

    assert set.size() == c.size();

    assert set.containsAll(c);

    c = null;

    gc();

    assert set.isEmpty();

    SampleBean b1 = new SampleBean(1);
    SampleBean b2 = new SampleBean(2);
    SampleBean b3 = new SampleBean(3);
    SampleBean b4 = new SampleBean(4);
    SampleBean b5 = new SampleBean(5);

    set.add(b1);
    set.add(b2);
    set.add(b3);
    set.add(b4);
    set.add(b5);

    Iterator<SampleBean> iter = set.iterator();

    assert iter.hasNext();

    b2 = null;
    b3 = null;
    b4 = null;

    gc();

    int cnt = 0;

    while (iter.hasNext()) {
      info(iter.next().toString());

      cnt++;
    }

    assert set.size() == cnt;
  }