/**
   * Increases priority if a job has been bumped down.
   *
   * @param waitJobs Ordered collection of collision contexts for jobs that are currently waiting
   *     for execution.
   * @param passiveJobs Reordered collection of collision contexts for waiting jobs.
   */
  private void bumpPriority(
      Collection<GridCollisionJobContext> waitJobs, List<GridCollisionJobContext> passiveJobs) {
    assert waitJobs != null;
    assert passiveJobs != null;
    assert waitJobs.size() == passiveJobs.size();

    for (int i = 0; i < passiveJobs.size(); i++) {
      GridCollisionJobContext ctx = passiveJobs.get(i);

      if (i > indexOf(waitJobs, ctx))
        ctx.getJobContext().setAttribute(jobAttrKey, getJobPriority(ctx) + starvationInc);
    }
  }
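The helpers referenced above, indexOf(...) and getJobPriority(...), are not part of this snippet. A minimal sketch of what they might look like follows, assuming the priority is stored as an Integer job-context attribute under jobAttrKey and that dfltPriority is a field holding the fallback priority (these names and defaults are assumptions for illustration, not taken from the source):

  /** Hypothetical helper: position of a context within an ordered collection. */
  private int indexOf(Collection<GridCollisionJobContext> jobs, GridCollisionJobContext ctx) {
    int i = 0;

    for (GridCollisionJobContext c : jobs) {
      if (c == ctx) return i;

      i++;
    }

    return -1; // Not expected when both collections contain the same contexts.
  }

  /** Hypothetical helper: current priority attribute, falling back to a default. */
  private int getJobPriority(GridCollisionJobContext ctx) {
    // Assumed: the priority was stored as an Integer under the same jobAttrKey used above.
    Integer p = ctx.getJobContext().getAttribute(jobAttrKey);

    return p != null ? p : dfltPriority;
  }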
Example 2
  /*
   * Launches against the agent and main.
   */
  public void testAgentAndMain() throws Exception {
    Project project = workspace.getProject("p1");
    Run bndrun = new Run(workspace, project.getBase(), project.getFile("one.bndrun"));
    bndrun.setProperty("-runpath", "biz.aQute.remote.launcher");
    bndrun.setProperty("-runbundles", "bsn-1,bsn-2");
    bndrun.setProperty("-runremote", "agent,main;agent=1090");

    final RemoteProjectLauncherPlugin pl =
        (RemoteProjectLauncherPlugin) bndrun.getProjectLauncher();
    pl.prepare();

    List<? extends RunSession> sessions = pl.getRunSessions();
    assertEquals(2, sessions.size());

    RunSession agent = sessions.get(0);
    RunSession main = sessions.get(1);

    CountDownLatch agentLatch = launch(agent);
    CountDownLatch mainLatch = launch(main);

    agent.waitTillStarted(1000);
    main.waitTillStarted(1000);
    Thread.sleep(500);

    agent.cancel();
    main.cancel();

    agentLatch.await();
    mainLatch.await();
    assertEquals(-3, agent.getExitCode());
    assertEquals(-3, main.getExitCode());

    bndrun.close();
  }
Example 3
  @Test
  public void testCollectToList() {
    Observable<List<Integer>> o =
        Observable.just(1, 2, 3).collect(ArrayList::new, (list, v) -> list.add(v));

    List<Integer> list = o.toBlocking().last();

    assertEquals(3, list.size());
    assertEquals(1, list.get(0).intValue());
    assertEquals(2, list.get(1).intValue());
    assertEquals(3, list.get(2).intValue());

    // test multiple subscribe
    List<Integer> list2 = o.toBlocking().last();

    assertEquals(3, list2.size());
    assertEquals(1, list2.get(0).intValue());
    assertEquals(2, list2.get(1).intValue());
    assertEquals(3, list2.get(2).intValue());
  }
Example 4
  /*
   * Launches against the agent
   */
  public void testSimpleLauncher() throws Exception {
    Project project = workspace.getProject("p1");
    Run bndrun = new Run(workspace, project.getBase(), project.getFile("one.bndrun"));
    bndrun.setProperty("-runpath", "biz.aQute.remote.launcher");
    bndrun.setProperty("-runbundles", "bsn-1,bsn-2");
    bndrun.setProperty("-runremote", "test");

    final RemoteProjectLauncherPlugin pl =
        (RemoteProjectLauncherPlugin) bndrun.getProjectLauncher();
    pl.prepare();
    final CountDownLatch latch = new CountDownLatch(1);
    final AtomicInteger exitCode = new AtomicInteger(-1);

    List<? extends RunSession> sessions = pl.getRunSessions();
    assertEquals(1, sessions.size());

    final RunSession session = sessions.get(0);

    Thread t =
        new Thread("test-launch") {
          public void run() {
            try {
              exitCode.set(session.launch());
            } catch (Exception e) {
              e.printStackTrace();
            } finally {
              latch.countDown();
            }
          }
        };
    t.start();
    Thread.sleep(500);

    for (Bundle b : context.getBundles()) {
      System.out.println(b.getLocation());
    }
    assertEquals(4, context.getBundles().length);
    String p1 = t1.getAbsolutePath();
    System.out.println(p1);

    assertNotNull(context.getBundle(p1));
    assertNotNull(context.getBundle(t2.getAbsolutePath()));

    pl.cancel();
    latch.await();

    assertEquals(-3, exitCode.get());

    bndrun.close();
  }
Example 5
  /**
   * Starts activity.
   *
   * @throws IgniteInterruptedCheckedException If interrupted.
   */
  public void init() throws IgniteInterruptedCheckedException {
    if (isDone()) return;

    if (init.compareAndSet(false, true)) {
      if (isDone()) return;

      try {
        // Wait for event to occur to make sure that discovery
        // will return corresponding nodes.
        U.await(evtLatch);

        assert discoEvt != null : this;
        assert !dummy && !forcePreload : this;

        ClusterNode oldest = CU.oldestAliveCacheServerNode(cctx, exchId.topologyVersion());

        oldestNode.set(oldest);

        startCaches();

        // True if client node joined or failed.
        boolean clientNodeEvt;

        if (F.isEmpty(reqs)) {
          int type = discoEvt.type();

          assert type == EVT_NODE_JOINED || type == EVT_NODE_LEFT || type == EVT_NODE_FAILED
              : discoEvt;

          clientNodeEvt = CU.clientNode(discoEvt.eventNode());
        } else {
          assert discoEvt.type() == EVT_DISCOVERY_CUSTOM_EVT : discoEvt;

          boolean clientOnlyStart = true;

          for (DynamicCacheChangeRequest req : reqs) {
            if (!req.clientStartOnly()) {
              clientOnlyStart = false;

              break;
            }
          }

          clientNodeEvt = clientOnlyStart;
        }

        if (clientNodeEvt) {
          ClusterNode node = discoEvt.eventNode();

          // The client needs to initialize affinity for a local join event or for started client caches.
          if (!node.isLocal()) {
            for (GridCacheContext cacheCtx : cctx.cacheContexts()) {
              if (cacheCtx.isLocal()) continue;

              GridDhtPartitionTopology top = cacheCtx.topology();

              top.updateTopologyVersion(exchId, this, -1, stopping(cacheCtx.cacheId()));

              if (cacheCtx.affinity().affinityTopologyVersion() == AffinityTopologyVersion.NONE) {
                initTopology(cacheCtx);

                top.beforeExchange(this);
              } else
                cacheCtx.affinity().clientEventTopologyChange(discoEvt, exchId.topologyVersion());
            }

            if (exchId.isLeft())
              cctx.mvcc().removeExplicitNodeLocks(exchId.nodeId(), exchId.topologyVersion());

            onDone(exchId.topologyVersion());

            skipPreload = cctx.kernalContext().clientNode();

            return;
          }
        }

        if (cctx.kernalContext().clientNode()) {
          skipPreload = true;

          for (GridCacheContext cacheCtx : cctx.cacheContexts()) {
            if (cacheCtx.isLocal()) continue;

            GridDhtPartitionTopology top = cacheCtx.topology();

            top.updateTopologyVersion(exchId, this, -1, stopping(cacheCtx.cacheId()));
          }

          for (GridCacheContext cacheCtx : cctx.cacheContexts()) {
            if (cacheCtx.isLocal()) continue;

            initTopology(cacheCtx);
          }

          if (oldestNode.get() != null) {
            rmtNodes =
                new ConcurrentLinkedQueue<>(
                    CU.aliveRemoteServerNodesWithCaches(cctx, exchId.topologyVersion()));

            rmtIds = Collections.unmodifiableSet(new HashSet<>(F.nodeIds(rmtNodes)));

            ready.set(true);

            initFut.onDone(true);

            if (log.isDebugEnabled()) log.debug("Initialized future: " + this);

            sendPartitions();
          } else onDone(exchId.topologyVersion());

          return;
        }

        assert oldestNode.get() != null;

        for (GridCacheContext cacheCtx : cctx.cacheContexts()) {
          if (isCacheAdded(cacheCtx.cacheId(), exchId.topologyVersion())) {
            if (cacheCtx
                .discovery()
                .cacheAffinityNodes(cacheCtx.name(), topologyVersion())
                .isEmpty())
              U.quietAndWarn(log, "No server nodes found for cache client: " + cacheCtx.namex());
          }

          cacheCtx.preloader().onExchangeFutureAdded();
        }

        List<String> cachesWithoutNodes = null;

        if (exchId.isLeft()) {
          for (String name : cctx.cache().cacheNames()) {
            if (cctx.discovery().cacheAffinityNodes(name, topologyVersion()).isEmpty()) {
              if (cachesWithoutNodes == null) cachesWithoutNodes = new ArrayList<>();

              cachesWithoutNodes.add(name);

              // Fire event even if there is no client cache started.
              if (cctx.gridEvents().isRecordable(EventType.EVT_CACHE_NODES_LEFT)) {
                Event evt =
                    new CacheEvent(
                        name,
                        cctx.localNode(),
                        cctx.localNode(),
                        "All server nodes have left the cluster.",
                        EventType.EVT_CACHE_NODES_LEFT,
                        0,
                        false,
                        null,
                        null,
                        null,
                        null,
                        false,
                        null,
                        false,
                        null,
                        null,
                        null);

                cctx.gridEvents().record(evt);
              }
            }
          }
        }

        if (cachesWithoutNodes != null) {
          StringBuilder sb =
              new StringBuilder(
                  "All server nodes for the following caches have left the cluster: ");

          for (int i = 0; i < cachesWithoutNodes.size(); i++) {
            String cache = cachesWithoutNodes.get(i);

            sb.append('\'').append(cache).append('\'');

            if (i != cachesWithoutNodes.size() - 1) sb.append(", ");
          }

          U.quietAndWarn(log, sb.toString());

          U.quietAndWarn(log, "Must have server nodes for caches to operate.");
        }

        assert discoEvt != null;

        assert exchId.nodeId().equals(discoEvt.eventNode().id());

        for (GridCacheContext cacheCtx : cctx.cacheContexts()) {
          GridClientPartitionTopology clientTop =
              cctx.exchange().clearClientTopology(cacheCtx.cacheId());

          long updSeq = clientTop == null ? -1 : clientTop.lastUpdateSequence();

          // Update before waiting for locks.
          if (!cacheCtx.isLocal())
            cacheCtx
                .topology()
                .updateTopologyVersion(exchId, this, updSeq, stopping(cacheCtx.cacheId()));
        }

        // Grab all alive remote nodes with order equal to or less than that of the last joined node.
        rmtNodes =
            new ConcurrentLinkedQueue<>(
                CU.aliveRemoteServerNodesWithCaches(cctx, exchId.topologyVersion()));

        rmtIds = Collections.unmodifiableSet(new HashSet<>(F.nodeIds(rmtNodes)));

        for (Map.Entry<UUID, GridDhtPartitionsSingleMessage> m : singleMsgs.entrySet())
          // If any messages were received, process them.
          onReceive(m.getKey(), m.getValue());

        for (Map.Entry<UUID, GridDhtPartitionsFullMessage> m : fullMsgs.entrySet())
          // If any messages were received, process them.
          onReceive(m.getKey(), m.getValue());

        AffinityTopologyVersion topVer = exchId.topologyVersion();

        for (GridCacheContext cacheCtx : cctx.cacheContexts()) {
          if (cacheCtx.isLocal()) continue;

          // Must initialize topology after we get discovery event.
          initTopology(cacheCtx);

          cacheCtx.preloader().updateLastExchangeFuture(this);
        }

        IgniteInternalFuture<?> partReleaseFut = cctx.partitionReleaseFuture(topVer);

        // Assign to class variable so it will be included into toString() method.
        this.partReleaseFut = partReleaseFut;

        if (log.isDebugEnabled()) log.debug("Before waiting for partition release future: " + this);

        while (true) {
          try {
            partReleaseFut.get(2 * cctx.gridConfig().getNetworkTimeout(), TimeUnit.MILLISECONDS);

            break;
          } catch (IgniteFutureTimeoutCheckedException ignored) {
            // Print pending transactions and locks that might have led to hang.
            dumpPendingObjects();
          }
        }

        if (log.isDebugEnabled()) log.debug("After waiting for partition release future: " + this);

        if (!F.isEmpty(reqs)) blockGateways();

        if (exchId.isLeft())
          cctx.mvcc().removeExplicitNodeLocks(exchId.nodeId(), exchId.topologyVersion());

        IgniteInternalFuture<?> locksFut = cctx.mvcc().finishLocks(exchId.topologyVersion());

        while (true) {
          try {
            locksFut.get(2 * cctx.gridConfig().getNetworkTimeout(), TimeUnit.MILLISECONDS);

            break;
          } catch (IgniteFutureTimeoutCheckedException ignored) {
            U.warn(
                log,
                "Failed to wait for locks release future. "
                    + "Dumping pending objects that might be the cause: "
                    + cctx.localNodeId());

            U.warn(log, "Locked entries:");

            Map<IgniteTxKey, Collection<GridCacheMvccCandidate>> locks =
                cctx.mvcc().unfinishedLocks(exchId.topologyVersion());

            for (Map.Entry<IgniteTxKey, Collection<GridCacheMvccCandidate>> e : locks.entrySet())
              U.warn(log, "Locked entry [key=" + e.getKey() + ", mvcc=" + e.getValue() + ']');
          }
        }

        for (GridCacheContext cacheCtx : cctx.cacheContexts()) {
          if (cacheCtx.isLocal()) continue;

          // Notify replication manager.
          GridCacheContext drCacheCtx =
              cacheCtx.isNear() ? cacheCtx.near().dht().context() : cacheCtx;

          if (drCacheCtx.isDrEnabled()) drCacheCtx.dr().beforeExchange(topVer, exchId.isLeft());

          // Partition release future is done so we can flush the write-behind store.
          cacheCtx.store().forceFlush();

          // Process queued undeploys prior to sending/spreading map.
          cacheCtx.preloader().unwindUndeploys();

          GridDhtPartitionTopology top = cacheCtx.topology();

          assert topVer.equals(top.topologyVersion())
              : "Topology version is updated only by instances of this class inside a single ExchangeWorker thread.";

          top.beforeExchange(this);
        }

        for (GridClientPartitionTopology top : cctx.exchange().clientTopologies()) {
          top.updateTopologyVersion(exchId, this, -1, stopping(top.cacheId()));

          top.beforeExchange(this);
        }
      } catch (IgniteInterruptedCheckedException e) {
        onDone(e);

        throw e;
      } catch (Throwable e) {
        U.error(
            log,
            "Failed to reinitialize local partitions (preloading will be stopped): " + exchId,
            e);

        onDone(e);

        if (e instanceof Error) throw (Error) e;

        return;
      }

      if (F.isEmpty(rmtIds)) {
        onDone(exchId.topologyVersion());

        return;
      }

      ready.set(true);

      initFut.onDone(true);

      if (log.isDebugEnabled()) log.debug("Initialized future: " + this);

      // If this node is not oldest.
      if (!oldestNode.get().id().equals(cctx.localNodeId())) sendPartitions();
      else {
        boolean allReceived = allReceived();

        if (allReceived && replied.compareAndSet(false, true)) {
          if (spreadPartitions()) onDone(exchId.topologyVersion());
        }
      }

      scheduleRecheck();
    } else assert false : "Skipped init future: " + this;
  }
Example 6
  private <E extends Enum<E>> Enum<E> getNextEnumPlaceholder(Class<E> clazz) {
    List<E> enums = new ArrayList<E>(EnumSet.allOf(clazz));
    return enums.get(enumPlaceholder++ % enums.size());
  }
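The method above cycles through an enum's constants in declaration order, wrapping around once the counter passes the last constant. A small hypothetical usage sketch (the Color enum and the starting value of enumPlaceholder are illustrations only, not from the source):

  // Assuming an int field 'enumPlaceholder' initialized to 0 and this enum:
  enum Color { RED, GREEN, BLUE }

  void demoEnumCycling() {
    // EnumSet.allOf() iterates in declaration order, so calls cycle RED, GREEN, BLUE, RED, ...
    System.out.println(getNextEnumPlaceholder(Color.class)); // RED
    System.out.println(getNextEnumPlaceholder(Color.class)); // GREEN
    System.out.println(getNextEnumPlaceholder(Color.class)); // BLUE
    System.out.println(getNextEnumPlaceholder(Color.class)); // RED (index wraps via the modulo)
  }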
Example 7
  /**
   * Performs flush.
   *
   * @throws GridException If failed.
   */
  private void doFlush() throws GridException {
    lastFlushTime = U.currentTimeMillis();

    List<GridFuture> activeFuts0 = null;

    int doneCnt = 0;

    for (GridFuture<?> f : activeFuts) {
      if (!f.isDone()) {
        if (activeFuts0 == null) activeFuts0 = new ArrayList<>((int) (activeFuts.size() * 1.2));

        activeFuts0.add(f);
      } else {
        f.get();

        doneCnt++;
      }
    }

    if (activeFuts0 == null || activeFuts0.isEmpty()) return;

    while (true) {
      Queue<GridFuture<?>> q = null;

      for (Buffer buf : bufMappings.values()) {
        GridFuture<?> flushFut = buf.flush();

        if (flushFut != null) {
          if (q == null) q = new ArrayDeque<>(bufMappings.size() * 2);

          q.add(flushFut);
        }
      }

      if (q != null) {
        assert !q.isEmpty();

        boolean err = false;

        for (GridFuture fut = q.poll(); fut != null; fut = q.poll()) {
          try {
            fut.get();
          } catch (GridException e) {
            if (log.isDebugEnabled()) log.debug("Failed to flush buffer: " + e);

            err = true;
          }
        }

        if (err)
          // Remaps needed - flush buffers.
          continue;
      }

      doneCnt = 0;

      for (int i = 0; i < activeFuts0.size(); i++) {
        GridFuture f = activeFuts0.get(i);

        if (f == null) doneCnt++;
        else if (f.isDone()) {
          f.get();

          doneCnt++;

          activeFuts0.set(i, null);
        } else break;
      }

      if (doneCnt == activeFuts0.size()) return;
    }
  }
Example 8
  /** @throws Exception If failed. */
  public void testCreateFileFragmented() throws Exception {
    GridGgfsEx impl = (GridGgfsEx) grid(0).ggfs("ggfs");

    GridGgfsFragmentizerManager fragmentizer = impl.context().fragmentizer();

    GridTestUtils.setFieldValue(fragmentizer, "fragmentizerEnabled", false);

    GridGgfsPath path = new GridGgfsPath("/file");

    try {
      GridGgfs fs0 = grid(0).ggfs("ggfs");
      GridGgfs fs1 = grid(1).ggfs("ggfs");
      GridGgfs fs2 = grid(2).ggfs("ggfs");

      try (GridGgfsOutputStream out =
          fs0.create(
              path,
              128,
              false,
              1,
              CFG_GRP_SIZE,
              F.asMap(GridGgfs.PROP_PREFER_LOCAL_WRITES, "true"))) {
        // 1.5 blocks
        byte[] data = new byte[CFG_BLOCK_SIZE * 3 / 2];

        Arrays.fill(data, (byte) 1);

        out.write(data);
      }

      try (GridGgfsOutputStream out = fs1.append(path, false)) {
        // 1.5 blocks.
        byte[] data = new byte[CFG_BLOCK_SIZE * 3 / 2];

        Arrays.fill(data, (byte) 2);

        out.write(data);
      }

      // After this we should have the first two blocks colocated with grid 0 and the last block
      // colocated with grid 1.
      GridGgfsFileImpl fileImpl = (GridGgfsFileImpl) fs.info(path);

      GridCache<Object, Object> metaCache = grid(0).cachex(META_CACHE_NAME);

      GridGgfsFileInfo fileInfo = (GridGgfsFileInfo) metaCache.get(fileImpl.fileId());

      GridGgfsFileMap map = fileInfo.fileMap();

      List<GridGgfsFileAffinityRange> ranges = map.ranges();

      assertEquals(2, ranges.size());

      assertTrue(ranges.get(0).startOffset() == 0);
      assertTrue(ranges.get(0).endOffset() == 2 * CFG_BLOCK_SIZE - 1);

      assertTrue(ranges.get(1).startOffset() == 2 * CFG_BLOCK_SIZE);
      assertTrue(ranges.get(1).endOffset() == 3 * CFG_BLOCK_SIZE - 1);

      // Validate data read after colocated writes.
      try (GridGgfsInputStream in = fs2.open(path)) {
        // Validate first part of file.
        for (int i = 0; i < CFG_BLOCK_SIZE * 3 / 2; i++) assertEquals((byte) 1, in.read());

        // Validate second part of file.
        for (int i = 0; i < CFG_BLOCK_SIZE * 3 / 2; i++) assertEquals((byte) 2, in.read());

        assertEquals(-1, in.read());
      }
    } finally {
      GridTestUtils.setFieldValue(fragmentizer, "fragmentizerEnabled", true);

      boolean hasData = false;

      for (int i = 0; i < NODES_CNT; i++) hasData |= !grid(i).cachex(DATA_CACHE_NAME).isEmpty();

      assertTrue(hasData);

      fs.delete(path, true);
    }

    GridTestUtils.retryAssert(
        log,
        ASSERT_RETRIES,
        ASSERT_RETRY_INTERVAL,
        new CAX() {
          @Override
          public void applyx() {
            for (int i = 0; i < NODES_CNT; i++)
              assertTrue(grid(i).cachex(DATA_CACHE_NAME).isEmpty());
          }
        });
  }
Example 9
    /** {@inheritDoc} */
    @Override
    public String reduce(List<GridComputeJobResult> results) throws GridException {
      assert results.size() == 1;

      return results.get(0).getData();
    }