/** @throws Exception If failed. */
  public void testMapField() throws Exception {
    BinaryObjectBuilder builder = builder("Class");

    builder.hashCode(100);

    builder.setField("mapField", F.asMap(new Key(1), new Value(1), new Key(2), new Value(2)));
    builder.setField(
        "mapField2", F.asMap(new Key(1), new Value(1), new Key(2), new Value(2)), Map.class);

    BinaryObject po = builder.build();

    assertEquals(expectedHashCode("Class"), po.type().typeId());
    assertEquals(100, po.hashCode());

    // Test non-standard map.
    Map<Key, Value> map = po.field("mapField");

    assertEquals(2, map.size());

    for (Map.Entry<Key, Value> e : map.entrySet()) assertEquals(e.getKey().i, e.getValue().i);

    // Test binary map.
    Map<BinaryObject, BinaryObject> map2 = po.field("mapField2");

    assertEquals(2, map2.size());

    for (Map.Entry<BinaryObject, BinaryObject> e : map2.entrySet())
      assertEquals(e.getKey().<Key>deserialize().i, e.getValue().<Value>deserialize().i);
  }
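For reference, a minimal usage sketch of the F.asMap(...) helper used above. It assumes the alternating key/value overloads in org.apache.ignite.internal.util.typedef.F; the AsMapSketch class itself is hypothetical:

import java.util.Map;

import org.apache.ignite.internal.util.typedef.F;

public class AsMapSketch {
  public static void main(String[] args) {
    // Builds a map from alternating key/value arguments, as in
    // builder.setField("mapField", F.asMap(...)) above.
    Map<String, Integer> m = F.asMap("a", 1, "b", 2);

    assert m.size() == 2;
    assert m.get("b") == 2;
  }
}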
Example #2
  /**
   * Calculates data nodes for replicated caches on unstable topology.
   *
   * @param cctx Cache context for main space.
   * @param extraSpaces Extra spaces.
   * @return Collection of all data nodes owning all the caches or {@code null} for retry.
   */
  private Collection<ClusterNode> replicatedUnstableDataNodes(
      final GridCacheContext<?, ?> cctx, List<String> extraSpaces) {
    assert cctx.isReplicated() : cctx.name() + " must be replicated";

    Set<ClusterNode> nodes = replicatedUnstableDataNodes(cctx);

    if (F.isEmpty(nodes)) return null; // Retry.

    if (!F.isEmpty(extraSpaces)) {
      for (String extraSpace : extraSpaces) {
        GridCacheContext<?, ?> extraCctx = cacheContext(extraSpace);

        if (extraCctx.isLocal()) continue;

        if (!extraCctx.isReplicated())
          throw new CacheException(
              "Queries running on replicated cache should not contain JOINs "
                  + "with tables in partitioned caches [rCache="
                  + cctx.name()
                  + ", pCache="
                  + extraSpace
                  + "]");

        Set<ClusterNode> extraOwners = replicatedUnstableDataNodes(extraCctx);

        if (F.isEmpty(extraOwners)) return null; // Retry.

        nodes.retainAll(extraOwners);

        if (nodes.isEmpty()) return null; // Retry.
      }
    }

    return nodes;
  }
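The retainAll loop above is plain set intersection with a retry signal whenever any owner set turns up empty. The same idea as a self-contained, JDK-only sketch; every name below is illustrative, not an Ignite API:

import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

public class IntersectSketch {
  /** Returns nodes present in every owner set, or {@code null} to signal a retry. */
  static Set<String> commonOwners(List<Set<String>> ownersPerCache) {
    Set<String> res = null;

    for (Set<String> owners : ownersPerCache) {
      if (owners == null || owners.isEmpty()) return null; // Retry: no stable owners yet.

      if (res == null) res = new HashSet<>(owners);
      else res.retainAll(owners);

      if (res.isEmpty()) return null; // Retry: no node owns all caches seen so far.
    }

    return res;
  }

  public static void main(String[] args) {
    List<Set<String>> owners =
        Arrays.asList(
            new HashSet<>(Arrays.asList("n1", "n2")), new HashSet<>(Arrays.asList("n2", "n3")));

    System.out.println(commonOwners(owners)); // Prints [n2].
  }
}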
Example #3
  /**
   * @param c Connection.
   * @param space Space.
   * @param qry Query.
   * @return Cursor for plans.
   * @throws IgniteCheckedException If failed.
   */
  private Iterator<List<?>> explainPlan(JdbcConnection c, String space, GridCacheTwoStepQuery qry)
      throws IgniteCheckedException {
    List<List<?>> lists = new ArrayList<>();

    for (int i = 0, mapQrys = qry.mapQueries().size(); i < mapQrys; i++) {
      ResultSet rs =
          h2.executeSqlQueryWithTimer(space, c, "SELECT PLAN FROM " + table(i), null, false);

      lists.add(F.asList(getPlan(rs)));
    }

    int tblIdx = 0;

    for (GridCacheSqlQuery mapQry : qry.mapQueries()) {
      GridMergeTable tbl = createMergeTable(c, mapQry, false);

      fakeTable(c, tblIdx++).setInnerTable(tbl);
    }

    GridCacheSqlQuery rdc = qry.reduceQuery();

    ResultSet rs =
        h2.executeSqlQueryWithTimer(
            space, c, "EXPLAIN " + rdc.query(), F.asList(rdc.parameters()), false);

    lists.add(F.asList(getPlan(rs)));

    return lists.iterator();
  }
Example #4
    /**
     * @param topVer Topology version.
     * @param entries Entries.
     */
    FinishLockFuture(Iterable<GridDistributedCacheEntry> entries, AffinityTopologyVersion topVer) {
      assert topVer.compareTo(AffinityTopologyVersion.ZERO) > 0;

      this.topVer = topVer;

      for (GridCacheEntryEx entry : entries) {
        // Either local or near local candidates.
        try {
          Collection<GridCacheMvccCandidate> locs = entry.localCandidates();

          if (!F.isEmpty(locs)) {
            Collection<GridCacheMvccCandidate> cands = new ConcurrentLinkedQueue<>();

            cands.addAll(F.view(locs, versionFilter()));

            if (!F.isEmpty(cands)) pendingLocks.put(entry.txKey(), cands);
          }
        } catch (GridCacheEntryRemovedException ignored) {
          if (exchLog.isDebugEnabled())
            exchLog.debug(
                "Got removed entry when adding it to finish lock future (will ignore): " + entry);
        }
      }

      if (exchLog.isDebugEnabled())
        exchLog.debug("Pending lock set [topVer=" + topVer + ", locks=" + pendingLocks + ']');
    }
  /** {@inheritDoc} */
  @SuppressWarnings("unchecked")
  @Override
  public Collection<BinaryType> metadata() throws BinaryObjectException {
    if (clientNode)
      return F.viewReadOnly(
          clientMetaDataCache.values(),
          new IgniteClosure<BinaryTypeImpl, BinaryType>() {
            @Override
            public BinaryType apply(BinaryTypeImpl meta) {
              return meta;
            }
          });
    else {
      return F.viewReadOnly(
          metaDataCache.entrySetx(metaPred),
          new C1<Cache.Entry<PortableMetadataKey, BinaryMetadata>, BinaryType>() {
            private static final long serialVersionUID = 0L;

            @Override
            public BinaryType apply(Cache.Entry<PortableMetadataKey, BinaryMetadata> e) {
              return e.getValue().wrap(portableCtx);
            }
          });
    }
  }
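F.viewReadOnly wraps a collection in a transforming, read-only view without copying it. A JDK-only approximation (this hypothetical sketch materializes an eager copy rather than a live view, a deliberate simplification):

import java.util.Collection;
import java.util.Collections;
import java.util.function.Function;
import java.util.stream.Collectors;

public class ViewSketch {
  /** Read-only transformed collection, approximating F.viewReadOnly (eager, not live). */
  static <T, R> Collection<R> viewReadOnly(Collection<T> src, Function<T, R> transform) {
    return Collections.unmodifiableList(
        src.stream().map(transform).collect(Collectors.toList()));
  }
}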
  /**
   * @param nodes Nodes.
   * @param id ID.
   * @throws IgniteCheckedException If failed.
   */
  private void sendAllPartitions(
      Collection<? extends ClusterNode> nodes, GridDhtPartitionExchangeId id)
      throws IgniteCheckedException {
    GridDhtPartitionsFullMessage m =
        new GridDhtPartitionsFullMessage(id, lastVer.get(), id.topologyVersion());

    for (GridCacheContext cacheCtx : cctx.cacheContexts()) {
      if (!cacheCtx.isLocal()) {
        AffinityTopologyVersion startTopVer = cacheCtx.startTopologyVersion();

        boolean ready = startTopVer == null || startTopVer.compareTo(id.topologyVersion()) <= 0;

        if (ready)
          m.addFullPartitionsMap(cacheCtx.cacheId(), cacheCtx.topology().partitionMap(true));
      }
    }

    // It is important that client topologies be added after contexts.
    for (GridClientPartitionTopology top : cctx.exchange().clientTopologies())
      m.addFullPartitionsMap(top.cacheId(), top.partitionMap(true));

    if (log.isDebugEnabled())
      log.debug(
          "Sending full partition map [nodeIds="
              + F.viewReadOnly(nodes, F.node2id())
              + ", exchId="
              + exchId
              + ", msg="
              + m
              + ']');

    cctx.io().safeSend(nodes, m, SYSTEM_POOL, null);
  }
  /**
   * @param ctx Context.
   * @param e Entry.
   * @return Entry collection.
   */
  private Collection<CacheContinuousQueryEntry> handleEvent(
      GridKernalContext ctx, CacheContinuousQueryEntry e) {
    assert e != null;

    if (internal) {
      if (e.isFiltered()) return Collections.emptyList();
      else return F.asList(e);
    }

    // Initial query entry or evicted entry. These events should be fired immediately.
    if (e.updateCounter() == -1L) return F.asList(e);

    PartitionRecovery rec = rcvs.get(e.partition());

    if (rec == null) {
      rec =
          new PartitionRecovery(
              ctx.log(getClass()),
              initTopVer,
              initUpdCntrs == null ? null : initUpdCntrs.get(e.partition()));

      PartitionRecovery oldRec = rcvs.putIfAbsent(e.partition(), rec);

      if (oldRec != null) rec = oldRec;
    }

    return rec.collectEntries(e);
  }
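The rec/oldRec exchange above is the standard lock-free idiom for lazily creating per-key state in a ConcurrentMap: build a candidate, publish it with putIfAbsent, and keep whichever instance won the race. A self-contained JDK sketch of the same idiom (StringBuilder stands in for PartitionRecovery; the class is hypothetical):

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

public class PutIfAbsentSketch {
  private final ConcurrentMap<Integer, StringBuilder> state = new ConcurrentHashMap<>();

  StringBuilder stateFor(int partition) {
    StringBuilder cur = state.get(partition);

    if (cur == null) {
      cur = new StringBuilder();

      // putIfAbsent returns the previously installed value, if any;
      // a non-null result means another thread won the race.
      StringBuilder old = state.putIfAbsent(partition, cur);

      if (old != null) cur = old;
    }

    return cur;
  }
}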
    /**
     * Checks state of the bean.
     *
     * @param gridName Grid name.
     * @param exec If {@code true}, try to execute an operation on the grid.
     */
    void checkState(String gridName, boolean exec) {
      assert log != null;
      assert appCtx != null;

      assert F.eq(gridName, ignite.name());

      if (exec)
        // Execute any grid method.
        G.ignite(gridName).events().localQuery(F.<Event>alwaysTrue());
    }
    /** @param nodes Nodes. */
    private Queue<ClusterNode> fallbacks(Collection<ClusterNode> nodes) {
      Queue<ClusterNode> fallbacks = new LinkedList<>();

      ClusterNode node = F.first(F.view(nodes, IS_LOC_NODE));

      if (node != null) fallbacks.add(node);

      fallbacks.addAll(node != null ? F.view(nodes, F.not(IS_LOC_NODE)) : nodes);

      return fallbacks;
    }
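fallbacks() orders the candidate nodes so that the local node, if present, is polled first. A JDK-only sketch of the same ordering (isLocal stands in for the IS_LOC_NODE predicate; all names are illustrative):

import java.util.Collection;
import java.util.LinkedList;
import java.util.Queue;
import java.util.function.Predicate;
import java.util.stream.Collectors;

public class FallbacksSketch {
  static Queue<String> fallbacks(Collection<String> nodes, Predicate<String> isLocal) {
    Queue<String> res = new LinkedList<>();

    // Local node first, if present.
    nodes.stream().filter(isLocal).findFirst().ifPresent(res::add);

    // Then the rest (all nodes if no local node was found).
    if (res.isEmpty()) res.addAll(nodes);
    else res.addAll(nodes.stream().filter(isLocal.negate()).collect(Collectors.toList()));

    return res;
  }
}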
  /**
   * @param cacheId Cache ID.
   * @return {@code True} if local client has been added.
   */
  public boolean isLocalClientAdded(int cacheId) {
    if (!F.isEmpty(reqs)) {
      for (DynamicCacheChangeRequest req : reqs) {
        if (req.start() && F.eq(req.initiatingNodeId(), cctx.localNodeId())) {
          if (CU.cacheId(req.cacheName()) == cacheId) return true;
        }
      }
    }

    return false;
  }
Example #11
  /**
   * Checks for explicit events configuration.
   *
   * @param ignite Grid instance.
   * @return {@code true} if all task events are explicitly specified in the configuration.
   */
  public static boolean checkExplicitTaskMonitoring(Ignite ignite) {
    int[] evts = ignite.configuration().getIncludeEventTypes();

    if (F.isEmpty(evts)) return false;

    for (int evt : VISOR_TASK_EVTS) {
      if (!F.contains(evts, evt)) return false;
    }

    return true;
  }
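This method leans on the null-safe array helpers F.isEmpty(int[]) and F.contains(int[], int). Equivalent checks with only the JDK (a hypothetical sketch, not the Ignite implementations):

import java.util.Arrays;

public class EventCheckSketch {
  /** Null-safe emptiness check, mirroring F.isEmpty(int[]). */
  static boolean isEmpty(int[] arr) {
    return arr == null || arr.length == 0;
  }

  /** Null-safe linear scan, mirroring F.contains(int[], int). */
  static boolean contains(int[] arr, int val) {
    return !isEmpty(arr) && Arrays.stream(arr).anyMatch(e -> e == val);
  }

  public static void main(String[] args) {
    assert !contains(null, 1);
    assert contains(new int[] {1, 2}, 2);
  }
}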
Example #12
  /**
   * @param mainSpace Main space.
   * @param allSpaces All spaces.
   * @return List of all extra spaces or {@code null} if none.
   */
  private List<String> extraSpaces(String mainSpace, Set<String> allSpaces) {
    if (F.isEmpty(allSpaces) || (allSpaces.size() == 1 && allSpaces.contains(mainSpace)))
      return null;

    ArrayList<String> res = new ArrayList<>(allSpaces.size());

    for (String space : allSpaces) {
      if (!F.eq(space, mainSpace)) res.add(space);
    }

    return res;
  }
  /** @throws Exception If failed. */
  @SuppressWarnings({"SynchronizationOnLocalVariableOrMethodParameter"})
  public void testStartMultipleGridsFromSpring() throws Exception {
    File cfgFile =
        GridTestUtils.resolveIgnitePath(
            GridTestProperties.getProperty("loader.self.multipletest.config"));

    assert cfgFile != null;

    String path = cfgFile.getAbsolutePath();

    info("Loading Grid from configuration file: " + path);

    final GridTuple<IgniteState> gridState1 = F.t(null);
    final GridTuple<IgniteState> gridState2 = F.t(null);

    final Object mux = new Object();

    IgnitionListener factoryLsnr =
        new IgnitionListener() {
          @Override
          public void onStateChange(String name, IgniteState state) {
            synchronized (mux) {
              if ("grid-factory-test-1".equals(name)) gridState1.set(state);
              else if ("grid-factory-test-2".equals(name)) gridState2.set(state);
            }
          }
        };

    G.addListener(factoryLsnr);

    G.start(path);

    assert G.ignite("grid-factory-test-1") != null;
    assert G.ignite("grid-factory-test-2") != null;

    synchronized (mux) {
      assert gridState1.get() == STARTED
          : "Invalid grid state [expected=" + STARTED + ", returned=" + gridState1 + ']';
      assert gridState2.get() == STARTED
          : "Invalid grid state [expected=" + STARTED + ", returned=" + gridState2 + ']';
    }

    G.stop("grid-factory-test-1", true);
    G.stop("grid-factory-test-2", true);

    synchronized (mux) {
      assert gridState1.get() == STOPPED
          : "Invalid grid state [expected=" + STOPPED + ", returned=" + gridState1 + ']';
      assert gridState2.get() == STOPPED
          : "Invalid grid state [expected=" + STOPPED + ", returned=" + gridState2 + ']';
    }
  }
  /**
   * @param cacheId Cache ID to check.
   * @param topVer Topology version.
   * @return {@code True} if cache was added during this exchange.
   */
  public boolean isCacheAdded(int cacheId, AffinityTopologyVersion topVer) {
    if (!F.isEmpty(reqs)) {
      for (DynamicCacheChangeRequest req : reqs) {
        if (req.start() && !req.clientStartOnly()) {
          if (CU.cacheId(req.cacheName()) == cacheId) return true;
        }
      }
    }

    GridCacheContext<?, ?> cacheCtx = cctx.cacheContext(cacheId);

    return cacheCtx != null && F.eq(cacheCtx.startTopologyVersion(), topVer);
  }
Example #15
  /** {@inheritDoc} */
  @Override
  public boolean equals(Object o) {
    if (o == this) return true;

    if (o == null || getClass() != o.getClass()) return false;

    IgfsBlockLocationImpl that = (IgfsBlockLocationImpl) o;

    return len == that.len
        && start == that.start
        && F.eq(nodeIds, that.nodeIds)
        && F.eq(names, that.names)
        && F.eq(hosts, that.hosts);
  }
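F.eq is a null-safe equality check; since Java 7 the JDK offers the same behavior via java.util.Objects.equals. A minimal sketch showing the equivalence (EqSketch is hypothetical):

import java.util.Objects;

public class EqSketch {
  /** Null-safe equality, matching the semantics of Ignite's F.eq(a, b). */
  static boolean eq(Object a, Object b) {
    return Objects.equals(a, b); // True when both are null; false when exactly one is.
  }

  public static void main(String[] args) {
    assert eq(null, null);
    assert !eq("a", null);
    assert eq("a", "a");
  }
}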
Example #16
  /** Initializes future. */
  @SuppressWarnings({"SimplifiableIfStatement", "IfMayBeConditional"})
  public void finish() {
    boolean sync;

    if (!F.isEmpty(dhtMap) || !F.isEmpty(nearMap)) sync = finish(dhtMap, nearMap);
    else if (!commit && !F.isEmpty(tx.lockTransactionNodes()))
      sync = rollbackLockTransactions(tx.lockTransactionNodes());
    else
      // No backup or near nodes to send commit message to (just complete then).
      sync = false;

    markInitialized();

    if (!sync) onComplete();
  }
  /** {@inheritDoc} */
  @Override
  public Collection<ClusterNode> nodes(int p, AffinityTopologyVersion topVer) {
    Collection<ClusterNode> affNodes = cctx.affinity().nodes(p, topVer);

    lock.readLock().lock();

    try {
      assert node2part != null && node2part.valid()
          : "Invalid node-to-partitions map [topVer1="
              + topVer
              + ", topVer2="
              + this.topVer
              + ", cache="
              + cctx.name()
              + ", node2part="
              + node2part
              + ']';

      Collection<ClusterNode> nodes = null;

      Collection<UUID> nodeIds = part2node.get(p);

      if (!F.isEmpty(nodeIds)) {
        Collection<UUID> affIds = new HashSet<>(F.viewReadOnly(affNodes, F.node2id()));

        for (UUID nodeId : nodeIds) {
          if (!affIds.contains(nodeId) && hasState(p, nodeId, OWNING, MOVING, RENTING)) {
            ClusterNode n = cctx.discovery().node(nodeId);

            if (n != null
                && (topVer.topologyVersion() < 0 || n.order() <= topVer.topologyVersion())) {
              if (nodes == null) {
                nodes = new ArrayList<>(affNodes.size() + 2);

                nodes.addAll(affNodes);
              }

              nodes.add(n);
            }
          }
        }
      }

      return nodes != null ? nodes : affNodes;
    } finally {
      lock.readLock().unlock();
    }
  }
Example #18
  /** @param maps Mappings. */
  void addEntryMapping(@Nullable Collection<GridDistributedTxMapping> maps) {
    if (!F.isEmpty(maps)) {
      for (GridDistributedTxMapping map : maps) {
        ClusterNode n = map.node();

        GridDistributedTxMapping m = mappings.get(n.id());

        if (m == null) {
          mappings.put(m = new GridDistributedTxMapping(n));

          m.near(map.near());

          if (map.explicitLock()) m.markExplicitLock();
        }

        for (IgniteTxEntry entry : map.entries()) m.add(entry);
      }

      if (log.isDebugEnabled())
        log.debug(
            "Added mappings to transaction [locId="
                + cctx.localNodeId()
                + ", mappings="
                + maps
                + ", tx="
                + this
                + ']');
    }
  }
Example #19
  /**
   * @param exp Expected.
   * @param act Actual.
   */
  protected void assertEqualsCollections(Collection<?> exp, Collection<?> act) {
    if (exp.size() != act.size())
      fail("Collections are not equal:\nExpected:\t" + exp + "\nActual:\t" + act);

    Iterator<?> it1 = exp.iterator();
    Iterator<?> it2 = act.iterator();

    int idx = 0;

    while (it1.hasNext()) {
      Object item1 = it1.next();
      Object item2 = it2.next();

      if (!F.eq(item1, item2))
        fail(
            "Collections are not equal (position "
                + idx
                + "):\nExpected: "
                + exp
                + "\nActual:   "
                + act);

      idx++;
    }
  }
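A usage sketch for the helper above, with hypothetical values; it assumes the calls sit in the same test class and that java.util.Arrays is imported:

    // Passes: equal size and equal elements at every position.
    assertEqualsCollections(Arrays.asList(1, 2, 3), Arrays.asList(1, 2, 3));

    // Fails with a message naming position 1, where 2 != 9.
    assertEqualsCollections(Arrays.asList(1, 2, 3), Arrays.asList(1, 9, 3));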
  /** @return Nodes to execute on. */
  private Collection<ClusterNode> nodes() {
    CacheMode cacheMode = cctx.config().getCacheMode();

    switch (cacheMode) {
      case LOCAL:
        if (prj != null)
          U.warn(
              log,
              "Ignoring query projection because it's executed over LOCAL cache "
                  + "(only local node will be queried): "
                  + this);

        return Collections.singletonList(cctx.localNode());

      case REPLICATED:
        if (prj != null || partition() != null) return nodes(cctx, prj, partition());

        return cctx.affinityNode()
            ? Collections.singletonList(cctx.localNode())
            : Collections.singletonList(F.rand(nodes(cctx, null, partition())));

      case PARTITIONED:
        return nodes(cctx, prj, partition());

      default:
        throw new IllegalStateException("Unknown cache distribution mode: " + cacheMode);
    }
  }
  /** @throws Exception If failed. */
  public void testClientService() throws Exception {
    UUID clientNodeId = grid(0).cluster().localNode().id();

    for (int i = 0; i < NODES_CNT; i++) {
      log.info("Iteration: " + i);

      Ignite ignite = grid(i);

      ignite
          .services(ignite.cluster().forClients())
          .deployNodeSingleton(SINGLETON_NAME, new TestService());

      ClusterGroup grp = ignite.cluster();

      assertEquals(NODES_CNT, grp.nodes().size());

      Collection<ServiceDescriptor> srvDscs = ignite.services(grp).serviceDescriptors();

      assertEquals(1, srvDscs.size());

      Map<UUID, Integer> nodesMap = F.first(srvDscs).topologySnapshot();

      assertEquals(1, nodesMap.size());

      for (Map.Entry<UUID, Integer> nodeInfo : nodesMap.entrySet()) {
        assertEquals(clientNodeId, nodeInfo.getKey());

        assertEquals(1, nodeInfo.getValue().intValue());
      }

      ignite.services().cancelAll();
    }
  }
Example #22
  /** {@inheritDoc} */
  @Override
  public void setOwner(Path p, String usr, String grp) throws IOException {
    A.notNull(p, "p");
    A.notNull(usr, "username");
    A.notNull(grp, "grpName");

    enterBusy();

    try {
      if (mode(p) == PROXY) secondaryFileSystem().setOwner(toSecondary(p), usr, grp);
      else if (rmtClient.update(
              convert(p), F.asMap(IgfsUtils.PROP_USER_NAME, usr, IgfsUtils.PROP_GROUP_NAME, grp))
          == null) {
        throw new IOException(
            "Failed to set file permission (file not found?)"
                + " [path="
                + p
                + ", username="******", grpName="
                + grp
                + ']');
      }
    } finally {
      leaveBusy();
    }
  }
  /**
   * @param cctx Cache context.
   * @param prj Projection (optional).
   * @return Collection of data nodes in provided projection (if any).
   */
  private static Collection<ClusterNode> nodes(
      final GridCacheContext<?, ?> cctx,
      @Nullable final ClusterGroup prj,
      @Nullable final Integer part) {
    assert cctx != null;

    final AffinityTopologyVersion topVer = cctx.affinity().affinityTopologyVersion();

    Collection<ClusterNode> affNodes = CU.affinityNodes(cctx);

    if (prj == null && part == null) return affNodes;

    final Set<ClusterNode> owners =
        part == null
            ? Collections.<ClusterNode>emptySet()
            : new HashSet<>(cctx.topology().owners(part, topVer));

    return F.view(
        affNodes,
        new P1<ClusterNode>() {
          @Override
          public boolean apply(ClusterNode n) {

            return cctx.discovery().cacheAffinityNode(n, cctx.name())
                && (prj == null || prj.node(n.id()) != null)
                && (part == null || owners.contains(n));
          }
        });
  }
Example #24
  /** {@inheritDoc} */
  @Override
  protected IgniteInternalFuture<Map<K, V>> getAllAsync(
      @Nullable Collection<? extends K> keys,
      boolean forcePrimary,
      boolean skipTx,
      @Nullable UUID subjId,
      String taskName,
      boolean deserializeBinary,
      boolean skipVals,
      boolean canRemap) {
    ctx.checkSecurity(SecurityPermission.CACHE_READ);

    if (F.isEmpty(keys)) return new GridFinishedFuture<>(Collections.<K, V>emptyMap());

    if (keyCheck) validateCacheKeys(keys);

    CacheOperationContext opCtx = ctx.operationContextPerCall();

    subjId = ctx.subjectIdPerCall(subjId, opCtx);

    return loadAsync(
        null,
        ctx.cacheKeysView(keys),
        forcePrimary,
        subjId,
        taskName,
        deserializeBinary,
        skipVals ? null : opCtx != null ? opCtx.expiry() : null,
        skipVals,
        opCtx != null && opCtx.skipStore(),
        canRemap);
  }
  /** {@inheritDoc} */
  @Override
  public final Map<? extends ComputeJob, ClusterNode> map(List<ClusterNode> subgrid, T arg) {
    assert subgrid != null;
    assert !subgrid.isEmpty();

    Collection<? extends ComputeJob> jobs = split(subgrid.size(), arg);

    if (F.isEmpty(jobs)) throw new IgniteException("Split returned no jobs.");

    Map<ComputeJob, ClusterNode> map = U.newHashMap(jobs.size());

    for (ComputeJob job : jobs) {
      ClusterNode old = map.put(job, balancer.getBalancedNode(job, null));

      if (old != null)
        throw new IgniteException(
            "Failed to map task (same job instance is being mapped more than once) "
                + "[job="
                + job
                + ", task="
                + this
                + ']');
    }

    return map;
  }
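map() exploits the fact that Map.put returns the previous value for a key, so a non-null result flags the same job instance being mapped twice. A self-contained sketch of that detection idiom (names are illustrative):

import java.util.HashMap;
import java.util.Map;

public class DupMappingSketch {
  public static void main(String[] args) {
    Map<String, String> map = new HashMap<>();

    String job = "job-1";

    map.put(job, "node-A");

    // put() returns the previous value for the key: non-null means a duplicate mapping.
    String old = map.put(job, "node-B");

    if (old != null)
      throw new IllegalStateException("Same job instance mapped more than once: " + job);
  }
}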
  /**
   * Callback for backup update response.
   *
   * @param nodeId Backup node ID.
   * @param updateRes Update response.
   */
  public void onResult(UUID nodeId, GridDhtAtomicUpdateResponse updateRes) {
    if (log.isDebugEnabled())
      log.debug(
          "Received DHT atomic update future result [nodeId="
              + nodeId
              + ", updateRes="
              + updateRes
              + ']');

    if (updateRes.error() != null)
      this.updateRes.addFailedKeys(updateRes.failedKeys(), updateRes.error());

    if (!F.isEmpty(updateRes.nearEvicted())) {
      for (KeyCacheObject key : updateRes.nearEvicted()) {
        GridDhtCacheEntry entry = nearReadersEntries.get(key);

        try {
          entry.removeReader(nodeId, updateRes.messageId());
        } catch (GridCacheEntryRemovedException e) {
          if (log.isDebugEnabled())
            log.debug("Entry with evicted reader was removed [entry=" + entry + ", err=" + e + ']');
        }
      }
    }

    registerResponse(nodeId);
  }
Example #27
  /**
   * @param gridName Grid name.
   * @return Cache configuration.
   * @throws Exception In case of error.
   */
  @SuppressWarnings("unchecked")
  protected CacheConfiguration cacheConfiguration(String gridName) throws Exception {
    CacheConfiguration cfg = defaultCacheConfiguration();

    CacheStore<?, ?> store = cacheStore();

    if (store != null) {
      cfg.setCacheStoreFactory(new TestStoreFactory());
      cfg.setReadThrough(true);
      cfg.setWriteThrough(true);
      cfg.setLoadPreviousValue(true);
    }

    cfg.setSwapEnabled(swapEnabled());
    cfg.setCacheMode(cacheMode());
    cfg.setAtomicityMode(atomicityMode());
    cfg.setWriteSynchronizationMode(writeSynchronization());
    cfg.setNearConfiguration(nearConfiguration());

    Class<?>[] idxTypes = indexedTypes();

    if (!F.isEmpty(idxTypes)) cfg.setIndexedTypes(idxTypes);

    if (cacheMode() == PARTITIONED) cfg.setBackups(1);

    return cfg;
  }
Example #28
  /**
   * Starts multi-update lock. Waits until the topology future is ready.
   *
   * @return Topology version.
   * @throws IgniteCheckedException If failed.
   */
  public AffinityTopologyVersion beginMultiUpdate() throws IgniteCheckedException {
    IgniteBiTuple<IgniteUuid, GridDhtTopologyFuture> tup = multiTxHolder.get();

    if (tup != null)
      throw new IgniteCheckedException("Nested multi-update locks are not supported");

    top.readLock();

    GridDhtTopologyFuture topFut;

    AffinityTopologyVersion topVer;

    try {
      // While we are holding read lock, register lock future for partition release future.
      IgniteUuid lockId = IgniteUuid.fromUuid(ctx.localNodeId());

      topVer = top.topologyVersion();

      MultiUpdateFuture fut = new MultiUpdateFuture(topVer);

      MultiUpdateFuture old = multiTxFuts.putIfAbsent(lockId, fut);

      assert old == null;

      topFut = top.topologyVersionFuture();

      multiTxHolder.set(F.t(lockId, topFut));
    } finally {
      top.readUnlock();
    }

    topFut.get();

    return topVer;
  }
Example #29
  /**
   * @param ctx Kernal context.
   * @param cfg Ignite configuration.
   * @param providers Plugin providers.
   */
  @SuppressWarnings("TypeMayBeWeakened")
  public IgnitePluginProcessor(
      GridKernalContext ctx, IgniteConfiguration cfg, List<PluginProvider> providers) {
    super(ctx);

    ExtensionRegistryImpl registry = new ExtensionRegistryImpl();

    for (PluginProvider provider : providers) {
      GridPluginContext pluginCtx = new GridPluginContext(ctx, cfg);

      if (F.isEmpty(provider.name())) throw new IgniteException("Plugin name can not be empty.");

      if (plugins.containsKey(provider.name()))
        throw new IgniteException("Duplicated plugin name: " + provider.name());

      plugins.put(provider.name(), provider);

      pluginCtxMap.put(provider, pluginCtx);

      provider.initExtensions(pluginCtx, registry);

      if (provider.plugin() == null) throw new IgniteException("Plugin is null.");
    }

    extensions = registry.createExtensionMap();
  }
Example #30
  /**
   * Adds owned versions to map.
   *
   * @param vers Map of owned versions.
   */
  public void ownedVersions(Map<IgniteTxKey, GridCacheVersion> vers) {
    if (F.isEmpty(vers)) return;

    if (owned == null) owned = new GridLeanMap<>(vers.size());

    owned.putAll(vers);
  }
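ownedVersions shows a lazy-allocation idiom: return early for empty input and allocate the backing map only on first use. A JDK-only sketch with HashMap standing in for GridLeanMap (the class and field names are hypothetical):

import java.util.HashMap;
import java.util.Map;

public class LazyMergeSketch {
  private Map<String, Long> owned;

  /** Copies entries in, allocating the backing map only on the first non-empty call. */
  void addAll(Map<String, Long> vers) {
    if (vers == null || vers.isEmpty()) return; // Mirrors F.isEmpty(vers).

    if (owned == null) owned = new HashMap<>(vers.size());

    owned.putAll(vers);
  }
}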