/**
   * JUnit.
   *
   * @throws Exception If failed.
   */
  public void testPrepareQueue() throws Exception {
    // Random queue names.
    String queueName1 = UUID.randomUUID().toString();
    String queueName2 = UUID.randomUUID().toString();

    CollectionConfiguration colCfg = config(false);

    IgniteQueue queue1 = grid(0).queue(queueName1, 0, colCfg);
    IgniteQueue queue2 = grid(0).queue(queueName2, 0, colCfg);
    IgniteQueue queue3 = grid(0).queue(queueName1, 0, colCfg);

    assertNotNull(queue1);
    assertNotNull(queue2);
    assertNotNull(queue3);
    assert queue1.equals(queue3);
    assert queue3.equals(queue1);
    assert !queue3.equals(queue2);

    queue1.close();
    queue2.close();
    queue3.close();

    assertNull(grid(0).queue(queueName1, 0, null));
    assertNull(grid(0).queue(queueName2, 0, null));
  }
  /** @throws Exception If failed. */
  public void testDisabledRest() throws Exception {
    restEnabled = false;

    final Grid g = startGrid("disabled-rest");

    try {
      Thread.sleep(2 * TOP_REFRESH_FREQ);

      // As long as we have a round-robin load balancer, this will cause every node to be queried.
      for (int i = 0; i < NODES_CNT + 1; i++)
        assertEquals(NODES_CNT + 1, client.compute().refreshTopology(false, false).size());

      final GridClientData data = client.data(PARTITIONED_CACHE_NAME);

      // Check rest-disabled node is unavailable.
      try {
        String affKey;

        do {
          affKey = UUID.randomUUID().toString();
        } while (!data.affinity(affKey).equals(g.localNode().id()));

        data.put(affKey, "asdf");

        assertEquals("asdf", cache(0, PARTITIONED_CACHE_NAME).get(affKey));
      } catch (GridServerUnreachableException e) {
        // Thrown for direct client-node connections.
        assertTrue(
            "Unexpected exception message: " + e.getMessage(),
            e.getMessage()
                .startsWith("No available endpoints to connect (is rest enabled for this node?)"));
      } catch (GridClientException e) {
        // Thrown for routed client-router-node connections.
        String msg = e.getMessage();

        assertTrue(
            "Unexpected exception message: " + msg,
            protocol() == GridClientProtocol.TCP
                ? msg.contains("No available endpoints to connect (is rest enabled for this node?)")
                : // TCP router.
                msg.startsWith(
                    "No available nodes on the router for destination node ID")); // HTTP router.
      }

      // Check rest-enabled nodes are available.
      String affKey;

      do {
        affKey = UUID.randomUUID().toString();
      } while (data.affinity(affKey).equals(g.localNode().id()));

      data.put(affKey, "fdsa");

      assertEquals("fdsa", cache(0, PARTITIONED_CACHE_NAME).get(affKey));
    } finally {
      restEnabled = true;

      G.stop(g.name(), true);
    }
  }
  /**
   * JUnit.
   *
   * @throws Exception If failed.
   */
  public void testAddUnbounded() throws Exception {
    // Random queue name.
    String queueName = UUID.randomUUID().toString();

    String val = UUID.randomUUID().toString();

    IgniteQueue<String> queue = grid(0).queue(queueName, 0, config(false));

    assert queue.add(val);

    assert val.equals(queue.poll());
  }
  /**
   * JUnit.
   *
   * @throws Exception If failed.
   */
  public void testAddDeleteUnbounded() throws Exception {
    // Random queue name.
    String queueName = UUID.randomUUID().toString();

    String val = UUID.randomUUID().toString();

    IgniteQueue<String> queue = grid(0).queue(queueName, 0, config(false));

    assert queue.add(val);

    assert queue.remove(val);

    assert queue.isEmpty();
  }
  /**
   * JUnit.
   *
   * @throws Exception If failed.
   */
  public void testPutRemoveMultiThreadedUnbounded() throws Exception {
    // Random queue name.
    String queueName = UUID.randomUUID().toString();

    final IgniteQueue<String> queue = grid(0).queue(queueName, 0, config(false));

    multithreaded(
        new Callable<String>() {
          @Override
          public String call() throws Exception {
            String thread = Thread.currentThread().getName();

            for (int i = 0; i < QUEUE_CAPACITY; i++) queue.put(thread);

            info("Finished loop 1: " + thread);

            queue.clear();

            info("Cleared queue 1: " + thread);

            return "";
          }
        },
        THREAD_NUM);

    assert queue.isEmpty() : "Queue must be empty. " + queue.size();
  }
  /** @throws Exception If failed. */
  public void testClientAffinity() throws Exception {
    GridClientData partitioned = client.data(PARTITIONED_CACHE_NAME);

    Collection<Object> keys = new ArrayList<>();

    keys.addAll(Arrays.asList(Boolean.TRUE, Boolean.FALSE, 1, Integer.MAX_VALUE));

    Random rnd = new Random();
    StringBuilder sb = new StringBuilder();

    // Generate some random strings.
    for (int i = 0; i < 100; i++) {
      sb.setLength(0);

      for (int j = 0; j < 255; j++)
        // Only printable ASCII symbols for test.
        sb.append((char) (rnd.nextInt(0x7f - 0x20) + 0x20));

      keys.add(sb.toString());
    }

    // Generate some more keys to achieve better coverage.
    for (int i = 0; i < 100; i++) keys.add(UUID.randomUUID());

    for (Object key : keys) {
      UUID nodeId = grid(0).mapKeyToNode(PARTITIONED_CACHE_NAME, key).id();

      UUID clientNodeId = partitioned.affinity(key);

      assertEquals(
          "Invalid affinity mapping for REST response for key: " + key, nodeId, clientNodeId);
    }
  }
  /** @throws Exception If failed. */
  public void testCreateFileColocated() throws Exception {
    GridGgfsPath path = new GridGgfsPath("/colocated");

    UUID uuid = UUID.randomUUID();

    GridUuid affKey;

    long idx = 0;

    while (true) {
      affKey = new GridUuid(uuid, idx);

      if (grid(0).mapKeyToNode(DATA_CACHE_NAME, affKey).id().equals(grid(0).localNode().id()))
        break;

      idx++;
    }

    try (GridGgfsOutputStream out = fs.create(path, 1024, true, affKey, 0, 1024, null)) {
      // Write 15M, should be enough to test distribution.
      for (int i = 0; i < 15; i++) out.write(new byte[1024 * 1024]);
    }

    GridGgfsFile info = fs.info(path);

    Collection<GridGgfsBlockLocation> affNodes = fs.affinity(path, 0, info.length());

    assertEquals(1, affNodes.size());
    Collection<UUID> nodeIds = F.first(affNodes).nodeIds();

    assertEquals(1, nodeIds.size());
    assertEquals(grid(0).localNode().id(), F.first(nodeIds));
  }
  /**
   * JUnit.
   *
   * @throws Exception If failed.
   */
  public void testPutGetMultithreadUnbounded() throws Exception {
    // Random queue name.
    String queueName = UUID.randomUUID().toString();

    final IgniteQueue<String> queue = grid(0).queue(queueName, QUEUE_CAPACITY, config(false));

    multithreaded(
        new Callable<Void>() {
          @Override
          public Void call() throws Exception {
            String thName = Thread.currentThread().getName();

            for (int i = 0; i < 5; i++) {
              queue.put(thName);
              queue.peek();
              queue.take();
            }

            return null;
          }
        },
        THREAD_NUM);

    assert queue.isEmpty() : queue.size();
  }
    /** {@inheritDoc} */
    @SuppressWarnings({"unchecked"})
    @Override
    public boolean equals(Object obj) {
      if (obj == this) return true;

      if (!(obj instanceof CancelMessageId)) return false;

      CancelMessageId other = (CancelMessageId) obj;

      return reqId == other.reqId && nodeId.equals(other.nodeId);
    }
  /**
   * JUnit.
   *
   * @throws Exception If failed.
   */
  public void testIterator() throws Exception {
    // Random queue name.
    String queueName = UUID.randomUUID().toString();

    IgniteQueue<String> queue = grid(0).queue(queueName, 0, config(false));

    for (int i = 0; i < 100; i++) assert queue.add(Integer.toString(i));

    Iterator<String> iter1 = queue.iterator();

    int cnt = 0;
    for (int i = 0; i < 100; i++) {
      assertNotNull(iter1.next());
      cnt++;
    }

    assertEquals(100, queue.size());
    assertEquals(100, cnt);

    assertNotNull(queue.take());
    assertNotNull(queue.take());
    assertTrue(queue.remove("33"));
    assertTrue(queue.remove("77"));

    assertEquals(96, queue.size());

    Iterator<String> iter2 = queue.iterator();
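    // Calling remove() before next() must throw IllegalStateException.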

    try {
      iter2.remove();
    } catch (IllegalStateException e) {
      info("Caught expected exception: " + e);
    }

    iter2.next();
    iter2.remove();

    cnt = 0;
    while (iter2.hasNext()) {
      assertNotNull(iter2.next());
      cnt++;
    }

    assertEquals(95, cnt);
    assertEquals(95, queue.size());

    iter2.remove();
  }
  /**
   * JUnit.
   *
   * @throws Exception If failed.
   */
  public void testAddPollUnbounded() throws Exception {
    // Random queue name.
    String queueName = UUID.randomUUID().toString();

    IgniteQueue<String> queue = grid(0).queue(queueName, 0, config(false));

    assert queue.add("1");

    assert queue.add("2");

    assert queue.add("3");

    assertEquals("1", queue.poll());
    assertEquals("2", queue.poll());
    assertEquals("3", queue.poll());
  }
  /** @throws Exception If failed. */
  public void testAffinityPut() throws Exception {
    Thread.sleep(2 * TOP_REFRESH_FREQ);

    assertEquals(NODES_CNT, client.compute().refreshTopology(false, false).size());

    Map<UUID, Grid> gridsByLocNode = new HashMap<>(NODES_CNT);

    GridClientData partitioned = client.data(PARTITIONED_CACHE_NAME);

    GridClientCompute compute = client.compute();

    for (int i = 0; i < NODES_CNT; i++) gridsByLocNode.put(grid(i).localNode().id(), grid(i));

    for (int i = 0; i < 100; i++) {
      String key = "key" + i;

      UUID primaryNodeId = grid(0).mapKeyToNode(PARTITIONED_CACHE_NAME, key).id();

      assertEquals("Affinity mismatch for key: " + key, primaryNodeId, partitioned.affinity(key));

      assertEquals(primaryNodeId, partitioned.affinity(key));

      // Must go to the primary node only. Since backup count is 0, the value must be present
      // on the primary node only.
      partitioned.put(key, "val" + key);

      for (Map.Entry<UUID, Grid> entry : gridsByLocNode.entrySet()) {
        Object val = entry.getValue().cache(PARTITIONED_CACHE_NAME).peek(key);

        if (primaryNodeId.equals(entry.getKey())) assertEquals("val" + key, val);
        else assertNull(val);
      }
    }

    // Now check that we will see value in near cache in pinned mode.
    for (int i = 100; i < 200; i++) {
      String pinnedKey = "key" + i;

      UUID primaryNodeId = grid(0).mapKeyToNode(PARTITIONED_CACHE_NAME, pinnedKey).id();

      UUID pinnedNodeId = F.first(F.view(gridsByLocNode.keySet(), F.notEqualTo(primaryNodeId)));

      GridClientNode node = compute.node(pinnedNodeId);

      partitioned.pinNodes(node).put(pinnedKey, "val" + pinnedKey);

      for (Map.Entry<UUID, Grid> entry : gridsByLocNode.entrySet()) {
        Object val = entry.getValue().cache(PARTITIONED_CACHE_NAME).peek(pinnedKey);

        if (primaryNodeId.equals(entry.getKey()) || pinnedNodeId.equals(entry.getKey()))
          assertEquals("val" + pinnedKey, val);
        else assertNull(val);
      }
    }
  }
  /**
   * JUnit.
   *
   * @throws Exception If failed.
   */
  public void testPutGetUnbounded() throws Exception {
    // Random queue name.
    String queueName = UUID.randomUUID().toString();

    IgniteQueue<String> queue = grid(0).queue(queueName, QUEUE_CAPACITY, config(false));

    String thName = Thread.currentThread().getName();

    for (int i = 0; i < 5; i++) {
      queue.put(thName);
      queue.peek();
      queue.take();
    }

    assert queue.isEmpty() : queue.size();
  }
  /**
   * JUnit.
   *
   * @throws Exception If failed.
   */
  public void testPutRemoveUnbounded() throws Exception {
    // Random queue name.
    String queueName = UUID.randomUUID().toString();

    IgniteQueue<String> queue = grid(0).queue(queueName, 0, config(false));

    String thread = Thread.currentThread().getName();

    for (int i = 0; i < QUEUE_CAPACITY; i++) queue.put(thread);

    info("Finished loop 1: " + thread);

    queue.clear();

    info("Cleared queue 1: " + thread);

    assert queue.isEmpty() : "Queue must be empty. " + queue.size();
  }
  /**
   * JUnit.
   *
   * @throws Exception If failed.
   */
  public void testAddPeekUnbounded() throws Exception {
    // Random queue name.
    String queueName = UUID.randomUUID().toString();

    IgniteQueue<String> queue = grid(0).queue(queueName, 0, config(false));

    String item1 = "1";
    assert queue.add(item1);

    String item2 = "2";
    assert queue.add(item2);

    String item3 = "3";
    assert queue.add(item3);

    assert item1.equals(queue.peek());
    assert item1.equals(queue.peek());
    assert !item2.equals(queue.peek());
  }
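/**
 * Input event adapter that consumes messages from a Kafka topic using a ZooKeeper-backed
 * high-level consumer and delivers them to the registered {@link InputEventAdapterListener}.
 */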
public final class KafkaEventAdapter implements InputEventAdapter {

  private static final Log log = LogFactory.getLog(KafkaEventAdapter.class);
  private final InputEventAdapterConfiguration eventAdapterConfiguration;
  private final Map<String, String> globalProperties;
  private InputEventAdapterListener eventAdaptorListener;
  private final String id = UUID.randomUUID().toString();
  private int tenantId;
  private ConsumerKafkaAdaptor consumerKafkaAdaptor;

  public KafkaEventAdapter(
      InputEventAdapterConfiguration eventAdapterConfiguration,
      Map<String, String> globalProperties) {
    this.eventAdapterConfiguration = eventAdapterConfiguration;
    this.globalProperties = globalProperties;
  }

  @Override
  public void init(InputEventAdapterListener eventAdaptorListener)
      throws InputEventAdapterException {
    this.eventAdaptorListener = eventAdaptorListener;
  }

  @Override
  public void testConnect() throws TestConnectionNotSupportedException {
    throw new TestConnectionNotSupportedException("not-supported");
  }

  @Override
  public void connect() {
    tenantId = PrivilegedCarbonContext.getThreadLocalCarbonContext().getTenantId(true);
    createKafkaAdaptorListener(eventAdaptorListener, eventAdapterConfiguration);
  }

  @Override
  public void disconnect() {
    if (consumerKafkaAdaptor != null) {
      consumerKafkaAdaptor.shutdown();
      String topic =
          eventAdapterConfiguration
              .getProperties()
              .get(KafkaEventAdapterConstants.ADAPTER_MESSAGE_TOPIC);
      log.debug("Adapter " + eventAdapterConfiguration.getName() + " disconnected " + topic);
    }
  }

  @Override
  public void destroy() {}

  public InputEventAdapterListener getEventAdaptorListener() {
    return eventAdaptorListener;
  }

  @Override
  public int hashCode() {
    return id.hashCode();
  }

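  /**
   * Builds the Kafka {@link ConsumerConfig} from the ZooKeeper connect string, the consumer group
   * ID and an optional comma-separated list of {@code name:value} properties.
   */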
  private static ConsumerConfig createConsumerConfig(
      String zookeeper, String groupId, String optionalConfigs) {
    try {
      Properties props = new Properties();
      props.put(KafkaEventAdapterConstants.ADAPTOR_SUSCRIBER_ZOOKEEPER_CONNECT, zookeeper);
      props.put(KafkaEventAdapterConstants.ADAPTOR_SUSCRIBER_GROUP_ID, groupId);

      if (optionalConfigs != null) {
        String[] optionalProperties = optionalConfigs.split(",");

        if (optionalProperties != null && optionalProperties.length > 0) {
          for (String header : optionalProperties) {
            String[] configPropertyWithValue = header.split(":", 2);
            if (configPropertyWithValue.length == 2) {
              props.put(configPropertyWithValue[0], configPropertyWithValue[1]);
            } else {
              log.warn(
                  "Optional configuration property not defined in the correct format.\nRequired - property_name1:property_value1,property_name2:property_value2\nFound - "
                      + optionalConfigs);
            }
          }
        }
      }
      return new ConsumerConfig(props);
    } catch (NoClassDefFoundError e) {
      throw new InputEventAdapterRuntimeException(
          "Cannot access kafka context due to missing jars", e);
    }
  }

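  /**
   * Reads the broker properties (ZooKeeper connect string, group ID, thread count, optional
   * configuration and topic) from the adapter configuration and starts a
   * {@link ConsumerKafkaAdaptor} that feeds the given listener.
   */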
  private void createKafkaAdaptorListener(
      InputEventAdapterListener inputEventAdapterListener,
      InputEventAdapterConfiguration inputEventAdapterConfiguration) {

    Map<String, String> brokerProperties = new HashMap<>();
    brokerProperties.putAll(inputEventAdapterConfiguration.getProperties());
    String zkConnect =
        brokerProperties.get(KafkaEventAdapterConstants.ADAPTOR_SUSCRIBER_ZOOKEEPER_CONNECT);
    String groupID = brokerProperties.get(KafkaEventAdapterConstants.ADAPTOR_SUSCRIBER_GROUP_ID);
    String threadsStr = brokerProperties.get(KafkaEventAdapterConstants.ADAPTOR_SUSCRIBER_THREADS);
    String optionalConfiguration =
        brokerProperties.get(KafkaEventAdapterConstants.ADAPTOR_OPTIONAL_CONFIGURATION_PROPERTIES);
    int threads = Integer.parseInt(threadsStr);

    String topic =
        inputEventAdapterConfiguration
            .getProperties()
            .get(KafkaEventAdapterConstants.ADAPTOR_SUSCRIBER_TOPIC);

    consumerKafkaAdaptor =
        new ConsumerKafkaAdaptor(
            topic,
            tenantId,
            KafkaEventAdapter.createConsumerConfig(zkConnect, groupID, optionalConfiguration));
    consumerKafkaAdaptor.run(threads, inputEventAdapterListener);
  }

  @Override
  public boolean isEventDuplicatedInCluster() {
    return Boolean.parseBoolean(
        eventAdapterConfiguration
            .getProperties()
            .get(EventAdapterConstants.EVENTS_DUPLICATED_IN_CLUSTER));
  }

  @Override
  public boolean isPolling() {
    return true;
  }
}
  /**
   * Checks if any of the local partitions need to be evicted.
   *
   * @param updateSeq Update sequence.
   * @return {@code True} if the local partition state was changed.
   */
  private boolean checkEvictions(long updateSeq) {
    assert lock.isWriteLockedByCurrentThread();

    boolean changed = false;

    UUID locId = cctx.nodeId();

    for (GridDhtLocalPartition part : locParts.values()) {
      GridDhtPartitionState state = part.state();

      if (state.active()) {
        int p = part.id();

        List<ClusterNode> affNodes = cctx.affinity().nodes(p, topVer);

        if (!affNodes.contains(cctx.localNode())) {
          Collection<UUID> nodeIds = F.nodeIds(nodes(p, topVer, OWNING));

          // If all affinity nodes are owners, then evict partition from local node.
          if (nodeIds.containsAll(F.nodeIds(affNodes))) {
            part.rent(false);

            updateLocal(part.id(), locId, part.state(), updateSeq);

            changed = true;

            if (log.isDebugEnabled())
              log.debug("Evicted local partition (all affinity nodes are owners): " + part);
          } else {
            int ownerCnt = nodeIds.size();
            int affCnt = affNodes.size();

            if (ownerCnt > affCnt) {
              List<ClusterNode> sorted = new ArrayList<>(cctx.discovery().nodes(nodeIds));

              // Sort by node orders in ascending order.
              Collections.sort(sorted, CU.nodeComparator(true));

              int diff = sorted.size() - affCnt;
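              // Only the first 'diff' owners in the sort order (the oldest nodes) give up the partition.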

              for (int i = 0; i < diff; i++) {
                ClusterNode n = sorted.get(i);

                if (locId.equals(n.id())) {
                  part.rent(false);

                  updateLocal(part.id(), locId, part.state(), updateSeq);

                  changed = true;

                  if (log.isDebugEnabled())
                    log.debug(
                        "Evicted local partition (this node is oldest non-affinity node): " + part);

                  break;
                }
              }
            }
          }
        }
      }
    }

    return changed;
  }
 public RepairSession(Range range, String tablename, String... cfnames) {
   this("manual-repair-" + UUID.randomUUID(), range, tablename, cfnames);
 }
  /**
   * Updates value for single partition.
   *
   * @param p Partition.
   * @param nodeId Node ID.
   * @param state State.
   * @param updateSeq Update sequence.
   */
  @SuppressWarnings({"MismatchedQueryAndUpdateOfCollection"})
  private void updateLocal(int p, UUID nodeId, GridDhtPartitionState state, long updateSeq) {
    assert lock.isWriteLockedByCurrentThread();
    assert nodeId.equals(cctx.nodeId());

    // If a node joins, get the topology as of the time the node joined.
    ClusterNode oldest = CU.oldestAliveCacheServerNode(cctx.shared(), topVer);

    assert oldest != null;

    // If this node became the oldest node.
    if (oldest.id().equals(cctx.nodeId())) {
      long seq = node2part.updateSequence();

      if (seq != updateSeq) {
        if (seq > updateSeq) {
          if (this.updateSeq.get() < seq) {
            // Update global counter if necessary.
            boolean b = this.updateSeq.compareAndSet(this.updateSeq.get(), seq + 1);

            assert b
                : "Invalid update sequence [updateSeq="
                    + updateSeq
                    + ", seq="
                    + seq
                    + ", curUpdateSeq="
                    + this.updateSeq.get()
                    + ", node2part="
                    + node2part.toFullString()
                    + ']';

            updateSeq = seq + 1;
          } else updateSeq = seq;
        }

        node2part.updateSequence(updateSeq);
      }
    }

    GridDhtPartitionMap map = node2part.get(nodeId);

    if (map == null)
      node2part.put(
          nodeId,
          map =
              new GridDhtPartitionMap(
                  nodeId,
                  updateSeq,
                  Collections.<Integer, GridDhtPartitionState>emptyMap(),
                  false));

    map.updateSequence(updateSeq);

    map.put(p, state);

    Set<UUID> ids = part2node.get(p);

    if (ids == null) part2node.put(p, ids = U.newHashSet(3));

    ids.add(nodeId);
  }
  /**
   * JUnit.
   *
   * @throws Exception If failed.
   */
  public void testCollectionMethods() throws Exception {
    // Random queue name.
    String queueName = UUID.randomUUID().toString();

    IgniteQueue<SameHashItem> queue = grid(0).queue(queueName, 0, config(false));

    int retries = 100;

    // Initialize queue.
    for (int i = 0; i < retries; i++)
      queue.addAll(
          Arrays.asList(
              new SameHashItem(Integer.toString(i)), new SameHashItem(Integer.toString(i))));

    // Get arrays from queue.
    assertEquals(retries * 2, queue.toArray().length);

    SameHashItem[] arr2 = new SameHashItem[retries * 3];

    Object[] arr3 = queue.toArray(arr2);

    assertEquals(arr2, arr3);
    assertEquals(arr3[0], new SameHashItem("0"));

    // Check queue items.
    assertEquals(retries * 2, queue.size());

    assertTrue(queue.contains(new SameHashItem(Integer.toString(14))));

    assertFalse(queue.contains(new SameHashItem(Integer.toString(144))));

    Collection<SameHashItem> col1 =
        Arrays.asList(
            new SameHashItem(Integer.toString(14)),
            new SameHashItem(Integer.toString(14)),
            new SameHashItem(Integer.toString(18)));

    assertTrue(queue.containsAll(col1));

    Collection<SameHashItem> col2 =
        Arrays.asList(
            new SameHashItem(Integer.toString(245)),
            new SameHashItem(Integer.toString(14)),
            new SameHashItem(Integer.toString(18)));

    assertFalse(queue.containsAll(col2));

    // Try to remove item.
    assertTrue(queue.remove(new SameHashItem(Integer.toString(14))));

    assertEquals((retries * 2) - 1, queue.size());

    assertTrue(queue.contains(new SameHashItem(Integer.toString(14))));

    assertTrue(queue.remove(new SameHashItem(Integer.toString(14))));

    assertEquals((retries - 1) * 2, queue.size());

    assertFalse(queue.remove(new SameHashItem(Integer.toString(14))));

    // Try to remove some items.
    assertTrue(queue.contains(new SameHashItem(Integer.toString(33))));

    assertTrue(
        queue.removeAll(
            Arrays.asList(
                new SameHashItem(Integer.toString(15)),
                new SameHashItem(Integer.toString(14)),
                new SameHashItem(Integer.toString(33)),
                new SameHashItem(Integer.toString(1)))));

    assertFalse(queue.contains(new SameHashItem(Integer.toString(33))));

    // Try to retain all items.
    assertTrue(
        queue.retainAll(
            Arrays.asList(
                new SameHashItem(Integer.toString(15)),
                new SameHashItem(Integer.toString(14)),
                new SameHashItem(Integer.toString(33)),
                new SameHashItem(Integer.toString(1)))));

    assertFalse(queue.contains(new SameHashItem(Integer.toString(2))));

    assert queue.isEmpty();
  }
 /** {@inheritDoc} */
 @Override
 public int hashCode() {
   return 31 * ((int) (reqId ^ (reqId >>> 32))) + nodeId.hashCode();
 }
  /**
   * JUnit.
   *
   * @throws Exception If failed.
   */
  public void testQueueRemoveMultithreadBounded() throws Exception {
    // Random queue name.
    final String queueName = UUID.randomUUID().toString();

    final IgniteQueue<String> queue = grid(0).queue(queueName, QUEUE_CAPACITY, config(false));

    final CountDownLatch putLatch = new CountDownLatch(THREAD_NUM);

    final CountDownLatch clearLatch = new CountDownLatch(THREAD_NUM);

    for (int t = 0; t < THREAD_NUM; t++) {
      Thread th =
          new Thread(
              new Runnable() {
                @Override
                public void run() {
                  if (log.isDebugEnabled())
                    log.debug("Thread has been started." + Thread.currentThread().getName());

                  try {
                    // Thread must be blocked on the offer operation.
                    for (int i = 0; i < (QUEUE_CAPACITY * THREAD_NUM); i++)
                      queue.offer("anything", 3, TimeUnit.MINUTES);

                    fail("Queue failed");
                  } catch (IgniteException | IllegalStateException e) {
                    putLatch.countDown();

                    assert e.getMessage().contains("removed");

                    assert queue.removed();
                  }

                  if (log.isDebugEnabled())
                    log.debug("Thread has been stopped." + Thread.currentThread().getName());
                }
              });
      th.start();
    }

    for (int t = 0; t < THREAD_NUM; t++) {
      Thread th =
          new Thread(
              new Runnable() {
                @Override
                public void run() {
                  try {
                    IgniteQueue<String> queue = grid(0).queue(queueName, 0, null);

                    if (queue != null) queue.close();
                  } catch (Exception e) {
                    fail("Unexpected exception: " + e);
                  } finally {
                    clearLatch.countDown();
                  }
                }
              });
      th.start();
    }

    assert putLatch.await(3, TimeUnit.MINUTES);

    assert clearLatch.await(3, TimeUnit.MINUTES);

    try {
      assert queue.isEmpty() : queue.size();

      fail("Queue must be removed.");
    } catch (IgniteException | IllegalStateException e) {
      assert e.getMessage().contains("removed");

      assert queue.removed();
    }
  }