Example No. 1
  public void testIteration() {
    assertTrue(_nbhm.isEmpty());
    assertThat(_nbhm.put("k1", "v1"), nullValue());
    assertThat(_nbhm.put("k2", "v2"), nullValue());

    String str1 = "";
    for (Iterator<Map.Entry<String, String>> i = _nbhm.entrySet().iterator(); i.hasNext(); ) {
      Map.Entry<String, String> e = i.next();
      str1 += e.getKey();
    }
    assertThat("found all entries", str1, anyOf(is("k1k2"), is("k2k1")));

    String str2 = "";
    for (Iterator<String> i = _nbhm.keySet().iterator(); i.hasNext(); ) {
      String key = i.next();
      str2 += key;
    }
    assertThat("found all keys", str2, anyOf(is("k1k2"), is("k2k1")));

    String str3 = "";
    for (Iterator<String> i = _nbhm.values().iterator(); i.hasNext(); ) {
      String val = i.next();
      str3 += val;
    }
    assertThat("found all vals", str3, anyOf(is("v1v2"), is("v2v1")));

    assertThat(
        "toString works", _nbhm.toString(), anyOf(is("{k1=v1, k2=v2}"), is("{k2=v2, k1=v1}")));
  }
Example No. 2
 @SuppressWarnings("unchecked")
 private void addFields(
     DataConfig.Entity entity, DocWrapper doc, Map<String, Object> arow, VariableResolver vr) {
   for (Map.Entry<String, Object> entry : arow.entrySet()) {
     String key = entry.getKey();
     Object value = entry.getValue();
     if (value == null) continue;
     if (key.startsWith("$")) continue;
     List<DataConfig.Field> field = entity.colNameVsField.get(key);
     if (field == null && dataImporter.getSchema() != null) {
        // This can be a dynamic field or a field which does not have an entry in data-config (an
        // implicit field)
       SchemaField sf = dataImporter.getSchema().getFieldOrNull(key);
       if (sf == null) {
         sf = dataImporter.getConfig().lowerNameVsSchemaField.get(key.toLowerCase(Locale.ENGLISH));
       }
       if (sf != null) {
         addFieldToDoc(entry.getValue(), sf.getName(), 1.0f, sf.multiValued(), doc);
       }
        // else do nothing; if we add it, it may fail
     } else {
       if (field != null) {
         for (DataConfig.Field f : field) {
           String name = f.getName();
           if (f.dynamicName) {
             name = vr.replaceTokens(name);
           }
           if (f.toWrite) addFieldToDoc(entry.getValue(), name, f.boost, f.multiValued, doc);
         }
       }
     }
   }
 }
Example No. 3
    public void prepare(String query, InetAddress toExclude) throws InterruptedException {
      for (Map.Entry<Host, HostConnectionPool> entry : pools.entrySet()) {
        if (entry.getKey().getAddress().equals(toExclude)) continue;

        // Let's not wait too long if we can't get a connection. Things
        // will fix themselves once the user tries a query anyway.
        Connection c = null;
        try {
          c = entry.getValue().borrowConnection(200, TimeUnit.MILLISECONDS);
          c.write(new PrepareMessage(query)).get();
        } catch (ConnectionException e) {
          // Again, not being able to prepare the query right now is no big deal, so just ignore
        } catch (BusyConnectionException e) {
          // Same as above
        } catch (TimeoutException e) {
          // Same as above
        } catch (ExecutionException e) {
          // We shouldn't really get an exception while preparing a
          // query, so log this (but otherwise ignore it, as it's not a big deal)
          logger.error(
              String.format(
                  "Unexpected error while preparing query (%s) on %s", query, entry.getKey()),
              e);
        } finally {
          if (c != null) entry.getValue().returnConnection(c);
        }
      }
    }
Example No. 4
  /** Clears swap entries for an evicted partition. */
  private void clearSwap() {
    assert state() == EVICTED;
    assert !GridQueryProcessor.isEnabled(cctx.config())
        : "Indexing needs to have unswapped values.";

    try {
      GridCloseableIterator<Map.Entry<byte[], GridCacheSwapEntry>> it = cctx.swap().iterator(id);

      boolean isLocStore = cctx.store().isLocal();

      if (it != null) {
        // We can safely remove these values because no entries will be created for an evicted
        // partition.
        while (it.hasNext()) {
          Map.Entry<byte[], GridCacheSwapEntry> entry = it.next();

          byte[] keyBytes = entry.getKey();

          KeyCacheObject key = cctx.toCacheKeyObject(keyBytes);

          cctx.swap().remove(key);

          if (isLocStore) cctx.store().remove(null, key.value(cctx.cacheObjectContext(), false));
        }
      }
    } catch (IgniteCheckedException e) {
      U.error(log, "Failed to clear swap for evicted partition: " + this, e);
    }
  }
Example No. 5
 private void deferExpired() {
   for (Map.Entry<String, JedisPool> entry : jedisPools.entrySet()) {
     JedisPool jedisPool = entry.getValue();
     try {
       Jedis jedis = jedisPool.getResource();
       try {
         for (Node node : new HashSet<Node>(getRegistered())) {
           String key = NodeRegistryUtils.getNodeTypePath(clusterName, node.getNodeType());
           if (jedis.hset(
                   key, node.toFullString(), String.valueOf(SystemClock.now() + expirePeriod))
               == 1) {
             jedis.publish(key, Constants.REGISTER);
           }
         }
         if (lock.acquire(jedis)) {
           clean(jedis);
         }
         if (!replicate) {
           break; // If the servers already synchronize the data among themselves, we only need to write to one machine
         }
       } finally {
         jedis.close();
       }
     } catch (Throwable t) {
       LOGGER.warn(
           "Failed to write provider heartbeat to redis registry. registry: "
               + entry.getKey()
               + ", cause: "
               + t.getMessage(),
           t);
     }
   }
 }
Example No. 6
 @Override
 public void destroy() {
   super.destroy();
   try {
     expireFuture.cancel(true);
   } catch (Throwable t) {
     LOGGER.warn(t.getMessage(), t);
   }
   try {
     for (Notifier notifier : notifiers.values()) {
       notifier.shutdown();
     }
   } catch (Throwable t) {
     LOGGER.warn(t.getMessage(), t);
   }
   for (Map.Entry<String, JedisPool> entry : jedisPools.entrySet()) {
     JedisPool jedisPool = entry.getValue();
     try {
       jedisPool.destroy();
     } catch (Throwable t) {
       LOGGER.warn(
           "Failed to destroy the redis registry client. registry: "
               + entry.getKey()
               + ", cause: "
               + t.getMessage(),
           t);
     }
   }
 }
  /** Checks consistency after all operations. */
  private void consistencyCheck() {
    if (CONSISTENCY_CHECK) {
      assert lock.writeLock().isHeldByCurrentThread();

      if (node2part == null) return;

      for (Map.Entry<UUID, GridDhtPartitionMap> e : node2part.entrySet()) {
        for (Integer p : e.getValue().keySet()) {
          Set<UUID> nodeIds = part2node.get(p);

          assert nodeIds != null
              : "Failed consistency check [part=" + p + ", nodeId=" + e.getKey() + ']';
          assert nodeIds.contains(e.getKey())
              : "Failed consistency check [part="
                  + p
                  + ", nodeId="
                  + e.getKey()
                  + ", nodeIds="
                  + nodeIds
                  + ']';
        }
      }

      for (Map.Entry<Integer, Set<UUID>> e : part2node.entrySet()) {
        for (UUID nodeId : e.getValue()) {
          GridDhtPartitionMap map = node2part.get(nodeId);

          assert map != null
              : "Failed consistency check [part=" + e.getKey() + ", nodeId=" + nodeId + ']';
          assert map.containsKey(e.getKey())
              : "Failed consistency check [part=" + e.getKey() + ", nodeId=" + nodeId + ']';
        }
      }
    }
  }
Example No. 8
    public Object call() {
      final ScriptEngineManager scriptEngineManager = new ScriptEngineManager();
      ScriptEngine e = scriptEngineManager.getEngineByName("javascript");
      if (map != null) {
        for (Map.Entry<String, Object> entry : map.entrySet()) {
          e.put(entry.getKey(), entry.getValue());
        }
      }
      e.put("hazelcast", hazelcastInstance);
      try {
        // For the new JavaScript engine, Nashorn, we need the compatibility script
        if (e.getFactory().getEngineName().toLowerCase().contains("nashorn")) {
          e.eval("load('nashorn:mozilla_compat.js');");
        }

        e.eval("importPackage(java.lang);");
        e.eval("importPackage(java.util);");
        e.eval("importPackage(com.hazelcast.core);");
        e.eval("importPackage(com.hazelcast.config);");
        e.eval("importPackage(java.util.concurrent);");
        e.eval("importPackage(org.junit);");

        return e.eval(script);
      } catch (ScriptException e1) {
        throw new RuntimeException(e1);
      }
    }
Example No. 9
 public UnfilteredRowIterator next() {
   Map.Entry<PartitionPosition, AtomicBTreePartition> entry = iter.next();
   // Actual stored key should be true DecoratedKey
   assert entry.getKey() instanceof DecoratedKey;
   DecoratedKey key = (DecoratedKey) entry.getKey();
   ClusteringIndexFilter filter = dataRange.clusteringIndexFilter(key);
   return filter.getUnfilteredRowIterator(columnFilter, entry.getValue());
 }
Example No. 10
  @Override
  protected void doSubscribe(Node node, NotifyListener listener) {

    List<NodeType> listenNodeTypes = node.getListenNodeTypes();
    if (CollectionUtils.isEmpty(listenNodeTypes)) {
      return;
    }
    for (NodeType listenNodeType : listenNodeTypes) {
      String listenNodePath = NodeRegistryUtils.getNodeTypePath(clusterName, listenNodeType);

      Notifier notifier = notifiers.get(listenNodePath);
      if (notifier == null) {
        Notifier newNotifier = new Notifier(listenNodePath);
        notifiers.putIfAbsent(listenNodePath, newNotifier);
        notifier = notifiers.get(listenNodePath);
        if (notifier == newNotifier) {
          notifier.start();
        }
      }

      boolean success = false;
      NodeRegistryException exception = null;
      for (Map.Entry<String, JedisPool> entry : jedisPools.entrySet()) {
        JedisPool jedisPool = entry.getValue();
        try {
          Jedis jedis = jedisPool.getResource();
          try {
            doNotify(
                jedis,
                Collections.singletonList(listenNodePath),
                Collections.singletonList(listener));
            success = true;
            break; // We only need to read data from one server

          } finally {
            jedis.close();
          }
        } catch (Throwable t) {
          exception =
              new NodeRegistryException(
                  "Failed to unregister node to redis registry. registry: "
                      + entry.getKey()
                      + ", node: "
                      + node
                      + ", cause: "
                      + t.getMessage(),
                  t);
        }
      }
      if (exception != null) {
        if (success) {
          LOGGER.warn(exception.getMessage(), exception);
        } else {
          throw exception;
        }
      }
    }
  }
Example No. 11
 private int findMinLocalDeletionTime(
     Iterator<Map.Entry<PartitionPosition, AtomicBTreePartition>> iterator) {
   int minLocalDeletionTime = Integer.MAX_VALUE;
   while (iterator.hasNext()) {
     Map.Entry<PartitionPosition, AtomicBTreePartition> entry = iterator.next();
     minLocalDeletionTime =
         Math.min(minLocalDeletionTime, entry.getValue().stats().minLocalDeletionTime);
   }
   return minLocalDeletionTime;
 }
Example No. 12
 public void addEnvToIntent(Intent intent) {
   Map<String, String> envMap = System.getenv();
   Set<Map.Entry<String, String>> envSet = envMap.entrySet();
   Iterator<Map.Entry<String, String>> envIter = envSet.iterator();
   int c = 0;
   while (envIter.hasNext()) {
     Map.Entry<String, String> entry = envIter.next();
     intent.putExtra("env" + c, entry.getKey() + "=" + entry.getValue());
     c++;
   }
 }
  /**
   * Updates partition map in all caches.
   *
   * @param msg Partitions single message.
   */
  private void updatePartitionSingleMap(GridDhtPartitionsSingleMessage msg) {
    for (Map.Entry<Integer, GridDhtPartitionMap> entry : msg.partitions().entrySet()) {
      Integer cacheId = entry.getKey();
      GridCacheContext cacheCtx = cctx.cacheContext(cacheId);

      GridDhtPartitionTopology top =
          cacheCtx != null ? cacheCtx.topology() : cctx.exchange().clientTopology(cacheId, this);

      top.update(exchId, entry.getValue());
    }
  }
Example No. 14
 protected void handleMapEntries() {
   Set set = getMap().entrySet();
   Iterator it = set.iterator();
   int count = 0;
   long time = Clock.currentTimeMillis();
   while (it.hasNext()) {
     count++;
     Map.Entry entry = (Map.Entry) it.next();
     println(entry.getKey() + " : " + entry.getValue());
   }
   println("Total " + count);
 }
  protected List<Object> executeInConcurrency(
      SqlSessionCallback action, SortedMap<String, DataSource> dsMap) {
    List<ConcurrentShardRequest> requests = Lists.newArrayList();

    for (Map.Entry<String, DataSource> entry : dsMap.entrySet()) {
      ConcurrentShardRequest request = new ConcurrentShardRequest();
      request.setAction(action);
      request.setDataSource(entry.getValue());
      request.setExecutor(dataSourceSpecificExecutors.get(entry.getKey()));
      requests.add(request);
    }
    return getConcurrentShardRequestProcessor().process(requests);
  }
  /** lowerEntry returns preceding entry. */
  public void testLowerEntry() {
    ConcurrentNavigableMap map = map5();
    Map.Entry e1 = map.lowerEntry(three);
    assertEquals(two, e1.getKey());

    Map.Entry e2 = map.lowerEntry(six);
    assertEquals(five, e2.getKey());

    Map.Entry e3 = map.lowerEntry(one);
    assertNull(e3);

    Map.Entry e4 = map.lowerEntry(zero);
    assertNull(e4);
  }
  /** higherEntry returns next entry. */
  public void testDescendingHigherEntry() {
    ConcurrentNavigableMap map = dmap5();
    Map.Entry e1 = map.higherEntry(m3);
    assertEquals(m4, e1.getKey());

    Map.Entry e2 = map.higherEntry(zero);
    assertEquals(m1, e2.getKey());

    Map.Entry e3 = map.higherEntry(m5);
    assertNull(e3);

    Map.Entry e4 = map.higherEntry(m6);
    assertNull(e4);
  }
  /** higherEntry returns next entry. */
  public void testHigherEntry() {
    ConcurrentNavigableMap map = map5();
    Map.Entry e1 = map.higherEntry(three);
    assertEquals(four, e1.getKey());

    Map.Entry e2 = map.higherEntry(zero);
    assertEquals(one, e2.getKey());

    Map.Entry e3 = map.higherEntry(five);
    assertNull(e3);

    Map.Entry e4 = map.higherEntry(six);
    assertNull(e4);
  }
Example No. 19
  void doStatusCheck() {
    long now = System.currentTimeMillis();

    Set<InetAddress> eps = endpointStateMap_.keySet();
    for (InetAddress endpoint : eps) {
      if (endpoint.equals(localEndpoint_)) continue;

      FailureDetector.instance.interpret(endpoint);
      EndpointState epState = endpointStateMap_.get(endpoint);
      if (epState != null) {
        long duration = now - epState.getUpdateTimestamp();

        if (StorageService.instance.getTokenMetadata().isMember(endpoint))
          epState.setHasToken(true);
        // Check if this is a fat client. Fat clients are removed automatically from
        // gossip after FatClientTimeout.
        if (!epState.getHasToken()
            && !epState.isAlive()
            && !justRemovedEndpoints_.containsKey(endpoint)
            && (duration > FatClientTimeout_)) {
          logger_.info(
              "FatClient "
                  + endpoint
                  + " has been silent for "
                  + FatClientTimeout_
                  + "ms, removing from gossip");
          removeEndpoint(
              endpoint); // will put it in justRemovedEndpoints to respect quarantine delay
          evictFromMembership(endpoint); // can get rid of the state immediately
        }

        if (!epState.isAlive() && (duration > aVeryLongTime_)) {
          evictFromMembership(endpoint);
        }
      }
    }

    if (!justRemovedEndpoints_.isEmpty()) {
      Map<InetAddress, Long> copy = new HashMap<InetAddress, Long>(justRemovedEndpoints_);
      for (Map.Entry<InetAddress, Long> entry : copy.entrySet()) {
        if ((now - entry.getValue()) > QUARANTINE_DELAY) {
          if (logger_.isDebugEnabled())
            logger_.debug(
                QUARANTINE_DELAY + " elapsed, " + entry.getKey() + " gossip quarantine over");
          justRemovedEndpoints_.remove(entry.getKey());
        }
      }
    }
  }
Example No. 20
  public void run() {
    // Each file is processed into a local hash table that is then merged into the global results.
    // This causes much less contention on the global table while still avoiding a fully
    // sequential update.
    Hashtable<String, Integer> local_results =
        new Hashtable<String, Integer>(WordCountJ.HASH_SIZE, WordCountJ.LF);
    // grab a file to work on
    String cf;
    while ((cf = files.poll()) != null) {
      try {
        BufferedReader input = new BufferedReader(new FileReader(cf));
        String text;
        // we'll go line-by-line... maybe this is not the fastest
        while ((text = input.readLine()) != null) {
          // parse words
          Matcher matcher = pattern.matcher(text);
          while (matcher.find()) {
            String word = matcher.group(1);
            if (local_results.containsKey(word)) {
              local_results.put(word, 1 + local_results.get(word));
            } else {
              local_results.put(word, 1);
            }
          }
        }
        input.close();
      } catch (Exception e) {
        System.out.println(" caught a " + e.getClass() + "\n with message: " + e.getMessage());
        return;
      }
      // merge the local hashmap with the shared one; could have a
      // separate thread do this, but that might be cheating

      Iterator<Map.Entry<String, Integer>> updates = local_results.entrySet().iterator();
      while (updates.hasNext()) {
        Map.Entry<String, Integer> kv = updates.next();
        String k = kv.getKey();
        Integer v = kv.getValue();
        synchronized (results) {
          if (results.containsKey(k)) {
            results.put(k, v + results.get(k));
          } else {
            results.put(k, v);
          }
        }
      }
      local_results.clear();
    }
  }
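The comment at the top of run() above describes the pattern: build counts in a thread-local table, then fold them into the shared table so threads rarely contend on the global structure. Below is a minimal sketch of just that merge step on Java 8+, using ConcurrentHashMap.merge instead of synchronizing on the shared map; the class and method names (WordMergeSketch, mergeLocal) and the sample counts are illustrative and not taken from the example above.

import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

public class WordMergeSketch {
  // Shared table; merge() performs the per-key read-modify-write atomically,
  // so no explicit synchronized block around the shared map is needed.
  private final ConcurrentHashMap<String, Integer> results = new ConcurrentHashMap<>();

  /** Folds a thread-local count table into the shared results. */
  public void mergeLocal(Map<String, Integer> localResults) {
    for (Map.Entry<String, Integer> kv : localResults.entrySet()) {
      results.merge(kv.getKey(), kv.getValue(), Integer::sum);
    }
  }

  public static void main(String[] args) {
    WordMergeSketch counter = new WordMergeSketch();
    Map<String, Integer> local = new HashMap<>();
    local.put("hello", 2);
    local.put("world", 1);
    counter.mergeLocal(local);
    System.out.println(counter.results); // {hello=2, world=1} (iteration order may vary)
  }
}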
Example No. 21
  /**
   * Re-initializes the fields which store information about the currently held data. Empties
   * <tt>data</tt>.
   */
  private void reinit() {
    firstSeq = lastSeq = timestamp = -1;
    pictureId = -1;
    empty = true;
    haveEnd = haveStart = false;
    frameLength = 0;

    Iterator<Map.Entry<Long, Container>> it = data.entrySet().iterator();
    Map.Entry<Long, Container> e;
    while (it.hasNext()) {
      e = it.next();
      free.offer(e.getValue());
      it.remove();
    }
  }
  /**
   * Updates partition map in all caches.
   *
   * @param msg Partitions full messages.
   */
  private void updatePartitionFullMap(GridDhtPartitionsFullMessage msg) {
    for (Map.Entry<Integer, GridDhtPartitionFullMap> entry : msg.partitions().entrySet()) {
      Integer cacheId = entry.getKey();

      GridCacheContext cacheCtx = cctx.cacheContext(cacheId);

      if (cacheCtx != null) cacheCtx.topology().update(exchId, entry.getValue());
      else {
        ClusterNode oldest = CU.oldestAliveCacheServerNode(cctx, AffinityTopologyVersion.NONE);

        if (oldest != null && oldest.isLocal())
          cctx.exchange().clientTopology(cacheId, this).update(exchId, entry.getValue());
      }
    }
  }
Example No. 23
 boolean allAvailable() {
   for (Map.Entry<String, PluginSpec> plugin : plugins.entrySet()) {
     PluginSpec pluginSpec = plugin.getValue();
     if (pluginSpec.running < pluginSpec.expected) {
       System.err.println(
           "STILL waiting for "
               + pluginSpec.pluginType
               + " expected "
               + pluginSpec.expected
               + " running "
               + pluginSpec.running);
       return false;
     }
   }
   return true;
 }
  /**
   * Makes an <tt>RTCPSDESPacket</tt> with <tt>RTCPSDES</tt> chunks for all the RTP streams that
   * we're sending.
   *
   * @return an <tt>RTCPSDESPacket</tt> with <tt>RTCPSDES</tt> chunks for all the RTP streams that
   *     we're sending.
   */
  private RTCPSDESPacket makeSDESPacket() {
    Collection<RTCPSDES> sdesChunks = new ArrayList<RTCPSDES>();

    // Create an SDES for our own SSRC.
    RTCPSDES ownSDES = new RTCPSDES();

    SSRCInfo ourinfo = getStream().getStreamRTPManager().getSSRCCache().ourssrc;
    ownSDES.ssrc = (int) getLocalSSRC();
    Collection<RTCPSDESItem> ownItems = new ArrayList<RTCPSDESItem>();
    ownItems.add(new RTCPSDESItem(RTCPSDESItem.CNAME, ourinfo.sourceInfo.getCNAME()));

    // Throttle the source description bandwidth. See RFC3550#6.3.9
    // Allocation of Source Description Bandwidth.

    if (sdesCounter % 3 == 0) {
      if (ourinfo.name != null && ourinfo.name.getDescription() != null)
        ownItems.add(new RTCPSDESItem(RTCPSDESItem.NAME, ourinfo.name.getDescription()));
      if (ourinfo.email != null && ourinfo.email.getDescription() != null)
        ownItems.add(new RTCPSDESItem(RTCPSDESItem.EMAIL, ourinfo.email.getDescription()));
      if (ourinfo.phone != null && ourinfo.phone.getDescription() != null)
        ownItems.add(new RTCPSDESItem(RTCPSDESItem.PHONE, ourinfo.phone.getDescription()));
      if (ourinfo.loc != null && ourinfo.loc.getDescription() != null)
        ownItems.add(new RTCPSDESItem(RTCPSDESItem.LOC, ourinfo.loc.getDescription()));
      if (ourinfo.tool != null && ourinfo.tool.getDescription() != null)
        ownItems.add(new RTCPSDESItem(RTCPSDESItem.TOOL, ourinfo.tool.getDescription()));
      if (ourinfo.note != null && ourinfo.note.getDescription() != null)
        ownItems.add(new RTCPSDESItem(RTCPSDESItem.NOTE, ourinfo.note.getDescription()));
    }

    sdesCounter++;

    ownSDES.items = ownItems.toArray(new RTCPSDESItem[ownItems.size()]);

    sdesChunks.add(ownSDES);

    for (Map.Entry<Integer, byte[]> entry : cnameRegistry.entrySet()) {
      RTCPSDES sdes = new RTCPSDES();
      sdes.ssrc = entry.getKey();
      sdes.items = new RTCPSDESItem[] {new RTCPSDESItem(RTCPSDESItem.CNAME, entry.getValue())};
      // Add the chunk so that it actually ends up in the SDES packet.
      sdesChunks.add(sdes);
    }

    RTCPSDES[] sps = sdesChunks.toArray(new RTCPSDES[sdesChunks.size()]);
    RTCPSDESPacket sp = new RTCPSDESPacket(sps);

    return sp;
  }
Example No. 25
  private synchronized void addNewAgent(
      int agentId,
      SocketChannel socket,
      String agentName,
      String agentIP,
      int agentPort,
      int flags) {
    if (agentIP.equals(":same")) {
      InetAddress agentAddress = socket.socket().getInetAddress();
      agentIP = agentAddress.getHostAddress();
    }

    Log.info(
        "New agent id="
            + agentId
            + " name="
            + agentName
            + " address="
            + agentIP
            + ":"
            + agentPort
            + " flags="
            + flags);
    AgentInfo agentInfo = new AgentInfo();
    agentInfo.agentId = agentId;
    agentInfo.flags = flags;
    agentInfo.socket = socket;
    agentInfo.agentName = agentName;
    agentInfo.agentIP = agentIP;
    agentInfo.agentPort = agentPort;
    agentInfo.outputBuf = new MVByteBuffer(1024);
    agentInfo.inputBuf = new MVByteBuffer(1024);
    agents.put(socket, agentInfo);

    NewAgentMessage newAgentMessage =
        new NewAgentMessage(agentId, agentName, agentIP, agentPort, flags);
    for (Map.Entry<SocketChannel, AgentInfo> entry : agents.entrySet()) {
      if (entry.getKey() == socket) continue;

      // Tell other agents about the new one
      synchronized (entry.getValue().outputBuf) {
        Message.toBytes(newAgentMessage, entry.getValue().outputBuf);
      }

      // Tell new agent about other agents
      NewAgentMessage otherAgentMessage =
          new NewAgentMessage(
              entry.getValue().agentId,
              entry.getValue().agentName,
              entry.getValue().agentIP,
              entry.getValue().agentPort,
              entry.getValue().flags);
      synchronized (agentInfo.outputBuf) {
        Message.toBytes(otherAgentMessage, agentInfo.outputBuf);
      }
    }

    messageIO.addAgent(agentInfo);
    messageIO.outputReady();
  }
      /**
       * Submit differencers for running. All trees *must* have been received before this is called.
       */
      public void submitDifferencers() {
        assert requestedEndpoints.size() == 0;

        // Right now, we only difference the local host against each of the others.
        // CASSANDRA-2610 will fix that.
        // In the meantime, ugly special casing will work well enough.
        MerkleTree localTree = trees.get(FBUtilities.getLocalAddress());
        assert localTree != null;
        for (Map.Entry<InetAddress, MerkleTree> entry : trees.entrySet()) {
          if (entry.getKey().equals(FBUtilities.getLocalAddress())) continue;

          Differencer differencer =
              new Differencer(cfname, entry.getKey(), entry.getValue(), localTree);
          syncJobs.add(entry.getKey());
          logger.debug("Queueing comparison " + differencer);
          StageManager.getStage(Stage.ANTI_ENTROPY).execute(differencer);
        }
        trees.clear(); // allows gc to do its thing
      }
Example No. 27
 private void clean(Jedis jedis) {
   // /LTS/{cluster name}/NODES/
   Set<String> nodeTypePaths =
       jedis.keys(NodeRegistryUtils.getRootPath(appContext.getConfig().getClusterName()) + "/*");
   if (CollectionUtils.isNotEmpty(nodeTypePaths)) {
     for (String nodeTypePath : nodeTypePaths) {
       // /LTS/{cluster name}/NODES/JOB_TRACKER
       Set<String> nodePaths = jedis.keys(nodeTypePath);
       if (CollectionUtils.isNotEmpty(nodePaths)) {
         for (String nodePath : nodePaths) {
           Map<String, String> nodes = jedis.hgetAll(nodePath);
           if (CollectionUtils.isNotEmpty(nodes)) {
             boolean delete = false;
             long now = SystemClock.now();
             for (Map.Entry<String, String> entry : nodes.entrySet()) {
               String key = entry.getKey();
               long expire = Long.parseLong(entry.getValue());
               if (expire < now) {
                 jedis.hdel(nodePath, key);
                 delete = true;
                 if (LOGGER.isWarnEnabled()) {
                   LOGGER.warn(
                       "Delete expired key: "
                           + nodePath
                           + " -> value: "
                           + entry.getKey()
                           + ", expire: "
                           + new Date(expire)
                           + ", now: "
                           + new Date(now));
                 }
               }
             }
             if (delete) {
               jedis.publish(nodePath, Constants.UNREGISTER);
             }
           }
         }
       }
     }
   }
 }
Example No. 28
 protected void handlePartitions(String[] args) {
   Set<Partition> partitions = hazelcast.getPartitionService().getPartitions();
   Map<Member, Integer> partitionCounts = new HashMap<Member, Integer>();
   for (Partition partition : partitions) {
     Member owner = partition.getOwner();
     if (owner != null) {
       Integer count = partitionCounts.get(owner);
       int newCount = 1;
       if (count != null) {
         newCount = count + 1;
       }
       partitionCounts.put(owner, newCount);
     }
     println(partition);
   }
   Set<Map.Entry<Member, Integer>> entries = partitionCounts.entrySet();
   for (Map.Entry<Member, Integer> entry : entries) {
     println(entry.getKey() + ":" + entry.getValue());
   }
 }
Example No. 29
  public static CommandExecutor.Method getFreeExecutor(Stage st) {
    while (true) {
      synchronized (executorsMap) {
        try {
          for (Map.Entry<CommandExecutor.Method, Stage> entry : executorsMap.entrySet()) {
            if (!entry.getKey().isInUse() && entry.getValue() == st) {
              CommandExecutor.Method m = entry.getKey();
              m.setInUse(true);
              return m;
            }
          }

          LOG.info("All executors for stage " + st + " in use (will wait..)");
          executorsMap.wait();

        } catch (InterruptedException ie) {
          return null;
        }
      }
    }
  }
Example No. 30
  /**
   * Calculate the squares of the integers 1 through 10.
   *
   * @param workerPool
   */
  private static void mapExample(WorkerPool workerPool) {
    Map<Integer, Callable<Integer>> taskMap = new HashMap<Integer, Callable<Integer>>();
    for (int i = 1; i <= 10; i++) {
      final int input = i;
      Callable<Integer> callable =
          new Callable<Integer>() {
            @Override
            public Integer call() throws Exception {
              // do all your parallelizable work here
              return input * input;
            }
          };
      taskMap.put(i, callable);
    }

    Map<Integer, Integer> resultMap = workerPool.invokeAll(taskMap);

    for (Map.Entry<Integer, Integer> entry : resultMap.entrySet()) {
      System.out.println("key: " + entry.getKey() + ", value: " + entry.getValue());
    }
  }