Code example #1
  public MemtableUnfilteredPartitionIterator makePartitionIterator(
      final ColumnFilter columnFilter, final DataRange dataRange, final boolean isForThrift) {
    AbstractBounds<PartitionPosition> keyRange = dataRange.keyRange();

    boolean startIsMin = keyRange.left.isMinimum();
    boolean stopIsMin = keyRange.right.isMinimum();

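    // Derive inclusivity from the AbstractBounds subtype: in Cassandra,
    // Bounds is [left, right], Range is (left, right], and
    // IncludingExcludingBounds is [left, right).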
    boolean isBound = keyRange instanceof Bounds;
    boolean includeStart = isBound || keyRange instanceof IncludingExcludingBounds;
    boolean includeStop = isBound || keyRange instanceof Range;
    Map<PartitionPosition, AtomicBTreePartition> subMap;
    if (startIsMin)
      subMap = stopIsMin ? partitions : partitions.headMap(keyRange.right, includeStop);
    else
      subMap =
          stopIsMin
              ? partitions.tailMap(keyRange.left, includeStart)
              : partitions.subMap(keyRange.left, includeStart, keyRange.right, includeStop);

    int minLocalDeletionTime = Integer.MAX_VALUE;

    // avoid iterating over the memtable if we purge all tombstones
    if (cfs.getCompactionStrategyManager().onlyPurgeRepairedTombstones())
      minLocalDeletionTime = findMinLocalDeletionTime(subMap.entrySet().iterator());

    final Iterator<Map.Entry<PartitionPosition, AtomicBTreePartition>> iter =
        subMap.entrySet().iterator();

    return new MemtableUnfilteredPartitionIterator(
        cfs, iter, isForThrift, minLocalDeletionTime, columnFilter, dataRange);
  }
Code example #2
  /** @throws Exception If failed. */
  public void testAffinityPut() throws Exception {
    Thread.sleep(2 * TOP_REFRESH_FREQ);

    assertEquals(NODES_CNT, client.compute().refreshTopology(false, false).size());

    Map<UUID, Grid> gridsByLocNode = new HashMap<>(NODES_CNT);

    GridClientData partitioned = client.data(PARTITIONED_CACHE_NAME);

    GridClientCompute compute = client.compute();

    for (int i = 0; i < NODES_CNT; i++) gridsByLocNode.put(grid(i).localNode().id(), grid(i));

    for (int i = 0; i < 100; i++) {
      String key = "key" + i;

      UUID primaryNodeId = grid(0).mapKeyToNode(PARTITIONED_CACHE_NAME, key).id();

      assertEquals("Affinity mismatch for key: " + key, primaryNodeId, partitioned.affinity(key));

      assertEquals(primaryNodeId, partitioned.affinity(key));

      // Must go to the primary node only. Since backup count is 0, the value must be present on
      // the primary node only.
      partitioned.put(key, "val" + key);

      for (Map.Entry<UUID, Grid> entry : gridsByLocNode.entrySet()) {
        Object val = entry.getValue().cache(PARTITIONED_CACHE_NAME).peek(key);

        if (primaryNodeId.equals(entry.getKey())) assertEquals("val" + key, val);
        else assertNull(val);
      }
    }

    // Now check that we will see value in near cache in pinned mode.
    for (int i = 100; i < 200; i++) {
      String pinnedKey = "key" + i;

      UUID primaryNodeId = grid(0).mapKeyToNode(PARTITIONED_CACHE_NAME, pinnedKey).id();

      UUID pinnedNodeId = F.first(F.view(gridsByLocNode.keySet(), F.notEqualTo(primaryNodeId)));

      GridClientNode node = compute.node(pinnedNodeId);

      partitioned.pinNodes(node).put(pinnedKey, "val" + pinnedKey);

      for (Map.Entry<UUID, Grid> entry : gridsByLocNode.entrySet()) {
        Object val = entry.getValue().cache(PARTITIONED_CACHE_NAME).peek(pinnedKey);

        if (primaryNodeId.equals(entry.getKey()) || pinnedNodeId.equals(entry.getKey()))
          assertEquals("val" + pinnedKey, val);
        else assertNull(val);
      }
    }
  }
Code example #3
    public Object call() {
      final ScriptEngineManager scriptEngineManager = new ScriptEngineManager();
      ScriptEngine e = scriptEngineManager.getEngineByName("javascript");
      if (map != null) {
        for (Map.Entry<String, Object> entry : map.entrySet()) {
          e.put(entry.getKey(), entry.getValue());
        }
      }
      e.put("hazelcast", hazelcastInstance);
      try {
        // For the newer Nashorn JavaScript engine we need the Mozilla compatibility script
        if (e.getFactory().getEngineName().toLowerCase().contains("nashorn")) {
          e.eval("load('nashorn:mozilla_compat.js');");
        }

        e.eval("importPackage(java.lang);");
        e.eval("importPackage(java.util);");
        e.eval("importPackage(com.hazelcast.core);");
        e.eval("importPackage(com.hazelcast.config);");
        e.eval("importPackage(java.util.concurrent);");
        e.eval("importPackage(org.junit);");

        return e.eval(script);
      } catch (ScriptException e1) {
        throw new RuntimeException(e1);
      }
    }
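
A minimal, self-contained sketch of the javax.script flow used above (engine lookup, variable
binding, eval). The greeting binding is illustrative only; note that on JDK 15+ the bundled
JavaScript engine was removed, so getEngineByName may return null.

import javax.script.ScriptEngine;
import javax.script.ScriptEngineManager;
import javax.script.ScriptException;

public class ScriptDemo {
  public static void main(String[] args) throws ScriptException {
    ScriptEngine engine = new ScriptEngineManager().getEngineByName("javascript");
    engine.put("greeting", "hello"); // bind a variable, as the snippet does per map entry
    Object result = engine.eval("greeting + ', world'");
    System.out.println(result); // hello, world
  }
}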
Code example #4
 /** Returns the {@link SegmentCacheIndex} for a given {@link SegmentHeader}. */
 private SegmentCacheIndex getIndex(SegmentHeader header) {
   // First we check the indexes that already exist.
   // This is fast.
   for (Entry<RolapStar, SegmentCacheIndex> entry : indexes.entrySet()) {
     final String factTableName = entry.getKey().getFactTable().getTableName();
     final ByteString schemaChecksum = entry.getKey().getSchema().getChecksum();
     if (!factTableName.equals(header.rolapStarFactTableName)) {
       continue;
     }
     if (!schemaChecksum.equals(header.schemaChecksum)) {
       continue;
     }
     return entry.getValue();
   }
   // The index doesn't exist. Let's create it.
   for (RolapSchema schema : RolapSchema.getRolapSchemas()) {
     if (!schema.getChecksum().equals(header.schemaChecksum)) {
       continue;
     }
     // We have a schema match.
      RolapStar star = schema.getStar(header.rolapStarFactTableName);
      if (star == null) {
        // Schema matched but the fact table is unknown; nothing to index.
        return null;
      }
      // Found it.
      indexes.put(star, new SegmentCacheIndexImpl(thread));
      return indexes.get(star);
   }
   return null;
 }
Code example #5
File: DocBuilder.java Project: ashishlal/echidna_2.0
 @SuppressWarnings("unchecked")
 private void addFields(
     DataConfig.Entity entity, DocWrapper doc, Map<String, Object> arow, VariableResolver vr) {
   for (Map.Entry<String, Object> entry : arow.entrySet()) {
     String key = entry.getKey();
     Object value = entry.getValue();
     if (value == null) continue;
     if (key.startsWith("$")) continue;
     List<DataConfig.Field> field = entity.colNameVsField.get(key);
     if (field == null && dataImporter.getSchema() != null) {
        // This can be a dynamic field or a field which does not have an entry in data-config
        // (an implicit field)
       SchemaField sf = dataImporter.getSchema().getFieldOrNull(key);
       if (sf == null) {
         sf = dataImporter.getConfig().lowerNameVsSchemaField.get(key.toLowerCase(Locale.ENGLISH));
       }
       if (sf != null) {
         addFieldToDoc(entry.getValue(), sf.getName(), 1.0f, sf.multiValued(), doc);
       }
       // else do nothing. if we add it it may fail
     } else {
       if (field != null) {
         for (DataConfig.Field f : field) {
           String name = f.getName();
           if (f.dynamicName) {
             name = vr.replaceTokens(name);
           }
           if (f.toWrite) addFieldToDoc(entry.getValue(), name, f.boost, f.multiValued, doc);
         }
       }
     }
   }
 }
Code example #6
 private void deferExpired() {
   for (Map.Entry<String, JedisPool> entry : jedisPools.entrySet()) {
     JedisPool jedisPool = entry.getValue();
     try {
       Jedis jedis = jedisPool.getResource();
       try {
         for (Node node : new HashSet<Node>(getRegistered())) {
           String key = NodeRegistryUtils.getNodeTypePath(clusterName, node.getNodeType());
           if (jedis.hset(
                   key, node.toFullString(), String.valueOf(SystemClock.now() + expirePeriod))
               == 1) {
             jedis.publish(key, Constants.REGISTER);
           }
         }
         if (lock.acquire(jedis)) {
           clean(jedis);
         }
         if (!replicate) {
            break; // if the server side already synchronizes the data, writing to a single machine is enough
         }
       } finally {
         jedis.close();
       }
     } catch (Throwable t) {
       LOGGER.warn(
           "Failed to write provider heartbeat to redis registry. registry: "
               + entry.getKey()
               + ", cause: "
               + t.getMessage(),
           t);
     }
   }
 }
Code example #7
 @Override
 public void destroy() {
   super.destroy();
   try {
     expireFuture.cancel(true);
   } catch (Throwable t) {
     LOGGER.warn(t.getMessage(), t);
   }
   try {
     for (Notifier notifier : notifiers.values()) {
       notifier.shutdown();
     }
   } catch (Throwable t) {
     LOGGER.warn(t.getMessage(), t);
   }
   for (Map.Entry<String, JedisPool> entry : jedisPools.entrySet()) {
     JedisPool jedisPool = entry.getValue();
     try {
       jedisPool.destroy();
     } catch (Throwable t) {
       LOGGER.warn(
           "Failed to destroy the redis registry client. registry: "
               + entry.getKey()
               + ", cause: "
               + t.getMessage(),
           t);
     }
   }
 }
Code example #8
File: Gossiper.java Project: Jashinta/570
  void notifyFailureDetector(Map<InetAddress, EndpointState> remoteEpStateMap) {
    IFailureDetector fd = FailureDetector.instance;
    for (Entry<InetAddress, EndpointState> entry : remoteEpStateMap.entrySet()) {
      InetAddress endpoint = entry.getKey();
      EndpointState remoteEndpointState = entry.getValue();
      EndpointState localEndpointState = endpointStateMap_.get(endpoint);
      /*
       * If the local endpoint state exists, then report to the FD only
       * if the versions work out.
       */
      if (localEndpointState != null) {
        int localGeneration = localEndpointState.getHeartBeatState().generation_;
        int remoteGeneration = remoteEndpointState.getHeartBeatState().generation_;
        if (remoteGeneration > localGeneration) {
          fd.report(endpoint);
          continue;
        }

        if (remoteGeneration == localGeneration) {
          int localVersion = getMaxEndpointStateVersion(localEndpointState);
          // int localVersion = localEndpointState.getHeartBeatState().getHeartBeatVersion();
          int remoteVersion = remoteEndpointState.getHeartBeatState().getHeartBeatVersion();
          if (remoteVersion > localVersion) {
            fd.report(endpoint);
          }
        }
      }
    }
  }
Code example #9
  @Override
  protected void doSubscribe(Node node, NotifyListener listener) {

    List<NodeType> listenNodeTypes = node.getListenNodeTypes();
    if (CollectionUtils.isEmpty(listenNodeTypes)) {
      return;
    }
    for (NodeType listenNodeType : listenNodeTypes) {
      String listenNodePath = NodeRegistryUtils.getNodeTypePath(clusterName, listenNodeType);

      Notifier notifier = notifiers.get(listenNodePath);
      if (notifier == null) {
        Notifier newNotifier = new Notifier(listenNodePath);
        notifiers.putIfAbsent(listenNodePath, newNotifier);
        notifier = notifiers.get(listenNodePath);
        if (notifier == newNotifier) {
          notifier.start();
        }
      }

      boolean success = false;
      NodeRegistryException exception = null;
      for (Map.Entry<String, JedisPool> entry : jedisPools.entrySet()) {
        JedisPool jedisPool = entry.getValue();
        try {
          Jedis jedis = jedisPool.getResource();
          try {
            doNotify(
                jedis,
                Collections.singletonList(listenNodePath),
                Collections.singletonList(listener));
            success = true;
            break; // only need to read data from one server

          } finally {
            jedis.close();
          }
        } catch (Throwable t) {
          exception =
              new NodeRegistryException(
                  "Failed to unregister node to redis registry. registry: "
                      + entry.getKey()
                      + ", node: "
                      + node
                      + ", cause: "
                      + t.getMessage(),
                  t);
        }
      }
      if (exception != null) {
        if (success) {
          LOGGER.warn(exception.getMessage(), exception);
        } else {
          throw exception;
        }
      }
    }
  }
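
The notifier block above is the classic putIfAbsent start-once idiom. A minimal sketch under
the assumption of a hypothetical per-path listener thread; only the instance that wins the
race is ever started:

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

class StartOnce {
  static final ConcurrentMap<String, Thread> workers = new ConcurrentHashMap<>();

  static Thread workerFor(String path) {
    Thread candidate = new Thread(() -> { /* listen on path */ });
    Thread existing = workers.putIfAbsent(path, candidate);
    Thread worker = (existing != null) ? existing : candidate;
    if (worker == candidate) worker.start(); // start only the winning instance
    return worker;
  }
}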
Code example #10
File: Utils.java Project: rkrol/metrics
 public static Map<MetricName, Metric> filterMetrics(
     Map<MetricName, Metric> metrics, MetricPredicate predicate) {
   final Map<MetricName, Metric> result = new HashMap<MetricName, Metric>();
   for (Entry<MetricName, Metric> entry : metrics.entrySet()) {
     if (predicate.matches(entry.getKey(), entry.getValue())) {
       result.put(entry.getKey(), entry.getValue());
     }
   }
   return result;
 }
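
A hedged usage sketch, assuming the metrics 2.x API where MetricPredicate declares a single
matches(MetricName, Metric) method and Timer implements Metric; allMetrics stands in for a
real registry snapshot:

MetricPredicate timersOnly =
    new MetricPredicate() {
      @Override
      public boolean matches(MetricName name, Metric metric) {
        return metric instanceof Timer; // keep only timers
      }
    };
Map<MetricName, Metric> timers = Utils.filterMetrics(allMetrics, timersOnly);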
Code example #11
 public void addEnvToIntent(Intent intent) {
   Map<String, String> envMap = System.getenv();
   Set<Map.Entry<String, String>> envSet = envMap.entrySet();
   Iterator<Map.Entry<String, String>> envIter = envSet.iterator();
   int c = 0;
   while (envIter.hasNext()) {
     Map.Entry<String, String> entry = envIter.next();
     intent.putExtra("env" + c, entry.getKey() + "=" + entry.getValue());
     c++;
   }
 }
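
On the receiving side, the extras written above can be read back by index until the first
gap; a sketch (the "env" prefix and counter scheme come from the method above, and
envFromIntent itself is illustrative):

public Map<String, String> envFromIntent(Intent intent) {
  Map<String, String> env = new HashMap<>();
  for (int c = 0; ; c++) {
    String pair = intent.getStringExtra("env" + c);
    if (pair == null) break; // no more entries
    int eq = pair.indexOf('='); // split on the first '=' since values may contain '='
    env.put(pair.substring(0, eq), pair.substring(eq + 1));
  }
  return env;
}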
Code example #12
 /** @param design2manifestPlatMap design manifest platform map. */
 private void check4MissingServices(Map<Long, CmsRfcCI> design2manifestPlatMap)
     throws TransistorException {
   // get CiIds to be checked for missing services
   Set<Long> manifestPlatformIds =
        design2manifestPlatMap
            .values()
            .stream()
            .map(CmsRfcCI::getCiId)
            .collect(Collectors.toSet());
   cloudUtil.check4missingServices(manifestPlatformIds);
 }
Code example #13
File: CanvasItemImpl.java Project: umaxfun/loadui
  @Override
  public void setLimit(@Nonnull String counterName, long counterValue) {
    if (counterValue > 0) limits.put(counterName, counterValue);
    else limits.remove(counterName);

    StringBuilder s = new StringBuilder();
    for (Entry<String, Long> e : limits.entrySet())
      s.append(e.getKey()).append('=').append(e.getValue().toString()).append(';');
    setAttribute(LIMITS_ATTRIBUTE, s.toString());

    if (TIMER_COUNTER.equals(counterName)) fixTimeLimit();
    fireBaseEvent(LIMITS);
  }
Code example #14
File: Executing.java Project: rhusar/JGroups
  protected static <V, K> V removeKeyForValue(Map<V, K> map, K value) {
    synchronized (map) {
      Iterator<Entry<V, K>> iter = map.entrySet().iterator();
      while (iter.hasNext()) {
        Entry<V, K> entry = iter.next();
        if (entry.getValue().equals(value)) {
          iter.remove();
          return entry.getKey();
        }
      }
    }

    return null;
  }
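
A small usage sketch of the reverse lookup above; when several keys map to the same value,
only the first entry encountered is removed:

Map<String, Integer> requestIds = new HashMap<>();
requestIds.put("task-a", 1);
requestIds.put("task-b", 2);

String removed = removeKeyForValue(requestIds, 2); // "task-b"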
Code example #15
File: Gossiper.java Project: Jashinta/570
  void doStatusCheck() {
    long now = System.currentTimeMillis();

    Set<InetAddress> eps = endpointStateMap_.keySet();
    for (InetAddress endpoint : eps) {
      if (endpoint.equals(localEndpoint_)) continue;

      FailureDetector.instance.interpret(endpoint);
      EndpointState epState = endpointStateMap_.get(endpoint);
      if (epState != null) {
        long duration = now - epState.getUpdateTimestamp();

        if (StorageService.instance.getTokenMetadata().isMember(endpoint))
          epState.setHasToken(true);
        // check if this is a fat client. fat clients are removed automatically from
        // gossip after FatClientTimeout
        if (!epState.getHasToken()
            && !epState.isAlive()
            && !justRemovedEndpoints_.containsKey(endpoint)
            && (duration > FatClientTimeout_)) {
          logger_.info(
              "FatClient "
                  + endpoint
                  + " has been silent for "
                  + FatClientTimeout_
                  + "ms, removing from gossip");
          removeEndpoint(
              endpoint); // will put it in justRemovedEndpoints to respect quarantine delay
          evictFromMembership(endpoint); // can get rid of the state immediately
        }

        if (!epState.isAlive() && (duration > aVeryLongTime_)) {
          evictFromMembership(endpoint);
        }
      }
    }

    if (!justRemovedEndpoints_.isEmpty()) {
      Map<InetAddress, Long> copy = new HashMap<InetAddress, Long>(justRemovedEndpoints_);
      for (Map.Entry<InetAddress, Long> entry : copy.entrySet()) {
        if ((now - entry.getValue()) > QUARANTINE_DELAY) {
          if (logger_.isDebugEnabled())
            logger_.debug(
                QUARANTINE_DELAY + " elapsed, " + entry.getKey() + " gossip quarantine over");
          justRemovedEndpoints_.remove(entry.getKey());
        }
      }
    }
  }
Code example #16
File: Executing.java Project: rhusar/JGroups
  protected void handleView(View view) {
    this.view = view;
    if (log.isDebugEnabled()) log.debug("view=" + view);
    List<Address> members = view.getMembers();

    _consumerLock.lock();
    try {
      // This removes the consumers that were registered that are now gone
      Iterator<Owner> iterator = _consumersAvailable.iterator();
      while (iterator.hasNext()) {
        Owner owner = iterator.next();
        if (!members.contains(owner.getAddress())) {
          iterator.remove();
          sendRemoveConsumerRequest(owner);
        }
      }

      // This removes the tasks that those requestors are gone
      iterator = _runRequests.iterator();
      while (iterator.hasNext()) {
        Owner owner = iterator.next();
        if (!members.contains(owner.getAddress())) {
          iterator.remove();
          sendRemoveRunRequest(owner);
        }
      }

      synchronized (_awaitingReturn) {
        for (Entry<Owner, Runnable> entry : _awaitingReturn.entrySet()) {
          // The person currently servicing our request has gone down
          // without completing so we have to keep our request alive by
          // sending ours back to the coordinator
          Owner owner = entry.getKey();
          if (!members.contains(owner.getAddress())) {
            Runnable runnable = entry.getValue();
            // We need to register the request id before sending the request back to the
            // coordinator, in case our task gets picked up since another was removed
            _requestId.put(runnable, owner.getRequestId());
            _awaitingConsumer.add(runnable);
            sendToCoordinator(RUN_REQUEST, owner.getRequestId(), local_addr);
          }
        }
      }
    } finally {
      _consumerLock.unlock();
    }
  }
Code example #17
 private Map<String, String> buildMapping() {
   Map<String, String> extensionMapping = new LinkedHashMap<String, String>();
    // force inclusion of the unknown item to manage unknown files
   extensionMapping.put(
       DocumentType.UNKNOWN.getExtension(), DocumentType.UNKNOWN.getDefaultHeaderTypeName());
   for (Map.Entry<String, String> entry : mapping.entrySet()) {
     extensionMapping.put(entry.getKey().toLowerCase(), entry.getValue().toLowerCase());
   }
   if (useDefaultMapping) {
     for (Map.Entry<String, String> entry : defaultMapping().entrySet()) {
       if (!extensionMapping.containsKey(entry.getKey())) {
         extensionMapping.put(entry.getKey(), entry.getValue());
       }
     }
   }
   return extensionMapping;
 }
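
A sketch of how the resulting map might be consulted; lookupHeaderType and the extension
extraction are illustrative, not part of the original class:

String lookupHeaderType(Map<String, String> extensionMapping, String fileName) {
  int dot = fileName.lastIndexOf('.');
  String ext = dot < 0 ? "" : fileName.substring(dot + 1).toLowerCase();
  // fall back to the forced "unknown" entry when the extension is unmapped
  return extensionMapping.getOrDefault(
      ext, extensionMapping.get(DocumentType.UNKNOWN.getExtension()));
}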
Code example #18
      /**
       * Submit differencers for running. All trees *must* have been received before this is called.
       */
      public void submitDifferencers() {
        assert requestedEndpoints.size() == 0;

        // Right now, we only difference the local host against each remote host.
        // CASSANDRA-2610 will fix that.
        // In the meantime, ugly special-casing will work well enough.
        MerkleTree localTree = trees.get(FBUtilities.getLocalAddress());
        assert localTree != null;
        for (Map.Entry<InetAddress, MerkleTree> entry : trees.entrySet()) {
          if (entry.getKey().equals(FBUtilities.getLocalAddress())) continue;

          Differencer differencer =
              new Differencer(cfname, entry.getKey(), entry.getValue(), localTree);
          syncJobs.add(entry.getKey());
          logger.debug("Queueing comparison " + differencer);
          StageManager.getStage(Stage.ANTI_ENTROPY).execute(differencer);
        }
        trees.clear(); // allows gc to do its thing
      }
Code example #19
 private void clean(Jedis jedis) {
    // /LTS/{cluster name}/NODES/
   Set<String> nodeTypePaths =
       jedis.keys(NodeRegistryUtils.getRootPath(appContext.getConfig().getClusterName()) + "/*");
   if (CollectionUtils.isNotEmpty(nodeTypePaths)) {
     for (String nodeTypePath : nodeTypePaths) {
        // /LTS/{cluster name}/NODES/JOB_TRACKER
       Set<String> nodePaths = jedis.keys(nodeTypePath);
       if (CollectionUtils.isNotEmpty(nodePaths)) {
         for (String nodePath : nodePaths) {
           Map<String, String> nodes = jedis.hgetAll(nodePath);
           if (CollectionUtils.isNotEmpty(nodes)) {
             boolean delete = false;
             long now = SystemClock.now();
             for (Map.Entry<String, String> entry : nodes.entrySet()) {
               String key = entry.getKey();
               long expire = Long.parseLong(entry.getValue());
               if (expire < now) {
                 jedis.hdel(nodePath, key);
                 delete = true;
                 if (LOGGER.isWarnEnabled()) {
                   LOGGER.warn(
                       "Delete expired key: "
                           + nodePath
                           + " -> value: "
                           + entry.getKey()
                           + ", expire: "
                           + new Date(expire)
                           + ", now: "
                           + new Date(now));
                 }
               }
             }
             if (delete) {
               jedis.publish(nodePath, Constants.UNREGISTER);
             }
           }
         }
       }
     }
   }
 }
Code example #20
File: TestApp.java Project: karanbatra/hazelcast
 protected void handlePartitions(String[] args) {
   Set<Partition> partitions = hazelcast.getPartitionService().getPartitions();
   Map<Member, Integer> partitionCounts = new HashMap<Member, Integer>();
   for (Partition partition : partitions) {
     Member owner = partition.getOwner();
     if (owner != null) {
       Integer count = partitionCounts.get(owner);
       int newCount = 1;
       if (count != null) {
         newCount = count + 1;
       }
       partitionCounts.put(owner, newCount);
     }
     println(partition);
   }
   Set<Map.Entry<Member, Integer>> entries = partitionCounts.entrySet();
   for (Map.Entry<Member, Integer> entry : entries) {
     println(entry.getKey() + ":" + entry.getValue());
   }
 }
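
On Java 8+, the null-checked counting above collapses to Map.merge; a behavior-equivalent
sketch of the tallying loop:

Map<Member, Integer> partitionCounts = new HashMap<Member, Integer>();
for (Partition partition : partitions) {
  Member owner = partition.getOwner();
  if (owner != null) {
    partitionCounts.merge(owner, 1, Integer::sum); // insert 1, or add 1 to the running count
  }
}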
Code example #21
File: WorkerPool.java Project: harshadptl/lumify
  /**
   * Calculate the squares of the integers 1 through 10.
   *
   * @param workerPool the pool used to run the tasks in parallel
   */
  private static void mapExample(WorkerPool workerPool) {
    Map<Integer, Callable<Integer>> taskMap = new HashMap<Integer, Callable<Integer>>();
    for (int i = 1; i <= 10; i++) {
      final int input = i;
      Callable<Integer> callable =
          new Callable<Integer>() {
            @Override
            public Integer call() throws Exception {
              // do all your parallelizable work here
              return input * input;
            }
          };
      taskMap.put(i, callable);
    }

    Map<Integer, Integer> resultMap = workerPool.invokeAll(taskMap);

    for (Map.Entry<Integer, Integer> entry : resultMap.entrySet()) {
      System.out.println("key: " + entry.getKey() + ", value: " + entry.getValue());
    }
  }
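
The keyed invokeAll above is project-specific; the JDK's ExecutorService offers a list-based
equivalent. A sketch of the same squares computation using only java.util.concurrent:

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public class Squares {
  public static void main(String[] args) throws Exception {
    ExecutorService pool = Executors.newFixedThreadPool(4);
    List<Callable<Integer>> tasks = new ArrayList<>();
    for (int i = 1; i <= 10; i++) {
      final int input = i;
      tasks.add(() -> input * input); // parallelizable work, as in the example
    }
    for (Future<Integer> f : pool.invokeAll(tasks)) { // blocks until all tasks finish
      System.out.println(f.get());
    }
    pool.shutdown();
  }
}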
Code example #22
 @Override
 protected void doRegister(Node node) {
   String key = NodeRegistryUtils.getNodeTypePath(clusterName, node.getNodeType());
   String expire = String.valueOf(SystemClock.now() + expirePeriod);
   boolean success = false;
   NodeRegistryException exception = null;
   for (Map.Entry<String, JedisPool> entry : jedisPools.entrySet()) {
     JedisPool jedisPool = entry.getValue();
     try {
       Jedis jedis = jedisPool.getResource();
       try {
         jedis.hset(key, node.toFullString(), expire);
         jedis.publish(key, Constants.REGISTER);
         success = true;
         if (!replicate) {
            break; // if the server side already synchronizes the data, writing to a single machine is enough
         }
       } finally {
         jedis.close();
       }
     } catch (Throwable t) {
       exception =
           new NodeRegistryException(
               "Failed to register node to redis registry. registry: "
                   + entry.getKey()
                   + ", node: "
                   + node
                   + ", cause: "
                   + t.getMessage(),
               t);
     }
   }
   if (exception != null) {
     if (success) {
       LOGGER.warn(exception.getMessage(), exception);
     } else {
       throw exception;
     }
   }
 }
Code example #23
File: Utils.java Project: rkrol/metrics
 public static Map<String, Map<MetricName, Metric>> sortAndFilterMetrics(
     Map<MetricName, Metric> metrics, MetricPredicate predicate) {
   final Map<String, Map<MetricName, Metric>> sortedMetrics =
       new TreeMap<String, Map<MetricName, Metric>>();
   for (Entry<MetricName, Metric> entry : metrics.entrySet()) {
     final String qualifiedTypeName = entry.getKey().getGroup() + "." + entry.getKey().getType();
     if (predicate.matches(entry.getKey(), entry.getValue())) {
       final String scopedName;
       if (entry.getKey().hasScope()) {
         scopedName = qualifiedTypeName + "." + entry.getKey().getScope();
       } else {
         scopedName = qualifiedTypeName;
       }
       Map<MetricName, Metric> subMetrics = sortedMetrics.get(scopedName);
       if (subMetrics == null) {
         subMetrics = new TreeMap<MetricName, Metric>();
         sortedMetrics.put(scopedName, subMetrics);
       }
       subMetrics.put(entry.getKey(), entry.getValue());
     }
   }
   return sortedMetrics;
 }
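
On Java 8+, the get-then-put grouping step above can be expressed with computeIfAbsent; a
behavior-equivalent sketch of that inner block:

Map<MetricName, Metric> subMetrics =
    sortedMetrics.computeIfAbsent(scopedName, k -> new TreeMap<MetricName, Metric>());
subMetrics.put(entry.getKey(), entry.getValue());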
Code example #24
File: Gossiper.java Project: Jashinta/570
  void applyStateLocally(Map<InetAddress, EndpointState> epStateMap) {
    for (Entry<InetAddress, EndpointState> entry : epStateMap.entrySet()) {
      InetAddress ep = entry.getKey();
      if (ep.equals(localEndpoint_)) continue;

      EndpointState localEpStatePtr = endpointStateMap_.get(ep);
      EndpointState remoteState = entry.getValue();
      /*
          If state does not exist just add it. If it does then add it only if the version
          of the remote copy is greater than the local copy.
      */
      if (localEpStatePtr != null) {
        int localGeneration = localEpStatePtr.getHeartBeatState().getGeneration();
        int remoteGeneration = remoteState.getHeartBeatState().getGeneration();

        if (remoteGeneration > localGeneration) {
          handleGenerationChange(ep, remoteState);
        } else if (remoteGeneration == localGeneration) {
          /* manage the membership state */
          int localMaxVersion = getMaxEndpointStateVersion(localEpStatePtr);
          int remoteMaxVersion = getMaxEndpointStateVersion(remoteState);
          if (remoteMaxVersion > localMaxVersion) {
            markAlive(ep, localEpStatePtr);
            applyHeartBeatStateLocally(ep, localEpStatePtr, remoteState);
            /* apply ApplicationState */
            applyApplicationStateLocally(ep, localEpStatePtr, remoteState);
          }
        } else {
          if (logger_.isTraceEnabled())
            logger_.trace(
                "Ignoring remote generation " + remoteGeneration + " < " + localGeneration);
        }
      } else {
        handleNewJoin(ep, remoteState);
      }
    }
  }
Code example #25
File: ParamerSet.java Project: hdwt8289/SerialSlave1
    public TaskWrite(Map map1, Map m_max, Map m_min, DBCollection coll, ModbusSlaveSet slave) {

      try {
        synchronized (slave) {
          Calendar calener = Calendar.getInstance();
          Date d1 = calener.getTime();
          Date d2 = new Date(calener.getTime().getTime() - 5000);
          BasicDBObject b2 = new BasicDBObject();
          b2.put("$gte", d2);
          b2.put("$lte", d1);
          DBCursor cursor =
              coll.find(new BasicDBObject("_id", b2)).sort(new BasicDBObject("_id", -1)).limit(1);
          while (cursor.hasNext()) {
            DBObject dbo = cursor.next();
            Set set1 = map1.entrySet();
            Iterator it1 = set1.iterator();
            while (it1.hasNext()) {
              Map.Entry<String, Map<String, String>> entry =
                  (Map.Entry<String, Map<String, String>>) it1.next();
              Map<String, String> map2 = entry.getValue();
              for (Iterator it2 = map2.entrySet().iterator(); it2.hasNext(); ) {
                Map.Entry<String, String> entry2 = (Map.Entry<String, String>) it2.next();
                String name = entry2.getKey().toString();
                String paramAddr = entry2.getValue().toString();
                int fun = (int) (Double.parseDouble(paramAddr));
                if (paramAddr.substring(0, 1).equals("4")) {
                  double value = Double.parseDouble(dbo.get(name).toString());
                  double dmax = (Double) m_max.get(name);
                  double dmin = (Double) m_min.get(name);
                  double dValue = 0;
                  if (value > dmax || value < dmin) {
                    if (value >= 0) {
                      dValue = 32000 * (int) (value / dmax);
                    }
                    if (value < 0) {
                      dValue = -32000 * (int) (value / dmin);
                    }
                    // slave.getProcessImage(3).setInputRegister(fun % 10000, (short) dValue);
                  } else { /// parameter over-limit alarm ("参数超限" = "parameter out of range")
                    // Note: the branch above scales out-of-range values while this alarm fires
                    // for in-range ones, so the condition appears inverted w.r.t. the comment.
                    JOptionPane.showMessageDialog(null, "参数超限");
                    slave.stop();
                  }
                }
                if (paramAddr.substring(0, 1).equals("3")) {
                  double value = Double.parseDouble(dbo.get(name).toString());
                  double dmax = (Double) m_max.get(name);
                  double dmin = (Double) m_min.get(name);
                  double dValue = 0;
                  if (value > dmax || value < dmin) {
                    if (value >= 0) {
                      dValue = 32000 * (int) (value / dmax);
                    }
                    if (value < 0) {
                      dValue = -32000 * (int) (value / dmin);
                    }
                    // slave.getProcessImage(3).setHoldingRegister(fun % 10000, (short) dValue);
                  } else { /// parameter over-limit alarm
                    JOptionPane.showMessageDialog(null, "参数超限");
                    slave.stop();
                  }
                }
                if (paramAddr.substring(0, 1).equals("2")) {
                  String value = dbo.get(name).toString();
                  /// slave.getProcessImage(4).setInput(fun % 10000, Boolean.valueOf(value));
                }
                if (paramAddr.substring(0, 1).equals("1")) {
                  String value = dbo.get(name).toString();
                  // slave.getProcessImage(4).setCoil(fun % 10000, Boolean.valueOf(value));
                }
              }
            }
          }
        }
      } catch (Exception ex) {
        // exceptions are silently swallowed here; consider at least logging them
      }
    }
Code example #26
File: ParamerSet.java Project: hdwt8289/SerialSlave1
    public TaskRead(Map map1, Map m_max, Map m_min, DBCollection coll, ModbusSlaveSet slave) {
      try {
        //// Here, data is saved as soon as Start is clicked
        /// Approach: the data source sends a single boolean variable alternating between 1 and 0;
        /// first check whether the link is up, and only then save the data
        synchronized (slave) {
          Map map = new HashMap();
          Set set1 = map1.entrySet();
          Iterator it1 = set1.iterator();
          while (it1.hasNext()) {
            Map.Entry<String, Map<String, String>> entry =
                (Map.Entry<String, Map<String, String>>) it1.next();
            Map<String, String> map2 = entry.getValue();
            for (Iterator it2 = map2.entrySet().iterator(); it2.hasNext(); ) {
              Map.Entry<String, String> entry2 = (Map.Entry<String, String>) it2.next();
              String name = entry2.getKey().toString();
              String paramAddr = entry2.getValue().toString();
              int fun = (int) (Double.parseDouble(paramAddr));
              if (paramAddr.substring(0, 1).equals("4")) {
                Short d4 = slave.getProcessImage(1).getInputRegister(fun % 10000);
                double dmax = (Double) m_max.get(name);
                double dmin = (Double) m_min.get(name);
                double dValue = 0;
                if (d4 >= 0) {
                  dValue = dmax * d4 / 32000;
                } else {
                  dValue = dmin * d4 / (-32000);
                }
                map.put(name, dValue);
              }
              if (paramAddr.substring(0, 1).equals("3")) {
                Short d3 = slave.getProcessImage(1).getHoldingRegister(fun % 10000);
                double dmax = (Double) m_max.get(name);
                double dmin = (Double) m_min.get(name);
                double dValue = 0;

                if (d3 >= 0) {
                  dValue = dmax * d3 / 32000;
                } else {
                  dValue = dmin * d3 / (-32000);
                }
                map.put(name, dValue);
              }
              if (paramAddr.substring(0, 1).equals("2")) {
                map.put(name, slave.getProcessImage(2).getInput(fun % 10000));
              }
              if (paramAddr.substring(0, 1).equals("1")) {
                Boolean a = slave.getProcessImage(2).getCoil(fun % 10000 - 1);
                map.put(name, a);
              }
            }
          }

          Calendar calendar = Calendar.getInstance();
          Date dd = calendar.getTime();

          BasicDBObject doc = new BasicDBObject();
          doc.put("_id", dd);

          Set set = map.entrySet();
          Iterator it = set.iterator();
          while (it.hasNext()) {
            Map.Entry<String, String> entry1 = (Map.Entry<String, String>) it.next();
            doc.put(entry1.getKey(), entry1.getValue());
          }
          coll.insert(doc);
        }
      } catch (Exception ex) {
        // exceptions are silently swallowed here; consider at least logging them
      }
    }
Code example #27
  /**
   * Starts activity.
   *
   * @throws IgniteInterruptedCheckedException If interrupted.
   */
  public void init() throws IgniteInterruptedCheckedException {
    if (isDone()) return;

    if (init.compareAndSet(false, true)) {
      if (isDone()) return;

      try {
        // Wait for event to occur to make sure that discovery
        // will return corresponding nodes.
        U.await(evtLatch);

        assert discoEvt != null : this;
        assert !dummy && !forcePreload : this;

        ClusterNode oldest = CU.oldestAliveCacheServerNode(cctx, exchId.topologyVersion());

        oldestNode.set(oldest);

        startCaches();

        // True if client node joined or failed.
        boolean clientNodeEvt;

        if (F.isEmpty(reqs)) {
          int type = discoEvt.type();

          assert type == EVT_NODE_JOINED || type == EVT_NODE_LEFT || type == EVT_NODE_FAILED
              : discoEvt;

          clientNodeEvt = CU.clientNode(discoEvt.eventNode());
        } else {
          assert discoEvt.type() == EVT_DISCOVERY_CUSTOM_EVT : discoEvt;

          boolean clientOnlyStart = true;

          for (DynamicCacheChangeRequest req : reqs) {
            if (!req.clientStartOnly()) {
              clientOnlyStart = false;

              break;
            }
          }

          clientNodeEvt = clientOnlyStart;
        }

        if (clientNodeEvt) {
          ClusterNode node = discoEvt.eventNode();

          // Clients need to initialize affinity for a local join event or for started client caches.
          if (!node.isLocal()) {
            for (GridCacheContext cacheCtx : cctx.cacheContexts()) {
              if (cacheCtx.isLocal()) continue;

              GridDhtPartitionTopology top = cacheCtx.topology();

              top.updateTopologyVersion(exchId, this, -1, stopping(cacheCtx.cacheId()));

              if (cacheCtx.affinity().affinityTopologyVersion() == AffinityTopologyVersion.NONE) {
                initTopology(cacheCtx);

                top.beforeExchange(this);
              } else
                cacheCtx.affinity().clientEventTopologyChange(discoEvt, exchId.topologyVersion());
            }

            if (exchId.isLeft())
              cctx.mvcc().removeExplicitNodeLocks(exchId.nodeId(), exchId.topologyVersion());

            onDone(exchId.topologyVersion());

            skipPreload = cctx.kernalContext().clientNode();

            return;
          }
        }

        if (cctx.kernalContext().clientNode()) {
          skipPreload = true;

          for (GridCacheContext cacheCtx : cctx.cacheContexts()) {
            if (cacheCtx.isLocal()) continue;

            GridDhtPartitionTopology top = cacheCtx.topology();

            top.updateTopologyVersion(exchId, this, -1, stopping(cacheCtx.cacheId()));
          }

          for (GridCacheContext cacheCtx : cctx.cacheContexts()) {
            if (cacheCtx.isLocal()) continue;

            initTopology(cacheCtx);
          }

          if (oldestNode.get() != null) {
            rmtNodes =
                new ConcurrentLinkedQueue<>(
                    CU.aliveRemoteServerNodesWithCaches(cctx, exchId.topologyVersion()));

            rmtIds = Collections.unmodifiableSet(new HashSet<>(F.nodeIds(rmtNodes)));

            ready.set(true);

            initFut.onDone(true);

            if (log.isDebugEnabled()) log.debug("Initialized future: " + this);

            sendPartitions();
          } else onDone(exchId.topologyVersion());

          return;
        }

        assert oldestNode.get() != null;

        for (GridCacheContext cacheCtx : cctx.cacheContexts()) {
          if (isCacheAdded(cacheCtx.cacheId(), exchId.topologyVersion())) {
            if (cacheCtx
                .discovery()
                .cacheAffinityNodes(cacheCtx.name(), topologyVersion())
                .isEmpty())
              U.quietAndWarn(log, "No server nodes found for cache client: " + cacheCtx.namex());
          }

          cacheCtx.preloader().onExchangeFutureAdded();
        }

        List<String> cachesWithoutNodes = null;

        if (exchId.isLeft()) {
          for (String name : cctx.cache().cacheNames()) {
            if (cctx.discovery().cacheAffinityNodes(name, topologyVersion()).isEmpty()) {
              if (cachesWithoutNodes == null) cachesWithoutNodes = new ArrayList<>();

              cachesWithoutNodes.add(name);

              // Fire event even if there is no client cache started.
              if (cctx.gridEvents().isRecordable(EventType.EVT_CACHE_NODES_LEFT)) {
                Event evt =
                    new CacheEvent(
                        name,
                        cctx.localNode(),
                        cctx.localNode(),
                        "All server nodes have left the cluster.",
                        EventType.EVT_CACHE_NODES_LEFT,
                        0,
                        false,
                        null,
                        null,
                        null,
                        null,
                        false,
                        null,
                        false,
                        null,
                        null,
                        null);

                cctx.gridEvents().record(evt);
              }
            }
          }
        }

        if (cachesWithoutNodes != null) {
          StringBuilder sb =
              new StringBuilder(
                  "All server nodes for the following caches have left the cluster: ");

          for (int i = 0; i < cachesWithoutNodes.size(); i++) {
            String cache = cachesWithoutNodes.get(i);

            sb.append('\'').append(cache).append('\'');

            if (i != cachesWithoutNodes.size() - 1) sb.append(", ");
          }

          U.quietAndWarn(log, sb.toString());

          U.quietAndWarn(log, "Must have server nodes for caches to operate.");
        }

        assert discoEvt != null;

        assert exchId.nodeId().equals(discoEvt.eventNode().id());

        for (GridCacheContext cacheCtx : cctx.cacheContexts()) {
          GridClientPartitionTopology clientTop =
              cctx.exchange().clearClientTopology(cacheCtx.cacheId());

          long updSeq = clientTop == null ? -1 : clientTop.lastUpdateSequence();

          // Update before waiting for locks.
          if (!cacheCtx.isLocal())
            cacheCtx
                .topology()
                .updateTopologyVersion(exchId, this, updSeq, stopping(cacheCtx.cacheId()));
        }

        // Grab all alive remote nodes with order of equal or less than last joined node.
        rmtNodes =
            new ConcurrentLinkedQueue<>(
                CU.aliveRemoteServerNodesWithCaches(cctx, exchId.topologyVersion()));

        rmtIds = Collections.unmodifiableSet(new HashSet<>(F.nodeIds(rmtNodes)));

        for (Map.Entry<UUID, GridDhtPartitionsSingleMessage> m : singleMsgs.entrySet())
          // If received any messages, process them.
          onReceive(m.getKey(), m.getValue());

        for (Map.Entry<UUID, GridDhtPartitionsFullMessage> m : fullMsgs.entrySet())
          // If received any messages, process them.
          onReceive(m.getKey(), m.getValue());

        AffinityTopologyVersion topVer = exchId.topologyVersion();

        for (GridCacheContext cacheCtx : cctx.cacheContexts()) {
          if (cacheCtx.isLocal()) continue;

          // Must initialize topology after we get discovery event.
          initTopology(cacheCtx);

          cacheCtx.preloader().updateLastExchangeFuture(this);
        }

        IgniteInternalFuture<?> partReleaseFut = cctx.partitionReleaseFuture(topVer);

        // Assign to class variable so it will be included into toString() method.
        this.partReleaseFut = partReleaseFut;

        if (log.isDebugEnabled()) log.debug("Before waiting for partition release future: " + this);

        while (true) {
          try {
            partReleaseFut.get(2 * cctx.gridConfig().getNetworkTimeout(), TimeUnit.MILLISECONDS);

            break;
          } catch (IgniteFutureTimeoutCheckedException ignored) {
            // Print pending transactions and locks that might have led to hang.
            dumpPendingObjects();
          }
        }

        if (log.isDebugEnabled()) log.debug("After waiting for partition release future: " + this);

        if (!F.isEmpty(reqs)) blockGateways();

        if (exchId.isLeft())
          cctx.mvcc().removeExplicitNodeLocks(exchId.nodeId(), exchId.topologyVersion());

        IgniteInternalFuture<?> locksFut = cctx.mvcc().finishLocks(exchId.topologyVersion());

        while (true) {
          try {
            locksFut.get(2 * cctx.gridConfig().getNetworkTimeout(), TimeUnit.MILLISECONDS);

            break;
          } catch (IgniteFutureTimeoutCheckedException ignored) {
            U.warn(
                log,
                "Failed to wait for locks release future. "
                    + "Dumping pending objects that might be the cause: "
                    + cctx.localNodeId());

            U.warn(log, "Locked entries:");

            Map<IgniteTxKey, Collection<GridCacheMvccCandidate>> locks =
                cctx.mvcc().unfinishedLocks(exchId.topologyVersion());

            for (Map.Entry<IgniteTxKey, Collection<GridCacheMvccCandidate>> e : locks.entrySet())
              U.warn(log, "Locked entry [key=" + e.getKey() + ", mvcc=" + e.getValue() + ']');
          }
        }

        for (GridCacheContext cacheCtx : cctx.cacheContexts()) {
          if (cacheCtx.isLocal()) continue;

          // Notify replication manager.
          GridCacheContext drCacheCtx =
              cacheCtx.isNear() ? cacheCtx.near().dht().context() : cacheCtx;

          if (drCacheCtx.isDrEnabled()) drCacheCtx.dr().beforeExchange(topVer, exchId.isLeft());

          // Partition release future is done so we can flush the write-behind store.
          cacheCtx.store().forceFlush();

          // Process queued undeploys prior to sending/spreading map.
          cacheCtx.preloader().unwindUndeploys();

          GridDhtPartitionTopology top = cacheCtx.topology();

          assert topVer.equals(top.topologyVersion())
              : "Topology version is updated only in this class instances inside single ExchangeWorker thread.";

          top.beforeExchange(this);
        }

        for (GridClientPartitionTopology top : cctx.exchange().clientTopologies()) {
          top.updateTopologyVersion(exchId, this, -1, stopping(top.cacheId()));

          top.beforeExchange(this);
        }
      } catch (IgniteInterruptedCheckedException e) {
        onDone(e);

        throw e;
      } catch (Throwable e) {
        U.error(
            log,
            "Failed to reinitialize local partitions (preloading will be stopped): " + exchId,
            e);

        onDone(e);

        if (e instanceof Error) throw (Error) e;

        return;
      }

      if (F.isEmpty(rmtIds)) {
        onDone(exchId.topologyVersion());

        return;
      }

      ready.set(true);

      initFut.onDone(true);

      if (log.isDebugEnabled()) log.debug("Initialized future: " + this);

      // If this node is not oldest.
      if (!oldestNode.get().id().equals(cctx.localNodeId())) sendPartitions();
      else {
        boolean allReceived = allReceived();

        if (allReceived && replied.compareAndSet(false, true)) {
          if (spreadPartitions()) onDone(exchId.topologyVersion());
        }
      }

      scheduleRecheck();
    } else assert false : "Skipped init future: " + this;
  }
Code example #28
  /** {@inheritDoc} */
  @Override
  public GridFuture<?> addData(Map<K, V> entries) throws IllegalStateException {
    A.notNull(entries, "entries");

    return addData(entries.entrySet());
  }
Code example #29
  /**
   * Removes a particular entry from the trash directory or a subdirectory.
   *
   * @param parentId Parent ID.
   * @param id Entry id.
   * @throws IgniteCheckedException If delete failed for some reason.
   */
  private void deleteDirectory(IgniteUuid parentId, IgniteUuid id) throws IgniteCheckedException {
    assert parentId != null;
    assert id != null;

    while (true) {
      IgfsFileInfo info = meta.info(id);

      if (info != null) {
        assert info.isDirectory();

        Map<String, IgfsListingEntry> listing = info.listing();

        if (listing.isEmpty()) return; // Directory is empty.

        Map<String, IgfsListingEntry> delListing;

        if (listing.size() <= MAX_DELETE_BATCH) delListing = listing;
        else {
          delListing = new HashMap<>(MAX_DELETE_BATCH, 1.0f);

          int i = 0;

          for (Map.Entry<String, IgfsListingEntry> entry : listing.entrySet()) {
            delListing.put(entry.getKey(), entry.getValue());

            if (++i == MAX_DELETE_BATCH) break;
          }
        }

        GridCompoundFuture<Object, ?> fut = new GridCompoundFuture<>();

        // Delegate to child folders.
        for (IgfsListingEntry entry : delListing.values()) {
          if (!cancelled) {
            if (entry.isDirectory()) deleteDirectory(id, entry.fileId());
            else {
              IgfsFileInfo fileInfo = meta.info(entry.fileId());

              if (fileInfo != null) {
                assert fileInfo.isFile();

                fut.add(data.delete(fileInfo));
              }
            }
          } else return;
        }

        fut.markInitialized();

        // Wait for data cache to delete values before clearing meta cache.
        try {
          fut.get();
        } catch (IgniteFutureCancelledCheckedException ignore) {
          // This future can be cancelled only due to IGFS shutdown.
          cancelled = true;

          return;
        }

        // Actual delete of folder content.
        Collection<IgniteUuid> delIds = meta.delete(id, delListing);

        if (delListing == listing && delListing.size() == delIds.size())
          break; // All entries were deleted.
      } else break; // Entry was deleted concurrently.
    }
  }
Code example #30
File: Gossiper.java Project: Jashinta/570
 public Set<Entry<InetAddress, EndpointState>> getEndpointStates() {
   return endpointStateMap_.entrySet();
 }