Example No. 1
  public void cleanupCompletedTransactions() {
    if (!completedTransactions.isEmpty()) {
      try {
        log.tracef(
            "About to clean up completed transactions. Initial size is %d",
            completedTransactions.size());
        // this iterator is weakly consistent and will never throw ConcurrentModificationException
        Iterator<Map.Entry<GlobalTransaction, Long>> iterator =
            completedTransactions.entrySet().iterator();
        long timeout = configuration.transaction().completedTxTimeout();

        int removedEntries = 0;
        long beginning = timeService.time();
        while (iterator.hasNext()) {
          Map.Entry<GlobalTransaction, Long> e = iterator.next();
          long ageMillis = timeService.timeDuration(e.getValue(), TimeUnit.MILLISECONDS);
          if (ageMillis >= timeout) {
            iterator.remove();
            removedEntries++;
          }
        }
        long duration = timeService.timeDuration(beginning, TimeUnit.MILLISECONDS);

        log.tracef(
            "Finished cleaning up completed transactions. %d transactions were removed, total duration was %d millis, "
                + "current number of completed transactions is %d",
            removedEntries, duration, completedTransactions.size());
      } catch (Exception e) {
        log.errorf(e, "Failed to cleanup completed transactions: %s", e.getMessage());
      }
    }
  }
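The cleanup above relies on a documented property of ConcurrentHashMap: its iterators are weakly consistent and support removal during iteration. A minimal standalone sketch of that property, using only the JDK (the map contents and the expiry condition are invented for illustration):

import java.util.Iterator;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

public class WeaklyConsistentRemovalSketch {
  public static void main(String[] args) {
    ConcurrentMap<String, Long> timestamps = new ConcurrentHashMap<>();
    timestamps.put("tx-1", 0L);             // stands in for an expired transaction
    timestamps.put("tx-2", Long.MAX_VALUE); // stands in for a live transaction

    // Weakly consistent iterator: it never throws ConcurrentModificationException,
    // and iterator.remove() is safe even while other threads mutate the map.
    Iterator<Map.Entry<String, Long>> it = timestamps.entrySet().iterator();
    while (it.hasNext()) {
      if (it.next().getValue() == 0L) { // the "expired" check of this sketch
        it.remove();
      }
    }
    System.out.println("remaining: " + timestamps.size()); // remaining: 1
  }
}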
Example No. 2
  private void shutDownGracefully() {
    if (log.isDebugEnabled())
      log.debugf(
          "Wait for on-going transactions to finish for %s.",
          Util.prettyPrintTime(
              configuration.transaction().cacheStopTimeout(), TimeUnit.MILLISECONDS));
    long failTime = currentMillisFromNanotime() + configuration.transaction().cacheStopTimeout();
    boolean txsOnGoing = areTxsOnGoing();
    while (txsOnGoing && currentMillisFromNanotime() < failTime) {
      try {
        Thread.sleep(30);
        txsOnGoing = areTxsOnGoing();
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        if (clustered) {
          log.debugf(
              "Interrupted waiting for on-going transactions to finish. %s local transactions and %s remote transactions",
              localTransactions.size(), remoteTransactions.size());
        } else {
          log.debugf(
              "Interrupted waiting for %s on-going transactions to finish.",
              localTransactions.size());
        }
      }
    }

    if (txsOnGoing) {
      log.unfinishedTransactionsRemain(
          localTransactions == null ? 0 : localTransactions.size(),
          remoteTransactions == null ? 0 : remoteTransactions.size());
    } else {
      log.debug("All transactions terminated");
    }
  }
Example No. 3
 /** Update metrics for alive/dead nodes. */
 private void setAliveDeadMetrics() {
   clusterManager.getMetrics().setAliveNodes(nameToNode.size());
   int totalHosts = hostsReader.getHosts().size();
   if (totalHosts > 0) {
     clusterManager.getMetrics().setDeadNodes(totalHosts - nameToNode.size());
   }
 }
Example No. 4
  @Override
  public double metricValue() {
    // The metric value is the sum of all entity values divided by the
    // number of entities in the dataset.
    double val = 0.0;
    for (Entity e : entityDirectory.values()) val += e.getBasicValue();

    return (entityDirectory.size() == 0) ? 0.0 : (val / (double) entityDirectory.size());
  }
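A self-contained version of the same sum-then-guard averaging pattern, using only the JDK (the map and its contents are illustrative):

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

public class AverageSketch {
  public static void main(String[] args) {
    ConcurrentMap<String, Double> values = new ConcurrentHashMap<>();
    values.put("a", 2.0);
    values.put("b", 4.0);

    double sum = 0.0;
    for (double v : values.values()) sum += v;

    // Guard against an empty map before dividing, as the example above does.
    double avg = values.isEmpty() ? 0.0 : sum / values.size();
    System.out.println(avg); // 3.0
  }
}

Note that on a map under concurrent writes, size() can change between the summation and the division, so there the result is only an approximation.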
Example No. 5
 /** {@inheritDoc} */
 @Override
 public void printMemoryStats() {
   X.println(">>> ");
   X.println(">>> Mvcc manager memory stats [grid=" + cctx.gridName() + ']');
   X.println(">>>   rmvLocksSize: " + rmvLocks.size());
   X.println(">>>   dhtLocCandsSize: " + dhtLocCands.size());
   X.println(">>>   lockedSize: " + locked.size());
   X.println(">>>   futsSize: " + futs.size());
   X.println(">>>   near2dhtSize: " + near2dht.size());
   X.println(">>>   finishFutsSize: " + finishFuts.size());
 }
Example No. 6
  @Test
  public void testSoftKeyWeakValue() throws InterruptedException {
    System.setProperty(FinalizeManager.class.getName() + ".thread.enabled", StringPool.FALSE);

    // new String(...) creates non-interned key instances, so the keys can
    // become unreachable once the local variables are cleared below.
    String testKey1 = new String("testKey1");
    String testKey2 = new String("testKey2");
    Object testValue1 = new Object();
    Object testValue2 = new Object();

    ConcurrentMap<String, Object> concurrentReferenceMap =
        new ConcurrentReferenceKeyHashMap<String, Object>(
            new ConcurrentReferenceValueHashMap<Reference<String>, Object>(
                FinalizeManager.WEAK_REFERENCE_FACTORY),
            FinalizeManager.SOFT_REFERENCE_FACTORY);

    Assert.assertNull(concurrentReferenceMap.put(testKey1, testValue1));
    Assert.assertNull(concurrentReferenceMap.put(testKey2, testValue2));
    Assert.assertEquals(2, concurrentReferenceMap.size());
    Assert.assertTrue(concurrentReferenceMap.containsKey(testKey1));
    Assert.assertTrue(concurrentReferenceMap.containsValue(testValue1));
    Assert.assertSame(testValue1, concurrentReferenceMap.get(testKey1));
    Assert.assertTrue(concurrentReferenceMap.containsKey(testKey2));
    Assert.assertTrue(concurrentReferenceMap.containsValue(testValue2));
    Assert.assertSame(testValue2, concurrentReferenceMap.get(testKey2));

    // Drop the only strong reference to the first key; it is now held only
    // softly, and an ordinary GC does not clear soft references.
    testKey1 = null;

    GCUtil.gc(true);

    ReflectionTestUtil.invoke(FinalizeManager.class, "_pollingCleanup", new Class<?>[0]);

    Assert.assertEquals(2, concurrentReferenceMap.size());
    Assert.assertTrue(concurrentReferenceMap.containsValue(testValue1));
    Assert.assertTrue(concurrentReferenceMap.containsKey(testKey2));
    Assert.assertTrue(concurrentReferenceMap.containsValue(testValue2));
    Assert.assertSame(testValue2, concurrentReferenceMap.get(testKey2));

    GCUtil.fullGC(true);

    ReflectionTestUtil.invoke(FinalizeManager.class, "_pollingCleanup", new Class<?>[0]);

    Assert.assertEquals(1, concurrentReferenceMap.size());
    Assert.assertTrue(concurrentReferenceMap.containsKey(testKey2));
    Assert.assertTrue(concurrentReferenceMap.containsValue(testValue2));
    Assert.assertSame(testValue2, concurrentReferenceMap.get(testKey2));

    // Drop the strong reference to the weakly-held value; the next GC may clear it.
    testValue2 = null;

    GCUtil.gc(true);

    ReflectionTestUtil.invoke(FinalizeManager.class, "_pollingCleanup", new Class<?>[0]);

    Assert.assertTrue(concurrentReferenceMap.isEmpty());
  }
Example No. 7
    private void doFinish() {
      if (finished.compareAndSet(false, true)) {
        Releasables.close(indexShardReference);
        final ShardId shardId = shardIt.shardId();
        final ActionWriteResponse.ShardInfo.Failure[] failuresArray;
        if (!shardReplicaFailures.isEmpty()) {
          int slot = 0;
          failuresArray = new ActionWriteResponse.ShardInfo.Failure[shardReplicaFailures.size()];
          for (Map.Entry<String, Throwable> entry : shardReplicaFailures.entrySet()) {
            RestStatus restStatus = ExceptionsHelper.status(entry.getValue());
            failuresArray[slot++] =
                new ActionWriteResponse.ShardInfo.Failure(
                    shardId.getIndex(),
                    shardId.getId(),
                    entry.getKey(),
                    entry.getValue(),
                    restStatus,
                    false);
          }
        } else {
          failuresArray = ActionWriteResponse.EMPTY;
        }
        finalResponse.setShardInfo(
            new ActionWriteResponse.ShardInfo(totalShards, success.get(), failuresArray));

        listener.onResponse(finalResponse);
      }
    }
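The compareAndSet(false, true) guard is what makes doFinish safe to call from several threads: only the first caller runs the completion logic. A minimal sketch of that finish-once idiom (the class and the printed message are invented):

import java.util.concurrent.atomic.AtomicBoolean;

public class FinishOnceSketch {
  private final AtomicBoolean finished = new AtomicBoolean(false);

  void doFinish() {
    // Only the thread that flips false -> true executes the body.
    if (finished.compareAndSet(false, true)) {
      System.out.println("finished exactly once");
    }
  }

  public static void main(String[] args) throws InterruptedException {
    FinishOnceSketch s = new FinishOnceSketch();
    Thread t1 = new Thread(s::doFinish);
    Thread t2 = new Thread(s::doFinish);
    t1.start();
    t2.start();
    t1.join();
    t2.join(); // "finished exactly once" is printed a single time
  }
}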
Example No. 8
  public Collection<CloneSet> detectCloneSets(final Collection<InstantCodeFragmentInfo> fragments) {
    final ConcurrentMap<Long, Set<InstantCodeFragmentInfo>> fragmentsCategorizedByHash =
        categorizeFragments(fragments);
    final Long[] keys = fragmentsCategorizedByHash.keySet().toArray(new Long[] {});
    final AtomicInteger index = new AtomicInteger(0);
    final CloneSetMakingThread[] makingThreads = new CloneSetMakingThread[threadsCount];
    final Thread[] threads = new Thread[threadsCount];
    final ConcurrentMap<Long, CloneSet> detectedSets = new ConcurrentHashMap<Long, CloneSet>();
    final ConcurrentMap<String, Set<CloneSet>> cloneSetsCategorizedByPath =
        new ConcurrentHashMap<String, Set<CloneSet>>();

    for (int i = 0; i < threadsCount; i++) {
      final CloneSetMakingThread makingThread =
          new CloneSetMakingThread(
              index, keys, fragmentsCategorizedByHash, detectedSets, cloneSetsCategorizedByPath);
      makingThreads[i] = makingThread;
      threads[i] = new Thread(makingThread);
      threads[i].start();
    }

    for (final Thread thread : threads) {
      try {
        thread.join();
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt(); // restore the interrupt flag
        e.printStackTrace();
      }
    }

    System.out.println("\t" + detectedSets.size() + " clone sets detected");
    System.out.println();

    return refineSets(detectedSets, cloneSetsCategorizedByPath);
  }
Example No. 9
  /* ------------------------------------------------------------ */
  private void shrinkCache() {
    // While we need to shrink
    while (_cache.size() > 0
        && (_cachedFiles.get() > _maxCachedFiles || _cachedSize.get() > _maxCacheSize)) {
      // Scan the entire cache and generate an ordered list by last accessed time.
      SortedSet<Content> sorted =
          new TreeSet<Content>(
              new Comparator<Content>() {
                public int compare(Content c1, Content c2) {
                  if (c1._lastAccessed < c2._lastAccessed) return -1;

                  if (c1._lastAccessed > c2._lastAccessed) return 1;

                  if (c1._length < c2._length) return -1;

                  // mirror the length check so the comparator honors its contract
                  if (c1._length > c2._length) return 1;

                  return c1._key.compareTo(c2._key);
                }
              });
      for (Content content : _cache.values()) sorted.add(content);

      // Invalidate least recently used first
      for (Content content : sorted) {
        if (_cachedFiles.get() <= _maxCachedFiles && _cachedSize.get() <= _maxCacheSize) break;
        if (content == _cache.remove(content.getKey())) content.invalidate();
      }
    }
  }
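The heart of shrinkCache is ordering a snapshot of the cache by last access time and evicting from the oldest end until the limits hold. A compact sketch of that idea, with an invented Entry type and limit:

import java.util.Comparator;
import java.util.TreeSet;
import java.util.concurrent.ConcurrentHashMap;

public class EvictionOrderSketch {
  static final class Entry {
    final String key;
    final long lastAccessed;

    Entry(String key, long lastAccessed) {
      this.key = key;
      this.lastAccessed = lastAccessed;
    }
  }

  public static void main(String[] args) {
    ConcurrentHashMap<String, Entry> cache = new ConcurrentHashMap<>();
    cache.put("a", new Entry("a", 30L));
    cache.put("b", new Entry("b", 10L));
    cache.put("c", new Entry("c", 20L));

    // Order a snapshot by last access, oldest first; the key tie-breaker keeps
    // distinct entries from colliding in the TreeSet.
    TreeSet<Entry> sorted = new TreeSet<>(
        Comparator.comparingLong((Entry e) -> e.lastAccessed).thenComparing(e -> e.key));
    sorted.addAll(cache.values());

    // Evict least recently used entries until the cache is small enough.
    int maxEntries = 2;
    for (Entry e : sorted) {
      if (cache.size() <= maxEntries) break;
      cache.remove(e.key);
    }
    System.out.println(cache.size()); // 2 (the oldest entry, "b", was evicted)
  }
}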
Example No. 10
  /** {@inheritDoc} */
  @Override
  public void printMemoryStats() {
    super.printMemoryStats();

    X.println(">>>   threadsSize: " + threads.size());
    X.println(">>>   futsSize: " + futs.size());
  }
Example No. 11
  private void initStorageConfig(String configClassName) {
    // add the configurations of the storage engines needed by user stores
    try {
      Class<?> configClass = ReflectUtils.loadClass(configClassName);
      StorageConfiguration configuration =
          (StorageConfiguration)
              ReflectUtils.callConstructor(
                  configClass,
                  new Class<?>[] {VoldemortConfig.class},
                  new Object[] {voldemortConfig});
      logger.info("Initializing " + configuration.getType() + " storage engine.");
      storageConfigs.put(configuration.getType(), configuration);

      if (voldemortConfig.isJmxEnabled())
        JmxUtils.registerMbean(configuration.getType() + "StorageConfiguration", configuration);
    } catch (IllegalStateException e) {
      logger.error("Error loading storage configuration '" + configClassName + "'.", e);
    }

    if (storageConfigs.size() == 0)
      throw new ConfigurationException("No storage engine has been enabled!");

    // now, add the configurations of the storage engines needed by system
    // stores, if not yet exist
    initSystemStorageConfig();
  }
Example No. 12
  public Runnable prepareMergeRunnable() {
    Map<MapContainer, Collection<Record>> recordMap =
        new HashMap<MapContainer, Collection<Record>>(mapContainers.size());
    InternalPartitionService partitionService = nodeEngine.getPartitionService();
    int partitionCount = partitionService.getPartitionCount();
    Address thisAddress = nodeEngine.getClusterService().getThisAddress();

    for (MapContainer mapContainer : mapContainers.values()) {
      for (int i = 0; i < partitionCount; i++) {
        RecordStore recordStore = getPartitionContainer(i).getRecordStore(mapContainer.getName());
        // add your owned entries to the map so they will be merged
        if (thisAddress.equals(partitionService.getPartitionOwner(i))) {
          Collection<Record> records = recordMap.get(mapContainer);
          if (records == null) {
            records = new ArrayList<Record>();
            recordMap.put(mapContainer, records);
          }
          records.addAll(recordStore.getReadonlyRecordMap().values());
        }
        // clear all records either owned or backup
        recordStore.reset();
      }
    }
    return new Merger(recordMap);
  }
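On Java 8 and later, the get-then-put grouping in the loop above can be written more compactly with computeIfAbsent. A small sketch under that assumption (the names are illustrative):

import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.Map;

public class GroupingSketch {
  public static void main(String[] args) {
    Map<String, Collection<Integer>> recordMap = new HashMap<>();

    // computeIfAbsent replaces the get-null-put sequence of the example above.
    recordMap.computeIfAbsent("mapA", k -> new ArrayList<>()).add(1);
    recordMap.computeIfAbsent("mapA", k -> new ArrayList<>()).add(2);

    System.out.println(recordMap); // {mapA=[1, 2]}
  }
}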
Example No. 13
 /**
  * Gets all the consumer endpoints.
  *
  * @return consumer endpoints
  */
 public static Collection<Endpoint> getConsumerEndpoints() {
   Collection<Endpoint> endpoints = new ArrayList<Endpoint>(CONSUMERS.size());
   for (DirectVmConsumer consumer : CONSUMERS.values()) {
     endpoints.add(consumer.getEndpoint());
   }
   return endpoints;
 }
Example No. 14
  public String toString() {
    StringBuilder sb = new StringBuilder();
    int size = size();
    sb.append(size).append(" messages");
    if (size <= 100)
      sb.append(" in ").append(segments.size()).append(" segments");
    return sb.toString();
  }
Example No. 15
 public Set<Map.Entry<Data, Data>> entrySetData() {
   checkIfLoaded();
   Map<Data, Data> temp = new HashMap<Data, Data>(records.size());
    // Iterate entries directly so a concurrently removed key cannot cause a
    // NullPointerException between keySet() and get().
    for (Map.Entry<Data, Record> entry : records.entrySet()) {
      temp.put(entry.getKey(), mapService.toData(entry.getValue().getValue()));
    }
   return temp.entrySet();
 }
Example No. 16
 public Set<Data> keySet() {
   checkIfLoaded();
   Set<Data> keySet = new HashSet<Data>(records.size());
   for (Data data : records.keySet()) {
     keySet.add(data);
   }
   return keySet;
 }
Example No. 17
 public Collection<Data> valuesData() {
   checkIfLoaded();
   Collection<Data> values = new ArrayList<Data>(records.size());
   for (Record record : records.values()) {
     values.add(mapService.toData(record.getValue()));
   }
   return values;
 }
Example No. 18
 @Override
 public Iterator<Entry> iterator() {
   final Set<Entry> entrySet = new HashSet<Entry>(candidatesAndInitialisers.size());
   for (final Map.Entry<FieldNode, Initialisers> entry : candidatesAndInitialisers.entrySet()) {
     entrySet.add(new DefaultEntry(entry.getKey(), entry.getValue()));
   }
   return entrySet.iterator();
 }
Example No. 19
 public static int getRunningContainers() {
   int count = 0;
   for (int nmId = 0; nmId < NUM_NODEMANAGERS; nmId++) {
     NodeManager nm = yarnCluster.getNodeManager(nmId);
     ConcurrentMap<ContainerId, Container> containers = nm.getNMContext().getContainers();
     count += containers.size();
   }
   return count;
 }
Example No. 20
 @Override
 public int size() {
   readLock.lock();
   try {
     return delegate.size();
   } finally {
     readLock.unlock();
   }
 }
Example No. 21
 protected Integer registerCompletionLatch(Integer countDownLatchId, int count) {
   if (!syncListenerRegistrations.isEmpty()) {
     int size = syncListenerRegistrations.size();
     CountDownLatch countDownLatch = new CountDownLatch(count * size);
     syncLocks.put(countDownLatchId, countDownLatch);
     return countDownLatchId;
   }
   return MutableOperation.IGNORE_COMPLETION;
 }
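The latch above is sized as count * size so a single CountDownLatch can track every completion event for every registered listener. A minimal sketch of that sizing idea (the listener and event counts are invented):

import java.util.concurrent.CountDownLatch;

public class CompletionLatchSketch {
  public static void main(String[] args) throws InterruptedException {
    int listeners = 2; // stands in for syncListenerRegistrations.size()
    int events = 3;    // stands in for the count parameter

    // One shared latch, counted down once per listener per event.
    CountDownLatch latch = new CountDownLatch(listeners * events);

    for (int i = 0; i < listeners * events; i++) {
      new Thread(latch::countDown).start();
    }

    latch.await(); // returns once every listener has seen every event
    System.out.println("all completions observed");
  }
}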
Example No. 22
  /**
   * Sets the change indication to true when the _tasks map is replaced, so that
   * the _tasks status is logged on the next Quartz execution.
   *
   * @param asyncTaskMap - Map to copy to _tasks map.
   */
  private void setNewMap(ConcurrentMap<Guid, SPMTask> asyncTaskMap) {
    // Replace _tasks with asyncTaskMap.
    _tasks = asyncTaskMap;

    // Set the indication to true for logging.
    logChangedMap = true;

    // Log tasks to poll now.
    log.info("Setting new tasks map. The map contains now {} tasks", _tasks.size());
  }
Example No. 23
  public void cleanupStaleTransactions(CacheTopology cacheTopology) {
    int topologyId = cacheTopology.getTopologyId();
    List<Address> members = cacheTopology.getMembers();

    // We only care about transactions that originated before this topology update
    if (getMinTopologyId() >= topologyId) return;

    log.tracef(
        "Checking for transactions originated on leavers. Current members are %s, remote transactions: %d",
        members, remoteTransactions.size());
    Set<GlobalTransaction> toKill = new HashSet<GlobalTransaction>();
    for (Map.Entry<GlobalTransaction, RemoteTransaction> e : remoteTransactions.entrySet()) {
      GlobalTransaction gt = e.getKey();
      RemoteTransaction remoteTx = e.getValue();
      log.tracef("Checking transaction %s", gt);
      // The topology id check is needed for joiners
      if (remoteTx.getTopologyId() < topologyId && !members.contains(gt.getAddress())) {
        toKill.add(gt);
      }
    }

    if (toKill.isEmpty()) {
      log.tracef("No global transactions pertain to originator(s) who have left the cluster.");
    } else {
      log.tracef("%s global transactions pertain to leavers and need to be killed", toKill.size());
    }

    for (GlobalTransaction gtx : toKill) {
      log.tracef("Killing remote transaction originating on leaver %s", gtx);
      RollbackCommand rc = new RollbackCommand(cacheName, gtx);
      rc.init(invoker, icc, TransactionTable.this);
      try {
        rc.perform(null);
        log.tracef("Rollback of transaction %s complete.", gtx);
      } catch (Throwable e) {
        log.unableToRollbackGlobalTx(gtx, e);
      }
    }

    log.tracef(
        "Completed cleaning transactions originating on leavers. Remote transactions remaining: %d",
        remoteTransactions.size());
  }
Example No. 24
 public void copyUserDataTo(UserDataHolderBase other) {
   ConcurrentMap<Key, Object> map = myUserMap;
   if (map == null) {
     other.myUserMap = null;
   } else {
     ConcurrentMap<Key, Object> fresh = createDataMap(map.size());
     fresh.putAll(map);
     other.myUserMap = fresh;
   }
 }
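The copy above pre-sizes the destination from the source's size() and then bulk-copies with putAll. A standalone sketch of that snapshot-copy pattern (the maps and keys are illustrative):

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

public class SnapshotCopySketch {
  public static void main(String[] args) {
    ConcurrentMap<String, Object> source = new ConcurrentHashMap<>();
    source.put("k1", "v1");
    source.put("k2", "v2");

    // Pre-sizing from size() limits rehashing during the copy; on a map under
    // concurrent writes, putAll yields only a best-effort snapshot.
    ConcurrentMap<String, Object> copy = new ConcurrentHashMap<>(source.size());
    copy.putAll(source);

    System.out.println(copy.size()); // 2
  }
}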
Example No. 25
 public ClonePairRefiningThread(
     final AtomicLong index,
     final ConcurrentMap<Long, ClonePair> clonePairs,
     final ConcurrentMap<String, Set<ClonePair>> clonePairsCategorizedByPath) {
   this.index = index;
   this.clonePairs = clonePairs;
   this.clonePairsCategorizedByPath = clonePairsCategorizedByPath;
   this.subsumedClonePairs = new HashSet<Long>();
   this.length = clonePairs.size();
 }
Example No. 26
 /* ------------------------------------------------------------ */
 public void flushCache() {
   if (_cache != null) {
      // Loop until empty: other threads may add entries while we drain.
      while (_cache.size() > 0) {
       for (String path : _cache.keySet()) {
         Content content = _cache.remove(path);
         if (content != null) content.invalidate();
       }
     }
   }
 }
Example No. 27
 public void removeRecordIndex(Long oldValue, Long recordId) {
   ConcurrentMap<Long, Record> records = mapRecords.get(oldValue);
   if (records != null) {
     records.remove(recordId);
     if (records.size() == 0) {
       mapRecords.remove(oldValue);
       sortedSet.remove(oldValue);
     }
   }
 }
Example No. 28
  public void cleanupLeaverTransactions(List<Address> members) {
    // Can happen if the cache is non-transactional
    if (remoteTransactions == null) return;

    if (trace)
      log.tracef(
          "Checking for transactions originated on leavers. Current cache members are %s, remote transactions: %d",
          members, remoteTransactions.size());
    HashSet<Address> membersSet = new HashSet<>(members);
    List<GlobalTransaction> toKill = new ArrayList<>();
    for (Map.Entry<GlobalTransaction, RemoteTransaction> e : remoteTransactions.entrySet()) {
      GlobalTransaction gt = e.getKey();
      if (trace) log.tracef("Checking transaction %s", gt);
      if (!membersSet.contains(gt.getAddress())) {
        toKill.add(gt);
      }
    }

    if (toKill.isEmpty()) {
      if (trace)
        log.tracef("No remote transactions pertain to originator(s) who have left the cluster.");
    } else {
      log.debugf("The originating node left the cluster for %d remote transactions", toKill.size());
      for (GlobalTransaction gtx : toKill) {
        if (partitionHandlingManager.canRollbackTransactionAfterOriginatorLeave(gtx)) {
          log.debugf(
              "Rolling back transaction %s because originator %s left the cluster",
              gtx, gtx.getAddress());
          killTransaction(gtx);
        } else {
          log.debugf(
              "Keeping transaction %s after the originator %s left the cluster.",
              gtx, gtx.getAddress());
        }
      }

      if (trace)
        log.tracef(
            "Completed cleaning transactions originating on leavers. Remote transactions remaining: %d",
            remoteTransactions.size());
    }
  }
Example No. 29
  public FileElement[] getKnownTreeRoots() {
    List<FileElement> files = new ArrayList<FileElement>(myRoots.size());
    for (PsiFile file : myRoots.values()) {
      final FileElement treeElement = ((PsiFileImpl) file).getTreeElement();
      if (treeElement != null) {
        files.add(treeElement);
      }
    }

    return files.toArray(new FileElement[files.size()]);
  }
Example No. 30
 @Override
 public String toString() {
   return String.format(
       "%s@%x{v%d,queueSize=%d,windowSize=%d,streams=%d}",
       getClass().getSimpleName(),
       hashCode(),
       version,
       queue.size(),
       getWindowSize(),
       streams.size());
 }