Example No. 1
  /**
   * If my peer is responsible, I'll issue a put if absent to make sure all replicas are stored.
   *
   * @param locationKey The location key of the data to replicate
   * @return A future that completes once the data has been sent to all replica peers
   */
  protected FutureDone<?> send(final Number160 locationKey) {
    int replicationFactor = replication.replicationFactor() - 1;
    List<PeerAddress> closePeers = new ArrayList<PeerAddress>();
    SortedSet<PeerStatistic> sortedSet =
        peer.peerBean().peerMap().closePeers(locationKey, replicationFactor);
    int count = 0;
    List<FutureDone<?>> retVal = new ArrayList<FutureDone<?>>(replicationFactor);
    for (PeerStatistic peerStatistic : sortedSet) {
      if (replication.rejectReplication(peerStatistic.peerAddress())) {
        continue;
      }

      count++;
      closePeers.add(peerStatistic.peerAddress());
      // this must stay inside the loop, as the data needs to be retained separately for every peer

      Number640 min = new Number640(locationKey, Number160.ZERO, Number160.ZERO, Number160.ZERO);
      Number640 max =
          new Number640(locationKey, Number160.MAX_VALUE, Number160.MAX_VALUE, Number160.MAX_VALUE);
      final NavigableMap<Number640, Data> dataMap = peer.storageLayer().get(min, max, -1, true);

      retVal.add(replicationSender.sendDirect(peerStatistic.peerAddress(), locationKey, dataMap));
      if (count == replicationFactor) {
        break;
      }
    }
    LOG.debug(
        "[storage refresh] I ({}) restore {} to {}", peer.peerAddress(), locationKey, closePeers);
    return FutureDone.whenAll(retVal);
  }
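
A minimal sketch of how a refresh method like this might be driven. The loop below assumes a hypothetical myResponsibilities() helper yielding the keys this peer is responsible for; only the addListener/BaseFutureAdapter pattern is taken from the examples themselves.

  // Hypothetical driver: refresh every key this peer is responsible for and
  // log once all direct replication sends for that key have completed.
  // myResponsibilities() is an assumed helper, not part of the example above.
  for (Number160 locationKey : myResponsibilities()) {
    BaseFuture all = send(locationKey);
    all.addListener(
        new BaseFutureAdapter<BaseFuture>() {
          @Override
          public void operationComplete(BaseFuture future) throws Exception {
            LOG.debug("[storage refresh] all sends done, success={}", future.isSuccess());
          }
        });
  }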
Example No. 2
 /** Flattens a list of Number160-to-PeerStatistic maps into a single collection of peer addresses. */
 public static Collection<PeerAddress> flatten(List<Map<Number160, PeerStatistic>> maps) {
   Collection<PeerAddress> result = new ArrayList<PeerAddress>();
   for (Map<Number160, PeerStatistic> map : maps) {
     for (PeerStatistic peerStatistic : map.values()) {
       result.add(peerStatistic.peerAddress());
     }
   }
   return result;
 }
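
A short usage sketch for flatten. Building the input requires constructing PeerAddress and PeerStatistic instances; the single-argument constructors used below are assumptions about the surrounding library, not shown in the example.

 // Usage sketch: one bag mapping a peer ID to its statistics, flattened into a
 // collection of addresses. The PeerAddress(Number160) and
 // PeerStatistic(PeerAddress) constructors are assumptions for illustration.
 PeerAddress address = new PeerAddress(Number160.createHash("peer-1"));
 Map<Number160, PeerStatistic> bag = new HashMap<Number160, PeerStatistic>();
 bag.put(address.peerId(), new PeerStatistic(address));
 List<Map<Number160, PeerStatistic>> bags = new ArrayList<Map<Number160, PeerStatistic>>();
 bags.add(bag);
 Collection<PeerAddress> all = flatten(bags); // contains address exactly once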
Example No. 3
 @Override
 public void run() {
   synchronized (lock) {
      // limit the number of maintenance pings running in parallel to MAX_PING
     if (shutdown || COUNTER.get() > MAX_PING) {
       return;
     }
     for (Maintainable maintainable : maintainables) {
        PeerStatistic peerStatistic = maintainable.nextForMaintenance(runningFutures.values());
        if (peerStatistic == null) {
          continue;
        }
        BaseFuture future;
        if (peerStatistic.isLocal()) {
          future = peer.localAnnounce().ping().peerAddress(peerStatistic.peerAddress()).start();
          LOG.debug(
              "maintenance local ping from {} to {}",
              peer.peerAddress(),
              peerStatistic.peerAddress());
        } else {
          future = peer.ping().peerAddress(peerStatistic.peerAddress()).start();
          LOG.debug(
              "maintenance ping from {} to {}", peer.peerAddress(), peerStatistic.peerAddress());
        }
        peer.notifyAutomaticFutures(future);
        runningFutures.put(future, peerStatistic.peerAddress());
       COUNTER.incrementAndGet();
       future.addListener(
           new BaseFutureAdapter<BaseFuture>() {
             @Override
             public void operationComplete(BaseFuture future) throws Exception {
               synchronized (lock) {
                 runningFutures.remove(future);
                 COUNTER.decrementAndGet();
               }
             }
           });
     }
   }
 }
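
A maintenance task like this is typically driven by a scheduled executor. A plain-JDK sketch using java.util.concurrent (the one-second period and the maintenanceTask variable name are arbitrary choices for illustration):

 // Invoke the maintenance task's run() once per second until shutdown.
 ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor();
 scheduler.scheduleAtFixedRate(maintenanceTask, 1, 1, TimeUnit.SECONDS);
 // later, during shutdown:
 scheduler.shutdown();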