private void addPlayersToSaveTransaction(
      SaveTransactionBuilder saveTransactionBuilder, NetworkSystem networkSystem) {
    unloadedAndSavingPlayerMap.clear();
    /*
     * New entries might be added concurrently. Transferring entries with putAll + clear could
     * lose new ones added in between the putAll and the clear. By iterating we can make sure that
     * all entries removed from unloadedAndUnsavedPlayerMap get added to unloadedAndSavingPlayerMap.
     */
    Iterator<Map.Entry<String, EntityData.PlayerStore>> unsavedEntryIterator =
        unloadedAndUnsavedPlayerMap.entrySet().iterator();
    while (unsavedEntryIterator.hasNext()) {
      Map.Entry<String, EntityData.PlayerStore> entry = unsavedEntryIterator.next();
      unloadedAndSavingPlayerMap.put(entry.getKey(), entry.getValue());
      unsavedEntryIterator.remove();
    }

    for (Client client : networkSystem.getPlayers()) {
      // If there is a newer undisposed version of the player, we don't need to save the disposed
      // version:
      unloadedAndSavingPlayerMap.remove(client.getId());
      EntityRef character = client.getEntity().getComponent(ClientComponent.class).character;
      saveTransactionBuilder.addLoadedPlayer(client.getId(), createPlayerStore(client, character));
    }

    for (Map.Entry<String, EntityData.PlayerStore> entry : unloadedAndSavingPlayerMap.entrySet()) {
      saveTransactionBuilder.addUnloadedPlayer(entry.getKey(), entry.getValue());
    }
  }
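
A minimal standalone sketch of the transfer idiom above, assuming the maps are ConcurrentHashMaps that another thread keeps writing to (the names source and target and the String value type are hypothetical): putAll + clear can drop a concurrently added entry, while the iterator-based transfer moves each entry exactly once.

import java.util.Iterator;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

class TransferIdiom {
  static final Map<String, String> source = new ConcurrentHashMap<>();
  static final Map<String, String> target = new ConcurrentHashMap<>();

  // Unsafe: an entry put into 'source' between putAll() and clear() is lost.
  static void transferLossy() {
    target.putAll(source);
    source.clear();
  }

  // Safe: entries added during the loop are either moved now or stay in
  // 'source' for the next transfer; nothing is silently dropped.
  static void transferExact() {
    Iterator<Map.Entry<String, String>> it = source.entrySet().iterator();
    while (it.hasNext()) {
      Map.Entry<String, String> e = it.next();
      target.put(e.getKey(), e.getValue());
      it.remove();
    }
  }
}
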
  private void addChunksToSaveTransaction(
      SaveTransactionBuilder saveTransactionBuilder, ChunkProvider chunkProvider) {
    unloadedAndSavingChunkMap.clear();
    /*
     * New entries might be added concurrently. Transferring entries with putAll + clear could
     * lose new ones added in between the putAll and the clear. By iterating we can make sure that
     * all entries removed from unloadedAndUnsavedChunkMap get added to unloadedAndSavingChunkMap.
     */
    Iterator<Map.Entry<Vector3i, CompressedChunkBuilder>> unsavedEntryIterator =
        unloadedAndUnsavedChunkMap.entrySet().iterator();
    while (unsavedEntryIterator.hasNext()) {
      Map.Entry<Vector3i, CompressedChunkBuilder> entry = unsavedEntryIterator.next();
      unloadedAndSavingChunkMap.put(entry.getKey(), entry.getValue());
      unsavedEntryIterator.remove();
    }

    chunkProvider
        .getAllChunks()
        .stream()
        .filter(ManagedChunk::isReady)
        .forEach(
            chunk -> {
              // If there is a newer undisposed version of the chunk, we don't need to save the
              // disposed version:
              unloadedAndSavingChunkMap.remove(chunk.getPosition());
              ChunkImpl chunkImpl =
                  (ChunkImpl) chunk; // this storage manager can only work with ChunkImpls
              saveTransactionBuilder.addLoadedChunk(chunk.getPosition(), chunkImpl);
            });

    for (Map.Entry<Vector3i, CompressedChunkBuilder> entry : unloadedAndSavingChunkMap.entrySet()) {
      saveTransactionBuilder.addUnloadedChunk(entry.getKey(), entry.getValue());
    }
  }
Example #3
  @ManagedOperation(description = "Dumps all locks")
  public String printLocks() {
    StringBuilder sb = new StringBuilder();
    sb.append("server locks:\n");
    for (Map.Entry<String, ServerLock> entry : server_locks.entrySet()) {
      sb.append(entry.getKey()).append(": ").append(entry.getValue()).append("\n");
    }

    sb.append("\nmy locks: ");
    boolean first_element = true;
    for (Map.Entry<String, Map<Owner, ClientLock>> entry : client_locks.entrySet()) {
      if (first_element) first_element = false;
      else sb.append(", ");
      sb.append(entry.getKey()).append(" (");
      Map<Owner, ClientLock> owners = entry.getValue();
      boolean first = true;
      for (Map.Entry<Owner, ClientLock> entry2 : owners.entrySet()) {
        if (first) first = false;
        else sb.append(", ");
        sb.append(entry2.getKey());
        ClientLock cl = entry2.getValue();
        if (!cl.acquired || cl.denied) sb.append(", unlocked");
      }
      sb.append(")");
    }
    return sb.toString();
  }
    private void doFinish() {
      if (finished.compareAndSet(false, true)) {
        Releasables.close(indexShardReference);
        final ShardId shardId = shardIt.shardId();
        final ActionWriteResponse.ShardInfo.Failure[] failuresArray;
        if (!shardReplicaFailures.isEmpty()) {
          int slot = 0;
          failuresArray = new ActionWriteResponse.ShardInfo.Failure[shardReplicaFailures.size()];
          for (Map.Entry<String, Throwable> entry : shardReplicaFailures.entrySet()) {
            RestStatus restStatus = ExceptionsHelper.status(entry.getValue());
            failuresArray[slot++] =
                new ActionWriteResponse.ShardInfo.Failure(
                    shardId.getIndex(),
                    shardId.getId(),
                    entry.getKey(),
                    entry.getValue(),
                    restStatus,
                    false);
          }
        } else {
          failuresArray = ActionWriteResponse.EMPTY;
        }
        finalResponse.setShardInfo(
            new ActionWriteResponse.ShardInfo(totalShards, success.get(), failuresArray));

        listener.onResponse(finalResponse);
      }
    }
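
The finished.compareAndSet(false, true) guard is the standard one-shot completion idiom: however many threads race into doFinish(), exactly one wins the CAS and runs the completion logic. A hedged standalone sketch (class and method names hypothetical):

import java.util.concurrent.atomic.AtomicBoolean;

class OneShotCompletion {
  private final AtomicBoolean finished = new AtomicBoolean(false);

  void finish() {
    // Only the first caller flips false -> true; everyone else is a no-op.
    if (finished.compareAndSet(false, true)) {
      // release resources, build the final response, notify the listener...
    }
  }
}
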
  private void finishSynchronization(String liveID) throws Exception {
    for (JournalContent jc : EnumSet.allOf(JournalContent.class)) {
      Journal journal = journalsHolder.remove(jc);
      journal.synchronizationLock();
      try {
        // files should be already in place.
        filesReservedForSync.remove(jc);
        registerJournal(jc.typeByte, journal);
        journal.stop();
        journal.start();
        journal.loadSyncOnly(JournalState.SYNCING_UP_TO_DATE);
      } finally {
        journal.synchronizationUnlock();
      }
    }
    ByteBuffer buffer = ByteBuffer.allocate(4 * 1024);
    for (Entry<Long, ReplicatedLargeMessage> entry : largeMessages.entrySet()) {
      ReplicatedLargeMessage lm = entry.getValue();
      if (lm instanceof LargeServerMessageInSync) {
        LargeServerMessageInSync lmSync = (LargeServerMessageInSync) lm;
        lmSync.joinSyncedData(buffer);
      }
    }

    journalsHolder = null;
    backupQuorum.liveIDSet(liveID);
    activation.setRemoteBackupUpToDate();
    ActiveMQServerLogger.LOGGER.backupServerSynched(server);
  }
Example #6
  public void cleanupCompletedTransactions() {
    if (!completedTransactions.isEmpty()) {
      try {
        log.tracef(
            "About to cleanup completed transaction. Initial size is %d",
            completedTransactions.size());
        // this iterator is weakly consistent and will never throw ConcurrentModificationException
        Iterator<Map.Entry<GlobalTransaction, Long>> iterator =
            completedTransactions.entrySet().iterator();
        long timeout = configuration.transaction().completedTxTimeout();

        int removedEntries = 0;
        long beginning = timeService.time();
        while (iterator.hasNext()) {
          Map.Entry<GlobalTransaction, Long> e = iterator.next();
          long ageMillis = timeService.timeDuration(e.getValue(), TimeUnit.MILLISECONDS);
          if (ageMillis >= timeout) {
            iterator.remove();
            removedEntries++;
          }
        }
        long duration = timeService.timeDuration(beginning, TimeUnit.MILLISECONDS);

        log.tracef(
            "Finished cleaning up completed transactions. %d transactions were removed, total duration was %d millis, "
                + "current number of completed transactions is %d",
            removedEntries, duration, completedTransactions.size());
      } catch (Exception e) {
        log.errorf(e, "Failed to cleanup completed transactions: %s", e.getMessage());
      }
    }
  }
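
The snippet relies on two ConcurrentHashMap iterator guarantees: the iterator is weakly consistent (it tolerates concurrent puts without throwing), and iterator.remove() is supported, so eviction can run while other threads register new completed transactions. A minimal sketch of the same age-based eviction, assuming a plain wall-clock timestamp as the value (names hypothetical):

import java.util.Iterator;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

class CompletedTxEviction {
  private final Map<String, Long> completed = new ConcurrentHashMap<>();

  void evictOlderThan(long timeoutMillis) {
    long now = System.currentTimeMillis();
    Iterator<Map.Entry<String, Long>> it = completed.entrySet().iterator();
    while (it.hasNext()) {
      Map.Entry<String, Long> e = it.next();
      if (now - e.getValue() >= timeoutMillis) {
        it.remove(); // removes the current entry without invalidating the iterator
      }
    }
  }
}
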
Example #7
  @ManagedOperation(
      description =
          "Sends a STABLE message to all senders. This causes message purging and potential"
              + " retransmissions from senders")
  public void sendStableMessages() {
    for (Map.Entry<Address, ReceiverEntry> entry : recv_table.entrySet()) {
      Address dest = entry.getKey();
      ReceiverEntry val = entry.getValue();
      NakReceiverWindow win = val != null ? val.received_msgs : null;
      if (win != null) {
        long[] tmp = win.getDigest();
        long low = tmp[0], high = tmp[1];

        if (val.last_highest == high) {
          if (val.num_stable_msgs >= val.max_stable_msgs) {
            continue;
          } else val.num_stable_msgs++;
        } else {
          val.last_highest = high;
          val.num_stable_msgs = 1;
        }
        sendStableMessage(dest, val.recv_conn_id, low, high);
      }
    }
  }
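
The last_highest / num_stable_msgs bookkeeping is a resend throttle: while the receiver's high-water mark does not advance, at most max_stable_msgs STABLE messages are sent; as soon as it advances, the counter resets. A hedged sketch of just that throttle (the cap of 5 is an assumption standing in for max_stable_msgs):

class StableThrottle {
  private long lastHighest = -1;
  private int numSent;
  private static final int MAX_STABLE_MSGS = 5; // hypothetical cap

  boolean shouldSend(long highest) {
    if (highest == lastHighest) {
      if (numSent >= MAX_STABLE_MSGS) return false; // stalled: stop resending
      numSent++;
    } else {
      lastHighest = highest; // progress: reset the counter
      numSent = 1;
    }
    return true;
  }
}
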
Example #8
 /**
  * Get any runnable node that is not one of the excluded nodes
  *
  * @param excluded the list of nodes to ignore
  * @return the runnable node, null if no runnable node can be found
  */
 public ClusterNode getRunnableNodeForAny(Set<String> excluded) {
   double avgLoad = loadManager.getAverageLoad(type);
   // Make two passes over the nodes. In the first pass, try to find a
   // node that has lower than average number of grants on it. If that does
   // not find a node, try looking at all nodes.
   for (int pass = 0; pass < 2; pass++) {
     for (Map.Entry<String, NodeContainer> e : hostToRunnableNodes.entrySet()) {
       NodeContainer nodeContainer = e.getValue();
       if (nodeContainer == null) {
         continue;
       }
       synchronized (nodeContainer) {
         if (nodeContainer.isEmpty()) {
           continue;
         }
         for (ClusterNode node : nodeContainer) {
           if (excluded == null || !excluded.contains(node.getHost())) {
             if (resourceLimit.hasEnoughResource(node)) {
               // When pass == 0, try to average out the load.
               if (pass == 0) {
                 if (node.getGrantCount(type) < avgLoad) {
                   return node;
                 }
               } else {
                 return node;
               }
             }
           }
         }
       }
     }
   }
   return null;
 }
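
The two-pass loop is a load-smoothing heuristic: pass 0 only accepts nodes below the average grant count, and pass 1 falls back to any node with enough resources. A condensed generic sketch of that shape (types and predicates hypothetical):

import java.util.List;
import java.util.function.Predicate;

class TwoPassPicker {
  // Pass 0 smooths load by requiring belowAverage; pass 1 accepts any feasible node.
  static <N> N pick(List<N> nodes, Predicate<N> feasible, Predicate<N> belowAverage) {
    for (int pass = 0; pass < 2; pass++) {
      for (N node : nodes) {
        if (feasible.test(node) && (pass == 1 || belowAverage.test(node))) {
          return node;
        }
      }
    }
    return null; // no feasible node at all
  }
}
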
  /**
   * Removes our metadata of all executors registered for the given application, and optionally also
   * deletes the local directories associated with the executors of that application in a separate
   * thread.
   *
   * <p>It is not valid to call registerExecutor() for an executor with this appId after invoking
   * this method.
   */
  public void applicationRemoved(String appId, boolean cleanupLocalDirs) {
    logger.info("Application {} removed, cleanupLocalDirs = {}", appId, cleanupLocalDirs);
    Iterator<Map.Entry<AppExecId, ExecutorShuffleInfo>> it = executors.entrySet().iterator();
    while (it.hasNext()) {
      Map.Entry<AppExecId, ExecutorShuffleInfo> entry = it.next();
      AppExecId fullId = entry.getKey();
      final ExecutorShuffleInfo executor = entry.getValue();

      // Only touch executors associated with the appId that was removed.
      if (appId.equals(fullId.appId)) {
        it.remove();
        if (db != null) {
          try {
            db.delete(dbAppExecKey(fullId));
          } catch (IOException e) {
            logger.error("Error deleting {} from executor state db", appId, e);
          }
        }

        if (cleanupLocalDirs) {
          logger.info("Cleaning up executor {}'s {} local dirs", fullId, executor.localDirs.length);

          // Execute the actual deletion in a different thread, as it may take some time.
          directoryCleaner.execute(
              new Runnable() {
                @Override
                public void run() {
                  deleteExecutorDirs(executor.localDirs);
                }
              });
        }
      }
    }
  }
  @SuppressWarnings("unchecked")
  protected void internalRemoveFromClusterNodeLabels(Collection<String> labelsToRemove) {
    // remove labels from nodes
    for (Map.Entry<String, Host> nodeEntry : nodeCollections.entrySet()) {
      Host host = nodeEntry.getValue();
      if (null != host) {
        host.labels.removeAll(labelsToRemove);
        for (Node nm : host.nms.values()) {
          if (nm.labels != null) {
            nm.labels.removeAll(labelsToRemove);
          }
        }
      }
    }

    // remove labels from node labels collection
    for (String label : labelsToRemove) {
      labelCollections.remove(label);
    }

    // create event to remove labels
    if (null != dispatcher) {
      dispatcher.getEventHandler().handle(new RemoveClusterNodeLabels(labelsToRemove));
    }

    LOG.info("Remove labels: [" + StringUtils.join(labelsToRemove.iterator(), ",") + "]");
  }
Example #11
  public void cleanupTimedOutTransactions() {
    if (trace)
      log.tracef(
          "About to cleanup remote transactions older than %d ms",
          configuration.transaction().completedTxTimeout());
    long beginning = timeService.time();
    long cutoffCreationTime =
        beginning - TimeUnit.MILLISECONDS.toNanos(configuration.transaction().completedTxTimeout());
    List<GlobalTransaction> toKill = new ArrayList<>();

    // Check remote transactions.
    for (Map.Entry<GlobalTransaction, RemoteTransaction> e : remoteTransactions.entrySet()) {
      GlobalTransaction gtx = e.getKey();
      RemoteTransaction remoteTx = e.getValue();
      if (remoteTx != null) {
        if (trace) log.tracef("Checking transaction %s", gtx);
        // Check the time.
        if (remoteTx.getCreationTime() - cutoffCreationTime < 0) {
          long duration =
              timeService.timeDuration(
                  remoteTx.getCreationTime(), beginning, TimeUnit.MILLISECONDS);
          log.remoteTransactionTimeout(gtx, duration);
          toKill.add(gtx);
        }
      }
    }

    // Rollback the orphaned transactions and release any held locks.
    for (GlobalTransaction gtx : toKill) {
      killTransaction(gtx);
    }
  }
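
Note the comparison remoteTx.getCreationTime() - cutoffCreationTime < 0 rather than a direct '<': assuming the timestamps come from a System.nanoTime()-style clock, the absolute values are meaningless and may wrap, so only differences are reliable. A minimal sketch of the idiom:

class MonotonicCompare {
  // Correct even if the timestamps straddle numeric overflow, because the
  // subtraction wraps the same way the clock does.
  static boolean isBefore(long t1, long t2) {
    return t1 - t2 < 0;
  }

  public static void main(String[] args) {
    long start = System.nanoTime();
    long deadline = start + 1_000_000L; // 1 ms later
    System.out.println(isBefore(start, deadline)); // true
  }
}
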
Example #12
 @ManagedOperation(description = "Compacts the retransmission tables")
 public void compact() {
   for (Map.Entry<Address, ReceiverEntry> entry : recv_table.entrySet()) {
     NakReceiverWindow win = entry.getValue().received_msgs;
     win.compact();
   }
 }
Example #13
    public void prepare(String query, InetAddress toExclude) throws InterruptedException {
      for (Map.Entry<Host, HostConnectionPool> entry : pools.entrySet()) {
        if (entry.getKey().getAddress().equals(toExclude)) continue;

        // Let's not wait too long if we can't get a connection. Things
        // will fix themselves once the user tries a query anyway.
        Connection c = null;
        try {
          c = entry.getValue().borrowConnection(200, TimeUnit.MILLISECONDS);
          c.write(new PrepareMessage(query)).get();
        } catch (ConnectionException e) {
          // Again, not being able to prepare the query right now is no big deal, so just ignore
        } catch (BusyConnectionException e) {
          // Same as above
        } catch (TimeoutException e) {
          // Same as above
        } catch (ExecutionException e) {
          // We shouldn't really get exception while preparing a
          // query, so log this (but ignore otherwise as it's not a big deal)
          logger.error(
              String.format(
                  "Unexpected error while preparing query (%s) on %s", query, entry.getKey()),
              e);
        } finally {
          if (c != null) entry.getValue().returnConnection(c);
        }
      }
    }
Example #14
  /**
   * This method writes the state of the NodeManager to disk
   *
   * @param jsonGenerator The instance of JsonGenerator, which will be used to write JSON to disk
   * @throws IOException
   */
  public void write(JsonGenerator jsonGenerator) throws IOException {
    jsonGenerator.writeStartObject();

    // nameToNode begins
    jsonGenerator.writeFieldName("nameToNode");
    jsonGenerator.writeStartObject();
    for (Map.Entry<String, ClusterNode> entry : nameToNode.entrySet()) {
      jsonGenerator.writeFieldName(entry.getKey());
      entry.getValue().write(jsonGenerator);
    }
    jsonGenerator.writeEndObject();
    // nameToNode ends

    // hostsToSessions begins
    // We create a new Map of type <ClusterNode.name, Set<SessionIds>>.
    // The original hostsToSessions map has the ClusterNode as its key, and
    // we do not need to persist the entire ClusterNode again, since we have
    // already done that with nameToNode.
    Map<String, Set<String>> hostsToSessionsMap = new HashMap<String, Set<String>>();
    for (Map.Entry<ClusterNode, Set<String>> entry : hostsToSessions.entrySet()) {
      hostsToSessionsMap.put(entry.getKey().getName(), entry.getValue());
    }
    jsonGenerator.writeObjectField("hostsToSessions", hostsToSessionsMap);
    // hostsToSessions ends

    jsonGenerator.writeObjectField("nameToApps", nameToApps);

    // faultManager is not required

    // We can rebuild the loadManager
    jsonGenerator.writeEndObject();
  }
Example #15
 public Set<Map.Entry<K, V>> entrySet() {
   Set<Map.Entry<K, V>> entries = new HashSet<>();
   for (Map.Entry<K, V> entry : map.entrySet()) {
     entries.add(new Entry<>(entry));
   }
   return entries;
 }
Example #16
  public void set(Map<String, Sink> newSinkMap) {
    try {
      for (Map.Entry<String, Sink> sink : sinkMap.entrySet()) {
        if (!newSinkMap.containsKey(sink.getKey())) { // removed
          Sink removedSink = sinkMap.remove(sink.getKey());
          if (removedSink != null) {
            log.info(String.format("Removing sink '%s'", sink.getKey()));
            removedSink.close();
          }
        }
      }

      for (Map.Entry<String, Sink> sink : newSinkMap.entrySet()) {
        if (!sinkMap.containsKey(sink.getKey())) { // added
          log.info(String.format("Adding sink '%s'", sink.getKey()));
          sink.getValue().open();
          sinkMap.put(sink.getKey(), sink.getValue());
        }
      }

    } catch (Exception e) {
      log.error("Exception on building SinkManager: " + e.getMessage(), e);
      if (sinkMap.isEmpty()) {
        throw new RuntimeException("At least one sink is needed");
      }
    }
  }
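
The method is a reconcile step: diff the live sinkMap against the desired newSinkMap, closing sinks that disappeared and opening sinks that are new. Note that removing from sinkMap while iterating its entrySet is only safe if sinkMap is a concurrent map. A hedged sketch of the same diff with an explicit removeIf (the Sink interface here is a stand-in):

import java.util.Map;

class Reconciler {
  interface Sink { void open(); void close(); }

  static void reconcile(Map<String, Sink> current, Map<String, Sink> desired) {
    // Close and drop anything no longer desired.
    current.entrySet().removeIf(e -> {
      if (desired.containsKey(e.getKey())) return false;
      e.getValue().close();
      return true;
    });
    // Open and add anything newly desired.
    for (Map.Entry<String, Sink> e : desired.entrySet()) {
      if (!current.containsKey(e.getKey())) {
        e.getValue().open();
        current.put(e.getKey(), e.getValue());
      }
    }
  }
}
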
Example #17
 public String getId(ConnectorOutputTableHandle tableHandle) {
   for (Entry<String, ConnectorOutputHandleResolver> entry : handleIdResolvers.entrySet()) {
     if (entry.getValue().canHandle(tableHandle)) {
       return entry.getKey();
     }
   }
   throw new IllegalArgumentException("No connector for output table handle: " + tableHandle);
 }
Example #18
  /** @param reconnectFut Reconnect future. */
  public void onDisconnected(IgniteFuture<?> reconnectFut) {
    CacheException err =
        new CacheException(
            "Query was cancelled, client node disconnected.",
            new IgniteClientDisconnectedException(reconnectFut, "Client node disconnected."));

    for (Map.Entry<Long, QueryRun> e : runs.entrySet()) e.getValue().disconnected(err);
  }
Example #19
 /**
  * Gets component information for an instance.
  *
  * @param instance the instance to find info for, not null
  * @return the component information, not null
  * @throws IllegalArgumentException if no component is available
  */
 public ComponentInfo getInfo(final Object instance) {
   for (final Entry<ComponentKey, Object> entry : _instanceMap.entrySet()) {
     if (entry.getValue() == instance) {
       return getInfo(entry.getKey().getType(), entry.getKey().getClassifier());
     }
   }
   throw new IllegalArgumentException("Unknown component instance: " + instance);
 }
 @Override
 public Iterator<Entry> iterator() {
   final Set<Entry> entrySet = new HashSet<Entry>(candidatesAndInitialisers.size());
   for (final Map.Entry<FieldNode, Initialisers> entry : candidatesAndInitialisers.entrySet()) {
     entrySet.add(new DefaultEntry(entry.getKey(), entry.getValue()));
   }
   return entrySet.iterator();
 }
  private Map<String, Integer> getValues(ConcurrentMap<String, AtomicInteger> src) {
    Map<String, Integer> map = new HashMap<String, Integer>();
    for (Map.Entry<String, AtomicInteger> entry : src.entrySet()) {
      map.put(entry.getKey(), entry.getValue().get());
    }

    return map;
  }
 @Override
 public Set<Entry<TransformerKey, Transformer>> entrySet() {
   Set<Entry<TransformerKey, Transformer>> answer =
       new LinkedHashSet<Entry<TransformerKey, Transformer>>();
   answer.addAll(staticMap.entrySet());
   answer.addAll(super.entrySet());
   return answer;
 }
Example #23
 private ConnectorMetadataEntry lookupConnectorFor(OutputTableHandle tableHandle) {
   for (Entry<String, ConnectorMetadataEntry> entry : connectors.entrySet()) {
     if (entry.getValue().getMetadata().canHandle(tableHandle)) {
       return entry.getValue();
     }
   }
   throw new IllegalArgumentException("No connector for output table handle: " + tableHandle);
 }
Example #24
 public String getId(ConnectorIndexHandle indexHandle) {
   for (Entry<String, ConnectorHandleResolver> entry : handleIdResolvers.entrySet()) {
     if (entry.getValue().canHandle(indexHandle)) {
       return entry.getKey();
     }
   }
   throw new IllegalArgumentException("No connector for index handle: " + indexHandle);
 }
Example #25
 public String getId(ConnectorSplit split) {
   for (Entry<String, ConnectorHandleResolver> entry : handleIdResolvers.entrySet()) {
     if (entry.getValue().canHandle(split)) {
       return entry.getKey();
     }
   }
   throw new IllegalArgumentException("No connector for split: " + split);
 }
Example #26
 public String getId(ConnectorTableLayoutHandle handle) {
   if (handle instanceof LegacyTableLayoutHandle) {
     LegacyTableLayoutHandle legacyHandle = (LegacyTableLayoutHandle) handle;
     for (Entry<String, ConnectorHandleResolver> entry : handleIdResolvers.entrySet()) {
       if (entry.getValue().canHandle(legacyHandle.getTable())) {
         return entry.getKey();
       }
     }
   } else {
     for (Entry<String, ConnectorHandleResolver> entry : handleIdResolvers.entrySet()) {
       if (entry.getValue().canHandle(handle)) {
         return entry.getKey();
       }
     }
   }
   throw new IllegalArgumentException("No connector for table handle: " + handle);
 }
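
Examples #17, #24, #25, and #26 all repeat the same linear scan over handleIdResolvers. A hedged generic form that would collapse them (the BiPredicate stands in for the various canHandle overloads; names are hypothetical):

import java.util.Map;
import java.util.function.BiPredicate;

class ResolverLookup {
  static <R> String idFor(Map<String, R> resolvers, BiPredicate<R, Object> canHandle, Object handle) {
    for (Map.Entry<String, R> entry : resolvers.entrySet()) {
      if (canHandle.test(entry.getValue(), handle)) {
        return entry.getKey();
      }
    }
    throw new IllegalArgumentException("No connector for handle: " + handle);
  }
}
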
Example #27
  public String reportSinkStat() {
    StringBuilder sb = new StringBuilder();
    for (Map.Entry<String, Sink> entry : sinkMap.entrySet()) {
      sb.append(entry.getKey()).append(':').append(entry.getValue().getStat()).append("\n\n");
    }

    return sb.toString();
  }
 @Override
 public Operation getPostJoinOperation() {
   PostJoinCacheOperation postJoinCacheOperation = new PostJoinCacheOperation();
   for (Map.Entry<String, CacheConfig> cacheConfigEntry : configs.entrySet()) {
     postJoinCacheOperation.addCacheConfig(cacheConfigEntry.getValue());
   }
   return postJoinCacheOperation;
 }
Example #29
 @PreDestroy
 public void shutdown() {
   log.info("SinkManager shutting down");
   for (Map.Entry<String, Sink> entry : sinkMap.entrySet()) {
     entry.getValue().close();
   }
   sinkMap.clear();
 }
Example #30
 @Override
 public Map<String, String> getCatalogNames() {
   ImmutableMap.Builder<String, String> catalogsMap = ImmutableMap.builder();
   for (Map.Entry<String, ConnectorMetadataEntry> entry : connectors.entrySet()) {
     catalogsMap.put(entry.getKey(), entry.getValue().getConnectorId());
   }
   return catalogsMap.build();
 }