private boolean registerSmelted(SmeltedRecipe recipe) {
    // Tracks whether any target set already contained this recipe.
    boolean failed = false;
    // Recipes are bucketed by their ingredient count; compute it once.
    int ingredientCount = recipe.getIngredients().size();
    Plugin plugin = recipe.getPlugin();
    if (plugin != null) {
      // Per-plugin registry: plugin -> ingredient count -> recipes.
      ConcurrentHashMap<Integer, Set<SmeltedRecipe>> recipesMap =
          (ConcurrentHashMap<Integer, Set<SmeltedRecipe>>) registeredSmeltedRecipes.get(plugin);
      if (recipesMap == null) {
        recipesMap = new ConcurrentHashMap<Integer, Set<SmeltedRecipe>>();
        registeredSmeltedRecipes.put(plugin, recipesMap);
      }
      // Reuse the local map reference instead of re-fetching it from the
      // registry on every access (the original did three redundant lookups).
      Set<SmeltedRecipe> pluginRecipes = recipesMap.get(ingredientCount);
      if (pluginRecipes == null) {
        pluginRecipes =
            Collections.newSetFromMap(new ConcurrentHashMap<SmeltedRecipe, Boolean>());
        recipesMap.put(ingredientCount, pluginRecipes);
      }
      failed = !pluginRecipes.add(recipe) || failed;
    }

    // Global registry, also bucketed by ingredient count.
    Set<SmeltedRecipe> recipes = allSmeltedRecipes.get(ingredientCount);
    if (recipes == null) {
      recipes = Collections.newSetFromMap(new ConcurrentHashMap<SmeltedRecipe, Boolean>());
      allSmeltedRecipes.put(ingredientCount, recipes);
    }
    // NOTE(review): the check-then-put sequences above are not atomic; if this
    // method runs concurrently, a computeIfAbsent-style insert would avoid
    // losing a racing thread's set — confirm the threading model.
    failed = !recipes.add(recipe) || failed;
    return !failed;
  }
Example #2
0
  @Override
  public void subscribe(
      String orgDomain,
      String sessionId,
      int processId,
      String callback,
      Map<String, Object> options) {
    // Lazily create the org's session set; putIfAbsent keeps the winner's set
    // under contention and we re-read below before using it.
    if (orgSessionMap.get(orgDomain) == null)
      orgSessionMap.putIfAbsent(
          orgDomain, Collections.newSetFromMap(new ConcurrentHashMap<String, Boolean>()));

    // BUG FIX: the presence check must use the same key (callback) that the
    // entry is stored and later read under; the original checked sessionId,
    // which could leave the callback entry missing and NPE on bindings.add().
    if (pushBindingsMap.get(callback) == null)
      pushBindingsMap.putIfAbsent(
          callback, Collections.newSetFromMap(new ConcurrentHashMap<Binding, Boolean>()));

    Set<String> sessions = orgSessionMap.get(orgDomain);
    sessions.add(sessionId);

    Set<Binding> bindings = pushBindingsMap.get(callback);
    Binding binding = new Binding(sessionId, processId);
    bindings.add(binding);

    // Record the push condition so later pushes can be matched to this subscriber.
    PushCondition condition =
        new PushCondition(orgDomain, binding.sessionId, binding.processId, callback, options);
    pushConditions.put(condition.getKey(), condition);

    logger.trace(
        "kraken msgbus: subscribe push, org [{}], session [{}], callback [{}]",
        new Object[] {orgDomain, sessionId, callback});
  }
  @Test
  public void testCheck() {
    Random rng = random();
    // Reference implementation: JDK identity-based set.
    Set<Object> reference = Collections.newSetFromMap(new IdentityHashMap<Object, Boolean>());
    // Implementation under test.
    RamUsageEstimator.IdentityHashSet<Object> tested =
        new RamUsageEstimator.IdentityHashSet<Object>();

    final int iterations = 100000;
    final int valueRange = 256;
    for (int i = 0; i < iterations; i++) {
      // Small range: some Integers are interned/reused, so identity collisions occur.
      Integer candidate = rng.nextInt(valueRange);

      // Both sets must agree on membership before and after each insertion.
      Assert.assertEquals(reference.contains(candidate), tested.contains(candidate));
      Assert.assertEquals(reference.add(candidate), tested.add(candidate));
    }

    // Iterating the tested set must yield exactly the reference contents.
    Set<Object> collected = Collections.newSetFromMap(new IdentityHashMap<Object, Boolean>());
    for (Object element : tested) {
      collected.add(element);
    }
    Assert.assertEquals(collected, reference);
  }
 private BlockPoolSliceStorage() {
   super(NodeType.DATA_NODE);
   // Concurrent sets tracking which storage directories do / do not carry the
   // rolling-upgrade marker. ConcurrentHashMap.newKeySet() is equivalent to
   // Collections.newSetFromMap(new ConcurrentHashMap<>()).
   storagesWithRollingUpgradeMarker = ConcurrentHashMap.newKeySet();
   storagesWithoutRollingUpgradeMarker = ConcurrentHashMap.newKeySet();
 }
Example #5
0
  /**
   * Constructs a new game.
   *
   * <p>NOTE(review): the previous javadoc described a {@code web} flag for applet mode, but this
   * constructor takes no parameters and hard-codes {@code Game.applet = false}, so the applet
   * branch below is currently dead code — confirm whether applet support should be removed.
   */
  public Game() {
    Game.applet = false; // applet mode disabled; makes the if (applet) branch unreachable
    canvas = new Canvas(this);
    // Shape sets may be traversed while being mutated, hence concurrent-backed sets.
    solidShapes = Collections.newSetFromMap(new ConcurrentHashMap<Shape, Boolean>());
    allShapes = Collections.newSetFromMap(new ConcurrentHashMap<Shape, Boolean>());

    // TODO: sort out which data structures actually have to support concurrency
    layerContents = new ConcurrentHashMap<Integer, java.util.List<Shape>>();
    layers = new CopyOnWriteArrayList<Integer>();
    layerOf = new ConcurrentHashMap<Shape, Integer>();

    counters = new ArrayList<Counter>();

    // Wire input handling either to the applet surface or to a standalone frame.
    Mouse mouse = new Mouse();
    if (applet) {
      // Unreachable while applet is forced to false above.
      addMouseMotionListener(mouse);
      addMouseListener(mouse);
      addKeyListener(new Keyboard());
    } else {
      frame = new JFrame();
      frame.addMouseMotionListener(mouse);
      frame.addMouseListener(mouse);
      frame.addKeyListener(new Keyboard());
      frame.setDefaultCloseOperation(JFrame.EXIT_ON_CLOSE);
    }

    setDefaults();
  }
 BlockPoolSliceStorage(int namespaceID, String bpID, long cTime, String clusterId) {
   super(NodeType.DATA_NODE);
   this.namespaceID = namespaceID;
   this.blockpoolID = bpID;
   this.cTime = cTime;
   this.clusterID = clusterId;
   // Concurrent sets tracking which storage directories do / do not carry the
   // rolling-upgrade marker. ConcurrentHashMap.newKeySet() is equivalent to
   // Collections.newSetFromMap(new ConcurrentHashMap<>()).
   storagesWithRollingUpgradeMarker = ConcurrentHashMap.newKeySet();
   storagesWithoutRollingUpgradeMarker = ConcurrentHashMap.newKeySet();
 }
Example #7
0
  /**
   * Test that compaction should execute silently when there is no entry logs to compact. {@see
   * https://issues.apache.org/jira/browse/BOOKKEEPER-700}
   */
  @Test(timeout = 60000)
  public void testWhenNoLogsToCompact() throws Exception {
    // Drop the default test fixture; this test builds its own minimal bookie.
    tearDown();
    ServerConfiguration serverConf = TestBKConfiguration.newServerConfiguration();
    File ledgerDir = createTempDir("bkTest", ".dir");
    File currentDir = Bookie.getCurrentDirectory(ledgerDir);
    Bookie.checkDirectoryStructure(currentDir);
    serverConf.setLedgerDirNames(new String[] {ledgerDir.toString()});

    LedgerDirsManager dirsManager = new LedgerDirsManager(serverConf, serverConf.getLedgerDirs());
    final Set<Long> activeLedgers =
        Collections.newSetFromMap(new ConcurrentHashMap<Long, Boolean>());
    ActiveLedgerManager ledgerManager = getActiveLedgerManager(activeLedgers);
    // Checkpointing is irrelevant for this scenario; a no-op source suffices.
    CheckpointSource noopCheckpointSource =
        new CheckpointSource() {
          @Override
          public Checkpoint newCheckpoint() {
            return null;
          }

          @Override
          public void checkpointComplete(Checkpoint checkpoint, boolean compact)
              throws IOException {}
        };
    InterleavedLedgerStorage storage =
        new InterleavedLedgerStorage(
            serverConf,
            ledgerManager,
            dirsManager,
            dirsManager,
            noopCheckpointSource,
            NullStatsLogger.INSTANCE);

    // With no entry logs present, compaction must complete without throwing.
    storage.gcThread.doCompactEntryLogs(0.1);
  }
 private void insertInTrailingEntityMap(Object entity) {
   // Nothing to index when the solution has no chained variables.
   if (!hasChainedVariables) {
     return;
   }
   for (Map.Entry<PlanningVariableDescriptor, Map<Object, Set<Object>>> entry :
       chainedVariableToTrailingEntitiesMap.entrySet()) {
     PlanningVariableDescriptor variableDescriptor = entry.getKey();
     // Skip descriptors whose entity class does not cover this entity.
     if (!variableDescriptor.getPlanningEntityDescriptor().appliesToPlanningEntity(entity)) {
       continue;
     }
     Object value = variableDescriptor.getValue(entity);
     Map<Object, Set<Object>> valueToTrailingEntityMap = entry.getValue();
     Set<Object> trailingEntities = valueToTrailingEntityMap.get(value);
     if (trailingEntities == null) {
       // Identity set: trailing entities are tracked by reference, not equals().
       trailingEntities = Collections.newSetFromMap(new IdentityHashMap<Object, Boolean>());
       valueToTrailingEntityMap.put(value, trailingEntities);
     }
     // Double insertion indicates corrupted bookkeeping; fail loudly.
     if (!trailingEntities.add(entity)) {
       throw new IllegalStateException(
           "The ScoreDirector ("
               + getClass()
               + ") is corrupted,"
               + " because the entity ("
               + entity
               + ") for chained planningVariable ("
               + variableDescriptor.getVariableName()
               + ") cannot be inserted: it was already inserted.");
     }
   }
 }
 public void insertInTrailingEntityMap(
     PlanningVariableDescriptor variableDescriptor, Object entity) {
   // Only chained variables maintain trailing-entity bookkeeping.
   if (!(hasChainedVariables && variableDescriptor.isChained())) {
     return;
   }
   Map<Object, Set<Object>> valueToTrailingEntityMap =
       trailingEntitiesMap.get(variableDescriptor);
   // Every chained descriptor must have been registered up front.
   if (valueToTrailingEntityMap == null) {
     throw new IllegalStateException(
         "The ScoreDirector ("
             + getClass()
             + ") is bugged,"
             + " because the chained planningVariable ("
             + variableDescriptor.getVariableName()
             + ") was not found in the trailingEntitiesMap.");
   }
   Object value = variableDescriptor.getValue(entity);
   Set<Object> trailingEntities = valueToTrailingEntityMap.get(value);
   if (trailingEntities == null) {
     // Identity set: trailing entities are tracked by reference, not equals().
     trailingEntities = Collections.newSetFromMap(new IdentityHashMap<Object, Boolean>());
     valueToTrailingEntityMap.put(value, trailingEntities);
   }
   // Double insertion indicates corrupted bookkeeping; fail loudly.
   if (!trailingEntities.add(entity)) {
     throw new IllegalStateException(
         "The ScoreDirector ("
             + getClass()
             + ") is corrupted,"
             + " because the entity ("
             + entity
             + ") for chained planningVariable ("
             + variableDescriptor.getVariableName()
             + ") cannot be inserted: it was already inserted.");
   }
 }
Example #10
0
 private MultiTransferContext(Connection connection, Session session) {
   // Unique id per context instance.
   this.id = count.incrementAndGet();
   this.connection = connection;
   this.session = session;
   // Thread-safe set of remote directories already known to exist;
   // newKeySet() == newSetFromMap(new ConcurrentHashMap<>()).
   this.existingDirectories = ConcurrentHashMap.newKeySet();
 }
Example #11
0
 /**
  * Ctor.
  *
  * @param prof Current Profile
  */
 public StartsDockerDaemon(final Profile prof) {
   this.profile = prof;
   this.client = DockerClientBuilder.getInstance().build();
   this.containers =
       Collections.newSetFromMap(
           // @checkstyle MagicNumber (1 line)
           new ConcurrentHashMap<CreateContainerResponse, Boolean>(1, 0.9f, 1));
 }
Example #12
0
  static {
    // Identity-based sets: argument classes are compared by reference.
    allowedTokenizerArgs = Collections.newSetFromMap(new IdentityHashMap<Class<?>, Boolean>());
    allowedTokenFilterArgs = Collections.newSetFromMap(new IdentityHashMap<Class<?>, Boolean>());
    allowedCharFilterArgs = Collections.newSetFromMap(new IdentityHashMap<Class<?>, Boolean>());

    // Every type with a registered producer is allowed for all component kinds.
    allowedTokenizerArgs.addAll(argProducers.keySet());
    allowedTokenFilterArgs.addAll(argProducers.keySet());
    allowedCharFilterArgs.addAll(argProducers.keySet());

    // Tokenizer-specific argument types.
    allowedTokenizerArgs.add(Reader.class);
    allowedTokenizerArgs.add(AttributeFactory.class);
    allowedTokenizerArgs.add(AttributeSource.class);

    // TokenFilter-specific argument types.
    allowedTokenFilterArgs.add(TokenStream.class);
    // TODO: fix this one, thats broken:
    allowedTokenFilterArgs.add(CommonGramsFilter.class);

    // CharFilter-specific argument types.
    allowedCharFilterArgs.add(Reader.class);
  }
public final class ConsoleAccessRegistry {

  private enum EventHandler {
    H;

    @SubscribeEvent
    public void unloadWorld(WorldEvent.Unload e) {
      synchronized (mutex) {
        // Drop every console access registered against the unloading world,
        // and remember the world so late register() calls are rejected.
        cs.values().removeAll(worldMapping.removeAll(e.world));
        unloadedWorlds.add(e.world);
      }
    }
  }

  static {
    MinecraftForge.EVENT_BUS.register(EventHandler.H);
  }

  /** Guards all static registry state below. */
  private static final Object mutex = new Object();

  private static final Map<UUID, IConsoleAccess> cs = Maps.newHashMap();
  private static final SetMultimap<World, IConsoleAccess> worldMapping =
      MultimapBuilder.hashKeys().hashSetValues().build();
  // Weak keys: unloaded worlds may be garbage-collected.
  private static final Set<World> unloadedWorlds = Collections.newSetFromMap(new WeakHashMap<>());

  private ConsoleAccessRegistry() {
    // Static registry; not instantiable.
  }

  /** Deterministic id derived from world name, position and console name. */
  // NOTE(review): getBytes() uses the platform charset — confirm ids need not
  // be stable across machines with different default encodings.
  private static UUID fabricUUID(BlockPosition bp, World world, String name) {
    return UUID.nameUUIDFromBytes((world.getWorldInfo().getWorldName() + bp + name).getBytes());
  }

  /** Registers a console access; returns its id, or null if the world already unloaded. */
  public static UUID register(BlockPosition bp, World world, String name, IConsoleAccess c) {
    synchronized (mutex) {
      if (unloadedWorlds.contains(world)) return null;
      UUID id = fabricUUID(bp, world, name);

      cs.put(id, c);
      worldMapping.put(world, c);

      return id;
    }
  }

  /** Removes the console access registered under the given id. */
  public static void unregister(UUID id, World world) {
    synchronized (mutex) {
      // BUG FIX: worldMapping holds IConsoleAccess values, so the previous
      // worldMapping.get(world).remove(id) removed a UUID from a set of
      // IConsoleAccess and was always a no-op. Remove the access object itself.
      IConsoleAccess c = cs.remove(id);
      if (c != null) {
        worldMapping.remove(world, c);
      }
    }
  }

  /** Looks up a console access by id, or null if not registered. */
  public static IConsoleAccess get(UUID id) {
    synchronized (mutex) {
      return cs.get(id);
    }
  }
}
 private void registerAuthenticatedSession(WsSession wsSession, String httpSessionId) {
   // Fetch the WS-session set for this HTTP session, creating it on first use.
   // putIfAbsent + re-get keeps the winning thread's set under contention.
   Set<WsSession> sessions = authenticatedSessions.get(httpSessionId);
   if (sessions == null) {
     Set<WsSession> created =
         Collections.newSetFromMap(new ConcurrentHashMap<WsSession, Boolean>());
     authenticatedSessions.putIfAbsent(httpSessionId, created);
     sessions = authenticatedSessions.get(httpSessionId);
   }
   sessions.add(wsSession);
 }
 /**
  * Return the connections associated to the given pool, creating the set on first use.
  *
  * @param pool the pool whose worker connections are requested
  * @return the (possibly newly created) connection set for the pool
  */
 protected Set<WorkerConnection> getPoolWorkerConnections(Pool pool) {
   Set<WorkerConnection> workerConnections = poolWorkerConnections.get(pool);
   if (workerConnections == null) {
     // BUG FIX: a plain put() could clobber a set installed by a racing
     // thread, silently dropping its connections. putIfAbsent keeps the
     // winner's set (same pattern used elsewhere in this codebase).
     Set<WorkerConnection> created =
         Collections.newSetFromMap(new ConcurrentHashMap<WorkerConnection, Boolean>());
     Set<WorkerConnection> existing = poolWorkerConnections.putIfAbsent(pool, created);
     workerConnections = (existing != null) ? existing : created;
   }
   return workerConnections;
 }
Example #16
0
  /**
   * Creates a new host monitor.
   *
   * @param packetService packet service used to send packets on the data plane
   * @param hostManager host manager used to look up host information and probe existing hosts
   * @param interfaceService interface service for interface information
   */
  public HostMonitor(
      PacketService packetService, HostManager hostManager, InterfaceService interfaceService) {
    this.packetService = packetService;
    this.hostManager = hostManager;
    this.interfaceService = interfaceService;

    // Both structures may be touched from multiple threads, hence concurrent.
    this.monitoredAddresses = Collections.newSetFromMap(new ConcurrentHashMap<>());
    this.hostProviders = new ConcurrentHashMap<>();
  }
Example #17
0
/**
 * Local client.
 *
 * @author <a href="http://github.com/kuujo">Jordan Halterman</a>
 */
public class LocalClient implements Client {
  private final UUID id;
  private final LocalServerRegistry registry;
  private final Context context;
  // Connections are added/removed concurrently; backed by a ConcurrentHashMap.
  private final Set<LocalConnection> connections =
      Collections.newSetFromMap(new ConcurrentHashMap<>());

  public LocalClient(UUID id, LocalServerRegistry registry, Serializer serializer) {
    this.id = id;
    this.registry = registry;
    this.context = new SingleThreadContext("test-" + id.toString(), serializer.clone());
  }

  @Override
  public UUID id() {
    return id;
  }

  /** Returns the current execution context, failing fast when called off-thread. */
  private Context getContext() {
    Context context = Context.currentContext();
    if (context == null) {
      throw new IllegalStateException("not on a Copycat thread");
    }
    return context;
  }

  @Override
  public CompletableFuture<Connection> connect(InetSocketAddress address) {
    Context context = getContext();
    LocalServer server = registry.get(address);
    if (server == null) {
      return Futures.exceptionalFutureAsync(
          new TransportException("failed to connect"), context.executor());
    }

    LocalConnection connection = new LocalConnection(id, this.context, connections);
    connections.add(connection);
    return server.connect(connection).thenApplyAsync(v -> connection, context.executor());
  }

  @Override
  public CompletableFuture<Void> close() {
    CompletableFuture<Void> future = new CompletableFuture<>();
    Context context = getContext();

    // BUG FIX: sizing an array from connections.size() and then iterating the
    // concurrent set is racy — a concurrent add could overflow the array and a
    // concurrent remove could leave null slots (NPE inside allOf). Snapshot
    // the close futures via a stream instead.
    CompletableFuture<?>[] futures =
        connections.stream().map(LocalConnection::close).toArray(CompletableFuture[]::new);
    CompletableFuture.allOf(futures).thenRunAsync(() -> future.complete(null), context.executor());
    return future;
  }
}
Example #18
0
 /**
  * Records that the given user interacted with the given item. Null pairs are ignored.
  */
 private void add(Long user, Long item) {
   if (user == null || item == null) {
     return;
   }
   // BUG FIX: the previous get()+put() could lose a set created by a racing
   // thread (and any items already added to it). computeIfAbsent installs the
   // set atomically (assuming model is a ConcurrentMap, as the concurrent
   // value sets suggest — confirm).
   Set<Long> items =
       model.computeIfAbsent(
           user, k -> Collections.newSetFromMap(new ConcurrentHashMap<Long, Boolean>()));
   items.add(item);
 }
 /**
  * Set the context and system services so they do not need to be retrieved every time.
  *
  * @param context The context to get the subscription and telephony manager for.
  */
 private void setup(Context context) {
   if (mContext != null) {
     // Already initialized; keep the first context and its services.
     return;
   }
   mContext = context;
   mSubscriptionManager = SubscriptionManager.from(context);
   mTelephonyManager = (TelephonyManager) context.getSystemService(Context.TELEPHONY_SERVICE);
   // Sized/tuned for a small number of phone accounts with a single writer.
   mActiveVvmSources =
       Collections.newSetFromMap(new ConcurrentHashMap<PhoneAccountHandle, Boolean>(8, 0.9f, 1));
   mPhoneStateListenerMap =
       new ConcurrentHashMap<PhoneAccountHandle, PhoneStateListener>(8, 0.9f, 1);
 }
 @Override
 public void addChannelListener(final Channel channel, final ChannelListener channelListener) {
   // Fetch or lazily create the listener set; on a lost putIfAbsent race,
   // adopt the set installed by the other thread.
   Set<ChannelListener> registered = this.channelListenerRegistry.get(channel);
   if (registered == null) {
     Set<ChannelListener> created =
         Collections.newSetFromMap(new ConcurrentHashMap<ChannelListener, Boolean>());
     Set<ChannelListener> raced = this.channelListenerRegistry.putIfAbsent(channel, created);
     registered = (raced == null) ? created : raced;
   }
   registered.add(channelListener);
 }
Example #21
0
  /**
   * Removes all push state (bindings, session registration, conditions) tied to the
   * given session of the given org.
   */
  private void cleanSession(String orgDomain, String sessionId) {
    // Drop this session's bindings from every callback entry; remember the
    // callbacks whose binding sets become empty so they can be removed.
    Set<String> emptyCallbacks = Collections.newSetFromMap(new HashMap<String, Boolean>());
    for (Entry<String, Set<Binding>> entry : pushBindingsMap.entrySet()) {
      Set<Binding> bindings = entry.getValue();
      bindings.removeIf(binding -> binding.sessionId.equals(sessionId));
      if (bindings.isEmpty()) emptyCallbacks.add(entry.getKey());
    }
    for (String callback : emptyCallbacks) pushBindingsMap.remove(callback);

    Set<String> sessions = orgSessionMap.get(orgDomain);
    if (sessions != null) sessions.remove(sessionId);

    // BUG FIX: the previous code compared Strings with ==, which almost never
    // matches and leaked stale conditions; use equals(). removeIf on the
    // key-set view also avoids ConcurrentModificationException if
    // pushConditions is not a concurrent map.
    pushConditions
        .keySet()
        .removeIf(
            key -> key.getOrgDomain().equals(orgDomain) && key.getSessionId().equals(sessionId));
  }
 /** Returns a deep-enough copy of this node: labels and resource are cloned, id shared. */
 public Node copy() {
   Node clone = new Node(nodeId);
   if (labels == null) {
     clone.labels = null;
   } else {
     // Copy into a fresh concurrent set so the clone is independently mutable.
     clone.labels = Collections.newSetFromMap(new ConcurrentHashMap<String, Boolean>());
     clone.labels.addAll(labels);
   }
   clone.resource = Resources.clone(resource);
   clone.running = running;
   return clone;
 }
Example #23
0
public class SubTileBergamute extends SubTileFunctional {
  private static final int RANGE = 4;
  // Weak set: flowers are dropped automatically once their tiles are collected.
  // NOTE(review): WeakHashMap is not thread-safe; onUpdate only touches it on
  // the client side — confirm no other thread reaches existingFlowers.
  private static final Set<SubTileBergamute> existingFlowers =
      Collections.newSetFromMap(new WeakHashMap<>());

  @Override
  public void onUpdate() {
    super.onUpdate();

    if (supertile.getWorld().isRemote) {
      // Set.add is already a no-op for present elements; the previous
      // contains() pre-check was redundant.
      existingFlowers.add(this);
    }
  }

  // todo seems expensive when we have lots of sounds cache maybe?
  /** Finds any unpowered, still-valid flower within RANGE of the given point, or null. */
  protected static SubTileBergamute getBergamuteNearby(float x, float y, float z) {
    return existingFlowers
        .stream()
        .filter(f -> f.redstoneSignal == 0)
        // Guard against stale entries whose tile has been replaced in-world.
        .filter(f -> f.supertile.getWorld().getTileEntity(f.supertile.getPos()) == f.supertile)
        .filter(f -> f.supertile.getDistanceSq(x, y, z) <= RANGE * RANGE)
        .findAny()
        .orElse(null);
  }

  @Override
  public boolean acceptsRedstone() {
    return true;
  }

  @Override
  public int getMaxMana() {
    return 1;
  }

  @Override
  public int getColor() {
    return 0xF46C6C;
  }

  @Override
  public RadiusDescriptor getRadius() {
    return new RadiusDescriptor.Circle(toBlockPos(), RANGE);
  }

  @Override
  public LexiconEntry getEntry() {
    return LexiconData.bergamute;
  }
}
Example #24
0
  /** Records the site's id in a transaction-scoped resource before the site node is deleted. */
  public void beforeDeleteNodeSite(NodeRef siteNodeRef) {
    String siteShortName = (String) nodeService.getProperty(siteNodeRef, ContentModel.PROP_NAME);

    // Lazily create the transaction-bound set of deleted site ids.
    Set<String> deletedSiteIds =
        (Set<String>) AlfrescoTransactionSupport.getResource(KEY_DELETED_SITE_IDS);
    if (deletedSiteIds == null) {
      // Concurrent-safe set (newSetFromMap works on Java 6+).
      deletedSiteIds = Collections.newSetFromMap(new ConcurrentHashMap<String, Boolean>());
      AlfrescoTransactionSupport.bindResource(KEY_DELETED_SITE_IDS, deletedSiteIds);
    }
    deletedSiteIds.add(siteShortName);

    // Fire the delete-site listener when the transaction completes.
    AlfrescoTransactionSupport.bindListener(deleteSiteTransactionListener);
  }
Example #25
0
/**
 * Component tracking which skills an entity has been trained in.
 *
 * <p>Backed by a concurrent set, so membership may be queried and mutated from
 * multiple threads. The field name is kept as-is in case the component system
 * persists components by reflected field name — NOTE(review): confirm.
 */
public class SkillsComponent extends EComponent {
  // Set of trained skill classes; concurrent for cross-thread access.
  private Set<Class<? extends AnnotatedSkill>> trainedSkills =
      Collections.newSetFromMap(new ConcurrentHashMap<Class<? extends AnnotatedSkill>, Boolean>());

  /** Records that the given skill has been trained (idempotent). */
  public <T extends AnnotatedSkill> void addSkill(Class<T> skill) {
    trainedSkills.add(skill);
  }

  /** Forgets the given skill; returns true if it was previously trained. */
  public <T extends AnnotatedSkill> boolean removeSkill(Class<T> skill) {
    return trainedSkills.remove(skill);
  }

  /** Returns true if the given skill has been trained. */
  public <T extends AnnotatedSkill> boolean hasSkill(Class<T> skill) {
    return trainedSkills.contains(skill);
  }
}
Example #26
0
 @Test
 public void testExecutor() throws InterruptedException {
   for (int attempt = 0; attempt < REPETITIONS; ++attempt) {
     final Set<String> collected =
         Collections.newSetFromMap(new ConcurrentHashMap<String, Boolean>());
     Lock writeLock = new ReentrantReadWriteLock().writeLock();
     final ExecutorService executor = BlockDoerExecutorService.create(writeLock);
     // Submit three tasks with small random gaps between them.
     executor.execute(new SaveString(collected, "here"));
     Thread.sleep(r.nextInt(2));
     executor.execute(new SaveString(collected, "there"));
     Thread.sleep(r.nextInt(2));
     executor.execute(new SaveString(collected, "everywhere"));
     // After shutdown + termination, every task must have run exactly once.
     executor.shutdown();
     executor.awaitTermination(Long.MAX_VALUE, TimeUnit.DAYS);
     assertEquals(ImmutableSet.of("here", "there", "everywhere"), collected);
   }
 }
Example #27
0
  /**
   * Code-generates an object whose methods return (snapshots of) the same values as the given
   * object.
   *
   * @param o The object to snapshot.
   * @param typeToSnapshot The type to read the snapshot attributes from. Must be a superclass of o
   *     or an interface implemented by o, and methods not supplied by {@code methodBodyCallback}
   *     must meet the requirements laid out in the class-level SnapshotMaker documentation.
   * @param typeToExtend The type of the snapshot to produce. Must be a subclass or subinterface of
   *     typeToSnapshot, and the additional methods present in typeToExtend vs. typeToSnapshot must
   *     be provided by the MethodMaker callback, since they can't be generated from o.
   * @param methodBodyCallback A callback that can provide method bodies, preventing the standard
   *     snapshot behaviour for those methods. This callback is optional; null is acceptable as "no
   *     callback."
   * @param typesToRecurseOn The types for which the snapshot maker should be applied recursively.
   * @return A Statement representing the value of the object
   * @throws CyclicalObjectGraphException if any objects reachable from {@code o} form a reference
   *     cycle. The simplest example of this would be a method on {@code o} that returns {@code o}
   *     itself. You may be able to work around such a problem by supplying a canned representation
   *     of one of the objects in the cycle.
   */
  public static Statement makeSnapshotAsSubclass(
      final Object o,
      final MetaClass typeToSnapshot,
      final MetaClass typeToExtend,
      final MethodBodyCallback methodBodyCallback,
      final MetaClass... typesToRecurseOn) {

    // Delegate with fresh identity-based bookkeeping (visited objects and
    // in-progress set) used for cycle detection during recursion.
    return makeSnapshotAsSubclass(
        o,
        typeToSnapshot,
        typeToExtend,
        methodBodyCallback,
        new HashSet<MetaClass>(Arrays.asList(typesToRecurseOn)),
        new IdentityHashMap<Object, Statement>(),
        Collections.newSetFromMap(new IdentityHashMap<Object, Boolean>()));
  }
Example #28
0
 /** Adds {@code value} to the set mapped at {@code key}, creating the set on first use. */
 private <K, V> void join(ConcurrentMap<K, Set<V>> map, K key, V value) {
   Set<V> members = map.get(key);
   if (members == null) {
     // Lost putIfAbsent races adopt the set installed by the winning thread.
     Set<V> fresh =
         Collections.newSetFromMap(PlatformDependent.<V, Boolean>newConcurrentHashMap());
     Set<V> raced = map.putIfAbsent(key, fresh);
     members = (raced != null) ? raced : fresh;
   }
   members.add(value);
   // Another thread may have swapped the set out from under us in the
   // meantime; retry against the current mapping so the add is not lost.
   if (members != map.get(key)) {
     join(map, key, value);
   }
 }
Example #29
0
  // pkg-private for testing
  /** Verifies the cache's internal bookkeeping; throws AssertionError on any inconsistency. */
  synchronized void assertConsistent() {
    // Eviction must already have run: the cache should never sit over budget.
    if (requiresEviction()) {
      throw new AssertionError(
          "requires evictions: size="
              + mostRecentlyUsedQueries.size()
              + ", maxSize="
              + maxSize
              + ", ramBytesUsed="
              + ramBytesUsed()
              + ", maxRamBytesUsed="
              + maxRamBytesUsed);
    }
    // Every key in a per-leaf cache must also appear in the top-level MRU set.
    for (LeafCache leafCache : cache.values()) {
      // Identity set: queries are tracked by reference here.
      Set<Query> keys = Collections.newSetFromMap(new IdentityHashMap<>());
      keys.addAll(leafCache.cache.keySet());
      keys.removeAll(mostRecentlyUsedQueries);
      if (!keys.isEmpty()) {
        throw new AssertionError(
            "One leaf cache contains more keys than the top-level cache: " + keys);
      }
    }
    // Recompute the RAM estimate from scratch and compare with the running total.
    long recomputedRamBytesUsed =
        HASHTABLE_RAM_BYTES_PER_ENTRY * cache.size()
            + LINKED_HASHTABLE_RAM_BYTES_PER_ENTRY * uniqueQueries.size();
    for (Query query : mostRecentlyUsedQueries) {
      recomputedRamBytesUsed += ramBytesUsed(query);
    }
    for (LeafCache leafCache : cache.values()) {
      recomputedRamBytesUsed += HASHTABLE_RAM_BYTES_PER_ENTRY * leafCache.cache.size();
      for (DocIdSet set : leafCache.cache.values()) {
        recomputedRamBytesUsed += set.ramBytesUsed();
      }
    }
    if (recomputedRamBytesUsed != ramBytesUsed) {
      throw new AssertionError(
          "ramBytesUsed mismatch : " + ramBytesUsed + " != " + recomputedRamBytesUsed);
    }

    // Recompute the total entry count and compare with getCacheSize().
    long recomputedCacheSize = 0;
    for (LeafCache leafCache : cache.values()) {
      recomputedCacheSize += leafCache.cache.size();
    }
    if (recomputedCacheSize != getCacheSize()) {
      throw new AssertionError(
          "cacheSize mismatch : " + getCacheSize() + " != " + recomputedCacheSize);
    }
  }
  /** Registers an ack callback for the session and returns its unique index. */
  public long registerAck(UUID sessionId, final AckCallback callback) {
    // Lazily create the per-session callback-id set; on a lost putIfAbsent
    // race, adopt the set installed by the winning thread.
    Set<Long> callbackIds = clientCallbackIds.get(sessionId);
    if (callbackIds == null) {
      Set<Long> fresh = Collections.newSetFromMap(new ConcurrentHashMap<Long, Boolean>());
      Set<Long> raced = clientCallbackIds.putIfAbsent(sessionId, fresh);
      callbackIds = (raced == null) ? fresh : raced;
    }

    final long index = ackIndex.incrementAndGet();
    callbackIds.add(index);
    ackCallbacks.put(index, callback);

    // Arm the timeout that fires if the ack never arrives.
    scheduleTimeout(index, sessionId, callback);

    return index;
  }