Example #1
  @Override
  public ActionVersionMap getActionVersionMapByActionType(VdcActionType action_type) {
    ActionVersionMap result = cache.get(action_type);
    if (result != null) {
      if (result.isNullValue()) {
        return null;
      }
      return result;
    }
    MapSqlParameterSource parameterSource =
        getCustomMapSqlParameterSource().addValue("action_type", action_type);

    result =
        getCallsHandler()
            .executeRead(
                "Getaction_version_mapByaction_type",
                ActionVersionMapMapper.instance,
                parameterSource);
    if (result == null) {
      cache.putIfAbsent(action_type, nullActionVersionMap);
    } else {
      cache.putIfAbsent(action_type, result);
    }
    result = cache.get(action_type);
    if (result.isNullValue()) {
      return null;
    }
    return result;
  }
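The lookup above caches a database miss behind a sentinel value (nullActionVersionMap) so that repeated requests for a missing mapping skip the database call. A minimal, self-contained sketch of that null-sentinel idiom, using illustrative names (NullCachingLookup, loader) that do not come from the original code:

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.function.Function;

class NullCachingLookup<K, V> {
  private final ConcurrentMap<K, V> cache = new ConcurrentHashMap<>();
  private final V nullSentinel; // stands in for "looked up, nothing found"
  private final Function<K, V> loader;

  NullCachingLookup(V nullSentinel, Function<K, V> loader) {
    this.nullSentinel = nullSentinel;
    this.loader = loader;
  }

  V get(K key) {
    V cached = cache.get(key);
    if (cached == null) {
      V loaded = loader.apply(key); // e.g. the database read
      cache.putIfAbsent(key, loaded == null ? nullSentinel : loaded);
      cached = cache.get(key); // whichever value won the race
    }
    return cached == nullSentinel ? null : cached;
  }
}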
  void onPullRequestHook(PullRequest pr) throws IOException {

    ConcurrentMap<Integer, GhprbPullRequest> pulls = helper.getTrigger().getPulls();

    if ("closed".equals(pr.getAction())) {
      pulls.remove(pr.getNumber());
    } else if (helper.isProjectDisabled()) {
      logger.log(Level.FINE, "Not processing Pull request since the build is disabled");
    } else if ("opened".equals(pr.getAction()) || "reopened".equals(pr.getAction())) {
      GhprbPullRequest pull = pulls.get(pr.getNumber());
      if (pull == null) {
        pulls.putIfAbsent(pr.getNumber(), new GhprbPullRequest(pr.getPullRequest(), helper, this));
        pull = pulls.get(pr.getNumber());
      }
      pull.check(pr.getPullRequest());
    } else if ("synchronize".equals(pr.getAction())) {
      GhprbPullRequest pull = pulls.get(pr.getNumber());
      if (pull == null) {
        pulls.putIfAbsent(pr.getNumber(), new GhprbPullRequest(pr.getPullRequest(), helper, this));
        pull = pulls.get(pr.getNumber());
      }
      if (pull == null) {
        logger.log(Level.SEVERE, "Pull Request #{0} doesn''t exist", pr.getNumber());
        return;
      }
      pull.check(pr.getPullRequest());
    } else {
      logger.log(Level.WARNING, "Unknown Pull Request hook action: {0}", pr.getAction());
    }
    GhprbTrigger.getDscp().save();
  }
 protected void addListenerLocally(
     String regId, CacheEntryListenerConfiguration<K, V> cacheEntryListenerConfiguration) {
   if (cacheEntryListenerConfiguration.isSynchronous()) {
     syncListenerRegistrations.putIfAbsent(cacheEntryListenerConfiguration, regId);
   } else {
     asyncListenerRegistrations.putIfAbsent(cacheEntryListenerConfiguration, regId);
   }
 }
  @Test
  public void testPutIfAbsent() throws Exception {
    ConcurrentMap<SimpleKey, SimpleValue> map = redisson.getMap("simple");
    SimpleKey key = new SimpleKey("1");
    SimpleValue value = new SimpleValue("2");
    map.put(key, value);
    Assert.assertEquals(value, map.putIfAbsent(key, new SimpleValue("3")));
    Assert.assertEquals(value, map.get(key));

    SimpleKey key1 = new SimpleKey("2");
    SimpleValue value1 = new SimpleValue("4");
    Assert.assertNull(map.putIfAbsent(key1, value1));
    Assert.assertEquals(value1, map.get(key1));
  }
 public void register(String componentName, BindingProvider bindingProvider) {
   BindingProvider bp;
   if ((bp = bindingProviders.putIfAbsent(componentName, bindingProvider)) != null) {
     throw new ManagementException(
         "Binding provider " + bp + " already registered for component " + componentName);
   }
 }
  /**
   * @param e Entry.
   * @return Entry.
   */
  private CacheContinuousQueryEntry handleEntry(CacheContinuousQueryEntry e) {
    assert e != null;
    assert entryBufs != null;

    if (internal) {
      if (e.isFiltered()) return null;
      else return e;
    }

    // Initial query entry.
    // These events should be fired immediately.
    if (e.updateCounter() == -1) return e;

    EntryBuffer buf = entryBufs.get(e.partition());

    if (buf == null) {
      buf = new EntryBuffer();

      EntryBuffer oldRec = entryBufs.putIfAbsent(e.partition(), buf);

      if (oldRec != null) buf = oldRec;
    }

    return buf.handle(e);
  }
  @Override
  public void cacheBlock(BlockCacheKey blockName, Cacheable toBeCached) {
    ByteBuffer storedBlock;

    try {
      storedBlock = backingStore.alloc(toBeCached.getSerializedLength());
    } catch (InterruptedException e) {
      LOG.warn("SlabAllocator was interrupted while waiting for block to become available");
      LOG.warn(e);
      return;
    }

    CacheablePair newEntry = new CacheablePair(toBeCached.getDeserializer(), storedBlock);
    toBeCached.serialize(storedBlock);

    synchronized (this) {
      CacheablePair alreadyCached = backingMap.putIfAbsent(blockName, newEntry);

      if (alreadyCached != null) {
        backingStore.free(storedBlock);
        throw new RuntimeException("already cached " + blockName);
      }
      if (actionWatcher != null) {
        actionWatcher.onInsertion(blockName, this);
      }
    }
    newEntry.recentlyAccessed.set(System.nanoTime());
    this.size.addAndGet(newEntry.heapSize());
  }
Example #8
  /**
   * We want to treat comments specially so that the comment prefix is skipped during line indent calculation.
   *
   * <p>Example:
   *
   * <pre>
   *   if (true) {
   *     int i1;
   * //    int i2;
   *     int i3;
   *   }
   * </pre>
   *
   * We want to use the 'int i2;' start offset as the third line indent (even though it has a
   * non-whitespace comment prefix (//) at the first column).
   *
   * <p>This method tries to parse the comment prefix for the language implied by the given
   * comment type. It uses {@link #NO_COMMENT_INFO_MARKER} as an indicator that this information
   * is unavailable.
   *
   * @param commentType target comment type
   * @return prefix of the comment denoted by the given type if any; {@link #NO_COMMENT_INFO_MARKER}
   *     otherwise
   */
  @NotNull
  private static String getCommentPrefix(@NotNull IElementType commentType) {
    Commenter c = LanguageCommenters.INSTANCE.forLanguage(commentType.getLanguage());
    if (!(c instanceof CodeDocumentationAwareCommenter)) {
      COMMENT_PREFIXES.put(commentType, NO_COMMENT_INFO_MARKER);
      return NO_COMMENT_INFO_MARKER;
    }
    CodeDocumentationAwareCommenter commenter = (CodeDocumentationAwareCommenter) c;

    IElementType lineCommentType = commenter.getLineCommentTokenType();
    String lineCommentPrefix = commenter.getLineCommentPrefix();
    if (lineCommentType != null) {
      COMMENT_PREFIXES.put(
          lineCommentType, lineCommentPrefix == null ? NO_COMMENT_INFO_MARKER : lineCommentPrefix);
    }

    IElementType blockCommentType = commenter.getBlockCommentTokenType();
    String blockCommentPrefix = commenter.getBlockCommentPrefix();
    if (blockCommentType != null) {
      COMMENT_PREFIXES.put(
          blockCommentType,
          blockCommentPrefix == null ? NO_COMMENT_INFO_MARKER : blockCommentPrefix);
    }

    IElementType docCommentType = commenter.getDocumentationCommentTokenType();
    String docCommentPrefix = commenter.getDocumentationCommentPrefix();
    if (docCommentType != null) {
      COMMENT_PREFIXES.put(
          docCommentType, docCommentPrefix == null ? NO_COMMENT_INFO_MARKER : docCommentPrefix);
    }

    COMMENT_PREFIXES.putIfAbsent(commentType, NO_COMMENT_INFO_MARKER);
    return COMMENT_PREFIXES.get(commentType);
  }
  public int sendAlarm(final AlarmInfo alarmInfo) {
    AlarmType type = alarmInfo.type;
    String id = alarmInfo.getId();
    String extraInfo = alarmInfo.getExtraInfo();

    if (terminationToken.isToShutdown()) {
      // Record the alarm info
      System.err.println("rejected alarm:" + id + "," + extraInfo);
      return -1;
    }

    int duplicateSubmissionCount = 0;
    try {

      AtomicInteger prevSubmittedCounter;

      prevSubmittedCounter =
          submittedAlarmRegistry.putIfAbsent(
              type.toString() + ':' + id + '@' + extraInfo, new AtomicInteger(0));
      if (null == prevSubmittedCounter) {
        terminationToken.reservations.incrementAndGet();
        alarmQueue.put(alarmInfo);
      } else {

        // The fault has not recovered yet, so there is no need to resend the alarm to the server; just increment the counter
        duplicateSubmissionCount = prevSubmittedCounter.incrementAndGet();
      }
    } catch (Throwable t) {
      t.printStackTrace();
    }

    return duplicateSubmissionCount;
  }
 @Override
 public ThreadPoolExecutor getThreadPool(
     HystrixThreadPoolKey threadPoolKey,
     HystrixProperty<Integer> corePoolSize,
     HystrixProperty<Integer> maximumPoolSize,
     HystrixProperty<Integer> keepAliveTime,
     TimeUnit unit,
     BlockingQueue<Runnable> workQueue) {
   final String nameFormat =
       Joiner.on('-').join(ImmutableList.of("hystrix"), threadPoolKey.name(), "%d");
   final ThreadFactory threadFactory =
       new ThreadFactoryBuilder().setNameFormat(nameFormat).build();
   final String key = threadPoolKey.name();
   final ThreadPoolExecutor existing =
       executors.putIfAbsent(
           key,
           new ThreadPoolExecutor(
               corePoolSize.get(),
               maximumPoolSize.get(),
               keepAliveTime.get(),
               unit,
               workQueue,
               threadFactory));
   final ThreadPoolExecutor threadPoolExecutor = executors.get(key);
   if (existing == null) {
     environment
         .lifecycle()
         .manage(new ExecutorServiceManager(threadPoolExecutor, Duration.seconds(5), nameFormat));
   }
   return threadPoolExecutor;
 }
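Note that the registration above constructs a ThreadPoolExecutor on every call and discards it when another thread has already registered one for the key. On Java 8+ the same pattern can be written with ConcurrentMap.computeIfAbsent so the executor is only built when the key is genuinely absent; a minimal sketch under that assumption (the ExecutorRegistry class and getOrCreate method are illustrative, not part of the original code):

import java.util.concurrent.*;

class ExecutorRegistry {
  private final ConcurrentMap<String, ThreadPoolExecutor> executors = new ConcurrentHashMap<>();

  // Builds the pool only when the key is really absent, so no instance is created and thrown away.
  ThreadPoolExecutor getOrCreate(String key, int core, int max, long keepAliveSeconds,
      BlockingQueue<Runnable> workQueue, ThreadFactory threadFactory) {
    return executors.computeIfAbsent(
        key,
        k -> new ThreadPoolExecutor(core, max, keepAliveSeconds, TimeUnit.SECONDS, workQueue, threadFactory));
  }
}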
  /**
   * Submit the work for actual execution.
   *
   * @throws InvalidProtocolBufferException
   */
  public void submitWork(SubmitWorkRequestProto request, String llapHost, int llapPort) {
    // Register the pending events to be sent for this spec.
    VertexOrBinary vob = request.getWorkSpec();
    assert vob.hasVertexBinary() != vob.hasVertex();
    SignableVertexSpec vertex = null;
    try {
      vertex =
          vob.hasVertex() ? vob.getVertex() : SignableVertexSpec.parseFrom(vob.getVertexBinary());
    } catch (InvalidProtocolBufferException e) {
      throw new RuntimeException(e);
    }
    QueryIdentifierProto queryIdentifierProto = vertex.getQueryIdentifier();
    TezTaskAttemptID attemptId =
        Converters.createTaskAttemptId(
            queryIdentifierProto,
            vertex.getVertexIndex(),
            request.getFragmentNumber(),
            request.getAttemptNumber());
    final String fragmentId = attemptId.toString();

    pendingEvents.putIfAbsent(
        fragmentId,
        new PendingEventData(
            new TaskHeartbeatInfo(fragmentId, llapHost, llapPort), Lists.<TezEvent>newArrayList()));

    // Set up a timer task to check for heartbeat timeouts
    timer.scheduleAtFixedRate(
        new HeartbeatCheckTask(), connectionTimeout, connectionTimeout, TimeUnit.MILLISECONDS);

    // Send out the actual SubmitWorkRequest
    communicator.sendSubmitWork(
        request,
        llapHost,
        llapPort,
        new LlapProtocolClientProxy.ExecuteRequestCallback<SubmitWorkResponseProto>() {

          @Override
          public void setResponse(SubmitWorkResponseProto response) {
            if (response.hasSubmissionState()) {
              if (response.getSubmissionState().equals(SubmissionStateProto.REJECTED)) {
                String msg = "Fragment: " + fragmentId + " rejected. Server Busy.";
                LOG.info(msg);
                if (responder != null) {
                  Throwable err = new RuntimeException(msg);
                  responder.submissionFailed(fragmentId, err);
                }
                return;
              }
            }
          }

          @Override
          public void indicateError(Throwable t) {
            String msg = "Failed to submit: " + fragmentId;
            LOG.error(msg, t);
            Throwable err = new RuntimeException(msg, t);
            responder.submissionFailed(fragmentId, err);
          }
        });
  }
Example #12
  /* ------------------------------------------------------------ */
  private HttpContent load(String pathInContext, Resource resource) throws IOException {
    Content content = null;

    if (resource == null || !resource.exists()) return null;

    // Will it fit in the cache?
    if (!resource.isDirectory() && isCacheable(resource)) {
      // Create the Content (to increment the cache sizes before adding the content)
      content = new Content(pathInContext, resource);

      // reduce the cache to an acceptable size.
      shrinkCache();

      // Add it to the cache.
      Content added = _cache.putIfAbsent(pathInContext, content);
      if (added != null) {
        content.invalidate();
        content = added;
      }

      return content;
    }

    return new HttpContent.ResourceAsHttpContent(
        resource,
        _mimeTypes.getMimeByExtension(resource.toString()),
        getMaxCachedFileSize(),
        _etags);
  }
Example #13
 // Nullness checker is not powerful enough to prove null-safety of
 // this method
 @SuppressWarnings("nullness")
 void incrementBy(String key, long delta) {
   // We use a compareAndSet strategy to update the map, which is much
   // faster when there isn't too much contention.  Look at a value, and
   // conditionally update the map if the value hasn't changed.
   // If it has changed, repeat.
   Long oldValue = map.get(key);
   if (oldValue == null) {
     // Currently, the slot is empty
     oldValue = map.putIfAbsent(key, delta);
     if (oldValue == null) {
       // The slot was still empty when we set it
       return;
     } else {
       // Someone filled in the slot behind our back.  oldValue has
       // its current value
     }
   }
   while (true) {
     if (map.replace(key, oldValue, oldValue + delta)) {
       break;
     }
     // Nullness checker doesn't understand that this cannot return null.
     oldValue = map.get(key);
   }
 }
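On Java 8+ the putIfAbsent-plus-replace loop above can be collapsed into a single atomic call to ConcurrentMap.merge, which performs the retry internally; a minimal self-contained sketch (the LongCounters class name is illustrative):

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

class LongCounters {
  private final ConcurrentMap<String, Long> map = new ConcurrentHashMap<>();

  // Atomically adds delta to the current value, creating the entry on first use.
  void incrementBy(String key, long delta) {
    map.merge(key, delta, Long::sum);
  }
}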
 /**
  * Loads the cache result, computing it if needed by executing the query phase and otherwise
  * deserializing the cached value into the {@link SearchContext#queryResult() context's query
  * result}. The combination of load + compute allows a single load operation that will cause
  * other requests with the same key to wait until it is loaded and then reuse the same cached value.
  */
 public void loadIntoContext(
     final ShardSearchRequest request, final SearchContext context, final QueryPhase queryPhase)
     throws Exception {
   assert canCache(request, context);
   Key key = buildKey(request, context);
   Loader loader = new Loader(queryPhase, context, key);
   Value value = cache.get(key, loader);
   if (loader.isLoaded()) {
     key.shard.requestCache().onMiss();
      // see if it's the first time we see this reader, and make sure to register a cleanup key
     CleanupKey cleanupKey =
         new CleanupKey(
             context.indexShard(),
             ((DirectoryReader) context.searcher().getIndexReader()).getVersion());
     if (!registeredClosedListeners.containsKey(cleanupKey)) {
       Boolean previous = registeredClosedListeners.putIfAbsent(cleanupKey, Boolean.TRUE);
       if (previous == null) {
         context.searcher().getIndexReader().addReaderClosedListener(cleanupKey);
       }
     }
   } else {
     key.shard.requestCache().onHit();
     // restore the cached query result into the context
     final QuerySearchResult result = context.queryResult();
     result.readFromWithId(context.id(), value.reference.streamInput());
     result.shardTarget(context.shardTarget());
   }
 }
  /**
   * Starts a multi-update lock. Waits until the topology future is ready.
   *
   * @return Topology version.
   * @throws IgniteCheckedException If failed.
   */
  public AffinityTopologyVersion beginMultiUpdate() throws IgniteCheckedException {
    IgniteBiTuple<IgniteUuid, GridDhtTopologyFuture> tup = multiTxHolder.get();

    if (tup != null)
      throw new IgniteCheckedException("Nested multi-update locks are not supported");

    top.readLock();

    GridDhtTopologyFuture topFut;

    AffinityTopologyVersion topVer;

    try {
      // While we are holding read lock, register lock future for partition release future.
      IgniteUuid lockId = IgniteUuid.fromUuid(ctx.localNodeId());

      topVer = top.topologyVersion();

      MultiUpdateFuture fut = new MultiUpdateFuture(topVer);

      MultiUpdateFuture old = multiTxFuts.putIfAbsent(lockId, fut);

      assert old == null;

      topFut = top.topologyVersionFuture();

      multiTxHolder.set(F.t(lockId, topFut));
    } finally {
      top.readUnlock();
    }

    topFut.get();

    return topVer;
  }
Example #16
 public void scheduleCleanMultimap(String name, String timeoutSetName) {
   RedissonCacheTask task = new RedissonCacheTask(name, timeoutSetName, null, true);
   RedissonCacheTask prevTask = tasks.putIfAbsent(name, task);
   if (prevTask == null) {
     task.schedule();
   }
 }
Example #17
 public void schedule(String name, String timeoutSetName, String maxIdleSetName) {
   RedissonCacheTask task = new RedissonCacheTask(name, timeoutSetName, maxIdleSetName, false);
   RedissonCacheTask prevTask = tasks.putIfAbsent(name, task);
   if (prevTask == null) {
     task.schedule();
   }
 }
 @Override
 public ServerStatus startServer(
     final String serverName, final ModelNode domainModel, final boolean blocking) {
   if (shutdown || connectionFinished) {
     throw HostControllerMessages.MESSAGES.hostAlreadyShutdown();
   }
   ManagedServer server = servers.get(serverName);
   if (server == null) {
     // Create a new authKey
     final byte[] authKey = new byte[16];
     new Random(new SecureRandom().nextLong()).nextBytes(authKey);
     removeNullChar(authKey);
     // Create the managed server
     final ManagedServer newServer = createManagedServer(serverName, authKey);
     server = servers.putIfAbsent(serverName, newServer);
     if (server == null) {
       server = newServer;
     }
   }
   // Start the server
   server.start(createBootFactory(serverName, domainModel));
   synchronized (shutdownCondition) {
     shutdownCondition.notifyAll();
   }
   if (blocking) {
     // Block until the server started message
     server.awaitState(ManagedServer.InternalState.SERVER_STARTED);
   } else {
     // Wait until the server opens the mgmt connection
     server.awaitState(ManagedServer.InternalState.SERVER_STARTING);
   }
   return server.getState();
 }
 private QueueConsumer recreateTopicConsumer(String subscriptionId, boolean autoAck) {
   QueueConsumer consumer;
   if (subscriptionExists(subscriptionId)) {
     QueueConsumer tmp = null;
     try {
       tmp =
           createConsumer(
               true, autoAck, subscriptionId, null, consumerTimeoutSeconds * 1000, false);
     } catch (ActiveMQException e) {
       throw new RuntimeException(e);
     }
     consumer = queueConsumers.putIfAbsent(subscriptionId, tmp);
     if (consumer == null) {
       consumer = tmp;
       serviceManager.getTimeoutTask().add(this, subscriptionId);
     } else {
       tmp.shutdown();
     }
   } else {
     throw new WebApplicationException(
         Response.status(405)
             .entity("Failed to find subscriber " + subscriptionId + " you will have to reconnect")
             .type("text/plain")
             .build());
   }
   return consumer;
 }
 @Override
 public void reconnectServer(
     final String serverName,
     final ModelNode domainModel,
     final byte[] authKey,
     final boolean running,
     final boolean stopping) {
   if (shutdown || connectionFinished) {
     throw HostControllerMessages.MESSAGES.hostAlreadyShutdown();
   }
   ManagedServer existing = servers.get(serverName);
   if (existing != null) {
     ROOT_LOGGER.existingServerWithState(serverName, existing.getState());
     return;
   }
   final ManagedServer server = createManagedServer(serverName, authKey);
   if ((existing = servers.putIfAbsent(serverName, server)) != null) {
     ROOT_LOGGER.existingServerWithState(serverName, existing.getState());
     return;
   }
   if (running) {
     if (!stopping) {
       server.reconnectServerProcess(createBootFactory(serverName, domainModel));
       // Register the server proxy at the domain controller
       domainController.registerRunningServer(server.getProxyController());
     } else {
       server.setServerProcessStopping();
     }
   } else {
     server.removeServerProcess();
   }
   synchronized (shutdownCondition) {
     shutdownCondition.notifyAll();
   }
 }
Example #21
 private void checkNewSubystem(final String extensionModuleName, final String subsystemName) {
   String existingModule = reverseMap.putIfAbsent(subsystemName, extensionModuleName);
   if (existingModule != null && !extensionModuleName.equals(existingModule)) {
     throw ControllerMessages.MESSAGES.duplicateSubsystem(
         subsystemName, extensionModuleName, existingModule);
   }
 }
  /**
   * @param sesId Session ID.
   * @param taskNodeId Task node ID.
   * @param taskName Task name.
   * @param dep Deployment.
   * @param taskClsName Task class name.
   * @param top Topology.
   * @param startTime Execution start time.
   * @param endTime Execution end time.
   * @param siblings Collection of siblings.
   * @param attrs Map of attributes.
   * @param fullSup {@code True} to enable distributed session attributes and checkpoints.
   * @return New session if one did not exist, or existing one.
   */
  public GridTaskSessionImpl createTaskSession(
      GridUuid sesId,
      UUID taskNodeId,
      String taskName,
      @Nullable GridDeployment dep,
      String taskClsName,
      @Nullable Collection<UUID> top,
      long startTime,
      long endTime,
      Collection<GridComputeJobSibling> siblings,
      Map<Object, Object> attrs,
      boolean fullSup) {
    if (!fullSup) {
      return new GridTaskSessionImpl(
          taskNodeId,
          taskName,
          dep,
          taskClsName,
          sesId,
          top,
          startTime,
          endTime,
          siblings,
          attrs,
          ctx,
          fullSup);
    }

    while (true) {
      GridTaskSessionImpl ses = sesMap.get(sesId);

      if (ses == null) {
        GridTaskSessionImpl old =
            sesMap.putIfAbsent(
                sesId,
                ses =
                    new GridTaskSessionImpl(
                        taskNodeId,
                        taskName,
                        dep,
                        taskClsName,
                        sesId,
                        top,
                        startTime,
                        endTime,
                        siblings,
                        attrs,
                        ctx,
                        fullSup));

        if (old != null) ses = old;
        else
          // Return without acquire.
          return ses;
      }

      if (ses.acquire()) return ses;
      else sesMap.remove(sesId, ses);
    }
  }
Example #23
  private String processRemoveBlanks(final String s) {
    String result = s;
    for (String tag : vRemoveBlanks) {
      if (!P_REMOVE_PAIR_BLANKS.containsKey(tag)) {
        P_REMOVE_PAIR_BLANKS.putIfAbsent(
            tag, Pattern.compile("<" + tag + "(\\s[^>]*)?></" + tag + ">"));
      }
      result = regexReplace(P_REMOVE_PAIR_BLANKS.get(tag), "", result);
      if (!P_REMOVE_SELF_BLANKS.containsKey(tag)) {
        P_REMOVE_SELF_BLANKS.putIfAbsent(tag, Pattern.compile("<" + tag + "(\\s[^>]*)?/>"));
      }
      result = regexReplace(P_REMOVE_SELF_BLANKS.get(tag), "", result);
    }

    return result;
  }
Example #24
  @Override
  public int doStartTag() throws JspException {
    String key =
        JspUtils.getCurrentServletPath((HttpServletRequest) pageContext.getRequest()) + "/" + name;
    bodyContent = null;
    output = OUTPUT_CACHE.get(key);

    // Output is expired? While producing, it's not considered expired
    // because the lastProduced field is set far in the future.
    if (output != null && System.currentTimeMillis() - output.lastProduced > duration) {
      setOutput(output, null);
      OUTPUT_CACHE.remove(key);
      output = null;
    }

    // Output isn't cached, so flag it to be produced.
    if (output == null) {
      output = new Output();
      output.key = key;

      // Make sure there's only one producing output at [R].
      Output o = OUTPUT_CACHE.putIfAbsent(key, output);
      if (o == null) {
        LOGGER.info("Producing [{}] in [{}]", key, Thread.currentThread());
        return EVAL_BODY_BUFFERED;
      }

      output = o;
    }

    return SKIP_BODY;
  }
 public void addKnownInput(
     String hostName,
     int port,
     InputAttemptIdentifier srcAttemptIdentifier,
     int srcPhysicalIndex) {
   String identifier = InputHost.createIdentifier(hostName, port);
   InputHost host = knownSrcHosts.get(identifier);
   if (host == null) {
     host = new InputHost(hostName, port, inputContext.getApplicationId(), srcPhysicalIndex);
     assert identifier.equals(host.getIdentifier());
     InputHost old = knownSrcHosts.putIfAbsent(identifier, host);
     if (old != null) {
       host = old;
     }
   }
   if (LOG.isDebugEnabled()) {
     LOG.debug("Adding input: " + srcAttemptIdentifier + ", to host: " + host);
   }
   host.addKnownInput(srcAttemptIdentifier);
   lock.lock();
   try {
     boolean added = pendingHosts.offer(host);
     if (!added) {
       String errorMessage = "Unable to add host: " + host.getIdentifier() + " to pending queue";
       LOG.error(errorMessage);
       throw new TezUncheckedException(errorMessage);
     }
     wakeLoop.signal();
   } finally {
     lock.unlock();
   }
 }
Example #26
 public ClientProxy getOrCreateProxy(String service, String id) {
   final ObjectNamespace ns = new DefaultObjectNamespace(service, id);
   ClientProxyFuture proxyFuture = proxies.get(ns);
   if (proxyFuture != null) {
     return proxyFuture.get();
   }
   final ClientProxyFactory factory = proxyFactories.get(service);
   if (factory == null) {
     throw new IllegalArgumentException("No factory registered for service: " + service);
   }
   final ClientProxy clientProxy = factory.create(id);
   proxyFuture = new ClientProxyFuture();
   final ClientProxyFuture current = proxies.putIfAbsent(ns, proxyFuture);
   if (current != null) {
     return current.get();
   }
   try {
     initialize(clientProxy);
   } catch (Exception e) {
     proxies.remove(ns);
     proxyFuture.set(e);
     throw ExceptionUtil.rethrow(e);
   }
   proxyFuture.set(clientProxy);
   return clientProxy;
 }
  /**
   * @param ctx Context.
   * @param e entry.
   * @return Entry collection.
   */
  private Collection<CacheContinuousQueryEntry> handleEvent(
      GridKernalContext ctx, CacheContinuousQueryEntry e) {
    assert e != null;

    if (internal) {
      if (e.isFiltered()) return Collections.emptyList();
      else return F.asList(e);
    }

    // Initial query entry or evicted entry. These events should be fired immediately.
    if (e.updateCounter() == -1L) return F.asList(e);

    PartitionRecovery rec = rcvs.get(e.partition());

    if (rec == null) {
      rec =
          new PartitionRecovery(
              ctx.log(getClass()),
              initTopVer,
              initUpdCntrs == null ? null : initUpdCntrs.get(e.partition()));

      PartitionRecovery oldRec = rcvs.putIfAbsent(e.partition(), rec);

      if (oldRec != null) rec = oldRec;
    }

    return rec.collectEntries(e);
  }
Example #28
  private ReceiverEntry getOrCreateReceiverEntry(Address sender, long seqno, short conn_id) {
    NakReceiverWindow win =
        new NakReceiverWindow(
            sender,
            this,
            seqno - 1,
            timer,
            use_range_based_retransmitter,
            xmit_table_num_rows,
            xmit_table_msgs_per_row,
            xmit_table_resize_factor,
            xmit_table_max_compaction_time,
            xmit_table_automatic_purging);

    if (exponential_backoff > 0)
      win.setRetransmitTimeouts(new ExponentialInterval(exponential_backoff));
    else win.setRetransmitTimeouts(new StaticInterval(timeout));

    ReceiverEntry entry = new ReceiverEntry(win, conn_id, max_stable_msgs);
    ReceiverEntry entry2 = recv_table.putIfAbsent(sender, entry);
    if (entry2 != null) return entry2;
    if (log.isTraceEnabled())
      log.trace(
          local_addr
              + ": created receiver window for "
              + sender
              + " at seqno=#"
              + seqno
              + " for conn-id="
              + conn_id);
    return entry;
  }
 static long nextMemberId(Container container) {
   AtomicLong counter = CONTAINER_COUNTER.putIfAbsent(container.id(), new AtomicLong(-1));
   if (counter == null) {
     counter = CONTAINER_COUNTER.get(container.id());
   }
   return counter.incrementAndGet();
 }
 public Subscription addSubscription(
     String destination, String subTopicId, String subscriptionId, boolean noLocal) {
   Subscription subscription =
       new Subscription(this, destination, subTopicId, subscriptionId, noLocal);
   Subscription present = subscriptions.putIfAbsent(subscriptionId, subscription);
   return (present != null ? present : subscription);
 }