public Map<String, Object> process(Map<String, Object> input) {
    if (!isValid) {
      validate(true);
    }
    Long currentProcessId = processId.incrementAndGet();
    Semaphore resultSemaphoreForProcess = new Semaphore(0);
    resultSemaphores.put(currentProcessId, resultSemaphoreForProcess);

    // send input to all input pipeline stages
    for (PipelineStage inputStage : inputStages.keySet()) {
      Map<String, String> inputPortMapping = inputStages.get(inputStage);
      for (String inputPort : inputPortMapping.keySet()) {
        Object inputParam = input.get(inputPort);
        inputStage.consume(currentProcessId, inputPortMapping.get(inputPort), inputParam);
      }
    }

    // wait for the output to become ready
    resultSemaphoreForProcess.acquireUninterruptibly();

    if (Boolean.FALSE.equals(processingStatus.remove(currentProcessId))) {
      Throwable t = processingException.remove(currentProcessId);
      throw new PipelineProcessingException(
          "Processing failed for id '" + currentProcessId + "'.", t);
    }
    // cleanup and return the result
    return clear(currentProcessId);
  }
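The handoff above relies on a ConcurrentMap of per-request Semaphores: the requester registers a permit-less semaphore under a fresh id, blocks on it, and the stage that completes the output releases it. Below is a minimal self-contained sketch of the same idea; the class and method names (ResultExchange, register, complete, await) are invented for illustration and are not part of the original pipeline.

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.Semaphore;
import java.util.concurrent.atomic.AtomicLong;

// Hypothetical helper: callers block until a worker publishes their result.
class ResultExchange<V> {
  private final AtomicLong ids = new AtomicLong();
  private final ConcurrentMap<Long, Semaphore> semaphores = new ConcurrentHashMap<>();
  private final ConcurrentMap<Long, V> results = new ConcurrentHashMap<>();

  // Requester: register a semaphore with no permits and hand out the id.
  long register() {
    long id = ids.incrementAndGet();
    semaphores.put(id, new Semaphore(0));
    return id;
  }

  // Worker: publish the result, then wake the requester.
  void complete(long id, V value) {
    results.put(id, value);
    Semaphore s = semaphores.get(id);
    if (s != null) {
      s.release();
    }
  }

  // Requester: wait for the release, then clean up both maps
  // (the role played by clear(currentProcessId) in the snippet above).
  V await(long id) {
    semaphores.get(id).acquireUninterruptibly();
    semaphores.remove(id);
    return results.remove(id);
  }
}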
 /**
  * Cleans the metadata (triggers, poll time) associated with a given behavior id.
  *
  * @param application the application in which the metadata is stored
  * @param id the id of the behavior
  */
 private static void cleanMetadata(final Application application, String id) {
   id = getPageId(application, id);
   ConcurrentMap<String, List<DelayedMethodCallList>> triggersById = null;
   ConcurrentMap<String, Time> eventsTimeById = null;
   ConcurrentMap<String, String> pageIdsById = null;
   synchronized (application) {
     triggersById = application.getMetaData(TRIGGERS_KEY);
     eventsTimeById = application.getMetaData(EVENTS_KEY);
     pageIdsById = application.getMetaData(PAGE_ID_KEY);
   }
   if (triggersById != null) {
     final List<DelayedMethodCallList> triggers = triggersById.remove(id);
     if (triggers != null) {
       synchronized (triggers) {
         triggers.clear();
       }
     }
   }
   if (eventsTimeById != null) {
     eventsTimeById.remove(id);
   }
   if (pageIdsById != null) {
     pageIdsById.remove(id);
   }
 }
 protected final LocalTransaction removeLocalTransactionInternal(Transaction tx) {
   LocalTransaction localTx = localTransactions.get(tx);
   if (localTx != null) {
     globalToLocalTransactions.remove(localTx.getGlobalTransaction());
     localTransactions.remove(tx);
     releaseResources(localTx);
   }
   return localTx;
 }
Example 4
 public void unregister(ObjectName name) throws JMException {
   if (isRegistered(name)) {
     ObjectName on = mbeansRegistered.remove(name);
     server.unregisterMBean(on);
     LOG.debug("Unregistered MBean with ObjectName: {}", name);
   } else {
     mbeansRegistered.remove(name);
   }
 }
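The isRegistered check followed by remove is a check-then-act sequence; when the map is the source of truth, the race can be avoided by removing first and branching on the value that comes back. A sketch of that variant under assumed field types (it is not the library's actual code):

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import javax.management.JMException;
import javax.management.MBeanServer;
import javax.management.ObjectName;

// Illustrative variant: remove first, then act on the previous value.
class MBeanRegistry {
  private final MBeanServer server;
  private final ConcurrentMap<ObjectName, ObjectName> mbeansRegistered = new ConcurrentHashMap<>();

  MBeanRegistry(MBeanServer server) {
    this.server = server;
  }

  public void unregister(ObjectName name) throws JMException {
    // remove() returns the previous mapping, so only one caller can win.
    ObjectName on = mbeansRegistered.remove(name);
    if (on != null && server.isRegistered(on)) {
      server.unregisterMBean(on);
    }
  }
}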
Example 5
 /** {@inheritDoc} */
 public void deleteStreamById(int streamId) {
   if (streamId > 0) {
     if (streams.get(streamId - 1) != null) {
       pendingVideos.remove(streamId);
       usedStreams.decrementAndGet();
       streams.remove(streamId - 1);
       streamBuffers.remove(streamId - 1);
     }
   }
 }
Example 6
 /** Release a lock from the global pool. */
 private void releaseGlobalLock(
     ConcurrentMap<Long, ForsetiLockManager.Lock> lockMap, long resourceId) {
   ForsetiLockManager.Lock lock = lockMap.get(resourceId);
   if (lock instanceof ExclusiveLock) {
     lockMap.remove(resourceId);
   } else if (lock instanceof SharedLock && ((SharedLock) lock).release(this)) {
     // We were the last to hold this lock, it is now dead and we should remove it.
     lockMap.remove(resourceId);
   }
 }
 void removeUnconfirmedTransactions(Collection<TransactionImpl> transactions) {
   List<Transaction> removedList = new ArrayList<>();
   for (TransactionImpl transaction : transactions) {
     if (unconfirmedTransactions.remove(transaction.getId()) != null) {
       transaction.undoUnconfirmed();
       unconfirmedTransactionHashes.remove(transaction.getHash());
       removedList.add(transaction);
     }
   }
   transactionListeners.notify(removedList, Event.REMOVED_UNCONFIRMED_TRANSACTIONS);
 }
 @Override
 public void timerUndeployed(final String timedObjectId) {
   final Lock lock = getLock(timedObjectId);
   lock.lock();
   try {
     locks.remove(timedObjectId);
     directories.remove(timedObjectId);
   } finally {
     lock.unlock();
   }
 }
  @Test
  public void testRemoveValueFail() {
    ConcurrentMap<SimpleKey, SimpleValue> map = redisson.getMap("simple");
    map.put(new SimpleKey("1"), new SimpleValue("2"));

    boolean res = map.remove(new SimpleKey("2"), new SimpleValue("1"));
    Assert.assertFalse(res);

    boolean res1 = map.remove(new SimpleKey("1"), new SimpleValue("3"));
    Assert.assertFalse(res1);

    SimpleValue val1 = map.get(new SimpleKey("1"));
    Assert.assertEquals("2", val1.getValue());
  }
  @Test
  public void testConcurrentMapRemove() {
    final ConcurrentMap<Object, Object> map = new TrieMap<Object, Object>();

    for (int i = 128; i < COUNT; i++) {
      TestHelper.assertFalse(map.remove(i, i));
      TestHelper.assertTrue(null == map.put(i, i));
      TestHelper.assertFalse(map.remove(i, "lol"));
      TestHelper.assertTrue(map.containsKey(i));
      TestHelper.assertTrue(map.remove(i, i));
      TestHelper.assertFalse(map.containsKey(i));
      TestHelper.assertTrue(null == map.put(i, i));
    }
  }
Example 11
  protected void handleTaskSubmittedRequest(
      Runnable runnable, Address source, long requestId, long threadId) {
    // We store it in our map so that when the task is
    // finished we can send the results back to the owner.
    _running.put(runnable, new Owner(source, requestId));
    // We give the task to the thread that is now waiting for it.
    // If we can't hand it off, we have to respond to the
    // caller that we can't handle it.  They must have
    // gotten our address when we had a consumer, but
    // the consumer went away between then and now.
    boolean received;
    try {
      _tasks.put(threadId, runnable);

      CyclicBarrier barrier = _taskBarriers.remove(threadId);
      if (received = (barrier != null)) {
        // Only wait 10 milliseconds, in case if the consumer was
        // stopped between when we were told it was available and now
        barrier.await(10, TimeUnit.MILLISECONDS);
      }
    } catch (InterruptedException e) {
      if (log.isDebugEnabled()) log.debug("Interrupted while handing off task");
      Thread.currentThread().interrupt();
      received = false;
    } catch (BrokenBarrierException e) {
      if (log.isDebugEnabled())
        log.debug(
            "Consumer " + threadId + " has been interrupted, " + "must retry to submit elsewhere");
      received = false;
    } catch (TimeoutException e) {
      if (log.isDebugEnabled())
        log.debug("Timeout waiting to hand off to barrier, consumer " + threadId + " must be slow");
      // This should only happen if the consumer put the latch then got
      // interrupted but hadn't yet removed the latch, should almost never
      // happen
      received = false;
    }

    if (!received) {
      // Clean up the tasks request
      _tasks.remove(threadId);
      if (log.isDebugEnabled()) log.debug("Run rejected not able to pass off to consumer");
      // If we couldn't hand off the task we have to tell the client
      // and also reupdate the coordinator that our consumer is ready
      sendRequest(source, Type.RUN_REJECTED, requestId, null);
      _running.remove(runnable);
    }
  }
 @Override
 public void deletePartition(Integer partitionId) {
   partitionLocks.remove(partitionId);
   if (inMemoryPartitions.containsKey(partitionId)) {
     inMemoryPartitions.remove(partitionId);
   } else {
     if (loadedPartition != null && loadedPartition.getId() == partitionId) {
       loadedPartition = null;
     } else {
       File file = new File(getPartitionPath(partitionId));
       file.delete();
     }
     onDiskPartitions.remove(partitionId);
   }
 }
 public static int getAndClearCount(String key) {
   AtomicInteger counter = counters.remove(key);
   if (null == counter) {
     throw new IllegalArgumentException("Key has never been used in function: " + key);
   }
   return counter.get();
 }
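Because remove(key) atomically returns the previous value, the read-and-reset above needs no extra locking. A self-contained sketch of the same counter pattern; the surrounding class and the increment helper are assumptions, not part of the original:

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.atomic.AtomicInteger;

// Hypothetical counter registry mirroring the snippet above.
class Counters {
  private static final ConcurrentMap<String, AtomicInteger> counters = new ConcurrentHashMap<>();

  static void increment(String key) {
    // computeIfAbsent creates the counter on first use.
    counters.computeIfAbsent(key, k -> new AtomicInteger()).incrementAndGet();
  }

  static int getAndClearCount(String key) {
    // remove() detaches the counter so later increments start a fresh count.
    AtomicInteger counter = counters.remove(key);
    if (counter == null) {
      throw new IllegalArgumentException("Key has never been used in function: " + key);
    }
    return counter.get();
  }
}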
Example 14
  public static CurrencyInfo reloadCurrencyInfo(String id) throws SQLException {
    CurrencyInfo currency = CurrencyInfo.loadCurrency(id);
    if (currency == null) CurrencyDb.remove(id);
    else CurrencyDb.put(currency.CurrencyId, currency);

    return currency;
  }
Example 15
 public ClientProxy getOrCreateProxy(String service, String id) {
   final ObjectNamespace ns = new DefaultObjectNamespace(service, id);
   ClientProxyFuture proxyFuture = proxies.get(ns);
   if (proxyFuture != null) {
     return proxyFuture.get();
   }
   final ClientProxyFactory factory = proxyFactories.get(service);
   if (factory == null) {
     throw new IllegalArgumentException("No factory registered for service: " + service);
   }
   final ClientProxy clientProxy = factory.create(id);
   proxyFuture = new ClientProxyFuture();
   final ClientProxyFuture current = proxies.putIfAbsent(ns, proxyFuture);
   if (current != null) {
     return current.get();
   }
   try {
     initialize(clientProxy);
   } catch (Exception e) {
     proxies.remove(ns);
     proxyFuture.set(e);
     throw ExceptionUtil.rethrow(e);
   }
   proxyFuture.set(clientProxy);
   return clientProxy;
 }
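The get / putIfAbsent / remove-on-failure sequence above is a general recipe for racing initializers: losers of the putIfAbsent race wait on the winner's future, and the winner removes its placeholder if initialization throws so a later call can retry. A stripped-down sketch of the pattern using the JDK's CompletableFuture in place of ClientProxyFuture; the class and method names are invented:

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.function.Function;

// Illustrative once-only initializer cache; on failure the placeholder is
// removed so a later call can retry (mirrors proxies.remove(ns) above).
class InitializingCache<K, V> {
  private final ConcurrentMap<K, CompletableFuture<V>> cache = new ConcurrentHashMap<>();

  V getOrCreate(K key, Function<K, V> factory) {
    CompletableFuture<V> existing = cache.get(key);
    if (existing != null) {
      return existing.join();
    }
    CompletableFuture<V> placeholder = new CompletableFuture<>();
    CompletableFuture<V> raced = cache.putIfAbsent(key, placeholder);
    if (raced != null) {
      return raced.join();          // somebody else won the race
    }
    try {
      V value = factory.apply(key); // initialize outside any lock
      placeholder.complete(value);
      return value;
    } catch (RuntimeException e) {
      cache.remove(key);            // roll back so the next caller retries
      placeholder.completeExceptionally(e);
      throw e;
    }
  }
}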
Example 16
  @Override
  public int doStartTag() throws JspException {
    String key =
        JspUtils.getCurrentServletPath((HttpServletRequest) pageContext.getRequest()) + "/" + name;
    bodyContent = null;
    output = OUTPUT_CACHE.get(key);

    // Output is expired? While producing, it's not considered expired
    // because lastProduced field is set far in the future.
    if (output != null && System.currentTimeMillis() - output.lastProduced > duration) {
      setOutput(output, null);
      OUTPUT_CACHE.remove(key);
      output = null;
    }

    // Output isn't cached, so flag it to be produced.
    if (output == null) {
      output = new Output();
      output.key = key;

      // Make sure there's only one producing output at a time.
      Output o = OUTPUT_CACHE.putIfAbsent(key, output);
      if (o == null) {
        LOGGER.info("Producing [{}] in [{}]", key, Thread.currentThread());
        return EVAL_BODY_BUFFERED;
      }

      output = o;
    }

    return SKIP_BODY;
  }
Example 17
  /**
   * Removes a Resident from the Datasource.
   *
   * @param resident the resident to remove
   * @return true if the resident was removed
   */
  public boolean removeResident(Resident resident) {
    boolean result = residents.remove(resident.getUUID()) != null;

    for (Town t : towns.values()) if (t.hasResident(resident)) t.removeResident(resident);

    return result;
  }
Example 18
 public void removePercolateQuery(String idAsString) {
   HashedBytesRef id = new HashedBytesRef(idAsString);
   Query query = percolateQueries.remove(id);
   if (query != null) {
     shardPercolateService.removedQuery(id, query);
   }
 }
        @Override
        public void run() {

          try {
            try {
              List<Transaction> transactionList = new ArrayList<>();
              int curTime = Convert.getEpochTime();
              for (TransactionImpl transaction : nonBroadcastedTransactions.values()) {
                if (TransactionDb.hasTransaction(transaction.getId())
                    || transaction.getExpiration() < curTime) {
                  nonBroadcastedTransactions.remove(transaction.getId());
                } else if (transaction.getTimestamp() < curTime - 30) {
                  transactionList.add(transaction);
                }
              }

              if (transactionList.size() > 0) {
                Peers.sendToSomePeers(transactionList);
              }

            } catch (Exception e) {
              Logger.logDebugMessage("Error in transaction re-broadcasting thread", e);
            }
          } catch (Throwable t) {
            Logger.logMessage("CRITICAL ERROR. PLEASE REPORT TO THE DEVELOPERS.\n" + t.toString());
            t.printStackTrace();
            System.exit(1);
          }
        }
 private void handleTransportDisconnect(DiscoveryNode node) {
   if (!latestNodes.nodeExists(node.id())) {
     return;
   }
   NodeFD nodeFD = nodesFD.remove(node);
   if (nodeFD == null) {
     return;
   }
   if (!running) {
     return;
   }
   nodeFD.running = false;
   if (connectOnNetworkDisconnect) {
     try {
       transportService.connectToNode(node);
       nodesFD.put(node, new NodeFD());
       threadPool.schedule(pingInterval, ThreadPool.Names.SAME, new SendPingRequest(node));
     } catch (Exception e) {
       logger.trace("[node  ] [{}] transport disconnected (with verified connect)", node);
       notifyNodeFailure(node, "transport disconnected (with verified connect)");
     }
   } else {
     logger.trace("[node  ] [{}] transport disconnected", node);
     notifyNodeFailure(node, "transport disconnected");
   }
 }
Example 21
  void onPullRequestHook(PullRequest pr) throws IOException {

    ConcurrentMap<Integer, GhprbPullRequest> pulls = helper.getTrigger().getPulls();

    if ("closed".equals(pr.getAction())) {
      pulls.remove(pr.getNumber());
    } else if (helper.isProjectDisabled()) {
      logger.log(Level.FINE, "Not processing Pull request since the build is disabled");
    } else if ("opened".equals(pr.getAction()) || "reopened".equals(pr.getAction())) {
      GhprbPullRequest pull = pulls.get(pr.getNumber());
      if (pull == null) {
        pulls.putIfAbsent(pr.getNumber(), new GhprbPullRequest(pr.getPullRequest(), helper, this));
        pull = pulls.get(pr.getNumber());
      }
      pull.check(pr.getPullRequest());
    } else if ("synchronize".equals(pr.getAction())) {
      GhprbPullRequest pull = pulls.get(pr.getNumber());
      if (pull == null) {
        pulls.putIfAbsent(pr.getNumber(), new GhprbPullRequest(pr.getPullRequest(), helper, this));
        pull = pulls.get(pr.getNumber());
      }
      if (pull == null) {
        logger.log(Level.SEVERE, "Pull Request #{0} doesn''t exist", pr.getNumber());
        return;
      }
      pull.check(pr.getPullRequest());
    } else {
      logger.log(Level.WARNING, "Unknown Pull Request hook action: {0}", pr.getAction());
    }
    GhprbTrigger.getDscp().save();
  }
 @Override
 public boolean removeEventHandler(Integer callId) {
   if (callId != null) {
     return eventHandlerMap.remove(callId) != null;
   }
   return false;
 }
Example 23
  /** Checks for stale records and removes them. */
  private void reapStaleRecords(long currentTime) {
    LinkedList<AdvertisementInfo> toRemove = new LinkedList<AdvertisementInfo>();

    synchronized (_clientLock) {
      for (AdvertisementInfo adInfo : _advertisements.values()) {
        long timeDiff = (currentTime / 1000000) - adInfo.timeStamp;

        if (timeDiff > STALE_TIME) toRemove.add(adInfo);
      }

      // reap if necessary
      if (toRemove.size() > 0) {
        StringBuilder sb = new StringBuilder();

        for (AdvertisementInfo adInfo : toRemove) {
          if (sb.length() > 0) sb.append(",");

          _advertisements.remove(adInfo.name);
          sb.append(adInfo.name);
        }

        _logger.info(
            "{} stale record{} removed. [{}]",
            toRemove.size(),
            toRemove.size() == 1 ? " was" : "s were",
            sb.toString());
      }
    }
  } // (method)
 private void deregisterHandler(final int portNumber) {
   final HttpServer httpServer = findHttpServer(portNumber);
   if (httpServer != null) {
     httpServer.close();
     servers.remove(portNumber);
   }
 }
Example 25
  /**
   * Removes candidate from the list of near local candidates.
   *
   * @param cand Candidate to remove.
   */
  public void removeExplicitLock(GridCacheMvccCandidate cand) {
    GridCacheExplicitLockSpan span = pendingExplicit.get(cand.threadId());

    if (span == null) return;

    if (span.removeCandidate(cand)) pendingExplicit.remove(cand.threadId(), span);
  }
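The two-argument pendingExplicit.remove(cand.threadId(), span) above only evicts the exact span instance that was read a moment earlier; if another thread has already replaced the mapping, the call is a no-op. A tiny self-contained illustration of that guard (all names here are made up for the example):

import java.util.List;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.CopyOnWriteArrayList;

public class RemoveIfSameDemo {
  public static void main(String[] args) {
    ConcurrentMap<Long, List<String>> pending = new ConcurrentHashMap<>();
    List<String> mine = new CopyOnWriteArrayList<>();
    pending.put(1L, mine);

    // Another thread may replace the entry between get() and remove();
    // remove(key, value) discards it only if it is still the instance we saw.
    List<String> seen = pending.get(1L);
    if (seen != null && seen.isEmpty()) {
      boolean removed = pending.remove(1L, seen);
      System.out.println(removed); // true here; false if the mapping had changed
    }
  }
}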
Example 26
  /* ------------------------------------------------------------ */
  private void shrinkCache() {
    // While we need to shrink
    while (_cache.size() > 0
        && (_cachedFiles.get() > _maxCachedFiles || _cachedSize.get() > _maxCacheSize)) {
      // Scan the entire cache and generate an ordered list by last accessed time.
      SortedSet<Content> sorted =
          new TreeSet<Content>(
              new Comparator<Content>() {
                public int compare(Content c1, Content c2) {
                  if (c1._lastAccessed < c2._lastAccessed) return -1;

                  if (c1._lastAccessed > c2._lastAccessed) return 1;

                  if (c1._length < c2._length) return -1;

                  return c1._key.compareTo(c2._key);
                }
              });
      for (Content content : _cache.values()) sorted.add(content);

      // Invalidate least recently used first
      for (Content content : sorted) {
        if (_cachedFiles.get() <= _maxCachedFiles && _cachedSize.get() <= _maxCacheSize) break;
        if (content == _cache.remove(content.getKey())) content.invalidate();
      }
    }
  }
  @Override
  public boolean removePort(String uuid) {
    if (!portExists(uuid)) {
      return false;
    }
    NeutronPort port = getPort(uuid);
    portDB.remove(uuid);
    INeutronNetworkCRUD networkCRUD = NeutronCRUDInterfaces.getINeutronNetworkCRUD(this);
    INeutronSubnetCRUD systemCRUD = NeutronCRUDInterfaces.getINeutronSubnetCRUD(this);

    NeutronNetwork network = networkCRUD.getNetwork(port.getNetworkUUID());
    network.removePort(port);
    Iterator<Neutron_IPs> fixedIPIterator = port.getFixedIPs().iterator();
    while (fixedIPIterator.hasNext()) {
      Neutron_IPs ip = fixedIPIterator.next();
      NeutronSubnet subnet = systemCRUD.getSubnet(ip.getSubnetUUID());
      if (!ip.getIpAddress().equals(subnet.getGatewayIP())) {
        subnet.releaseIP(ip.getIpAddress());
      } else {
        subnet.resetGatewayIPAllocated();
      }
      subnet.removePort(port);
    }
    return true;
  }
  /**
   * @param sesId Session ID.
   * @param taskNodeId Task node ID.
   * @param taskName Task name.
   * @param dep Deployment.
   * @param taskClsName Task class name.
   * @param top Topology.
   * @param startTime Execution start time.
   * @param endTime Execution end time.
   * @param siblings Collection of siblings.
   * @param attrs Map of attributes.
   * @param fullSup {@code True} to enable distributed session attributes and checkpoints.
   * @return New session if one did not exist, or existing one.
   */
  public GridTaskSessionImpl createTaskSession(
      GridUuid sesId,
      UUID taskNodeId,
      String taskName,
      @Nullable GridDeployment dep,
      String taskClsName,
      @Nullable Collection<UUID> top,
      long startTime,
      long endTime,
      Collection<GridComputeJobSibling> siblings,
      Map<Object, Object> attrs,
      boolean fullSup) {
    if (!fullSup) {
      return new GridTaskSessionImpl(
          taskNodeId,
          taskName,
          dep,
          taskClsName,
          sesId,
          top,
          startTime,
          endTime,
          siblings,
          attrs,
          ctx,
          fullSup);
    }

    while (true) {
      GridTaskSessionImpl ses = sesMap.get(sesId);

      if (ses == null) {
        GridTaskSessionImpl old =
            sesMap.putIfAbsent(
                sesId,
                ses =
                    new GridTaskSessionImpl(
                        taskNodeId,
                        taskName,
                        dep,
                        taskClsName,
                        sesId,
                        top,
                        startTime,
                        endTime,
                        siblings,
                        attrs,
                        ctx,
                        fullSup));

        if (old != null) ses = old;
        else
          // Return without acquire.
          return ses;
      }

      if (ses.acquire()) return ses;
      else sesMap.remove(sesId, ses);
    }
  }
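The while (true) loop above combines three ConcurrentMap moves: putIfAbsent publishes a new session, acquire() can fail if another thread is concurrently releasing the same session, and remove(sesId, ses) evicts only that dead instance before retrying. A generic sketch of the same acquire-or-recreate loop; the Session class and its reference counting are assumptions made for the example, not the original implementation:

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.atomic.AtomicInteger;

// Hypothetical ref-counted resource: acquire() fails once the count hits zero.
class Session {
  private final AtomicInteger refs = new AtomicInteger(1); // creator holds one ref

  boolean acquire() {
    for (;;) {
      int n = refs.get();
      if (n == 0) {
        return false;               // already released; this instance is dead
      }
      if (refs.compareAndSet(n, n + 1)) {
        return true;
      }
    }
  }
}

class Sessions {
  private final ConcurrentMap<String, Session> map = new ConcurrentHashMap<>();

  Session getOrCreate(String id) {
    while (true) {
      Session ses = map.get(id);
      if (ses == null) {
        Session created = new Session();
        Session old = map.putIfAbsent(id, created);
        if (old == null) {
          return created;           // freshly published, creator already holds it
        }
        ses = old;
      }
      if (ses.acquire()) {
        return ses;
      }
      map.remove(id, ses);          // evict only this dead instance, then retry
    }
  }
}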
 /**
  * Clear all consumed data for the given process id from this data pipeline.
  *
  * @param id The process id.
  * @return The map of output data which has been stored for the given process id until now.
  */
 @Override
 public Map<String, Object> clear(Long id) {
   Semaphore resultSemaphoreForProcess = resultSemaphores.remove(id);
   Map<String, Object> result = pendingOutputs.remove(id);
   resultSemaphoreForProcess.release();
   return result;
 }
  @SuppressWarnings("unchecked")
  protected void internalRemoveFromClusterNodeLabels(Collection<String> labelsToRemove) {
    // remove labels from nodes
    for (Map.Entry<String, Host> nodeEntry : nodeCollections.entrySet()) {
      Host host = nodeEntry.getValue();
      if (null != host) {
        host.labels.removeAll(labelsToRemove);
        for (Node nm : host.nms.values()) {
          if (nm.labels != null) {
            nm.labels.removeAll(labelsToRemove);
          }
        }
      }
    }

    // remove labels from node labels collection
    for (String label : labelsToRemove) {
      labelCollections.remove(label);
    }

    // create event to remove labels
    if (null != dispatcher) {
      dispatcher.getEventHandler().handle(new RemoveClusterNodeLabels(labelsToRemove));
    }

    LOG.info("Remove labels: [" + StringUtils.join(labelsToRemove.iterator(), ",") + "]");
  }