private synchronized String allocName(String type, String namePattern) {
    Map<String, Integer> patterns = nameTypes.get(type);
    if (patterns == null) {
      patterns = new HashMap<String, Integer>();
      nameTypes.put(type, patterns);
    }

    Integer id = patterns.get(namePattern);
    if (id == null) id = 0;

    id++;
    patterns.put(namePattern, id);
    String agentName = namePattern.replaceFirst("#", id.toString());
    if (agentName.equals(namePattern))
      Log.warn("AllocName: missing '#' in name pattern '" + namePattern + "'");
    else
      Log.debug(
          "AllocName: for type="
              + type
              + " assigned '"
              + agentName
              + "' from pattern '"
              + namePattern
              + "'");
    return agentName;
  }
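A usage sketch (type and pattern strings hypothetical): each (type, pattern) pair gets its own counter, and the first '#' in the pattern is replaced with the next value.

  // Hypothetical usage: counters are independent per (type, pattern) pair.
  String a1 = allocName("agent", "agent-#"); // "agent-1"
  String a2 = allocName("agent", "agent-#"); // "agent-2"
  String p1 = allocName("probe", "probe-#"); // "probe-1"
  String bad = allocName("agent", "agent"); // "agent", logs a missing-'#' warning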
Example #2
  /**
   * Add a record telling what entity caps node a user has.
   *
   * @param user the user (Full JID)
   * @param node the node (of the caps packet extension)
   * @param hash the hashing algorithm used to calculate <tt>ver</tt>
   * @param ver the version (of the caps packet extension)
   * @param ext the ext (of the caps packet extension)
   * @param online indicates if the user is online
   */
  private void addUserCapsNode(
      String user, String node, String hash, String ver, String ext, boolean online) {
    if ((user != null) && (node != null) && (hash != null) && (ver != null)) {
      Caps caps = userCaps.get(user);

      if ((caps == null)
          || !caps.node.equals(node)
          || !caps.hash.equals(hash)
          || !caps.ver.equals(ver)) {
        caps = new Caps(node, hash, ver, ext);

        userCaps.put(user, caps);
      } else return;

      // Fire userCapsNodeAdded.
      UserCapsNodeListener[] listeners;

      synchronized (userCapsNodeListeners) {
        listeners = userCapsNodeListeners.toArray(NO_USER_CAPS_NODE_LISTENERS);
      }
      if (listeners.length != 0) {
        String nodeVer = caps.getNodeVer();

        for (UserCapsNodeListener listener : listeners)
          listener.userCapsNodeAdded(user, nodeVer, online);
      }
    }
  }
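The snippet assumes a small Caps value holder. A minimal sketch of the assumed shape, where only the four fields and getNodeVer() are grounded in the code above and the "node#ver" format is an assumption:

  // Minimal sketch of the assumed Caps holder.
  private static class Caps {
    final String node;
    final String hash;
    final String ver;
    final String ext;

    Caps(String node, String hash, String ver, String ext) {
      this.node = node;
      this.hash = hash;
      this.ver = ver;
      this.ext = ext;
    }

    /** Node and version joined as "node#ver" (format assumed here). */
    String getNodeVer() {
      return node + "#" + ver;
    }
  }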
Example #3
  /**
   * Remove records telling what entity caps node a contact has.
   *
   * @param contact the contact
   */
  public void removeContactCapsNode(Contact contact) {
    Caps caps = null;
    String lastRemovedJid = null;

    Iterator<String> iter = userCaps.keySet().iterator();
    while (iter.hasNext()) {
      String jid = iter.next();

      if (StringUtils.parseBareAddress(jid).equals(contact.getAddress())) {
        caps = userCaps.get(jid);
        lastRemovedJid = jid;
        iter.remove();
      }
    }

    // Fire only for the last removed JID: at the end, a single event
    // goes out of the protocol for the whole contact.
    if (caps != null) {
      UserCapsNodeListener[] listeners;
      synchronized (userCapsNodeListeners) {
        listeners = userCapsNodeListeners.toArray(NO_USER_CAPS_NODE_LISTENERS);
      }
      if (listeners.length != 0) {
        String nodeVer = caps.getNodeVer();

        for (UserCapsNodeListener listener : listeners)
          listener.userCapsNodeRemoved(lastRemovedJid, nodeVer, false);
      }
    }
  }
Example #4
    /** {@inheritDoc} */
    @Nullable
    @Override
    public Map<String, Collection<?>> run(GridStreamerContext ctx, Collection<Bar> bars)
        throws GridException {
      ConcurrentMap<String, Bar> loc = ctx.localSpace();

      GridStreamerWindow win = ctx.window("stage2");

      // Add bars to the window.
      win.enqueueAll(bars);

      Collection<Bar> polled = win.pollEvictedBatch();

      if (!polled.isEmpty()) {
        Map<String, Bar> map = new HashMap<>();

        for (Bar polledBar : polled) {
          String symbol = polledBar.symbol();

          Bar bar = map.get(symbol);

          if (bar == null) map.put(symbol, bar = new Bar(symbol));

          bar.update(polledBar);
        }

        loc.putAll(map);
      }

      return null;
    }
Example #5
    /** {@inheritDoc} */
    @Nullable
    @Override
    public Map<String, Collection<?>> run(GridStreamerContext ctx, Collection<Quote> quotes)
        throws GridException {
      GridStreamerWindow win = ctx.window("stage1");

      // Add quotes to the window.
      win.enqueueAll(quotes);

      Collection<Quote> polled = win.pollEvictedBatch();

      if (!polled.isEmpty()) {
        Map<String, Bar> map = new HashMap<>();

        for (Quote quote : polled) {
          String symbol = quote.symbol();

          Bar bar = map.get(symbol);

          if (bar == null) map.put(symbol, bar = new Bar(symbol));

          bar.update(quote.price());
        }

        return Collections.<String, Collection<?>>singletonMap(ctx.nextStageName(), map.values());
      }

      return null;
    }
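The two stages assume a Bar aggregate with two update overloads: stage1 folds individual quote prices into per-symbol bars, and stage2 merges evicted bars into the local space. A minimal OHLC-style sketch, in which everything beyond the constructor, symbol(), and the two update signatures is an assumption:

    // Hypothetical per-symbol OHLC aggregate matching the calls above.
    public class Bar {
      private final String symbol;
      private double open;
      private double high = Double.NEGATIVE_INFINITY;
      private double low = Double.POSITIVE_INFINITY;
      private double close;
      private boolean empty = true;

      public Bar(String symbol) { this.symbol = symbol; }

      public String symbol() { return symbol; }

      /** Fold a single price into this bar. */
      public synchronized void update(double price) {
        if (empty) { open = price; empty = false; }
        high = Math.max(high, price);
        low = Math.min(low, price);
        close = price;
      }

      /** Merge another bar into this one. */
      public synchronized void update(Bar bar) {
        if (empty) { open = bar.open; empty = false; }
        high = Math.max(high, bar.high);
        low = Math.min(low, bar.low);
        close = bar.close;
      }
    }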
Example #6
 @SuppressWarnings("unchecked")
 private void finish(Properties lastIndexTimeProps) {
   LOG.info("Import completed successfully");
   statusMessages.put(
       "",
       "Indexing completed. Added/Updated: "
           + importStatistics.docCount
           + " documents. Deleted "
           + importStatistics.deletedDocCount
           + " documents.");
   if (requestParameters.commit) {
     writer.commit(requestParameters.optimize);
     addStatusMessage("Committed");
     if (requestParameters.optimize) addStatusMessage("Optimized");
   }
   try {
     writer.persist(lastIndexTimeProps);
   } catch (Exception e) {
     LOG.error("Could not write property file", e);
     statusMessages.put(
         "error",
         "Could not write property file. Delta imports will not work. "
             + "Make sure your conf directory is writable");
   }
 }
Example #7
 public void run() {
   HazelcastClient hClient = TestUtility.newHazelcastClient(h);
   while (run) {
     Map<String, String> clientMap = hClient.getMap("putFromMultipleThreads");
     clientMap.put(String.valueOf(counter.incrementAndGet()), String.valueOf(counter.get()));
   }
 }
Example #8
  private Map<Integer, Double> computeOptimizationScores(
      int expMinCluster, int expMaxCluster, int hop, Optimizer optimizer) {

    List<Integer> searchSpace = computeSearchSpace(expMinCluster, expMaxCluster, hop);

    List<Integer> newSearchSpace = new ArrayList<>(searchSpace);
    newSearchSpace.removeAll(scores.keySet());

    OptimizationManager_old optimizationManager =
        new OptimizationManager_old(parallelWorkers, maxJobsPerWorker);
    Map<Integer, Double> newScores = optimizationManager.process(optimizer, newSearchSpace);
    this.scores.putAll(newScores);

    // Collect scores for the full search space (cached plus newly computed).
    for (int s : searchSpace) newScores.put(s, this.scores.get(s));

    System.out.println("Tried search space: " + newScores);
    // Are the scores valid? If not, halve the hop, recompute the search space, re-optimize.
    if (invalid(newScores)) {
      if (hop != 1) {
        hop = reduceHop(hop);
        return computeOptimizationScores(expMinCluster, expMaxCluster, hop, optimizer);
      } else {
        return null;
      }
    }
    return newScores;
  }
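reduceHop is not shown; given the hop != 1 guard above and the "halve the hop" comment, a plausible sketch (hypothetical, not from the source) is:

  // Hypothetical reduceHop: halve the step, but never go below 1.
  private int reduceHop(int hop) {
    return Math.max(1, hop / 2);
  }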
Example #9
  /**
   * If the key is to be associated with a valid value, a mutation is created for it with the given
   * table and columns. In the event the value in the column is missing (i.e., null), then it is
   * marked for {@link Deletion}. Similarly, if the entire value for a key is missing (i.e., null),
   * then the entire key is marked for {@link Deletion}.
   *
   * @param keyColumns the key to write.
   * @param values the values to write.
   * @throws IOException
   */
  @Override
  public void write(Map<String, ByteBuffer> keyColumns, List<ByteBuffer> values)
      throws IOException {
    TokenRange range = ringCache.getRange(getPartitionKey(keyColumns));

    // get the client for the given range, or create a new one
    final InetAddress address = ringCache.getEndpoints(range).get(0);
    RangeClient client = clients.get(address);
    if (client == null) {
      // haven't seen keys for this range: create new client
      client = new RangeClient(ringCache.getEndpoints(range));
      client.start();
      clients.put(address, client);
    }

    // add primary key columns to the bind variables
    List<ByteBuffer> allValues = new ArrayList<ByteBuffer>(values);
    for (ColumnMetadata column : partitionKeyColumns)
      allValues.add(keyColumns.get(column.getName()));
    for (ColumnMetadata column : clusterColumns) allValues.add(keyColumns.get(column.getName()));

    client.put(allValues);

    if (progressable != null) progressable.progress();
    if (context != null) HadoopCompat.progress(context);
  }
Example #10
  public MemtableUnfilteredPartitionIterator makePartitionIterator(
      final ColumnFilter columnFilter, final DataRange dataRange, final boolean isForThrift) {
    AbstractBounds<PartitionPosition> keyRange = dataRange.keyRange();

    boolean startIsMin = keyRange.left.isMinimum();
    boolean stopIsMin = keyRange.right.isMinimum();

    boolean isBound = keyRange instanceof Bounds;
    boolean includeStart = isBound || keyRange instanceof IncludingExcludingBounds;
    boolean includeStop = isBound || keyRange instanceof Range;
    Map<PartitionPosition, AtomicBTreePartition> subMap;
    if (startIsMin)
      subMap = stopIsMin ? partitions : partitions.headMap(keyRange.right, includeStop);
    else
      subMap =
          stopIsMin
              ? partitions.tailMap(keyRange.left, includeStart)
              : partitions.subMap(keyRange.left, includeStart, keyRange.right, includeStop);

    int minLocalDeletionTime = Integer.MAX_VALUE;

    // avoid iterating over the memtable if we purge all tombstones
    if (cfs.getCompactionStrategyManager().onlyPurgeRepairedTombstones())
      minLocalDeletionTime = findMinLocalDeletionTime(subMap.entrySet().iterator());

    final Iterator<Map.Entry<PartitionPosition, AtomicBTreePartition>> iter =
        subMap.entrySet().iterator();

    return new MemtableUnfilteredPartitionIterator(
        cfs, iter, isForThrift, minLocalDeletionTime, columnFilter, dataRange);
  }
Example #11
 private InboundTransferTask addTransfer(Address source, Set<Integer> segmentsFromSource) {
   synchronized (this) {
     segmentsFromSource.removeAll(
         transfersBySegment.keySet()); // already in progress segments are excluded
     if (!segmentsFromSource.isEmpty()) {
       InboundTransferTask inboundTransfer =
           new InboundTransferTask(
               segmentsFromSource,
               source,
               cacheTopology.getTopologyId(),
               this,
               rpcManager,
               commandsFactory,
               timeout,
               cacheName);
       for (int segmentId : segmentsFromSource) {
         transfersBySegment.put(segmentId, inboundTransfer);
       }
       List<InboundTransferTask> inboundTransfers =
           transfersBySource.get(inboundTransfer.getSource());
       if (inboundTransfers == null) {
         inboundTransfers = new ArrayList<InboundTransferTask>();
         transfersBySource.put(inboundTransfer.getSource(), inboundTransfers);
       }
       inboundTransfers.add(inboundTransfer);
       taskQueue.add(inboundTransfer);
       return inboundTransfer;
     } else {
       return null;
     }
   }
 }
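As a design note, on Java 8+ the get-or-create block for transfersBySource collapses into a single computeIfAbsent call; a behavior-equivalent sketch:

   // Java 8+ equivalent of the get-or-create block above.
   List<InboundTransferTask> inboundTransfers =
       transfersBySource.computeIfAbsent(
           inboundTransfer.getSource(), k -> new ArrayList<>());
   inboundTransfers.add(inboundTransfer);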
Example #12
 protected void handleMapPutMany(String[] args) {
   int count = 1;
   if (args.length > 1) count = Integer.parseInt(args[1]);
   int b = 100;
   byte[] value = new byte[b];
   if (args.length > 2) {
     b = Integer.parseInt(args[2]);
     value = new byte[b];
   }
   int start = getMap().size();
   if (args.length > 3) {
     start = Integer.parseInt(args[3]);
   }
   Map<String, byte[]> theMap = new HashMap<String, byte[]>(count);
   for (int i = 0; i < count; i++) {
     theMap.put("key" + (start + i), value);
   }
   long t0 = Clock.currentTimeMillis();
   getMap().putAll(theMap);
   long t1 = Clock.currentTimeMillis();
   if (t1 - t0 > 1) {
     println(
         "size = "
             + getMap().size()
             + ", "
             + count * 1000 / (t1 - t0)
             + " evt/s, "
             + (count * 1000 / (t1 - t0)) * (b * 8) / 1024
             + " Kbit/s, "
             + count * b / 1024
             + " KB added");
   }
 }
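As a worked example of the printed stats (all figures hypothetical): with count = 10000 entries of b = 100 bytes taking t1 - t0 = 250 ms, the line reports 10000 * 1000 / 250 = 40000 evt/s, 40000 * (100 * 8) / 1024 = 31250 Kbit/s, and 10000 * 100 / 1024 = 976 KB added (integer division throughout).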
Example #13
  /**
   * Query all purchases made at a specific store for 3 specific products. This query uses
   * cross-cache joins between {@link DimStore}, {@link DimProduct} objects stored in {@code
   * 'replicated'} cache and {@link FactPurchase} objects stored in {@code 'partitioned'} cache.
   *
   * @throws IgniteException If failed.
   */
  private static void queryProductPurchases() {
    IgniteCache<Integer, FactPurchase> factCache = Ignition.ignite().cache(PARTITIONED_CACHE_NAME);

    // All purchases for certain product made at store2.
    // =================================================

    DimProduct p1 = rand(dataProduct.values());
    DimProduct p2 = rand(dataProduct.values());
    DimProduct p3 = rand(dataProduct.values());

    System.out.println(
        "IDs of products [p1=" + p1.getId() + ", p2=" + p2.getId() + ", p3=" + p3.getId() + ']');

    // Create cross cache query to get all purchases made at store2
    // for specified products.
    QueryCursor<Cache.Entry<Integer, FactPurchase>> prodPurchases =
        factCache.query(
            new SqlQuery(
                    FactPurchase.class,
                    "from \""
                        + REPLICATED_CACHE_NAME
                        + "\".DimStore, \""
                        + REPLICATED_CACHE_NAME
                        + "\".DimProduct, "
                        + "\""
                        + PARTITIONED_CACHE_NAME
                        + "\".FactPurchase "
                        + "where DimStore.id=FactPurchase.storeId and DimProduct.id=FactPurchase.productId "
                        + "and DimStore.name=? and DimProduct.id in(?, ?, ?)")
                .setArgs("Store2", p1.getId(), p2.getId(), p3.getId()));

    printQueryResults(
        "All purchases made at store2 for 3 specific products:", prodPurchases.getAll());
  }
Example #14
  /** Start the gossiper with the generation # retrieved from the System table */
  public void start(InetAddress localEndpoint, int generationNbr) {
    localEndpoint_ = localEndpoint;
    /* Get the seeds from the config and initialize them. */
    Set<InetAddress> seedHosts = DatabaseDescriptor.getSeeds();
    for (InetAddress seed : seedHosts) {
      if (seed.equals(localEndpoint)) continue;
      seeds_.add(seed);
    }

    /* initialize the heartbeat state for this localEndpoint */
    EndpointState localState = endpointStateMap_.get(localEndpoint_);
    if (localState == null) {
      HeartBeatState hbState = new HeartBeatState(generationNbr);
      localState = new EndpointState(hbState);
      localState.isAlive(true);
      localState.isAGossiper(true);
      endpointStateMap_.put(localEndpoint_, localState);
    }

    // notify snitches that Gossiper is about to start
    DatabaseDescriptor.getEndpointSnitch().gossiperStarting();

    scheduledGossipTask =
        StorageService.scheduledTasks.scheduleWithFixedDelay(
            new GossipTask(),
            Gossiper.intervalInMillis_,
            Gossiper.intervalInMillis_,
            TimeUnit.MILLISECONDS);
  }
Example #15
  public static void shutdown() {
    logger_.info("Shutting down ...");
    synchronized (MessagingService.class) {
      /* Stop listening on any socket */
      for (SelectionKey skey : listenSockets_.values()) {
        SelectorManager.getSelectorManager().cancel(skey);
      }
      listenSockets_.clear();

      /* Shutdown the threads in the EventQueue's */
      messageDeserializationExecutor_.shutdownNow();
      messageSerializerExecutor_.shutdownNow();
      messageDeserializerExecutor_.shutdownNow();
      streamExecutor_.shutdownNow();

      /* shut down the cachetables */
      taskCompletionMap_.shutdown();
      callbackMap_.shutdown();

      /* Interrupt the selector manager thread */
      SelectorManager.getSelectorManager().interrupt();

      poolTable_.clear();
      verbHandlers_.clear();
      bShutdown_ = true;
    }
    logger_.debug("Shutdown invocation complete.");
  }
Example #16
  void notifyFailureDetector(List<GossipDigest> gDigests) {
    IFailureDetector fd = FailureDetector.instance;
    for (GossipDigest gDigest : gDigests) {
      EndpointState localEndpointState = endpointStateMap_.get(gDigest.endpoint_);
      /*
       * If the local endpoint state exists, report to the FD only
       * if the versions work out.
       */
      if (localEndpointState != null) {
        int localGeneration = localEndpointState.getHeartBeatState().generation_;
        int remoteGeneration = gDigest.generation_;
        if (remoteGeneration > localGeneration) {
          fd.report(gDigest.endpoint_);
          continue;
        }

        if (remoteGeneration == localGeneration) {
          int localVersion = getMaxEndpointStateVersion(localEndpointState);
          // int localVersion =
          // endpointStateMap_.get(gDigest.endpoint_).getHeartBeatState().getHeartBeatVersion();
          int remoteVersion = gDigest.maxVersion_;
          if (remoteVersion > localVersion) {
            fd.report(gDigest.endpoint_);
          }
        }
      }
    }
  }
Example #17
  void notifyFailureDetector(Map<InetAddress, EndpointState> remoteEpStateMap) {
    IFailureDetector fd = FailureDetector.instance;
    for (Entry<InetAddress, EndpointState> entry : remoteEpStateMap.entrySet()) {
      InetAddress endpoint = entry.getKey();
      EndpointState remoteEndpointState = entry.getValue();
      EndpointState localEndpointState = endpointStateMap_.get(endpoint);
      /*
       * If the local endpoint state exists, report to the FD only
       * if the versions work out.
       */
      if (localEndpointState != null) {
        int localGeneration = localEndpointState.getHeartBeatState().generation_;
        int remoteGeneration = remoteEndpointState.getHeartBeatState().generation_;
        if (remoteGeneration > localGeneration) {
          fd.report(endpoint);
          continue;
        }

        if (remoteGeneration == localGeneration) {
          int localVersion = getMaxEndpointStateVersion(localEndpointState);
          // int localVersion = localEndpointState.getHeartBeatState().getHeartBeatVersion();
          int remoteVersion = remoteEndpointState.getHeartBeatState().getHeartBeatVersion();
          if (remoteVersion > localVersion) {
            fd.report(endpoint);
          }
        }
      }
    }
  }
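Both notifyFailureDetector overloads apply the same rule: report an endpoint whose remote state is strictly newer. Extracted as a pure helper (a sketch, not code from the source), the comparison reads:

  // Sketch of the shared "versions work out" rule: generation wins;
  // within an equal generation, the max endpoint state version decides.
  static boolean remoteIsNewer(int localGen, int localVer, int remoteGen, int remoteVer) {
    if (remoteGen != localGen) return remoteGen > localGen;
    return remoteVer > localVer;
  }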
Example #18
  /** @param aProperties the updated properties. */
  @SuppressWarnings("rawtypes")
  final void setProperties(final Dictionary aProperties) {
    final Map<String, String> newProps = new HashMap<String, String>();

    Enumeration keys = aProperties.keys();
    while (keys.hasMoreElements()) {
      final String key = (String) keys.nextElement();
      if (!KNOWN_KEYS.contains(key) && !IGNORED_KEYS.contains(key)) {
        LOG.log(Level.WARNING, "Unknown/unsupported profile key: " + key);
        continue;
      }

      final String value = aProperties.get(key).toString();
      newProps.put(key, value.trim());
    }

    // Verify whether all known keys are defined...
    final List<String> checkedKeys = new ArrayList<String>(KNOWN_KEYS);
    checkedKeys.removeAll(newProps.keySet());
    if (!checkedKeys.isEmpty()) {
      throw new IllegalArgumentException(
          "Profile settings not complete! Missing keys are: " + checkedKeys.toString());
    }

    this.properties.putAll(newProps);

    LOG.log(
        Level.INFO,
        "New device profile settings applied for {1} ({0}) ...", //
        new Object[] {getType(), getDescription()});
  }
Example #19
  /**
   * The gossip digest is built based on randomization rather than just looping through the
   * collection of live endpoints.
   *
   * @param gDigests list of Gossip Digests.
   */
  void makeRandomGossipDigest(List<GossipDigest> gDigests) {
    /* Add the local endpoint state */
    EndpointState epState = endpointStateMap_.get(localEndpoint_);
    int generation = epState.getHeartBeatState().getGeneration();
    int maxVersion = getMaxEndpointStateVersion(epState);
    gDigests.add(new GossipDigest(localEndpoint_, generation, maxVersion));

    List<InetAddress> endpoints = new ArrayList<InetAddress>(endpointStateMap_.keySet());
    Collections.shuffle(endpoints, random_);
    for (InetAddress endpoint : endpoints) {
      epState = endpointStateMap_.get(endpoint);
      if (epState != null) {
        generation = epState.getHeartBeatState().getGeneration();
        maxVersion = getMaxEndpointStateVersion(epState);
        gDigests.add(new GossipDigest(endpoint, generation, maxVersion));
      } else {
        gDigests.add(new GossipDigest(endpoint, 0, 0));
      }
    }

    /* FOR DEBUG ONLY - remove later */
    StringBuilder sb = new StringBuilder();
    for (GossipDigest gDigest : gDigests) {
      sb.append(gDigest);
      sb.append(" ");
    }
    if (logger_.isTraceEnabled()) logger_.trace("Gossip Digests are : " + sb.toString());
  }
Example #20
 public void verify() {
   assertEquals(numberOfRequests.size(), requestsNum);
   assertEquals(numberOfJobsMap.keySet().size(), requestsNum);
   for (int num : numberOfJobsMap.values()) {
     assertEquals(num, jobsNumPerRequest);
   }
 }
Example #21
    /**
     * Parse the parameters of a connection into a CoreNLP properties file that can be passed into
     * {@link StanfordCoreNLP}, and used in the I/O stages.
     *
     * @param httpExchange The http exchange; effectively, the request information.
     * @return A {@link Properties} object corresponding to a combination of default and passed
     *     properties.
     * @throws UnsupportedEncodingException Thrown if we could not decode the key/value pairs with
     *     UTF-8.
     */
    private Properties getProperties(HttpExchange httpExchange)
        throws UnsupportedEncodingException {
      // Load the default properties
      Properties props = new Properties();
      defaultProps
          .entrySet()
          .stream()
          .forEach(
              entry -> props.setProperty(entry.getKey().toString(), entry.getValue().toString()));

      // Try to get more properties from query string.
      Map<String, String> urlParams = getURLParams(httpExchange.getRequestURI());
      if (urlParams.containsKey("properties")) {
        StringUtils.decodeMap(URLDecoder.decode(urlParams.get("properties"), "UTF-8"))
            .entrySet()
            .forEach(entry -> props.setProperty(entry.getKey(), entry.getValue()));
      } else if (urlParams.containsKey("props")) {
        StringUtils.decodeMap(URLDecoder.decode(urlParams.get("properties"), "UTF-8"))
            .entrySet()
            .forEach(entry -> props.setProperty(entry.getKey(), entry.getValue()));
      }

      // Make sure the properties compile
      props.setProperty(
          "annotators",
          StanfordCoreNLP.ensurePrerequisiteAnnotators(
              props.getProperty("annotators").split("[, \t]+")));

      return props;
    }
Example #22
  /** @return map from each bloc index in B to the index of the matching bloc in A */
  public static Map<Integer, Integer> diff(FileDesc a, FileDesc b) {
    Map<Integer, List<IndexedHash>> blocA = new HashMap<Integer, List<IndexedHash>>();
    int i = 0;
    for (Bloc bloc : a.blocs) {
      List<IndexedHash> l = blocA.get(bloc.roll);
      if (l == null) {
        l = new ArrayList<IndexedHash>();
        blocA.put(bloc.roll, l);
      }
      l.add(new IndexedHash(i++, bloc.hash));
    }

    Map<Integer, Integer> map = new HashMap<Integer, Integer>();
    loop:
    for (i = 0; i < b.blocs.length; i++) {
      Bloc blocB = b.blocs[i];
      List<IndexedHash> list = blocA.get(blocB.roll);
      if (list != null) {
        for (IndexedHash bloc : list) {
          if (blocB.hash.equals(bloc.h)) {
            map.put(i, bloc.i);
            continue loop;
          }
        }
      }
    }
    return map;
  }
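diff() works rsync-style: blocs of A are indexed by their weak rolling checksum, then each candidate bloc of B is confirmed against the strong hash. The IndexedHash pair type is assumed; a minimal sketch using the field names i and h seen above, with the hash type also an assumption:

  // Minimal sketch of the assumed (index, strong hash) pair.
  private static class IndexedHash {
    final int i; // bloc index within a.blocs
    final Object h; // strong hash of that bloc (concrete type assumed; must implement value equals())

    IndexedHash(int i, Object h) {
      this.i = i;
      this.h = h;
    }
  }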
Example #23
 private List<TransactionInfo> getTransactions(
     Address source, Set<Integer> segments, int topologyId) {
   if (trace) {
     log.tracef(
         "Requesting transactions for segments %s of cache %s from node %s",
         segments, cacheName, source);
   }
   // get transactions and locks
   try {
     StateRequestCommand cmd =
         commandsFactory.buildStateRequestCommand(
             StateRequestCommand.Type.GET_TRANSACTIONS,
             rpcManager.getAddress(),
             topologyId,
             segments);
     Map<Address, Response> responses =
         rpcManager.invokeRemotely(
             Collections.singleton(source), cmd, ResponseMode.SYNCHRONOUS_IGNORE_LEAVERS, timeout);
     Response response = responses.get(source);
     if (response instanceof SuccessfulResponse) {
       return (List<TransactionInfo>) ((SuccessfulResponse) response).getResponseValue();
     }
     log.failedToRetrieveTransactionsForSegments(segments, cacheName, source, null);
   } catch (CacheException e) {
     log.failedToRetrieveTransactionsForSegments(segments, cacheName, source, e);
   }
   return null;
 }
Example #24
  /**
   * Execute the {@link Callable} tasks in parallel (per the configured size of the {@link
   * WorkerPool}) and wait for them to complete.
   *
   * @param tasks a map of {@link Callable}s with keys by which you will be able to access each
   *     return value
   * @return the return values of each {@link Callable}s mapped by their input key
   */
  public <K, V> Map<K, V> invokeAll(Map<K, Callable<V>> tasks) {
    String caller =
        LOGGER.isDebugEnabled() ? Thread.currentThread().getStackTrace()[2].toString() : "n/a";
    LOGGER.debug("[%s] is invoking %d mapped tasks", caller, tasks.size());

    List<K> orderedKeys = new ArrayList<K>(tasks.size());
    List<Callable<V>> orderedTasks = new ArrayList<Callable<V>>(tasks.size());
    for (Map.Entry<K, Callable<V>> entry : tasks.entrySet()) {
      orderedKeys.add(entry.getKey());
      orderedTasks.add(entry.getValue());
    }

    try {
      long start = System.currentTimeMillis();
      List<Future<V>> executorResults = executorService.invokeAll(orderedTasks);
      long finish = System.currentTimeMillis();
      LOGGER.debug("[%s] invoked %d mapped tasks in %d ms", caller, tasks.size(), finish - start);

      Map<K, V> mappedResults = new LinkedHashMap<K, V>(tasks.size());
      for (int i = 0; i < tasks.size(); i++) {
        K key = orderedKeys.get(i);
        V result = executorResults.get(i).get();
        mappedResults.put(key, result);
      }
      return mappedResults;
    } catch (InterruptedException e) {
      throw new RuntimeException(e);
    } catch (ExecutionException e) {
      throw new RuntimeException(e);
    }
  }
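A usage sketch for invokeAll, with the workerPool instance and task bodies hypothetical: keys let the caller fetch results without tracking submission order.

  // Hypothetical usage: results come back keyed, not positional.
  Map<String, Callable<Integer>> tasks = new LinkedHashMap<String, Callable<Integer>>();
  tasks.put("users", () -> countUsers());
  tasks.put("orders", () -> countOrders());

  Map<String, Integer> counts = workerPool.invokeAll(tasks);
  int users = counts.get("users");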
Example #25
 /** Returns the {@link SegmentCacheIndex} for a given {@link SegmentHeader}. */
 private SegmentCacheIndex getIndex(SegmentHeader header) {
   // First we check the indexes that already exist.
   // This is fast.
   for (Entry<RolapStar, SegmentCacheIndex> entry : indexes.entrySet()) {
     final String factTableName = entry.getKey().getFactTable().getTableName();
     final ByteString schemaChecksum = entry.getKey().getSchema().getChecksum();
     if (!factTableName.equals(header.rolapStarFactTableName)) {
       continue;
     }
     if (!schemaChecksum.equals(header.schemaChecksum)) {
       continue;
     }
     return entry.getValue();
   }
   // The index doesn't exist. Let's create it.
   for (RolapSchema schema : RolapSchema.getRolapSchemas()) {
     if (!schema.getChecksum().equals(header.schemaChecksum)) {
       continue;
     }
     // We have a schema match.
     RolapStar star = schema.getStar(header.rolapStarFactTableName);
     if (star != null) {
       // Found it.
       indexes.put(star, new SegmentCacheIndexImpl(thread));
     }
     return indexes.get(star);
   }
   return null;
 }
Example #26
 void completed(InetAddress remote, String cfname) {
   logger.debug("Repair completed for {} on {}", remote, cfname);
   RepairJob job = activeJobs.get(cfname);
   if (job.completedSynchronizationJob(remote)) {
     activeJobs.remove(cfname);
     if (activeJobs.isEmpty()) completed.signalAll();
   }
 }
Example #27
 /**
  * Set a value in this context. A {@code null} value removes the mapping.
  *
  * @param key the key
  * @param value the value to associate, or {@code null} to remove the key
  * @return this context, for chaining
  */
 public RpcContext set(String key, Object value) {
   if (value == null) {
     values.remove(key);
   } else {
     values.put(key, value);
   }
   return this;
 }
Example #28
 /**
  * Set an attachment on this context. A {@code null} value removes the mapping.
  *
  * @param key the key
  * @param value the attachment value, or {@code null} to remove the key
  * @return this context, for chaining
  */
 public RpcContext setAttachment(String key, String value) {
   if (value == null) {
     attachments.remove(key);
   } else {
     attachments.put(key, value);
   }
   return this;
 }
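Because both setters return this, calls chain; a small usage sketch (how the context instance is obtained is hypothetical, assuming a thread-local accessor like RpcContext.getContext()):

  // Hypothetical usage: null removes, non-null puts, and calls chain.
  RpcContext context = RpcContext.getContext();
  context
      .set("traceId", 12345L)
      .set("retries", null) // removes "retries" if present
      .setAttachment("token", "abc")
      .setAttachment("stale", null); // removes the attachment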
Example #29
 static {
   Map<String, String> fakeData = new HashMap<>();
   fakeData.put("http://www.weather.gov", "Weather forecast");
   fakeData.put("http://www.espn.com", "Sports scores");
   fakeData.put("http://www.marketwatch.com", "Stock market data");
   fakeData.put("http://www.fandango.com", "Movie showtimes");
   data = Collections.unmodifiableMap(fakeData);
 }
Example #30
  /**
   * This is called when JPM runs in the background to start jobs
   *
   * @throws Exception
   */
  public void daemon() throws Exception {
    Runtime.getRuntime()
        .addShutdownHook(
            new Thread("Daemon shutdown") {
              public void run() {

                for (Service service : startedByDaemon) {
                  try {
                    reporter.error("Stopping " + service);
                    service.stop();
                    reporter.error("Stopped " + service);
                  } catch (Exception e) {
                    // Ignore
                  }
                }
              }
            });
    List<ServiceData> services = getServices();
    Map<String, ServiceData> map = new HashMap<String, ServiceData>();
    for (ServiceData d : services) {
      map.put(d.name, d);
    }
    List<ServiceData> start = new ArrayList<ServiceData>();
    Set<ServiceData> set = new HashSet<ServiceData>();
    for (ServiceData sd : services) {
      checkStartup(map, start, sd, set);
    }

    if (start.isEmpty()) reporter.warning("No services to start");

    for (ServiceData sd : start) {
      try {
        Service service = getService(sd.name);
        reporter.trace("Starting " + service);
        String result = service.start();
        if (result != null) reporter.error("Started error " + result);
        else startedByDaemon.add(service);
        reporter.trace("Started " + service);
      } catch (Exception e) {
        reporter.error("Cannot start daemon %s, due to %s", sd.name, e);
      }
    }

    while (true) {
      for (Service sd : startedByDaemon) {
        try {
          if (!sd.isRunning()) {
            reporter.error("Starting due to failure " + sd);
            String result = sd.start();
            if (result != null) reporter.error("Started error " + result);
          }
        } catch (Exception e) {
          reporter.error("Cannot start daemon %s, due to %s", sd, e);
        }
      }
      Thread.sleep(10000);
    }
  }