/**
   * If the key is to be associated with a valid value, a mutation is created for it with the given
   * table and columns. If the value for a column is missing (i.e., null), that column is marked
   * for {@link Deletion}. Similarly, if the entire value for a key is missing (i.e., null), the
   * whole key is marked for {@link Deletion}.
   *
   * @param keyColumns the key to write.
   * @param values the values to write.
   * @throws IOException if an I/O error occurs while writing the row.
   */
  @Override
  public void write(Map<String, ByteBuffer> keyColumns, List<ByteBuffer> values)
      throws IOException {
    TokenRange range = ringCache.getRange(getPartitionKey(keyColumns));

    // get the client for the given range, or create a new one
    final InetAddress address = ringCache.getEndpoints(range).get(0);
    RangeClient client = clients.get(address);
    if (client == null) {
      // haven't seen keys for this range: create new client
      client = new RangeClient(ringCache.getEndpoints(range));
      client.start();
      clients.put(address, client);
    }

    // add primary key columns to the bind variables
    List<ByteBuffer> allValues = new ArrayList<ByteBuffer>(values);
    for (ColumnMetadata column : partitionKeyColumns)
      allValues.add(keyColumns.get(column.getName()));
    for (ColumnMetadata column : clusterColumns) allValues.add(keyColumns.get(column.getName()));

    client.put(allValues);

    if (progressable != null) progressable.progress();
    if (context != null) HadoopCompat.progress(context);
  }
Example No. 2
    /**
     * Parse the parameters of a connection into a CoreNLP properties file that can be passed into
     * {@link StanfordCoreNLP}, and used in the I/O stages.
     *
     * @param httpExchange The http exchange; effectively, the request information.
     * @return A {@link Properties} object corresponding to a combination of default and passed
     *     properties.
     * @throws UnsupportedEncodingException Thrown if we could not decode the key/value pairs with
     *     UTF-8.
     */
    private Properties getProperties(HttpExchange httpExchange)
        throws UnsupportedEncodingException {
      // Load the default properties
      Properties props = new Properties();
      defaultProps
          .entrySet()
          .stream()
          .forEach(
              entry -> props.setProperty(entry.getKey().toString(), entry.getValue().toString()));

      // Try to get more properties from query string.
      Map<String, String> urlParams = getURLParams(httpExchange.getRequestURI());
      if (urlParams.containsKey("properties")) {
        StringUtils.decodeMap(URLDecoder.decode(urlParams.get("properties"), "UTF-8"))
            .entrySet()
            .forEach(entry -> props.setProperty(entry.getKey(), entry.getValue()));
      } else if (urlParams.containsKey("props")) {
        StringUtils.decodeMap(URLDecoder.decode(urlParams.get("props"), "UTF-8"))
            .entrySet()
            .forEach(entry -> props.setProperty(entry.getKey(), entry.getValue()));
      }

      // Make sure the properties compile
      props.setProperty(
          "annotators",
          StanfordCoreNLP.ensurePrerequisiteAnnotators(
              props.getProperty("annotators").split("[, \t]+")));

      return props;
    }
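
The method above boils down to a defaults-then-overrides merge: copy the server defaults into a fresh Properties object, then let anything decoded from the request overwrite them. A minimal standalone sketch of that idea (the class name and property values below are illustrative, not part of CoreNLP):

import java.util.HashMap;
import java.util.Map;
import java.util.Properties;

public class PropertyMergeSketch {

  // Copies the defaults first, then lets request-supplied entries override them.
  static Properties merge(Properties defaults, Map<String, String> fromRequest) {
    Properties props = new Properties();
    defaults.forEach((k, v) -> props.setProperty(k.toString(), v.toString()));
    fromRequest.forEach(props::setProperty);
    return props;
  }

  public static void main(String[] args) {
    Properties defaults = new Properties();
    defaults.setProperty("annotators", "tokenize, ssplit, pos");

    Map<String, String> fromRequest = new HashMap<>();
    fromRequest.put("annotators", "tokenize, ssplit, pos, lemma");

    // Prints "tokenize, ssplit, pos, lemma": the request value wins over the default.
    System.out.println(merge(defaults, fromRequest).getProperty("annotators"));
  }
}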
Example No. 3
  /**
   * The gossip digest is built based on randomization rather than just looping through the
   * collection of live endpoints.
   *
   * @param gDigests list of Gossip Digests.
   */
  void makeRandomGossipDigest(List<GossipDigest> gDigests) {
    /* Add the local endpoint state */
    EndpointState epState = endpointStateMap_.get(localEndpoint_);
    int generation = epState.getHeartBeatState().getGeneration();
    int maxVersion = getMaxEndpointStateVersion(epState);
    gDigests.add(new GossipDigest(localEndpoint_, generation, maxVersion));

    List<InetAddress> endpoints = new ArrayList<InetAddress>(endpointStateMap_.keySet());
    Collections.shuffle(endpoints, random_);
    for (InetAddress endpoint : endpoints) {
      epState = endpointStateMap_.get(endpoint);
      if (epState != null) {
        generation = epState.getHeartBeatState().getGeneration();
        maxVersion = getMaxEndpointStateVersion(epState);
        gDigests.add(new GossipDigest(endpoint, generation, maxVersion));
      } else {
        gDigests.add(new GossipDigest(endpoint, 0, 0));
      }
    }

    /* FOR DEBUG ONLY - remove later */
    StringBuilder sb = new StringBuilder();
    for (GossipDigest gDigest : gDigests) {
      sb.append(gDigest);
      sb.append(" ");
    }
    if (logger_.isTraceEnabled()) logger_.trace("Gossip Digests are : " + sb.toString());
  }
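Example No. 4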
  private void createKafkaAdaptorListener(
      InputEventAdapterListener inputEventAdapterListener,
      InputEventAdapterConfiguration inputEventAdapterConfiguration) {

    Map<String, String> brokerProperties = new HashMap<String, String>();
    brokerProperties.putAll(inputEventAdapterConfiguration.getProperties());
    String zkConnect =
        brokerProperties.get(KafkaEventAdapterConstants.ADAPTOR_SUSCRIBER_ZOOKEEPER_CONNECT);
    String groupID = brokerProperties.get(KafkaEventAdapterConstants.ADAPTOR_SUSCRIBER_GROUP_ID);
    String threadsStr = brokerProperties.get(KafkaEventAdapterConstants.ADAPTOR_SUSCRIBER_THREADS);
    String optionalConfiguration =
        brokerProperties.get(KafkaEventAdapterConstants.ADAPTOR_OPTIONAL_CONFIGURATION_PROPERTIES);
    int threads = Integer.parseInt(threadsStr);

    String topic =
        inputEventAdapterConfiguration
            .getProperties()
            .get(KafkaEventAdapterConstants.ADAPTOR_SUSCRIBER_TOPIC);

    consumerKafkaAdaptor =
        new ConsumerKafkaAdaptor(
            topic,
            tenantId,
            KafkaEventAdapter.createConsumerConfig(zkConnect, groupID, optionalConfiguration));
    consumerKafkaAdaptor.run(threads, inputEventAdapterListener);
  }
Example No. 5
  void notifyFailureDetector(List<GossipDigest> gDigests) {
    IFailureDetector fd = FailureDetector.instance;
    for (GossipDigest gDigest : gDigests) {
      EndpointState localEndpointState = endpointStateMap_.get(gDigest.endpoint_);
      /*
       * If the local endpoint state exists then report to the FD only
       * if the versions work out.
       */
      if (localEndpointState != null) {
        int localGeneration =
            endpointStateMap_.get(gDigest.endpoint_).getHeartBeatState().generation_;
        int remoteGeneration = gDigest.generation_;
        if (remoteGeneration > localGeneration) {
          fd.report(gDigest.endpoint_);
          continue;
        }

        if (remoteGeneration == localGeneration) {
          int localVersion = getMaxEndpointStateVersion(localEndpointState);
          // int localVersion =
          // endpointStateMap_.get(gDigest.endpoint_).getHeartBeatState().getHeartBeatVersion();
          int remoteVersion = gDigest.maxVersion_;
          if (remoteVersion > localVersion) {
            fd.report(gDigest.endpoint_);
          }
        }
      }
    }
  }
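Example No. 6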
 @Test
 public void removeFromMap() {
   HazelcastClient hClient = getHazelcastClient();
   Map map = hClient.getMap("removeFromMap");
   assertNull(map.put("a", "b"));
   assertEquals("b", map.get("a"));
   assertEquals("b", map.remove("a"));
   assertNull(map.remove("a"));
   assertNull(map.get("a"));
 }
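Example No. 7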
  private ByteBuffer getPartitionKey(Map<String, ByteBuffer> keyColumns) {
    ByteBuffer partitionKey;
    if (partitionKeyColumns.size() > 1) {
      ByteBuffer[] keys = new ByteBuffer[partitionKeyColumns.size()];
      for (int i = 0; i < keys.length; i++)
        keys[i] = keyColumns.get(partitionKeyColumns.get(i).getName());

      partitionKey = CompositeType.build(keys);
    } else {
      partitionKey = keyColumns.get(partitionKeyColumns.get(0).getName());
    }
    return partitionKey;
  }
Example No. 8
  private void handleSpecialCommands(Map<String, Object> arow, DocWrapper doc) {
    Object value = arow.get("$deleteDocById");
    if (value != null) {
      if (value instanceof Collection) {
        Collection collection = (Collection) value;
        for (Object o : collection) {
          writer.deleteDoc(o.toString());
        }
      } else {
        writer.deleteDoc(value);
      }
    }
    value = arow.get("$deleteDocByQuery");
    if (value != null) {
      if (value instanceof Collection) {
        Collection collection = (Collection) value;
        for (Object o : collection) {
          writer.deleteByQuery(o.toString());
        }
      } else {
        writer.deleteByQuery(value.toString());
      }
    }
    value = arow.get("$docBoost");
    if (value != null) {
      float value1 = 1.0f;
      if (value instanceof Number) {
        value1 = ((Number) value).floatValue();
      } else {
        value1 = Float.parseFloat(value.toString());
      }
      doc.setDocumentBoost(value1);
    }

    value = arow.get("$skipDoc");
    if (value != null) {
      if (Boolean.parseBoolean(value.toString())) {
        throw new DataImportHandlerException(
            DataImportHandlerException.SKIP, "Document skipped :" + arow);
      }
    }

    value = arow.get("$skipRow");
    if (value != null) {
      if (Boolean.parseBoolean(value.toString())) {
        throw new DataImportHandlerException(DataImportHandlerException.SKIP_ROW);
      }
    }
  }
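
For orientation, here is the kind of row map the handler above inspects; only the $-prefixed keys come from the code above, while the ordinary field name and all values are made up:

import java.util.HashMap;
import java.util.Map;

public class SpecialCommandsRowSketch {
  public static void main(String[] args) {
    Map<String, Object> arow = new HashMap<>();
    arow.put("id", "doc-42");                  // ordinary field (hypothetical)
    arow.put("$docBoost", 2.5f);               // applied as the document boost
    arow.put("$deleteDocById", "stale-doc-7"); // triggers a delete for this id
    // arow.put("$skipDoc", "true");           // would make the handler skip this document
    System.out.println(arow);
  }
}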
Example No. 9
  static {
    threadPools.put(
        Stage.TRANSCODER,
        new ThreadPoolExecutor(
            3,
            3,
            5 * 60,
            TimeUnit.SECONDS,
            new LinkedBlockingQueue<Runnable>(),
            new ThreadFactory() {
              public Thread newThread(Runnable r) {
                return ThreadPools.newThread(
                    r, "TranscoderThread-" + Stage.TRANSCODER + "-" + (transSeq++));
              }
            }));
    threadPools.put(
        Stage.RECOGNIZER,
        new ThreadPoolExecutor(
            3,
            3,
            5 * 60,
            TimeUnit.SECONDS,
            new LinkedBlockingQueue<Runnable>(),
            new ThreadFactory() {
              public Thread newThread(Runnable r) {
                return ThreadPools.newThread(
                    r, "TranscoderThread-" + Stage.RECOGNIZER + "-" + (transSeq++));
              }
            }));

    // register them too
    ThreadPools.getThreadPools()
        .put(Executors.class.getName() + "." + Stage.TRANSCODER, threadPools.get(Stage.TRANSCODER));
    ThreadPools.getThreadPools()
        .put(Executors.class.getName() + "." + Stage.RECOGNIZER, threadPools.get(Stage.RECOGNIZER));

    // fill the rest of the map too, so we don't have to think about it any more later on.
    for (Stage s : Stage.values()) {
      if (!threadPools.containsKey(s)) {
        threadPools.put(s, ThreadPools.jobsExecutor);
      }
    }
    // default configuration, 5 + 1 executors.
    for (int i = 0; i < 5; i++) {
      executorsMap.put(new CommandExecutor.Method(), Stage.TRANSCODER);
    }
    executorsMap.put(new CommandExecutor.Method(), Stage.RECOGNIZER);
    readConfiguration();
  }
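Example No. 10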
 @Test
 public void putToTheMap() throws InterruptedException {
   HazelcastClient hClient = getHazelcastClient();
   Map<String, String> clientMap = hClient.getMap("putToTheMap");
   assertEquals(0, clientMap.size());
   String result = clientMap.put("1", "CBDEF");
   assertNull(result);
   assertEquals("CBDEF", clientMap.get("1"));
   assertEquals("CBDEF", clientMap.get("1"));
   assertEquals("CBDEF", clientMap.get("1"));
   assertEquals(1, clientMap.size());
   result = clientMap.put("1", "B");
   assertEquals("CBDEF", result);
   assertEquals("B", clientMap.get("1"));
   assertEquals("B", clientMap.get("1"));
 }
Example No. 11
  private synchronized String allocName(String type, String namePattern) {
    Map<String, Integer> patterns = nameTypes.get(type);
    if (patterns == null) {
      patterns = new HashMap<String, Integer>();
      nameTypes.put(type, patterns);
    }

    Integer id = patterns.get(namePattern);
    if (id == null) id = 0;

    id++;
    patterns.put(namePattern, id);
    String agentName = namePattern.replaceFirst("#", id.toString());
    if (agentName.equals(namePattern))
      Log.warn("AllocName: missing '#' in name pattern '" + namePattern + "'");
    else
      Log.debug(
          "AllocName: for type="
              + type
              + " assigned '"
              + agentName
              + "' from pattern '"
              + namePattern
              + "'");
    return agentName;
  }
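
The same pattern-counter idea in a minimal, self-contained form (the class below is a sketch, not part of the original framework):

import java.util.HashMap;
import java.util.Map;

public class NameAllocatorSketch {
  private final Map<String, Integer> counters = new HashMap<>();

  // Replaces the first '#' in the pattern with a per-pattern counter, as allocName does.
  synchronized String alloc(String pattern) {
    int id = counters.merge(pattern, 1, Integer::sum);
    return pattern.replaceFirst("#", Integer.toString(id));
  }

  public static void main(String[] args) {
    NameAllocatorSketch names = new NameAllocatorSketch();
    System.out.println(names.alloc("crawler-#")); // crawler-1
    System.out.println(names.alloc("crawler-#")); // crawler-2
    System.out.println(names.alloc("indexer-#")); // indexer-1
  }
}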
Example No. 12
  public static CudaEngine getCudaEngine(CudaObject co) {
    synchronized (cudaEngines) {
      if (!isCudaAvailable()) throw new CudaException("No cuda device found");
      try {
        initialization.awaitTermination(100, TimeUnit.SECONDS);
      } catch (InterruptedException e) {
        e.printStackTrace();
      }
      Pheromone p = (Pheromone) co;
      final int pheroID = cudaObjectID.incrementAndGet();

      final CudaEngine ce = cudaEngines.get(pheroID % availableDevicesNb);
      //			final CudaEngine ce = cudaEngines.get(1);
      //			final CudaEngine ce = cudaEngines.get(0);

      //			final CudaEngine ce;
      //			if(p.getName().contains("PRE")){
      //				ce = cudaEngines.get(0);
      //			}
      //			else{
      //				ce = cudaEngines.get(1);
      //			}
      //
      ce.cudaObjects.add(co);
      System.err.println(co + "ID " + pheroID + " getting cuda engine Id " + ce.Id);
      return ce;
    }
  }
Example No. 13
  public void setProperty(final QName pid, final QName propName, final String value) {
    if (__log.isDebugEnabled()) __log.debug("Setting property " + propName + " on process " + pid);

    ProcessConfImpl pconf = _processes.get(pid);
    if (pconf == null) {
      String msg = __msgs.msgProcessNotFound(pid);
      __log.info(msg);
      throw new ContextException(msg);
    }

    final DeploymentUnitDir dudir = pconf.getDeploymentUnit();
    exec(
        new ProcessStoreImpl.Callable<Object>() {
          public Object call(ConfStoreConnection conn) {
            DeploymentUnitDAO dudao = conn.getDeploymentUnit(dudir.getName());
            if (dudao == null) return null;
            ProcessConfDAO proc = dudao.getProcess(pid);
            if (proc == null) return null;
            proc.setProperty(propName, value);
            return null;
          }
        });

    fireEvent(new ProcessStoreEvent(ProcessStoreEvent.Type.PROPERTY_CHANGED, pid, dudir.getName()));
  }
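Example No. 14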
    /** {@inheritDoc} */
    @Nullable
    @Override
    public Map<String, Collection<?>> run(GridStreamerContext ctx, Collection<Bar> bars)
        throws GridException {
      ConcurrentMap<String, Bar> loc = ctx.localSpace();

      GridStreamerWindow win = ctx.window("stage2");

      // Add numbers to window.
      win.enqueueAll(bars);

      Collection<Bar> polled = win.pollEvictedBatch();

      if (!polled.isEmpty()) {
        Map<String, Bar> map = new HashMap<>();

        for (Bar polledBar : polled) {
          String symbol = polledBar.symbol();

          Bar bar = map.get(symbol);

          if (bar == null) map.put(symbol, bar = new Bar(symbol));

          bar.update(polledBar);
        }

        loc.putAll(map);
      }

      return null;
    }
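Example No. 15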
    /** {@inheritDoc} */
    @Nullable
    @Override
    public Map<String, Collection<?>> run(GridStreamerContext ctx, Collection<Quote> quotes)
        throws GridException {
      GridStreamerWindow win = ctx.window("stage1");

      // Add numbers to window.
      win.enqueueAll(quotes);

      Collection<Quote> polled = win.pollEvictedBatch();

      if (!polled.isEmpty()) {
        Map<String, Bar> map = new HashMap<>();

        for (Quote quote : polled) {
          String symbol = quote.symbol();

          Bar bar = map.get(symbol);

          if (bar == null) map.put(symbol, bar = new Bar(symbol));

          bar.update(quote.price());
        }

        return Collections.<String, Collection<?>>singletonMap(ctx.nextStageName(), map.values());
      }

      return null;
    }
Example No. 16
 private List<TransactionInfo> getTransactions(
     Address source, Set<Integer> segments, int topologyId) {
   if (trace) {
     log.tracef(
         "Requesting transactions for segments %s of cache %s from node %s",
         segments, cacheName, source);
   }
   // get transactions and locks
   try {
     StateRequestCommand cmd =
         commandsFactory.buildStateRequestCommand(
             StateRequestCommand.Type.GET_TRANSACTIONS,
             rpcManager.getAddress(),
             topologyId,
             segments);
     Map<Address, Response> responses =
         rpcManager.invokeRemotely(
             Collections.singleton(source), cmd, ResponseMode.SYNCHRONOUS_IGNORE_LEAVERS, timeout);
     Response response = responses.get(source);
     if (response instanceof SuccessfulResponse) {
       return (List<TransactionInfo>) ((SuccessfulResponse) response).getResponseValue();
     }
     log.failedToRetrieveTransactionsForSegments(segments, cacheName, source, null);
   } catch (CacheException e) {
     log.failedToRetrieveTransactionsForSegments(segments, cacheName, source, e);
   }
   return null;
 }
Example No. 17
  /**
   * Helper function to get value from map.
   *
   * @param map Map to take value from.
   * @param key Key to search in map.
   * @param ifNull Default value if {@code null} was returned by the map.
   * @param <K> Key type.
   * @param <V> Value type.
   * @return Value from the map, or the default value if the map returns {@code null}.
   */
  public static <K, V> V getOrElse(Map<K, V> map, K key, V ifNull) {
    assert map != null;

    V res = map.get(key);

    return res != null ? res : ifNull;
  }
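
A brief usage example for the helper above; the map contents are illustrative, and the helper is duplicated so the snippet compiles on its own:

import java.util.HashMap;
import java.util.Map;

public class GetOrElseDemo {
  public static void main(String[] args) {
    Map<String, Integer> retries = new HashMap<>();
    retries.put("read", 3);

    System.out.println(getOrElse(retries, "read", 1));  // present key: prints 3
    System.out.println(getOrElse(retries, "write", 1)); // missing key: prints the default, 1
  }

  // Same shape as the helper above, copied here so the sketch stands alone.
  public static <K, V> V getOrElse(Map<K, V> map, K key, V ifNull) {
    V res = map.get(key);
    return res != null ? res : ifNull;
  }
}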
Example No. 18
  private void checkStartup(
      Map<String, ServiceData> map,
      List<ServiceData> start,
      ServiceData sd,
      Set<ServiceData> cyclic) {
    if (sd.after.isEmpty() || start.contains(sd)) return;

    if (cyclic.contains(sd)) {
      reporter.error("Cyclic dependency for " + sd.name);
      return;
    }

    cyclic.add(sd);

    for (String dependsOn : sd.after) {
      if (dependsOn.equals("boot")) continue;

      ServiceData deps = map.get(dependsOn);
      if (deps == null) {
        reporter.error("No such service " + dependsOn + " but " + sd.name + " depends on it");
      } else {
        checkStartup(map, start, deps, cyclic);
      }
    }
    start.add(sd);
  }
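
The recursion above amounts to a depth-first walk over the 'after' edges, with the start and cyclic sets distinguishing finished nodes from nodes still on the current path. A minimal generic sketch of that kind of cycle check (all names here are hypothetical):

import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;

public class CycleCheckSketch {
  // Returns true if following the dependency edges from 'node' revisits a node on the current path.
  static boolean hasCycle(String node, Map<String, List<String>> deps, Set<String> onPath) {
    if (!onPath.add(node)) return true; // already on the current path: cycle
    for (String next : deps.getOrDefault(node, List.of())) {
      if (hasCycle(next, deps, onPath)) return true;
    }
    onPath.remove(node); // done with this branch
    return false;
  }

  public static void main(String[] args) {
    Map<String, List<String>> deps = new HashMap<>();
    deps.put("web", List.of("db"));
    deps.put("db", List.of("web")); // cyclic on purpose
    System.out.println(hasCycle("web", deps, new HashSet<>())); // true
  }
}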
Example No. 19
  /**
   * Add a record telling what entity caps node a user has.
   *
   * @param user the user (Full JID)
   * @param node the node (of the caps packet extension)
   * @param hash the hashing algorithm used to calculate <tt>ver</tt>
   * @param ver the version (of the caps packet extension)
   * @param ext the ext (of the caps packet extension)
   * @param online indicates if the user is online
   */
  private void addUserCapsNode(
      String user, String node, String hash, String ver, String ext, boolean online) {
    if ((user != null) && (node != null) && (hash != null) && (ver != null)) {
      Caps caps = userCaps.get(user);

      if ((caps == null)
          || !caps.node.equals(node)
          || !caps.hash.equals(hash)
          || !caps.ver.equals(ver)) {
        caps = new Caps(node, hash, ver, ext);

        userCaps.put(user, caps);
      } else return;

      // Fire userCapsNodeAdded.
      UserCapsNodeListener[] listeners;

      synchronized (userCapsNodeListeners) {
        listeners = userCapsNodeListeners.toArray(NO_USER_CAPS_NODE_LISTENERS);
      }
      if (listeners.length != 0) {
        String nodeVer = caps.getNodeVer();

        for (UserCapsNodeListener listener : listeners)
          listener.userCapsNodeAdded(user, nodeVer, online);
      }
    }
  }
Example No. 20
 /**
  * This method is part of the IFailureDetectionEventListener interface. It is invoked by the
  * Failure Detector when it convicts an endpoint.
  *
  * @param endpoint the endpoint that is convicted.
  */
 public void convict(InetAddress endpoint) {
   EndpointState epState = endpointStateMap_.get(endpoint);
   if (epState.isAlive()) {
     logger_.info("InetAddress {} is now dead.", endpoint);
     isAlive(endpoint, epState, false);
   }
 }
Example No. 21
  /**
   * Remove records telling what entity caps node a contact has.
   *
   * @param contact the contact
   */
  public void removeContactCapsNode(Contact contact) {
    Caps caps = null;
    String lastRemovedJid = null;

    Iterator<String> iter = userCaps.keySet().iterator();
    while (iter.hasNext()) {
      String jid = iter.next();

      if (StringUtils.parseBareAddress(jid).equals(contact.getAddress())) {
        caps = userCaps.get(jid);
        lastRemovedJid = jid;
        iter.remove();
      }
    }

    // Fire the event only for the last removed JID so that, in the end,
    // a single event goes out of the protocol for the contact.
    if (caps != null) {
      UserCapsNodeListener[] listeners;
      synchronized (userCapsNodeListeners) {
        listeners = userCapsNodeListeners.toArray(NO_USER_CAPS_NODE_LISTENERS);
      }
      if (listeners.length != 0) {
        String nodeVer = caps.getNodeVer();

        for (UserCapsNodeListener listener : listeners)
          listener.userCapsNodeRemoved(lastRemovedJid, nodeVer, false);
      }
    }
  }
Example No. 22
 /** Returns the {@link SegmentCacheIndex} for a given {@link SegmentHeader}. */
 private SegmentCacheIndex getIndex(SegmentHeader header) {
   // First we check the indexes that already exist.
   // This is fast.
   for (Entry<RolapStar, SegmentCacheIndex> entry : indexes.entrySet()) {
     final String factTableName = entry.getKey().getFactTable().getTableName();
     final ByteString schemaChecksum = entry.getKey().getSchema().getChecksum();
     if (!factTableName.equals(header.rolapStarFactTableName)) {
       continue;
     }
     if (!schemaChecksum.equals(header.schemaChecksum)) {
       continue;
     }
     return entry.getValue();
   }
   // The index doesn't exist. Let's create it.
   for (RolapSchema schema : RolapSchema.getRolapSchemas()) {
     if (!schema.getChecksum().equals(header.schemaChecksum)) {
       continue;
     }
     // We have a schema match.
     RolapStar star = schema.getStar(header.rolapStarFactTableName);
     if (star != null) {
       // Found it.
       indexes.put(star, new SegmentCacheIndexImpl(thread));
     }
     return indexes.get(star);
   }
   return null;
 }
Example No. 23
  /** Start the gossiper with the generation # retrieved from the System table */
  public void start(InetAddress localEndpoint, int generationNbr) {
    localEndpoint_ = localEndpoint;
    /* Get the seeds from the config and initialize them. */
    Set<InetAddress> seedHosts = DatabaseDescriptor.getSeeds();
    for (InetAddress seed : seedHosts) {
      if (seed.equals(localEndpoint)) continue;
      seeds_.add(seed);
    }

    /* initialize the heartbeat state for this localEndpoint */
    EndpointState localState = endpointStateMap_.get(localEndpoint_);
    if (localState == null) {
      HeartBeatState hbState = new HeartBeatState(generationNbr);
      localState = new EndpointState(hbState);
      localState.isAlive(true);
      localState.isAGossiper(true);
      endpointStateMap_.put(localEndpoint_, localState);
    }

    // notify snitches that Gossiper is about to start
    DatabaseDescriptor.getEndpointSnitch().gossiperStarting();

    scheduledGossipTask =
        StorageService.scheduledTasks.scheduleWithFixedDelay(
            new GossipTask(),
            Gossiper.intervalInMillis_,
            Gossiper.intervalInMillis_,
            TimeUnit.MILLISECONDS);
  }
Example No. 24
  /**
   * Returns default <tt>WebRtcDataStream</tt> if it's ready or <tt>null</tt> otherwise.
   *
   * @return <tt>WebRtcDataStream</tt> if it's ready or <tt>null</tt> otherwise.
   * @throws IOException if opening the default data channel fails.
   */
  public WebRtcDataStream getDefaultDataStream() throws IOException {
    WebRtcDataStream def;

    synchronized (this) {
      if (sctpSocket == null) {
        def = null;
      } else {
        // Channel that runs on sid 0
        def = channels.get(0);
        if (def == null) {
          def = openChannel(0, 0, 0, 0, "default");
        }
        // Pawel Domas: Must be acknowledged before use
        /*
         * XXX Lyubomir Marinov: We're always sending ordered. According
         * to "WebRTC Data Channel Establishment Protocol", we can start
         * sending messages containing user data after the
         * DATA_CHANNEL_OPEN message has been sent without waiting for
         * the reception of the corresponding DATA_CHANNEL_ACK message.
         */
        //                if (!def.isAcknowledged())
        //                    def = null;
      }
    }
    return def;
  }
Example No. 25
  void notifyFailureDetector(Map<InetAddress, EndpointState> remoteEpStateMap) {
    IFailureDetector fd = FailureDetector.instance;
    for (Entry<InetAddress, EndpointState> entry : remoteEpStateMap.entrySet()) {
      InetAddress endpoint = entry.getKey();
      EndpointState remoteEndpointState = entry.getValue();
      EndpointState localEndpointState = endpointStateMap_.get(endpoint);
      /*
       * If the local endpoint state exists then report to the FD only
       * if the versions work out.
       */
      if (localEndpointState != null) {
        int localGeneration = localEndpointState.getHeartBeatState().generation_;
        int remoteGeneration = remoteEndpointState.getHeartBeatState().generation_;
        if (remoteGeneration > localGeneration) {
          fd.report(endpoint);
          continue;
        }

        if (remoteGeneration == localGeneration) {
          int localVersion = getMaxEndpointStateVersion(localEndpointState);
          // int localVersion = localEndpointState.getHeartBeatState().getHeartBeatVersion();
          int remoteVersion = remoteEndpointState.getHeartBeatState().getHeartBeatVersion();
          if (remoteVersion > localVersion) {
            fd.report(endpoint);
          }
        }
      }
    }
  }
Example No. 26
 public ProcessConf getProcessConfiguration(final QName processId) {
   _rw.readLock().lock();
   try {
     return _processes.get(processId);
   } finally {
     _rw.readLock().unlock();
   }
 }
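Example No. 27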
 void completed(InetAddress remote, String cfname) {
   logger.debug("Repair completed for {} on {}", remote, cfname);
   RepairJob job = activeJobs.get(cfname);
   if (job.completedSynchronizationJob(remote)) {
     activeJobs.remove(cfname);
     if (activeJobs.isEmpty()) completed.signalAll();
   }
 }
Example No. 28
 public void setRetiredPackage(String packageName, boolean retired) {
   DeploymentUnitDir duDir = _deploymentUnits.get(packageName);
   if (duDir == null) throw new ContextException("Could not find package " + packageName);
   for (QName processName : duDir.getProcessNames()) {
     setState(
         toPid(processName, duDir.getVersion()),
         retired ? ProcessState.RETIRED : ProcessState.ACTIVE);
   }
 }
Example No. 29
  private void fixTimeLimit() {
    if (timeLimitFuture != null) timeLimitFuture.cancel(true);

    if (running && limits.containsKey(TIMER_COUNTER)) {
      long delay = limits.get(TIMER_COUNTER) * 1000 - time;
      if (delay > 0) {
        timeLimitFuture = scheduler.schedule(new TimeLimitTask(), delay, TimeUnit.MILLISECONDS);
      }
    }
  }
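Example No. 30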
 @Test
 public void getPuttedValueFromTheMap() {
   HazelcastClient hClient = getHazelcastClient();
   Map<String, String> clientMap = hClient.getMap("getPuttedValueFromTheMap");
   int size = clientMap.size();
   clientMap.put("1", "Z");
   String value = clientMap.get("1");
   assertEquals("Z", value);
   assertEquals(size + 1, clientMap.size());
 }