public static void shutdown() {
    logger_.info("Shutting down ...");
    synchronized (MessagingService.class) {
      /* Stop listening on any socket */
      for (SelectionKey skey : listenSockets_.values()) {
        SelectorManager.getSelectorManager().cancel(skey);
      }
      listenSockets_.clear();

      /* Shut down the threads in the event queues */
      messageDeserializationExecutor_.shutdownNow();
      messageSerializerExecutor_.shutdownNow();
      messageDeserializerExecutor_.shutdownNow();
      streamExecutor_.shutdownNow();

      /* Shut down the cachetables */
      taskCompletionMap_.shutdown();
      callbackMap_.shutdown();

      /* Interrupt the selector manager thread */
      SelectorManager.getSelectorManager().interrupt();

      poolTable_.clear();
      verbHandlers_.clear();
      bShutdown_ = true;
    }
    logger_.debug("Shutdown invocation complete.");
  }
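 /**
  * Callback invoked when a remote endpoint reports that repair of the given column family has
  * completed. Once every endpoint for every column family has reported back, waiters blocked
  * on {@code completed} are released.
  */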
 void completed(InetAddress remote, String cfname) {
   logger.debug("Repair completed for {} on {}", remote, cfname);
   RepairJob job = activeJobs.get(cfname);
   if (job == null) {
     // The session was terminated or failed before this completion arrived; nothing to track.
     return;
   }
   if (job.completedSynchronizationJob(remote)) {
     activeJobs.remove(cfname);
     if (activeJobs.isEmpty()) completed.signalAll();
   }
 }
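  /**
   * Sets up the messaging subsystem: the reserved verb table, the verb handler and endpoint
   * collections, the callback and task-completion cachetables (entries evicted after twice the
   * RPC timeout), the serialization, deserialization and streaming executors, the MD5-based
   * protocol header, and the response verb handler and response stage.
   */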
  protected MessagingService() {
    for (ReservedVerbs_ verbs : ReservedVerbs_.values()) {
      reservedVerbs_.put(verbs.toString(), verbs.toString());
    }
    verbHandlers_ = new HashMap<String, IVerbHandler>();
    endPoints_ = new HashSet<EndPoint>();
    /*
     * Leave callbacks in the cachetable long enough that any related messages will arrive
     * before the callback is evicted from the table. The concurrency level is set at 128,
     * which is the sum of the threads in the pool that adds entries to the table and the
     * pool that retrieves callbacks from it.
     */
    int maxSize = MessagingConfig.getMessagingThreadCount();
    callbackMap_ = new Cachetable<String, IAsyncCallback>(2 * DatabaseDescriptor.getRpcTimeout());
    taskCompletionMap_ =
        new Cachetable<String, IAsyncResult>(2 * DatabaseDescriptor.getRpcTimeout());

    messageDeserializationExecutor_ =
        new DebuggableThreadPoolExecutor(
            maxSize,
            maxSize,
            Integer.MAX_VALUE,
            TimeUnit.SECONDS,
            new LinkedBlockingQueue<Runnable>(),
            new ThreadFactoryImpl("MESSAGING-SERVICE-POOL"));

    messageSerializerExecutor_ =
        new DebuggableThreadPoolExecutor(
            maxSize,
            maxSize,
            Integer.MAX_VALUE,
            TimeUnit.SECONDS,
            new LinkedBlockingQueue<Runnable>(),
            new ThreadFactoryImpl("MESSAGE-SERIALIZER-POOL"));

    messageDeserializerExecutor_ =
        new DebuggableThreadPoolExecutor(
            maxSize,
            maxSize,
            Integer.MAX_VALUE,
            TimeUnit.SECONDS,
            new LinkedBlockingQueue<Runnable>(),
            new ThreadFactoryImpl("MESSAGE-DESERIALIZER-POOL"));

    streamExecutor_ =
        new DebuggableThreadPoolExecutor(
            1,
            1,
            Integer.MAX_VALUE,
            TimeUnit.SECONDS,
            new LinkedBlockingQueue<Runnable>(),
            new ThreadFactoryImpl("MESSAGE-STREAMING-POOL"));

    protocol_ = hash(HashingSchemes.MD5, "FB-MESSAGING".getBytes());
    /* register the response verb handler */
    registerVerbHandlers(MessagingService.responseVerbHandler_, new ResponseVerbHandler());
    /* register stage for response */
    StageManager.registerStage(
        MessagingService.responseStage_, new MultiThreadedStage("RESPONSE-STAGE", maxSize));
  }
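    /**
     * Estimates each token's fractional ownership of the ring: for the range ending at each
     * token, count the splits across every keyspace and column family, then normalize the
     * counts so they sum to 1.0.
     */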
    public Map<Token, Float> describeOwnership(List<Token> sortedTokens) {
      // allTokens will hold the per-token counts and is returned; sortedRanges is shorthand
      // for the token<->token range math.
      Map<Token, Float> allTokens = new HashMap<Token, Float>();
      List<Range<Token>> sortedRanges = new ArrayList<Range<Token>>();

      // This initializes the counts to 0 and computes the ranges in order.
      Token lastToken = sortedTokens.get(sortedTokens.size() - 1);
      for (Token node : sortedTokens) {
        allTokens.put(node, 0f);
        sortedRanges.add(new Range<Token>(lastToken, node));
        lastToken = node;
      }

      for (String ks : Schema.instance.getKeyspaces()) {
        for (CFMetaData cfmd : Schema.instance.getKSMetaData(ks).cfMetaData().values()) {
          for (Range<Token> r : sortedRanges) {
            // Looping over every KS:CF:Range, get the splits size and add it to the count
            allTokens.put(
                r.right,
                allTokens.get(r.right)
                    + StorageService.instance.getSplits(ks, cfmd.cfName, r, 1).size());
          }
        }
      }

      // Sum the counts, then divide each count by the total for the fractional ownership.
      float total = 0f;
      for (Float f : allTokens.values()) total += f;
      for (Map.Entry<Token, Float> row : allTokens.entrySet())
        allTokens.put(row.getKey(), row.getValue() / total);

      return allTokens;
    }
      /**
       * Submit differencers for running. All trees *must* have been received before this is
       * called.
       */
      public void submitDifferencers() {
        assert requestedEndpoints.size() == 0;

        // Right now we only difference the local host against each remote host; CASSANDRA-2610
        // will fix that. In the meantime this special casing works well enough.
        MerkleTree localTree = trees.get(FBUtilities.getLocalAddress());
        assert localTree != null;
        for (Map.Entry<InetAddress, MerkleTree> entry : trees.entrySet()) {
          if (entry.getKey().equals(FBUtilities.getLocalAddress())) continue;

          Differencer differencer =
              new Differencer(cfname, entry.getKey(), entry.getValue(), localTree);
          syncJobs.add(entry.getKey());
          logger.debug("Queueing comparison " + differencer);
          StageManager.getStage(Stage.ANTI_ENTROPY).execute(differencer);
        }
        trees.clear(); // allows gc to do its thing
      }
    // We don't care about the return value, but we do care about any exception it throws.
    public void runMayThrow() throws Exception {
      if (endpoints.isEmpty()) {
        differencingDone.signalAll();
        logger.info(
            "No neighbors to repair with for "
                + tablename
                + " on "
                + range
                + ": "
                + getName()
                + " completed.");
        return;
      }

      // Check that all the nodes are alive.
      for (InetAddress endpoint : endpoints) {
        if (!FailureDetector.instance.isAlive(endpoint)) {
          differencingDone.signalAll();
          logger.info(
              "Could not proceed on repair because a neighbor ("
                  + endpoint
                  + ") is dead: "
                  + getName()
                  + " failed.");
          return;
        }
      }

      AntiEntropyService.instance.sessions.put(getName(), this);
      Gossiper.instance.register(this);
      FailureDetector.instance.registerFailureDetectionEventListener(this);
      try {
        // Create and queue a RepairJob for each column family
        for (String cfname : cfnames) {
          RepairJob job = new RepairJob(cfname);
          jobs.offer(job);
          activeJobs.put(cfname, job);
        }

        jobs.peek().sendTreeRequests();

        // block whatever thread started this session until all requests have been returned:
        // if this thread dies, the session will still complete in the background
        completed.await();
        if (exception != null) throw exception;
      } catch (InterruptedException e) {
        throw new RuntimeException(
            "Interrupted while waiting for repair: repair will continue in the background.");
      } finally {
        FailureDetector.instance.unregisterFailureDetectionEventListener(this);
        Gossiper.instance.unregister(this);
        AntiEntropyService.instance.sessions.remove(getName());
      }
    }
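 /**
  * Called when a neighbor dies mid-session: records the failure as the session's exception,
  * drops all pending and active jobs, and wakes any threads waiting on the session.
  */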
 void failedNode(InetAddress remote) {
   String errorMsg =
       String.format("Problem during repair session %s, endpoint %s died", sessionName, remote);
   logger.error(errorMsg);
   exception = new IOException(errorMsg);
   // If a node failed, we stop everything (though there could still be some activity in the
   // background)
   jobs.clear();
   activeJobs.clear();
   differencingDone.signalAll();
   completed.signalAll();
 }
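  /** Removes every verb handler that was registered for the given endpoint. */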
  public void deregisterAllVerbHandlers(EndPoint localEndPoint) {
    Iterator<String> keys = verbHandlers_.keySet().iterator();

    /*
     * Endpoint-specific verb handlers can be distinguished because
     * their keys contain the name of the endpoint.
     */
    while (keys.hasNext()) {
      String key = keys.next();
      if (key.contains(localEndPoint.toString())) keys.remove();
    }
  }
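 /**
  * Returns the connection pool for the (from, to) pair, lazily creating it with double-checked
  * locking: the second get() under the lock guards against a concurrent thread having created
  * the pool first.
  */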
 public static TcpConnectionManager getConnectionPool(EndPoint from, EndPoint to) {
   String key = from + ":" + to;
   TcpConnectionManager cp = poolTable_.get(key);
   if (cp == null) {
     lock_.lock();
     try {
       cp = poolTable_.get(key);
       if (cp == null) {
         cp =
             new TcpConnectionManager(
                 MessagingConfig.getConnectionPoolInitialSize(),
                 MessagingConfig.getConnectionPoolGrowthFactor(),
                 MessagingConfig.getConnectionPoolMaxSize(),
                 from,
                 to);
         poolTable_.put(key, cp);
       }
     } finally {
       lock_.unlock();
     }
   }
   return cp;
 }
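    /**
     * Callback invoked when a Differencer finishes syncing one pair of endpoints. When the last
     * differencer for a column family completes, that CF is retired from activeJobs; when no CFs
     * remain, waiters blocked on {@code completed} are released.
     */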
    void completed(Differencer differencer) {
      logger.debug(
          String.format(
              "[repair #%s] Repair completed between %s and %s on %s",
              getName(), differencer.r1.endpoint, differencer.r2.endpoint, differencer.cfname));
      RepairJob job = activeJobs.get(differencer.cfname);
      if (job == null) {
        assert terminated;
        return;
      }

      if (job.completedSynchronization(differencer)) {
        activeJobs.remove(differencer.cfname);
        String remaining =
            activeJobs.size() == 0
                ? ""
                : String.format(
                    " (%d remaining column families to sync for this session)", activeJobs.size());
        logger.info(
            String.format(
                "[repair #%s] %s is fully synced%s", getName(), differencer.cfname, remaining));
        if (activeJobs.isEmpty()) completed.signalAll();
      }
    }
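 /**
  * Snapshots per-pool statistics (local and remote endpoints, pool size, connections in use)
  * for every connection pool currently in the pool table.
  */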
 public static ConnectionStatistics[] getPoolStatistics() {
   Set<ConnectionStatistics> stats = new HashSet<ConnectionStatistics>();
   Iterator<TcpConnectionManager> it = poolTable_.values().iterator();
   while (it.hasNext()) {
     TcpConnectionManager cp = it.next();
     ConnectionStatistics cs =
         new ConnectionStatistics(
             cp.getLocalEndPoint(),
             cp.getRemoteEndPoint(),
             cp.getPoolSize(),
             cp.getConnectionsInUse());
     stats.add(cs);
   }
   return stats.toArray(new ConnectionStatistics[0]);
 }
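  /**
   * Binds a non-blocking server socket at the given endpoint and registers it with the selector
   * for OP_ACCEPT, using an HTTP or TCP connection handler as requested, then records the
   * endpoint and its selection key for shutdown.
   */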
  public void listen(EndPoint localEp, boolean isHttp) throws IOException {
    ServerSocketChannel serverChannel = ServerSocketChannel.open();
    ServerSocket ss = serverChannel.socket();
    ss.bind(localEp.getInetAddress());
    serverChannel.configureBlocking(false);

    SelectionKeyHandler handler = null;
    if (isHttp) {
      handler = new HttpConnectionHandler();
    } else {
      handler = new TcpConnectionHandler(localEp);
    }

    SelectionKey key =
        SelectorManager.getSelectorManager()
            .register(serverChannel, handler, SelectionKey.OP_ACCEPT);
    endPoints_.add(localEp);
    listenSockets_.put(localEp, key);
  }
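 /** Marks the session terminated and discards all pending and active repair jobs. */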
 public void terminate() {
   terminated = true;
   for (RepairJob job : jobs) job.terminate();
   jobs.clear();
   activeJobs.clear();
 }
    // We don't care about the return value, but we do care about any exception it throws.
    public void runMayThrow() throws Exception {
      logger.info(
          String.format(
              "[repair #%s] new session: will sync %s on range %s for %s.%s",
              getName(), repairedNodes(), range, tablename, Arrays.toString(cfnames)));

      if (endpoints.isEmpty()) {
        differencingDone.signalAll();
        logger.info(
            String.format(
                "[repair #%s] No neighbors to repair with on range %s: session completed",
                getName(), range));
        return;
      }

      // Check that all the nodes are alive.
      for (InetAddress endpoint : endpoints) {
        if (!FailureDetector.instance.isAlive(endpoint)) {
          differencingDone.signalAll();
          logger.info(
              String.format(
                  "[repair #%s] Cannot proceed on repair because a neighbor (%s) is dead: session failed",
                  getName(), endpoint));
          return;
        }

        if (Gossiper.instance.getVersion(endpoint) < MessagingService.VERSION_11 && isSequential) {
          logger.info(
              String.format(
                  "[repair #%s] Cannot repair using snapshots as node %s is pre-1.1",
                  getName(), endpoint));
          return;
        }
      }

      AntiEntropyService.instance.sessions.put(getName(), this);
      Gossiper.instance.register(this);
      FailureDetector.instance.registerFailureDetectionEventListener(this);
      try {
        // Create and queue a RepairJob for each column family
        for (String cfname : cfnames) {
          RepairJob job = new RepairJob(cfname);
          jobs.offer(job);
          activeJobs.put(cfname, job);
        }

        jobs.peek().sendTreeRequests();

        // block whatever thread started this session until all requests have been returned:
        // if this thread dies, the session will still complete in the background
        completed.await();
        if (exception == null) {
          logger.info(String.format("[repair #%s] session completed successfully", getName()));
        } else {
          logger.error(
              String.format("[repair #%s] session completed with the following error", getName()),
              exception);
          throw exception;
        }
      } catch (InterruptedException e) {
        throw new RuntimeException("Interrupted while waiting for repair.");
      } finally {
        // mark this session as terminated
        terminate();
        FailureDetector.instance.unregisterFailureDetectionEventListener(this);
        Gossiper.instance.unregister(this);
        AntiEntropyService.instance.sessions.remove(getName());
      }
    }
 /**
  * Add a newly received tree and return the number of trees still to be received before the
  * job is complete.
  */
 public synchronized int addTree(TreeRequest request, MerkleTree tree) {
   assert request.cf.right.equals(cfname);
   trees.put(request.endpoint, tree);
   requestedEndpoints.remove(request.endpoint);
   return requestedEndpoints.size();
 }
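 /** Returns the verb handler registered for the given message type, or null if none is bound. */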
 public IVerbHandler getVerbHandler(String type) {
   return verbHandlers_.get(type);
 }
 public void deregisterVerbHandlers(String type) {
   verbHandlers_.remove(type);
 }
 public void registerVerbHandlers(String type, IVerbHandler verbHandler) {
   checkForReservedVerb(type);
   verbHandlers_.put(type, verbHandler);
 }
 private void checkForReservedVerb(String type) {
   if (reservedVerbs_.get(type) != null && verbHandlers_.get(type) != null) {
     throw new IllegalArgumentException(type + " is a reserved verb handler. Scram!");
   }
 }
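 /*
  * Hypothetical usage sketch; the verb name and handler class below are illustrative and not
  * part of this codebase. Note that registering a reserved verb succeeds only while no handler
  * is bound to it (the constructor relies on this to install the response verb handler);
  * re-registering a bound reserved verb throws IllegalArgumentException.
  *
  *   MessagingService ms = ...; // obtain the messaging service instance
  *   ms.registerVerbHandlers("MY-CUSTOM-VERB", new MyCustomVerbHandler());
  *   IVerbHandler handler = ms.getVerbHandler("MY-CUSTOM-VERB");
  *   ms.deregisterVerbHandlers("MY-CUSTOM-VERB");
  */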