public Map<Token, Float> describeOwnership(List<Token> sortedTokens)
{
    // allTokens will contain the counts and be returned; sortedRanges is shorthand for
    // the token<->token range math.
    Map<Token, Float> allTokens = new HashMap<Token, Float>();
    List<Range<Token>> sortedRanges = new ArrayList<Range<Token>>();

    // Initialize the counts to 0 and calculate the ranges in ring order.
    // Starting from the last token makes the first range wrap around the ring.
    Token lastToken = sortedTokens.get(sortedTokens.size() - 1);
    for (Token node : sortedTokens)
    {
        allTokens.put(node, new Float(0.0));
        sortedRanges.add(new Range<Token>(lastToken, node));
        lastToken = node;
    }

    for (String ks : Schema.instance.getKeyspaces())
    {
        for (CFMetaData cfmd : Schema.instance.getKSMetaData(ks).cfMetaData().values())
        {
            for (Range<Token> r : sortedRanges)
            {
                // For every KS:CF:Range, get the number of splits and add it to the count
                // for the range's right token.
                allTokens.put(r.right, allTokens.get(r.right) + StorageService.instance.getSplits(ks, cfmd.cfName, r, 1).size());
            }
        }
    }

    // Sum all the counts and divide each count by the total for the fractional ownership.
    Float total = new Float(0.0);
    for (Float f : allTokens.values())
        total += f;
    for (Map.Entry<Token, Float> row : allTokens.entrySet())
        allTokens.put(row.getKey(), row.getValue() / total);

    return allTokens;
}
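/*
 * A minimal standalone sketch of the ownership math above, assuming plain long tokens
 * and a hypothetical countKeys(left, right) callback in place of Cassandra's Token,
 * Range and getSplits(). It illustrates the wrap-around range construction and the
 * count/total normalization, not the real StorageService API.
 */
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.function.BiFunction;

public class OwnershipSketch
{
    // Returns token -> fraction of the total key count owned by the range ending at that token.
    public static Map<Long, Double> describeOwnership(List<Long> sortedTokens,
                                                      BiFunction<Long, Long, Long> countKeys)
    {
        Map<Long, Double> counts = new HashMap<>();

        // The range ending at the first token starts at the last token, i.e. it wraps the ring.
        long left = sortedTokens.get(sortedTokens.size() - 1);
        for (long right : sortedTokens)
        {
            counts.put(right, (double) countKeys.apply(left, right));
            left = right;
        }

        double total = counts.values().stream().mapToDouble(Double::doubleValue).sum();
        for (Map.Entry<Long, Double> e : counts.entrySet())
            e.setValue(total == 0 ? 0.0 : e.getValue() / total);
        return counts;
    }

    public static void main(String[] args)
    {
        // Toy ring with three tokens and a fake per-range key count equal to the range width
        // on a ring of size 100.
        List<Long> tokens = List.of(10L, 40L, 100L);
        Map<Long, Double> ownership =
                describeOwnership(tokens, (l, r) -> r > l ? r - l : 100 - l + r);
        System.out.println(ownership); // e.g. {40=0.3, 100=0.6, 10=0.1}
    }
}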
protected MessagingService()
{
    for (ReservedVerbs_ verbs : ReservedVerbs_.values())
    {
        reservedVerbs_.put(verbs.toString(), verbs.toString());
    }
    verbHandlers_ = new HashMap<String, IVerbHandler>();
    endPoints_ = new HashSet<EndPoint>();

    /*
     * Leave callbacks in the cachetable long enough that any related messages will arrive
     * before the callback is evicted from the table. The concurrency level is set at 128,
     * which is the sum of the threads in the pool that adds entries to the table and the
     * pool that retrieves callbacks from it.
     */
    int maxSize = MessagingConfig.getMessagingThreadCount();
    callbackMap_ = new Cachetable<String, IAsyncCallback>(2 * DatabaseDescriptor.getRpcTimeout());
    taskCompletionMap_ = new Cachetable<String, IAsyncResult>(2 * DatabaseDescriptor.getRpcTimeout());

    messageDeserializationExecutor_ = new DebuggableThreadPoolExecutor(maxSize,
                                                                       maxSize,
                                                                       Integer.MAX_VALUE,
                                                                       TimeUnit.SECONDS,
                                                                       new LinkedBlockingQueue<Runnable>(),
                                                                       new ThreadFactoryImpl("MESSAGING-SERVICE-POOL"));

    messageSerializerExecutor_ = new DebuggableThreadPoolExecutor(maxSize,
                                                                  maxSize,
                                                                  Integer.MAX_VALUE,
                                                                  TimeUnit.SECONDS,
                                                                  new LinkedBlockingQueue<Runnable>(),
                                                                  new ThreadFactoryImpl("MESSAGE-SERIALIZER-POOL"));

    messageDeserializerExecutor_ = new DebuggableThreadPoolExecutor(maxSize,
                                                                    maxSize,
                                                                    Integer.MAX_VALUE,
                                                                    TimeUnit.SECONDS,
                                                                    new LinkedBlockingQueue<Runnable>(),
                                                                    new ThreadFactoryImpl("MESSAGE-DESERIALIZER-POOL"));

    streamExecutor_ = new DebuggableThreadPoolExecutor(1,
                                                       1,
                                                       Integer.MAX_VALUE,
                                                       TimeUnit.SECONDS,
                                                       new LinkedBlockingQueue<Runnable>(),
                                                       new ThreadFactoryImpl("MESSAGE-STREAMING-POOL"));

    protocol_ = hash(HashingSchemes.MD5, "FB-MESSAGING".getBytes());

    /* register the response verb handler */
    registerVerbHandlers(MessagingService.responseVerbHandler_, new ResponseVerbHandler());

    /* register stage for response */
    StageManager.registerStage(MessagingService.responseStage_, new MultiThreadedStage("RESPONSE-STAGE", maxSize));
}
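/*
 * A minimal sketch of the idea behind Cachetable above: callbacks are kept for roughly
 * twice the RPC timeout and then evicted, so a response that arrives too late simply
 * finds nothing. ExpiringCallbackMap, the daemon evictor thread and the 1_000 ms timeout
 * in main() are illustrative assumptions, not the real Cachetable implementation.
 */
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public class ExpiringCallbackMap<K, V>
{
    private final Map<K, V> map = new ConcurrentHashMap<>();
    private final ScheduledExecutorService evictor =
            Executors.newSingleThreadScheduledExecutor(r -> {
                Thread t = new Thread(r, "callback-evictor");
                t.setDaemon(true);
                return t;
            });
    private final long expirationMillis;

    public ExpiringCallbackMap(long expirationMillis)
    {
        this.expirationMillis = expirationMillis;
    }

    // Register a callback and schedule its eviction after the expiration window.
    public void put(K messageId, V callback)
    {
        map.put(messageId, callback);
        evictor.schedule(() -> map.remove(messageId), expirationMillis, TimeUnit.MILLISECONDS);
    }

    // Return and remove the callback, or null if it has already been evicted.
    public V remove(K messageId)
    {
        return map.remove(messageId);
    }

    public static void main(String[] args)
    {
        // Mirrors "2 * rpcTimeout": callbacks survive at most twice an assumed 1_000 ms timeout.
        ExpiringCallbackMap<String, Runnable> callbacks = new ExpiringCallbackMap<>(2 * 1_000);
        callbacks.put("msg-1", () -> System.out.println("response handled"));
        Runnable cb = callbacks.remove("msg-1");
        if (cb != null)
            cb.run();
    }
}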
// We don't care about the return value, but we do care about it throwing an exception.
public void runMayThrow() throws Exception
{
    if (endpoints.isEmpty())
    {
        differencingDone.signalAll();
        logger.info("No neighbors to repair with for " + tablename + " on " + range + ": " + getName() + " completed.");
        return;
    }

    // Check that all neighbor nodes are live.
    for (InetAddress endpoint : endpoints)
    {
        if (!FailureDetector.instance.isAlive(endpoint))
        {
            differencingDone.signalAll();
            logger.info("Could not proceed on repair because a neighbor (" + endpoint + ") is dead: " + getName() + " failed.");
            return;
        }
    }

    AntiEntropyService.instance.sessions.put(getName(), this);
    Gossiper.instance.register(this);
    FailureDetector.instance.registerFailureDetectionEventListener(this);
    try
    {
        // Create and queue a RepairJob for each column family.
        for (String cfname : cfnames)
        {
            RepairJob job = new RepairJob(cfname);
            jobs.offer(job);
            activeJobs.put(cfname, job);
        }

        jobs.peek().sendTreeRequests();

        // Block whatever thread started this session until all requests have been returned:
        // if this thread dies, the session will still complete in the background.
        completed.await();
        if (exception != null)
            throw exception;
    }
    catch (InterruptedException e)
    {
        throw new RuntimeException("Interrupted while waiting for repair: repair will continue in the background.");
    }
    finally
    {
        FailureDetector.instance.unregisterFailureDetectionEventListener(this);
        Gossiper.instance.unregister(this);
        AntiEntropyService.instance.sessions.remove(getName());
    }
}
public void listen(EndPoint localEp, boolean isHttp) throws IOException
{
    ServerSocketChannel serverChannel = ServerSocketChannel.open();
    ServerSocket ss = serverChannel.socket();
    ss.bind(localEp.getInetAddress());
    serverChannel.configureBlocking(false);

    SelectionKeyHandler handler = null;
    if (isHttp)
    {
        handler = new HttpConnectionHandler();
    }
    else
    {
        handler = new TcpConnectionHandler(localEp);
    }

    SelectionKey key = SelectorManager.getSelectorManager().register(serverChannel, handler, SelectionKey.OP_ACCEPT);
    endPoints_.add(localEp);
    listenSockets_.put(localEp, key);
}
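/*
 * A minimal sketch, using only java.nio, of the non-blocking accept setup that
 * SelectorManager presumably wraps above: bind, configureBlocking(false), register for
 * OP_ACCEPT, then accept connections from a selector loop. The port and the
 * accept-then-close handling are illustrative assumptions, not the real SelectorManager.
 */
import java.io.IOException;
import java.net.InetSocketAddress;
import java.nio.channels.SelectionKey;
import java.nio.channels.Selector;
import java.nio.channels.ServerSocketChannel;
import java.nio.channels.SocketChannel;
import java.util.Iterator;

public class AcceptLoopSketch
{
    public static void main(String[] args) throws IOException
    {
        Selector selector = Selector.open();

        ServerSocketChannel serverChannel = ServerSocketChannel.open();
        serverChannel.bind(new InetSocketAddress(7000));
        serverChannel.configureBlocking(false);
        serverChannel.register(selector, SelectionKey.OP_ACCEPT);

        while (true)
        {
            selector.select(); // block until at least one registered channel is ready
            Iterator<SelectionKey> keys = selector.selectedKeys().iterator();
            while (keys.hasNext())
            {
                SelectionKey key = keys.next();
                keys.remove();
                if (key.isAcceptable())
                {
                    // Accept the new connection; a real handler would register it for OP_READ.
                    SocketChannel client = ((ServerSocketChannel) key.channel()).accept();
                    System.out.println("accepted " + client.getRemoteAddress());
                    client.close();
                }
            }
        }
    }
}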
public static TcpConnectionManager getConnectionPool(EndPoint from, EndPoint to)
{
    String key = from + ":" + to;
    TcpConnectionManager cp = poolTable_.get(key);
    if (cp == null)
    {
        lock_.lock();
        try
        {
            // Double-checked locking: re-check after acquiring the lock so that only one
            // pool is ever created for a given (from, to) pair.
            cp = poolTable_.get(key);
            if (cp == null)
            {
                cp = new TcpConnectionManager(MessagingConfig.getConnectionPoolInitialSize(),
                                              MessagingConfig.getConnectionPoolGrowthFactor(),
                                              MessagingConfig.getConnectionPoolMaxSize(),
                                              from,
                                              to);
                poolTable_.put(key, cp);
            }
        }
        finally
        {
            lock_.unlock();
        }
    }
    return cp;
}
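/*
 * The method above uses double-checked locking around poolTable_. On Java 8+ the same
 * get-or-create-once behavior can be expressed with ConcurrentHashMap.computeIfAbsent;
 * this is an alternative sketch with a hypothetical Pool placeholder, not the original
 * TcpConnectionManager code.
 */
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

public class ConnectionPoolRegistry
{
    // Placeholder for the per-(from, to) connection pool.
    static class Pool
    {
        final String from, to;
        Pool(String from, String to) { this.from = from; this.to = to; }
    }

    private static final ConcurrentMap<String, Pool> pools = new ConcurrentHashMap<>();

    // computeIfAbsent runs the factory at most once per key, even under contention.
    public static Pool getConnectionPool(String from, String to)
    {
        return pools.computeIfAbsent(from + ":" + to, key -> new Pool(from, to));
    }

    public static void main(String[] args)
    {
        Pool a = getConnectionPool("10.0.0.1:7000", "10.0.0.2:7000");
        Pool b = getConnectionPool("10.0.0.1:7000", "10.0.0.2:7000");
        System.out.println(a == b); // true: the same pool instance is reused
    }
}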
// We don't care about the return value, but we do care about it throwing an exception.
public void runMayThrow() throws Exception
{
    logger.info(String.format("[repair #%s] new session: will sync %s on range %s for %s.%s",
                              getName(), repairedNodes(), range, tablename, Arrays.toString(cfnames)));

    if (endpoints.isEmpty())
    {
        differencingDone.signalAll();
        logger.info(String.format("[repair #%s] No neighbors to repair with on range %s: session completed",
                                  getName(), range));
        return;
    }

    // Check that all neighbor nodes are live.
    for (InetAddress endpoint : endpoints)
    {
        if (!FailureDetector.instance.isAlive(endpoint))
        {
            differencingDone.signalAll();
            logger.info(String.format("[repair #%s] Cannot proceed on repair because a neighbor (%s) is dead: session failed",
                                      getName(), endpoint));
            return;
        }

        if (Gossiper.instance.getVersion(endpoint) < MessagingService.VERSION_11 && isSequential)
        {
            logger.info(String.format("[repair #%s] Cannot repair using snapshots as node %s is pre-1.1",
                                      getName(), endpoint));
            return;
        }
    }

    AntiEntropyService.instance.sessions.put(getName(), this);
    Gossiper.instance.register(this);
    FailureDetector.instance.registerFailureDetectionEventListener(this);
    try
    {
        // Create and queue a RepairJob for each column family.
        for (String cfname : cfnames)
        {
            RepairJob job = new RepairJob(cfname);
            jobs.offer(job);
            activeJobs.put(cfname, job);
        }

        jobs.peek().sendTreeRequests();

        // Block whatever thread started this session until all requests have been returned:
        // if this thread dies, the session will still complete in the background.
        completed.await();
        if (exception == null)
        {
            logger.info(String.format("[repair #%s] session completed successfully", getName()));
        }
        else
        {
            logger.error(String.format("[repair #%s] session completed with the following error", getName()), exception);
            throw exception;
        }
    }
    catch (InterruptedException e)
    {
        throw new RuntimeException("Interrupted while waiting for repair.");
    }
    finally
    {
        // Mark this session as terminated.
        terminate();
        FailureDetector.instance.unregisterFailureDetectionEventListener(this);
        Gossiper.instance.unregister(this);
        AntiEntropyService.instance.sessions.remove(getName());
    }
}
/**
 * Add a newly received tree and return the number of remaining trees to be received
 * before the job is complete.
 */
public synchronized int addTree(TreeRequest request, MerkleTree tree)
{
    assert request.cf.right.equals(cfname);
    trees.put(request.endpoint, tree);
    requestedEndpoints.remove(request.endpoint);
    return requestedEndpoints.size();
}
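/*
 * A small sketch of how the remaining-count return value is typically used: the caller
 * treats "0 trees left" as the signal to start the next phase. TreeTracker, the String
 * endpoint/tree stand-ins and the printed message are illustrative assumptions, not the
 * real RepairJob code.
 */
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

public class TreeTracker
{
    private final Set<String> requestedEndpoints = new HashSet<>();
    private final Map<String, String> trees = new HashMap<>();

    public TreeTracker(Set<String> endpoints)
    {
        requestedEndpoints.addAll(endpoints);
    }

    // Record a received tree; returns how many endpoints still have to respond.
    public synchronized int addTree(String endpoint, String tree)
    {
        trees.put(endpoint, tree);
        requestedEndpoints.remove(endpoint);
        return requestedEndpoints.size();
    }

    public static void main(String[] args)
    {
        TreeTracker tracker = new TreeTracker(Set.of("node1", "node2"));
        tracker.addTree("node1", "tree-a");
        if (tracker.addTree("node2", "tree-b") == 0)
            System.out.println("all trees received, start differencing");
    }
}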
public void registerVerbHandlers(String type, IVerbHandler verbHandler)
{
    checkForReservedVerb(type);
    verbHandlers_.put(type, verbHandler);
}