@Override
protected Versioned<Slop> computeNext() {
    try {
        Versioned<Slop> head = null;
        if(!shutDown) {
            head = slopQueue.take();
            if(head.equals(END)) {
                // Poison pill: the producer is done, stop the iterator for good
                shutDown = true;
                isComplete = true;
            } else {
                slopsDone++;
                // Stop after this element once a full batch has been handed out
                if(slopsDone % voldemortConfig.getSlopBatchSize() == 0) {
                    shutDown = true;
                }

                // Throttle on the size of the previously returned slop, then record this one
                writeThrottler.maybeThrottle(writtenLast);
                writtenLast = slopSize(head);
                deleteBatch.add(Pair.create(head.getValue().makeKey(), head.getVersion()));
                return head;
            }
        }
        return endOfData();
    } catch(Exception e) {
        logger.error("Got an exception in computeNext", e);
        return endOfData();
    }
}
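The computeNext() above follows Guava's AbstractIterator contract: hand back the next element, or call endOfData() once the poison pill (END) or the batch limit is reached. Below is a minimal, self-contained sketch of that pattern, using a plain String poison pill and a hypothetical batch size in place of Versioned&lt;Slop&gt; and the Voldemort config; it is an illustration, not the project's code.

import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;

import com.google.common.collect.AbstractIterator;

// Illustrative only: drains a queue until a poison pill or a batch limit is hit.
class BatchingQueueIterator extends AbstractIterator<String> {

    static final String END = "__END__";     // poison pill marking the end of the stream

    private final BlockingQueue<String> queue;
    private final int batchSize;              // hypothetical batch limit
    private int done = 0;
    private boolean shutDown = false;

    BatchingQueueIterator(BlockingQueue<String> queue, int batchSize) {
        this.queue = queue;
        this.batchSize = batchSize;
    }

    @Override
    protected String computeNext() {
        try {
            if(!shutDown) {
                String head = queue.take();   // block until the producer hands over an item
                if(END.equals(head)) {
                    shutDown = true;          // poison pill: stop for good
                } else {
                    if(++done % batchSize == 0)
                        shutDown = true;      // return this element, then end the batch
                    return head;
                }
            }
            return endOfData();
        } catch(InterruptedException e) {
            Thread.currentThread().interrupt();
            return endOfData();
        }
    }

    public static void main(String[] args) throws InterruptedException {
        BlockingQueue<String> q = new ArrayBlockingQueue<String>(10);
        q.put("a");
        q.put("b");
        q.put(END);
        BatchingQueueIterator it = new BatchingQueueIterator(q, 100);
        while(it.hasNext())
            System.out.println(it.next());    // prints a, b
    }
}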
public VAdminProto.DeletePartitionEntriesResponse handleDeletePartitionEntries(VAdminProto.DeletePartitionEntriesRequest request) {
    VAdminProto.DeletePartitionEntriesResponse.Builder response = VAdminProto.DeletePartitionEntriesResponse.newBuilder();
    ClosableIterator<Pair<ByteArray, Versioned<byte[]>>> iterator = null;
    try {
        String storeName = request.getStore();
        List<Integer> partitions = request.getPartitionsList();
        StorageEngine<ByteArray, byte[]> storageEngine = getStorageEngine(storeRepository, storeName);
        VoldemortFilter filter = (request.hasFilter()) ? getFilterFromRequest(request.getFilter(),
                                                                              voldemortConfig,
                                                                              networkClassLoader)
                                                       : new DefaultVoldemortFilter();
        RoutingStrategy routingStrategy = metadataStore.getRoutingStrategy(storageEngine.getName());
        EventThrottler throttler = new EventThrottler(voldemortConfig.getStreamMaxReadBytesPerSec());
        iterator = storageEngine.entries();
        int deleteSuccess = 0;

        while(iterator.hasNext()) {
            Pair<ByteArray, Versioned<byte[]>> entry = iterator.next();

            ByteArray key = entry.getFirst();
            Versioned<byte[]> value = entry.getSecond();
            throttler.maybeThrottle(key.length() + valueSize(value));
            if(checkKeyBelongsToDeletePartition(key.get(), partitions, routingStrategy)
               && filter.accept(key, value)) {
                if(storageEngine.delete(key, value.getVersion()))
                    deleteSuccess++;
            }
        }
        response.setCount(deleteSuccess);
    } catch(VoldemortException e) {
        response.setError(ProtoUtils.encodeError(errorCodeMapper, e));
        logger.error("handleDeletePartitionEntries failed for request(" + request.toString() + ")", e);
    } finally {
        if(null != iterator)
            iterator.close();
    }
    return response.build();
}
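The delete loop only removes keys whose partition list, as produced by the routing strategy, intersects the requested partitions (the checkKeyBelongsToDeletePartition call). A small self-contained sketch of that membership test follows; partitionsForKey() is a hypothetical stand-in for the routing strategy's key-to-partition mapping and is not a Voldemort API.

import java.util.Arrays;
import java.util.Collections;
import java.util.List;

// Illustrative only: returns true if any partition the key maps to is in the
// set of partitions being deleted, mirroring checkKeyBelongsToDeletePartition above.
public class PartitionMembership {

    // Hypothetical stand-in for the routing strategy's key -> partitions mapping.
    static List<Integer> partitionsForKey(byte[] key) {
        return Collections.singletonList(Math.abs(Arrays.hashCode(key)) % 8);
    }

    static boolean keyBelongsToPartitions(byte[] key, List<Integer> deletePartitions) {
        for(Integer partition: partitionsForKey(key)) {
            if(deletePartitions.contains(partition))
                return true;
        }
        return false;
    }

    public static void main(String[] args) {
        byte[] key = "hello".getBytes();
        System.out.println(keyBelongsToPartitions(key, Arrays.asList(0, 1, 2, 3)));
    }
}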
public void run() {

    // Don't try to run the slop pusher job while the server is rebalancing
    if(metadataStore.getServerState()
                    .equals(MetadataStore.VoldemortState.REBALANCING_MASTER_SERVER)) {
        logger.error("Cannot run slop pusher job since Voldemort server is rebalancing");
        return;
    }

    boolean terminatedEarly = false;
    Date startTime = new Date();
    logger.info("Started streaming slop pusher job at " + startTime);

    SlopStorageEngine slopStorageEngine = storeRepo.getSlopStore();
    ClosableIterator<Pair<ByteArray, Versioned<Slop>>> iterator = null;

    if(adminClient == null) {
        adminClient = new AdminClient(cluster,
                                      new AdminClientConfig().setMaxThreads(cluster.getNumberOfNodes())
                                                             .setMaxConnectionsPerNode(1));
    }

    if(voldemortConfig.getSlopZonesDownToTerminate() > 0) {
        // Populate the zone mapping used for early termination
        zoneMapping.clear();
        for(Node n: cluster.getNodes()) {
            if(failureDetector.isAvailable(n)) {
                Set<Integer> nodes = zoneMapping.get(n.getZoneId());
                if(nodes == null) {
                    nodes = Sets.newHashSet();
                    zoneMapping.put(n.getZoneId(), nodes);
                }
                nodes.add(n.getId());
            }
        }

        // Check how many zones are down
        int zonesDown = 0;
        for(Zone zone: cluster.getZones()) {
            if(zoneMapping.get(zone.getId()) == null || zoneMapping.get(zone.getId()).size() == 0)
                zonesDown++;
        }

        // Terminate early if too many zones are unavailable
        if(voldemortConfig.getSlopZonesDownToTerminate() <= zoneMapping.size()
           && zonesDown >= voldemortConfig.getSlopZonesDownToTerminate()) {
            logger.info("Completed streaming slop pusher job at " + startTime
                        + " early because " + zonesDown + " zones are down");
            stopAdminClient();
            return;
        }
    }

    // Clear the statistics
    AtomicLong attemptedPushes = new AtomicLong(0);
    for(Node node: cluster.getNodes()) {
        attemptedByNode.put(node.getId(), 0L);
        succeededByNode.put(node.getId(), 0L);
    }

    acquireRepairPermit();
    try {
        StorageEngine<ByteArray, Slop, byte[]> slopStore = slopStorageEngine.asSlopStore();
        iterator = slopStore.entries();

        while(iterator.hasNext()) {
            Pair<ByteArray, Versioned<Slop>> keyAndVal;
            try {
                keyAndVal = iterator.next();
                Versioned<Slop> versioned = keyAndVal.getSecond();

                // Retrieve the destination node for this slop
                int nodeId = versioned.getValue().getNodeId();
                Node node = cluster.getNodeById(nodeId);

                attemptedPushes.incrementAndGet();
                Long attempted = attemptedByNode.get(nodeId);
                attemptedByNode.put(nodeId, attempted + 1L);
                if(attemptedPushes.get() % 10000 == 0)
                    logger.info("Attempted pushing " + attemptedPushes + " slops");

                if(logger.isTraceEnabled())
                    logger.trace("Pushing slop for " + versioned.getValue().getNodeId()
                                 + " and store " + versioned.getValue().getStoreName());

                if(failureDetector.isAvailable(node)) {
                    SynchronousQueue<Versioned<Slop>> slopQueue = slopQueues.get(nodeId);
                    if(slopQueue == null) {
                        // No previous slop queue for this node, create one and start a consumer
                        slopQueue = new SynchronousQueue<Versioned<Slop>>();
                        slopQueues.put(nodeId, slopQueue);
                        consumerResults.add(consumerExecutor.submit(new SlopConsumer(nodeId,
                                                                                     slopQueue,
                                                                                     slopStorageEngine)));
                    }
                    boolean offered = slopQueue.offer(versioned,
                                                      voldemortConfig.getClientRoutingTimeoutMs(),
                                                      TimeUnit.MILLISECONDS);
                    if(!offered) {
                        if(logger.isDebugEnabled())
                            logger.debug("No consumer appeared for slop in "
                                         + voldemortConfig.getClientRoutingTimeoutMs() + " ms");
                    }
                    readThrottler.maybeThrottle(nBytesRead(keyAndVal));
                } else {
                    logger.trace(node + " declared down, won't push slop");
                }
            } catch(RejectedExecutionException e) {
                throw new VoldemortException("Ran out of threads in executor", e);
            }
        }
    } catch(InterruptedException e) {
        logger.warn("Interrupted exception", e);
        terminatedEarly = true;
    } catch(Exception e) {
        logger.error("Unexpected exception while pushing slops", e);
        terminatedEarly = true;
    } finally {
        try {
            if(iterator != null)
                iterator.close();
        } catch(Exception e) {
            logger.warn("Failed to close iterator cleanly as database might be closed", e);
        }

        // Add the poison pill so every consumer drains and exits
        for(SynchronousQueue<Versioned<Slop>> slopQueue: slopQueues.values()) {
            try {
                slopQueue.put(END);
            } catch(InterruptedException e) {
                logger.warn("Error putting poison pill", e);
            }
        }

        // Wait for all consumers to finish
        for(Future result: consumerResults) {
            try {
                result.get();
            } catch(Exception e) {
                logger.warn("Exception in consumer", e);
            }
        }

        // Only update the outstanding counts if no exception took place
        if(!terminatedEarly) {
            Map<Integer, Long> outstanding = Maps.newHashMapWithExpectedSize(cluster.getNumberOfNodes());
            for(int nodeId: succeededByNode.keySet()) {
                logger.info("Slops to node " + nodeId + " - Succeeded - "
                            + succeededByNode.get(nodeId) + " - Attempted - "
                            + attemptedByNode.get(nodeId));
                outstanding.put(nodeId,
                                attemptedByNode.get(nodeId) - succeededByNode.get(nodeId));
            }
            slopStorageEngine.resetStats(outstanding);
            logger.info("Completed streaming slop pusher job which started at " + startTime);
        } else {
            for(int nodeId: succeededByNode.keySet()) {
                logger.info("Slops to node " + nodeId + " - Succeeded - "
                            + succeededByNode.get(nodeId) + " - Attempted - "
                            + attemptedByNode.get(nodeId));
            }
            logger.info("Completed early streaming slop pusher job which started at " + startTime);
        }

        // Shut down the admin client so as not to waste connections
        consumerResults.clear();
        slopQueues.clear();
        stopAdminClient();
        this.repairPermits.release();
    }
}
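The pusher hands each slop to a per-node consumer through a SynchronousQueue: offer(...) with a timeout fails if no consumer is ready, and a shared poison pill tells consumers to drain and exit before the statistics are reset. Below is a minimal sketch of that handoff with a String payload and a hypothetical timeout instead of Versioned&lt;Slop&gt; and the client routing timeout; it illustrates the pattern only.

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.SynchronousQueue;
import java.util.concurrent.TimeUnit;

// Illustrative only: one producer offering work to one consumer through a SynchronousQueue,
// terminated by a poison pill, as in the slop pusher / SlopConsumer pair above.
public class HandoffSketch {

    static final String END = "__END__";   // poison pill

    public static void main(String[] args) throws Exception {
        final SynchronousQueue<String> queue = new SynchronousQueue<String>();
        ExecutorService executor = Executors.newSingleThreadExecutor();

        // Consumer: take items until the poison pill arrives
        executor.submit(new Runnable() {
            public void run() {
                try {
                    while(true) {
                        String item = queue.take();
                        if(END.equals(item))
                            return;
                        System.out.println("consumed " + item);
                    }
                } catch(InterruptedException e) {
                    Thread.currentThread().interrupt();
                }
            }
        });

        // Producer: offer with a timeout so a missing consumer does not block forever
        for(String item: new String[] { "slop-1", "slop-2", "slop-3" }) {
            boolean offered = queue.offer(item, 500, TimeUnit.MILLISECONDS); // hypothetical timeout
            if(!offered)
                System.out.println("no consumer picked up " + item);
        }

        queue.put(END);                    // signal the consumer to exit
        executor.shutdown();
        executor.awaitTermination(5, TimeUnit.SECONDS);
    }
}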