public void addStorageEngine(StorageEngine<ByteArray, byte[], byte[]> engine) {
    StorageEngine<ByteArray, byte[], byte[]> found = this.storageEngines.putIfAbsent(engine.getName(),
                                                                                     engine);
    if (found != null)
        throw new VoldemortException("Storage Engine '" + engine.getName()
                                     + "' has already been initialized.");

    // register streaming stats object for the store
    if (streamingStatsMap != null) {
        // lazily register the aggregated mbean
        if (storageEngines.size() == 1) {
            JmxUtils.registerMbean(aggregatedStreamStats,
                                   JmxUtils.createObjectName(this.getClass().getCanonicalName(),
                                                             "aggregated-streaming-stats"));
        }
        StreamingStats stat = new StreamingStats(aggregatedStreamStats);
        JmxUtils.registerMbean(stat,
                               JmxUtils.createObjectName(this.getClass().getCanonicalName(),
                                                         engine.getName() + "-streaming-stats"));
        streamingStatsMap.putIfAbsent(engine.getName(), stat);
    }
}
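A JDK-only sketch of the putIfAbsent registration idiom used above; the Registry class and all names in it are hypothetical, for illustration only:

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

// Hypothetical registry illustrating first-writer-wins registration:
// ConcurrentMap.putIfAbsent returns null only for the caller that actually
// inserted, so duplicate registration is detected atomically, without locks.
public class Registry<V> {

    private final ConcurrentMap<String, V> entries = new ConcurrentHashMap<String, V>();

    public void register(String name, V value) {
        V found = entries.putIfAbsent(name, value);
        if (found != null)
            throw new IllegalStateException("'" + name + "' has already been initialized.");
    }
}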
public void unregisterSystemEngine(StorageEngine<ByteArray, byte[], byte[]> engine) {
    String storeName = engine.getName();
    Store<ByteArray, byte[], byte[]> store = storeRepository.removeLocalStore(storeName);

    if (store != null) {
        if (voldemortConfig.isJmxEnabled()) {
            MBeanServer mbeanServer = ManagementFactory.getPlatformMBeanServer();

            if (voldemortConfig.isEnableRebalanceService()) {
                ObjectName name = null;
                if (this.voldemortConfig.isEnableJmxClusterName())
                    name = JmxUtils.createObjectName(metadata.getCluster().getName() + "."
                                                     + JmxUtils.getPackageName(RedirectingStore.class),
                                                     store.getName());
                else
                    name = JmxUtils.createObjectName(JmxUtils.getPackageName(RedirectingStore.class),
                                                     store.getName());

                synchronized (mbeanServer) {
                    if (mbeanServer.isRegistered(name))
                        JmxUtils.unregisterMbean(mbeanServer, name);
                }
            }

            if (voldemortConfig.isStatTrackingEnabled()) {
                ObjectName name = null;
                if (this.voldemortConfig.isEnableJmxClusterName())
                    name = JmxUtils.createObjectName(metadata.getCluster().getName() + "."
                                                     + JmxUtils.getPackageName(store.getClass()),
                                                     store.getName());
                else
                    name = JmxUtils.createObjectName(JmxUtils.getPackageName(store.getClass()),
                                                     store.getName());

                synchronized (mbeanServer) {
                    if (mbeanServer.isRegistered(name))
                        JmxUtils.unregisterMbean(mbeanServer, name);
                }
            }
        }

        if (voldemortConfig.isServerRoutingEnabled()) {
            this.storeRepository.removeRoutedStore(storeName);
            for (Node node : metadata.getCluster().getNodes())
                this.storeRepository.removeNodeStore(storeName, node.getId());
        }
    }

    storeRepository.removeStorageEngine(storeName);
    // engine.truncate(); why truncate here when unregistering? Isn't close()
    // good enough?
    engine.close();
}
public List<StorageEngine<ByteArray, byte[], byte[]>> getStorageEnginesByClass(Class<? extends StorageEngine<?, ?, ?>> c) {
    List<StorageEngine<ByteArray, byte[], byte[]>> l = new ArrayList<StorageEngine<ByteArray, byte[], byte[]>>();
    for (StorageEngine<ByteArray, byte[], byte[]> engine : this.storageEngines.values())
        if (engine.getClass().equals(c))
            l.add(engine);
    return l;
}
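A hedged usage sketch for getStorageEnginesByClass; the call site below is hypothetical, assuming an initialized StoreRepository named repo, a logger, and Voldemort's BdbStorageEngine class on the classpath:

// Hypothetical call site: close every BDB-backed engine in the repository.
List<StorageEngine<ByteArray, byte[], byte[]>> bdbEngines = repo.getStorageEnginesByClass(BdbStorageEngine.class);
for (StorageEngine<ByteArray, byte[], byte[]> engine : bdbEngines) {
    logger.info("Closing " + engine.getName());
    engine.close();
}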
public VAdminProto.TruncateEntriesResponse handleTruncateEntries(VAdminProto.TruncateEntriesRequest request) {
    VAdminProto.TruncateEntriesResponse.Builder response = VAdminProto.TruncateEntriesResponse.newBuilder();

    try {
        String storeName = request.getStore();
        // Note: the engine type needs all three type parameters, matching the
        // other handlers (the original two-parameter form would not compile).
        StorageEngine<ByteArray, byte[], byte[]> storageEngine = getStorageEngine(storeRepository,
                                                                                  storeName);
        storageEngine.truncate();
    } catch (VoldemortException e) {
        response.setError(ProtoUtils.encodeError(errorCodeMapper, e));
        logger.error("handleTruncateEntries failed for request(" + request.toString() + ")", e);
    }

    return response.build();
}
@JmxOperation(description = "Force cleanup of old data based on retention policy.",
              impact = MBeanOperationInfo.ACTION)
public void forceCleanupOldDataThrottled(String storeName, int entryScanThrottleRate) {
    logger.info("forceCleanupOldDataThrottled() called for store " + storeName
                + " with a retention scan throttle rate of " + entryScanThrottleRate
                + " entries/second.");
    try {
        StoreDefinition storeDef = getMetadataStore().getStoreDef(storeName);
        StorageEngine<ByteArray, byte[], byte[]> engine = storeRepository.getStorageEngine(storeName);

        if (null != engine) {
            if (storeDef.hasRetentionPeriod()) {
                ExecutorService executor = Executors.newFixedThreadPool(1);
                try {
                    if (scanPermitWrapper.availablePermits() >= 1) {
                        executor.execute(new DataCleanupJob<ByteArray, byte[], byte[]>(engine,
                                                                                       scanPermitWrapper,
                                                                                       storeDef.getRetentionDays() * Time.MS_PER_DAY,
                                                                                       SystemTime.INSTANCE,
                                                                                       new EventThrottler(entryScanThrottleRate),
                                                                                       metadata));
                    } else {
                        logger.error("forceCleanupOldDataThrottled() No scan permit available; a cleanup job is already running for "
                                     + engine.getName());
                    }
                } finally {
                    executor.shutdown();
                }
            } else {
                logger.error("forceCleanupOldDataThrottled() No retention policy found for "
                             + storeName);
            }
        }
    } catch (Exception e) {
        logger.error("Error while running forceCleanupOldDataThrottled()", e);
        throw new VoldemortException(e);
    }
}
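The availablePermits() check above is advisory only: a permit could be taken between the check and the job's own acquire. A minimal, JDK-only sketch of the tryAcquire form of this gating (all names hypothetical):

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Semaphore;

public class GatedJobRunner {

    private final Semaphore scanPermits = new Semaphore(1);
    private final ExecutorService executor = Executors.newFixedThreadPool(1);

    /** Runs the job only if a permit is free; returns false if one is not. */
    public boolean tryRunJob(final Runnable job) {
        if (!scanPermits.tryAcquire())
            return false; // another scan is already running
        executor.execute(new Runnable() {
            public void run() {
                try {
                    job.run();
                } finally {
                    scanPermits.release(); // always hand the permit back
                }
            }
        });
        return true;
    }
}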
private void putAlltoStore() {
    for (Entry<ByteArray, byte[]> entry : ServerTestUtils.createRandomKeyValuePairs(TEST_KEYS)
                                                         .entrySet()) {
        try {
            failingStorageEngine.put(entry.getKey(),
                                     new Versioned<byte[]>(entry.getValue()),
                                     null);
        } catch (Exception e) {
            // ignore
        }
    }
}
public VAdminProto.DeletePartitionEntriesResponse handleDeletePartitionEntries(VAdminProto.DeletePartitionEntriesRequest request) {
    VAdminProto.DeletePartitionEntriesResponse.Builder response = VAdminProto.DeletePartitionEntriesResponse.newBuilder();
    ClosableIterator<Pair<ByteArray, Versioned<byte[]>>> iterator = null;

    try {
        String storeName = request.getStore();
        List<Integer> partitions = request.getPartitionsList();
        // As in handleTruncateEntries, the engine needs all three type
        // parameters; the two-parameter form was a typo.
        StorageEngine<ByteArray, byte[], byte[]> storageEngine = getStorageEngine(storeRepository,
                                                                                  storeName);
        VoldemortFilter filter = (request.hasFilter()) ? getFilterFromRequest(request.getFilter(),
                                                                              voldemortConfig,
                                                                              networkClassLoader)
                                                       : new DefaultVoldemortFilter();
        RoutingStrategy routingStrategy = metadataStore.getRoutingStrategy(storageEngine.getName());

        EventThrottler throttler = new EventThrottler(voldemortConfig.getStreamMaxReadBytesPerSec());
        iterator = storageEngine.entries();
        int deleteSuccess = 0;

        while (iterator.hasNext()) {
            Pair<ByteArray, Versioned<byte[]>> entry = iterator.next();

            ByteArray key = entry.getFirst();
            Versioned<byte[]> value = entry.getSecond();
            throttler.maybeThrottle(key.length() + valueSize(value));
            if (checkKeyBelongsToDeletePartition(key.get(), partitions, routingStrategy)
                && filter.accept(key, value)) {
                if (storageEngine.delete(key, value.getVersion()))
                    deleteSuccess++;
            }
        }
        response.setCount(deleteSuccess);
    } catch (VoldemortException e) {
        response.setError(ProtoUtils.encodeError(errorCodeMapper, e));
        logger.error("handleDeletePartitionEntries failed for request(" + request.toString() + ")",
                     e);
    } finally {
        if (null != iterator)
            iterator.close();
    }
    return response.build();
}
// client side should get exceptions from servers
@Test
public void testFailures() {
    for (StreamOperations operation : StreamOperations.values()) {
        try {
            doOperation(operation, 0, failingStorageEngine.getName(), Arrays.asList(0, 1));
            fail("Unit test should fail for " + operation);
        } catch (Exception e) {
            // ignore
        }
    }
}
/**
 * Schedule a data retention cleanup job for the given store
 *
 * @param storeDef The store definition
 * @param engine The storage engine to do cleanup on
 */
private void scheduleCleanupJob(StoreDefinition storeDef,
                                StorageEngine<ByteArray, byte[], byte[]> engine) {
    // Compute the start time of the job, based on current time
    GregorianCalendar cal = Utils.getCalendarForNextRun(new GregorianCalendar(),
                                                        voldemortConfig.getRetentionCleanupFirstStartDayOfWeek(),
                                                        voldemortConfig.getRetentionCleanupFirstStartTimeInHour());

    // allow only one cleanup job at a time
    Date startTime = cal.getTime();

    int maxReadRate = storeDef.hasRetentionScanThrottleRate() ? storeDef.getRetentionScanThrottleRate()
                                                              : Integer.MAX_VALUE;

    logger.info("Scheduling data retention cleanup job for store '" + storeDef.getName()
                + "' at " + startTime + " with a retention scan throttle rate of " + maxReadRate
                + " entries/second.");

    EventThrottler throttler = new EventThrottler(maxReadRate);

    Runnable cleanupJob = new DataCleanupJob<ByteArray, byte[], byte[]>(engine,
                                                                        scanPermitWrapper,
                                                                        storeDef.getRetentionDays() * Time.MS_PER_DAY,
                                                                        SystemTime.INSTANCE,
                                                                        throttler,
                                                                        metadata);
    if (voldemortConfig.isJmxEnabled()) {
        JmxUtils.registerMbean("DataCleanupJob-" + engine.getName(), cleanupJob);
    }

    long retentionFreqHours = storeDef.hasRetentionFrequencyDays() ? (storeDef.getRetentionFrequencyDays() * Time.HOURS_PER_DAY)
                                                                   : voldemortConfig.getRetentionCleanupScheduledPeriodInHour();

    this.scheduler.schedule("cleanup-" + storeDef.getName(),
                            cleanupJob,
                            startTime,
                            retentionFreqHours * Time.MS_PER_HOUR,
                            voldemortConfig.getRetentionCleanupPinStartTime());
}
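Utils.getCalendarForNextRun is Voldemort's own helper; a rough, JDK-only sketch of the same idea (advance to the next occurrence of a given day-of-week and hour, strictly after "now") might look like:

import java.util.Calendar;
import java.util.GregorianCalendar;

// Rough sketch: returns a calendar set to the next time "hourOfDay" occurs on
// "dayOfWeek" (Calendar.SUNDAY..Calendar.SATURDAY), strictly after "now".
public static GregorianCalendar nextRun(GregorianCalendar now, int dayOfWeek, int hourOfDay) {
    GregorianCalendar next = (GregorianCalendar) now.clone();
    next.set(Calendar.HOUR_OF_DAY, hourOfDay);
    next.set(Calendar.MINUTE, 0);
    next.set(Calendar.SECOND, 0);
    next.set(Calendar.MILLISECOND, 0);
    // Walk forward a day at a time until both constraints hold.
    while (next.get(Calendar.DAY_OF_WEEK) != dayOfWeek || !next.after(now))
        next.add(Calendar.DAY_OF_MONTH, 1);
    return next;
}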
@Test
public void testWithStartFailure() {
    // stop the admin server first, so every streaming operation should fail
    // to connect
    for (StreamOperations operation : StreamOperations.values()) {
        adminServer.stop();
        try {
            doOperation(operation, 0, failingStorageEngine.getName(), Arrays.asList(0, 1));
            fail();
        } catch (UnreachableStoreException e) {
            // ignore
        }
    }
}
private DataSetStats calculateStats(StorageEngine<ByteArray, byte[], byte[]> store) {
    DataSetStats stats = new DataSetStats();
    ClosableIterator<Pair<ByteArray, Versioned<byte[]>>> iter = store.entries();
    try {
        int count = 0;
        while (iter.hasNext()) {
            Pair<ByteArray, Versioned<byte[]>> pair = iter.next();
            VectorClock clock = (VectorClock) pair.getSecond().getVersion();
            stats.countEntry(pair.getFirst().length(),
                             pair.getSecond().getValue().length + clock.sizeInBytes());
            if (count % 10000 == 0)
                logger.debug("Processing key " + count);
            count++;
        }
    } finally {
        iter.close();
    }
    return stats;
}
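calculateStats depends on the iterate-inside-try, close-in-finally contract; a self-contained sketch of that pattern against a hypothetical CloseableIterator interface (mirroring voldemort.utils.ClosableIterator):

import java.util.Iterator;

// Hypothetical minimal contract mirroring voldemort.utils.ClosableIterator.
interface CloseableIterator<T> extends Iterator<T> {
    void close();
}

final class IterationPattern {

    // Count entries, guaranteeing the underlying cursor is released even if
    // hasNext()/next() throws part-way through the scan.
    static <T> long count(CloseableIterator<T> iter) {
        long count = 0;
        try {
            while (iter.hasNext()) {
                iter.next();
                count++;
            }
        } finally {
            iter.close();
        }
        return count;
    }
}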
/**
 * Register the given engine with the storage repository
 *
 * @param engine The storage engine to register
 * @param isReadOnly Boolean indicating if this store is read-only
 * @param storeType The type of the store
 * @param storeDef The store definition for the store to be registered
 */
public void registerEngine(StorageEngine<ByteArray, byte[], byte[]> engine,
                           boolean isReadOnly,
                           String storeType,
                           StoreDefinition storeDef) {
    Cluster cluster = this.metadata.getCluster();
    storeRepository.addStorageEngine(engine);

    /* Now add any store wrappers that are enabled */
    Store<ByteArray, byte[], byte[]> store = engine;
    boolean isMetadata = store.getName().compareTo(MetadataStore.METADATA_STORE_NAME) == 0;
    boolean isSlop = storeType.compareTo("slop") == 0;
    boolean isView = storeType.compareTo(ViewStorageConfiguration.TYPE_NAME) == 0;

    if (voldemortConfig.isVerboseLoggingEnabled())
        store = new LoggingStore<ByteArray, byte[], byte[]>(store,
                                                            cluster.getName(),
                                                            SystemTime.INSTANCE);
    if (!isSlop) {
        if (!isReadOnly && !isMetadata && !isView) {
            // wrap store to enforce retention policy
            if (voldemortConfig.isEnforceRetentionPolicyOnRead() && storeDef != null) {
                RetentionEnforcingStore retentionEnforcingStore = new RetentionEnforcingStore(store,
                                                                                              storeDef,
                                                                                              voldemortConfig.isDeleteExpiredValuesOnRead(),
                                                                                              SystemTime.INSTANCE);
                metadata.addMetadataStoreListener(store.getName(), retentionEnforcingStore);
                store = retentionEnforcingStore;
            }

            if (voldemortConfig.isEnableRebalanceService()) {
                ProxyPutStats proxyPutStats = new ProxyPutStats(aggregatedProxyPutStats);
                if (voldemortConfig.isJmxEnabled()) {
                    JmxUtils.registerMbean(proxyPutStats,
                                           JmxUtils.createObjectName("voldemort.store.rebalancing",
                                                                     engine.getName() + "-proxy-puts"));
                }
                store = new RedirectingStore(store,
                                             metadata,
                                             storeRepository,
                                             failureDetector,
                                             storeFactory,
                                             proxyPutWorkerPool,
                                             proxyPutStats);
                if (voldemortConfig.isJmxEnabled()) {
                    MBeanServer mbeanServer = ManagementFactory.getPlatformMBeanServer();
                    ObjectName name = null;
                    if (this.voldemortConfig.isEnableJmxClusterName())
                        name = JmxUtils.createObjectName(cluster.getName() + "."
                                                         + JmxUtils.getPackageName(RedirectingStore.class),
                                                         store.getName());
                    else
                        name = JmxUtils.createObjectName(JmxUtils.getPackageName(RedirectingStore.class),
                                                         store.getName());

                    synchronized (mbeanServer) {
                        if (mbeanServer.isRegistered(name))
                            JmxUtils.unregisterMbean(mbeanServer, name);

                        JmxUtils.registerMbean(mbeanServer,
                                               JmxUtils.createModelMBean(store),
                                               name);
                    }
                }
            }
        }

        if (voldemortConfig.isMetadataCheckingEnabled() && !isMetadata) {
            store = new InvalidMetadataCheckingStore(metadata.getNodeId(), store, metadata);
        }
    }

    if (voldemortConfig.isStatTrackingEnabled()) {
        StatTrackingStore statStore = new StatTrackingStore(store, this.storeStats);
        store = statStore;
        if (voldemortConfig.isJmxEnabled()) {
            MBeanServer mbeanServer = ManagementFactory.getPlatformMBeanServer();
            ObjectName name = null;
            if (this.voldemortConfig.isEnableJmxClusterName())
                name = JmxUtils.createObjectName(metadata.getCluster().getName() + "."
                                                 + JmxUtils.getPackageName(store.getClass()),
                                                 store.getName());
            else
                name = JmxUtils.createObjectName(JmxUtils.getPackageName(store.getClass()),
                                                 store.getName());

            synchronized (mbeanServer) {
                if (mbeanServer.isRegistered(name))
                    JmxUtils.unregisterMbean(mbeanServer, name);

                JmxUtils.registerMbean(mbeanServer,
                                       JmxUtils.createModelMBean(new StoreStatsJmx(statStore.getStats())),
                                       name);
            }
        }

        // Wrap everything under the rate limiting store (barring the
        // metadata store)
        if (voldemortConfig.isEnableQuotaLimiting() && !isMetadata) {
            FileBackedCachingStorageEngine quotaStore = (FileBackedCachingStorageEngine) storeRepository.getStorageEngine(SystemStoreConstants.SystemStoreName.voldsys$_store_quotas.toString());
            QuotaLimitStats quotaStats = new QuotaLimitStats(this.aggregatedQuotaStats);
            QuotaLimitingStore rateLimitingStore = new QuotaLimitingStore(store,
                                                                          this.storeStats,
                                                                          quotaStats,
                                                                          quotaStore);
            if (voldemortConfig.isJmxEnabled()) {
                JmxUtils.registerMbean(this.aggregatedQuotaStats,
                                       JmxUtils.createObjectName("voldemort.store.quota",
                                                                 store.getName() + "-quota-limit-stats"));
            }
            store = rateLimitingStore;
        }
    }

    storeRepository.addLocalStore(store);
}
/**
 * Unregister and remove the engine from the storage repository. This is
 * called during deletion of stores and if there are exceptions
 * adding/opening stores
 *
 * @param engine The actual engine to remove
 * @param isReadOnly Is this read-only?
 * @param storeType The storage type of the store
 * @param truncate Should the store be truncated?
 */
public void removeEngine(StorageEngine<ByteArray, byte[], byte[]> engine,
                         boolean isReadOnly,
                         String storeType,
                         boolean truncate) {
    String storeName = engine.getName();
    Store<ByteArray, byte[], byte[]> store = storeRepository.removeLocalStore(storeName);

    boolean isSlop = storeType.compareTo("slop") == 0;
    boolean isView = storeType.compareTo(ViewStorageConfiguration.TYPE_NAME) == 0;
    boolean isMetadata = storeName.compareTo(MetadataStore.METADATA_STORE_NAME) == 0;

    if (store != null) {
        if (voldemortConfig.isJmxEnabled()) {
            MBeanServer mbeanServer = ManagementFactory.getPlatformMBeanServer();

            if (!isSlop && voldemortConfig.isEnableRebalanceService() && !isReadOnly
                && !isMetadata && !isView) {
                ObjectName name = null;
                if (this.voldemortConfig.isEnableJmxClusterName())
                    name = JmxUtils.createObjectName(metadata.getCluster().getName() + "."
                                                     + JmxUtils.getPackageName(RedirectingStore.class),
                                                     store.getName());
                else
                    name = JmxUtils.createObjectName(JmxUtils.getPackageName(RedirectingStore.class),
                                                     store.getName());

                synchronized (mbeanServer) {
                    if (mbeanServer.isRegistered(name))
                        JmxUtils.unregisterMbean(mbeanServer, name);
                }
            }

            if (voldemortConfig.isStatTrackingEnabled()) {
                ObjectName name = null;
                if (this.voldemortConfig.isEnableJmxClusterName())
                    name = JmxUtils.createObjectName(metadata.getCluster().getName() + "."
                                                     + JmxUtils.getPackageName(store.getClass()),
                                                     store.getName());
                else
                    name = JmxUtils.createObjectName(JmxUtils.getPackageName(store.getClass()),
                                                     store.getName());

                synchronized (mbeanServer) {
                    if (mbeanServer.isRegistered(name))
                        JmxUtils.unregisterMbean(mbeanServer, name);
                }
            }
        }

        if (voldemortConfig.isServerRoutingEnabled() && !isSlop) {
            this.storeRepository.removeRoutedStore(storeName);
            for (Node node : metadata.getCluster().getNodes())
                this.storeRepository.removeNodeStore(storeName, node.getId());
        }
    }

    storeRepository.removeStorageEngine(storeName);
    if (truncate)
        engine.truncate();
    engine.close();
}
public void run() {

    // don't try to run slop pusher job when rebalancing
    if (metadataStore.getServerState()
                     .equals(MetadataStore.VoldemortState.REBALANCING_MASTER_SERVER)) {
        logger.error("Cannot run slop pusher job since Voldemort server is rebalancing");
        return;
    }

    boolean terminatedEarly = false;
    Date startTime = new Date();
    logger.info("Started streaming slop pusher job at " + startTime);

    SlopStorageEngine slopStorageEngine = storeRepo.getSlopStore();
    ClosableIterator<Pair<ByteArray, Versioned<Slop>>> iterator = null;

    if (adminClient == null) {
        adminClient = new AdminClient(cluster,
                                      new AdminClientConfig().setMaxThreads(cluster.getNumberOfNodes())
                                                             .setMaxConnectionsPerNode(1));
    }

    if (voldemortConfig.getSlopZonesDownToTerminate() > 0) {
        // Populating the zone mapping for early termination
        zoneMapping.clear();
        for (Node n : cluster.getNodes()) {
            if (failureDetector.isAvailable(n)) {
                Set<Integer> nodes = zoneMapping.get(n.getZoneId());
                if (nodes == null) {
                    nodes = Sets.newHashSet();
                    zoneMapping.put(n.getZoneId(), nodes);
                }
                nodes.add(n.getId());
            }
        }

        // Check how many zones are down
        int zonesDown = 0;
        for (Zone zone : cluster.getZones()) {
            if (zoneMapping.get(zone.getId()) == null
                || zoneMapping.get(zone.getId()).size() == 0)
                zonesDown++;
        }

        // Terminate early
        if (voldemortConfig.getSlopZonesDownToTerminate() <= zoneMapping.size()
            && zonesDown >= voldemortConfig.getSlopZonesDownToTerminate()) {
            logger.info("Completed streaming slop pusher job at " + startTime
                        + " early because " + zonesDown + " zones are down");
            stopAdminClient();
            return;
        }
    }

    // Clearing the statistics
    AtomicLong attemptedPushes = new AtomicLong(0);
    for (Node node : cluster.getNodes()) {
        attemptedByNode.put(node.getId(), 0L);
        succeededByNode.put(node.getId(), 0L);
    }

    acquireRepairPermit();
    try {
        StorageEngine<ByteArray, Slop, byte[]> slopStore = slopStorageEngine.asSlopStore();
        iterator = slopStore.entries();

        while (iterator.hasNext()) {
            Pair<ByteArray, Versioned<Slop>> keyAndVal;
            try {
                keyAndVal = iterator.next();
                Versioned<Slop> versioned = keyAndVal.getSecond();

                // Retrieve the node
                int nodeId = versioned.getValue().getNodeId();
                Node node = cluster.getNodeById(nodeId);

                attemptedPushes.incrementAndGet();
                Long attempted = attemptedByNode.get(nodeId);
                attemptedByNode.put(nodeId, attempted + 1L);
                if (attemptedPushes.get() % 10000 == 0)
                    logger.info("Attempted pushing " + attemptedPushes + " slops");

                if (logger.isTraceEnabled())
                    logger.trace("Pushing slop for " + versioned.getValue().getNodeId()
                                 + " and store " + versioned.getValue().getStoreName());

                if (failureDetector.isAvailable(node)) {
                    SynchronousQueue<Versioned<Slop>> slopQueue = slopQueues.get(nodeId);
                    if (slopQueue == null) {
                        // No previous slop queue, add one
                        slopQueue = new SynchronousQueue<Versioned<Slop>>();
                        slopQueues.put(nodeId, slopQueue);
                        consumerResults.add(consumerExecutor.submit(new SlopConsumer(nodeId,
                                                                                     slopQueue,
                                                                                     slopStorageEngine)));
                    }
                    boolean offered = slopQueue.offer(versioned,
                                                      voldemortConfig.getClientRoutingTimeoutMs(),
                                                      TimeUnit.MILLISECONDS);
                    if (!offered) {
                        // Log the same timeout the offer actually used (the
                        // original message quoted the connection timeout).
                        if (logger.isDebugEnabled())
                            logger.debug("No consumer appeared for slop in "
                                         + voldemortConfig.getClientRoutingTimeoutMs() + " ms");
                    }
                    readThrottler.maybeThrottle(nBytesRead(keyAndVal));
                } else {
                    logger.trace(node + " declared down, won't push slop");
                }
            } catch (RejectedExecutionException e) {
                throw new VoldemortException("Ran out of threads in executor", e);
            }
        }
    } catch (InterruptedException e) {
        logger.warn("Interrupted exception", e);
        terminatedEarly = true;
    } catch (Exception e) {
        logger.error(e, e);
        terminatedEarly = true;
    } finally {
        try {
            if (iterator != null)
                iterator.close();
        } catch (Exception e) {
            logger.warn("Failed to close iterator cleanly as database might be closed", e);
        }

        // Adding the poison pill
        for (SynchronousQueue<Versioned<Slop>> slopQueue : slopQueues.values()) {
            try {
                slopQueue.put(END);
            } catch (InterruptedException e) {
                logger.warn("Error putting poison pill", e);
            }
        }

        for (Future result : consumerResults) {
            try {
                result.get();
            } catch (Exception e) {
                logger.warn("Exception in consumer", e);
            }
        }

        // Only if no exception took place do we update the counts
        if (!terminatedEarly) {
            Map<Integer, Long> outstanding = Maps.newHashMapWithExpectedSize(cluster.getNumberOfNodes());
            for (int nodeId : succeededByNode.keySet()) {
                logger.info("Slops to node " + nodeId + " - Succeeded - "
                            + succeededByNode.get(nodeId) + " - Attempted - "
                            + attemptedByNode.get(nodeId));
                outstanding.put(nodeId,
                                attemptedByNode.get(nodeId) - succeededByNode.get(nodeId));
            }
            slopStorageEngine.resetStats(outstanding);
            logger.info("Completed streaming slop pusher job which started at " + startTime);
        } else {
            for (int nodeId : succeededByNode.keySet()) {
                logger.info("Slops to node " + nodeId + " - Succeeded - "
                            + succeededByNode.get(nodeId) + " - Attempted - "
                            + attemptedByNode.get(nodeId));
            }
            logger.info("Completed early streaming slop pusher job which started at " + startTime);
        }

        // Shut down admin client so as not to waste connections
        consumerResults.clear();
        slopQueues.clear();
        stopAdminClient();
        this.repairPermits.release();
    }
}
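The per-node SynchronousQueue handoff and END poison pill above boil down to the following self-contained JDK sketch (all names hypothetical):

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.SynchronousQueue;

public class PoisonPillDemo {

    // A sentinel instance; consumers stop when they see this exact object.
    private static final String END = new String("END");

    public static void main(String[] args) throws InterruptedException {
        final SynchronousQueue<String> queue = new SynchronousQueue<String>();
        ExecutorService consumer = Executors.newSingleThreadExecutor();

        consumer.execute(new Runnable() {
            public void run() {
                try {
                    String item;
                    // Reference equality on purpose: only the sentinel object
                    // ends the loop, never an equal-valued payload.
                    while ((item = queue.take()) != END)
                        System.out.println("consumed " + item);
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                }
            }
        });

        for (int i = 0; i < 5; i++)
            queue.put("slop-" + i); // blocks until the consumer takes it

        queue.put(END); // poison pill: tells the consumer to exit
        consumer.shutdown();
    }
}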
@Override
protected void startInner() {
    registerInternalEngine(metadata, false, "metadata");

    /* Initialize storage configurations */
    for (String configClassName : voldemortConfig.getStorageConfigurations())
        initStorageConfig(configClassName);

    /* Initialize view storage configuration */
    storageConfigs.put(ViewStorageConfiguration.TYPE_NAME,
                       new ViewStorageConfiguration(voldemortConfig,
                                                    metadata.getStoreDefList(),
                                                    storeRepository));

    /* Initialize system stores */
    initSystemStores();

    /* Register slop store */
    if (voldemortConfig.isSlopEnabled()) {
        logger.info("Initializing the slop store using " + voldemortConfig.getSlopStoreType());
        StorageConfiguration config = storageConfigs.get(voldemortConfig.getSlopStoreType());
        if (config == null)
            throw new ConfigurationException("Attempt to open store "
                                             + SlopStorageEngine.SLOP_STORE_NAME + " but "
                                             + voldemortConfig.getSlopStoreType()
                                             + " storage engine has not been enabled.");

        // make a dummy store definition object
        StoreDefinition slopStoreDefinition = new StoreDefinition(SlopStorageEngine.SLOP_STORE_NAME,
                                                                  null,
                                                                  null,
                                                                  null,
                                                                  null,
                                                                  null,
                                                                  null,
                                                                  RoutingStrategyType.CONSISTENT_STRATEGY,
                                                                  0,
                                                                  null,
                                                                  0,
                                                                  null,
                                                                  0,
                                                                  null,
                                                                  null,
                                                                  null,
                                                                  null,
                                                                  null,
                                                                  null,
                                                                  null,
                                                                  null,
                                                                  null,
                                                                  null,
                                                                  null,
                                                                  null,
                                                                  0);
        SlopStorageEngine slopEngine = new SlopStorageEngine(config.getStore(slopStoreDefinition,
                                                                             new RoutingStrategyFactory().updateRoutingStrategy(slopStoreDefinition,
                                                                                                                                metadata.getCluster())),
                                                             metadata.getCluster());
        registerInternalEngine(slopEngine, false, "slop");
        storeRepository.setSlopStore(slopEngine);

        if (voldemortConfig.isSlopPusherJobEnabled()) {
            // Now initialize the pusher job after some time
            GregorianCalendar cal = new GregorianCalendar();
            cal.add(Calendar.SECOND,
                    (int) (voldemortConfig.getSlopFrequencyMs() / Time.MS_PER_SECOND));
            Date nextRun = cal.getTime();
            logger.info("Initializing slop pusher job type " + voldemortConfig.getPusherType()
                        + " at " + nextRun);

            scheduler.schedule("slop",
                               (voldemortConfig.getPusherType()
                                               .compareTo(BlockingSlopPusherJob.TYPE_NAME) == 0)
                                       ? new BlockingSlopPusherJob(storeRepository,
                                                                   metadata,
                                                                   failureDetector,
                                                                   voldemortConfig,
                                                                   scanPermitWrapper)
                                       : new StreamingSlopPusherJob(storeRepository,
                                                                    metadata,
                                                                    slopStreamingFailureDetector,
                                                                    voldemortConfig,
                                                                    scanPermitWrapper),
                               nextRun,
                               voldemortConfig.getSlopFrequencyMs());
        }

        // Create a SlopPurgeJob object and register it
        if (voldemortConfig.isSlopPurgeJobEnabled()) {
            logger.info("Initializing Slop Purge job");
            SlopPurgeJob job = new SlopPurgeJob(storeRepository,
                                                metadata,
                                                scanPermitWrapper,
                                                voldemortConfig.getSlopPurgeJobMaxKeysScannedPerSec());
            JmxUtils.registerMbean(job, JmxUtils.createObjectName(job.getClass()));
            storeRepository.registerSlopPurgeJob(job);
        }
    }

    // Create a repair job object and register it with Store repository
    if (voldemortConfig.isRepairEnabled()) {
        logger.info("Initializing repair job.");
        RepairJob job = new RepairJob(storeRepository,
                                      metadata,
                                      scanPermitWrapper,
                                      voldemortConfig.getRepairJobMaxKeysScannedPerSec());
        JmxUtils.registerMbean(job, JmxUtils.createObjectName(job.getClass()));
        storeRepository.registerRepairJob(job);
    }

    // Create a prune job object and register it
    if (voldemortConfig.isPruneJobEnabled()) {
        logger.info("Initializing prune job");
        VersionedPutPruneJob job = new VersionedPutPruneJob(storeRepository,
                                                            metadata,
                                                            scanPermitWrapper,
                                                            voldemortConfig.getPruneJobMaxKeysScannedPerSec());
        JmxUtils.registerMbean(job, JmxUtils.createObjectName(job.getClass()));
        storeRepository.registerPruneJob(job);
    }

    List<StoreDefinition> storeDefs = new ArrayList<StoreDefinition>(this.metadata.getStoreDefList());
    logger.info("Initializing stores:");

    logger.info("Validating schemas:");
    StoreDefinitionUtils.validateSchemasAsNeeded(storeDefs);

    // first initialize non-view stores
    for (StoreDefinition def : storeDefs)
        if (!def.isView())
            openStore(def);

    // now that we have all our stores, we can initialize views pointing at
    // those stores
    for (StoreDefinition def : storeDefs) {
        if (def.isView())
            openStore(def);
    }

    initializeMetadataVersions(storeDefs);

    // enable aggregate jmx statistics
    if (voldemortConfig.isStatTrackingEnabled())
        if (this.voldemortConfig.isEnableJmxClusterName())
            JmxUtils.registerMbean(new StoreStatsJmx(this.storeStats),
                                   JmxUtils.createObjectName(metadata.getCluster().getName()
                                                                     + ".voldemort.store.stats.aggregate",
                                                             "aggregate-perf"));
        else
            JmxUtils.registerMbean(new StoreStatsJmx(this.storeStats),
                                   JmxUtils.createObjectName("voldemort.store.stats.aggregate",
                                                             "aggregate-perf"));

    List<StorageEngine> listOfDisabledStores = Lists.newArrayList();
    for (StorageEngine storageEngine : storeRepository.getAllStorageEngines()) {
        try {
            StoreVersionManager storeVersionManager = (StoreVersionManager) storageEngine.getCapability(StoreCapabilityType.DISABLE_STORE_VERSION);
            if (storeVersionManager.hasAnyDisabledVersion()) {
                listOfDisabledStores.add(storageEngine);
                logger.warn("The following store is marked as disabled: "
                            + storageEngine.getName());
                // Must put server in offline mode.
            }
        } catch (NoSuchCapabilityException e) {
            // Not a read-only store: no-op
        }
    }
    if (listOfDisabledStores.isEmpty()) {
        logger.info("All stores initialized.");
    } else {
        throw new DisabledStoreException("All stores initialized, but the server needs to go "
                                         + "in offline mode because some store(s) are disabled.");
    }
}