public void addStorageEngine(StorageEngine<ByteArray, byte[], byte[]> engine) {
    StorageEngine<ByteArray, byte[], byte[]> found =
            this.storageEngines.putIfAbsent(engine.getName(), engine);
    if (found != null)
        throw new VoldemortException("Storage Engine '" + engine.getName()
                                     + "' has already been initialized.");

    // register streaming stats object for the store
    if (streamingStatsMap != null) {
        // lazily register the aggregated mbean
        if (storageEngines.size() == 1) {
            JmxUtils.registerMbean(aggregatedStreamStats,
                                   JmxUtils.createObjectName(this.getClass().getCanonicalName(),
                                                             "aggregated-streaming-stats"));
        }
        StreamingStats stat = new StreamingStats(aggregatedStreamStats);
        JmxUtils.registerMbean(stat,
                               JmxUtils.createObjectName(this.getClass().getCanonicalName(),
                                                         engine.getName() + "-streaming-stats"));
        streamingStatsMap.putIfAbsent(engine.getName(), stat);
    }
}
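// Illustrative sketch only, not part of StoreRepository: the ObjectName shape the registrations above are
// assumed to produce. Assumptions: JmxUtils.createObjectName(domain, type) builds "domain:type=" + type,
// and the "voldemort.server" package name below is a guess used only for the example.
import javax.management.MalformedObjectNameException;
import javax.management.ObjectName;

class StreamingStatsNamingSketch {

    static ObjectName perStoreStatsName(String storeName) throws MalformedObjectNameException {
        // e.g. voldemort.server.StoreRepository:type=my-store-streaming-stats
        return new ObjectName("voldemort.server.StoreRepository:type=" + storeName + "-streaming-stats");
    }

    static ObjectName aggregatedStatsName() throws MalformedObjectNameException {
        // registered once, when the first engine is added; see removeStorageEngine() for the matching unregister
        return new ObjectName("voldemort.server.StoreRepository:type=aggregated-streaming-stats");
    }
}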
public void unregisterSystemEngine(StorageEngine<ByteArray, byte[], byte[]> engine) {
    String storeName = engine.getName();
    Store<ByteArray, byte[], byte[]> store = storeRepository.removeLocalStore(storeName);

    if (store != null) {
        if (voldemortConfig.isJmxEnabled()) {
            MBeanServer mbeanServer = ManagementFactory.getPlatformMBeanServer();

            if (voldemortConfig.isEnableRebalanceService()) {
                ObjectName name = null;
                if (this.voldemortConfig.isEnableJmxClusterName())
                    name = JmxUtils.createObjectName(metadata.getCluster().getName() + "."
                                                     + JmxUtils.getPackageName(RedirectingStore.class),
                                                     store.getName());
                else
                    name = JmxUtils.createObjectName(JmxUtils.getPackageName(RedirectingStore.class),
                                                     store.getName());

                synchronized (mbeanServer) {
                    if (mbeanServer.isRegistered(name))
                        JmxUtils.unregisterMbean(mbeanServer, name);
                }
            }

            if (voldemortConfig.isStatTrackingEnabled()) {
                ObjectName name = null;
                if (this.voldemortConfig.isEnableJmxClusterName())
                    name = JmxUtils.createObjectName(metadata.getCluster().getName() + "."
                                                     + JmxUtils.getPackageName(store.getClass()),
                                                     store.getName());
                else
                    name = JmxUtils.createObjectName(JmxUtils.getPackageName(store.getClass()),
                                                     store.getName());

                synchronized (mbeanServer) {
                    if (mbeanServer.isRegistered(name))
                        JmxUtils.unregisterMbean(mbeanServer, name);
                }
            }
        }
        if (voldemortConfig.isServerRoutingEnabled()) {
            this.storeRepository.removeRoutedStore(storeName);
            for (Node node : metadata.getCluster().getNodes())
                this.storeRepository.removeNodeStore(storeName, node.getId());
        }
    }

    storeRepository.removeStorageEngine(storeName);
    // engine.truncate(); why truncate here when unregister? Isn't close good enough?
    engine.close();
}
public Store<ByteArray, byte[], byte[]> removeStorageEngine(String storeName) {
    // unregister the streaming stats object for the store; the name must match the one used in
    // addStorageEngine(), i.e. "<storeName>-streaming-stats"
    if (streamingStatsMap != null) {
        JmxUtils.unregisterMbean(JmxUtils.createObjectName(this.getClass().getCanonicalName(),
                                                           storeName + "-streaming-stats"));
        streamingStatsMap.remove(storeName);
        // lazily unregister the aggregated mbean when the last engine is removed
        if (storageEngines.size() == 1) {
            JmxUtils.unregisterMbean(JmxUtils.createObjectName(this.getClass().getCanonicalName(),
                                                               "aggregated-streaming-stats"));
        }
    }
    return this.storageEngines.remove(storeName);
}
private void initStorageConfig(String configClassName) {
    // add the configurations of the storage engines needed by user stores
    try {
        Class<?> configClass = ReflectUtils.loadClass(configClassName);
        StorageConfiguration configuration =
                (StorageConfiguration) ReflectUtils.callConstructor(configClass,
                                                                    new Class<?>[] { VoldemortConfig.class },
                                                                    new Object[] { voldemortConfig });
        logger.info("Initializing " + configuration.getType() + " storage engine.");
        storageConfigs.put(configuration.getType(), configuration);

        if (voldemortConfig.isJmxEnabled())
            JmxUtils.registerMbean(configuration.getType() + "StorageConfiguration", configuration);
    } catch (IllegalStateException e) {
        logger.error("Error loading storage configuration '" + configClassName + "'.", e);
    }

    if (storageConfigs.size() == 0)
        throw new ConfigurationException("No storage engine has been enabled!");

    // now, add the configurations of the storage engines needed by the system
    // stores, if they do not exist yet
    initSystemStorageConfig();
}
public SocketStoreClientFactory(ClientConfig config) {
    super(config);
    this.requestRoutingType =
            RequestRoutingType.getRequestRoutingType(RoutingTier.SERVER.equals(config.getRoutingTier()),
                                                     false);

    this.storeFactory = new ClientRequestExecutorPool(config.getSelectors(),
                                                      config.getMaxConnectionsPerNode(),
                                                      config.getConnectionTimeout(TimeUnit.MILLISECONDS),
                                                      config.getSocketTimeout(TimeUnit.MILLISECONDS),
                                                      config.getSocketBufferSize(),
                                                      config.getSocketKeepAlive());
    if (config.isJmxEnabled())
        JmxUtils.registerMbean(storeFactory, JmxUtils.createObjectName(storeFactory.getClass()));
}
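// Illustrative usage sketch (assumptions: a server bootstrapped at tcp://localhost:6666 and a store named
// "test"; neither comes from the surrounding code). Shows how a client would typically construct the
// factory above and obtain a StoreClient; enabling JMX in the ClientConfig is what triggers the MBean
// registration in the constructor.
import voldemort.client.ClientConfig;
import voldemort.client.SocketStoreClientFactory;
import voldemort.client.StoreClient;
import voldemort.client.StoreClientFactory;

public class SocketClientUsageSketch {

    public static void main(String[] args) {
        ClientConfig config = new ClientConfig().setBootstrapUrls("tcp://localhost:6666")
                                                .setEnableJmx(true);
        StoreClientFactory factory = new SocketStoreClientFactory(config);
        try {
            StoreClient<String, String> client = factory.getStoreClient("test");
            client.put("hello", "world");
            System.out.println(client.getValue("hello"));
        } finally {
            factory.close();
        }
    }
}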
/**
 * Schedule a data retention cleanup job for the given store
 *
 * @param storeDef The store definition
 * @param engine The storage engine to do cleanup on
 */
private void scheduleCleanupJob(StoreDefinition storeDef,
                                StorageEngine<ByteArray, byte[], byte[]> engine) {
    // Compute the start time of the job, based on current time
    GregorianCalendar cal =
            Utils.getCalendarForNextRun(new GregorianCalendar(),
                                        voldemortConfig.getRetentionCleanupFirstStartDayOfWeek(),
                                        voldemortConfig.getRetentionCleanupFirstStartTimeInHour());

    // allow only one cleanup job at a time
    Date startTime = cal.getTime();

    int maxReadRate = storeDef.hasRetentionScanThrottleRate() ? storeDef.getRetentionScanThrottleRate()
                                                              : Integer.MAX_VALUE;

    logger.info("Scheduling data retention cleanup job for store '" + storeDef.getName() + "' at "
                + startTime + " with retention scan throttle rate:" + maxReadRate + " Entries/second.");

    EventThrottler throttler = new EventThrottler(maxReadRate);

    Runnable cleanupJob = new DataCleanupJob<ByteArray, byte[], byte[]>(engine,
                                                                        scanPermitWrapper,
                                                                        storeDef.getRetentionDays() * Time.MS_PER_DAY,
                                                                        SystemTime.INSTANCE,
                                                                        throttler,
                                                                        metadata);
    if (voldemortConfig.isJmxEnabled()) {
        JmxUtils.registerMbean("DataCleanupJob-" + engine.getName(), cleanupJob);
    }

    long retentionFreqHours = storeDef.hasRetentionFrequencyDays()
            ? (storeDef.getRetentionFrequencyDays() * Time.HOURS_PER_DAY)
            : voldemortConfig.getRetentionCleanupScheduledPeriodInHour();

    this.scheduler.schedule("cleanup-" + storeDef.getName(),
                            cleanupJob,
                            startTime,
                            retentionFreqHours * Time.MS_PER_HOUR,
                            voldemortConfig.getRetentionCleanupPinStartTime());
}
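// Minimal arithmetic sketch of the schedule built above (the concrete values are hypothetical, not read
// from any real store definition): the retention window handed to DataCleanupJob and the period handed to
// the scheduler are both plain millisecond conversions of the store settings.
import voldemort.utils.Time;

class RetentionScheduleSketch {

    static long retentionWindowMs(int retentionDays) {
        // entries older than this are eligible for cleanup, e.g. 7 days -> 604,800,000 ms
        return retentionDays * Time.MS_PER_DAY;
    }

    static long schedulingPeriodMs(int retentionFrequencyDays) {
        // how often the cleanup job runs; the method above falls back to
        // voldemortConfig.getRetentionCleanupScheduledPeriodInHour() when the store defines no frequency
        return retentionFrequencyDays * Time.HOURS_PER_DAY * Time.MS_PER_HOUR;
    }
}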
@Override
public void close() {
    VoldemortException exception = null;

    for (NonblockingStore store : nonblockingStores.values()) {
        try {
            store.close();
        } catch (VoldemortException e) {
            exception = e;
        }
    }

    if (this.jmxEnabled) {
        JmxUtils.unregisterMbean(JmxUtils.createObjectName(JmxUtils.getPackageName(stats.getClass()),
                                                           getName() + identifierString));
    }

    if (exception != null)
        throw exception;

    super.close();
}
public void registerSystemEngine(StorageEngine<ByteArray, byte[], byte[]> engine) {

    Cluster cluster = this.metadata.getCluster();
    storeRepository.addStorageEngine(engine);

    /* Now add any store wrappers that are enabled */
    Store<ByteArray, byte[], byte[]> store = engine;
    if (voldemortConfig.isVerboseLoggingEnabled())
        store = new LoggingStore<ByteArray, byte[], byte[]>(store,
                                                            cluster.getName(),
                                                            SystemTime.INSTANCE);

    if (voldemortConfig.isMetadataCheckingEnabled())
        store = new InvalidMetadataCheckingStore(metadata.getNodeId(), store, metadata);

    if (voldemortConfig.isStatTrackingEnabled()) {
        StatTrackingStore statStore = new StatTrackingStore(store, this.storeStats);
        store = statStore;
        if (voldemortConfig.isJmxEnabled()) {

            MBeanServer mbeanServer = ManagementFactory.getPlatformMBeanServer();
            ObjectName name = null;
            if (this.voldemortConfig.isEnableJmxClusterName())
                name = JmxUtils.createObjectName(metadata.getCluster().getName() + "."
                                                 + JmxUtils.getPackageName(store.getClass()),
                                                 store.getName());
            else
                name = JmxUtils.createObjectName(JmxUtils.getPackageName(store.getClass()),
                                                 store.getName());

            synchronized (mbeanServer) {
                if (mbeanServer.isRegistered(name))
                    JmxUtils.unregisterMbean(mbeanServer, name);

                JmxUtils.registerMbean(mbeanServer,
                                       JmxUtils.createModelMBean(new StoreStatsJmx(statStore.getStats())),
                                       name);
            }
        }
    }

    storeRepository.addLocalStore(store);
}
@Override
public <K, V, T> Store<K, V, T> getRawStore(String storeName,
                                            InconsistencyResolver<Versioned<V>> resolver) {
    Store<K, V, T> clientStore = null;

    // The lowest layer : Transporting request to coordinator
    R2Store r2store = null;
    this.d2Client = restClientFactoryConfig.getD2Client();
    if (this.d2Client == null) {
        r2store = new R2Store(storeName,
                              this.config.getHttpBootstrapURL(),
                              this.transportClient,
                              this.config);
    } else {
        r2store = new R2Store(storeName, this.config.getHttpBootstrapURL(), this.d2Client, this.config);
    }

    this.rawStoreList.add(r2store);

    // bootstrap from the coordinator and obtain all the serialization information.
    String serializerInfoXml = r2store.getSerializerInfoXml();
    SerializerDefinition keySerializerDefinition =
            RestUtils.parseKeySerializerDefinition(serializerInfoXml);
    SerializerDefinition valueSerializerDefinition =
            RestUtils.parseValueSerializerDefinition(serializerInfoXml);

    synchronized (this) {
        keySerializerMap.put(storeName, keySerializerDefinition);
        valueSerializerMap.put(storeName, valueSerializerDefinition);
    }

    if (logger.isDebugEnabled()) {
        logger.debug("Bootstrapping for " + storeName + ": Key serializer " + keySerializerDefinition);
        logger.debug("Bootstrapping for " + storeName + ": Value serializer "
                     + valueSerializerDefinition);
    }

    // Start building the stack..
    // First, the transport layer
    Store<ByteArray, byte[], byte[]> store = r2store;

    // TODO: Add jmxId / some unique identifier to the Mbean name
    if (this.config.isEnableJmx()) {
        StatTrackingStore statStore = new StatTrackingStore(store, this.stats);
        store = statStore;
        JmxUtils.registerMbean(new StoreStatsJmx(statStore.getStats()),
                               JmxUtils.createObjectName(JmxUtils.getPackageName(store.getClass()),
                                                         store.getName()));
    }

    // Add compression layer
    if (keySerializerDefinition.hasCompression() || valueSerializerDefinition.hasCompression()) {
        store = new CompressingStore(store,
                                     new CompressionStrategyFactory().get(keySerializerDefinition.getCompression()),
                                     new CompressionStrategyFactory().get(valueSerializerDefinition.getCompression()));
    }

    // Add Serialization layer
    Serializer<K> keySerializer = (Serializer<K>) serializerFactory.getSerializer(keySerializerDefinition);
    Serializer<V> valueSerializer = (Serializer<V>) serializerFactory.getSerializer(valueSerializerDefinition);
    clientStore = SerializingStore.wrap(store, keySerializer, valueSerializer, null);

    // Add inconsistency Resolving layer
    InconsistencyResolver<Versioned<V>> secondaryResolver =
            resolver == null ? new TimeBasedInconsistencyResolver<V>() : resolver;
    clientStore = new InconsistencyResolvingStore<K, V, T>(clientStore,
                                                           new ChainedResolver<Versioned<V>>(new VectorClockInconsistencyResolver<V>(),
                                                                                             secondaryResolver));
    return clientStore;
}
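// Reading aid (not additional code): the store stack assembled by getRawStore() above, from the network
// upward to what the caller receives.
//
//   R2Store                          HTTP/R2 transport to the coordinator
//   -> StatTrackingStore             only when config.isEnableJmx(), with a StoreStatsJmx MBean
//   -> CompressingStore              only when the key or value serializer declares compression
//   -> SerializingStore<K, V>        key/value (de)serialization
//   -> InconsistencyResolvingStore   vector-clock resolution chained with the supplied (or time-based) resolver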
/**
 * Register the given engine with the storage repository
 *
 * @param engine The storage engine to register
 * @param isReadOnly Boolean indicating if this store is read-only
 * @param storeType The type of the store
 * @param storeDef store definition for the store to be registered
 */
public void registerEngine(StorageEngine<ByteArray, byte[], byte[]> engine,
                           boolean isReadOnly,
                           String storeType,
                           StoreDefinition storeDef) {
    Cluster cluster = this.metadata.getCluster();

    storeRepository.addStorageEngine(engine);

    /* Now add any store wrappers that are enabled */
    Store<ByteArray, byte[], byte[]> store = engine;
    boolean isMetadata = store.getName().compareTo(MetadataStore.METADATA_STORE_NAME) == 0;
    boolean isSlop = storeType.compareTo("slop") == 0;
    boolean isView = storeType.compareTo(ViewStorageConfiguration.TYPE_NAME) == 0;

    if (voldemortConfig.isVerboseLoggingEnabled())
        store = new LoggingStore<ByteArray, byte[], byte[]>(store,
                                                            cluster.getName(),
                                                            SystemTime.INSTANCE);
    if (!isSlop) {
        if (!isReadOnly && !isMetadata && !isView) {
            // wrap store to enforce retention policy
            if (voldemortConfig.isEnforceRetentionPolicyOnRead() && storeDef != null) {
                RetentionEnforcingStore retentionEnforcingStore =
                        new RetentionEnforcingStore(store,
                                                    storeDef,
                                                    voldemortConfig.isDeleteExpiredValuesOnRead(),
                                                    SystemTime.INSTANCE);
                metadata.addMetadataStoreListener(store.getName(), retentionEnforcingStore);
                store = retentionEnforcingStore;
            }

            if (voldemortConfig.isEnableRebalanceService()) {
                ProxyPutStats proxyPutStats = new ProxyPutStats(aggregatedProxyPutStats);
                if (voldemortConfig.isJmxEnabled()) {
                    JmxUtils.registerMbean(proxyPutStats,
                                           JmxUtils.createObjectName("voldemort.store.rebalancing",
                                                                     engine.getName() + "-proxy-puts"));
                }
                store = new RedirectingStore(store,
                                             metadata,
                                             storeRepository,
                                             failureDetector,
                                             storeFactory,
                                             proxyPutWorkerPool,
                                             proxyPutStats);
                if (voldemortConfig.isJmxEnabled()) {
                    MBeanServer mbeanServer = ManagementFactory.getPlatformMBeanServer();
                    ObjectName name = null;
                    if (this.voldemortConfig.isEnableJmxClusterName())
                        name = JmxUtils.createObjectName(cluster.getName() + "."
                                                         + JmxUtils.getPackageName(RedirectingStore.class),
                                                         store.getName());
                    else
                        name = JmxUtils.createObjectName(JmxUtils.getPackageName(RedirectingStore.class),
                                                         store.getName());

                    synchronized (mbeanServer) {
                        if (mbeanServer.isRegistered(name))
                            JmxUtils.unregisterMbean(mbeanServer, name);

                        JmxUtils.registerMbean(mbeanServer, JmxUtils.createModelMBean(store), name);
                    }
                }
            }
        }

        if (voldemortConfig.isMetadataCheckingEnabled() && !isMetadata) {
            store = new InvalidMetadataCheckingStore(metadata.getNodeId(), store, metadata);
        }
    }

    if (voldemortConfig.isStatTrackingEnabled()) {
        StatTrackingStore statStore = new StatTrackingStore(store, this.storeStats);
        store = statStore;
        if (voldemortConfig.isJmxEnabled()) {

            MBeanServer mbeanServer = ManagementFactory.getPlatformMBeanServer();
            ObjectName name = null;
            if (this.voldemortConfig.isEnableJmxClusterName())
                name = JmxUtils.createObjectName(metadata.getCluster().getName() + "."
                                                 + JmxUtils.getPackageName(store.getClass()),
                                                 store.getName());
            else
                name = JmxUtils.createObjectName(JmxUtils.getPackageName(store.getClass()),
                                                 store.getName());

            synchronized (mbeanServer) {
                if (mbeanServer.isRegistered(name))
                    JmxUtils.unregisterMbean(mbeanServer, name);

                JmxUtils.registerMbean(mbeanServer,
                                       JmxUtils.createModelMBean(new StoreStatsJmx(statStore.getStats())),
                                       name);
            }
        }

        // Wrap everything under the rate limiting store (barring the metadata store)
        if (voldemortConfig.isEnableQuotaLimiting() && !isMetadata) {
            FileBackedCachingStorageEngine quotaStore =
                    (FileBackedCachingStorageEngine) storeRepository.getStorageEngine(SystemStoreConstants.SystemStoreName.voldsys$_store_quotas.toString());
            QuotaLimitStats quotaStats = new QuotaLimitStats(this.aggregatedQuotaStats);
            QuotaLimitingStore rateLimitingStore = new QuotaLimitingStore(store,
                                                                          this.storeStats,
                                                                          quotaStats,
                                                                          quotaStore);
            if (voldemortConfig.isJmxEnabled()) {
                JmxUtils.registerMbean(this.aggregatedQuotaStats,
                                       JmxUtils.createObjectName("voldemort.store.quota",
                                                                 store.getName() + "-quota-limit-stats"));
            }
            store = rateLimitingStore;
        }
    }

    storeRepository.addLocalStore(store);
}
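// Reading aid (not additional code): wrapping order applied by registerEngine() for a regular user store,
// innermost first. Bracketed layers are conditional on the configuration checks above; slop, view, metadata
// and read-only stores skip the layers guarded against them.
//
//   StorageEngine
//   -> [LoggingStore]                  verbose logging enabled
//   -> [RetentionEnforcingStore]       retention enforced on read
//   -> [RedirectingStore]              rebalance service enabled
//   -> [InvalidMetadataCheckingStore]  metadata checking enabled
//   -> [StatTrackingStore]             stat tracking enabled
//   -> [QuotaLimitingStore]            quota limiting enabled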
/**
 * Unregister and remove the engine from the storage repository. This is called during deletion
 * of stores and if there are exceptions adding/opening stores
 *
 * @param engine The actual engine to remove
 * @param isReadOnly Is this read-only?
 * @param storeType The storage type of the store
 * @param truncate Should the store be truncated?
 */
public void removeEngine(StorageEngine<ByteArray, byte[], byte[]> engine,
                         boolean isReadOnly,
                         String storeType,
                         boolean truncate) {
    String storeName = engine.getName();
    Store<ByteArray, byte[], byte[]> store = storeRepository.removeLocalStore(storeName);

    boolean isSlop = storeType.compareTo("slop") == 0;
    boolean isView = storeType.compareTo(ViewStorageConfiguration.TYPE_NAME) == 0;
    boolean isMetadata = storeName.compareTo(MetadataStore.METADATA_STORE_NAME) == 0;

    if (store != null) {
        if (voldemortConfig.isJmxEnabled()) {
            MBeanServer mbeanServer = ManagementFactory.getPlatformMBeanServer();

            if (!isSlop && voldemortConfig.isEnableRebalanceService() && !isReadOnly && !isMetadata
                && !isView) {

                ObjectName name = null;
                if (this.voldemortConfig.isEnableJmxClusterName())
                    name = JmxUtils.createObjectName(metadata.getCluster().getName() + "."
                                                     + JmxUtils.getPackageName(RedirectingStore.class),
                                                     store.getName());
                else
                    name = JmxUtils.createObjectName(JmxUtils.getPackageName(RedirectingStore.class),
                                                     store.getName());

                synchronized (mbeanServer) {
                    if (mbeanServer.isRegistered(name))
                        JmxUtils.unregisterMbean(mbeanServer, name);
                }
            }

            if (voldemortConfig.isStatTrackingEnabled()) {
                ObjectName name = null;
                if (this.voldemortConfig.isEnableJmxClusterName())
                    name = JmxUtils.createObjectName(metadata.getCluster().getName() + "."
                                                     + JmxUtils.getPackageName(store.getClass()),
                                                     store.getName());
                else
                    name = JmxUtils.createObjectName(JmxUtils.getPackageName(store.getClass()),
                                                     store.getName());

                synchronized (mbeanServer) {
                    if (mbeanServer.isRegistered(name))
                        JmxUtils.unregisterMbean(mbeanServer, name);
                }
            }
        }
        if (voldemortConfig.isServerRoutingEnabled() && !isSlop) {
            this.storeRepository.removeRoutedStore(storeName);
            for (Node node : metadata.getCluster().getNodes())
                this.storeRepository.removeNodeStore(storeName, node.getId());
        }
    }

    storeRepository.removeStorageEngine(storeName);
    if (truncate)
        engine.truncate();
    engine.close();
}
@Override
protected void startInner() {
    registerInternalEngine(metadata, false, "metadata");

    /* Initialize storage configurations */
    for (String configClassName : voldemortConfig.getStorageConfigurations())
        initStorageConfig(configClassName);

    /* Initialize view storage configuration */
    storageConfigs.put(ViewStorageConfiguration.TYPE_NAME,
                       new ViewStorageConfiguration(voldemortConfig,
                                                    metadata.getStoreDefList(),
                                                    storeRepository));

    /* Initialize system stores */
    initSystemStores();

    /* Register slop store */
    if (voldemortConfig.isSlopEnabled()) {

        logger.info("Initializing the slop store using " + voldemortConfig.getSlopStoreType());
        StorageConfiguration config = storageConfigs.get(voldemortConfig.getSlopStoreType());
        if (config == null)
            throw new ConfigurationException("Attempt to open store "
                                             + SlopStorageEngine.SLOP_STORE_NAME + " but "
                                             + voldemortConfig.getSlopStoreType()
                                             + " storage engine has not been enabled.");

        // make a dummy store definition object
        StoreDefinition slopStoreDefinition =
                new StoreDefinition(SlopStorageEngine.SLOP_STORE_NAME,
                                    null, null, null, null, null, null,
                                    RoutingStrategyType.CONSISTENT_STRATEGY,
                                    0, null, 0, null, 0,
                                    null, null, null, null, null, null,
                                    null, null, null, null, null, null,
                                    0);
        SlopStorageEngine slopEngine =
                new SlopStorageEngine(config.getStore(slopStoreDefinition,
                                                      new RoutingStrategyFactory().updateRoutingStrategy(slopStoreDefinition,
                                                                                                         metadata.getCluster())),
                                      metadata.getCluster());
        registerInternalEngine(slopEngine, false, "slop");
        storeRepository.setSlopStore(slopEngine);

        if (voldemortConfig.isSlopPusherJobEnabled()) {
            // Now initialize the pusher job after some time
            GregorianCalendar cal = new GregorianCalendar();
            cal.add(Calendar.SECOND,
                    (int) (voldemortConfig.getSlopFrequencyMs() / Time.MS_PER_SECOND));
            Date nextRun = cal.getTime();
            logger.info("Initializing slop pusher job type " + voldemortConfig.getPusherType()
                        + " at " + nextRun);

            scheduler.schedule("slop",
                               (voldemortConfig.getPusherType()
                                               .compareTo(BlockingSlopPusherJob.TYPE_NAME) == 0)
                                       ? new BlockingSlopPusherJob(storeRepository,
                                                                   metadata,
                                                                   failureDetector,
                                                                   voldemortConfig,
                                                                   scanPermitWrapper)
                                       : new StreamingSlopPusherJob(storeRepository,
                                                                    metadata,
                                                                    failureDetector,
                                                                    voldemortConfig,
                                                                    scanPermitWrapper),
                               nextRun,
                               voldemortConfig.getSlopFrequencyMs());
        }

        // Create a SlopPurgeJob object and register it
        if (voldemortConfig.isSlopPurgeJobEnabled()) {
            logger.info("Initializing Slop Purge job");
            SlopPurgeJob job = new SlopPurgeJob(storeRepository,
                                                metadata,
                                                scanPermitWrapper,
                                                voldemortConfig.getSlopPurgeJobMaxKeysScannedPerSec());
            JmxUtils.registerMbean(job, JmxUtils.createObjectName(job.getClass()));
            storeRepository.registerSlopPurgeJob(job);
        }
    }

    // Create a repair job object and register it with Store repository
    if (voldemortConfig.isRepairEnabled()) {
        logger.info("Initializing repair job.");
        RepairJob job = new RepairJob(storeRepository,
                                      metadata,
                                      scanPermitWrapper,
                                      voldemortConfig.getRepairJobMaxKeysScannedPerSec());
        JmxUtils.registerMbean(job, JmxUtils.createObjectName(job.getClass()));
        storeRepository.registerRepairJob(job);
    }

    // Create a prune job object and register it
    if (voldemortConfig.isPruneJobEnabled()) {
        logger.info("Initializing prune job");
        VersionedPutPruneJob job = new VersionedPutPruneJob(storeRepository,
                                                            metadata,
                                                            scanPermitWrapper,
                                                            voldemortConfig.getPruneJobMaxKeysScannedPerSec());
        JmxUtils.registerMbean(job, JmxUtils.createObjectName(job.getClass()));
        storeRepository.registerPruneJob(job);
    }

    List<StoreDefinition> storeDefs = new ArrayList<StoreDefinition>(this.metadata.getStoreDefList());

    logger.info("Initializing stores:");

    logger.info("Validating schemas:");
    String AVRO_GENERIC_VERSIONED_TYPE_NAME = "avro-generic-versioned";
    for (StoreDefinition storeDef : storeDefs) {
        SerializerDefinition keySerDef = storeDef.getKeySerializer();
        SerializerDefinition valueSerDef = storeDef.getValueSerializer();

        if (keySerDef.getName().equals(AVRO_GENERIC_VERSIONED_TYPE_NAME)) {
            SchemaEvolutionValidator.checkSchemaCompatibility(keySerDef);
        }

        if (valueSerDef.getName().equals(AVRO_GENERIC_VERSIONED_TYPE_NAME)) {
            SchemaEvolutionValidator.checkSchemaCompatibility(valueSerDef);
        }
    }

    // first initialize non-view stores
    for (StoreDefinition def : storeDefs)
        if (!def.isView())
            openStore(def);

    // now that we have all our stores, we can initialize views pointing at those stores
    for (StoreDefinition def : storeDefs) {
        if (def.isView())
            openStore(def);
    }

    initializeMetadataVersions(storeDefs);

    // enable aggregate jmx statistics
    if (voldemortConfig.isStatTrackingEnabled())
        if (this.voldemortConfig.isEnableJmxClusterName())
            JmxUtils.registerMbean(new StoreStatsJmx(this.storeStats),
                                   JmxUtils.createObjectName(metadata.getCluster().getName()
                                                             + ".voldemort.store.stats.aggregate",
                                                             "aggregate-perf"));
        else
            JmxUtils.registerMbean(new StoreStatsJmx(this.storeStats),
                                   JmxUtils.createObjectName("voldemort.store.stats.aggregate",
                                                             "aggregate-perf"));

    logger.info("All stores initialized.");
}
public StorageService(StoreRepository storeRepository,
                      MetadataStore metadata,
                      SchedulerService scheduler,
                      VoldemortConfig config) {
    super(ServiceType.STORAGE);
    this.voldemortConfig = config;
    this.scheduler = scheduler;
    this.storeRepository = storeRepository;
    this.metadata = metadata;
    this.scanPermitWrapper = new ScanPermitWrapper(voldemortConfig.getNumScanPermits());
    this.storageConfigs = new ConcurrentHashMap<String, StorageConfiguration>();
    this.clientThreadPool = new ClientThreadPool(config.getClientMaxThreads(),
                                                 config.getClientThreadIdleMs(),
                                                 config.getClientMaxQueuedRequests());
    this.storeFactory = new ClientRequestExecutorPool(config.getClientSelectors(),
                                                      config.getClientMaxConnectionsPerNode(),
                                                      config.getClientConnectionTimeoutMs(),
                                                      config.getSocketTimeoutMs(),
                                                      config.getSocketBufferSize(),
                                                      config.getSocketKeepAlive());

    FailureDetectorConfig failureDetectorConfig =
            new FailureDetectorConfig(voldemortConfig).setCluster(metadata.getCluster())
                                                      .setStoreVerifier(new ServerStoreVerifier(storeFactory,
                                                                                                metadata,
                                                                                                config));
    this.failureDetector = create(failureDetectorConfig, config.isJmxEnabled());
    this.storeStats = new StoreStats();
    this.routedStoreFactory = new RoutedStoreFactory();
    this.routedStoreFactory.setThreadPool(this.clientThreadPool);
    this.routedStoreConfig = new RoutedStoreConfig(this.voldemortConfig, this.metadata.getCluster());

    /*
     * Initialize the dynamic throttle limit based on the per node limit
     * config only if read-only engine is being used.
     */
    if (this.voldemortConfig.getStorageConfigurations()
                            .contains(ReadOnlyStorageConfiguration.class.getName())) {
        long rate = this.voldemortConfig.getReadOnlyFetcherMaxBytesPerSecond();
        this.dynThrottleLimit = new DynamicThrottleLimit(rate);
    } else
        this.dynThrottleLimit = null;

    // create the proxy put thread pool
    this.proxyPutWorkerPool = Executors.newFixedThreadPool(config.getMaxProxyPutThreads(),
                                                           new DaemonThreadFactory("voldemort-proxy-put-thread"));
    this.aggregatedProxyPutStats = new ProxyPutStats(null);
    if (config.isJmxEnabled()) {
        JmxUtils.registerMbean(this.aggregatedProxyPutStats,
                               JmxUtils.createObjectName("voldemort.store.rebalancing",
                                                         "aggregate-proxy-puts"));
    }
    this.aggregatedQuotaStats = new QuotaLimitStats(null);
    if (config.isJmxEnabled()) {
        JmxUtils.registerMbean(this.aggregatedQuotaStats,
                               JmxUtils.createObjectName("voldemort.store.quota",
                                                         "aggregate-quota-limit-stats"));
    }
}
/**
 * Create a PipelineRoutedStore
 *
 * @param innerStores The mapping of node to client
 * @param nonblockingStores The mapping of node to nonblocking client
 * @param slopStores The stores for hints
 * @param nonblockingSlopStores The nonblocking stores for hints
 * @param cluster Cluster definition
 * @param storeDef Store definition
 */
public PipelineRoutedStore(Map<Integer, Store<ByteArray, byte[], byte[]>> innerStores,
                           Map<Integer, NonblockingStore> nonblockingStores,
                           Map<Integer, Store<ByteArray, Slop, byte[]>> slopStores,
                           Map<Integer, NonblockingStore> nonblockingSlopStores,
                           Cluster cluster,
                           StoreDefinition storeDef,
                           FailureDetector failureDetector,
                           boolean repairReads,
                           TimeoutConfig timeoutConfig,
                           int clientZoneId,
                           boolean isJmxEnabled,
                           String identifierString,
                           ZoneAffinity zoneAffinity) {
    super(storeDef.getName(),
          innerStores,
          cluster,
          storeDef,
          repairReads,
          timeoutConfig,
          failureDetector,
          SystemTime.INSTANCE);

    if (zoneAffinity != null && storeDef.getZoneCountReads() != null
        && storeDef.getZoneCountReads() > 0) {
        if (zoneAffinity.isGetOpZoneAffinityEnabled()) {
            throw new IllegalArgumentException("storeDef.getZoneCountReads() is non-zero while zoneAffinityGet is enabled");
        }
        if (zoneAffinity.isGetAllOpZoneAffinityEnabled()) {
            throw new IllegalArgumentException("storeDef.getZoneCountReads() is non-zero while zoneAffinityGetAll is enabled");
        }
    }

    this.nonblockingSlopStores = nonblockingSlopStores;
    if (clientZoneId == Zone.UNSET_ZONE_ID) {
        Collection<Zone> availableZones = cluster.getZones();
        this.clientZone = availableZones.iterator().next();
        if (availableZones.size() > 1) {
            String format = "Client Zone is not specified. Default to Zone %d. The servers could be in a remote zone";
            logger.warn(String.format(format, this.clientZone.getId()));
        } else {
            if (logger.isDebugEnabled())
                logger.debug(String.format("Client Zone is not specified. Default to Zone %d",
                                           this.clientZone.getId()));
        }
    } else {
        this.clientZone = cluster.getZoneById(clientZoneId);
    }
    this.nonblockingStores = new ConcurrentHashMap<Integer, NonblockingStore>(nonblockingStores);
    this.slopStores = slopStores;
    if (storeDef.getRoutingStrategyType().compareTo(RoutingStrategyType.ZONE_STRATEGY) == 0) {
        zoneRoutingEnabled = true;
    } else {
        zoneRoutingEnabled = false;
    }
    if (storeDef.hasHintedHandoffStrategyType()) {
        HintedHandoffStrategyFactory factory =
                new HintedHandoffStrategyFactory(zoneRoutingEnabled, clientZone.getId());
        this.handoffStrategy = factory.updateHintedHandoffStrategy(storeDef, cluster);
    } else {
        this.handoffStrategy = null;
    }

    this.jmxEnabled = isJmxEnabled;
    this.identifierString = identifierString;
    if (this.jmxEnabled) {
        stats = new PipelineRoutedStats();
        JmxUtils.registerMbean(stats,
                               JmxUtils.createObjectName(JmxUtils.getPackageName(stats.getClass()),
                                                         getName() + identifierString));
    }
    if (zoneAffinity != null) {
        this.zoneAffinity = zoneAffinity;
    } else {
        this.zoneAffinity = new ZoneAffinity();
    }
}
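// Note (reading aid): the ObjectName registered here, built from JmxUtils.getPackageName(stats.getClass())
// and getName() + identifierString, is the same name that close() unregisters above, so each
// PipelineRoutedStore instance removes its own PipelineRoutedStats bean when it is closed.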
@Override
protected void startInner() {
    registerInternalEngine(metadata, false, "metadata");

    /* Initialize storage configurations */
    for (String configClassName : voldemortConfig.getStorageConfigurations())
        initStorageConfig(configClassName);

    /* Initialize view storage configuration */
    storageConfigs.put(ViewStorageConfiguration.TYPE_NAME,
                       new ViewStorageConfiguration(voldemortConfig,
                                                    metadata.getStoreDefList(),
                                                    storeRepository));

    /* Initialize system stores */
    initSystemStores();

    /* Register slop store */
    if (voldemortConfig.isSlopEnabled()) {

        logger.info("Initializing the slop store using " + voldemortConfig.getSlopStoreType());
        StorageConfiguration config = storageConfigs.get(voldemortConfig.getSlopStoreType());
        if (config == null)
            throw new ConfigurationException("Attempt to open store "
                                             + SlopStorageEngine.SLOP_STORE_NAME + " but "
                                             + voldemortConfig.getSlopStoreType()
                                             + " storage engine has not been enabled.");

        // make a dummy store definition object
        StoreDefinition slopStoreDefinition =
                new StoreDefinition(SlopStorageEngine.SLOP_STORE_NAME,
                                    null, null, null, null, null, null,
                                    RoutingStrategyType.CONSISTENT_STRATEGY,
                                    0, null, 0, null, 0,
                                    null, null, null, null, null, null,
                                    null, null, null, null, null, null,
                                    0);
        SlopStorageEngine slopEngine =
                new SlopStorageEngine(config.getStore(slopStoreDefinition,
                                                      new RoutingStrategyFactory().updateRoutingStrategy(slopStoreDefinition,
                                                                                                         metadata.getCluster())),
                                      metadata.getCluster());
        registerInternalEngine(slopEngine, false, "slop");
        storeRepository.setSlopStore(slopEngine);

        if (voldemortConfig.isSlopPusherJobEnabled()) {
            // Now initialize the pusher job after some time
            GregorianCalendar cal = new GregorianCalendar();
            cal.add(Calendar.SECOND,
                    (int) (voldemortConfig.getSlopFrequencyMs() / Time.MS_PER_SECOND));
            Date nextRun = cal.getTime();
            logger.info("Initializing slop pusher job type " + voldemortConfig.getPusherType()
                        + " at " + nextRun);

            scheduler.schedule("slop",
                               (voldemortConfig.getPusherType()
                                               .compareTo(BlockingSlopPusherJob.TYPE_NAME) == 0)
                                       ? new BlockingSlopPusherJob(storeRepository,
                                                                   metadata,
                                                                   failureDetector,
                                                                   voldemortConfig,
                                                                   scanPermitWrapper)
                                       : new StreamingSlopPusherJob(storeRepository,
                                                                    metadata,
                                                                    slopStreamingFailureDetector,
                                                                    voldemortConfig,
                                                                    scanPermitWrapper),
                               nextRun,
                               voldemortConfig.getSlopFrequencyMs());
        }

        // Create a SlopPurgeJob object and register it
        if (voldemortConfig.isSlopPurgeJobEnabled()) {
            logger.info("Initializing Slop Purge job");
            SlopPurgeJob job = new SlopPurgeJob(storeRepository,
                                                metadata,
                                                scanPermitWrapper,
                                                voldemortConfig.getSlopPurgeJobMaxKeysScannedPerSec());
            JmxUtils.registerMbean(job, JmxUtils.createObjectName(job.getClass()));
            storeRepository.registerSlopPurgeJob(job);
        }
    }

    // Create a repair job object and register it with Store repository
    if (voldemortConfig.isRepairEnabled()) {
        logger.info("Initializing repair job.");
        RepairJob job = new RepairJob(storeRepository,
                                      metadata,
                                      scanPermitWrapper,
                                      voldemortConfig.getRepairJobMaxKeysScannedPerSec());
        JmxUtils.registerMbean(job, JmxUtils.createObjectName(job.getClass()));
        storeRepository.registerRepairJob(job);
    }

    // Create a prune job object and register it
    if (voldemortConfig.isPruneJobEnabled()) {
        logger.info("Initializing prune job");
        VersionedPutPruneJob job = new VersionedPutPruneJob(storeRepository,
                                                            metadata,
                                                            scanPermitWrapper,
                                                            voldemortConfig.getPruneJobMaxKeysScannedPerSec());
        JmxUtils.registerMbean(job, JmxUtils.createObjectName(job.getClass()));
        storeRepository.registerPruneJob(job);
    }

    List<StoreDefinition> storeDefs = new ArrayList<StoreDefinition>(this.metadata.getStoreDefList());

    logger.info("Initializing stores:");

    logger.info("Validating schemas:");
    StoreDefinitionUtils.validateSchemasAsNeeded(storeDefs);

    // first initialize non-view stores
    for (StoreDefinition def : storeDefs)
        if (!def.isView())
            openStore(def);

    // now that we have all our stores, we can initialize views pointing at those stores
    for (StoreDefinition def : storeDefs) {
        if (def.isView())
            openStore(def);
    }

    initializeMetadataVersions(storeDefs);

    // enable aggregate jmx statistics
    if (voldemortConfig.isStatTrackingEnabled())
        if (this.voldemortConfig.isEnableJmxClusterName())
            JmxUtils.registerMbean(new StoreStatsJmx(this.storeStats),
                                   JmxUtils.createObjectName(metadata.getCluster().getName()
                                                             + ".voldemort.store.stats.aggregate",
                                                             "aggregate-perf"));
        else
            JmxUtils.registerMbean(new StoreStatsJmx(this.storeStats),
                                   JmxUtils.createObjectName("voldemort.store.stats.aggregate",
                                                             "aggregate-perf"));

    List<StorageEngine> listOfDisabledStores = Lists.newArrayList();
    for (StorageEngine storageEngine : storeRepository.getAllStorageEngines()) {
        try {
            StoreVersionManager storeVersionManager =
                    (StoreVersionManager) storageEngine.getCapability(StoreCapabilityType.DISABLE_STORE_VERSION);
            if (storeVersionManager.hasAnyDisabledVersion()) {
                listOfDisabledStores.add(storageEngine);
                logger.warn("The following store is marked as disabled: " + storageEngine.getName());
                // Must put server in offline mode.
            }
        } catch (NoSuchCapabilityException e) {
            // Not a read-only store: no-op
        }
    }
    if (listOfDisabledStores.isEmpty()) {
        logger.info("All stores initialized.");
    } else {
        throw new DisabledStoreException("All stores initialized, but the server needs to go "
                                         + "in offline mode because some store(s) are disabled.");
    }
}
public StorageService(StoreRepository storeRepository,
                      MetadataStore metadata,
                      SchedulerService scheduler,
                      VoldemortConfig config) {
    super(ServiceType.STORAGE);
    this.voldemortConfig = config;
    this.scheduler = scheduler;
    this.storeRepository = storeRepository;
    this.metadata = metadata;
    this.scanPermitWrapper = new ScanPermitWrapper(voldemortConfig.getNumScanPermits());
    this.storageConfigs = new ConcurrentHashMap<String, StorageConfiguration>();
    this.clientThreadPool = new ClientThreadPool(config.getClientMaxThreads(),
                                                 config.getClientThreadIdleMs(),
                                                 config.getClientMaxQueuedRequests());
    this.storeFactory = new ClientRequestExecutorPool(config.getClientSelectors(),
                                                      config.getClientMaxConnectionsPerNode(),
                                                      config.getClientConnectionTimeoutMs(),
                                                      config.getSocketTimeoutMs(),
                                                      config.getSocketBufferSize(),
                                                      config.getSocketKeepAlive(),
                                                      "-storage");

    FailureDetectorConfig failureDetectorConfig =
            new FailureDetectorConfig(voldemortConfig).setCluster(metadata.getCluster())
                                                      .setConnectionVerifier(new ServerStoreConnectionVerifier(storeFactory,
                                                                                                                metadata,
                                                                                                                config));

    FailureDetectorConfig slopStreamingFailureDetectorConfig =
            new FailureDetectorConfig(voldemortConfig).setImplementationClassName(AsyncRecoveryFailureDetector.class.getName())
                                                      .setCluster(metadata.getCluster())
                                                      .setConnectionVerifier(new AdminSlopStreamingVerifier(this.metadata.getCluster()));

    this.failureDetector = create(failureDetectorConfig, config.isJmxEnabled());
    this.slopStreamingFailureDetector = create(slopStreamingFailureDetectorConfig,
                                               config.isJmxEnabled());
    this.storeStats = new StoreStats("aggregate.storage-service");
    this.routedStoreFactory = new RoutedStoreFactory();
    this.routedStoreFactory.setThreadPool(this.clientThreadPool);
    this.routedStoreConfig = new RoutedStoreConfig(this.voldemortConfig, this.metadata.getCluster());

    // create the proxy put thread pool
    this.proxyPutWorkerPool = Executors.newFixedThreadPool(config.getMaxProxyPutThreads(),
                                                           new DaemonThreadFactory("voldemort-proxy-put-thread"));
    this.aggregatedProxyPutStats = new ProxyPutStats(null);
    if (config.isJmxEnabled()) {
        JmxUtils.registerMbean(this.aggregatedProxyPutStats,
                               JmxUtils.createObjectName("voldemort.store.rebalancing",
                                                         "aggregate-proxy-puts"));
    }
    this.aggregatedQuotaStats = new QuotaLimitStats(null);
    if (config.isJmxEnabled()) {
        JmxUtils.registerMbean(this.aggregatedQuotaStats,
                               JmxUtils.createObjectName("voldemort.store.quota",
                                                         "aggregate-quota-limit-stats"));
    }
}