/**
 * PS: Test will fail if for some reason we cannot do 50 ops/sec against a hash map. So yeah,
 * pretty unlikely.
 */
@Test
public void testQuotaPctUsageCalculation() throws Exception {
    File tempDir = TestUtils.createTempDir();
    FileBackedCachingStorageEngine quotaStore = new FileBackedCachingStorageEngine("quota-usage-test-store",
                                                                                   tempDir.getAbsolutePath());
    InMemoryStorageEngine<ByteArray, byte[], byte[]> inMemoryEngine = new InMemoryStorageEngine<ByteArray, byte[], byte[]>("inMemoryBackingStore");
    QuotaLimitStats quotaStats = new QuotaLimitStats(null, 1000);
    StatTrackingStore statTrackingStore = new StatTrackingStore(inMemoryEngine, null);
    QuotaLimitingStore quotaLimitingStore = new QuotaLimitingStore(statTrackingStore,
                                                                   statTrackingStore.getStats(),
                                                                   quotaStats,
                                                                   quotaStore);

    int targetRate = 50;

    // provide a quota of 100 gets/sec
    quotaStore.put(new ByteArray(QuotaUtils.makeQuotaKey(statTrackingStore.getName(),
                                                         QuotaType.GET_THROUGHPUT).getBytes()),
                   new Versioned<byte[]>("100.0".getBytes()),
                   null);

    long testIntervalMs = 5000;
    long timeToSleepMs = 1000 / targetRate;
    long startMs = System.currentTimeMillis();
    ByteArray key = new ByteArray("some key".getBytes());

    // issue gets at roughly the target rate for the duration of the test interval
    while ((System.currentTimeMillis() - startMs) <= testIntervalMs) {
        quotaLimitingStore.get(key, null);
        Thread.sleep(timeToSleepMs);
    }

    assertEquals("No get operations should be throttled", 0, quotaStats.getRateLimitedGets());
    assertEquals("Put usage should be 0", 0, quotaStats.getQuotaPctUsedPut());
    assertEquals("Delete usage should be 0", 0, quotaStats.getQuotaPctUsedDelete());
    assertEquals("GetAll usage should be 0", 0, quotaStats.getQuotaPctUsedGetAll());
    assertEquals("Computed usage pct must be close to actual observed qps",
                 statTrackingStore.getStats().getThroughput(Tracked.GET),
                 quotaStats.getQuotaPctUsedGet(),
                 1.0);
}
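A note on the final assertion: because the quota is set to 100 gets/sec, the percentage-used figure and the observed QPS happen to coincide numerically. The relationship below is an assumption stated for illustration, not taken from the source.

// Assumed relationship, for illustration only:
//   pctUsed = observedQps / quota * 100
// With quota == 100 gets/sec this reduces to pctUsed == observedQps,
// which is why the test can compare getQuotaPctUsedGet() directly against
// getThroughput(Tracked.GET) with a delta of 1.0.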
public void registerSystemEngine(StorageEngine<ByteArray, byte[], byte[]> engine) {
    Cluster cluster = this.metadata.getCluster();
    storeRepository.addStorageEngine(engine);

    /* Now add any store wrappers that are enabled */
    Store<ByteArray, byte[], byte[]> store = engine;
    if (voldemortConfig.isVerboseLoggingEnabled())
        store = new LoggingStore<ByteArray, byte[], byte[]>(store,
                                                            cluster.getName(),
                                                            SystemTime.INSTANCE);
    if (voldemortConfig.isMetadataCheckingEnabled())
        store = new InvalidMetadataCheckingStore(metadata.getNodeId(), store, metadata);

    if (voldemortConfig.isStatTrackingEnabled()) {
        StatTrackingStore statStore = new StatTrackingStore(store, this.storeStats);
        store = statStore;
        if (voldemortConfig.isJmxEnabled()) {
            MBeanServer mbeanServer = ManagementFactory.getPlatformMBeanServer();
            ObjectName name = null;
            if (this.voldemortConfig.isEnableJmxClusterName())
                name = JmxUtils.createObjectName(metadata.getCluster().getName()
                                                 + "."
                                                 + JmxUtils.getPackageName(store.getClass()),
                                                 store.getName());
            else
                name = JmxUtils.createObjectName(JmxUtils.getPackageName(store.getClass()),
                                                 store.getName());

            synchronized (mbeanServer) {
                if (mbeanServer.isRegistered(name))
                    JmxUtils.unregisterMbean(mbeanServer, name);
                JmxUtils.registerMbean(mbeanServer,
                                       JmxUtils.createModelMBean(new StoreStatsJmx(statStore.getStats())),
                                       name);
            }
        }
    }

    storeRepository.addLocalStore(store);
}
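When all options are enabled, registerSystemEngine wraps the raw engine as engine → LoggingStore → InvalidMetadataCheckingStore → StatTrackingStore. A minimal sketch of the same composition built by hand, using only the constructors that appear above; cluster, metadata, and storeStats are assumed to be available in scope:

// Sketch only: the wrappers applied by registerSystemEngine, composed manually.
Store<ByteArray, byte[], byte[]> wrapped = engine;
wrapped = new LoggingStore<ByteArray, byte[], byte[]>(wrapped, cluster.getName(), SystemTime.INSTANCE);
wrapped = new InvalidMetadataCheckingStore(metadata.getNodeId(), wrapped, metadata);
wrapped = new StatTrackingStore(wrapped, storeStats);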
@Override
public <K, V, T> Store<K, V, T> getRawStore(String storeName,
                                            InconsistencyResolver<Versioned<V>> resolver) {
    Store<K, V, T> clientStore = null;

    // The lowest layer: transporting the request to the coordinator
    R2Store r2store = null;
    this.d2Client = restClientFactoryConfig.getD2Client();
    if (this.d2Client == null) {
        r2store = new R2Store(storeName,
                              this.config.getHttpBootstrapURL(),
                              this.transportClient,
                              this.config);
    } else {
        r2store = new R2Store(storeName,
                              this.config.getHttpBootstrapURL(),
                              this.d2Client,
                              this.config);
    }
    this.rawStoreList.add(r2store);

    // Bootstrap from the coordinator and obtain all the serialization information.
    String serializerInfoXml = r2store.getSerializerInfoXml();
    SerializerDefinition keySerializerDefinition = RestUtils.parseKeySerializerDefinition(serializerInfoXml);
    SerializerDefinition valueSerializerDefinition = RestUtils.parseValueSerializerDefinition(serializerInfoXml);

    synchronized (this) {
        keySerializerMap.put(storeName, keySerializerDefinition);
        valueSerializerMap.put(storeName, valueSerializerDefinition);
    }

    if (logger.isDebugEnabled()) {
        logger.debug("Bootstrapping for " + storeName + ": Key serializer "
                     + keySerializerDefinition);
        logger.debug("Bootstrapping for " + storeName + ": Value serializer "
                     + valueSerializerDefinition);
    }

    // Start building the stack: first, the transport layer
    Store<ByteArray, byte[], byte[]> store = r2store;

    // TODO: Add jmxId / some unique identifier to the Mbean name
    if (this.config.isEnableJmx()) {
        StatTrackingStore statStore = new StatTrackingStore(store, this.stats);
        store = statStore;
        JmxUtils.registerMbean(new StoreStatsJmx(statStore.getStats()),
                               JmxUtils.createObjectName(JmxUtils.getPackageName(store.getClass()),
                                                         store.getName()));
    }

    // Add compression layer
    if (keySerializerDefinition.hasCompression() || valueSerializerDefinition.hasCompression()) {
        store = new CompressingStore(store,
                                     new CompressionStrategyFactory().get(keySerializerDefinition.getCompression()),
                                     new CompressionStrategyFactory().get(valueSerializerDefinition.getCompression()));
    }

    // Add serialization layer
    Serializer<K> keySerializer = (Serializer<K>) serializerFactory.getSerializer(keySerializerDefinition);
    Serializer<V> valueSerializer = (Serializer<V>) serializerFactory.getSerializer(valueSerializerDefinition);
    clientStore = SerializingStore.wrap(store, keySerializer, valueSerializer, null);

    // Add inconsistency resolving layer
    InconsistencyResolver<Versioned<V>> secondaryResolver = resolver == null
                                                            ? new TimeBasedInconsistencyResolver<V>()
                                                            : resolver;
    clientStore = new InconsistencyResolvingStore<K, V, T>(clientStore,
                                                           new ChainedResolver<Versioned<V>>(new VectorClockInconsistencyResolver<V>(),
                                                                                             secondaryResolver));
    return clientStore;
}
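For orientation, the client-side stack that getRawStore assembles, read bottom to top:

// R2Store                         - HTTP/R2 transport to the coordinator
//   -> StatTrackingStore          - only when JMX is enabled in the config
//   -> CompressingStore           - only when a serializer declares compression
//   -> SerializingStore           - key/value (de)serialization
//   -> InconsistencyResolvingStore - vector-clock resolution plus the secondary resolver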
/**
 * Register the given engine with the storage repository
 *
 * @param engine The storage engine to register
 * @param isReadOnly Boolean indicating if this store is read-only
 * @param storeType The type of the store
 * @param storeDef Store definition for the store to be registered
 */
public void registerEngine(StorageEngine<ByteArray, byte[], byte[]> engine,
                           boolean isReadOnly,
                           String storeType,
                           StoreDefinition storeDef) {
    Cluster cluster = this.metadata.getCluster();
    storeRepository.addStorageEngine(engine);

    /* Now add any store wrappers that are enabled */
    Store<ByteArray, byte[], byte[]> store = engine;
    boolean isMetadata = store.getName().compareTo(MetadataStore.METADATA_STORE_NAME) == 0;
    boolean isSlop = storeType.compareTo("slop") == 0;
    boolean isView = storeType.compareTo(ViewStorageConfiguration.TYPE_NAME) == 0;

    if (voldemortConfig.isVerboseLoggingEnabled())
        store = new LoggingStore<ByteArray, byte[], byte[]>(store,
                                                            cluster.getName(),
                                                            SystemTime.INSTANCE);
    if (!isSlop) {
        if (!isReadOnly && !isMetadata && !isView) {
            // wrap store to enforce retention policy
            if (voldemortConfig.isEnforceRetentionPolicyOnRead() && storeDef != null) {
                RetentionEnforcingStore retentionEnforcingStore = new RetentionEnforcingStore(store,
                                                                                              storeDef,
                                                                                              voldemortConfig.isDeleteExpiredValuesOnRead(),
                                                                                              SystemTime.INSTANCE);
                metadata.addMetadataStoreListener(store.getName(), retentionEnforcingStore);
                store = retentionEnforcingStore;
            }

            if (voldemortConfig.isEnableRebalanceService()) {
                ProxyPutStats proxyPutStats = new ProxyPutStats(aggregatedProxyPutStats);
                if (voldemortConfig.isJmxEnabled()) {
                    JmxUtils.registerMbean(proxyPutStats,
                                           JmxUtils.createObjectName("voldemort.store.rebalancing",
                                                                     engine.getName() + "-proxy-puts"));
                }
                store = new RedirectingStore(store,
                                             metadata,
                                             storeRepository,
                                             failureDetector,
                                             storeFactory,
                                             proxyPutWorkerPool,
                                             proxyPutStats);
                if (voldemortConfig.isJmxEnabled()) {
                    MBeanServer mbeanServer = ManagementFactory.getPlatformMBeanServer();
                    ObjectName name = null;
                    if (this.voldemortConfig.isEnableJmxClusterName())
                        name = JmxUtils.createObjectName(cluster.getName()
                                                         + "."
                                                         + JmxUtils.getPackageName(RedirectingStore.class),
                                                         store.getName());
                    else
                        name = JmxUtils.createObjectName(JmxUtils.getPackageName(RedirectingStore.class),
                                                         store.getName());
                    synchronized (mbeanServer) {
                        if (mbeanServer.isRegistered(name))
                            JmxUtils.unregisterMbean(mbeanServer, name);
                        JmxUtils.registerMbean(mbeanServer, JmxUtils.createModelMBean(store), name);
                    }
                }
            }
        }

        if (voldemortConfig.isMetadataCheckingEnabled() && !isMetadata) {
            store = new InvalidMetadataCheckingStore(metadata.getNodeId(), store, metadata);
        }
    }

    if (voldemortConfig.isStatTrackingEnabled()) {
        StatTrackingStore statStore = new StatTrackingStore(store, this.storeStats);
        store = statStore;
        if (voldemortConfig.isJmxEnabled()) {
            MBeanServer mbeanServer = ManagementFactory.getPlatformMBeanServer();
            ObjectName name = null;
            if (this.voldemortConfig.isEnableJmxClusterName())
                name = JmxUtils.createObjectName(metadata.getCluster().getName()
                                                 + "."
                                                 + JmxUtils.getPackageName(store.getClass()),
                                                 store.getName());
            else
                name = JmxUtils.createObjectName(JmxUtils.getPackageName(store.getClass()),
                                                 store.getName());
            synchronized (mbeanServer) {
                if (mbeanServer.isRegistered(name))
                    JmxUtils.unregisterMbean(mbeanServer, name);
                JmxUtils.registerMbean(mbeanServer,
                                       JmxUtils.createModelMBean(new StoreStatsJmx(statStore.getStats())),
                                       name);
            }
        }

        // Wrap everything under the rate limiting store (barring the metadata store)
        if (voldemortConfig.isEnableQuotaLimiting() && !isMetadata) {
            FileBackedCachingStorageEngine quotaStore = (FileBackedCachingStorageEngine) storeRepository.getStorageEngine(SystemStoreConstants.SystemStoreName.voldsys$_store_quotas.toString());
            QuotaLimitStats quotaStats = new QuotaLimitStats(this.aggregatedQuotaStats);
            QuotaLimitingStore rateLimitingStore = new QuotaLimitingStore(store,
                                                                          this.storeStats,
                                                                          quotaStats,
                                                                          quotaStore);
            if (voldemortConfig.isJmxEnabled()) {
                JmxUtils.registerMbean(this.aggregatedQuotaStats,
                                       JmxUtils.createObjectName("voldemort.store.quota",
                                                                 store.getName() + "-quota-limit-stats"));
            }
            store = rateLimitingStore;
        }
    }

    storeRepository.addLocalStore(store);
}
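In summary, registerEngine applies the following wrappers, innermost first, with each layer conditional on configuration and on the store type (slop, read-only, metadata, and view stores skip some of them):

// StorageEngine (the raw engine)
//   -> LoggingStore                 - verbose logging enabled
//   -> RetentionEnforcingStore      - retention enforced on read, storeDef present
//   -> RedirectingStore             - rebalance service enabled
//   -> InvalidMetadataCheckingStore - metadata checking enabled, non-metadata stores
//   -> StatTrackingStore            - stat tracking enabled
//   -> QuotaLimitingStore           - quota limiting enabled, non-metadata stores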
protected void outputJSON(HttpServletResponse response) {
    StringBuilder sb = new StringBuilder("{\n");

    // Top-level server information
    sb.append(" \"servertime\": \"");
    sb.append(new Date());
    sb.append("\",");

    sb.append("\n \"server\": \"");
    sb.append(myMachine);
    sb.append("\",");

    sb.append("\n \"node\": \"");
    sb.append(server.getMetadataStore().getNodeId());
    sb.append("\",");

    sb.append("\n \"uptime\": \"");
    sb.append(abstractSocketService.getStatusManager().getFormattedUptime());
    sb.append("\",");

    sb.append("\n \"num_workers\": ");
    sb.append(abstractSocketService.getStatusManager().getActiveWorkersCount());
    sb.append(",");

    sb.append("\n \"pool_size\": ");
    sb.append(abstractSocketService.getStatusManager().getWorkerPoolSize());
    sb.append(",");

    // Per-store operation counters
    sb.append("\n \"stores\": {");

    int i = 0;
    for (Store<ByteArray, byte[], byte[]> store : server.getStoreRepository().getAllLocalStores()) {
        if (i++ > 0) {
            sb.append(",");
        }
        sb.append("\n \"");
        sb.append(store.getName());
        sb.append("\" : {\n");

        if (store instanceof StatTrackingStore) {
            StatTrackingStore statStore = (StatTrackingStore) store;
            Map<Tracked, RequestCounter> stats = statStore.getStats().getCounters();
            for (Tracked t : Tracked.values()) {
                if (t == Tracked.EXCEPTION) {
                    continue;
                }
                sb.append(fillCommonStats(stats, t));
            }
            sb.append(",\n \"num_exceptions\": ");
            sb.append(statStore.getStats().getCount(Tracked.EXCEPTION));
            sb.append("\n");
            sb.append(" }");
        }
        if (store instanceof QuotaLimitingStore) {
            QuotaLimitingStore quotaStore = (QuotaLimitingStore) store;
            Map<Tracked, RequestCounter> stats = quotaStore.getStats().getCounters();
            for (Tracked t : Tracked.values()) {
                if (t == Tracked.EXCEPTION) {
                    continue;
                }
                sb.append(fillCommonStats(stats, t));
            }
            sb.append(",\n \"num_exceptions\": ");
            sb.append(quotaStore.getStats().getCount(Tracked.EXCEPTION));
            sb.append("\n");
            sb.append(" }");
        }
    }
    sb.append("\n }\n");
    sb.append("}\n");

    try {
        response.setContentType("text/plain");
        OutputStreamWriter writer = new OutputStreamWriter(response.getOutputStream());
        writer.write(sb.toString());
        writer.flush();
    } catch (Exception e) {
        throw new VoldemortException(e);
    }
}
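For illustration, the response body produced by outputJSON has roughly this shape. All values are placeholders, and the per-operation fields emitted by fillCommonStats are elided because that helper is not shown here.

{
 "servertime": "Mon Jan 01 00:00:00 UTC 2024",
 "server": "some-host",
 "node": "0",
 "uptime": "1 day, 2 hours",
 "num_workers": 8,
 "pool_size": 16,
 "stores": {
  "some-store" : {
   ...fields from fillCommonStats...,
   "num_exceptions": 0
  }
 }
}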