public Backend(Configuration storageConfig) {
    this.storageConfig = storageConfig;

    storeManager = getStorageManager(storageConfig);
    indexes = getIndexes(storageConfig);
    storeFeatures = storeManager.getFeatures();

    basicMetrics = storageConfig.getBoolean(BASIC_METRICS, BASIC_METRICS_DEFAULT);
    mergeBasicMetrics = storageConfig.getBoolean(MERGE_BASIC_METRICS_KEY, MERGE_BASIC_METRICS_DEFAULT);

    int bufferSizeTmp = storageConfig.getInt(BUFFER_SIZE_KEY, BUFFER_SIZE_DEFAULT);
    Preconditions.checkArgument(bufferSizeTmp >= 0,
            "Buffer size must be non-negative (use 0 to disable)");
    if (!storeFeatures.supportsBatchMutation()) {
        bufferSize = 0;
        log.debug("Buffering disabled because backend does not support batch mutations");
    } else {
        bufferSize = bufferSizeTmp;
    }

    writeAttempts = storageConfig.getInt(WRITE_ATTEMPTS_KEY, WRITE_ATTEMPTS_DEFAULT);
    Preconditions.checkArgument(writeAttempts > 0, "Write attempts must be positive");
    readAttempts = storageConfig.getInt(READ_ATTEMPTS_KEY, READ_ATTEMPTS_DEFAULT);
    Preconditions.checkArgument(readAttempts > 0, "Read attempts must be positive");
    persistAttemptWaittime = storageConfig.getInt(STORAGE_ATTEMPT_WAITTIME_KEY, STORAGE_ATTEMPT_WAITTIME_DEFAULT);
    Preconditions.checkArgument(persistAttemptWaittime > 0,
            "Persistence attempt retry wait time must be positive");

    // If the lock prefix is unspecified, default it to the store manager's name
    storageConfig.setProperty(ExpectedValueCheckingStore.LOCAL_LOCK_MEDIATOR_PREFIX_KEY,
            storageConfig.getString(ExpectedValueCheckingStore.LOCAL_LOCK_MEDIATOR_PREFIX_KEY, storeManager.getName()));

    final String lockBackendName = storageConfig.getString(
            GraphDatabaseConfiguration.LOCK_BACKEND, GraphDatabaseConfiguration.LOCK_BACKEND_DEFAULT);
    if (REGISTERED_LOCKERS.containsKey(lockBackendName)) {
        lockerCreator = REGISTERED_LOCKERS.get(lockBackendName);
    } else {
        throw new TitanConfigurationException("Unknown lock backend \"" + lockBackendName
                + "\". Known lock backends: " + Joiner.on(", ").join(REGISTERED_LOCKERS.keySet()) + ".");
    }
    // Never used for backends with innate transaction support, but we maintain the
    // non-null invariant regardless; it defaults to the consistentkey implementation
    // if none is specified.
    Preconditions.checkNotNull(lockerCreator);

    if (storeFeatures.isDistributed() && storeFeatures.isKeyOrdered()) {
        log.debug("Wrapping index store with HashPrefix");
        hashPrefixIndex = true;
    } else {
        hashPrefixIndex = false;
    }
}
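A minimal usage sketch for the constructor above, assuming Configuration is commons-configuration's interface and that the BUFFER_SIZE_KEY and lock-backend constants it reads are visible from the calling scope; the property values are illustrative only:

// Hedged sketch: driving the Backend constructor with an in-memory configuration.
BaseConfiguration storageConfig = new BaseConfiguration();
storageConfig.setProperty(BUFFER_SIZE_KEY, 0);              // 0 disables write buffering, per the precondition
storageConfig.setProperty(GraphDatabaseConfiguration.LOCK_BACKEND,
        GraphDatabaseConfiguration.LOCK_BACKEND_DEFAULT);   // must name a key in REGISTERED_LOCKERS
Backend backend = new Backend(storageConfig);
// An unrecognized lock backend name would instead surface as a TitanConfigurationException.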
public KeyValueStoreManagerAdapter(KeyValueStoreManager manager, Map<String, Integer> keyLengths) {
    this.manager = manager;
    ImmutableMap.Builder<String, Integer> mb = ImmutableMap.builder();
    if (keyLengths != null && !keyLengths.isEmpty()) {
        mb.putAll(keyLengths);
    }
    this.keyLengths = mb.build();
    features = manager.getFeatures().clone();
    features.supportsBatchMutation = false;
}
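A sketch of how the adapter might be instantiated; kvManager stands in for any available KeyValueStoreManager implementation, and the store name and key length are illustrative assumptions:

// Hedged sketch: adapting a key-value manager with a fixed key length for one named store.
Map<String, Integer> keyLengths = ImmutableMap.of("edgestore", 8);  // hypothetical store name and length
KeyValueStoreManagerAdapter adapter = new KeyValueStoreManagerAdapter(kvManager, keyLengths);
// The adapter reports batch mutation as unsupported because the constructor cleared
// that flag on the cloned feature set.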
public HBaseStoreManager(org.apache.commons.configuration.Configuration config) throws StorageException {
    super(config, PORT_DEFAULT);

    this.tableName = config.getString(TABLE_NAME_KEY, TABLE_NAME_DEFAULT);

    this.hconf = HBaseConfiguration.create();
    for (Map.Entry<String, String> confEntry : HBASE_CONFIGURATION.entrySet()) {
        if (config.containsKey(confEntry.getKey())) {
            hconf.set(confEntry.getValue(), config.getString(confEntry.getKey()));
        }
    }

    // Copy a subset of our commons config into a Hadoop config
    org.apache.commons.configuration.Configuration hbCommons = config.subset(HBASE_CONFIGURATION_NAMESPACE);
    @SuppressWarnings("unchecked") // I hope commons-config eventually fixes this
    Iterator<String> keys = hbCommons.getKeys();
    int keysLoaded = 0;
    while (keys.hasNext()) {
        String key = keys.next();
        String value = hbCommons.getString(key);
        logger.debug("HBase configuration: setting {}={}", key, value);
        hconf.set(key, value);
        keysLoaded++;
    }
    logger.debug("HBase configuration: set a total of {} configuration values", keysLoaded);

    connectionPool = new HTablePool(hconf, connectionPoolSize);

    openStores = new ConcurrentHashMap<String, HBaseKeyColumnValueStore>();

    // TODO: publicly mutable fields are bad practice; this should be fixed
    features = new StoreFeatures();
    features.supportsScan = true;
    features.supportsBatchMutation = true;
    features.supportsTransactions = false;
    features.supportsConsistentKeyOperations = true;
    features.supportsLocking = false;
    features.isKeyOrdered = false;
    features.isDistributed = true;
    features.hasLocalKeyPartition = false;
}
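A sketch of passing HBase settings through the namespace subset copied above; the "hbase-config" prefix is an assumption, since the value of HBASE_CONFIGURATION_NAMESPACE is not shown in this snippet:

// Hedged sketch: arbitrary HBase/Hadoop settings forwarded via the commons-config namespace.
BaseConfiguration config = new BaseConfiguration();
config.setProperty("hbase-config.hbase.zookeeper.quorum", "zk1.example.com,zk2.example.com");
config.setProperty("hbase-config.zookeeper.znode.parent", "/hbase");
HBaseStoreManager manager = new HBaseStoreManager(config);
// subset() strips the namespace prefix, so each remaining key is set verbatim on the
// backing HBaseConfiguration, as the debug logging above records.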
public Backend(Configuration storageConfig) {
    storeManager = getStorageManager(storageConfig);
    isKeyColumnValueStore = storeManager instanceof KeyColumnValueStoreManager;
    storeFeatures = storeManager.getFeatures();

    int bufferSizeTmp = storageConfig.getInt(BUFFER_SIZE_KEY, BUFFER_SIZE_DEFAULT);
    Preconditions.checkArgument(bufferSizeTmp >= 0,
            "Buffer size must be non-negative (use 0 to disable)");
    if (!storeFeatures.supportsBatchMutation()) {
        bufferSize = 0;
        log.debug("Buffering disabled because backend does not support batch mutations");
    } else {
        bufferSize = bufferSizeTmp;
    }

    if (!storeFeatures.supportsLocking() && storeFeatures.supportsConsistentKeyOperations()) {
        lockConfiguration = new ConsistentKeyLockConfiguration(storageConfig, storeManager.toString());
    } else {
        lockConfiguration = null;
    }

    writeAttempts = storageConfig.getInt(WRITE_ATTEMPTS_KEY, WRITE_ATTEMPTS_DEFAULT);
    Preconditions.checkArgument(writeAttempts > 0, "Write attempts must be positive");
    readAttempts = storageConfig.getInt(READ_ATTEMPTS_KEY, READ_ATTEMPTS_DEFAULT);
    Preconditions.checkArgument(readAttempts > 0, "Read attempts must be positive");
    persistAttemptWaittime = storageConfig.getInt(STORAGE_ATTEMPT_WAITTIME_KEY, STORAGE_ATTEMPT_WAITTIME_DEFAULT);
    Preconditions.checkArgument(persistAttemptWaittime > 0,
            "Persistence attempt retry wait time must be positive");

    if (storeFeatures.isDistributed() && storeFeatures.isKeyOrdered()) {
        log.debug("Wrapping index store with HashPrefix");
        hashPrefixIndex = true;
    } else {
        hashPrefixIndex = false;
    }
}
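This earlier variant of the Backend constructor picks a consistent-key lock configuration when the store cannot lock natively; a sketch of that decision in isolation, with the feature values treated as illustrative (they match what the HBase manager above reports):

// Hedged sketch of the locking fallback: no native locking plus consistent-key support
// yields a ConsistentKeyLockConfiguration; otherwise lockConfiguration stays null.
StoreFeatures f = storeManager.getFeatures();
ConsistentKeyLockConfiguration lockConfiguration =
        (!f.supportsLocking() && f.supportsConsistentKeyOperations())
                ? new ConsistentKeyLockConfiguration(storageConfig, storeManager.toString())
                : null;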