/**
 * @param keyBytes keyName string serialized as bytes, e.g. 'cluster.xml'
 * @return list of versioned values (exactly one for metadata), e.g. the UTF-8 bytes of the
 *     cluster xml definition
 * @throws VoldemortException if the key is not a known metadata key or the read fails
 */
public List<Versioned<byte[]>> get(ByteArray keyBytes) throws VoldemortException {
  try {
    String key = ByteUtils.getString(keyBytes.get(), "UTF-8");
    if (METADATA_KEYS.contains(key)) {
      List<Versioned<byte[]>> values = Lists.newArrayList();
      // Get the cached value and convert it to a string
      Versioned<String> value = convertObjectToString(key, metadataCache.get(key));
      values.add(
          new Versioned<byte[]>(
              ByteUtils.getBytes(value.getValue(), "UTF-8"), value.getVersion()));
      return values;
    } else {
      throw new VoldemortException("Unhandled key:" + key + " for MetadataStore get()");
    }
  } catch (Exception e) {
    throw new VoldemortException(
        "Failed to read metadata key:"
            + ByteUtils.getString(keyBytes.get(), "UTF-8")
            + "; delete the config/.temp and config/.version directories and restart.",
        e);
  }
}
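// Usage sketch (hypothetical caller, not from the source): metadata keys and
// values travel as UTF-8 bytes, so a cluster.xml lookup round-trips through
// ByteArray/byte[]. Assumes a MetadataStore instance is available.
public static String readClusterXml(MetadataStore metadataStore) {
  ByteArray clusterKey = new ByteArray(ByteUtils.getBytes("cluster.xml", "UTF-8"));
  // Metadata keys return exactly one version
  List<Versioned<byte[]>> versions = metadataStore.get(clusterKey);
  return ByteUtils.getString(versions.get(0).getValue(), "UTF-8");
}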
/**
 * A write-through put to the inner store.
 *
 * @param keyBytes keyName string serialized as bytes, e.g. 'cluster.xml'
 * @param valueBytes versioned byte[], e.g. the UTF-8 bytes of the cluster xml definition
 * @throws VoldemortException
 */
public synchronized void put(ByteArray keyBytes, Versioned<byte[]> valueBytes)
    throws VoldemortException {
  String key = ByteUtils.getString(keyBytes.get(), "UTF-8");
  Versioned<String> value =
      new Versioned<String>(
          ByteUtils.getString(valueBytes.getValue(), "UTF-8"), valueBytes.getVersion());
  Versioned<Object> valueObject = convertStringToObject(key, value);
  this.put(key, valueObject);
}
@Test public void testCollision() throws Exception { File fileDir = TestUtils.createTempDir(); File dataFile = new File(fileDir + File.separator + "0_0_0.data"); dataFile.deleteOnExit(); FileOutputStream fw = new FileOutputStream(dataFile.getAbsoluteFile()); DataOutputStream outputStream = new DataOutputStream(fw); /* * 4dc968ff0ee35c209572d4777b721587d36fa7b21bdc56b74a3dc0783e7b9518afbfa200a8284bf36e8e4b55b35f427593d849676da0d1555d8360fb5f07fea2 * and the (different by two bits) * 4dc968ff0ee35c209572d4777b721587d36fa7b21bdc56b74a3dc0783e7b9518afbfa202a8284bf36e8e4b55b35f427593d849676da0d1d55d8360fb5f07fea2 * both have MD5 hash 008ee33a9d58b51cfeb425b0959121c9 */ String[] md5collision = { "4dc968ff0ee35c209572d4777b721587d36fa7b21bdc56b74a3dc0783e7b9518afbfa200a8284bf36e8e4b55b35f427593d849676da0d1555d8360fb5f07fea2", "4dc968ff0ee35c209572d4777b721587d36fa7b21bdc56b74a3dc0783e7b9518afbfa202a8284bf36e8e4b55b35f427593d849676da0d1d55d8360fb5f07fea2" }; outputStream.writeShort(2); for (int i = 0; i < 2; i++) { String input = md5collision[i]; byte[] hexInput = ByteUtils.fromHexString(input); outputStream.writeInt(hexInput.length); outputStream.writeInt(hexInput.length); outputStream.write(hexInput); outputStream.write(hexInput); } outputStream.close(); fw.close(); File indexFile = new File(fileDir + File.separator + "0_0_0.index"); indexFile.createNewFile(); indexFile.deleteOnExit(); ChunkedFileSet fileSet = new ChunkedFileSet( fileDir, getTempStrategy(), NODE_ID, VoldemortConfig.DEFAULT_RO_MAX_VALUE_BUFFER_ALLOCATION_SIZE); for (int i = 0; i < 2; i++) { String input = md5collision[i]; byte[] hexInput = ByteUtils.fromHexString(input); byte[] hexValue = fileSet.readValue(hexInput, 0, 0); Assert.assertArrayEquals(hexInput, hexValue); } }
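// Verification sketch (not part of the test above): the two payloads really do
// collide under MD5. Uses only the JDK MessageDigest API plus helpers already
// used in the test.
public static void verifyMd5Collision(String[] md5collision) throws Exception {
  java.security.MessageDigest md5 = java.security.MessageDigest.getInstance("MD5");
  byte[] digest1 = md5.digest(ByteUtils.fromHexString(md5collision[0]));
  byte[] digest2 = md5.digest(ByteUtils.fromHexString(md5collision[1]));
  // Both digests equal 008ee33a9d58b51cfeb425b0959121c9
  Assert.assertArrayEquals(digest1, digest2);
}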
@Ignore
@Test
public void testHintedHandoff() throws Exception {
  Set<Integer> failedNodes = getFailedNodes();
  Multimap<Integer, ByteArray> failedKeys = populateStore(failedNodes);
  Map<ByteArray, byte[]> dataInSlops = Maps.newHashMap();
  Set<ByteArray> slopKeys = makeSlopKeys(failedKeys, Slop.Operation.PUT);
  for (Store<ByteArray, Slop, byte[]> slopStore : slopStores.values()) {
    Map<ByteArray, List<Versioned<Slop>>> res = slopStore.getAll(slopKeys, null);
    for (Map.Entry<ByteArray, List<Versioned<Slop>>> entry : res.entrySet()) {
      Slop slop = entry.getValue().get(0).getValue();
      dataInSlops.put(slop.getKey(), slop.getValue());
      if (logger.isTraceEnabled()) logger.trace(slop);
    }
  }
  for (Map.Entry<Integer, ByteArray> failedKey : failedKeys.entries()) {
    byte[] expected = keyValues.get(failedKey.getValue()).get();
    byte[] actual = dataInSlops.get(failedKey.getValue());
    assertNotNull("data should be stored in the slop", actual);
    assertEquals(
        "the correct value should be stored in the slop",
        0,
        ByteUtils.compare(actual, expected));
  }
}
public VoldemortOperation(byte[] bytes) {
  if (bytes == null || bytes.length <= 1)
    throw new SerializationException("Not enough bytes to deserialize");
  DataInputStream inputStream = new DataInputStream(new ByteArrayInputStream(bytes));
  try {
    this.opCode = inputStream.readByte();
    switch (opCode) {
      case VoldemortOpCode.GET_OP_CODE:
        this.version = null;
        this.key = inputStream.readUTF();
        this.value = null;
        break;
      case VoldemortOpCode.PUT_OP_CODE:
        this.version = new VectorClock(bytes, 1);
        this.key = inputStream.readUTF();
        int valueSize = inputStream.readInt();
        this.value = new byte[valueSize];
        ByteUtils.read(inputStream, this.value);
        break;
      case VoldemortOpCode.DELETE_OP_CODE:
        this.version = new VectorClock(bytes, 1);
        this.key = inputStream.readUTF();
        this.value = null;
        break;
      default:
        throw new SerializationException("Unknown opcode: " + bytes[0]);
    }
  } catch (IOException e) {
    throw new SerializationException(e);
  }
}
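// Companion sketch (hypothetical helper, not from the source): frame a GET
// operation the way the constructor above parses it, i.e. one opcode byte
// followed by the key in DataOutputStream.writeUTF encoding.
public static byte[] frameGetOperation(String key) {
  try {
    ByteArrayOutputStream byteStream = new ByteArrayOutputStream();
    DataOutputStream stream = new DataOutputStream(byteStream);
    stream.writeByte(VoldemortOpCode.GET_OP_CODE);
    stream.writeUTF(key);
    stream.flush();
    return byteStream.toByteArray();
  } catch (IOException e) {
    throw new SerializationException(e);
  }
}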
private byte[] readBytes(DataInputStream stream) throws IOException { int size = stream.readShort(); if (size < 0) return null; byte[] bytes = new byte[size]; ByteUtils.read(stream, bytes); return bytes; }
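// Companion sketch for readBytes above: the matching length-prefixed writer,
// where a negative size short encodes null. Hypothetical helper, not from the
// source.
private void writeBytes(DataOutputStream stream, byte[] bytes) throws IOException {
  if (bytes == null) {
    stream.writeShort(-1);
    return;
  }
  stream.writeShort(bytes.length);
  stream.write(bytes);
}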
public VAdminProto.GetMetadataResponse handleGetMetadata(VAdminProto.GetMetadataRequest request) {
  VAdminProto.GetMetadataResponse.Builder response =
      VAdminProto.GetMetadataResponse.newBuilder();
  try {
    ByteArray key = ProtoUtils.decodeBytes(request.getKey());
    String keyString = ByteUtils.getString(key.get(), "UTF-8");
    if (MetadataStore.METADATA_KEYS.contains(keyString)) {
      List<Versioned<byte[]>> versionedList = metadataStore.get(key);
      if (versionedList.size() > 0) {
        Versioned<byte[]> versioned = versionedList.get(0);
        response.setVersion(ProtoUtils.encodeVersioned(versioned));
      }
    } else {
      throw new VoldemortException("Metadata key '" + keyString + "' is not handled yet");
    }
  } catch (VoldemortException e) {
    response.setError(ProtoUtils.encodeError(errorCodeMapper, e));
    logger.error("handleGetMetadata failed for request(" + request.toString() + ")", e);
  }
  return response.build();
}
/**
 * Helper function to increment the version and put() the value.
 *
 * @param metadataStore the store to update
 * @param keyString the metadata key, e.g. 'cluster.xml'
 * @param value the new value to store under the incremented version
 */
private void incrementVersionAndPut(MetadataStore metadataStore, String keyString, Object value) {
  ByteArray key = new ByteArray(ByteUtils.getBytes(keyString, "UTF-8"));
  VectorClock current = (VectorClock) metadataStore.getVersions(key).get(0);
  metadataStore.put(
      keyString,
      new Versioned<Object>(value, current.incremented(0, System.currentTimeMillis())));
}
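// Sketch of the versioning step above: VectorClock.incremented() returns a
// copy with the given node's counter bumped, leaving the original clock
// untouched. Node id 0 is purely illustrative.
public static VectorClock bumpClock(VectorClock current) {
  VectorClock next = current.incremented(0, System.currentTimeMillis());
  // `next` now dominates `current`, so a put under `next` supersedes the stored version
  return next;
}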
private Set<ByteArray> makeSlopKeys( Multimap<Integer, ByteArray> failedKeys, Slop.Operation operation) { Set<ByteArray> slopKeys = Sets.newHashSet(); for (Map.Entry<Integer, ByteArray> entry : failedKeys.entries()) { byte[] opCode = new byte[] {operation.getOpCode()}; byte[] spacer = new byte[] {(byte) 0}; byte[] storeName = ByteUtils.getBytes(STORE_NAME, "UTF-8"); byte[] nodeIdBytes = new byte[ByteUtils.SIZE_OF_INT]; ByteUtils.writeInt(nodeIdBytes, entry.getKey(), 0); ByteArray slopKey = new ByteArray( ByteUtils.cat( opCode, spacer, storeName, spacer, nodeIdBytes, spacer, entry.getValue().get())); slopKeys.add(slopKey); } return slopKeys; }
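// Inverse sketch (hypothetical helper, not from the source): recover the node
// id from a slop key built by makeSlopKeys above. Offsets follow the
// concatenation order:
// opcode(1) + spacer(1) + storeName + spacer(1) + nodeId(4) + spacer(1) + key.
private static int nodeIdFromSlopKey(ByteArray slopKey, String storeName) {
  int offset = 1 + 1 + ByteUtils.getBytes(storeName, "UTF-8").length + 1;
  return ByteUtils.readInt(slopKey.get(), offset);
}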
private void checkValues(Versioned<byte[]> value, List<Versioned<byte[]>> list, ByteArray key) {
  assertEquals("should return exactly one value", 1, list.size());
  assertEquals(
      "should return the last saved version", value.getVersion(), list.get(0).getVersion());
  assertEquals(
      "should return the last saved value (key:"
          + ByteUtils.getString(key.get(), "UTF-8")
          + ")",
      new String(value.getValue()),
      new String(list.get(0).getValue()));
}
public VAdminProto.UpdateMetadataResponse handleUpdateMetadata( VAdminProto.UpdateMetadataRequest request) { VAdminProto.UpdateMetadataResponse.Builder response = VAdminProto.UpdateMetadataResponse.newBuilder(); try { ByteArray key = ProtoUtils.decodeBytes(request.getKey()); String keyString = ByteUtils.getString(key.get(), "UTF-8"); if (MetadataStore.METADATA_KEYS.contains(keyString)) { Versioned<byte[]> versionedValue = ProtoUtils.decodeVersioned(request.getVersioned()); metadataStore.put(new ByteArray(ByteUtils.getBytes(keyString, "UTF-8")), versionedValue); } } catch (VoldemortException e) { response.setError(ProtoUtils.encodeError(errorCodeMapper, e)); logger.error("handleUpdateMetadata failed for request(" + request.toString() + ")", e); } return response.build(); }
@JmxOperation( description = "Clean all rebalancing server/cluster states from this node.", impact = MBeanOperationInfo.ACTION) public void cleanAllRebalancingState() { for (String key : OPTIONAL_KEYS) { if (!key.equals(NODE_ID_KEY)) innerStore.delete(key, getVersions(new ByteArray(ByteUtils.getBytes(key, "UTF-8"))).get(0)); } init(getNodeId()); }
public byte[] getValidValue(ByteArray key) {
  String keyString = ByteUtils.getString(key.get(), "UTF-8");
  if (MetadataStore.CLUSTER_KEY.equals(keyString)) {
    return ByteUtils.getBytes(
        new ClusterMapper().writeCluster(ServerTestUtils.getLocalCluster(1)), "UTF-8");
  } else if (MetadataStore.STORES_KEY.equals(keyString)) {
    return ByteUtils.getBytes(
        new StoreDefinitionsMapper().writeStoreList(ServerTestUtils.getStoreDefs(1)), "UTF-8");
  } else if (MetadataStore.SERVER_STATE_KEY.equals(keyString)) {
    int i = (int) (Math.random() * VoldemortState.values().length);
    return ByteUtils.getBytes(VoldemortState.values()[i].toString(), "UTF-8");
  } else if (MetadataStore.REBALANCING_STEAL_INFO.equals(keyString)) {
    int size = (int) (Math.random() * 10);
    List<Integer> partition = new ArrayList<Integer>();
    for (int i = 0; i < size; i++) {
      // parenthesize before casting: (int) Math.random() would always be 0
      partition.add((int) (Math.random() * 10));
    }
    return ByteUtils.getBytes(
        new RebalancerState(
                Arrays.asList(
                    new RebalancePartitionsInfo(
                        0,
                        (int) (Math.random() * 5),
                        partition,
                        new ArrayList<Integer>(0),
                        new ArrayList<Integer>(0),
                        Arrays.asList("testStoreName"),
                        new HashMap<String, String>(),
                        new HashMap<String, String>(),
                        (int) (Math.random() * 3))))
            .toJsonString(),
        "UTF-8");
  } else if (MetadataStore.GRANDFATHERING_INFO.equals(keyString)) {
    int size = (int) (Math.random() * 10);
    List<Integer> partition = new ArrayList<Integer>();
    for (int i = 0; i < size; i++) {
      partition.add((int) (Math.random() * 10));
    }
    return ByteUtils.getBytes(
        new GrandfatherState(
                Arrays.asList(
                    new RebalancePartitionsInfo(
                        0,
                        (int) (Math.random() * 5),
                        partition,
                        new ArrayList<Integer>(0),
                        new ArrayList<Integer>(0),
                        Arrays.asList("testStoreName"),
                        new HashMap<String, String>(),
                        new HashMap<String, String>(),
                        (int) (Math.random() * 3))))
            .toJsonString(),
        "UTF-8");
  }
  throw new RuntimeException("Unhandled key:" + keyString + " passed");
}
public byte[] getValidValue(ByteArray key) {
  String keyString = ByteUtils.getString(key.get(), "UTF-8");
  if (MetadataStore.CLUSTER_KEY.equals(keyString)
      || MetadataStore.REBALANCING_SOURCE_CLUSTER_XML.equals(keyString)) {
    return ByteUtils.getBytes(
        new ClusterMapper().writeCluster(ServerTestUtils.getLocalCluster(1)), "UTF-8");
  } else if (MetadataStore.STORES_KEY.equals(keyString)) {
    return ByteUtils.getBytes(
        new StoreDefinitionsMapper().writeStoreList(ServerTestUtils.getStoreDefs(1)), "UTF-8");
  } else if (MetadataStore.SERVER_STATE_KEY.equals(keyString)) {
    int i = (int) (Math.random() * VoldemortState.values().length);
    return ByteUtils.getBytes(VoldemortState.values()[i].toString(), "UTF-8");
  } else if (MetadataStore.REBALANCING_STEAL_INFO.equals(keyString)) {
    int size = (int) (Math.random() * 10) + 1;
    List<Integer> partition = new ArrayList<Integer>();
    for (int i = 0; i < size; i++) {
      // parenthesize before casting: (int) Math.random() would always be 0
      partition.add((int) (Math.random() * 10));
    }
    List<Integer> partitionIds = partition;
    HashMap<String, List<Integer>> storeToReplicaToPartitionList = Maps.newHashMap();
    storeToReplicaToPartitionList.put("test", partitionIds);
    return ByteUtils.getBytes(
        new RebalancerState(
                Arrays.asList(
                    new RebalanceTaskInfo(
                        0,
                        (int) (Math.random() * 5),
                        storeToReplicaToPartitionList,
                        ServerTestUtils.getLocalCluster(1))))
            .toJsonString(),
        "UTF-8");
  }
  throw new RuntimeException("Unhandled key:" + keyString + " passed");
}
// We use a generated class for the exercise. public void testRoundtripAvroWithHandShakeRequest() { String className = "java=org.apache.avro.ipc.HandshakeRequest"; HandshakeRequest req = new HandshakeRequest(); // set a few values to avoid NPEs req.clientHash = new MD5(); req.clientProtocol = new Utf8(""); req.serverHash = new MD5(); AvroSpecificSerializer<HandshakeRequest> serializer = new AvroSpecificSerializer<HandshakeRequest>(className); byte[] bytes = serializer.toBytes(req); byte[] bytes2 = serializer.toBytes(req); assertEquals(ByteUtils.compare(bytes, bytes2), 0); assertTrue(serializer.toObject(bytes).equals(req)); assertTrue(serializer.toObject(bytes2).equals(req)); }
private <R> String formatNodeValuesFromGetVersions( List<Response<ByteArray, List<Version>>> results) { // log all retrieved values StringBuilder builder = new StringBuilder(); builder.append("{"); for (Response<ByteArray, List<Version>> r : results) { builder.append( "(nodeId=" + r.getNode().getId() + ", key=" + ByteUtils.toHexString(r.getKey().get()) + ", retrieved= " + r.getValue() + "), "); } builder.append("}"); return builder.toString(); }
/**
 * Given a filesystem, path and buffer size, reads the file contents and presents them as a
 * string
 *
 * @param fs Underlying filesystem
 * @param path The file to read
 * @param bufferSize The buffer size to use for reading
 * @return The contents of the file as a string
 * @throws IOException
 */
public static String readFileContents(FileSystem fs, Path path, int bufferSize)
    throws IOException {
  if (bufferSize <= 0) return "";
  FSDataInputStream input = fs.open(path);
  try {
    byte[] buffer = new byte[bufferSize];
    ByteArrayOutputStream stream = new ByteArrayOutputStream();
    while (true) {
      int read = input.read(buffer);
      if (read < 0) break;
      // Write only the bytes actually read; the original truncated-copy
      // reassignment shrank the read buffer after every short read
      stream.write(buffer, 0, read);
    }
    return new String(stream.toByteArray());
  } finally {
    input.close();
  }
}
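// Usage sketch: read a local file through the Hadoop FileSystem API. The path
// and buffer size here are hypothetical.
public static String readLocalFile() throws IOException {
  FileSystem fs = FileSystem.getLocal(new Configuration());
  return readFileContents(fs, new Path("/tmp/cluster.xml"), 4096);
}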
/**
 * Create the voldemort key and value from the input key and value and map it out for each of
 * the responsible voldemort nodes
 *
 * <p>The output key is the md5 of the serialized key returned by makeKey(). The output value is
 * the node_id & partition_id of the responsible node followed by the serialized value returned
 * by makeValue(), or, if the saveKeys flag is set, the serialized key and the serialized value
 */
public void map(
    K key, V value, OutputCollector<BytesWritable, BytesWritable> output, Reporter reporter)
    throws IOException {
  byte[] keyBytes = keySerializer.toBytes(makeKey(key, value));
  byte[] valBytes = valueSerializer.toBytes(makeValue(key, value));

  // Compress key and values if required
  if (keySerializerDefinition.hasCompression()) {
    keyBytes = keyCompressor.deflate(keyBytes);
  }
  if (valueSerializerDefinition.hasCompression()) {
    valBytes = valueCompressor.deflate(valBytes);
  }

  // Get the output byte arrays ready to populate
  byte[] outputValue;
  BytesWritable outputKey;

  // Leave an initial offset for (a) node id and (b) partition id, since they are written later
  int offsetTillNow = 2 * ByteUtils.SIZE_OF_INT;

  // Layout: 4 bytes (node id) + 4 bytes (partition id) + 1 byte (replica type:
  // primary | secondary | tertiary ...) + 4 bytes (key size) + 4 bytes (value size)
  // + key + value
  outputValue =
      new byte
          [valBytes.length
              + keyBytes.length
              + ByteUtils.SIZE_OF_BYTE
              + 4 * ByteUtils.SIZE_OF_INT];

  // Write key length - leave a byte for the replica type
  offsetTillNow += ByteUtils.SIZE_OF_BYTE;
  ByteUtils.writeInt(outputValue, keyBytes.length, offsetTillNow);

  // Write value length
  offsetTillNow += ByteUtils.SIZE_OF_INT;
  ByteUtils.writeInt(outputValue, valBytes.length, offsetTillNow);

  // Write key
  offsetTillNow += ByteUtils.SIZE_OF_INT;
  System.arraycopy(keyBytes, 0, outputValue, offsetTillNow, keyBytes.length);

  // Write value
  offsetTillNow += keyBytes.length;
  System.arraycopy(valBytes, 0, outputValue, offsetTillNow, valBytes.length);

  // Generate the MR key - the 16 byte md5 of the serialized key, per the javadoc above
  outputKey = new BytesWritable(ByteUtils.md5(keyBytes));

  // Generate the partition and node list this key is destined for
  List<Integer> partitionList = routingStrategy.getPartitionList(keyBytes);
  Node[] partitionToNode = routingStrategy.getPartitionToNode();

  for (int replicaType = 0; replicaType < partitionList.size(); replicaType++) {
    // Node id
    ByteUtils.writeInt(outputValue, partitionToNode[partitionList.get(replicaType)].getId(), 0);
    // Primary partition id
    ByteUtils.writeInt(outputValue, partitionList.get(0), ByteUtils.SIZE_OF_INT);
    // Replica type
    ByteUtils.writeBytes(
        outputValue, replicaType, 2 * ByteUtils.SIZE_OF_INT, ByteUtils.SIZE_OF_BYTE);
    BytesWritable outputVal = new BytesWritable(outputValue);
    output.collect(outputKey, outputVal);
  }
}
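// Decoding sketch (hypothetical helper, not from the source): pull the header
// fields back out of a record written with the layout above.
public static void dumpRecordHeader(byte[] record) {
  int nodeId = ByteUtils.readInt(record, 0);
  int partitionId = ByteUtils.readInt(record, ByteUtils.SIZE_OF_INT);
  int replicaType = record[2 * ByteUtils.SIZE_OF_INT];
  int offset = 2 * ByteUtils.SIZE_OF_INT + ByteUtils.SIZE_OF_BYTE;
  int keySize = ByteUtils.readInt(record, offset);
  int valueSize = ByteUtils.readInt(record, offset + ByteUtils.SIZE_OF_INT);
  System.out.println(
      "node=" + nodeId + " partition=" + partitionId + " replicaType=" + replicaType
          + " keySize=" + keySize + " valueSize=" + valueSize);
}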
@Override public List<Version> getVersions(final ByteArray key) { StoreUtils.assertValidKey(key); long startTimeMs = -1; long startTimeNs = -1; if (logger.isDebugEnabled()) { startTimeMs = System.currentTimeMillis(); startTimeNs = System.nanoTime(); } BasicPipelineData<List<Version>> pipelineData = new BasicPipelineData<List<Version>>(); if (zoneRoutingEnabled) pipelineData.setZonesRequired(storeDef.getZoneCountReads()); else pipelineData.setZonesRequired(null); pipelineData.setStats(stats); Pipeline pipeline = new Pipeline( Operation.GET_VERSIONS, timeoutConfig.getOperationTimeout(VoldemortOpCode.GET_VERSION_OP_CODE), TimeUnit.MILLISECONDS); StoreRequest<List<Version>> blockingStoreRequest = new StoreRequest<List<Version>>() { @Override public List<Version> request(Store<ByteArray, byte[], byte[]> store) { return store.getVersions(key); } }; if (zoneAffinity.isGetVersionsOpZoneAffinityEnabled()) { pipeline.addEventAction( Event.STARTED, new ConfigureNodesLocalZoneOnly<List<Version>, BasicPipelineData<List<Version>>>( pipelineData, Event.CONFIGURED, failureDetector, storeDef.getRequiredReads(), routingStrategy, key, clientZone)); } else { pipeline.addEventAction( Event.STARTED, new ConfigureNodes<List<Version>, BasicPipelineData<List<Version>>>( pipelineData, Event.CONFIGURED, failureDetector, storeDef.getRequiredReads(), routingStrategy, key, clientZone)); } pipeline.addEventAction( Event.CONFIGURED, new PerformParallelRequests<List<Version>, BasicPipelineData<List<Version>>>( pipelineData, Event.COMPLETED, key, null, failureDetector, storeDef.getPreferredReads(), storeDef.getRequiredReads(), timeoutConfig.getOperationTimeout(VoldemortOpCode.GET_VERSION_OP_CODE), nonblockingStores, Event.INSUFFICIENT_SUCCESSES, Event.INSUFFICIENT_ZONES)); pipeline.addEventAction( Event.INSUFFICIENT_SUCCESSES, new PerformSerialRequests<List<Version>, BasicPipelineData<List<Version>>>( pipelineData, Event.COMPLETED, key, failureDetector, innerStores, storeDef.getPreferredReads(), storeDef.getRequiredReads(), blockingStoreRequest, null)); if (zoneRoutingEnabled) pipeline.addEventAction( Event.INSUFFICIENT_ZONES, new PerformZoneSerialRequests<List<Version>, BasicPipelineData<List<Version>>>( pipelineData, Event.COMPLETED, key, failureDetector, innerStores, blockingStoreRequest)); pipeline.addEvent(Event.STARTED); if (logger.isDebugEnabled()) { logger.debug( "Operation " + pipeline.getOperation().getSimpleName() + " Key " + ByteUtils.toHexString(key.get())); } try { pipeline.execute(); } catch (VoldemortException e) { stats.reportException(e); throw e; } if (pipelineData.getFatalError() != null) throw pipelineData.getFatalError(); List<Version> results = new ArrayList<Version>(); for (Response<ByteArray, List<Version>> response : pipelineData.getResponses()) results.addAll(response.getValue()); if (logger.isDebugEnabled()) { logger.debug( "Finished " + pipeline.getOperation().getSimpleName() + " for key " + ByteUtils.toHexString(key.get()) + " keyRef: " + System.identityHashCode(key) + "; started at " + startTimeMs + " took " + (System.nanoTime() - startTimeNs) + " values: " + formatNodeValuesFromGetVersions(pipelineData.getResponses())); } return results; }
public ByteArray getValidKey() { int i = (int) (Math.random() * TEST_KEYS.size()); String key = TEST_KEYS.get(i); return new ByteArray(ByteUtils.getBytes(key, "UTF-8")); }
protected boolean delete(final ByteArray key, final Version version, long deleteOpTimeout) throws VoldemortException { StoreUtils.assertValidKey(key); long startTimeMs = -1; long startTimeNs = -1; if (logger.isDebugEnabled()) { startTimeMs = System.currentTimeMillis(); startTimeNs = System.nanoTime(); } BasicPipelineData<Boolean> pipelineData = new BasicPipelineData<Boolean>(); if (zoneRoutingEnabled) pipelineData.setZonesRequired(storeDef.getZoneCountWrites()); else pipelineData.setZonesRequired(null); pipelineData.setStoreName(getName()); pipelineData.setStats(stats); Pipeline pipeline = new Pipeline(Operation.DELETE, deleteOpTimeout, TimeUnit.MILLISECONDS); pipeline.setEnableHintedHandoff(isHintedHandoffEnabled()); HintedHandoff hintedHandoff = null; if (isHintedHandoffEnabled()) hintedHandoff = new HintedHandoff( failureDetector, slopStores, nonblockingSlopStores, handoffStrategy, pipelineData.getFailedNodes(), deleteOpTimeout); pipeline.addEventAction( Event.STARTED, new ConfigureNodes<Boolean, BasicPipelineData<Boolean>>( pipelineData, Event.CONFIGURED, failureDetector, storeDef.getRequiredWrites(), routingStrategy, key, clientZone)); pipeline.addEventAction( Event.CONFIGURED, new PerformParallelDeleteRequests<Boolean, BasicPipelineData<Boolean>>( pipelineData, isHintedHandoffEnabled() ? Event.RESPONSES_RECEIVED : Event.COMPLETED, key, failureDetector, storeDef.getPreferredWrites(), storeDef.getRequiredWrites(), deleteOpTimeout, nonblockingStores, hintedHandoff, version)); if (isHintedHandoffEnabled()) { pipeline.addEventAction( Event.RESPONSES_RECEIVED, new PerformDeleteHintedHandoff( pipelineData, Event.COMPLETED, key, version, hintedHandoff)); pipeline.addEventAction( Event.ABORTED, new PerformDeleteHintedHandoff(pipelineData, Event.ERROR, key, version, hintedHandoff)); } pipeline.addEvent(Event.STARTED); if (logger.isDebugEnabled()) { logger.debug( "Operation " + pipeline.getOperation().getSimpleName() + " Key " + ByteUtils.toHexString(key.get())); } try { pipeline.execute(); } catch (VoldemortException e) { stats.reportException(e); throw e; } if (logger.isDebugEnabled()) { logger.debug( "Finished " + pipeline.getOperation().getSimpleName() + " for key " + ByteUtils.toHexString(key.get()) + " keyRef: " + System.identityHashCode(key) + "; started at " + startTimeMs + " took " + (System.nanoTime() - startTimeNs)); } if (pipelineData.getFatalError() != null) throw pipelineData.getFatalError(); for (Response<ByteArray, Boolean> response : pipelineData.getResponses()) { if (response.getValue().booleanValue()) return true; } return false; }
@Test public void testHadoopBuild() throws Exception { // create test data Map<String, String> values = new HashMap<String, String>(); File testDir = TestUtils.createTempDir(); File tempDir = new File(testDir, "temp"), tempDir2 = new File(testDir, "temp2"); File outputDir = new File(testDir, "output"), outputDir2 = new File(testDir, "output2"); File storeDir = TestUtils.createTempDir(testDir); for (int i = 0; i < 200; i++) values.put(Integer.toString(i), Integer.toBinaryString(i)); // write test data to text file File inputFile = File.createTempFile("input", ".txt", testDir); inputFile.deleteOnExit(); StringBuilder contents = new StringBuilder(); for (Map.Entry<String, String> entry : values.entrySet()) contents.append(entry.getKey() + "\t" + entry.getValue() + "\n"); FileUtils.writeStringToFile(inputFile, contents.toString()); String storeName = "test"; SerializerDefinition serDef = new SerializerDefinition("string"); Cluster cluster = ServerTestUtils.getLocalCluster(1); // Test backwards compatibility StoreDefinition def = new StoreDefinitionBuilder() .setName(storeName) .setType(ReadOnlyStorageConfiguration.TYPE_NAME) .setKeySerializer(serDef) .setValueSerializer(serDef) .setRoutingPolicy(RoutingTier.CLIENT) .setRoutingStrategyType(RoutingStrategyType.CONSISTENT_STRATEGY) .setReplicationFactor(1) .setPreferredReads(1) .setRequiredReads(1) .setPreferredWrites(1) .setRequiredWrites(1) .build(); HadoopStoreBuilder builder = new HadoopStoreBuilder( new Configuration(), TextStoreMapper.class, TextInputFormat.class, cluster, def, 64 * 1024, new Path(tempDir2.getAbsolutePath()), new Path(outputDir2.getAbsolutePath()), new Path(inputFile.getAbsolutePath()), CheckSumType.MD5, saveKeys, false); builder.build(); builder = new HadoopStoreBuilder( new Configuration(), TextStoreMapper.class, TextInputFormat.class, cluster, def, 64 * 1024, new Path(tempDir.getAbsolutePath()), new Path(outputDir.getAbsolutePath()), new Path(inputFile.getAbsolutePath()), CheckSumType.MD5, saveKeys, false); builder.build(); // Check if checkSum is generated in outputDir File nodeFile = new File(outputDir, "node-0"); // Check if metadata file exists File metadataFile = new File(nodeFile, ".metadata"); Assert.assertTrue(metadataFile.exists()); ReadOnlyStorageMetadata metadata = new ReadOnlyStorageMetadata(metadataFile); if (saveKeys) Assert.assertEquals( metadata.get(ReadOnlyStorageMetadata.FORMAT), ReadOnlyStorageFormat.READONLY_V2.getCode()); else Assert.assertEquals( metadata.get(ReadOnlyStorageMetadata.FORMAT), ReadOnlyStorageFormat.READONLY_V1.getCode()); Assert.assertEquals( metadata.get(ReadOnlyStorageMetadata.CHECKSUM_TYPE), CheckSum.toString(CheckSumType.MD5)); // Check contents of checkSum file byte[] md5 = Hex.decodeHex(((String) metadata.get(ReadOnlyStorageMetadata.CHECKSUM)).toCharArray()); byte[] checkSumBytes = CheckSumTests.calculateCheckSum(nodeFile.listFiles(), CheckSumType.MD5); Assert.assertEquals(0, ByteUtils.compare(checkSumBytes, md5)); // check if fetching works HdfsFetcher fetcher = new HdfsFetcher(); // Fetch to version directory File versionDir = new File(storeDir, "version-0"); fetcher.fetch(nodeFile.getAbsolutePath(), versionDir.getAbsolutePath()); Assert.assertTrue(versionDir.exists()); // open store @SuppressWarnings("unchecked") Serializer<Object> serializer = (Serializer<Object>) new DefaultSerializerFactory().getSerializer(serDef); ReadOnlyStorageEngine engine = new ReadOnlyStorageEngine( storeName, searchStrategy, new RoutingStrategyFactory().updateRoutingStrategy(def, cluster), 0, 
storeDir, 1); Store<Object, Object, Object> store = SerializingStore.wrap(engine, serializer, serializer, serializer); // check values for (Map.Entry<String, String> entry : values.entrySet()) { List<Versioned<Object>> found = store.get(entry.getKey(), null); Assert.assertEquals("Incorrect number of results", 1, found.size()); Assert.assertEquals(entry.getValue(), found.get(0).getValue()); } // also check the iterator - first key iterator... try { ClosableIterator<ByteArray> keyIterator = engine.keys(); if (!saveKeys) { fail("Should have thrown an exception since this RO format does not support iterators"); } int numElements = 0; while (keyIterator.hasNext()) { Assert.assertTrue(values.containsKey(serializer.toObject(keyIterator.next().get()))); numElements++; } Assert.assertEquals(numElements, values.size()); } catch (UnsupportedOperationException e) { if (saveKeys) { fail("Should not have thrown an exception since this RO format does support iterators"); } } // ... and entry iterator try { ClosableIterator<Pair<ByteArray, Versioned<byte[]>>> entryIterator = engine.entries(); if (!saveKeys) { fail("Should have thrown an exception since this RO format does not support iterators"); } int numElements = 0; while (entryIterator.hasNext()) { Pair<ByteArray, Versioned<byte[]>> entry = entryIterator.next(); Assert.assertEquals( values.get(serializer.toObject(entry.getFirst().get())), serializer.toObject(entry.getSecond().getValue())); numElements++; } Assert.assertEquals(numElements, values.size()); } catch (UnsupportedOperationException e) { if (saveKeys) { fail("Should not have thrown an exception since this RO format does support iterators"); } } }
public VAdminProto.AddStoreResponse handleAddStore(VAdminProto.AddStoreRequest request) {
  VAdminProto.AddStoreResponse.Builder response = VAdminProto.AddStoreResponse.newBuilder();

  // don't try to add a store in the middle of rebalancing
  if (metadataStore
          .getServerState()
          .equals(MetadataStore.VoldemortState.REBALANCING_MASTER_SERVER)
      || metadataStore
          .getServerState()
          .equals(MetadataStore.VoldemortState.REBALANCING_CLUSTER)) {
    response.setError(
        ProtoUtils.encodeError(
            errorCodeMapper, new VoldemortException("Rebalancing in progress")));
    return response.build();
  }

  try {
    // adding a store requires decoding the passed in store string
    StoreDefinitionsMapper mapper = new StoreDefinitionsMapper();
    StoreDefinition def = mapper.readStore(new StringReader(request.getStoreDefinition()));

    synchronized (lock) {
      // Only allow a single store to be created at a time; otherwise we'd see concurrent
      // errors when writing the stores.xml file out.
      // (see ConfigurationStorageEngine.put for details)
      if (!storeRepository.hasLocalStore(def.getName())) {
        // open the store
        storageService.openStore(def);

        // update the stores list in the metadata store (this also has the
        // effect of updating the stores.xml file)
        List<StoreDefinition> currentStoreDefs;
        List<Versioned<byte[]>> v = metadataStore.get(MetadataStore.STORES_KEY);

        if (v.size() > 0) {
          Versioned<byte[]> currentValue = v.get(0);
          currentStoreDefs =
              mapper.readStoreList(
                  new StringReader(ByteUtils.getString(currentValue.getValue(), "UTF-8")));
        } else {
          currentStoreDefs = Lists.newArrayList();
        }
        currentStoreDefs.add(def);
        try {
          metadataStore.put(MetadataStore.STORES_KEY, currentStoreDefs);
        } catch (Exception e) {
          throw new VoldemortException(e);
        }
      } else {
        throw new StoreOperationFailureException(
            String.format("Store '%s' already exists on this server", def.getName()));
      }
    }
  } catch (VoldemortException e) {
    response.setError(ProtoUtils.encodeError(errorCodeMapper, e));
    logger.error("handleAddStore failed for request(" + request.toString() + ")", e);
  }
  return response.build();
}
@Override protected Pair<ByteArray, Versioned<byte[]>> get(DatabaseEntry key, DatabaseEntry value) { VectorClock clock = new VectorClock(value.getData()); byte[] bytes = ByteUtils.copy(value.getData(), clock.sizeInBytes(), value.getData().length); return Pair.create(new ByteArray(key.getData()), new Versioned<byte[]>(bytes, clock)); }
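// Inverse sketch of the BDB entry layout split apart above: a stored value is
// the vector clock's serialized bytes immediately followed by the value bytes.
// Hypothetical helper; assumes VectorClock.toBytes().
private static byte[] makeBdbValueEntry(VectorClock clock, byte[] valueBytes) {
  return ByteUtils.cat(clock.toBytes(), valueBytes);
}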
public Map<ByteArray, List<Versioned<byte[]>>> getAll(
    Iterable<ByteArray> keys, Map<ByteArray, byte[]> transforms, long getAllOpTimeoutInMs)
    throws VoldemortException {
  StoreUtils.assertValidKeys(keys);

  long startTimeMs = -1;
  long startTimeNs = -1;

  if (logger.isDebugEnabled()) {
    startTimeMs = System.currentTimeMillis();
    startTimeNs = System.nanoTime();
  }

  boolean allowReadRepair = repairReads && (transforms == null || transforms.size() == 0);

  GetAllPipelineData pipelineData = new GetAllPipelineData();
  if (zoneRoutingEnabled) pipelineData.setZonesRequired(storeDef.getZoneCountReads());
  else pipelineData.setZonesRequired(null);
  pipelineData.setStats(stats);

  Pipeline pipeline = new Pipeline(Operation.GET_ALL, getAllOpTimeoutInMs, TimeUnit.MILLISECONDS);
  pipeline.addEventAction(
      Event.STARTED,
      new GetAllConfigureNodes(
          pipelineData,
          Event.CONFIGURED,
          failureDetector,
          storeDef.getPreferredReads(),
          storeDef.getRequiredReads(),
          routingStrategy,
          keys,
          transforms,
          clientZone,
          zoneAffinity));
  pipeline.addEventAction(
      Event.CONFIGURED,
      new PerformParallelGetAllRequests(
          pipelineData,
          Event.INSUFFICIENT_SUCCESSES,
          failureDetector,
          getAllOpTimeoutInMs,
          nonblockingStores));
  pipeline.addEventAction(
      Event.INSUFFICIENT_SUCCESSES,
      new PerformSerialGetAllRequests(
          pipelineData,
          allowReadRepair ? Event.RESPONSES_RECEIVED : Event.COMPLETED,
          keys,
          failureDetector,
          innerStores,
          storeDef.getPreferredReads(),
          storeDef.getRequiredReads(),
          timeoutConfig.isPartialGetAllAllowed()));

  if (allowReadRepair)
    pipeline.addEventAction(
        Event.RESPONSES_RECEIVED,
        new GetAllReadRepair(
            pipelineData,
            Event.COMPLETED,
            storeDef.getPreferredReads(),
            getAllOpTimeoutInMs,
            nonblockingStores,
            readRepairer));

  pipeline.addEvent(Event.STARTED);
  if (logger.isDebugEnabled()) {
    StringBuilder keyStr = new StringBuilder();
    for (ByteArray key : keys) {
      keyStr.append(ByteUtils.toHexString(key.get()) + ",");
    }
    logger.debug(
        "Operation " + pipeline.getOperation().getSimpleName() + " Keys " + keyStr.toString());
  }
  try {
    pipeline.execute();
  } catch (VoldemortException e) {
    stats.reportException(e);
    throw e;
  }

  if (pipelineData.getFatalError() != null) throw pipelineData.getFatalError();

  if (logger.isDebugEnabled()) {
    logger.debug(
        "Finished "
            + pipeline.getOperation().getSimpleName()
            + " for keys "
            + ByteArray.toHexStrings(keys)
            + " keyRef: "
            + System.identityHashCode(keys)
            + "; started at "
            + startTimeMs
            + " took "
            + (System.nanoTime() - startTimeNs)
            + " values: "
            + formatNodeValuesFromGetAll(pipelineData.getResponses()));
  }

  return pipelineData.getResult();
}
public StreamRequestHandler handleRequest(
    final DataInputStream inputStream, final DataOutputStream outputStream) throws IOException {
  // Another protocol buffers bug here; temporary workaround
  VoldemortAdminRequest.Builder request = VoldemortAdminRequest.newBuilder();
  int size = inputStream.readInt();

  if (logger.isTraceEnabled())
    logger.trace("In handleRequest, request specified size of " + size + " bytes");

  if (size < 0)
    throw new IOException("In handleRequest, request specified size of " + size + " bytes");

  byte[] input = new byte[size];
  ByteUtils.read(inputStream, input);
  request.mergeFrom(input);

  switch (request.getType()) {
    case GET_METADATA:
      ProtoUtils.writeMessage(outputStream, handleGetMetadata(request.getGetMetadata()));
      break;
    case UPDATE_METADATA:
      ProtoUtils.writeMessage(outputStream, handleUpdateMetadata(request.getUpdateMetadata()));
      break;
    case DELETE_PARTITION_ENTRIES:
      ProtoUtils.writeMessage(
          outputStream, handleDeletePartitionEntries(request.getDeletePartitionEntries()));
      break;
    case FETCH_PARTITION_ENTRIES:
      return handleFetchPartitionEntries(request.getFetchPartitionEntries());
    case UPDATE_PARTITION_ENTRIES:
      return handleUpdatePartitionEntries(request.getUpdatePartitionEntries());
    case INITIATE_FETCH_AND_UPDATE:
      ProtoUtils.writeMessage(
          outputStream, handleFetchAndUpdate(request.getInitiateFetchAndUpdate()));
      break;
    case ASYNC_OPERATION_STATUS:
      ProtoUtils.writeMessage(outputStream, handleAsyncStatus(request.getAsyncOperationStatus()));
      break;
    case INITIATE_REBALANCE_NODE:
      ProtoUtils.writeMessage(
          outputStream, handleRebalanceNode(request.getInitiateRebalanceNode()));
      break;
    case ASYNC_OPERATION_LIST:
      ProtoUtils.writeMessage(
          outputStream, handleAsyncOperationList(request.getAsyncOperationList()));
      break;
    case ASYNC_OPERATION_STOP:
      ProtoUtils.writeMessage(
          outputStream, handleAsyncOperationStop(request.getAsyncOperationStop()));
      break;
    case TRUNCATE_ENTRIES:
      ProtoUtils.writeMessage(outputStream, handleTruncateEntries(request.getTruncateEntries()));
      break;
    case ADD_STORE:
      ProtoUtils.writeMessage(outputStream, handleAddStore(request.getAddStore()));
      break;
    default:
      throw new VoldemortException("Unknown operation " + request.getType());
  }
  return null;
}
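// Client-side framing sketch matching the reader above: an admin request is a
// 4-byte length prefix followed by the serialized protobuf message.
// Hypothetical helper, not from the source.
public static void writeAdminRequest(
    DataOutputStream outputStream, VoldemortAdminRequest request) throws IOException {
  byte[] body = request.toByteArray();
  outputStream.writeInt(body.length);
  outputStream.write(body);
  outputStream.flush();
}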
public List<Versioned<byte[]>> get(String key) throws VoldemortException { return get(new ByteArray(ByteUtils.getBytes(key, "UTF-8"))); }
public void put( ByteArray key, Versioned<byte[]> versioned, byte[] transforms, long putOpTimeoutInMs) throws VoldemortException { long startTimeMs = -1; long startTimeNs = -1; if (logger.isDebugEnabled()) { startTimeMs = System.currentTimeMillis(); startTimeNs = System.nanoTime(); } StoreUtils.assertValidKey(key); PutPipelineData pipelineData = new PutPipelineData(); if (zoneRoutingEnabled) pipelineData.setZonesRequired(storeDef.getZoneCountWrites()); else pipelineData.setZonesRequired(null); pipelineData.setStartTimeNs(System.nanoTime()); pipelineData.setStoreName(getName()); pipelineData.setStats(stats); Pipeline pipeline = new Pipeline(Operation.PUT, putOpTimeoutInMs, TimeUnit.MILLISECONDS); pipeline.setEnableHintedHandoff(isHintedHandoffEnabled()); HintedHandoff hintedHandoff = null; // Get the correct type of configure nodes action depending on the store // requirements AbstractConfigureNodes<ByteArray, Void, PutPipelineData> configureNodes = makeNodeConfigurationForPut(pipelineData, key); if (isHintedHandoffEnabled()) hintedHandoff = new HintedHandoff( failureDetector, slopStores, nonblockingSlopStores, handoffStrategy, pipelineData.getFailedNodes(), putOpTimeoutInMs); pipeline.addEventAction(Event.STARTED, configureNodes); pipeline.addEventAction( Event.CONFIGURED, new PerformSerialPutRequests( pipelineData, isHintedHandoffEnabled() ? Event.RESPONSES_RECEIVED : Event.COMPLETED, key, transforms, failureDetector, innerStores, storeDef.getRequiredWrites(), versioned, time, Event.MASTER_DETERMINED)); pipeline.addEventAction( Event.MASTER_DETERMINED, new PerformParallelPutRequests( pipelineData, Event.RESPONSES_RECEIVED, key, transforms, failureDetector, storeDef.getPreferredWrites(), storeDef.getRequiredWrites(), putOpTimeoutInMs, nonblockingStores, hintedHandoff)); if (isHintedHandoffEnabled()) { pipeline.addEventAction( Event.ABORTED, new PerformPutHintedHandoff( pipelineData, Event.ERROR, key, versioned, transforms, hintedHandoff, time)); pipeline.addEventAction( Event.RESPONSES_RECEIVED, new PerformPutHintedHandoff( pipelineData, Event.HANDOFF_FINISHED, key, versioned, transforms, hintedHandoff, time)); pipeline.addEventAction( Event.HANDOFF_FINISHED, new IncrementClock(pipelineData, Event.COMPLETED, versioned, time)); } else pipeline.addEventAction( Event.RESPONSES_RECEIVED, new IncrementClock(pipelineData, Event.COMPLETED, versioned, time)); pipeline.addEvent(Event.STARTED); if (logger.isDebugEnabled()) { logger.debug( "Operation " + pipeline.getOperation().getSimpleName() + " Key " + ByteUtils.toHexString(key.get())); } try { pipeline.execute(); } catch (VoldemortException e) { stats.reportException(e); throw e; } if (logger.isDebugEnabled()) { logger.debug( "Finished " + pipeline.getOperation().getSimpleName() + " for key " + ByteUtils.toHexString(key.get()) + " keyRef: " + System.identityHashCode(key) + "; started at " + startTimeMs + " took " + (System.nanoTime() - startTimeNs) + " value: " + versioned.getValue() + " (size: " + versioned.getValue().length + ")"); } if (pipelineData.getFatalError() != null) throw pipelineData.getFatalError(); }
public List<Versioned<byte[]>> get( final ByteArray key, final byte[] transforms, final long getOpTimeout) { StoreUtils.assertValidKey(key); long startTimeMs = -1; long startTimeNs = -1; if (logger.isDebugEnabled()) { startTimeMs = System.currentTimeMillis(); startTimeNs = System.nanoTime(); } BasicPipelineData<List<Versioned<byte[]>>> pipelineData = new BasicPipelineData<List<Versioned<byte[]>>>(); if (zoneRoutingEnabled) pipelineData.setZonesRequired(storeDef.getZoneCountReads()); else pipelineData.setZonesRequired(null); pipelineData.setStats(stats); final Pipeline pipeline = new Pipeline(Operation.GET, getOpTimeout, TimeUnit.MILLISECONDS); boolean allowReadRepair = repairReads && transforms == null; StoreRequest<List<Versioned<byte[]>>> blockingStoreRequest = new StoreRequest<List<Versioned<byte[]>>>() { @Override public List<Versioned<byte[]>> request(Store<ByteArray, byte[], byte[]> store) { return store.get(key, transforms); } }; // Get the correct type of configure nodes action depending on the store // requirements AbstractConfigureNodes< ByteArray, List<Versioned<byte[]>>, BasicPipelineData<List<Versioned<byte[]>>>> configureNodes = makeNodeConfigurationForGet(pipelineData, key); pipeline.addEventAction(Event.STARTED, configureNodes); pipeline.addEventAction( Event.CONFIGURED, new PerformParallelRequests< List<Versioned<byte[]>>, BasicPipelineData<List<Versioned<byte[]>>>>( pipelineData, allowReadRepair ? Event.RESPONSES_RECEIVED : Event.COMPLETED, key, transforms, failureDetector, storeDef.getPreferredReads(), storeDef.getRequiredReads(), getOpTimeout, nonblockingStores, Event.INSUFFICIENT_SUCCESSES, Event.INSUFFICIENT_ZONES)); pipeline.addEventAction( Event.INSUFFICIENT_SUCCESSES, new PerformSerialRequests< List<Versioned<byte[]>>, BasicPipelineData<List<Versioned<byte[]>>>>( pipelineData, allowReadRepair ? Event.RESPONSES_RECEIVED : Event.COMPLETED, key, failureDetector, innerStores, storeDef.getPreferredReads(), storeDef.getRequiredReads(), blockingStoreRequest, null)); if (allowReadRepair) pipeline.addEventAction( Event.RESPONSES_RECEIVED, new ReadRepair<BasicPipelineData<List<Versioned<byte[]>>>>( pipelineData, Event.COMPLETED, storeDef.getPreferredReads(), getOpTimeout, nonblockingStores, readRepairer)); if (zoneRoutingEnabled) pipeline.addEventAction( Event.INSUFFICIENT_ZONES, new PerformZoneSerialRequests< List<Versioned<byte[]>>, BasicPipelineData<List<Versioned<byte[]>>>>( pipelineData, allowReadRepair ? Event.RESPONSES_RECEIVED : Event.COMPLETED, key, failureDetector, innerStores, blockingStoreRequest)); pipeline.addEvent(Event.STARTED); if (logger.isDebugEnabled()) { logger.debug( "Operation " + pipeline.getOperation().getSimpleName() + " Key " + ByteUtils.toHexString(key.get())); } try { pipeline.execute(); } catch (VoldemortException e) { stats.reportException(e); throw e; } if (pipelineData.getFatalError() != null) throw pipelineData.getFatalError(); List<Versioned<byte[]>> results = new ArrayList<Versioned<byte[]>>(); for (Response<ByteArray, List<Versioned<byte[]>>> response : pipelineData.getResponses()) { List<Versioned<byte[]>> value = response.getValue(); if (value != null) results.addAll(value); } if (logger.isDebugEnabled()) { logger.debug( "Finished " + pipeline.getOperation().getSimpleName() + " for key " + ByteUtils.toHexString(key.get()) + " keyRef: " + System.identityHashCode(key) + "; started at " + startTimeMs + " took " + (System.nanoTime() - startTimeNs) + " values: " + formatNodeValuesFromGet(pipelineData.getResponses())); } return results; }
public static int chunk(byte[] key, int numChunks) {
  // Math.abs(Integer.MIN_VALUE) overflows and is still negative, so clamp it
  // to zero with Math.max before taking the modulo
  return Math.max(0, Math.abs(ByteUtils.readInt(key, 0))) % numChunks;
}
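// Edge-case sketch: Math.abs(Integer.MIN_VALUE) overflows and stays negative,
// so without the Math.max clamp the modulo could yield a negative chunk index.
public static void chunkEdgeCase() {
  byte[] key = new byte[] {(byte) 0x80, 0, 0, 0}; // readInt yields Integer.MIN_VALUE
  assert Math.abs(Integer.MIN_VALUE) == Integer.MIN_VALUE; // overflow: still negative
  assert chunk(key, 8) == 0; // the clamp maps such keys to chunk 0
}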