@Override
@Test
public void testFetchedEqualsPut() throws Exception {
  // Verifies that a value written with put() comes back byte-for-byte from get().
  // Fixed typo in the progress message ("Fetchhed" -> "Fetched").
  System.out.println(" Testing Fetched equals put ");
  ByteArray key = getKey();
  Store<ByteArray, byte[], byte[]> store = getStore();
  VectorClock clock = getClock(1, 1, 2, 3, 3, 4);
  byte[] value = getValue();
  System.out.println("Value chosen : " + value);

  // The store must be empty for this key before the put.
  List<Versioned<byte[]>> resultList = store.get(key, null);
  assertNotNull("Null result list obtained from a get request", resultList);
  assertEquals("Store not empty at start!", 0, resultList.size());

  Versioned<byte[]> versioned = new Versioned<byte[]>(value, clock);
  store.put(key, versioned, null);

  List<Versioned<byte[]>> found = store.get(key, null);
  assertEquals("Should only be one version stored.", 1, found.size());

  // Dump both byte arrays to aid debugging on failure.
  System.out.println("individual bytes");
  System.out.println("input");
  printBytes(versioned.getValue());
  System.out.println("found");
  printBytes(found.get(0).getValue());
  assertTrue("Values not equal!", valuesEqual(versioned.getValue(), found.get(0).getValue()));
}
@Test public void testRecoverData() { // use store with replication 2, required write 2 for this test. String testStoreName = "test-recovery-data"; HashMap<ByteArray, byte[]> entrySet = ServerTestUtils.createRandomKeyValuePairs(TEST_STREAM_KEYS_SIZE); // insert it into server-0 store Store<ByteArray, byte[]> store = getStore(0, testStoreName); for (Entry<ByteArray, byte[]> entry : entrySet.entrySet()) { store.put(entry.getKey(), new Versioned<byte[]>(entry.getValue())); } // assert server 1 is empty store = getStore(1, testStoreName); for (Entry<ByteArray, byte[]> entry : entrySet.entrySet()) { assertSame("entry should NOT be present at store", 0, store.get(entry.getKey()).size()); } // recover all data adminClient.restoreDataFromReplications(1, 2); // assert server 1 has all entries for its partitions store = getStore(1, testStoreName); for (Entry<ByteArray, byte[]> entry : entrySet.entrySet()) { ByteArray key = entry.getKey(); assertSame("entry should be present for key " + key, 1, store.get(entry.getKey()).size()); assertEquals( "entry value should match", new String(entry.getValue()), new String(store.get(entry.getKey()).get(0).getValue())); } }
@Test public void testUpdate() { final HashMap<ByteArray, byte[]> entrySet = ServerTestUtils.createRandomKeyValuePairs(TEST_STREAM_KEYS_SIZE); Iterator<Pair<ByteArray, Versioned<byte[]>>> iterator = new AbstractIterator<Pair<ByteArray, Versioned<byte[]>>>() { final Iterator<Entry<ByteArray, byte[]>> entrySetItr = entrySet.entrySet().iterator(); @Override protected Pair<ByteArray, Versioned<byte[]>> computeNext() { while (entrySetItr.hasNext()) { Entry<ByteArray, byte[]> entry = entrySetItr.next(); return new Pair<ByteArray, Versioned<byte[]>>( entry.getKey(), new Versioned<byte[]>(entry.getValue())); } return endOfData(); } }; getAdminClient().updateEntries(0, testStoreName, iterator, null); // check updated values Store<ByteArray, byte[]> store = getStore(0, testStoreName); for (Entry<ByteArray, byte[]> entry : entrySet.entrySet()) { assertNotSame("entry should be present at store", 0, store.get(entry.getKey()).size()); assertEquals( "entry value should match", new String(entry.getValue()), new String(store.get(entry.getKey()).get(0).getValue())); } }
protected void initializeMetadataVersions(List<StoreDefinition> storeDefs) { Store<ByteArray, byte[], byte[]> versionStore = storeRepository.getLocalStore( SystemStoreConstants.SystemStoreName.voldsys$_metadata_version_persistence.name()); Properties props = new Properties(); try { boolean isPropertyAdded = false; ByteArray metadataVersionsKey = new ByteArray(SystemStoreConstants.VERSIONS_METADATA_KEY.getBytes()); List<Versioned<byte[]>> versionList = versionStore.get(metadataVersionsKey, null); VectorClock newClock = null; if (versionList != null && versionList.size() > 0) { byte[] versionsByteArray = versionList.get(0).getValue(); if (versionsByteArray != null) { props.load(new ByteArrayInputStream(versionsByteArray)); } newClock = (VectorClock) versionList.get(0).getVersion(); newClock = newClock.incremented(0, System.currentTimeMillis()); } else { newClock = new VectorClock(); } // Check if version exists for cluster.xml if (!props.containsKey(SystemStoreConstants.CLUSTER_VERSION_KEY)) { props.setProperty(SystemStoreConstants.CLUSTER_VERSION_KEY, "0"); isPropertyAdded = true; } // Check if version exists for stores.xml if (!props.containsKey(SystemStoreConstants.STORES_VERSION_KEY)) { props.setProperty(SystemStoreConstants.STORES_VERSION_KEY, "0"); isPropertyAdded = true; } // Check if version exists for each store for (StoreDefinition def : storeDefs) { if (!props.containsKey(def.getName())) { props.setProperty(def.getName(), "0"); isPropertyAdded = true; } } if (isPropertyAdded) { StringBuilder finalVersionList = new StringBuilder(); for (String propName : props.stringPropertyNames()) { finalVersionList.append(propName + "=" + props.getProperty(propName) + "\n"); } versionStore.put( metadataVersionsKey, new Versioned<byte[]>(finalVersionList.toString().getBytes(), newClock), null); } } catch (Exception e) { logger.error("Error while intializing metadata versions ", e); } }
/** @throws IOException */ @Test public void testFetchAndUpdate() throws IOException { HashMap<ByteArray, byte[]> entrySet = ServerTestUtils.createRandomKeyValuePairs(TEST_STREAM_KEYS_SIZE); List<Integer> fetchAndUpdatePartitionsList = Arrays.asList(0, 2); // insert it into server-0 store int fetchPartitionKeyCount = 0; Store<ByteArray, byte[]> store = getStore(0, testStoreName); for (Entry<ByteArray, byte[]> entry : entrySet.entrySet()) { store.put(entry.getKey(), new Versioned<byte[]>(entry.getValue())); if (isKeyPartition(entry.getKey(), 0, testStoreName, fetchAndUpdatePartitionsList)) { fetchPartitionKeyCount++; } } // assert that server1 is empty. store = getStore(1, testStoreName); for (Entry<ByteArray, byte[]> entry : entrySet.entrySet()) assertEquals("server1 should be empty at start.", 0, store.get(entry.getKey()).size()); // do fetch And update call server1 <-- server0 AdminClient client = getAdminClient(); int id = client.migratePartitions(0, 1, testStoreName, fetchAndUpdatePartitionsList, null); client.waitForCompletion(1, id, 60, TimeUnit.SECONDS); // check values int count = 0; store = getStore(1, testStoreName); for (Entry<ByteArray, byte[]> entry : entrySet.entrySet()) { if (isKeyPartition(entry.getKey(), 0, testStoreName, fetchAndUpdatePartitionsList)) { assertEquals( "server1 store should contain fetchAndupdated partitions.", 1, store.get(entry.getKey()).size()); assertEquals( "entry value should match", new String(entry.getValue()), new String(store.get(entry.getKey()).get(0).getValue())); count++; } } // assert all keys for asked partitions are returned. assertEquals("All keys for asked partitions should be received", fetchPartitionKeyCount, count); }
@Test
public void testAllUp() {
  // With all nodes up, a get for a previously stored key must succeed.
  try {
    List<Versioned<String>> versioneds = client.get("AB", null);
    // FIX: JUnit's assertEquals takes (expected, actual); the arguments were
    // reversed, which produces a misleading failure message.
    assertEquals("AB", versioneds.get(0).getValue());
  } catch (InsufficientOperationalNodesException e) {
    fail("Failed with exception: " + e);
  }
}
// check the basic rebalanceNode call. @Test public void testRebalanceNode() { HashMap<ByteArray, byte[]> entrySet = ServerTestUtils.createRandomKeyValuePairs(TEST_STREAM_KEYS_SIZE); List<Integer> fetchAndUpdatePartitionsList = Arrays.asList(0, 2); // insert it into server-0 store int fetchPartitionKeyCount = 0; Store<ByteArray, byte[]> store = getStore(0, testStoreName); for (Entry<ByteArray, byte[]> entry : entrySet.entrySet()) { store.put(entry.getKey(), new Versioned<byte[]>(entry.getValue())); if (isKeyPartition(entry.getKey(), 0, testStoreName, fetchAndUpdatePartitionsList)) { fetchPartitionKeyCount++; } } List<Integer> rebalancePartitionList = Arrays.asList(1, 3); RebalancePartitionsInfo stealInfo = new RebalancePartitionsInfo( 1, 0, rebalancePartitionList, new ArrayList<Integer>(0), Arrays.asList(testStoreName), 0); int asyncId = adminClient.rebalanceNode(stealInfo); assertNotSame("Got a valid rebalanceAsyncId", -1, asyncId); getAdminClient().waitForCompletion(1, asyncId, 120, TimeUnit.SECONDS); // assert data is copied correctly store = getStore(1, testStoreName); for (Entry<ByteArray, byte[]> entry : entrySet.entrySet()) { if (isKeyPartition(entry.getKey(), 1, testStoreName, rebalancePartitionList)) { assertSame("entry should be present at store", 1, store.get(entry.getKey()).size()); assertEquals( "entry value should match", new String(entry.getValue()), new String(store.get(entry.getKey()).get(0).getValue())); } } }
@Test public void testLocalZonePartialDownSufficientReads() { // turn off one node in same zone as client so that reads can still // complete this.vservers.get(cluster.getNodeIdsInZone(clientZoneId).iterator().next()).stop(); try { client.get("AB", null); } catch (InsufficientOperationalNodesException e) { fail("Failed with exception: " + e); } }
// Executes a GET against the store and translates the outcome — either the
// list of versioned values or a VoldemortException — into a protobuf
// GetResponse.
private VProto.GetResponse handleGet(VProto.GetRequest request, Store<ByteArray, byte[]> store) {
  VProto.GetResponse.Builder builder = VProto.GetResponse.newBuilder();
  try {
    ByteArray key = ProtoUtils.decodeBytes(request.getKey());
    for (Versioned<byte[]> versioned : store.get(key)) {
      builder.addVersioned(ProtoUtils.encodeVersioned(versioned));
    }
  } catch (VoldemortException e) {
    // Errors are reported inside the response rather than propagated.
    builder.setError(ProtoUtils.encodeError(getErrorMapper(), e));
  }
  return builder.build();
}
@Test
public void testLocalZoneDown() {
  // Stop every node in the client's zone; a subsequent get must then fail
  // fast rather than hang.
  for (Integer nodeId : cluster.getNodeIdsInZone(clientZoneId)) {
    this.vservers.get(nodeId).stop();
  }

  try {
    client.get("AB", null);
    fail("Did not fail fast");
  } catch (InsufficientOperationalNodesException expected) {
    // expected: no node in the local zone can serve the read
  }
}
// Fetches the single metadata version stored for a key. Metadata keys must
// have exactly one version: zero versions or more than one is an error.
private Versioned<String> getInnerValue(String key) throws VoldemortException {
  List<Versioned<String>> values = innerStore.get(key);

  if (values.size() > 1)
    throw new VoldemortException(
        "Inconsistent metadata found: expected 1 version but found "
            + values.size()
            + " for key:"
            + key);

  if (values.isEmpty()) throw new VoldemortException("No metadata found for required key:" + key);

  return values.get(0);
}
/**
 * Performs a back-door proxy get to {@link
 * voldemort.client.rebalance.RebalancePartitionsInfo#getDonorId() getDonorId}
 *
 * @param key Key
 * @param donorNodeId donor node id
 * @param transform transform passed through to the donor store's get call
 * @return the versioned values returned by the donor node for the key
 * @throws ProxyUnreachableException if donor node can't be reached
 */
private List<Versioned<byte[]>> proxyGet(ByteArray key, int donorNodeId, byte[] transform) {
  Node donorNode = metadata.getCluster().getNodeById(donorNodeId);
  checkNodeAvailable(donorNode);
  // Timestamp taken before the remote call so success/failure latency can be recorded.
  long startNs = System.nanoTime();
  try {
    Store<ByteArray, byte[], byte[]> redirectingStore =
        getRedirectingSocketStore(getName(), donorNodeId);
    List<Versioned<byte[]>> values = redirectingStore.get(key, transform);
    recordSuccess(donorNode, startNs);
    return values;
  } catch (UnreachableStoreException e) {
    // Record the failure, then rethrow as a proxy-specific exception,
    // preserving the original cause.
    recordException(donorNode, startNs, e);
    throw new ProxyUnreachableException("Failed to reach proxy node " + donorNode, e);
  }
}
// Every store operation (put/get/getAll/delete) must reject a null key with
// either IllegalArgumentException or NullPointerException; both are accepted.
@Override
@Test
public void testNullKeys() throws Exception {
  System.out.println(" Testing null Keys ");
  Store<ByteArray, byte[], byte[]> store = getStore();
  try {
    store.put(null, new Versioned<byte[]>(getValue(), getNewIncrementedVectorClock()), null);
    fail("Store should not put null keys!");
  } catch (IllegalArgumentException e) {
    // this is good
  } catch (NullPointerException npe) {
    // this is good
  }
  try {
    store.get(null, null);
    fail("Store should not get null keys!");
  } catch (IllegalArgumentException e) {
    // this is good
  } catch (NullPointerException npe) {
    // this is good
  }
  try {
    // null key-collection argument
    store.getAll(null, null);
    fail("Store should not getAll null keys!");
  } catch (IllegalArgumentException e) {
    // this is good
  } catch (NullPointerException npe) {
    // this is good
  }
  try {
    // non-null collection containing a null key
    store.getAll(
        Collections.<ByteArray>singleton(null),
        Collections.<ByteArray, byte[]>singletonMap(null, null));
    fail("Store should not getAll null keys!");
  } catch (IllegalArgumentException e) {
    // this is good
  } catch (NullPointerException npe) {
    // this is good
  }
  try {
    store.delete(null, new VectorClock());
    fail("Store should not delete null keys!");
  } catch (IllegalArgumentException e) {
    // this is good
  } catch (NullPointerException npe) {
    // this is good
  }
}
@Test public void testLocalZonePartialDownInSufficientReads() { // Stop all but one node in same zone as client. This is not sufficient // for zone reads. Set<Integer> nodeIds = cluster.getNodeIdsInZone(clientZoneId); nodeIds.remove(nodeIds.iterator().next()); for (Integer nodeId : nodeIds) { this.vservers.get(nodeId).stop(); } try { client.get("AB", null); fail("Did not fail fast"); } catch (InsufficientOperationalNodesException e) { } }
@Override
@Test
public void testGetVersions() throws Exception {
  // Put a single versioned value, then verify getVersions() agrees with the
  // version reported by get(), and that an unwritten key has no versions.
  List<ByteArray> keys = getKeys(2);
  ByteArray key = keys.get(0);
  byte[] value = getValue();
  VectorClock vc = getClock(0, 0);
  Store<ByteArray, byte[], byte[]> store = getStore();
  store.put(key, Versioned.value(value, vc), null);

  List<Versioned<byte[]>> versioneds = store.get(key, null);
  List<Version> versions = store.getVersions(key);
  assertEquals(1, versioneds.size());
  assertTrue(versions.size() > 0);

  // Every reported version must match the version of the stored value.
  Version storedVersion = versioneds.get(0).getVersion();
  for (Version reported : versions) {
    assertEquals(storedVersion, reported);
  }

  // A key that was never written has no versions.
  assertEquals(0, store.getVersions(keys.get(1)).size());
}
@Test public void testTruncate() throws Exception { HashMap<ByteArray, byte[]> entrySet = ServerTestUtils.createRandomKeyValuePairs(TEST_STREAM_KEYS_SIZE); // insert it into server-0 store Store<ByteArray, byte[]> store = getStore(0, testStoreName); for (Entry<ByteArray, byte[]> entry : entrySet.entrySet()) { store.put(entry.getKey(), new Versioned<byte[]>(entry.getValue())); } // do truncate request getAdminClient().truncate(0, testStoreName); store = getStore(0, testStoreName); for (Entry<ByteArray, byte[]> entry : entrySet.entrySet()) { assertEquals("Deleted key should be missing.", 0, store.get(entry.getKey()).size()); } }
@Test
public void testGetWithBinaryData() throws Exception {
  // Round-trip a value containing every possible byte to make sure no byte
  // is mangled on the way through the store.
  Store<ByteArray, byte[], byte[]> store = getStore();
  byte[] allPossibleBytes = getAllPossibleBytes();
  ByteArray key = new ByteArray(allPossibleBytes);
  VectorClock vc = getClock(0, 0);
  Versioned<byte[]> stored = new Versioned<byte[]>(allPossibleBytes, vc);
  store.put(key, stored, null);

  List<Versioned<byte[]>> found = store.get(key, null);
  assertEquals("Should only be one version stored.", 1, found.size());

  // Dump both byte arrays to aid debugging on failure.
  System.out.println("individual bytes");
  System.out.println("input");
  printBytes(stored.getValue());
  System.out.println("found");
  printBytes(found.get(0).getValue());
  assertTrue("Values not equal!", valuesEqual(stored.getValue(), found.get(0).getValue()));
}
public void testDeletePartitionEntries() { HashMap<ByteArray, byte[]> entrySet = ServerTestUtils.createRandomKeyValuePairs(TEST_STREAM_KEYS_SIZE); // insert it into server-0 store Store<ByteArray, byte[]> store = getStore(0, testStoreName); for (Entry<ByteArray, byte[]> entry : entrySet.entrySet()) { store.put(entry.getKey(), new Versioned<byte[]>(entry.getValue())); } List<Integer> deletePartitionsList = Arrays.asList(0, 2); // do delete partitions request getAdminClient().deletePartitions(0, testStoreName, deletePartitionsList, null); store = getStore(0, testStoreName); for (Entry<ByteArray, byte[]> entry : entrySet.entrySet()) { if (isKeyPartition(entry.getKey(), 0, testStoreName, deletePartitionsList)) { assertEquals("deleted partitions should be missing.", 0, store.get(entry.getKey()).size()); } } }
/**
 * End-to-end test of the Hadoop read-only store build pipeline: builds a store
 * twice from a tab-separated text input, verifies the generated metadata
 * (format code and MD5 checksum), fetches the build output with HdfsFetcher,
 * opens it as a ReadOnlyStorageEngine, and checks every key/value plus the key
 * and entry iterators. Iterator support depends on the 'saveKeys' field:
 * READONLY_V2 (saveKeys) supports iteration, READONLY_V1 does not.
 */
@Test
public void testHadoopBuild() throws Exception {
  // create test data
  Map<String, String> values = new HashMap<String, String>();
  File testDir = TestUtils.createTempDir();
  File tempDir = new File(testDir, "temp"), tempDir2 = new File(testDir, "temp2");
  File outputDir = new File(testDir, "output"), outputDir2 = new File(testDir, "output2");
  File storeDir = TestUtils.createTempDir(testDir);
  for (int i = 0; i < 200; i++) values.put(Integer.toString(i), Integer.toBinaryString(i));

  // write test data to text file (one "key\tvalue" pair per line)
  File inputFile = File.createTempFile("input", ".txt", testDir);
  inputFile.deleteOnExit();
  StringBuilder contents = new StringBuilder();
  for (Map.Entry<String, String> entry : values.entrySet())
    contents.append(entry.getKey() + "\t" + entry.getValue() + "\n");
  FileUtils.writeStringToFile(inputFile, contents.toString());

  String storeName = "test";
  SerializerDefinition serDef = new SerializerDefinition("string");
  Cluster cluster = ServerTestUtils.getLocalCluster(1);

  // Test backwards compatibility
  StoreDefinition def =
      new StoreDefinitionBuilder()
          .setName(storeName)
          .setType(ReadOnlyStorageConfiguration.TYPE_NAME)
          .setKeySerializer(serDef)
          .setValueSerializer(serDef)
          .setRoutingPolicy(RoutingTier.CLIENT)
          .setRoutingStrategyType(RoutingStrategyType.CONSISTENT_STRATEGY)
          .setReplicationFactor(1)
          .setPreferredReads(1)
          .setRequiredReads(1)
          .setPreferredWrites(1)
          .setRequiredWrites(1)
          .build();
  // First build writes into the tempDir2/outputDir2 pair; its output is not
  // inspected directly.
  HadoopStoreBuilder builder =
      new HadoopStoreBuilder(
          new Configuration(),
          TextStoreMapper.class,
          TextInputFormat.class,
          cluster,
          def,
          64 * 1024,
          new Path(tempDir2.getAbsolutePath()),
          new Path(outputDir2.getAbsolutePath()),
          new Path(inputFile.getAbsolutePath()),
          CheckSumType.MD5,
          saveKeys,
          false);
  builder.build();

  // Second build into tempDir/outputDir is the one verified below.
  builder =
      new HadoopStoreBuilder(
          new Configuration(),
          TextStoreMapper.class,
          TextInputFormat.class,
          cluster,
          def,
          64 * 1024,
          new Path(tempDir.getAbsolutePath()),
          new Path(outputDir.getAbsolutePath()),
          new Path(inputFile.getAbsolutePath()),
          CheckSumType.MD5,
          saveKeys,
          false);
  builder.build();

  // Check if checkSum is generated in outputDir
  File nodeFile = new File(outputDir, "node-0");

  // Check if metadata file exists
  File metadataFile = new File(nodeFile, ".metadata");
  Assert.assertTrue(metadataFile.exists());

  ReadOnlyStorageMetadata metadata = new ReadOnlyStorageMetadata(metadataFile);
  // The storage format recorded in metadata depends on whether keys were saved.
  if (saveKeys)
    Assert.assertEquals(
        metadata.get(ReadOnlyStorageMetadata.FORMAT),
        ReadOnlyStorageFormat.READONLY_V2.getCode());
  else
    Assert.assertEquals(
        metadata.get(ReadOnlyStorageMetadata.FORMAT),
        ReadOnlyStorageFormat.READONLY_V1.getCode());

  Assert.assertEquals(
      metadata.get(ReadOnlyStorageMetadata.CHECKSUM_TYPE), CheckSum.toString(CheckSumType.MD5));

  // Check contents of checkSum file: the recorded hex MD5 must equal a
  // freshly computed checksum over the node's files.
  byte[] md5 =
      Hex.decodeHex(((String) metadata.get(ReadOnlyStorageMetadata.CHECKSUM)).toCharArray());
  byte[] checkSumBytes = CheckSumTests.calculateCheckSum(nodeFile.listFiles(), CheckSumType.MD5);
  Assert.assertEquals(0, ByteUtils.compare(checkSumBytes, md5));

  // check if fetching works
  HdfsFetcher fetcher = new HdfsFetcher();

  // Fetch to version directory
  File versionDir = new File(storeDir, "version-0");
  fetcher.fetch(nodeFile.getAbsolutePath(), versionDir.getAbsolutePath());
  Assert.assertTrue(versionDir.exists());

  // open store
  @SuppressWarnings("unchecked")
  Serializer<Object> serializer =
      (Serializer<Object>) new DefaultSerializerFactory().getSerializer(serDef);
  ReadOnlyStorageEngine engine =
      new ReadOnlyStorageEngine(
          storeName,
          searchStrategy,
          new RoutingStrategyFactory().updateRoutingStrategy(def, cluster),
          0,
          storeDir,
          1);
  Store<Object, Object, Object> store =
      SerializingStore.wrap(engine, serializer, serializer, serializer);

  // check values: each input key must map to exactly one version with the
  // original value.
  for (Map.Entry<String, String> entry : values.entrySet()) {
    List<Versioned<Object>> found = store.get(entry.getKey(), null);
    Assert.assertEquals("Incorrect number of results", 1, found.size());
    Assert.assertEquals(entry.getValue(), found.get(0).getValue());
  }

  // also check the iterator - first key iterator...
  try {
    ClosableIterator<ByteArray> keyIterator = engine.keys();
    if (!saveKeys) {
      fail("Should have thrown an exception since this RO format does not support iterators");
    }
    int numElements = 0;
    while (keyIterator.hasNext()) {
      Assert.assertTrue(values.containsKey(serializer.toObject(keyIterator.next().get())));
      numElements++;
    }
    Assert.assertEquals(numElements, values.size());
  } catch (UnsupportedOperationException e) {
    if (saveKeys) {
      fail("Should not have thrown an exception since this RO format does support iterators");
    }
  }

  // ... and entry iterator
  try {
    ClosableIterator<Pair<ByteArray, Versioned<byte[]>>> entryIterator = engine.entries();
    if (!saveKeys) {
      fail("Should have thrown an exception since this RO format does not support iterators");
    }
    int numElements = 0;
    while (entryIterator.hasNext()) {
      Pair<ByteArray, Versioned<byte[]>> entry = entryIterator.next();
      Assert.assertEquals(
          values.get(serializer.toObject(entry.getFirst().get())),
          serializer.toObject(entry.getSecond().getValue()));
      numElements++;
    }
    Assert.assertEquals(numElements, values.size());
  } catch (UnsupportedOperationException e) {
    if (saveKeys) {
      fail("Should not have thrown an exception since this RO format does support iterators");
    }
  }
}