Example #1
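A setUp override that builds a MetadataStore from a one-node local cluster and a single store definition.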
 @Override
 public void setUp() throws Exception {
   super.setUp();
   metadataStore =
       ServerTestUtils.createMetadataStore(
           ServerTestUtils.getLocalCluster(1), ServerTestUtils.getStoreDefs(1));
 }
Example #2
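A @Before setUp that creates a two-node local cluster with explicit partition assignments, starts a VoldemortServer for each node, and obtains an AdminClient for the cluster.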
  @Override
  @Before
  public void setUp() throws IOException {
    cluster = ServerTestUtils.getLocalCluster(2, new int[][] {{0, 1, 2, 3}, {4, 5, 6, 7}});
    servers = new VoldemortServer[2];

    servers[0] =
        ServerTestUtils.startVoldemortServer(
            ServerTestUtils.createServerConfig(
                useNio,
                0,
                TestUtils.createTempDir().getAbsolutePath(),
                null,
                storesXmlfile,
                new Properties()),
            cluster);
    servers[1] =
        ServerTestUtils.startVoldemortServer(
            ServerTestUtils.createServerConfig(
                useNio,
                1,
                TestUtils.createTempDir().getAbsolutePath(),
                null,
                storesXmlfile,
                new Properties()),
            cluster);

    adminClient = ServerTestUtils.getAdminClient(cluster);
  }
Example #3
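A helper that returns a valid metadata value for a given metadata key: cluster XML, store definitions XML, a random server state, or randomly generated rebalancing/grandfathering state.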
  public byte[] getValidValue(ByteArray key) {
    String keyString = ByteUtils.getString(key.get(), "UTF-8");
    if (MetadataStore.CLUSTER_KEY.equals(keyString)) {
      return ByteUtils.getBytes(
          new ClusterMapper().writeCluster(ServerTestUtils.getLocalCluster(1)), "UTF-8");
    } else if (MetadataStore.STORES_KEY.equals(keyString)) {
      return ByteUtils.getBytes(
          new StoreDefinitionsMapper().writeStoreList(ServerTestUtils.getStoreDefs(1)), "UTF-8");

    } else if (MetadataStore.SERVER_STATE_KEY.equals(keyString)) {
      int i = (int) (Math.random() * VoldemortState.values().length);
      return ByteUtils.getBytes(VoldemortState.values()[i].toString(), "UTF-8");
    } else if (MetadataStore.REBALANCING_STEAL_INFO.equals(keyString)) {
      int size = (int) (Math.random() * 10);
      List<Integer> partition = new ArrayList<Integer>();
      for (int i = 0; i < size; i++) {
        // parenthesize so the cast applies to the product; without this the expression is always 0
        partition.add((int) (Math.random() * 10));
      }

      return ByteUtils.getBytes(
          new RebalancerState(
                  Arrays.asList(
                      new RebalancePartitionsInfo(
                          0,
                          (int) (Math.random() * 5),
                          partition,
                          new ArrayList<Integer>(0),
                          new ArrayList<Integer>(0),
                          Arrays.asList("testStoreName"),
                          new HashMap<String, String>(),
                          new HashMap<String, String>(),
                          (int) (Math.random() * 3))))
              .toJsonString(),
          "UTF-8");
    } else if (MetadataStore.GRANDFATHERING_INFO.equals(keyString)) {
      int size = (int) (Math.random() * 10);
      List<Integer> partition = new ArrayList<Integer>();
      for (int i = 0; i < size; i++) {
        partition.add((int) (Math.random() * 10));
      }
      return ByteUtils.getBytes(
          new GrandfatherState(
                  Arrays.asList(
                      new RebalancePartitionsInfo(
                          0,
                          (int) (Math.random() * 5),
                          partition,
                          new ArrayList<Integer>(0),
                          new ArrayList<Integer>(0),
                          Arrays.asList("testStoreName"),
                          new HashMap<String, String>(),
                          new HashMap<String, String>(),
                          (int) (Math.random() * 3))))
              .toJsonString(),
          "UTF-8");
    }

    throw new RuntimeException("Unhandled key:" + keyString + " passed");
  }
Example #4
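A newer variant of the same helper that also handles REBALANCING_SOURCE_CLUSTER_XML and builds the rebalancer state from a RebalanceTaskInfo.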
  public byte[] getValidValue(ByteArray key) {
    String keyString = ByteUtils.getString(key.get(), "UTF-8");
    if (MetadataStore.CLUSTER_KEY.equals(keyString)
        || MetadataStore.REBALANCING_SOURCE_CLUSTER_XML.equals(keyString)) {
      return ByteUtils.getBytes(
          new ClusterMapper().writeCluster(ServerTestUtils.getLocalCluster(1)), "UTF-8");
    } else if (MetadataStore.STORES_KEY.equals(keyString)) {
      return ByteUtils.getBytes(
          new StoreDefinitionsMapper().writeStoreList(ServerTestUtils.getStoreDefs(1)), "UTF-8");

    } else if (MetadataStore.SERVER_STATE_KEY.equals(keyString)) {
      int i = (int) (Math.random() * VoldemortState.values().length);
      return ByteUtils.getBytes(VoldemortState.values()[i].toString(), "UTF-8");
    } else if (MetadataStore.REBALANCING_STEAL_INFO.equals(keyString)) {
      int size = (int) (Math.random() * 10) + 1;
      List<Integer> partition = new ArrayList<Integer>();
      for (int i = 0; i < size; i++) {
        partition.add((int) (Math.random() * 10));
      }

      List<Integer> partitionIds = partition;

      HashMap<String, List<Integer>> storeToReplicaToPartitionList = Maps.newHashMap();
      storeToReplicaToPartitionList.put("test", partitionIds);

      return ByteUtils.getBytes(
          new RebalancerState(
                  Arrays.asList(
                      new RebalanceTaskInfo(
                          0,
                          (int) (Math.random() * 5),
                          storeToReplicaToPartitionList,
                          ServerTestUtils.getLocalCluster(1))))
              .toJsonString(),
          "UTF-8");
    }

    throw new RuntimeException("Unhandled key:" + keyString + " passed");
  }
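Example #5
A setUp that creates a two-node local cluster, wraps an InMemoryStorageEngine in a RandomlyFailingDelegatingStore, starts an admin server for node 0, and creates an AdminClient.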
  @Override
  @Before
  public void setUp() throws IOException {
    cluster = ServerTestUtils.getLocalCluster(2, new int[][] {{0, 1, 2, 3}, {4, 5, 6, 7}});
    List<StoreDefinition> storeDefs = ServerTestUtils.getStoreDefs(1);

    failingStorageEngine =
        new RandomlyFailingDelegatingStore<ByteArray, byte[], byte[]>(
            new InMemoryStorageEngine<ByteArray, byte[], byte[]>(storeDefs.get(0).getName()));
    adminServer = getAdminServer(cluster.getNodeById(0), cluster, storeDefs, failingStorageEngine);
    adminClient = ServerTestUtils.getAdminClient(cluster);
    adminServer.start();
  }
Example #6
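A test that exercises the REBALANCING_SOURCE_CLUSTER_XML metadata key: it clears rebalancing state, stores a dummy cluster, stores null, and verifies the persisted value is the empty string.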
  @Test
  public void testRebalacingSourceClusterXmlKey() {
    metadataStore.cleanAllRebalancingState();

    assertTrue("Should be null", null == metadataStore.getRebalancingSourceCluster());

    Cluster dummyCluster = ServerTestUtils.getLocalCluster(2);
    metadataStore.put(MetadataStore.REBALANCING_SOURCE_CLUSTER_XML, dummyCluster);
    assertEquals("Should be equal", dummyCluster, metadataStore.getRebalancingSourceCluster());

    metadataStore.put(MetadataStore.REBALANCING_SOURCE_CLUSTER_XML, (Object) null);
    assertTrue("Should be null", null == metadataStore.getRebalancingSourceCluster());

    List<Versioned<byte[]>> sourceClusterVersions =
        metadataStore.get(MetadataStore.REBALANCING_SOURCE_CLUSTER_XML, null);
    assertTrue("Just one version expected", 1 == sourceClusterVersions.size());
    assertEquals(
        "Empty string should map to null", "", new String(sourceClusterVersions.get(0).getValue()));
  }
Example #7
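A test that repeatedly pushes an updated cluster to node 0 via AdminClient.updateRemoteCluster with an incremented VectorClock, then verifies the stored cluster, the value returned by the admin client, and the version.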
  public void testUpdateClusterMetadata() {
    Cluster updatedCluster = ServerTestUtils.getLocalCluster(4);
    AdminClient client = getAdminClient();
    for (int i = 0; i < NUM_RUNS; i++) {
      VectorClock clock =
          ((VectorClock) client.getRemoteCluster(0).getVersion())
              .incremented(0, System.currentTimeMillis());
      client.updateRemoteCluster(0, updatedCluster, clock);

      assertEquals(
          "Cluster should match",
          updatedCluster,
          getVoldemortServer(0).getMetadataStore().getCluster());
      assertEquals(
          "AdminClient.getMetdata() should match",
          client.getRemoteCluster(0).getValue(),
          updatedCluster);

      // version should match
      assertEquals(
          "versions should match as well.", clock, client.getRemoteCluster(0).getVersion());
    }
  }
Example #8
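An end-to-end Hadoop store build test: it writes key/value pairs to a text file, builds a read-only store with HadoopStoreBuilder, checks the generated metadata and MD5 checksum, fetches the node directory with HdfsFetcher, opens a ReadOnlyStorageEngine, and verifies the stored values plus key/entry iterator behavior, which depends on the saveKeys flag (READONLY_V1 vs READONLY_V2).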
  @Test
  public void testHadoopBuild() throws Exception {
    // create test data
    Map<String, String> values = new HashMap<String, String>();
    File testDir = TestUtils.createTempDir();
    File tempDir = new File(testDir, "temp"), tempDir2 = new File(testDir, "temp2");
    File outputDir = new File(testDir, "output"), outputDir2 = new File(testDir, "output2");
    File storeDir = TestUtils.createTempDir(testDir);
    for (int i = 0; i < 200; i++) values.put(Integer.toString(i), Integer.toBinaryString(i));

    // write test data to text file
    File inputFile = File.createTempFile("input", ".txt", testDir);
    inputFile.deleteOnExit();
    StringBuilder contents = new StringBuilder();
    for (Map.Entry<String, String> entry : values.entrySet())
      contents.append(entry.getKey() + "\t" + entry.getValue() + "\n");
    FileUtils.writeStringToFile(inputFile, contents.toString());

    String storeName = "test";
    SerializerDefinition serDef = new SerializerDefinition("string");
    Cluster cluster = ServerTestUtils.getLocalCluster(1);

    // Test backwards compatibility
    StoreDefinition def =
        new StoreDefinitionBuilder()
            .setName(storeName)
            .setType(ReadOnlyStorageConfiguration.TYPE_NAME)
            .setKeySerializer(serDef)
            .setValueSerializer(serDef)
            .setRoutingPolicy(RoutingTier.CLIENT)
            .setRoutingStrategyType(RoutingStrategyType.CONSISTENT_STRATEGY)
            .setReplicationFactor(1)
            .setPreferredReads(1)
            .setRequiredReads(1)
            .setPreferredWrites(1)
            .setRequiredWrites(1)
            .build();
    HadoopStoreBuilder builder =
        new HadoopStoreBuilder(
            new Configuration(),
            TextStoreMapper.class,
            TextInputFormat.class,
            cluster,
            def,
            64 * 1024,
            new Path(tempDir2.getAbsolutePath()),
            new Path(outputDir2.getAbsolutePath()),
            new Path(inputFile.getAbsolutePath()),
            CheckSumType.MD5,
            saveKeys,
            false);
    builder.build();

    builder =
        new HadoopStoreBuilder(
            new Configuration(),
            TextStoreMapper.class,
            TextInputFormat.class,
            cluster,
            def,
            64 * 1024,
            new Path(tempDir.getAbsolutePath()),
            new Path(outputDir.getAbsolutePath()),
            new Path(inputFile.getAbsolutePath()),
            CheckSumType.MD5,
            saveKeys,
            false);
    builder.build();

    // Check if checkSum is generated in outputDir
    File nodeFile = new File(outputDir, "node-0");

    // Check if metadata file exists
    File metadataFile = new File(nodeFile, ".metadata");
    Assert.assertTrue(metadataFile.exists());

    ReadOnlyStorageMetadata metadata = new ReadOnlyStorageMetadata(metadataFile);
    if (saveKeys)
      Assert.assertEquals(
          metadata.get(ReadOnlyStorageMetadata.FORMAT),
          ReadOnlyStorageFormat.READONLY_V2.getCode());
    else
      Assert.assertEquals(
          metadata.get(ReadOnlyStorageMetadata.FORMAT),
          ReadOnlyStorageFormat.READONLY_V1.getCode());

    Assert.assertEquals(
        metadata.get(ReadOnlyStorageMetadata.CHECKSUM_TYPE), CheckSum.toString(CheckSumType.MD5));

    // Check contents of checkSum file
    byte[] md5 =
        Hex.decodeHex(((String) metadata.get(ReadOnlyStorageMetadata.CHECKSUM)).toCharArray());
    byte[] checkSumBytes = CheckSumTests.calculateCheckSum(nodeFile.listFiles(), CheckSumType.MD5);
    Assert.assertEquals(0, ByteUtils.compare(checkSumBytes, md5));

    // check if fetching works
    HdfsFetcher fetcher = new HdfsFetcher();

    // Fetch to version directory
    File versionDir = new File(storeDir, "version-0");
    fetcher.fetch(nodeFile.getAbsolutePath(), versionDir.getAbsolutePath());
    Assert.assertTrue(versionDir.exists());

    // open store
    @SuppressWarnings("unchecked")
    Serializer<Object> serializer =
        (Serializer<Object>) new DefaultSerializerFactory().getSerializer(serDef);
    ReadOnlyStorageEngine engine =
        new ReadOnlyStorageEngine(
            storeName,
            searchStrategy,
            new RoutingStrategyFactory().updateRoutingStrategy(def, cluster),
            0,
            storeDir,
            1);
    Store<Object, Object, Object> store =
        SerializingStore.wrap(engine, serializer, serializer, serializer);

    // check values
    for (Map.Entry<String, String> entry : values.entrySet()) {
      List<Versioned<Object>> found = store.get(entry.getKey(), null);
      Assert.assertEquals("Incorrect number of results", 1, found.size());
      Assert.assertEquals(entry.getValue(), found.get(0).getValue());
    }

    // also check the iterator - first key iterator...
    try {
      ClosableIterator<ByteArray> keyIterator = engine.keys();
      if (!saveKeys) {
        fail("Should have thrown an exception since this RO format does not support iterators");
      }
      int numElements = 0;
      while (keyIterator.hasNext()) {
        Assert.assertTrue(values.containsKey(serializer.toObject(keyIterator.next().get())));
        numElements++;
      }

      Assert.assertEquals(numElements, values.size());
    } catch (UnsupportedOperationException e) {
      if (saveKeys) {
        fail("Should not have thrown an exception since this RO format does support iterators");
      }
    }

    // ... and entry iterator
    try {
      ClosableIterator<Pair<ByteArray, Versioned<byte[]>>> entryIterator = engine.entries();
      if (!saveKeys) {
        fail("Should have thrown an exception since this RO format does not support iterators");
      }
      int numElements = 0;
      while (entryIterator.hasNext()) {
        Pair<ByteArray, Versioned<byte[]>> entry = entryIterator.next();
        Assert.assertEquals(
            values.get(serializer.toObject(entry.getFirst().get())),
            serializer.toObject(entry.getSecond().getValue()));
        numElements++;
      }

      Assert.assertEquals(numElements, values.size());
    } catch (UnsupportedOperationException e) {
      if (saveKeys) {
        fail("Should not have thrown an exception since this RO format does support iterators");
      }
    }
  }
Example #9
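A regression test for Issue 258: building a store over a 10-node cluster with no input rows must produce one node-N directory (each with a .metadata file) per node and never a node--1 directory.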
  /**
   * Issue 258 : 'node--1' produced during store building if some reducer does not get any data.
   *
   * @throws Exception
   */
  @Test
  public void testRowsLessThanNodes() throws Exception {
    Map<String, String> values = new HashMap<String, String>();
    File testDir = TestUtils.createTempDir();
    File tempDir = new File(testDir, "temp");
    File outputDir = new File(testDir, "output");

    // write test data to text file
    File inputFile = File.createTempFile("input", ".txt", testDir);
    inputFile.deleteOnExit();
    StringBuilder contents = new StringBuilder();
    for (Map.Entry<String, String> entry : values.entrySet())
      contents.append(entry.getKey() + "\t" + entry.getValue() + "\n");
    FileUtils.writeStringToFile(inputFile, contents.toString());

    String storeName = "test";
    SerializerDefinition serDef = new SerializerDefinition("string");
    Cluster cluster = ServerTestUtils.getLocalCluster(10);

    // Test backwards compatibility
    StoreDefinition def =
        new StoreDefinitionBuilder()
            .setName(storeName)
            .setType(ReadOnlyStorageConfiguration.TYPE_NAME)
            .setKeySerializer(serDef)
            .setValueSerializer(serDef)
            .setRoutingPolicy(RoutingTier.CLIENT)
            .setRoutingStrategyType(RoutingStrategyType.CONSISTENT_STRATEGY)
            .setReplicationFactor(1)
            .setPreferredReads(1)
            .setRequiredReads(1)
            .setPreferredWrites(1)
            .setRequiredWrites(1)
            .build();
    HadoopStoreBuilder builder =
        new HadoopStoreBuilder(
            new Configuration(),
            TextStoreMapper.class,
            TextInputFormat.class,
            cluster,
            def,
            64 * 1024,
            new Path(tempDir.getAbsolutePath()),
            new Path(outputDir.getAbsolutePath()),
            new Path(inputFile.getAbsolutePath()),
            CheckSumType.MD5,
            saveKeys,
            false);
    builder.build();

    // Should not produce node--1 directory + have one folder for every node
    Assert.assertEquals(cluster.getNumberOfNodes(), outputDir.listFiles().length);
    for (File f : outputDir.listFiles()) {
      Assert.assertFalse(f.toString().contains("node--1"));
    }

    // Check if individual nodes exist, along with their metadata file
    for (int nodeId = 0; nodeId < 10; nodeId++) {
      File nodeFile = new File(outputDir, "node-" + Integer.toString(nodeId));
      Assert.assertTrue(nodeFile.exists());
      Assert.assertTrue(new File(nodeFile, ".metadata").exists());
    }
  }