Example 1
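  /**
   * Reads every value stored under the given key: positions the cursor on the key with
   * getSearchKey, then steps through the duplicates with getNextDup, deserializing each
   * value into the result list.
   */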
  private static <T> List<T> get(
      Cursor cursor, ByteArray key, LockMode lockMode, Serializer<T> serializer)
      throws DatabaseException {
    StoreUtils.assertValidKey(key);

    DatabaseEntry keyEntry = new DatabaseEntry(key.get());
    DatabaseEntry valueEntry = new DatabaseEntry();
    List<T> results = Lists.newArrayList();

    for (OperationStatus status = cursor.getSearchKey(keyEntry, valueEntry, lockMode);
        status == OperationStatus.SUCCESS;
        status = cursor.getNextDup(keyEntry, valueEntry, lockMode)) {
      results.add(serializer.toObject(valueEntry.getData()));
    }
    return results;
  }
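For context, a minimal sketch of how a private helper like this might be invoked from elsewhere in the same class, assuming a Berkeley DB JE Database handle and a Voldemort Serializer; the bdbDatabase and stringSerializer names are illustrative, not taken from the original class:

  // Hypothetical caller: read every duplicate entry stored under one key.
  Cursor cursor = bdbDatabase.openCursor(null, null);
  try {
    ByteArray key = new ByteArray("user-42".getBytes()); // default charset is fine for a sketch
    List<String> versions = get(cursor, key, LockMode.READ_UNCOMMITTED, stringSerializer);
    for (String version : versions) {
      System.out.println(version);
    }
  } finally {
    // Cursors hold locks, so always close them.
    cursor.close();
  }

Closing the cursor in a finally block matters here because an open cursor pins locks until it is released.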
Example 2
  @Test
  public void testHadoopBuild() throws Exception {
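    // Note: saveKeys and searchStrategy, used below, are not locals; they are presumably
    // fields supplied by the enclosing (parameterized) test class.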
    // create test data
    Map<String, String> values = new HashMap<String, String>();
    File testDir = TestUtils.createTempDir();
    File tempDir = new File(testDir, "temp"), tempDir2 = new File(testDir, "temp2");
    File outputDir = new File(testDir, "output"), outputDir2 = new File(testDir, "output2");
    File storeDir = TestUtils.createTempDir(testDir);
    for (int i = 0; i < 200; i++) values.put(Integer.toString(i), Integer.toBinaryString(i));

    // write test data to text file
    File inputFile = File.createTempFile("input", ".txt", testDir);
    inputFile.deleteOnExit();
    StringBuilder contents = new StringBuilder();
    for (Map.Entry<String, String> entry : values.entrySet())
      contents.append(entry.getKey() + "\t" + entry.getValue() + "\n");
    FileUtils.writeStringToFile(inputFile, contents.toString());

    String storeName = "test";
    SerializerDefinition serDef = new SerializerDefinition("string");
    Cluster cluster = ServerTestUtils.getLocalCluster(1);

    // Test backwards compatibility
    StoreDefinition def =
        new StoreDefinitionBuilder()
            .setName(storeName)
            .setType(ReadOnlyStorageConfiguration.TYPE_NAME)
            .setKeySerializer(serDef)
            .setValueSerializer(serDef)
            .setRoutingPolicy(RoutingTier.CLIENT)
            .setRoutingStrategyType(RoutingStrategyType.CONSISTENT_STRATEGY)
            .setReplicationFactor(1)
            .setPreferredReads(1)
            .setRequiredReads(1)
            .setPreferredWrites(1)
            .setRequiredWrites(1)
            .build();
    HadoopStoreBuilder builder =
        new HadoopStoreBuilder(
            new Configuration(),
            TextStoreMapper.class,
            TextInputFormat.class,
            cluster,
            def,
            64 * 1024,
            new Path(tempDir2.getAbsolutePath()),
            new Path(outputDir2.getAbsolutePath()),
            new Path(inputFile.getAbsolutePath()),
            CheckSumType.MD5,
            saveKeys,
            false);
    builder.build();

    builder =
        new HadoopStoreBuilder(
            new Configuration(),
            TextStoreMapper.class,
            TextInputFormat.class,
            cluster,
            def,
            64 * 1024,
            new Path(tempDir.getAbsolutePath()),
            new Path(outputDir.getAbsolutePath()),
            new Path(inputFile.getAbsolutePath()),
            CheckSumType.MD5,
            saveKeys,
            false);
    builder.build();

    // The build output for node 0, including the checksum metadata, lands under outputDir
    File nodeFile = new File(outputDir, "node-0");

    // Check if metadata file exists
    File metadataFile = new File(nodeFile, ".metadata");
    Assert.assertTrue(metadataFile.exists());

    ReadOnlyStorageMetadata metadata = new ReadOnlyStorageMetadata(metadataFile);
    if (saveKeys) {
      Assert.assertEquals(
          ReadOnlyStorageFormat.READONLY_V2.getCode(),
          metadata.get(ReadOnlyStorageMetadata.FORMAT));
    } else {
      Assert.assertEquals(
          ReadOnlyStorageFormat.READONLY_V1.getCode(),
          metadata.get(ReadOnlyStorageMetadata.FORMAT));
    }

    Assert.assertEquals(
        CheckSum.toString(CheckSumType.MD5), metadata.get(ReadOnlyStorageMetadata.CHECKSUM_TYPE));

    // Check contents of checkSum file
    byte[] md5 =
        Hex.decodeHex(((String) metadata.get(ReadOnlyStorageMetadata.CHECKSUM)).toCharArray());
    byte[] checkSumBytes = CheckSumTests.calculateCheckSum(nodeFile.listFiles(), CheckSumType.MD5);
    Assert.assertEquals(0, ByteUtils.compare(checkSumBytes, md5));

    // check if fetching works
    HdfsFetcher fetcher = new HdfsFetcher();

    // Fetch to version directory
    File versionDir = new File(storeDir, "version-0");
    fetcher.fetch(nodeFile.getAbsolutePath(), versionDir.getAbsolutePath());
    Assert.assertTrue(versionDir.exists());

    // open store
    @SuppressWarnings("unchecked")
    Serializer<Object> serializer =
        (Serializer<Object>) new DefaultSerializerFactory().getSerializer(serDef);
    ReadOnlyStorageEngine engine =
        new ReadOnlyStorageEngine(
            storeName,
            searchStrategy,
            new RoutingStrategyFactory().updateRoutingStrategy(def, cluster),
            0,
            storeDir,
            1);
    Store<Object, Object, Object> store =
        SerializingStore.wrap(engine, serializer, serializer, serializer);

    // check values
    for (Map.Entry<String, String> entry : values.entrySet()) {
      List<Versioned<Object>> found = store.get(entry.getKey(), null);
      Assert.assertEquals("Incorrect number of results", 1, found.size());
      Assert.assertEquals(entry.getValue(), found.get(0).getValue());
    }

    // also check the iterator - first key iterator...
    try {
      ClosableIterator<ByteArray> keyIterator = engine.keys();
      if (!saveKeys) {
        fail("Should have thrown an exception since this RO format does not support iterators");
      }
      int numElements = 0;
      while (keyIterator.hasNext()) {
        Assert.assertTrue(values.containsKey(serializer.toObject(keyIterator.next().get())));
        numElements++;
      }

      Assert.assertEquals(values.size(), numElements);
    } catch (UnsupportedOperationException e) {
      if (saveKeys) {
        fail("Should not have thrown an exception since this RO format does support iterators");
      }
    }

    // ... and entry iterator
    try {
      ClosableIterator<Pair<ByteArray, Versioned<byte[]>>> entryIterator = engine.entries();
      if (!saveKeys) {
        fail("Should have thrown an exception since this RO format does not support iterators");
      }
      int numElements = 0;
      while (entryIterator.hasNext()) {
        Pair<ByteArray, Versioned<byte[]>> entry = entryIterator.next();
        Assert.assertEquals(
            values.get(serializer.toObject(entry.getFirst().get())),
            serializer.toObject(entry.getSecond().getValue()));
        numElements++;
      }

      Assert.assertEquals(values.size(), numElements);
    } catch (UnsupportedOperationException e) {
      if (saveKeys) {
        fail("Should not have thrown an exception since this RO format does support iterators");
      }
    }
  }
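The checksum verification in the test above recomputes a digest over the store's data files and compares it with the value recorded in the .metadata file. As a rough sketch of the idea in plain java.security terms, assuming the files are digested in order into one running MD5 (the exact scheme in CheckSumTests.calculateCheckSum, such as per-file digests, may differ):

import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;

public final class Md5OfFiles {

  // Feed the contents of each file, in order, into a single 16-byte MD5 digest.
  public static byte[] md5Of(File[] files) throws IOException, NoSuchAlgorithmException {
    MessageDigest md5 = MessageDigest.getInstance("MD5");
    byte[] buffer = new byte[8192];
    for (File file : files) {
      InputStream in = new FileInputStream(file);
      try {
        int n;
        while ((n = in.read(buffer)) != -1) {
          md5.update(buffer, 0, n);
        }
      } finally {
        in.close();
      }
    }
    return md5.digest();
  }
}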
  /**
   * Create the voldemort key and value from the input key and value and map them out to each of
   * the responsible voldemort nodes.
   *
   * <p>The output key is the md5 of the serialized key returned by makeKey(). The output value is
   * the node_id and partition_id of the responsible node, followed by the serialized value
   * returned by makeValue() OR, if the saveKeys flag is on, by the serialized key and the
   * serialized value.
   */
  public void map(
      K key, V value, OutputCollector<BytesWritable, BytesWritable> output, Reporter reporter)
      throws IOException {
    byte[] keyBytes = keySerializer.toBytes(makeKey(key, value));
    byte[] valBytes = valueSerializer.toBytes(makeValue(key, value));

    // Compress key and values if required
    if (keySerializerDefinition.hasCompression()) {
      keyBytes = keyCompressor.deflate(keyBytes);
    }

    if (valueSerializerDefinition.hasCompression()) {
      valBytes = valueCompressor.deflate(valBytes);
    }

    // Get the output byte arrays ready to populate
    byte[] outputValue;
    BytesWritable outputKey;

    // Leave initial offset for (a) node id (b) partition id
    // since they are written later
    int offsetTillNow = 2 * ByteUtils.SIZE_OF_INT;

    // In order: 4 (node id) + 4 (partition id) + 1 (replica type:
    // primary | secondary | tertiary...) + 4 (key size) + 4 (value size)
    // + key + value
    outputValue =
        new byte
            [valBytes.length
                + keyBytes.length
                + ByteUtils.SIZE_OF_BYTE
                + 4 * ByteUtils.SIZE_OF_INT];

    // Skip the replica type byte (filled in later, inside the per-replica loop),
    // then write the key length
    offsetTillNow += ByteUtils.SIZE_OF_BYTE;
    ByteUtils.writeInt(outputValue, keyBytes.length, offsetTillNow);

    // Write value length
    offsetTillNow += ByteUtils.SIZE_OF_INT;
    ByteUtils.writeInt(outputValue, valBytes.length, offsetTillNow);

    // Write key
    offsetTillNow += ByteUtils.SIZE_OF_INT;
    System.arraycopy(keyBytes, 0, outputValue, offsetTillNow, keyBytes.length);

    // Write value
    offsetTillNow += keyBytes.length;
    System.arraycopy(valBytes, 0, outputValue, offsetTillNow, valBytes.length);

    // Generate MR key - 16 byte md5 of the serialized key, as the javadoc describes
    // (md5er is assumed to be an MD5 MessageDigest field on this mapper)
    outputKey = new BytesWritable(md5er.digest(keyBytes));

    // Generate partition and node list this key is destined for
    List<Integer> partitionList = routingStrategy.getPartitionList(keyBytes);
    Node[] partitionToNode = routingStrategy.getPartitionToNode();

    for (int replicaType = 0; replicaType < partitionList.size(); replicaType++) {

      // Node id
      ByteUtils.writeInt(outputValue, partitionToNode[partitionList.get(replicaType)].getId(), 0);

      // Primary partition id
      ByteUtils.writeInt(outputValue, partitionList.get(0), ByteUtils.SIZE_OF_INT);

      // Replica type
      ByteUtils.writeBytes(
          outputValue, replicaType, 2 * ByteUtils.SIZE_OF_INT, ByteUtils.SIZE_OF_BYTE);

      BytesWritable outputVal = new BytesWritable(outputValue);

      output.collect(outputKey, outputVal);
    }
  }
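To make the output-value layout concrete, here is a small standalone sketch that packs and reads back the same header fields with java.nio.ByteBuffer. The node id, partition id, and payload are made-up values; the sketch mirrors the layout documented above, not the project's ByteUtils code:

import java.nio.ByteBuffer;

public class RecordLayoutDemo {
  public static void main(String[] args) {
    byte[] key = "k".getBytes();
    byte[] value = "hello".getBytes();

    // Layout: 4 (node id) + 4 (partition id) + 1 (replica type)
    // + 4 (key size) + 4 (value size) + key + value
    ByteBuffer buf = ByteBuffer.allocate(4 + 4 + 1 + 4 + 4 + key.length + value.length);
    buf.putInt(7);            // node id (made up)
    buf.putInt(3);            // primary partition id (made up)
    buf.put((byte) 0);        // replica type: 0 = primary
    buf.putInt(key.length);   // key size
    buf.putInt(value.length); // value size
    buf.put(key).put(value);

    // Read the header back in the same order.
    buf.flip();
    System.out.printf(
        "node=%d partition=%d replica=%d keyLen=%d valLen=%d%n",
        buf.getInt(), buf.getInt(), buf.get(), buf.getInt(), buf.getInt());
  }
}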