Example #1
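This example, presumably from Voldemort's REST client factory, assembles a client-side store stack layer by layer: an R2Store transport (backed by either a plain transport client or a D2 client), serializer definitions bootstrapped from the coordinator, optional JMX stat tracking, optional compression, a serialization layer, and finally vector-clock inconsistency resolution.
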
  @Override
  public <K, V, T> Store<K, V, T> getRawStore(
      String storeName, InconsistencyResolver<Versioned<V>> resolver) {

    Store<K, V, T> clientStore = null;

    // The lowest layer: transports requests to the coordinator
    R2Store r2store = null;
    this.d2Client = restClientFactoryConfig.getD2Client();
    if (this.d2Client == null) {
      r2store =
          new R2Store(
              storeName, this.config.getHttpBootstrapURL(), this.transportClient, this.config);
    } else {
      r2store =
          new R2Store(storeName, this.config.getHttpBootstrapURL(), this.d2Client, this.config);
    }

    this.rawStoreList.add(r2store);

    // bootstrap from the coordinator and obtain all the serialization
    // information.
    String serializerInfoXml = r2store.getSerializerInfoXml();
    SerializerDefinition keySerializerDefinition =
        RestUtils.parseKeySerializerDefinition(serializerInfoXml);
    SerializerDefinition valueSerializerDefinition =
        RestUtils.parseValueSerializerDefinition(serializerInfoXml);

    synchronized (this) {
      keySerializerMap.put(storeName, keySerializerDefinition);
      valueSerializerMap.put(storeName, valueSerializerDefinition);
    }

    if (logger.isDebugEnabled()) {
      logger.debug(
          "Bootstrapping for " + storeName + ": Key serializer " + keySerializerDefinition);
      logger.debug(
          "Bootstrapping for " + storeName + ": Value serializer " + valueSerializerDefinition);
    }

    // Start building the stack.
    // First, the transport layer
    Store<ByteArray, byte[], byte[]> store = r2store;

    // TODO: Add jmxId / some unique identifier to the MBean name
    if (this.config.isEnableJmx()) {
      StatTrackingStore statStore = new StatTrackingStore(store, this.stats);
      store = statStore;
      JmxUtils.registerMbean(
          new StoreStatsJmx(statStore.getStats()),
          JmxUtils.createObjectName(JmxUtils.getPackageName(store.getClass()), store.getName()));
    }

    // Add compression layer
    if (keySerializerDefinition.hasCompression() || valueSerializerDefinition.hasCompression()) {
      store =
          new CompressingStore(
              store,
              new CompressionStrategyFactory().get(keySerializerDefinition.getCompression()),
              new CompressionStrategyFactory().get(valueSerializerDefinition.getCompression()));
    }

    // Add serialization layer (casts are unchecked because the factory returns a raw Serializer)
    @SuppressWarnings("unchecked")
    Serializer<K> keySerializer =
        (Serializer<K>) serializerFactory.getSerializer(keySerializerDefinition);
    @SuppressWarnings("unchecked")
    Serializer<V> valueSerializer =
        (Serializer<V>) serializerFactory.getSerializer(valueSerializerDefinition);
    clientStore = SerializingStore.wrap(store, keySerializer, valueSerializer, null);

    // Add inconsistency-resolving layer
    InconsistencyResolver<Versioned<V>> secondaryResolver =
        resolver == null ? new TimeBasedInconsistencyResolver<V>() : resolver;
    clientStore =
        new InconsistencyResolvingStore<K, V, T>(
            clientStore,
            new ChainedResolver<Versioned<V>>(
                new VectorClockInconsistencyResolver<V>(), secondaryResolver));
    return clientStore;
  }
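
A minimal usage sketch for getRawStore(), assuming an already-configured factory instance (factory) and a deployed store named "test-store" with string keys and values; both names are hypothetical:

    Store<String, String, byte[]> store = factory.getRawStore("test-store", null);

    // A null resolver falls back to a TimeBasedInconsistencyResolver, chained
    // after vector-clock resolution (see above).
    store.put("hello", new Versioned<String>("world"), null);
    List<Versioned<String>> values = store.get("hello", null);
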
Example #2
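This example is a JUnit test exercising the Hadoop read-only store build pipeline end to end: it generates tab-separated test data, runs HadoopStoreBuilder twice (the assertions target the second build, written to outputDir), verifies the .metadata file and MD5 checksum, fetches the output with HdfsFetcher, opens it as a ReadOnlyStorageEngine, and checks point lookups as well as the key and entry iterators. A helper that assembles a multi-node read-only test instance follows the test.
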
  @Test
  public void testHadoopBuild() throws Exception {
    // create test data
    Map<String, String> values = new HashMap<String, String>();
    File testDir = TestUtils.createTempDir();
    File tempDir = new File(testDir, "temp"), tempDir2 = new File(testDir, "temp2");
    File outputDir = new File(testDir, "output"), outputDir2 = new File(testDir, "output2");
    File storeDir = TestUtils.createTempDir(testDir);
    for (int i = 0; i < 200; i++) values.put(Integer.toString(i), Integer.toBinaryString(i));

    // write test data to text file
    File inputFile = File.createTempFile("input", ".txt", testDir);
    inputFile.deleteOnExit();
    StringBuilder contents = new StringBuilder();
    for (Map.Entry<String, String> entry : values.entrySet())
      contents.append(entry.getKey() + "\t" + entry.getValue() + "\n");
    FileUtils.writeStringToFile(inputFile, contents.toString());

    String storeName = "test";
    SerializerDefinition serDef = new SerializerDefinition("string");
    Cluster cluster = ServerTestUtils.getLocalCluster(1);

    // Test backwards compatibility
    StoreDefinition def =
        new StoreDefinitionBuilder()
            .setName(storeName)
            .setType(ReadOnlyStorageConfiguration.TYPE_NAME)
            .setKeySerializer(serDef)
            .setValueSerializer(serDef)
            .setRoutingPolicy(RoutingTier.CLIENT)
            .setRoutingStrategyType(RoutingStrategyType.CONSISTENT_STRATEGY)
            .setReplicationFactor(1)
            .setPreferredReads(1)
            .setRequiredReads(1)
            .setPreferredWrites(1)
            .setRequiredWrites(1)
            .build();
    HadoopStoreBuilder builder =
        new HadoopStoreBuilder(
            new Configuration(),
            TextStoreMapper.class,
            TextInputFormat.class,
            cluster,
            def,
            64 * 1024,
            new Path(tempDir2.getAbsolutePath()),
            new Path(outputDir2.getAbsolutePath()),
            new Path(inputFile.getAbsolutePath()),
            CheckSumType.MD5,
            saveKeys,
            false);
    builder.build();

    builder =
        new HadoopStoreBuilder(
            new Configuration(),
            TextStoreMapper.class,
            TextInputFormat.class,
            cluster,
            def,
            64 * 1024,
            new Path(tempDir.getAbsolutePath()),
            new Path(outputDir.getAbsolutePath()),
            new Path(inputFile.getAbsolutePath()),
            CheckSumType.MD5,
            saveKeys,
            false);
    builder.build();

    // Locate the node directory in outputDir; its checksum is verified below
    File nodeFile = new File(outputDir, "node-0");

    // Check if metadata file exists
    File metadataFile = new File(nodeFile, ".metadata");
    Assert.assertTrue(metadataFile.exists());

    ReadOnlyStorageMetadata metadata = new ReadOnlyStorageMetadata(metadataFile);
    if (saveKeys) {
      Assert.assertEquals(
          ReadOnlyStorageFormat.READONLY_V2.getCode(),
          metadata.get(ReadOnlyStorageMetadata.FORMAT));
    } else {
      Assert.assertEquals(
          ReadOnlyStorageFormat.READONLY_V1.getCode(),
          metadata.get(ReadOnlyStorageMetadata.FORMAT));
    }

    Assert.assertEquals(
        CheckSum.toString(CheckSumType.MD5), metadata.get(ReadOnlyStorageMetadata.CHECKSUM_TYPE));

    // Check contents of checkSum file
    byte[] md5 =
        Hex.decodeHex(((String) metadata.get(ReadOnlyStorageMetadata.CHECKSUM)).toCharArray());
    byte[] checkSumBytes = CheckSumTests.calculateCheckSum(nodeFile.listFiles(), CheckSumType.MD5);
    Assert.assertEquals(0, ByteUtils.compare(checkSumBytes, md5));

    // check if fetching works
    HdfsFetcher fetcher = new HdfsFetcher();

    // Fetch to version directory
    File versionDir = new File(storeDir, "version-0");
    fetcher.fetch(nodeFile.getAbsolutePath(), versionDir.getAbsolutePath());
    Assert.assertTrue(versionDir.exists());

    // open store
    @SuppressWarnings("unchecked")
    Serializer<Object> serializer =
        (Serializer<Object>) new DefaultSerializerFactory().getSerializer(serDef);
    ReadOnlyStorageEngine engine =
        new ReadOnlyStorageEngine(
            storeName,
            searchStrategy,
            new RoutingStrategyFactory().updateRoutingStrategy(def, cluster),
            0,
            storeDir,
            1);
    Store<Object, Object, Object> store =
        SerializingStore.wrap(engine, serializer, serializer, serializer);

    // check values
    for (Map.Entry<String, String> entry : values.entrySet()) {
      List<Versioned<Object>> found = store.get(entry.getKey(), null);
      Assert.assertEquals("Incorrect number of results", 1, found.size());
      Assert.assertEquals(entry.getValue(), found.get(0).getValue());
    }

    // also check the iterator - first key iterator...
    try {
      ClosableIterator<ByteArray> keyIterator = engine.keys();
      if (!saveKeys) {
        fail("Should have thrown an exception since this RO format does not support iterators");
      }
      int numElements = 0;
      while (keyIterator.hasNext()) {
        Assert.assertTrue(values.containsKey(serializer.toObject(keyIterator.next().get())));
        numElements++;
      }

      Assert.assertEquals(values.size(), numElements);
    } catch (UnsupportedOperationException e) {
      if (saveKeys) {
        fail("Should not have thrown an exception since this RO format does support iterators");
      }
    }

    // ... and entry iterator
    try {
      ClosableIterator<Pair<ByteArray, Versioned<byte[]>>> entryIterator = engine.entries();
      if (!saveKeys) {
        fail("Should have thrown an exception since this RO format does not support iterators");
      }
      int numElements = 0;
      while (entryIterator.hasNext()) {
        Pair<ByteArray, Versioned<byte[]>> entry = entryIterator.next();
        Assert.assertEquals(
            values.get(serializer.toObject(entry.getFirst().get())),
            serializer.toObject(entry.getSecond().getValue()));
        numElements++;
      }

      Assert.assertEquals(values.size(), numElements);
    } catch (UnsupportedOperationException e) {
      if (saveKeys) {
        fail("Should not have thrown an exception since this RO format does support iterators");
      }
    }
  }
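
The test above refers to saveKeys and searchStrategy without declaring them, so they are presumably fields of a parameterized test class. A minimal sketch of what those declarations might look like; the class name and parameter wiring are assumptions, not taken from the snippet:

  // Hypothetical test-class skeleton: saveKeys toggles READONLY_V2 vs READONLY_V1,
  // and searchStrategy selects the index lookup implementation under test.
  private final boolean saveKeys;
  private final SearchStrategy searchStrategy;

  public HadoopStoreBuilderTest(boolean saveKeys, SearchStrategy searchStrategy) {
    this.saveKeys = saveKeys;
    this.searchStrategy = searchStrategy;
  }
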
  public static ReadOnlyStorageEngineTestInstance create(
      SearchStrategy strategy, File baseDir, int testSize, int numNodes, int repFactor)
      throws Exception {
    // create some test data
    Map<String, String> data = createTestData(testSize);
    JsonReader reader = makeTestDataReader(data, baseDir);

    // set up definitions for cluster and store
    List<Node> nodes = new ArrayList<Node>();
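    // Node arguments are presumably (id, host, httpPort, socketPort, adminPort,
    // partitions); each node owns four consecutive partition ids.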
    for (int i = 0; i < numNodes; i++) {
      nodes.add(
          new Node(
              i,
              "localhost",
              8080 + i,
              6666 + i,
              7000 + i,
              Arrays.asList(4 * i, 4 * i + 1, 4 * i + 2, 4 * i + 3)));
    }
    Cluster cluster = new Cluster("test", nodes);
    SerializerDefinition serDef = new SerializerDefinition("json", "'string'");
    StoreDefinition storeDef =
        new StoreDefinitionBuilder()
            .setName("test")
            .setType(ReadOnlyStorageConfiguration.TYPE_NAME)
            .setKeySerializer(serDef)
            .setValueSerializer(serDef)
            .setRoutingPolicy(RoutingTier.CLIENT)
            .setRoutingStrategyType(RoutingStrategyType.CONSISTENT_STRATEGY)
            .setReplicationFactor(repFactor)
            .setPreferredReads(1)
            .setRequiredReads(1)
            .setPreferredWrites(1)
            .setRequiredWrites(1)
            .build();
    RoutingStrategy router = new RoutingStrategyFactory().updateRoutingStrategy(storeDef, cluster);

    // build store files in outputDir
    File outputDir = TestUtils.createTempDir(baseDir);
    JsonStoreBuilder storeBuilder =
        new JsonStoreBuilder(
            reader, cluster, storeDef, router, outputDir, null, testSize / 5, 1, 2, 10000);
    storeBuilder.build();

    File nodeDir = TestUtils.createTempDir(baseDir);
    @SuppressWarnings("unchecked")
    Serializer<String> serializer =
        (Serializer<String>) new DefaultSerializerFactory().getSerializer(serDef);
    Map<Integer, Store<String, String, String>> nodeStores = Maps.newHashMap();
    for (int i = 0; i < numNodes; i++) {
      File currNode = new File(nodeDir, Integer.toString(i));
      currNode.mkdirs();
      currNode.deleteOnExit();
      Utils.move(
          new File(outputDir, "node-" + Integer.toString(i)), new File(currNode, "version-0"));
      nodeStores.put(
          i,
          SerializingStore.wrap(
              new ReadOnlyStorageEngine("test", strategy, router, i, currNode, 1),
              serializer,
              serializer,
              serializer));
    }

    return new ReadOnlyStorageEngineTestInstance(data, baseDir, nodeStores, router, serializer);
  }
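
A short usage sketch for create(); BinarySearchStrategy is one of Voldemort's SearchStrategy implementations, and the sizes here are arbitrary:

    // 100 test entries spread across 2 nodes with replication factor 2.
    ReadOnlyStorageEngineTestInstance instance =
        ReadOnlyStorageEngineTestInstance.create(
            new BinarySearchStrategy(), TestUtils.createTempDir(), 100, 2, 2);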