Example No. 1
 private void loadMetadata() {
   this.cluster = metadataStore.getCluster();
   this.slopQueues =
       new ConcurrentHashMap<Integer, SynchronousQueue<Versioned<Slop>>>(
           cluster.getNumberOfNodes());
   this.attemptedByNode = new ConcurrentHashMap<Integer, Long>(cluster.getNumberOfNodes());
   this.succeededByNode = new ConcurrentHashMap<Integer, Long>(cluster.getNumberOfNodes());
 }
Example No. 2
  private List<Integer> getNodes(int partition) {
    List<Integer> rv = new LinkedList<Integer>();
    Cluster cluster = adminClient.getAdminClientCluster();
    for (Node node : cluster.getNodes()) {
      if (node.getPartitionIds().contains(partition)) rv.add(node.getId());
    }

    return rv;
  }
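The helper above answers "which nodes host this partition" with a linear scan over every node on each call. When such lookups are frequent, the same information can be precomputed once as a reverse index. A hypothetical sketch (PartitionIndexSketch and buildPartitionToNodeIndex are not part of the codebase) that uses only the Cluster and Node accessors already visible in the snippet:

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import voldemort.cluster.Cluster;
import voldemort.cluster.Node;

// Hypothetical helper: build a partition -> node ids index once, so repeated lookups
// avoid re-scanning every node of the cluster.
public class PartitionIndexSketch {

  public static Map<Integer, List<Integer>> buildPartitionToNodeIndex(Cluster cluster) {
    Map<Integer, List<Integer>> index = new HashMap<Integer, List<Integer>>();
    for (Node node : cluster.getNodes()) {
      for (Integer partition : node.getPartitionIds()) {
        List<Integer> nodeIds = index.get(partition);
        if (nodeIds == null) {
          nodeIds = new ArrayList<Integer>();
          index.put(partition, nodeIds);
        }
        nodeIds.add(node.getId());
      }
    }
    return index;
  }
}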
Example No. 3
  public void registerSystemEngine(StorageEngine<ByteArray, byte[], byte[]> engine) {

    Cluster cluster = this.metadata.getCluster();
    storeRepository.addStorageEngine(engine);

    /* Now add any store wrappers that are enabled */
    Store<ByteArray, byte[], byte[]> store = engine;

    if (voldemortConfig.isVerboseLoggingEnabled())
      store =
          new LoggingStore<ByteArray, byte[], byte[]>(
              store, cluster.getName(), SystemTime.INSTANCE);

    if (voldemortConfig.isMetadataCheckingEnabled())
      store = new InvalidMetadataCheckingStore(metadata.getNodeId(), store, metadata);

    if (voldemortConfig.isStatTrackingEnabled()) {
      StatTrackingStore statStore = new StatTrackingStore(store, this.storeStats);
      store = statStore;
      if (voldemortConfig.isJmxEnabled()) {

        MBeanServer mbeanServer = ManagementFactory.getPlatformMBeanServer();
        ObjectName name = null;
        if (this.voldemortConfig.isEnableJmxClusterName())
          name =
              JmxUtils.createObjectName(
                  metadata.getCluster().getName() + "." + JmxUtils.getPackageName(store.getClass()),
                  store.getName());
        else
          name =
              JmxUtils.createObjectName(JmxUtils.getPackageName(store.getClass()), store.getName());

        synchronized (mbeanServer) {
          if (mbeanServer.isRegistered(name)) JmxUtils.unregisterMbean(mbeanServer, name);

          JmxUtils.registerMbean(
              mbeanServer,
              JmxUtils.createModelMBean(new StoreStatsJmx(statStore.getStats())),
              name);
        }
      }
    }

    storeRepository.addLocalStore(store);
  }
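registerSystemEngine builds the store it finally registers by conditionally wrapping the engine: LoggingStore, InvalidMetadataCheckingStore, and StatTrackingStore each take the previous store as their delegate, and the outermost wrapper is what gets added to the repository. A toy sketch of that decorator-chaining pattern, using made-up KeyValueStore types rather than Voldemort's Store interface:

// Toy illustration of the conditional decorator chain used above: each enabled feature
// wraps the current store, and the outermost wrapper is what finally gets registered.
public class DecoratorChainSketch {

  interface KeyValueStore {
    String get(String key);
  }

  static class BaseStore implements KeyValueStore {
    public String get(String key) {
      return "value-for-" + key;
    }
  }

  static class LoggingWrapper implements KeyValueStore {
    private final KeyValueStore inner;

    LoggingWrapper(KeyValueStore inner) {
      this.inner = inner;
    }

    public String get(String key) {
      System.out.println("GET " + key);
      return inner.get(key);
    }
  }

  static class StatTrackingWrapper implements KeyValueStore {
    private final KeyValueStore inner;
    private long gets;

    StatTrackingWrapper(KeyValueStore inner) {
      this.inner = inner;
    }

    public String get(String key) {
      gets++;
      return inner.get(key);
    }

    long getGetCount() {
      return gets;
    }
  }

  static KeyValueStore wrap(KeyValueStore store, boolean loggingEnabled, boolean statsEnabled) {
    if (loggingEnabled) store = new LoggingWrapper(store); // store now points at the wrapper
    if (statsEnabled) store = new StatTrackingWrapper(store);
    return store; // outermost wrapper; register this one
  }
}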
Example No. 4
  @Before
  public void setup() throws IOException {
    byte[] bytes1 = {(byte) 'A', (byte) 'B'};
    byte[] bytes2 = {(byte) 'C', (byte) 'D'};
    List<StoreDefinition> stores = ClusterTestUtils.getZZZ322StoreDefs("memory");
    StoreDefinition storeDef = stores.get(0);
    cluster = ClusterTestUtils.getZZZCluster();
    ClientConfig clientConfig = new ClientConfig();
    clientConfig.setBootstrapUrls(cluster.getNodeById(0).getSocketUrl().toString());
    clientConfig.getZoneAffinity().setEnableGetOpZoneAffinity(true);
    clientConfig.setClientZoneId(clientZoneId);
    SocketStoreClientFactory socketStoreClientFactory = new SocketStoreClientFactory(clientConfig);
    for (Integer nodeId : cluster.getNodeIds()) {
      SocketStoreFactory socketStoreFactory = new ClientRequestExecutorPool(2, 10000, 100000, 1024);
      VoldemortConfig config =
          ServerTestUtils.createServerConfigWithDefs(
              true,
              nodeId,
              TestUtils.createTempDir().getAbsolutePath(),
              cluster,
              stores,
              new Properties());
      VoldemortServer vs =
          ServerTestUtils.startVoldemortServer(socketStoreFactory, config, cluster);
      vservers.put(nodeId, vs);
      socketStoreFactories.put(nodeId, socketStoreFactory);
      Store<ByteArray, byte[], byte[]> store =
          vs.getStoreRepository().getLocalStore(storeDef.getName());
      Node node = cluster.getNodeById(nodeId);

      VectorClock version1 = new VectorClock();
      version1.incrementVersion(0, System.currentTimeMillis());
      VectorClock version2 = version1.incremented(0, System.currentTimeMillis());

      if (node.getZoneId() == clientZoneId) {
        store.put(new ByteArray(bytes1), new Versioned<byte[]>(bytes1, version1), null);
      } else {
        store.put(new ByteArray(bytes1), new Versioned<byte[]>(bytes2, version2), null);
      }
    }

    client = socketStoreClientFactory.getRawStore(storeDef.getName(), null);
  }
Example No. 5
 @Test
 public void testLocalZonePartialDownSufficientReads() {
   // turn off one node in the same zone as the client so that reads can still
   // complete
   this.vservers.get(cluster.getNodeIdsInZone(clientZoneId).iterator().next()).stop();
   try {
     client.get("AB", null);
   } catch (InsufficientOperationalNodesException e) {
     fail("Failed with exception: " + e);
   }
 }
Example No. 6
  /**
   * For server-side routing, create a node store (socket store) per node and hand the resulting
   * routed store on to a {@link RebootstrappingStore}.
   *
   * <p>The {@link RebootstrappingStore} handles invalid-metadata exceptions introduced due to
   * changes in cluster.xml at different nodes.
   *
   * @param def Definition of the store being registered
   * @param cluster Cluster metadata
   * @param localNode Id of the local node
   */
  public void registerNodeStores(StoreDefinition def, Cluster cluster, int localNode) {
    Map<Integer, Store<ByteArray, byte[], byte[]>> nodeStores =
        new HashMap<Integer, Store<ByteArray, byte[], byte[]>>(cluster.getNumberOfNodes());
    Map<Integer, NonblockingStore> nonblockingStores =
        new HashMap<Integer, NonblockingStore>(cluster.getNumberOfNodes());
    try {
      for (Node node : cluster.getNodes()) {
        Store<ByteArray, byte[], byte[]> store = getNodeStore(def.getName(), node, localNode);
        this.storeRepository.addNodeStore(node.getId(), store);
        nodeStores.put(node.getId(), store);

        NonblockingStore nonblockingStore = routedStoreFactory.toNonblockingStore(store);
        nonblockingStores.put(node.getId(), nonblockingStore);
      }

      Store<ByteArray, byte[], byte[]> store =
          routedStoreFactory.create(
              cluster,
              def,
              nodeStores,
              nonblockingStores,
              null,
              null,
              failureDetector,
              routedStoreConfig);

      store =
          new RebootstrappingStore(
              metadata, storeRepository, voldemortConfig, (RoutedStore) store, storeFactory);

      store =
          new InconsistencyResolvingStore<ByteArray, byte[], byte[]>(
              store, new VectorClockInconsistencyResolver<byte[]>());
      this.storeRepository.addRoutedStore(store);
    } catch (Exception e) {
      // Roll back
      for (Node node : cluster.getNodes())
        this.storeRepository.removeNodeStore(def.getName(), node.getId());
      throw new VoldemortException(e);
    }
  }
Example No. 7
  @Test
  public void testLocalZoneDown() {
    for (Integer nodeId : cluster.getNodeIdsInZone(clientZoneId)) {
      this.vservers.get(nodeId).stop();
    }
    try {
      client.get("AB", null);
      fail("Did not fail fast");
    } catch (InsufficientOperationalNodesException e) {
      // expected
    }
  }
Example No. 8
  private void setFailureDetector(Map<Integer, Store<ByteArray, byte[], byte[]>> subStores)
      throws Exception {
    if (failureDetector != null) failureDetector.destroy();

    FailureDetectorConfig failureDetectorConfig = new FailureDetectorConfig();
    failureDetectorConfig.setImplementationClassName(failureDetectorCls.getName());
    failureDetectorConfig.setBannagePeriod(50);
    failureDetectorConfig.setNodes(cluster.getNodes());
    failureDetectorConfig.setStoreVerifier(MutableStoreVerifier.create(subStores));

    failureDetector = FailureDetectorUtils.create(failureDetectorConfig, false);
  }
Example No. 9
  @Override
  @Before
  public void setUp() throws IOException {
    cluster = ServerTestUtils.getLocalCluster(2, new int[][] {{0, 1, 2, 3}, {4, 5, 6, 7}});
    List<StoreDefinition> storeDefs = ServerTestUtils.getStoreDefs(1);

    failingStorageEngine =
        new RandomlyFailingDelegatingStore<ByteArray, byte[], byte[]>(
            new InMemoryStorageEngine<ByteArray, byte[], byte[]>(storeDefs.get(0).getName()));
    adminServer = getAdminServer(cluster.getNodeById(0), cluster, storeDefs, failingStorageEngine);
    adminClient = ServerTestUtils.getAdminClient(cluster);
    adminServer.start();
  }
Example No. 10
  private void reviveNodes(Set<Integer> failedNodes) {
    for (int node : failedNodes) {
      ForceFailStore<ByteArray, byte[], byte[]> forceFailStore = getForceFailStore(node);
      forceFailStore.setFail(false);

      if (logger.isTraceEnabled()) logger.trace("Stopped failing requests to " + node);
    }

    // Busy-wait, using an explicit iterator so removal doesn't throw ConcurrentModificationException
    while (!failedNodes.isEmpty()) {
      Iterator<Integer> iterator = failedNodes.iterator();
      while (iterator.hasNext()) {
        if (failureDetector.isAvailable(cluster.getNodeById(iterator.next()))) iterator.remove();
      }
    }
  }
Example No. 11
  public StreamingSlopPusherJob(
      StoreRepository storeRepo,
      MetadataStore metadataStore,
      FailureDetector failureDetector,
      VoldemortConfig voldemortConfig,
      Semaphore repairPermits) {
    this.storeRepo = storeRepo;
    this.metadataStore = metadataStore;
    this.failureDetector = failureDetector;
    this.voldemortConfig = voldemortConfig;
    this.repairPermits = Utils.notNull(repairPermits);

    this.cluster = metadataStore.getCluster();
    this.slopQueues =
        new ConcurrentHashMap<Integer, SynchronousQueue<Versioned<Slop>>>(
            cluster.getNumberOfNodes());
    this.consumerExecutor =
        Executors.newFixedThreadPool(
            cluster.getNumberOfNodes(),
            new ThreadFactory() {

              public Thread newThread(Runnable r) {
                Thread thread = new Thread(r);
                thread.setName("slop-pusher");
                return thread;
              }
            });

    this.readThrottler = new EventThrottler(voldemortConfig.getSlopMaxReadBytesPerSec());
    this.adminClient = null;
    this.consumerResults = Lists.newArrayList();
    this.attemptedByNode = new ConcurrentHashMap<Integer, Long>(cluster.getNumberOfNodes());
    this.succeededByNode = new ConcurrentHashMap<Integer, Long>(cluster.getNumberOfNodes());

    this.zoneMapping = Maps.newHashMap();
  }
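The constructor above sizes the consumer pool to the number of nodes and supplies an anonymous ThreadFactory so workers get a recognizable name; as written, every worker ends up named "slop-pusher". A small reusable variant that appends a per-thread counter is shown here only as a sketch (NamedThreadFactory is not a class in this codebase):

import java.util.concurrent.ThreadFactory;
import java.util.concurrent.atomic.AtomicInteger;

// Sketch of a reusable naming ThreadFactory; appending a counter makes individual
// consumer threads distinguishable in thread dumps.
public class NamedThreadFactory implements ThreadFactory {

  private final String prefix;
  private final AtomicInteger counter = new AtomicInteger(0);

  public NamedThreadFactory(String prefix) {
    this.prefix = prefix;
  }

  public Thread newThread(Runnable r) {
    Thread thread = new Thread(r);
    thread.setName(prefix + "-" + counter.incrementAndGet());
    return thread;
  }
}

It would be dropped in as Executors.newFixedThreadPool(cluster.getNumberOfNodes(), new NamedThreadFactory("slop-pusher")).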
Example No. 12
  @Before
  public void setUp() throws Exception {
    final int numServers = 2;
    servers = new VoldemortServer[numServers];
    int[][] partitionMap = {{0, 1, 2, 3}, {4, 5, 6, 7}};
    cluster =
        ServerTestUtils.startVoldemortCluster(
            numServers,
            servers,
            partitionMap,
            socketStoreFactory,
            true, // useNio
            null,
            storesXmlfile,
            new Properties());

    socketUrl = servers[0].getIdentityNode().getSocketUrl().toString();
    bootStrapUrls = new String[1];
    bootStrapUrls[0] = socketUrl;

    Node node = cluster.getNodeById(0);
    String bootstrapUrl = "tcp://" + node.getHost() + ":" + node.getSocketPort();
    ClientConfig clientConfig = new ClientConfig();
    clientConfig.setClientRegistryUpdateIntervalInSecs(5);
    clientConfig.setAsyncMetadataRefreshInMs(5000);
    clientConfig.setBootstrapUrls(bootstrapUrl);
    SocketStoreClientFactory storeClientFactory = new SocketStoreClientFactory(clientConfig);

    storeClient =
        new ZenStoreClient<String, String>(
            STORE_NAME,
            null,
            storeClientFactory,
            3,
            clientConfig.getClientContextName(),
            0,
            clientConfig);

    SystemStoreClientFactory<String, String> systemStoreFactory =
        new SystemStoreClientFactory<String, String>(clientConfig);

    sysStoreVersion =
        systemStoreFactory.createSystemStore(
            SystemStoreConstants.SystemStoreName.voldsys$_metadata_version_persistence.name());
    clientRegistryStore =
        systemStoreFactory.createSystemStore(
            SystemStoreConstants.SystemStoreName.voldsys$_client_registry.name());
  }
Example No. 13
  @Test
  public void testLocalZonePartialDownInSufficientReads() {
    // Stop all but one node in the same zone as the client. One remaining node is not
    // sufficient for zone reads.
    Set<Integer> nodeIds = cluster.getNodeIdsInZone(clientZoneId);
    nodeIds.remove(nodeIds.iterator().next());
    for (Integer nodeId : nodeIds) {
      this.vservers.get(nodeId).stop();
    }
    try {
      client.get("AB", null);
      fail("Did not fail fast");
    } catch (InsufficientOperationalNodesException e) {
      // expected
    }
  }
Example No. 14
  @Override
  @Before
  public void setUp() {
    logger.info(" Initial SEED used for random number generator: " + TestUtils.SEED);
    final int numServers = 1;
    this.nodeId = 0;
    servers = new VoldemortServer[numServers];
    try {

      // Setup the cluster
      Properties props = new Properties();
      props.setProperty("rest.enable", "true");
      props.setProperty("http.enable", "true");

      Cluster customCluster = clusterMapper.readCluster(new FileReader(clusterXmlFile), false);

      cluster =
          ServerTestUtils.startVoldemortCluster(
              servers, null, clusterXmlFile, storesXmlfile, props, customCluster);

    } catch (IOException e) {
      fail("Failure to setup the cluster");
    }

    // Creating R2Store
    RESTClientConfig restClientConfig = new RESTClientConfig();
    restClientConfig
        .setHttpBootstrapURL("http://localhost:" + cluster.getNodeById(0).getRestPort())
        .setTimeoutMs(10000, TimeUnit.MILLISECONDS)
        .setMaxR2ConnectionPoolSize(100);
    clientFactory = new HttpClientFactory();
    Map<String, String> properties = new HashMap<String, String>();
    properties.put(
        HttpClientFactory.POOL_SIZE_KEY,
        Integer.toString(restClientConfig.getMaxR2ConnectionPoolSize()));
    TransportClient transportClient = clientFactory.getClient(properties);
    R2Store r2Store =
        new R2Store(
            STORE_NAME,
            restClientConfig.getHttpBootstrapURL(),
            "0",
            transportClient,
            restClientConfig,
            0);
    store = r2Store;
  }
Example No. 15
  @Test
  public void testAddStore() throws Exception {
    AdminClient adminClient = getAdminClient();

    StoreDefinition definition =
        new StoreDefinitionBuilder()
            .setName("updateTest")
            .setType(InMemoryStorageConfiguration.TYPE_NAME)
            .setKeySerializer(new SerializerDefinition("string"))
            .setValueSerializer(new SerializerDefinition("string"))
            .setRoutingPolicy(RoutingTier.CLIENT)
            .setRoutingStrategyType(RoutingStrategyType.CONSISTENT_STRATEGY)
            .setReplicationFactor(1)
            .setPreferredReads(1)
            .setRequiredReads(1)
            .setPreferredWrites(1)
            .setRequiredWrites(1)
            .build();
    adminClient.addStore(definition);

    // now test the store
    StoreClientFactory factory =
        new SocketStoreClientFactory(
            new ClientConfig().setBootstrapUrls(cluster.getNodeById(0).getSocketUrl().toString()));

    StoreClient<Object, Object> client = factory.getStoreClient("updateTest");
    client.put("abc", "123");
    String s = (String) client.get("abc").getValue();
    assertEquals(s, "123");

    // test again with an unknown store
    try {
      client = factory.getStoreClient("updateTest2");
      client.put("abc", "123");
      s = (String) client.get("abc").getValue();
      assertEquals(s, "123");
      fail("Should have received bootstrap failure exception");
    } catch (Exception e) {
      if (!(e instanceof BootstrapFailureException)) throw e;
    }

    // make sure that the store list we get back from the AdminClient contains the new store
    Versioned<List<StoreDefinition>> list = adminClient.getRemoteStoreDefList(0);
    assertTrue(list.getValue().contains(definition));
  }
Example No. 16
 private List<Integer> getPartitions(int nodeId) {
   Cluster cluster = adminClient.getAdminClientCluster();
   Node node = cluster.getNodeById(nodeId);
   return node.getPartitionIds();
 }
Example No. 17
  @Test
  public void testOnePartitionEndToEndBasedOnVersion() throws Exception {
    long now = System.currentTimeMillis();

    // setup four nodes with one store and one partition
    final SocketStoreFactory socketStoreFactory =
        new ClientRequestExecutorPool(2, 10000, 100000, 32 * 1024);
    VoldemortServer[] servers = new VoldemortServer[4];
    int[][] partitionMap = {{0}, {1}, {2}, {3}};
    Cluster cluster =
        ServerTestUtils.startVoldemortCluster(
            4, servers, partitionMap, socketStoreFactory, true, null, STORES_XML, new Properties());

    Node node = cluster.getNodeById(0);
    String bootstrapUrl = "tcp://" + node.getHost() + ":" + node.getSocketPort();
    AdminClient adminClient = new AdminClient(bootstrapUrl);

    byte[] value = {1, 2, 3, 4, 5, 6, 7, 8, 9};
    byte[] value2 = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9};

    // make versions
    VectorClock vc1 = new VectorClock();
    VectorClock vc2 = new VectorClock();
    VectorClock vc3 = new VectorClock();
    vc1.incrementVersion(0, now); // [0:1]
    vc2.incrementVersion(1, now - 5000); // [1:1]
    vc3.incrementVersion(0, now - 89000000); // [0:1], over a day old

    ArrayList<Pair<ByteArray, Versioned<byte[]>>> n0store =
        new ArrayList<Pair<ByteArray, Versioned<byte[]>>>();
    ArrayList<Pair<ByteArray, Versioned<byte[]>>> n1store =
        new ArrayList<Pair<ByteArray, Versioned<byte[]>>>();
    ArrayList<Pair<ByteArray, Versioned<byte[]>>> n2store =
        new ArrayList<Pair<ByteArray, Versioned<byte[]>>>();
    ArrayList<Pair<ByteArray, Versioned<byte[]>>> n3store =
        new ArrayList<Pair<ByteArray, Versioned<byte[]>>>();
    ArrayList<ByteArray> keysHashedToPar0 = new ArrayList<ByteArray>();

    // find store
    Versioned<List<StoreDefinition>> storeDefinitions =
        adminClient.metadataMgmtOps.getRemoteStoreDefList(0);
    List<StoreDefinition> storeDefinitionList = storeDefinitions.getValue();
    StoreDefinition storeDefinition = null;
    for (StoreDefinition def : storeDefinitionList) {
      if (def.getName().equals(STORE_NAME)) {
        storeDefinition = def;
        break;
      }
    }
    assertNotNull("No such store found: " + STORE_NAME, storeDefinition);

    RoutingStrategy router =
        new RoutingStrategyFactory().updateRoutingStrategy(storeDefinition, cluster);
    while (keysHashedToPar0.size() < 7) {
      // generate random key
      Map<ByteArray, byte[]> map = ServerTestUtils.createRandomKeyValuePairs(1);
      ByteArray key = map.keySet().iterator().next();
      key.get()[0] = (byte) keysHashedToPar0.size();
      Integer masterPartition = router.getMasterPartition(key.get());
      if (masterPartition == 0) {
        keysHashedToPar0.add(key);
      }
    }
    ByteArray k6 = keysHashedToPar0.get(6);
    ByteArray k5 = keysHashedToPar0.get(5);
    ByteArray k4 = keysHashedToPar0.get(4);
    ByteArray k3 = keysHashedToPar0.get(3);
    ByteArray k2 = keysHashedToPar0.get(2);
    ByteArray k1 = keysHashedToPar0.get(1);
    ByteArray k0 = keysHashedToPar0.get(0);

    // insert K6 into node 0,1,2
    Versioned<byte[]> v6 = new Versioned<byte[]>(value, vc1);
    n0store.add(Pair.create(k6, v6));
    n1store.add(Pair.create(k6, v6));
    n2store.add(Pair.create(k6, v6));

    // insert K6(conflicting value and version) into node 0,1,2,3
    Versioned<byte[]> v6ConflictEarly = new Versioned<byte[]>(value2, vc2);
    n0store.add(Pair.create(k6, v6ConflictEarly));
    n1store.add(Pair.create(k6, v6ConflictEarly));
    n2store.add(Pair.create(k6, v6ConflictEarly));
    n3store.add(Pair.create(k6, v6ConflictEarly));

    // insert K4,K5 into four nodes
    Versioned<byte[]> v5 = new Versioned<byte[]>(value, vc1);
    Versioned<byte[]> v4 = new Versioned<byte[]>(value, vc1);
    n0store.add(Pair.create(k5, v5));
    n1store.add(Pair.create(k5, v5));
    n2store.add(Pair.create(k5, v5));
    n3store.add(Pair.create(k5, v5));
    n0store.add(Pair.create(k4, v4));
    n1store.add(Pair.create(k4, v4));
    n2store.add(Pair.create(k4, v4));
    n3store.add(Pair.create(k4, v4));

    // insert K3 into node 0,1,2
    Versioned<byte[]> v3 = new Versioned<byte[]>(value, vc2);
    n0store.add(Pair.create(k3, v3));
    n1store.add(Pair.create(k3, v3));
    n2store.add(Pair.create(k3, v3));

    // insert K3(conflicting but latest version) into node 0,1,2,3
    Versioned<byte[]> v3ConflictLate = new Versioned<byte[]>(value, vc1);
    n0store.add(Pair.create(k3, v3ConflictLate));
    n1store.add(Pair.create(k3, v3ConflictLate));
    n2store.add(Pair.create(k3, v3ConflictLate));
    n3store.add(Pair.create(k3, v3ConflictLate));

    // insert K2 into node 0,1
    Versioned<byte[]> v2 = new Versioned<byte[]>(value, vc1);
    n0store.add(Pair.create(k2, v2));
    n1store.add(Pair.create(k2, v2));

    // insert K1 into node 0
    Versioned<byte[]> v1 = new Versioned<byte[]>(value, vc1);
    n0store.add(Pair.create(k1, v1));

    // insert K0(out of retention) into node 0,1,2
    Versioned<byte[]> v0 = new Versioned<byte[]>(value, vc3);
    n0store.add(Pair.create(k0, v0));
    n1store.add(Pair.create(k0, v0));
    n2store.add(Pair.create(k0, v0));

    // stream to store
    adminClient.streamingOps.updateEntries(0, STORE_NAME, n0store.iterator(), null);
    adminClient.streamingOps.updateEntries(1, STORE_NAME, n1store.iterator(), null);
    adminClient.streamingOps.updateEntries(2, STORE_NAME, n2store.iterator(), null);
    adminClient.streamingOps.updateEntries(3, STORE_NAME, n3store.iterator(), null);

    // should have FULL:2(K4,K5), LATEST_CONSISTENT:1(K3),
    // INCONSISTENT:2(K6,K2), ignored(K1,K0)
    List<String> urls = new ArrayList<String>();
    urls.add(bootstrapUrl);
    ConsistencyCheck.ComparisonType[] comparisonTypes = ConsistencyCheck.ComparisonType.values();

    for (ConsistencyCheck.ComparisonType type : comparisonTypes) {
      StringWriter sw = new StringWriter();
      ConsistencyCheck checker = new ConsistencyCheck(urls, STORE_NAME, 0, sw, type);
      checker.connect();
      Reporter reporter = checker.execute();

      assertEquals(7 - 2, reporter.numTotalKeys);
      assertEquals(3, reporter.numGoodKeys);
    }

    for (VoldemortServer vs : servers) {
      vs.stop();
    }
  }
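The test above constructs vc1 as [0:1] and vc2 as [1:1] precisely so that neither clock dominates the other, which is what makes the K6 and K3 insertions carry conflicting versions. A toy dominance check over plain maps illustrates the rule; this is an illustration only, not Voldemort's VectorClock implementation:

import java.util.HashMap;
import java.util.Map;

// Toy vector-clock dominance check: clock A dominates clock B if every entry of B
// is <= the corresponding entry of A. If neither dominates, the versions are concurrent.
public class VectorClockSketch {

  static boolean dominates(Map<Integer, Long> a, Map<Integer, Long> b) {
    for (Map.Entry<Integer, Long> entry : b.entrySet()) {
      Long counter = a.get(entry.getKey());
      if (counter == null || counter < entry.getValue()) return false;
    }
    return true;
  }

  public static void main(String[] args) {
    Map<Integer, Long> vc1 = new HashMap<Integer, Long>(); // [0:1]
    vc1.put(0, 1L);
    Map<Integer, Long> vc2 = new HashMap<Integer, Long>(); // [1:1]
    vc2.put(1, 1L);

    // Neither clock dominates the other, so the two writes are concurrent (a conflict).
    System.out.println(dominates(vc1, vc2)); // false
    System.out.println(dominates(vc2, vc1)); // false
  }
}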
Example No. 18
  /**
   * Issue 258: a 'node--1' directory is produced during store building if some reducer does not
   * get any data.
   *
   * @throws Exception
   */
  @Test
  public void testRowsLessThanNodes() throws Exception {
    Map<String, String> values = new HashMap<String, String>();
    File testDir = TestUtils.createTempDir();
    File tempDir = new File(testDir, "temp");
    File outputDir = new File(testDir, "output");

    // write test data to text file
    File inputFile = File.createTempFile("input", ".txt", testDir);
    inputFile.deleteOnExit();
    StringBuilder contents = new StringBuilder();
    for (Map.Entry<String, String> entry : values.entrySet())
      contents.append(entry.getKey() + "\t" + entry.getValue() + "\n");
    FileUtils.writeStringToFile(inputFile, contents.toString());

    String storeName = "test";
    SerializerDefinition serDef = new SerializerDefinition("string");
    Cluster cluster = ServerTestUtils.getLocalCluster(10);

    // Test backwards compatibility
    StoreDefinition def =
        new StoreDefinitionBuilder()
            .setName(storeName)
            .setType(ReadOnlyStorageConfiguration.TYPE_NAME)
            .setKeySerializer(serDef)
            .setValueSerializer(serDef)
            .setRoutingPolicy(RoutingTier.CLIENT)
            .setRoutingStrategyType(RoutingStrategyType.CONSISTENT_STRATEGY)
            .setReplicationFactor(1)
            .setPreferredReads(1)
            .setRequiredReads(1)
            .setPreferredWrites(1)
            .setRequiredWrites(1)
            .build();
    HadoopStoreBuilder builder =
        new HadoopStoreBuilder(
            new Configuration(),
            TextStoreMapper.class,
            TextInputFormat.class,
            cluster,
            def,
            64 * 1024,
            new Path(tempDir.getAbsolutePath()),
            new Path(outputDir.getAbsolutePath()),
            new Path(inputFile.getAbsolutePath()),
            CheckSumType.MD5,
            saveKeys,
            false);
    builder.build();

    // Should not produce node--1 directory + have one folder for every node
    Assert.assertEquals(cluster.getNumberOfNodes(), outputDir.listFiles().length);
    for (File f : outputDir.listFiles()) {
      Assert.assertFalse(f.toString().contains("node--1"));
    }

    // Check if individual nodes exist, along with their metadata file
    for (int nodeId = 0; nodeId < 10; nodeId++) {
      File nodeFile = new File(outputDir, "node-" + Integer.toString(nodeId));
      Assert.assertTrue(nodeFile.exists());
      Assert.assertTrue(new File(nodeFile, ".metadata").exists());
    }
  }
Example No. 19
  /**
   * Create a PipelineRoutedStore
   *
   * @param innerStores The mapping of node to client
   * @param nonblockingStores The mapping of node to nonblocking store
   * @param slopStores The stores for hints
   * @param nonblockingSlopStores The mapping of node to nonblocking slop store
   * @param cluster Cluster definition
   * @param storeDef Store definition
   */
  public PipelineRoutedStore(
      Map<Integer, Store<ByteArray, byte[], byte[]>> innerStores,
      Map<Integer, NonblockingStore> nonblockingStores,
      Map<Integer, Store<ByteArray, Slop, byte[]>> slopStores,
      Map<Integer, NonblockingStore> nonblockingSlopStores,
      Cluster cluster,
      StoreDefinition storeDef,
      FailureDetector failureDetector,
      boolean repairReads,
      TimeoutConfig timeoutConfig,
      int clientZoneId,
      boolean isJmxEnabled,
      String identifierString,
      ZoneAffinity zoneAffinity) {
    super(
        storeDef.getName(),
        innerStores,
        cluster,
        storeDef,
        repairReads,
        timeoutConfig,
        failureDetector,
        SystemTime.INSTANCE);
    if (zoneAffinity != null
        && storeDef.getZoneCountReads() != null
        && storeDef.getZoneCountReads() > 0) {
      if (zoneAffinity.isGetOpZoneAffinityEnabled()) {
        throw new IllegalArgumentException(
            "storeDef.getZoneCountReads() is non-zero while zoneAffinityGet is enabled");
      }
      if (zoneAffinity.isGetAllOpZoneAffinityEnabled()) {
        throw new IllegalArgumentException(
            "storeDef.getZoneCountReads() is non-zero while zoneAffinityGetAll is enabled");
      }
    }
    this.nonblockingSlopStores = nonblockingSlopStores;
    if (clientZoneId == Zone.UNSET_ZONE_ID) {
      Collection<Zone> availableZones = cluster.getZones();
      this.clientZone = availableZones.iterator().next();
      if (availableZones.size() > 1) {
        String format =
            "Client Zone is not specified. Default to Zone %d. The servers could be in a remote zone";
        logger.warn(String.format(format, this.clientZone.getId()));
      } else {
        if (logger.isDebugEnabled())
          logger.debug(
              String.format(
                  "Client Zone is not specified. Default to Zone %d", this.clientZone.getId()));
      }
    } else {
      this.clientZone = cluster.getZoneById(clientZoneId);
    }
    this.nonblockingStores = new ConcurrentHashMap<Integer, NonblockingStore>(nonblockingStores);
    this.slopStores = slopStores;
    if (storeDef.getRoutingStrategyType().compareTo(RoutingStrategyType.ZONE_STRATEGY) == 0) {
      zoneRoutingEnabled = true;
    } else {
      zoneRoutingEnabled = false;
    }
    if (storeDef.hasHintedHandoffStrategyType()) {
      HintedHandoffStrategyFactory factory =
          new HintedHandoffStrategyFactory(zoneRoutingEnabled, clientZone.getId());
      this.handoffStrategy = factory.updateHintedHandoffStrategy(storeDef, cluster);
    } else {
      this.handoffStrategy = null;
    }

    this.jmxEnabled = isJmxEnabled;
    this.identifierString = identifierString;
    if (this.jmxEnabled) {
      stats = new PipelineRoutedStats();
      JmxUtils.registerMbean(
          stats,
          JmxUtils.createObjectName(
              JmxUtils.getPackageName(stats.getClass()), getName() + identifierString));
    }
    if (zoneAffinity != null) {
      this.zoneAffinity = zoneAffinity;
    } else {
      this.zoneAffinity = new ZoneAffinity();
    }
  }
Example No. 20
  /**
   * Register the given engine with the storage repository
   *
   * @param engine The storage engine to register
   * @param isReadOnly Boolean indicating if this store is read-only
   * @param storeType The type of the store
   * @param storeDef store definition for the store to be registered
   */
  public void registerEngine(
      StorageEngine<ByteArray, byte[], byte[]> engine,
      boolean isReadOnly,
      String storeType,
      StoreDefinition storeDef) {
    Cluster cluster = this.metadata.getCluster();
    storeRepository.addStorageEngine(engine);

    /* Now add any store wrappers that are enabled */
    Store<ByteArray, byte[], byte[]> store = engine;

    boolean isMetadata = store.getName().compareTo(MetadataStore.METADATA_STORE_NAME) == 0;
    boolean isSlop = storeType.compareTo("slop") == 0;
    boolean isView = storeType.compareTo(ViewStorageConfiguration.TYPE_NAME) == 0;

    if (voldemortConfig.isVerboseLoggingEnabled())
      store =
          new LoggingStore<ByteArray, byte[], byte[]>(
              store, cluster.getName(), SystemTime.INSTANCE);
    if (!isSlop) {
      if (!isReadOnly && !isMetadata && !isView) {
        // wrap store to enforce retention policy
        if (voldemortConfig.isEnforceRetentionPolicyOnRead() && storeDef != null) {
          RetentionEnforcingStore retentionEnforcingStore =
              new RetentionEnforcingStore(
                  store,
                  storeDef,
                  voldemortConfig.isDeleteExpiredValuesOnRead(),
                  SystemTime.INSTANCE);
          metadata.addMetadataStoreListener(store.getName(), retentionEnforcingStore);
          store = retentionEnforcingStore;
        }

        if (voldemortConfig.isEnableRebalanceService()) {
          ProxyPutStats proxyPutStats = new ProxyPutStats(aggregatedProxyPutStats);
          if (voldemortConfig.isJmxEnabled()) {
            JmxUtils.registerMbean(
                proxyPutStats,
                JmxUtils.createObjectName(
                    "voldemort.store.rebalancing", engine.getName() + "-proxy-puts"));
          }
          store =
              new RedirectingStore(
                  store,
                  metadata,
                  storeRepository,
                  failureDetector,
                  storeFactory,
                  proxyPutWorkerPool,
                  proxyPutStats);
          if (voldemortConfig.isJmxEnabled()) {
            MBeanServer mbeanServer = ManagementFactory.getPlatformMBeanServer();
            ObjectName name = null;
            if (this.voldemortConfig.isEnableJmxClusterName())
              name =
                  JmxUtils.createObjectName(
                      cluster.getName() + "." + JmxUtils.getPackageName(RedirectingStore.class),
                      store.getName());
            else
              name =
                  JmxUtils.createObjectName(
                      JmxUtils.getPackageName(RedirectingStore.class), store.getName());

            synchronized (mbeanServer) {
              if (mbeanServer.isRegistered(name)) JmxUtils.unregisterMbean(mbeanServer, name);

              JmxUtils.registerMbean(mbeanServer, JmxUtils.createModelMBean(store), name);
            }
          }
        }
      }

      if (voldemortConfig.isMetadataCheckingEnabled() && !isMetadata) {
        store = new InvalidMetadataCheckingStore(metadata.getNodeId(), store, metadata);
      }
    }

    if (voldemortConfig.isStatTrackingEnabled()) {
      StatTrackingStore statStore = new StatTrackingStore(store, this.storeStats);
      store = statStore;
      if (voldemortConfig.isJmxEnabled()) {

        MBeanServer mbeanServer = ManagementFactory.getPlatformMBeanServer();
        ObjectName name = null;
        if (this.voldemortConfig.isEnableJmxClusterName())
          name =
              JmxUtils.createObjectName(
                  metadata.getCluster().getName() + "." + JmxUtils.getPackageName(store.getClass()),
                  store.getName());
        else
          name =
              JmxUtils.createObjectName(JmxUtils.getPackageName(store.getClass()), store.getName());

        synchronized (mbeanServer) {
          if (mbeanServer.isRegistered(name)) JmxUtils.unregisterMbean(mbeanServer, name);

          JmxUtils.registerMbean(
              mbeanServer,
              JmxUtils.createModelMBean(new StoreStatsJmx(statStore.getStats())),
              name);
        }
      }

      // Wrap everything under the rate limiting store (barring the
      // metadata store)
      if (voldemortConfig.isEnableQuotaLimiting() && !isMetadata) {
        FileBackedCachingStorageEngine quotaStore =
            (FileBackedCachingStorageEngine)
                storeRepository.getStorageEngine(
                    SystemStoreConstants.SystemStoreName.voldsys$_store_quotas.toString());
        QuotaLimitStats quotaStats = new QuotaLimitStats(this.aggregatedQuotaStats);
        QuotaLimitingStore rateLimitingStore =
            new QuotaLimitingStore(store, this.storeStats, quotaStats, quotaStore);
        if (voldemortConfig.isJmxEnabled()) {
          JmxUtils.registerMbean(
              this.aggregatedQuotaStats,
              JmxUtils.createObjectName(
                  "voldemort.store.quota", store.getName() + "-quota-limit-stats"));
        }
        store = rateLimitingStore;
      }
    }

    storeRepository.addLocalStore(store);
  }
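Both registerEngine and registerSystemEngine use the same JMX idiom: build an ObjectName (optionally prefixed with the cluster name), unregister any stale MBean under that name, and register the new one while synchronizing on the MBeanServer. A self-contained sketch of that register-or-replace idiom using only the standard javax.management API (StoreStatsMXBean and StoreStats are made up for this sketch):

import java.lang.management.ManagementFactory;
import javax.management.MBeanServer;
import javax.management.ObjectName;

// Self-contained sketch of the register-or-replace JMX idiom used above.
public class JmxRegistrationSketch {

  public interface StoreStatsMXBean {
    long getGetCount();
  }

  public static class StoreStats implements StoreStatsMXBean {
    public long getGetCount() {
      return 42L;
    }
  }

  public static void main(String[] args) throws Exception {
    MBeanServer mbeanServer = ManagementFactory.getPlatformMBeanServer();
    // A hypothetical object name; the snippets above build theirs via JmxUtils.createObjectName(...).
    ObjectName name = new ObjectName("voldemort.store.stats:type=my-store");

    // Unregister any stale bean under the same name before registering the new one,
    // synchronizing on the server as the snippets above do.
    synchronized (mbeanServer) {
      if (mbeanServer.isRegistered(name)) {
        mbeanServer.unregisterMBean(name);
      }
      mbeanServer.registerMBean(new StoreStats(), name);
    }
  }
}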
Example No. 21
  @Before
  public void setUp() throws Exception {
    cluster = getNineNodeCluster();
    storeDef =
        getStoreDef(
            STORE_NAME,
            REPLICATION_FACTOR,
            P_READS,
            R_READS,
            P_WRITES,
            R_WRITES,
            RoutingStrategyType.CONSISTENT_STRATEGY);
    for (Node node : cluster.getNodes()) {
      VoldemortException e = new UnreachableStoreException("Node down");

      InMemoryStorageEngine<ByteArray, byte[], byte[]> storageEngine =
          new InMemoryStorageEngine<ByteArray, byte[], byte[]>(STORE_NAME);
      LoggingStore<ByteArray, byte[], byte[]> loggingStore =
          new LoggingStore<ByteArray, byte[], byte[]>(storageEngine);
      subStores.put(
          node.getId(),
          new ForceFailStore<ByteArray, byte[], byte[]>(loggingStore, e, node.getId()));
    }

    setFailureDetector(subStores);

    for (Node node : cluster.getNodes()) {
      int nodeId = node.getId();
      StoreRepository storeRepo = new StoreRepository();
      storeRepo.addLocalStore(subStores.get(nodeId));

      for (int i = 0; i < NUM_NODES_TOTAL; i++) storeRepo.addNodeStore(i, subStores.get(i));

      SlopStorageEngine slopStorageEngine =
          new SlopStorageEngine(
              new InMemoryStorageEngine<ByteArray, byte[], byte[]>(SLOP_STORE_NAME), cluster);
      StorageEngine<ByteArray, Slop, byte[]> storageEngine = slopStorageEngine.asSlopStore();
      storeRepo.setSlopStore(slopStorageEngine);
      slopStores.put(nodeId, storageEngine);

      MetadataStore metadataStore =
          ServerTestUtils.createMetadataStore(cluster, Lists.newArrayList(storeDef));
      StreamingSlopPusherJob pusher =
          new StreamingSlopPusherJob(
              storeRepo,
              metadataStore,
              failureDetector,
              ServerTestUtils.createServerConfigWithDefs(
                  false,
                  nodeId,
                  TestUtils.createTempDir().getAbsolutePath(),
                  cluster,
                  Lists.newArrayList(storeDef),
                  new Properties()));
      slopPusherJobs.add(pusher);
    }

    routedStoreThreadPool = Executors.newFixedThreadPool(NUM_THREADS);
    routedStoreFactory = new RoutedStoreFactory(true, routedStoreThreadPool, 1000L);
    strategy = new RoutingStrategyFactory().updateRoutingStrategy(storeDef, cluster);
    Map<Integer, NonblockingStore> nonblockingStores = Maps.newHashMap();

    for (Map.Entry<Integer, Store<ByteArray, byte[], byte[]>> entry : subStores.entrySet())
      nonblockingStores.put(
          entry.getKey(), routedStoreFactory.toNonblockingStore(entry.getValue()));

    store =
        routedStoreFactory.create(
            cluster,
            storeDef,
            subStores,
            nonblockingStores,
            slopStores,
            false,
            Zone.DEFAULT_ZONE_ID,
            failureDetector);

    generateData();
  }
Example No. 22
  public void run() {

    // don't try to run slop pusher job when rebalancing
    if (metadataStore
        .getServerState()
        .equals(MetadataStore.VoldemortState.REBALANCING_MASTER_SERVER)) {
      logger.error("Cannot run slop pusher job since Voldemort server is rebalancing");
      return;
    }

    boolean terminatedEarly = false;
    Date startTime = new Date();
    logger.info("Started streaming slop pusher job at " + startTime);

    SlopStorageEngine slopStorageEngine = storeRepo.getSlopStore();
    ClosableIterator<Pair<ByteArray, Versioned<Slop>>> iterator = null;

    if (adminClient == null) {
      adminClient =
          new AdminClient(
              cluster,
              new AdminClientConfig()
                  .setMaxThreads(cluster.getNumberOfNodes())
                  .setMaxConnectionsPerNode(1));
    }

    if (voldemortConfig.getSlopZonesDownToTerminate() > 0) {
      // Populating the zone mapping for early termination
      zoneMapping.clear();
      for (Node n : cluster.getNodes()) {
        if (failureDetector.isAvailable(n)) {
          Set<Integer> nodes = zoneMapping.get(n.getZoneId());
          if (nodes == null) {
            nodes = Sets.newHashSet();
            zoneMapping.put(n.getZoneId(), nodes);
          }
          nodes.add(n.getId());
        }
      }

      // Check how many zones are down
      int zonesDown = 0;
      for (Zone zone : cluster.getZones()) {
        if (zoneMapping.get(zone.getId()) == null || zoneMapping.get(zone.getId()).size() == 0)
          zonesDown++;
      }

      // Terminate early
      if (voldemortConfig.getSlopZonesDownToTerminate() <= zoneMapping.size()
          && zonesDown >= voldemortConfig.getSlopZonesDownToTerminate()) {
        logger.info(
            "Completed streaming slop pusher job at "
                + startTime
                + " early because "
                + zonesDown
                + " zones are down");
        stopAdminClient();
        return;
      }
    }

    // Clearing the statistics
    AtomicLong attemptedPushes = new AtomicLong(0);
    for (Node node : cluster.getNodes()) {
      attemptedByNode.put(node.getId(), 0L);
      succeededByNode.put(node.getId(), 0L);
    }

    acquireRepairPermit();
    try {
      StorageEngine<ByteArray, Slop, byte[]> slopStore = slopStorageEngine.asSlopStore();
      iterator = slopStore.entries();

      while (iterator.hasNext()) {
        Pair<ByteArray, Versioned<Slop>> keyAndVal;
        try {
          keyAndVal = iterator.next();
          Versioned<Slop> versioned = keyAndVal.getSecond();

          // Retrieve the node
          int nodeId = versioned.getValue().getNodeId();
          Node node = cluster.getNodeById(nodeId);

          attemptedPushes.incrementAndGet();
          Long attempted = attemptedByNode.get(nodeId);
          attemptedByNode.put(nodeId, attempted + 1L);
          if (attemptedPushes.get() % 10000 == 0)
            logger.info("Attempted pushing " + attemptedPushes + " slops");

          if (logger.isTraceEnabled())
            logger.trace(
                "Pushing slop for "
                    + versioned.getValue().getNodeId()
                    + " and store  "
                    + versioned.getValue().getStoreName());

          if (failureDetector.isAvailable(node)) {
            SynchronousQueue<Versioned<Slop>> slopQueue = slopQueues.get(nodeId);
            if (slopQueue == null) {
              // No previous slop queue, add one
              slopQueue = new SynchronousQueue<Versioned<Slop>>();
              slopQueues.put(nodeId, slopQueue);
              consumerResults.add(
                  consumerExecutor.submit(new SlopConsumer(nodeId, slopQueue, slopStorageEngine)));
            }
            boolean offered =
                slopQueue.offer(
                    versioned, voldemortConfig.getClientRoutingTimeoutMs(), TimeUnit.MILLISECONDS);
            if (!offered) {
              if (logger.isDebugEnabled())
                logger.debug(
                    "No consumer appeared for slop in "
                        + voldemortConfig.getClientRoutingTimeoutMs()
                        + " ms");
            }
            readThrottler.maybeThrottle(nBytesRead(keyAndVal));
          } else {
            logger.trace(node + " declared down, won't push slop");
          }
        } catch (RejectedExecutionException e) {
          throw new VoldemortException("Ran out of threads in executor", e);
        }
      }

    } catch (InterruptedException e) {
      logger.warn("Interrupted exception", e);
      terminatedEarly = true;
    } catch (Exception e) {
      logger.error(e, e);
      terminatedEarly = true;
    } finally {
      try {
        if (iterator != null) iterator.close();
      } catch (Exception e) {
        logger.warn("Failed to close iterator cleanly as database might be closed", e);
      }

      // Adding the poison pill
      for (SynchronousQueue<Versioned<Slop>> slopQueue : slopQueues.values()) {
        try {
          slopQueue.put(END);
        } catch (InterruptedException e) {
          logger.warn("Error putting poison pill", e);
        }
      }

      for (Future result : consumerResults) {
        try {
          result.get();
        } catch (Exception e) {
          logger.warn("Exception in consumer", e);
        }
      }

      // Only update the counts if no exception took place
      if (!terminatedEarly) {
        Map<Integer, Long> outstanding =
            Maps.newHashMapWithExpectedSize(cluster.getNumberOfNodes());
        for (int nodeId : succeededByNode.keySet()) {
          logger.info(
              "Slops to node "
                  + nodeId
                  + " - Succeeded - "
                  + succeededByNode.get(nodeId)
                  + " - Attempted - "
                  + attemptedByNode.get(nodeId));
          outstanding.put(nodeId, attemptedByNode.get(nodeId) - succeededByNode.get(nodeId));
        }
        slopStorageEngine.resetStats(outstanding);
        logger.info("Completed streaming slop pusher job which started at " + startTime);
      } else {
        for (int nodeId : succeededByNode.keySet()) {
          logger.info(
              "Slops to node "
                  + nodeId
                  + " - Succeeded - "
                  + succeededByNode.get(nodeId)
                  + " - Attempted - "
                  + attemptedByNode.get(nodeId));
        }
        logger.info("Completed early streaming slop pusher job which started at " + startTime);
      }

      // Shut down the admin client so as not to waste connections
      consumerResults.clear();
      slopQueues.clear();
      stopAdminClient();
      this.repairPermits.release();
    }
  }
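The shutdown path in run() terminates each consumer by putting a sentinel END value (a "poison pill") on its SynchronousQueue and then waiting on the consumer futures. A minimal standalone sketch of that hand-off-and-poison-pill pattern in plain java.util.concurrent (none of the names below exist in the codebase):

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.SynchronousQueue;

// Standalone sketch: a producer hands items to a consumer through a SynchronousQueue
// and terminates it with a sentinel "poison pill", mirroring the structure of run() above.
public class PoisonPillSketch {

  private static final String END = "end-of-stream"; // sentinel; the real job uses a special Versioned<Slop>

  public static void main(String[] args) throws InterruptedException {
    final SynchronousQueue<String> queue = new SynchronousQueue<String>();
    ExecutorService consumerExecutor = Executors.newFixedThreadPool(1);

    consumerExecutor.submit(
        new Runnable() {
          public void run() {
            try {
              while (true) {
                String item = queue.take();
                if (item == END) break; // reference comparison identifies the sentinel
                System.out.println("consumed " + item);
              }
            } catch (InterruptedException e) {
              Thread.currentThread().interrupt();
            }
          }
        });

    for (int i = 0; i < 3; i++) {
      queue.put("slop-" + i); // blocks until the consumer takes the item (direct hand-off)
    }

    queue.put(END); // poison pill: tells the consumer to exit its loop
    consumerExecutor.shutdown();
  }
}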
Example No. 23
  /*
   * Test to validate that the client bootstraps on metadata change. First do
   * some operations to validate that the client is correctly initialized.
   * Then update the cluster.xml using the Admin Tool (which should update the
   * metadata version as well). Verify that the client bootstraps after this
   * update.
   *
   * Whether the client has automatically bootstrapped is verified by checking
   * the new bootstrap time in the client registry.
   */
  @Test
  public void testEndToEndRebootstrap() {
    try {
      // Do a sample get, put to check client is correctly initialized.
      String key = "city";
      String value = "SF";
      String bootstrapTime = "";
      String newBootstrapTime = "";
      AdminClient adminClient =
          new AdminClient(
              bootStrapUrls[0], new AdminClientConfig(), new ClientConfig(), CLIENT_ZONE_ID);

      try {
        storeClient.put(key, value);
        String received = storeClient.getValue(key);
        assertEquals(value, received);
      } catch (VoldemortException ve) {
        fail("Error in doing basic get, put");
      }

      String originalClientInfo = null;

      try {
        originalClientInfo = clientRegistryStore.getSysStore(storeClient.getClientId()).getValue();

        Properties props = new Properties();
        props.load(new ByteArrayInputStream(originalClientInfo.getBytes()));

        bootstrapTime = props.getProperty("bootstrapTime");
        assertNotNull(bootstrapTime);
      } catch (Exception e) {
        fail("Error in retrieving bootstrap time: " + e);
      }

      // Update cluster.xml metadata
      VoldemortAdminTool adminTool = new VoldemortAdminTool();
      ClusterMapper mapper = new ClusterMapper();
      for (Node node : cluster.getNodes()) {
        VoldemortAdminTool.executeSetMetadata(
            node.getId(), adminClient, CLUSTER_KEY, mapper.writeCluster(cluster));
      }

      // Wait for about 15 seconds to be sure
      try {
        Thread.sleep(15000);
      } catch (Exception e) {
        fail("Interrupted .");
      }

      // Retrieve the new client bootstrap timestamp
      String newClientInfo = null;

      try {
        newClientInfo = clientRegistryStore.getSysStore(storeClient.getClientId()).getValue();
        Properties newProps = new Properties();
        newProps.load(new ByteArrayInputStream(newClientInfo.getBytes()));
        newBootstrapTime = newProps.getProperty("bootstrapTime");
        assertNotNull(newBootstrapTime);
      } catch (Exception e) {
        fail("Error in retrieving bootstrap time: " + e);
      }

      assertFalse(bootstrapTime.equals(newBootstrapTime));
      long origTime = Long.parseLong(bootstrapTime);
      long newTime = Long.parseLong(newBootstrapTime);
      assertTrue(newTime > origTime);

    } catch (Exception e) {
      fail("Error in validating end to end client rebootstrap : " + e);
    }
  }