@Before
  public void setup() throws IOException {
    byte[] bytes1 = {(byte) 'A', (byte) 'B'};
    byte[] bytes2 = {(byte) 'C', (byte) 'D'};
    List<StoreDefinition> stores = ClusterTestUtils.getZZZ322StoreDefs("memory");
    StoreDefinition storeDef = stores.get(0);
    cluster = ClusterTestUtils.getZZZCluster();
    ClientConfig clientConfig = new ClientConfig();
    clientConfig.setBootstrapUrls(cluster.getNodeById(0).getSocketUrl().toString());
    clientConfig.getZoneAffinity().setEnableGetOpZoneAffinity(true);
    clientConfig.setClientZoneId(clientZoneId);
    SocketStoreClientFactory socketStoreClientFactory = new SocketStoreClientFactory(clientConfig);
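    // Bring up one Voldemort server per node and seed its local store with test data.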
    for (Integer nodeId : cluster.getNodeIds()) {
      SocketStoreFactory socketStoreFactory = new ClientRequestExecutorPool(2, 10000, 100000, 1024);
      VoldemortConfig config =
          ServerTestUtils.createServerConfigWithDefs(
              true,
              nodeId,
              TestUtils.createTempDir().getAbsolutePath(),
              cluster,
              stores,
              new Properties());
      VoldemortServer vs =
          ServerTestUtils.startVoldemortServer(socketStoreFactory, config, cluster);
      vservers.put(nodeId, vs);
      socketStoreFactories.put(nodeId, socketStoreFactory);
      Store<ByteArray, byte[], byte[]> store =
          vs.getStoreRepository().getLocalStore(storeDef.getName());
      Node node = cluster.getNodeById(nodeId);

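      // Two causally related versions: version2 strictly succeeds version1 (node 0 incremented twice).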
      VectorClock version1 = new VectorClock();
      version1.incrementVersion(0, System.currentTimeMillis());
      VectorClock version2 = version1.incremented(0, System.currentTimeMillis());

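      // Client-zone replicas hold the older value (bytes1 @ version1);
      // replicas in other zones hold the newer value (bytes2 @ version2).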
      if (node.getZoneId() == clientZoneId) {
        store.put(new ByteArray(bytes1), new Versioned<byte[]>(bytes1, version1), null);
      } else {
        store.put(new ByteArray(bytes1), new Versioned<byte[]>(bytes2, version2), null);
      }
    }

    client = socketStoreClientFactory.getRawStore(storeDef.getName(), null);
  }
  @Override
  @Before
  public void setUp() throws IOException {
    cluster = ServerTestUtils.getLocalCluster(2, new int[][] {{0, 1, 2, 3}, {4, 5, 6, 7}});
    List<StoreDefinition> storeDefs = ServerTestUtils.getStoreDefs(1);

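    // Wrap an in-memory engine in a randomly failing delegate and expose it through
    // an admin server running as node 0 of the two-node cluster.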
    failingStorageEngine =
        new RandomlyFailingDelegatingStore<ByteArray, byte[], byte[]>(
            new InMemoryStorageEngine<ByteArray, byte[], byte[]>(storeDefs.get(0).getName()));
    adminServer = getAdminServer(cluster.getNodeById(0), cluster, storeDefs, failingStorageEngine);
    adminClient = ServerTestUtils.getAdminClient(cluster);
    adminServer.start();
  }
Example #3
  private void reviveNodes(Set<Integer> failedNodes) {
    for (int node : failedNodes) {
      ForceFailStore<ByteArray, byte[], byte[]> forceFailStore = getForceFailStore(node);
      forceFailStore.setFail(false);

      if (logger.isTraceEnabled()) logger.trace("Stopped failing requests to " + node);
    }

    while (!failedNodes.isEmpty()) {
      // Remove nodes through an explicit iterator; removing from the set inside a
      // for-each loop over the same set risks a ConcurrentModificationException.
      Iterator<Integer> nodeIterator = failedNodes.iterator();
      while (nodeIterator.hasNext()) {
        int node = nodeIterator.next();
        if (failureDetector.isAvailable(cluster.getNodeById(node))) nodeIterator.remove();
      }
    }
  }
  @Before
  public void setUp() throws Exception {
    final int numServers = 2;
    servers = new VoldemortServer[numServers];
    int[][] partitionMap = {{0, 1, 2, 3}, {4, 5, 6, 7}};
    cluster =
        ServerTestUtils.startVoldemortCluster(
            numServers,
            servers,
            partitionMap,
            socketStoreFactory,
            true, // useNio
            null,
            storesXmlfile,
            new Properties());

    socketUrl = servers[0].getIdentityNode().getSocketUrl().toString();
    bootStrapUrls = new String[1];
    bootStrapUrls[0] = socketUrl;

    Node node = cluster.getNodeById(0);
    String bootstrapUrl = "tcp://" + node.getHost() + ":" + node.getSocketPort();
    ClientConfig clientConfig = new ClientConfig();
    clientConfig.setClientRegistryUpdateIntervalInSecs(5);
    clientConfig.setAsyncMetadataRefreshInMs(5000);
    clientConfig.setBootstrapUrls(bootstrapUrl);
    SocketStoreClientFactory storeClientFactory = new SocketStoreClientFactory(clientConfig);

    storeClient =
        new ZenStoreClient<String, String>(
            STORE_NAME,
            null,
            storeClientFactory,
            3,
            clientConfig.getClientContextName(),
            0,
            clientConfig);

    SystemStoreClientFactory<String, String> systemStoreFactory =
        new SystemStoreClientFactory<String, String>(clientConfig);

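    // Handles to two system stores: metadata version persistence and the client registry.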
    sysStoreVersion =
        systemStoreFactory.createSystemStore(
            SystemStoreConstants.SystemStoreName.voldsys$_metadata_version_persistence.name());
    clientRegistryStore =
        systemStoreFactory.createSystemStore(
            SystemStoreConstants.SystemStoreName.voldsys$_client_registry.name());
  }
  @Override
  @Before
  public void setUp() {
    logger.info(" Initial SEED used for random number generator: " + TestUtils.SEED);
    final int numServers = 1;
    this.nodeId = 0;
    servers = new VoldemortServer[numServers];
    try {

      // Setup the cluster
      Properties props = new Properties();
      props.setProperty("rest.enable", "true");
      props.setProperty("http.enable", "true");

      Cluster customCluster = clusterMapper.readCluster(new FileReader(clusterXmlFile), false);

      cluster =
          ServerTestUtils.startVoldemortCluster(
              servers, null, clusterXmlFile, storesXmlfile, props, customCluster);

    } catch (IOException e) {
      fail("Failure to setup the cluster");
    }

    // Creating R2Store
    RESTClientConfig restClientConfig = new RESTClientConfig();
    restClientConfig
        .setHttpBootstrapURL("http://localhost:" + cluster.getNodeById(0).getRestPort())
        .setTimeoutMs(10000, TimeUnit.MILLISECONDS)
        .setMaxR2ConnectionPoolSize(100);
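    // Build an R2 transport client whose connection pool size matches the REST client config.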
    clientFactory = new HttpClientFactory();
    Map<String, String> properties = new HashMap<String, String>();
    properties.put(
        HttpClientFactory.POOL_SIZE_KEY,
        Integer.toString(restClientConfig.getMaxR2ConnectionPoolSize()));
    TransportClient transportClient = clientFactory.getClient(properties);
    R2Store r2Store =
        new R2Store(
            STORE_NAME,
            restClientConfig.getHttpBootstrapURL(),
            "0",
            transportClient,
            restClientConfig,
            0);
    store = r2Store;
  }
Example #6
  @Test
  public void testAddStore() throws Exception {
    AdminClient adminClient = getAdminClient();

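    // Define a new in-memory store (single replica, client-side routing) and register it
    // through the admin client.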
    StoreDefinition definition =
        new StoreDefinitionBuilder()
            .setName("updateTest")
            .setType(InMemoryStorageConfiguration.TYPE_NAME)
            .setKeySerializer(new SerializerDefinition("string"))
            .setValueSerializer(new SerializerDefinition("string"))
            .setRoutingPolicy(RoutingTier.CLIENT)
            .setRoutingStrategyType(RoutingStrategyType.CONSISTENT_STRATEGY)
            .setReplicationFactor(1)
            .setPreferredReads(1)
            .setRequiredReads(1)
            .setPreferredWrites(1)
            .setRequiredWrites(1)
            .build();
    adminClient.addStore(definition);

    // now test the store
    StoreClientFactory factory =
        new SocketStoreClientFactory(
            new ClientConfig().setBootstrapUrls(cluster.getNodeById(0).getSocketUrl().toString()));

    StoreClient<Object, Object> client = factory.getStoreClient("updateTest");
    client.put("abc", "123");
    String s = (String) client.get("abc").getValue();
    assertEquals(s, "123");

    // test again with an unknown store
    try {
      client = factory.getStoreClient("updateTest2");
      client.put("abc", "123");
      s = (String) client.get("abc").getValue();
      assertEquals(s, "123");
      fail("Should have received bootstrap failure exception");
    } catch (Exception e) {
      if (!(e instanceof BootstrapFailureException)) throw e;
    }

    // make sure that the store list we get back from AdminClient contains the new store
    Versioned<List<StoreDefinition>> list = adminClient.getRemoteStoreDefList(0);
    assertTrue(list.getValue().contains(definition));
  }
Example #7
  private List<Integer> getPartitions(int nodeId) {
    Cluster cluster = adminClient.getAdminClientCluster();
    Node node = cluster.getNodeById(nodeId);
    return node.getPartitionIds();
  }
  @Test
  public void testOnePartitionEndToEndBasedOnVersion() throws Exception {
    long now = System.currentTimeMillis();

    // setup four nodes with one store and one partition
    final SocketStoreFactory socketStoreFactory =
        new ClientRequestExecutorPool(2, 10000, 100000, 32 * 1024);
    VoldemortServer[] servers = new VoldemortServer[4];
    int[][] partitionMap = {{0}, {1}, {2}, {3}};
    Cluster cluster =
        ServerTestUtils.startVoldemortCluster(
            4, servers, partitionMap, socketStoreFactory, true, null, STORES_XML, new Properties());

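    // Bootstrap an AdminClient against node 0; it is used below to stream the test data
    // and to read the store definitions.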
    Node node = cluster.getNodeById(0);
    String bootstrapUrl = "tcp://" + node.getHost() + ":" + node.getSocketPort();
    AdminClient adminClient = new AdminClient(bootstrapUrl);

    byte[] value = {1, 2, 3, 4, 5, 6, 7, 8, 9};
    byte[] value2 = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9};

    // make versions
    VectorClock vc1 = new VectorClock();
    VectorClock vc2 = new VectorClock();
    VectorClock vc3 = new VectorClock();
    vc1.incrementVersion(0, now); // [0:1]
    vc2.incrementVersion(1, now - 5000); // [1:1]
    vc3.incrementVersion(0, now - 89000000); // [0:1], over a day old

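    // Per-node lists of key/value pairs to stream into nodes 0-3, plus the keys that
    // hash to partition 0.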
    ArrayList<Pair<ByteArray, Versioned<byte[]>>> n0store =
        new ArrayList<Pair<ByteArray, Versioned<byte[]>>>();
    ArrayList<Pair<ByteArray, Versioned<byte[]>>> n1store =
        new ArrayList<Pair<ByteArray, Versioned<byte[]>>>();
    ArrayList<Pair<ByteArray, Versioned<byte[]>>> n2store =
        new ArrayList<Pair<ByteArray, Versioned<byte[]>>>();
    ArrayList<Pair<ByteArray, Versioned<byte[]>>> n3store =
        new ArrayList<Pair<ByteArray, Versioned<byte[]>>>();
    ArrayList<ByteArray> keysHashedToPar0 = new ArrayList<ByteArray>();

    // find store
    Versioned<List<StoreDefinition>> storeDefinitions =
        adminClient.metadataMgmtOps.getRemoteStoreDefList(0);
    List<StoreDefinition> storeDefinitionList = storeDefinitions.getValue();
    StoreDefinition storeDefinition = null;
    for (StoreDefinition def : storeDefinitionList) {
      if (def.getName().equals(STORE_NAME)) {
        storeDefinition = def;
        break;
      }
    }
    assertNotNull("No such store found: " + STORE_NAME, storeDefinition);

    RoutingStrategy router =
        new RoutingStrategyFactory().updateRoutingStrategy(storeDefinition, cluster);
    while (keysHashedToPar0.size() < 7) {
      // generate a random key and keep it only if it hashes to partition 0
      Map<ByteArray, byte[]> map = ServerTestUtils.createRandomKeyValuePairs(1);
      ByteArray key = map.keySet().iterator().next();
      key.get()[0] = (byte) keysHashedToPar0.size();
      Integer masterPartition = router.getMasterPartition(key.get());
      if (masterPartition == 0) {
        keysHashedToPar0.add(key);
      }
    }
    ByteArray k6 = keysHashedToPar0.get(6);
    ByteArray k5 = keysHashedToPar0.get(5);
    ByteArray k4 = keysHashedToPar0.get(4);
    ByteArray k3 = keysHashedToPar0.get(3);
    ByteArray k2 = keysHashedToPar0.get(2);
    ByteArray k1 = keysHashedToPar0.get(1);
    ByteArray k0 = keysHashedToPar0.get(0);

    // insert K6 into node 0,1,2
    Versioned<byte[]> v6 = new Versioned<byte[]>(value, vc1);
    n0store.add(Pair.create(k6, v6));
    n1store.add(Pair.create(k6, v6));
    n2store.add(Pair.create(k6, v6));

    // insert K6(conflicting value and version) into node 0,1,2,3
    Versioned<byte[]> v6ConflictEarly = new Versioned<byte[]>(value2, vc2);
    n0store.add(Pair.create(k6, v6ConflictEarly));
    n1store.add(Pair.create(k6, v6ConflictEarly));
    n2store.add(Pair.create(k6, v6ConflictEarly));
    n3store.add(Pair.create(k6, v6ConflictEarly));

    // insert K4,K5 into four nodes
    Versioned<byte[]> v5 = new Versioned<byte[]>(value, vc1);
    Versioned<byte[]> v4 = new Versioned<byte[]>(value, vc1);
    n0store.add(Pair.create(k5, v5));
    n1store.add(Pair.create(k5, v5));
    n2store.add(Pair.create(k5, v5));
    n3store.add(Pair.create(k5, v5));
    n0store.add(Pair.create(k4, v4));
    n1store.add(Pair.create(k4, v4));
    n2store.add(Pair.create(k4, v4));
    n3store.add(Pair.create(k4, v4));

    // insert K3 into node 0,1,2
    Versioned<byte[]> v3 = new Versioned<byte[]>(value, vc2);
    n0store.add(Pair.create(k3, v3));
    n1store.add(Pair.create(k3, v3));
    n2store.add(Pair.create(k3, v3));

    // insert K3(conflicting but latest version) into node 0,1,2,3
    Versioned<byte[]> v3ConflictLate = new Versioned<byte[]>(value, vc1);
    n0store.add(Pair.create(k3, v3ConflictLate));
    n1store.add(Pair.create(k3, v3ConflictLate));
    n2store.add(Pair.create(k3, v3ConflictLate));
    n3store.add(Pair.create(k3, v3ConflictLate));

    // insert K2 into node 0,1
    Versioned<byte[]> v2 = new Versioned<byte[]>(value, vc1);
    n0store.add(Pair.create(k2, v2));
    n1store.add(Pair.create(k2, v2));

    // insert K1 into node 0
    Versioned<byte[]> v1 = new Versioned<byte[]>(value, vc1);
    n0store.add(Pair.create(k1, v1));

    // insert K0(out of retention) into node 0,1,2
    Versioned<byte[]> v0 = new Versioned<byte[]>(value, vc3);
    n0store.add(Pair.create(k0, v0));
    n1store.add(Pair.create(k0, v0));
    n2store.add(Pair.create(k0, v0));

    // stream to store
    adminClient.streamingOps.updateEntries(0, STORE_NAME, n0store.iterator(), null);
    adminClient.streamingOps.updateEntries(1, STORE_NAME, n1store.iterator(), null);
    adminClient.streamingOps.updateEntries(2, STORE_NAME, n2store.iterator(), null);
    adminClient.streamingOps.updateEntries(3, STORE_NAME, n3store.iterator(), null);

    // should have FULL:2(K4,K5), LATEST_CONSISTENT:1(K3),
    // INCONSISTENT:2(K6,K2), ignored(K1,K0)
    List<String> urls = new ArrayList<String>();
    urls.add(bootstrapUrl);
    ConsistencyCheck.ComparisonType[] comparisonTypes = ConsistencyCheck.ComparisonType.values();

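    // The same expected key counts should hold for every available comparison type.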
    for (ConsistencyCheck.ComparisonType type : comparisonTypes) {
      StringWriter sw = new StringWriter();
      ConsistencyCheck checker = new ConsistencyCheck(urls, STORE_NAME, 0, sw, type);
      Reporter reporter = null;
      checker.connect();
      reporter = checker.execute();

      assertEquals(7 - 2, reporter.numTotalKeys);
      assertEquals(3, reporter.numGoodKeys);
    }

    for (VoldemortServer vs : servers) {
      vs.stop();
    }
  }
  public void run() {

    // don't try to run slop pusher job when rebalancing
    if (metadataStore
        .getServerState()
        .equals(MetadataStore.VoldemortState.REBALANCING_MASTER_SERVER)) {
      logger.error("Cannot run slop pusher job since Voldemort server is rebalancing");
      return;
    }

    boolean terminatedEarly = false;
    Date startTime = new Date();
    logger.info("Started streaming slop pusher job at " + startTime);

    SlopStorageEngine slopStorageEngine = storeRepo.getSlopStore();
    ClosableIterator<Pair<ByteArray, Versioned<Slop>>> iterator = null;

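    // Lazily create an admin client limited to one connection per node.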
    if (adminClient == null) {
      adminClient =
          new AdminClient(
              cluster,
              new AdminClientConfig()
                  .setMaxThreads(cluster.getNumberOfNodes())
                  .setMaxConnectionsPerNode(1));
    }

    if (voldemortConfig.getSlopZonesDownToTerminate() > 0) {
      // Populating the zone mapping for early termination
      zoneMapping.clear();
      for (Node n : cluster.getNodes()) {
        if (failureDetector.isAvailable(n)) {
          Set<Integer> nodes = zoneMapping.get(n.getZoneId());
          if (nodes == null) {
            nodes = Sets.newHashSet();
            zoneMapping.put(n.getZoneId(), nodes);
          }
          nodes.add(n.getId());
        }
      }

      // Check how many zones are down
      int zonesDown = 0;
      for (Zone zone : cluster.getZones()) {
        if (zoneMapping.get(zone.getId()) == null || zoneMapping.get(zone.getId()).size() == 0)
          zonesDown++;
      }

      // Terminate early
      if (voldemortConfig.getSlopZonesDownToTerminate() <= zoneMapping.size()
          && zonesDown >= voldemortConfig.getSlopZonesDownToTerminate()) {
        logger.info(
            "Completed streaming slop pusher job at "
                + startTime
                + " early because "
                + zonesDown
                + " zones are down");
        stopAdminClient();
        return;
      }
    }

    // Clearing the statistics
    AtomicLong attemptedPushes = new AtomicLong(0);
    for (Node node : cluster.getNodes()) {
      attemptedByNode.put(node.getId(), 0L);
      succeededByNode.put(node.getId(), 0L);
    }

    acquireRepairPermit();
    try {
      StorageEngine<ByteArray, Slop, byte[]> slopStore = slopStorageEngine.asSlopStore();
      iterator = slopStore.entries();

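      // Drain the slop store, handing each entry to a per-node consumer through a
      // synchronous queue; consumers are created on demand.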
      while (iterator.hasNext()) {
        Pair<ByteArray, Versioned<Slop>> keyAndVal;
        try {
          keyAndVal = iterator.next();
          Versioned<Slop> versioned = keyAndVal.getSecond();

          // Retrieve the node
          int nodeId = versioned.getValue().getNodeId();
          Node node = cluster.getNodeById(nodeId);

          attemptedPushes.incrementAndGet();
          Long attempted = attemptedByNode.get(nodeId);
          attemptedByNode.put(nodeId, attempted + 1L);
          if (attemptedPushes.get() % 10000 == 0)
            logger.info("Attempted pushing " + attemptedPushes + " slops");

          if (logger.isTraceEnabled())
            logger.trace(
                "Pushing slop for "
                    + versioned.getValue().getNodeId()
                    + " and store  "
                    + versioned.getValue().getStoreName());

          if (failureDetector.isAvailable(node)) {
            SynchronousQueue<Versioned<Slop>> slopQueue = slopQueues.get(nodeId);
            if (slopQueue == null) {
              // No previous slop queue, add one
              slopQueue = new SynchronousQueue<Versioned<Slop>>();
              slopQueues.put(nodeId, slopQueue);
              consumerResults.add(
                  consumerExecutor.submit(new SlopConsumer(nodeId, slopQueue, slopStorageEngine)));
            }
            boolean offered =
                slopQueue.offer(
                    versioned, voldemortConfig.getClientRoutingTimeoutMs(), TimeUnit.MILLISECONDS);
            if (!offered) {
              if (logger.isDebugEnabled())
                logger.debug(
                    "No consumer appeared for slop in "
                        + voldemortConfig.getClientRoutingTimeoutMs()
                        + " ms");
            }
            readThrottler.maybeThrottle(nBytesRead(keyAndVal));
          } else {
            logger.trace(node + " declared down, won't push slop");
          }
        } catch (RejectedExecutionException e) {
          throw new VoldemortException("Ran out of threads in executor", e);
        }
      }

    } catch (InterruptedException e) {
      logger.warn("Interrupted exception", e);
      terminatedEarly = true;
    } catch (Exception e) {
      logger.error(e, e);
      terminatedEarly = true;
    } finally {
      try {
        if (iterator != null) iterator.close();
      } catch (Exception e) {
        logger.warn("Failed to close iterator cleanly as database might be closed", e);
      }

      // Adding the poison pill
      for (SynchronousQueue<Versioned<Slop>> slopQueue : slopQueues.values()) {
        try {
          slopQueue.put(END);
        } catch (InterruptedException e) {
          logger.warn("Error putting poison pill", e);
        }
      }

      for (Future result : consumerResults) {
        try {
          result.get();
        } catch (Exception e) {
          logger.warn("Exception in consumer", e);
        }
      }

      // Only update the counts if no exception occurred
      if (!terminatedEarly) {
        Map<Integer, Long> outstanding =
            Maps.newHashMapWithExpectedSize(cluster.getNumberOfNodes());
        for (int nodeId : succeededByNode.keySet()) {
          logger.info(
              "Slops to node "
                  + nodeId
                  + " - Succeeded - "
                  + succeededByNode.get(nodeId)
                  + " - Attempted - "
                  + attemptedByNode.get(nodeId));
          outstanding.put(nodeId, attemptedByNode.get(nodeId) - succeededByNode.get(nodeId));
        }
        slopStorageEngine.resetStats(outstanding);
        logger.info("Completed streaming slop pusher job which started at " + startTime);
      } else {
        for (int nodeId : succeededByNode.keySet()) {
          logger.info(
              "Slops to node "
                  + nodeId
                  + " - Succeeded - "
                  + succeededByNode.get(nodeId)
                  + " - Attempted - "
                  + attemptedByNode.get(nodeId));
        }
        logger.info("Completed early streaming slop pusher job which started at " + startTime);
      }

      // Shut down the admin client so as not to waste connections
      consumerResults.clear();
      slopQueues.clear();
      stopAdminClient();
      this.repairPermits.release();
    }
  }