public void put(ByteArray key, Versioned<byte[]> value) throws PersistenceFailureException {
    StoreUtils.assertValidKey(key);
    boolean doCommit = false;
    Connection conn = null;
    PreparedStatement insert = null;
    PreparedStatement select = null;
    ResultSet results = null;
    String insertSql = "insert into " + name + " (key_, version_, value_) values (?, ?, ?)";
    String selectSql = "select key_, version_ from " + name + " where key_ = ?";
    try {
      conn = datasource.getConnection();
      conn.setAutoCommit(false);

      // check for superior versions
      select = conn.prepareStatement(selectSql);
      select.setBytes(1, key.get());
      results = select.executeQuery();
      while (results.next()) {
        byte[] thisKey = results.getBytes("key_");
        VectorClock version = new VectorClock(results.getBytes("version_"));
        Occured occured = value.getVersion().compare(version);
        if (occured == Occured.BEFORE)
          throw new ObsoleteVersionException(
              "Attempt to put version "
                  + value.getVersion()
                  + " which is superceeded by "
                  + version
                  + ".");
        else if (occured == Occured.AFTER) delete(conn, thisKey, version.toBytes());
      }

      // Okay, cool, now put the value
      insert = conn.prepareStatement(insertSql);
      insert.setBytes(1, key.get());
      VectorClock clock = (VectorClock) value.getVersion();
      insert.setBytes(2, clock.toBytes());
      insert.setBytes(3, value.getValue());
      insert.executeUpdate();
      doCommit = true;
    } catch (SQLException e) {
      if (e.getErrorCode() == MYSQL_ERR_DUP_KEY || e.getErrorCode() == MYSQL_ERR_DUP_ENTRY) {
        throw new ObsoleteVersionException("Key or value already used.");
      } else {
        throw new PersistenceFailureException("Fix me!", e);
      }
    } finally {
      if (conn != null) {
        try {
          if (doCommit) conn.commit();
          else conn.rollback();
        } catch (SQLException e) {
          // nothing useful can be done if the commit/rollback itself fails; the connection is closed below
        }
      }
      tryClose(results);
      tryClose(insert);
      tryClose(select);
      tryClose(conn);
    }
  }
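The put() above hinges on vector-clock comparison: a BEFORE result rejects the write as obsolete, AFTER deletes the older row, and a duplicate-key error from MySQL covers the rest. A minimal sketch of that comparison, assuming Voldemort's VectorClock and Occured classes (not shown in the snippet above):

  // Hedged sketch: the version-comparison semantics that put() relies on.
  // Assumes voldemort.versioning.VectorClock and voldemort.versioning.Occured.
  public static void compareExample() {
    VectorClock v1 = new VectorClock().incremented(0, System.currentTimeMillis());
    VectorClock v2 = v1.incremented(0, System.currentTimeMillis()); // one more write on node 0

    assert v1.compare(v2) == Occured.BEFORE;       // putting v1 over v2 would be obsolete
    assert v2.compare(v1) == Occured.AFTER;        // putting v2 over v1 supersedes the stored row

    VectorClock v3 = new VectorClock().incremented(1, System.currentTimeMillis()); // write on node 1 only
    assert v1.compare(v3) == Occured.CONCURRENTLY; // neither dominates; both versions are kept
  }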
  @Override
  @Test
  public void testFetchedEqualsPut() throws Exception {
    System.out.println("                    Testing Fetched equals put                    ");
    ByteArray key = getKey();
    Store<ByteArray, byte[], byte[]> store = getStore();
    VectorClock clock = getClock(1, 1, 2, 3, 3, 4);
    byte[] value = getValue();
    System.out.println("Value chosen : " + value);
    List<Versioned<byte[]>> resultList = store.get(key, null);
    assertNotNull("Null result list obtained from a get request", resultList);
    assertEquals("Store not empty at start!", 0, resultList.size());
    Versioned<byte[]> versioned = new Versioned<byte[]>(value, clock);
    store.put(key, versioned, null);

    List<Versioned<byte[]>> found = store.get(key, null);
    assertEquals("Should only be one version stored.", 1, found.size());

    System.out.println("individual bytes");
    System.out.println("input");
    printBytes(versioned.getValue());

    System.out.println("found");
    printBytes(found.get(0).getValue());
    assertTrue("Values not equal!", valuesEqual(versioned.getValue(), found.get(0).getValue()));
  }
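The getClock(1, 1, 2, 3, 3, 4) call above comes from the test base class; a hedged sketch of what that helper is assumed to do (each argument bumps the corresponding node's entry on a fresh clock):

  // Assumed shape of the getClock(...) test helper used above:
  // every node id passed in increments that node's entry once.
  public VectorClock getClock(int... nodeIds) {
    VectorClock clock = new VectorClock();
    for (int nodeId : nodeIds) {
      clock.incrementVersion(nodeId, System.currentTimeMillis());
    }
    return clock;
  }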
Example #3
  /**
   * Converts a String value to an Object, depending on the key.
   *
   * <p>StoreRepository takes only {@code StorageEngine<ByteArray, byte[]>}, so values persisted
   * on disk as Strings need to be converted back to their Object form.
   *
   * @param key the metadata key that determines how the value is parsed
   * @param value the versioned String read from the inner store
   * @return the parsed value, carrying the same version
   */
  private Versioned<Object> convertStringToObject(String key, Versioned<String> value) {
    Object valueObject = null;

    if (CLUSTER_KEY.equals(key)) {
      valueObject = clusterMapper.readCluster(new StringReader(value.getValue()));
    } else if (STORES_KEY.equals(key)) {
      valueObject = storeMapper.readStoreList(new StringReader(value.getValue()));
    } else if (SERVER_STATE_KEY.equals(key) || CLUSTER_STATE_KEY.equals(key)) {
      valueObject = VoldemortState.valueOf(value.getValue());
    } else if (NODE_ID_KEY.equals(key)) {
      valueObject = Integer.parseInt(value.getValue());
    } else if (REBALANCING_STEAL_INFO.equals(key)) {
      String valueString = value.getValue();
      if (valueString.startsWith("[")) {
        valueObject = RebalancerState.create(valueString);
      } else {
        valueObject =
            new RebalancerState(Arrays.asList(RebalancePartitionsInfo.create(valueString)));
      }
    } else {
      throw new VoldemortException(
          "Unhandled key:'" + key + "' for String to Object serialization.");
    }

    return new Versioned<Object>(valueObject, value.getVersion());
  }
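A hedged sketch of the round trip this converter forms with convertObjectToString() shown further below, using the node-id key as the simplest case (NODE_ID_KEY is assumed to be the MetadataStore constant, e.g. "node.id"):

  // Hypothetical in-class usage: String -> Object -> String for the node id.
  private void nodeIdRoundTripSketch() {
    Versioned<String> raw = new Versioned<String>("3", new VectorClock());
    Versioned<Object> parsed = convertStringToObject(NODE_ID_KEY, raw);   // Integer 3
    Versioned<String> back = convertObjectToString(NODE_ID_KEY, parsed);  // "3" again
    assert raw.getValue().equals(back.getValue());
  }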
    @Override
    protected Versioned<Slop> computeNext() {
      try {
        Versioned<Slop> head = null;
        if (!shutDown) {
          head = slopQueue.take();
          if (head.equals(END)) {
            shutDown = true;
            isComplete = true;
          } else {
            slopsDone++;
            if (slopsDone % voldemortConfig.getSlopBatchSize() == 0) {
              shutDown = true;
            }

            writeThrottler.maybeThrottle(writtenLast);
            writtenLast = slopSize(head);
            deleteBatch.add(Pair.create(head.getValue().makeKey(), head.getVersion()));
            return head;
          }
        }
        return endOfData();
      } catch (Exception e) {
        logger.error("Got an exception " + e);
        return endOfData();
      }
    }
Example #5
  /**
   * @param keyBytes metadata key name serialized as bytes, e.g. 'cluster.xml'
   * @return list of versioned values (only one for metadata), e.g. the UTF-8 bytes of the cluster
   *     XML definition
   * @throws VoldemortException
   */
  public List<Versioned<byte[]>> get(ByteArray keyBytes) throws VoldemortException {
    try {
      String key = ByteUtils.getString(keyBytes.get(), "UTF-8");

      if (METADATA_KEYS.contains(key)) {
        List<Versioned<byte[]>> values = Lists.newArrayList();

        // Get the cached value and convert to string
        Versioned<String> value = convertObjectToString(key, metadataCache.get(key));

        values.add(
            new Versioned<byte[]>(
                ByteUtils.getBytes(value.getValue(), "UTF-8"), value.getVersion()));

        return values;
      } else {
        throw new VoldemortException("Unhandled Key:" + key + " for MetadataStore get()");
      }
    } catch (Exception e) {
      throw new VoldemortException(
          "Failed to read metadata key:"
              + ByteUtils.getString(keyBytes.get(), "UTF-8")
              + " delete config/.temp config/.version directories and restart.",
          e);
    }
  }
Example #6
 public VoldemortOperation makePutOperation(String key, Versioned<byte[]> versioned) {
   return new VoldemortOperation(
       VoldemortOpCode.PUT_OP_CODE,
       key,
       versioned.getValue(),
       (VectorClock) versioned.getVersion());
 }
Example #7
  @Override
  @Test
  public void testPutVersioned() {
    VectorClock vc = new VectorClock();
    vc.incrementVersion(this.nodeId, System.currentTimeMillis());
    VectorClock initialVC = vc.clone();

    client.put("k", new Versioned<String>("v", vc));
    Versioned<String> v = client.get("k");
    assertEquals("GET should return the version set by PUT.", "v", v.getValue());

    VectorClock expected = initialVC.clone();
    expected.incrementVersion(this.nodeId, System.currentTimeMillis());
    assertEquals(
        "The version should be incremented after a put.",
        expected.getEntries(),
        ((VectorClock) v.getVersion()).getEntries());
    try {
      client.put("k", new Versioned<String>("v", initialVC));
      fail("Put of obsolete version should throw exception.");
    } catch (ObsoleteVersionException e) {
      // this is good
    }
    // PUT of a concurrent version should succeed
    client.put(
        "k",
        new Versioned<String>(
            "v2", new VectorClock().incremented(nodeId + 1, time.getMilliseconds())));
    assertEquals("GET should return the new value set by PUT.", "v2", client.getValue("k"));
    assertEquals(
        "GET should return the new version set by PUT.",
        expected.incremented(nodeId + 1, time.getMilliseconds()),
        client.get("k").getVersion());
  }
Example #8
 @Override
 public V getValue(K key, V defaultValue) {
   Versioned<V> retVal = get(key);
   if (retVal == null) {
     return defaultValue;
   } else {
     return retVal.getValue();
   }
 }
 private static void printVersioned(Versioned<Object> v) {
   if (v == null) {
     System.out.println("null");
   } else {
     System.out.print(v.getVersion());
     System.out.print(": ");
     printObject(v.getValue());
     System.out.println();
   }
 }
Example #10
  private void checkValues(Versioned<byte[]> value, List<Versioned<byte[]>> list, ByteArray key) {
    assertEquals("should return exactly one value ", 1, list.size());

    assertEquals(
        "should return the last saved version", value.getVersion(), list.get(0).getVersion());
    assertEquals(
        "should return the last saved value (key:" + ByteUtils.getString(key.get(), "UTF-8") + ")",
        new String(value.getValue()),
        new String(list.get(0).getValue()));
  }
Example #11
  /**
   * A write-through put to the inner store.
   *
   * @param keyBytes metadata key name serialized as bytes, e.g. 'cluster.xml'
   * @param valueBytes versioned byte[] value, e.g. the UTF-8 bytes of the cluster XML definition
   * @throws VoldemortException
   */
  public synchronized void put(ByteArray keyBytes, Versioned<byte[]> valueBytes)
      throws VoldemortException {
    String key = ByteUtils.getString(keyBytes.get(), "UTF-8");
    Versioned<String> value =
        new Versioned<String>(
            ByteUtils.getString(valueBytes.getValue(), "UTF-8"), valueBytes.getVersion());

    Versioned<Object> valueObject = convertStringToObject(key, value);

    this.put(key, valueObject);
  }
Example #12
  /**
   * Puts the given metadata key/value pair, converting the Object value to a String for the
   * inner store and refreshing the in-memory cache on success.
   *
   * @param key metadata key, must be one of METADATA_KEYS
   * @param value versioned Object value to store
   */
  @SuppressWarnings("unchecked")
  public void put(String key, Versioned<Object> value) {
    if (METADATA_KEYS.contains(key)) {

      // try inserting into inner store first
      putInner(key, convertObjectToString(key, value));

      // cache all keys if innerStore put succeeded
      metadataCache.put(key, value);

      // do special stuff if needed
      if (CLUSTER_KEY.equals(key)) {
        updateRoutingStrategies((Cluster) value.getValue(), getStoreDefList());
      } else if (STORES_KEY.equals(key)) {
        updateRoutingStrategies(getCluster(), (List<StoreDefinition>) value.getValue());
      }

    } else {
      throw new VoldemortException("Unhandled Key:" + key + " for MetadataStore put()");
    }
  }
  @Test
  public void testGetWithBinaryData() throws Exception {
    Store<ByteArray, byte[], byte[]> store = getStore();

    byte[] allPossibleBytes = getAllPossibleBytes();
    ByteArray key = new ByteArray(allPossibleBytes);
    VectorClock vc = getClock(0, 0);
    Versioned<byte[]> versioned = new Versioned<byte[]>(allPossibleBytes, vc);
    store.put(key, versioned, null);

    List<Versioned<byte[]>> found = store.get(key, null);
    assertEquals("Should only be one version stored.", 1, found.size());

    System.out.println("individual bytes");
    System.out.println("input");
    printBytes(versioned.getValue());

    System.out.println("found");
    printBytes(found.get(0).getValue());
    assertTrue("Values not equal!", valuesEqual(versioned.getValue(), found.get(0).getValue()));
  }
Example #14
  /**
   * Converts an Object value to a String, depending on the key.
   *
   * <p>StoreRepository takes only {@code StorageEngine<ByteArray, byte[]>}, so values need to be
   * converted to Strings for persistence on disk.
   *
   * @param key the metadata key that determines how the value is serialized
   * @param value the versioned Object value
   * @return the serialized String, carrying the same version
   */
  @SuppressWarnings("unchecked")
  private Versioned<String> convertObjectToString(String key, Versioned<Object> value) {
    String valueStr = value.getValue().toString();

    if (CLUSTER_KEY.equals(key)) {
      valueStr = clusterMapper.writeCluster((Cluster) value.getValue());
    } else if (STORES_KEY.equals(key)) {
      valueStr = storeMapper.writeStoreList((List<StoreDefinition>) value.getValue());
    } else if (REBALANCING_STEAL_INFO.equals(key)) {
      RebalancerState rebalancerState = (RebalancerState) value.getValue();
      valueStr = rebalancerState.toJsonString();
    } else if (SERVER_STATE_KEY.equals(key)
        || CLUSTER_STATE_KEY.equals(key)
        || NODE_ID_KEY.equals(key)) {
      valueStr = value.getValue().toString();
    } else {
      throw new VoldemortException(
          "Unhandled key:'" + key + "' for Object to String serialization.");
    }

    return new Versioned<String>(valueStr, value.getVersion());
  }
  public synchronized void put(String key, Versioned<String> value) throws VoldemortException {
    StoreUtils.assertValidKey(key);

    if (null == value.getValue()) {
      throw new VoldemortException("metadata cannot be null !!");
    }
    // Check for obsolete version
    File[] files = getDirectory(key).listFiles();
    if (files == null) files = new File[0]; // guard against a missing or unreadable directory
    for (File file : files) {
      if (file.getName().equals(key)) {
        VectorClock clock = readVersion(key);
        Occured occured = value.getVersion().compare(clock);
        if (occured == Occured.BEFORE) {
          throw new ObsoleteVersionException(
              "A successor version "
                  + clock
                  + " to this "
                  + value.getVersion()
                  + " exists for key "
                  + key);
        } else if (occured == Occured.CONCURRENTLY) {
          throw new ObsoleteVersionException("Concurrent Operation not allowed on Metadata.");
        }
        // Occured.AFTER falls through: the incoming version supersedes the stored one
      }
    }

    File keyFile = new File(getDirectory(key), key);
    VectorClock newClock = (VectorClock) value.getVersion();
    if (!keyFile.exists() || keyFile.delete()) {
      try {
        FileUtils.writeStringToFile(keyFile, value.getValue(), "UTF-8");
        writeVersion(key, newClock);
      } catch (IOException e) {
        throw new VoldemortException(e);
      }
    }
  }
Example #16
  public static void main(String[] args) {

    String bootstrapUrl = "tcp://localhost:6666";
    StoreClientFactory factory =
        new SocketStoreClientFactory(new ClientConfig().setBootstrapUrls(bootstrapUrl));

    // create a client that executes operations on a single store
    StoreClient<String, String> store = factory.getStoreClient("kevoree");

    store.put("pompier1", "capteurs");

    Versioned<String> data = store.get("pompier1");

    System.out.println(data.getValue() + " " + data.getVersion());
    store.delete("pompier1", data.getVersion());
  }
 /**
  * Returns the approximate size of slop to help in throttling
  *
  * @param slopVersioned The versioned slop whose size we want
  * @return Size in bytes
  */
 private int slopSize(Versioned<Slop> slopVersioned) {
   int nBytes = 0;
   Slop slop = slopVersioned.getValue();
   nBytes += slop.getKey().length();
   nBytes += ((VectorClock) slopVersioned.getVersion()).sizeInBytes();
   switch (slop.getOperation()) {
     case PUT:
       {
         nBytes += slop.getValue().length;
         break;
       }
     case DELETE:
       {
         break;
       }
     default:
       logger.error("Unknown slop operation: " + slop.getOperation());
   }
   return nBytes;
 }
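slopSize() exists to feed the write throttler seen in computeNext() above. A minimal sketch of that pattern, assuming Voldemort's EventThrottler with a bytes-per-second budget; slopsToPush and push(...) are hypothetical stand-ins:

 // Hedged sketch of how slopSize() feeds throttling, mirroring computeNext() above.
 private void pushWithThrottlingSketch(Iterable<Versioned<Slop>> slopsToPush) {
   EventThrottler writeThrottler = new EventThrottler(1024 * 1024); // assumed ~1 MB/s budget
   int writtenLast = 0;
   for (Versioned<Slop> slop : slopsToPush) {
     writeThrottler.maybeThrottle(writtenLast); // sleep if the previous write exceeded the budget
     writtenLast = slopSize(slop);
     push(slop); // hypothetical; stands in for the actual network write
   }
 }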
Example #18
  public void simple() {

    StoreClient<String, Doc> client = _factory.getStoreClient("test");

    Versioned<Doc> v = client.get("key");

    if (v == null) {
      Doc d = new Doc("name", "geir");
      d.add("x", 1);

      v = new Versioned<Doc>(d);
    }

    // update the value
    client.put("key", v);

    v = client.get("key");

    System.out.println("value : " + v.getValue());
    System.out.println("clock : " + v.getVersion());
  }
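When two clients race on the same key, the put in the example above can fail with ObsoleteVersionException; the usual pattern is to re-read and retry. A hedged sketch under the same assumed client setup:

  // Hedged sketch: get-modify-put retry loop for the store used in simple() above.
  public void putWithRetrySketch(StoreClient<String, Doc> client) {
    for (int attempt = 0; attempt < 3; attempt++) {
      Versioned<Doc> current = client.get("key");
      if (current == null) {
        current = new Versioned<Doc>(new Doc("name", "geir"));
      }
      try {
        client.put("key", current); // carries the version we just read
        return;                     // success
      } catch (ObsoleteVersionException e) {
        // a concurrent writer won the race; loop, re-read, and try again
      }
    }
  }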
Example #19
 public void put(ByteArray key, Versioned<byte[]> versioned) throws VoldemortException {
   StoreUtils.assertValidKey(key);
   SocketAndStreams sands = pool.checkout(destination);
   try {
     requestFormat.writePutRequest(
         sands.getOutputStream(),
         name,
         key,
         versioned.getValue(),
         (VectorClock) versioned.getVersion(),
         reroute);
     sands.getOutputStream().flush();
     requestFormat.readPutResponse(sands.getInputStream());
   } catch (IOException e) {
     close(sands.getSocket());
     throw new UnreachableStoreException(
         "Failure in put on " + destination + ": " + e.getMessage(), e);
   } finally {
     pool.checkin(destination, sands);
   }
 }
 static int valueSize(Versioned<byte[]> value) {
   return value.getValue().length + ((VectorClock) value.getVersion()).sizeInBytes() + 1;
 }
Example #21
  public void put(
      ByteArray key, Versioned<byte[]> versioned, byte[] transforms, long putOpTimeoutInMs)
      throws VoldemortException {

    long startTimeMs = -1;
    long startTimeNs = -1;

    if (logger.isDebugEnabled()) {
      startTimeMs = System.currentTimeMillis();
      startTimeNs = System.nanoTime();
    }

    StoreUtils.assertValidKey(key);
    PutPipelineData pipelineData = new PutPipelineData();
    if (zoneRoutingEnabled) pipelineData.setZonesRequired(storeDef.getZoneCountWrites());
    else pipelineData.setZonesRequired(null);
    pipelineData.setStartTimeNs(System.nanoTime());
    pipelineData.setStoreName(getName());
    pipelineData.setStats(stats);

    Pipeline pipeline = new Pipeline(Operation.PUT, putOpTimeoutInMs, TimeUnit.MILLISECONDS);
    pipeline.setEnableHintedHandoff(isHintedHandoffEnabled());

    HintedHandoff hintedHandoff = null;

    // Get the correct type of configure nodes action depending on the store
    // requirements
    AbstractConfigureNodes<ByteArray, Void, PutPipelineData> configureNodes =
        makeNodeConfigurationForPut(pipelineData, key);

    if (isHintedHandoffEnabled())
      hintedHandoff =
          new HintedHandoff(
              failureDetector,
              slopStores,
              nonblockingSlopStores,
              handoffStrategy,
              pipelineData.getFailedNodes(),
              putOpTimeoutInMs);

    pipeline.addEventAction(Event.STARTED, configureNodes);

    pipeline.addEventAction(
        Event.CONFIGURED,
        new PerformSerialPutRequests(
            pipelineData,
            isHintedHandoffEnabled() ? Event.RESPONSES_RECEIVED : Event.COMPLETED,
            key,
            transforms,
            failureDetector,
            innerStores,
            storeDef.getRequiredWrites(),
            versioned,
            time,
            Event.MASTER_DETERMINED));
    pipeline.addEventAction(
        Event.MASTER_DETERMINED,
        new PerformParallelPutRequests(
            pipelineData,
            Event.RESPONSES_RECEIVED,
            key,
            transforms,
            failureDetector,
            storeDef.getPreferredWrites(),
            storeDef.getRequiredWrites(),
            putOpTimeoutInMs,
            nonblockingStores,
            hintedHandoff));
    if (isHintedHandoffEnabled()) {
      pipeline.addEventAction(
          Event.ABORTED,
          new PerformPutHintedHandoff(
              pipelineData, Event.ERROR, key, versioned, transforms, hintedHandoff, time));
      pipeline.addEventAction(
          Event.RESPONSES_RECEIVED,
          new PerformPutHintedHandoff(
              pipelineData,
              Event.HANDOFF_FINISHED,
              key,
              versioned,
              transforms,
              hintedHandoff,
              time));
      pipeline.addEventAction(
          Event.HANDOFF_FINISHED,
          new IncrementClock(pipelineData, Event.COMPLETED, versioned, time));
    } else
      pipeline.addEventAction(
          Event.RESPONSES_RECEIVED,
          new IncrementClock(pipelineData, Event.COMPLETED, versioned, time));

    pipeline.addEvent(Event.STARTED);
    if (logger.isDebugEnabled()) {
      logger.debug(
          "Operation "
              + pipeline.getOperation().getSimpleName()
              + " Key "
              + ByteUtils.toHexString(key.get()));
    }
    try {
      pipeline.execute();
    } catch (VoldemortException e) {
      stats.reportException(e);
      throw e;
    }

    if (logger.isDebugEnabled()) {
      logger.debug(
          "Finished "
              + pipeline.getOperation().getSimpleName()
              + " for key "
              + ByteUtils.toHexString(key.get())
              + " keyRef: "
              + System.identityHashCode(key)
              + "; started at "
              + startTimeMs
              + " took "
              + (System.nanoTime() - startTimeNs)
              + " ns"
              + " value: "
              + versioned.getValue()
              + " (size: "
              + versioned.getValue().length
              + ")");
    }

    if (pipelineData.getFatalError() != null) throw pipelineData.getFatalError();
  }
Example #22
  /**
   * Sends nested multipart response. Outer multipart wraps all the keys requested. Each key has a
   * separate multipart for the versioned values.
   */
  @Override
  public void sendResponse(StoreStats performanceStats, boolean isFromLocalZone, long startTimeInMs)
      throws Exception {

    // multiPartKeys is the outer multipart
    MimeMultipart multiPartKeys = new MimeMultipart();
    ByteArrayOutputStream keysOutputStream = new ByteArrayOutputStream();

    for (Entry<ByteArray, List<Versioned<byte[]>>> entry : versionedResponses.entrySet()) {
      ByteArray key = entry.getKey();
      String contentLocationKey =
          "/" + this.storeName + "/" + new String(Base64.encodeBase64(key.get()));

      // Create the individual body part - for each key requested
      MimeBodyPart keyBody = new MimeBodyPart();
      try {
        // Add the right headers
        keyBody.addHeader(CONTENT_TYPE, "application/octet-stream");
        keyBody.addHeader(CONTENT_TRANSFER_ENCODING, "binary");
        keyBody.addHeader(CONTENT_LOCATION, contentLocationKey);
      } catch (MessagingException me) {
        logger.error("Exception while constructing key body headers", me);
        keysOutputStream.close();
        throw me;
      }
      // multiPartValues is the inner multipart
      MimeMultipart multiPartValues = new MimeMultipart();
      for (Versioned<byte[]> versionedValue : entry.getValue()) {

        byte[] responseValue = versionedValue.getValue();

        VectorClock vectorClock = (VectorClock) versionedValue.getVersion();
        String eTag = RestUtils.getSerializedVectorClock(vectorClock);

        // Create the individual body part - for each versioned value of
        // a key
        MimeBodyPart valueBody = new MimeBodyPart();
        try {
          // Add the right headers
          valueBody.addHeader(CONTENT_TYPE, "application/octet-stream");
          valueBody.addHeader(CONTENT_TRANSFER_ENCODING, "binary");
          valueBody.addHeader(RestMessageHeaders.X_VOLD_VECTOR_CLOCK, eTag);
          valueBody.setContent(responseValue, "application/octet-stream");

          multiPartValues.addBodyPart(valueBody);
        } catch (MessagingException me) {
          logger.error("Exception while constructing value body part", me);
          keysOutputStream.close();
          throw me;
        }
      }
      try {
        // Add the inner multipart as the content of the outer body part
        keyBody.setContent(multiPartValues);
        multiPartKeys.addBodyPart(keyBody);
      } catch (MessagingException me) {
        logger.error("Exception while constructing key body part", me);
        keysOutputStream.close();
        throw me;
      }
    }
    try {
      multiPartKeys.writeTo(keysOutputStream);
    } catch (Exception e) {
      logger.error("Exception while writing mutipart to output stream", e);
      throw e;
    }

    ChannelBuffer responseContent = ChannelBuffers.dynamicBuffer();
    responseContent.writeBytes(keysOutputStream.toByteArray());

    // Create the Response object
    HttpResponse response = new DefaultHttpResponse(HTTP_1_1, OK);

    // Set the right headers
    response.setHeader(CONTENT_TYPE, "multipart/binary");
    response.setHeader(CONTENT_TRANSFER_ENCODING, "binary");

    // Copy the data into the payload
    response.setContent(responseContent);
    response.setHeader(CONTENT_LENGTH, response.getContent().readableBytes());

    // Write the response to the Netty Channel
    this.messageEvent.getChannel().write(response);

    if (performanceStats != null && isFromLocalZone) {
      recordStats(performanceStats, startTimeInMs, Tracked.GET_ALL);
    }

    keysOutputStream.close();
  }
  public void run() {

    // don't try to run slop pusher job when rebalancing
    if (metadataStore
        .getServerState()
        .equals(MetadataStore.VoldemortState.REBALANCING_MASTER_SERVER)) {
      logger.error("Cannot run slop pusher job since Voldemort server is rebalancing");
      return;
    }

    boolean terminatedEarly = false;
    Date startTime = new Date();
    logger.info("Started streaming slop pusher job at " + startTime);

    SlopStorageEngine slopStorageEngine = storeRepo.getSlopStore();
    ClosableIterator<Pair<ByteArray, Versioned<Slop>>> iterator = null;

    if (adminClient == null) {
      adminClient =
          new AdminClient(
              cluster,
              new AdminClientConfig()
                  .setMaxThreads(cluster.getNumberOfNodes())
                  .setMaxConnectionsPerNode(1));
    }

    if (voldemortConfig.getSlopZonesDownToTerminate() > 0) {
      // Populating the zone mapping for early termination
      zoneMapping.clear();
      for (Node n : cluster.getNodes()) {
        if (failureDetector.isAvailable(n)) {
          Set<Integer> nodes = zoneMapping.get(n.getZoneId());
          if (nodes == null) {
            nodes = Sets.newHashSet();
            zoneMapping.put(n.getZoneId(), nodes);
          }
          nodes.add(n.getId());
        }
      }

      // Check how many zones are down
      int zonesDown = 0;
      for (Zone zone : cluster.getZones()) {
        if (zoneMapping.get(zone.getId()) == null || zoneMapping.get(zone.getId()).size() == 0)
          zonesDown++;
      }

      // Terminate early
      if (voldemortConfig.getSlopZonesDownToTerminate() <= zoneMapping.size()
          && zonesDown >= voldemortConfig.getSlopZonesDownToTerminate()) {
        logger.info(
            "Completed streaming slop pusher job at "
                + startTime
                + " early because "
                + zonesDown
                + " zones are down");
        stopAdminClient();
        return;
      }
    }

    // Clearing the statistics
    AtomicLong attemptedPushes = new AtomicLong(0);
    for (Node node : cluster.getNodes()) {
      attemptedByNode.put(node.getId(), 0L);
      succeededByNode.put(node.getId(), 0L);
    }

    acquireRepairPermit();
    try {
      StorageEngine<ByteArray, Slop, byte[]> slopStore = slopStorageEngine.asSlopStore();
      iterator = slopStore.entries();

      while (iterator.hasNext()) {
        Pair<ByteArray, Versioned<Slop>> keyAndVal;
        try {
          keyAndVal = iterator.next();
          Versioned<Slop> versioned = keyAndVal.getSecond();

          // Retrieve the node
          int nodeId = versioned.getValue().getNodeId();
          Node node = cluster.getNodeById(nodeId);

          attemptedPushes.incrementAndGet();
          Long attempted = attemptedByNode.get(nodeId);
          attemptedByNode.put(nodeId, attempted + 1L);
          if (attemptedPushes.get() % 10000 == 0)
            logger.info("Attempted pushing " + attemptedPushes + " slops");

          if (logger.isTraceEnabled())
            logger.trace(
                "Pushing slop for "
                    + versioned.getValue().getNodeId()
                    + " and store  "
                    + versioned.getValue().getStoreName());

          if (failureDetector.isAvailable(node)) {
            SynchronousQueue<Versioned<Slop>> slopQueue = slopQueues.get(nodeId);
            if (slopQueue == null) {
              // No previous slop queue, add one
              slopQueue = new SynchronousQueue<Versioned<Slop>>();
              slopQueues.put(nodeId, slopQueue);
              consumerResults.add(
                  consumerExecutor.submit(new SlopConsumer(nodeId, slopQueue, slopStorageEngine)));
            }
            boolean offered =
                slopQueue.offer(
                    versioned, voldemortConfig.getClientRoutingTimeoutMs(), TimeUnit.MILLISECONDS);
            if (!offered) {
              if (logger.isDebugEnabled())
                logger.debug(
                    "No consumer appeared for slop in "
                        + voldemortConfig.getClientRoutingTimeoutMs()
                        + " ms");
            }
            readThrottler.maybeThrottle(nBytesRead(keyAndVal));
          } else {
            logger.trace(node + " declared down, won't push slop");
          }
        } catch (RejectedExecutionException e) {
          throw new VoldemortException("Ran out of threads in executor", e);
        }
      }

    } catch (InterruptedException e) {
      logger.warn("Interrupted exception", e);
      terminatedEarly = true;
    } catch (Exception e) {
      logger.error(e, e);
      terminatedEarly = true;
    } finally {
      try {
        if (iterator != null) iterator.close();
      } catch (Exception e) {
        logger.warn("Failed to close iterator cleanly as database might be closed", e);
      }

      // Adding the poison pill
      for (SynchronousQueue<Versioned<Slop>> slopQueue : slopQueues.values()) {
        try {
          slopQueue.put(END);
        } catch (InterruptedException e) {
          logger.warn("Error putting poison pill", e);
        }
      }

      for (Future result : consumerResults) {
        try {
          result.get();
        } catch (Exception e) {
          logger.warn("Exception in consumer", e);
        }
      }

      // Only update the counts if no exception took place
      if (!terminatedEarly) {
        Map<Integer, Long> outstanding =
            Maps.newHashMapWithExpectedSize(cluster.getNumberOfNodes());
        for (int nodeId : succeededByNode.keySet()) {
          logger.info(
              "Slops to node "
                  + nodeId
                  + " - Succeeded - "
                  + succeededByNode.get(nodeId)
                  + " - Attempted - "
                  + attemptedByNode.get(nodeId));
          outstanding.put(nodeId, attemptedByNode.get(nodeId) - succeededByNode.get(nodeId));
        }
        slopStorageEngine.resetStats(outstanding);
        logger.info("Completed streaming slop pusher job which started at " + startTime);
      } else {
        for (int nodeId : succeededByNode.keySet()) {
          logger.info(
              "Slops to node "
                  + nodeId
                  + " - Succeeded - "
                  + succeededByNode.get(nodeId)
                  + " - Attempted - "
                  + attemptedByNode.get(nodeId));
        }
        logger.info("Completed early streaming slop pusher job which started at " + startTime);
      }

      // Shut down the admin client so as not to waste connections
      consumerResults.clear();
      slopQueues.clear();
      stopAdminClient();
      this.repairPermits.release();
    }
  }
  public VAdminProto.AddStoreResponse handleAddStore(VAdminProto.AddStoreRequest request) {
    VAdminProto.AddStoreResponse.Builder response = VAdminProto.AddStoreResponse.newBuilder();

    // don't try to add a store in the middle of rebalancing
    if (metadataStore
            .getServerState()
            .equals(MetadataStore.VoldemortState.REBALANCING_MASTER_SERVER)
        || metadataStore
            .getServerState()
            .equals(MetadataStore.VoldemortState.REBALANCING_CLUSTER)) {
      response.setError(
          ProtoUtils.encodeError(
              errorCodeMapper, new VoldemortException("Rebalancing in progress")));
      return response.build();
    }

    try {
      // adding a store requires decoding the passed in store string
      StoreDefinitionsMapper mapper = new StoreDefinitionsMapper();
      StoreDefinition def = mapper.readStore(new StringReader(request.getStoreDefinition()));

      synchronized (lock) {
        // Only allow a single store to be created at a time; otherwise we see concurrent errors
        // when writing out the stores.xml file (see ConfigurationStorageEngine.put for details).

        if (!storeRepository.hasLocalStore(def.getName())) {
          // open the store
          storageService.openStore(def);

          // update stores list in metadata store (this also has the
          // effect of updating the stores.xml file)
          List<StoreDefinition> currentStoreDefs;
          List<Versioned<byte[]>> v = metadataStore.get(MetadataStore.STORES_KEY);

          if (v.size() > 0) {
            Versioned<byte[]> currentValue = v.get(0);
            currentStoreDefs =
                mapper.readStoreList(
                    new StringReader(ByteUtils.getString(currentValue.getValue(), "UTF-8")));
          } else {
            currentStoreDefs = Lists.newArrayList();
          }
          currentStoreDefs.add(def);
          try {
            metadataStore.put(MetadataStore.STORES_KEY, currentStoreDefs);
          } catch (Exception e) {
            throw new VoldemortException(e);
          }
        } else {
          throw new StoreOperationFailureException(
              String.format("Store '%s' already exists on this server", def.getName()));
        }
      }
    } catch (VoldemortException e) {
      response.setError(ProtoUtils.encodeError(errorCodeMapper, e));
      logger.error("handleAddStore failed for request(" + request.toString() + ")", e);
    }

    return response.build();
  }