/**
 * The query source to execute. Prefer {@link #query(byte[])} or {@link
 * #query(org.elasticsearch.index.query.QueryBuilder)}, which avoid the extra String to
 * UTF-8 conversion this overload performs.
 */
 @Required
 public DeleteByQueryRequest query(String querySource) {
   UnicodeUtil.UTF8Result result = Unicode.fromStringAsUtf8(querySource);
   this.querySource = result.result;
   this.querySourceOffset = 0;
   this.querySourceLength = result.length;
   this.querySourceUnsafe = true;
   return this;
 }
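
 // A minimal usage sketch of the preferred builder overload (hypothetical index/field
 // values; assumes the standard QueryBuilders.termQuery helper from
 // org.elasticsearch.index.query); the builder form avoids the String -> UTF-8
 // conversion performed above:
 //
 //   DeleteByQueryRequest request = new DeleteByQueryRequest("test")
 //       .query(termQuery("user", "kimchy"));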
 @Override
 public String toString() {
   return "["
       + Arrays.toString(indices)
       + "]["
       + Arrays.toString(types)
       + "], querySource["
       + Unicode.fromBytes(querySource)
       + "]";
 }
    @SuppressWarnings({"StringEquality"})
    @Override
    public void run() {
      TermDocs termDocs = null;
      TermEnum termEnum = null;
      try {
        BloomFilter filter = BloomFilterFactory.getFilter(reader.numDocs(), 15);
        termDocs = reader.termDocs();
        termEnum = reader.terms(new Term(field));
        do {
          Term term = termEnum.term();
          if (term == null || term.field() != field) break;

          // LUCENE MONITOR: 4.0, move to use bytes!
          UnicodeUtil.UTF8Result utf8Result = Unicode.fromStringAsUtf8(term.text());
          termDocs.seek(termEnum);
          while (termDocs.next()) {
            // skip deleted docs so keys that exist only in purged documents are not added
            if (!reader.isDeleted(termDocs.doc())) {
              filter.add(utf8Result.result, 0, utf8Result.length);
              break; // the term has a live doc; adding the key once is enough for a bloom filter
            }
          }
        } while (termEnum.next());
        ConcurrentMap<String, BloomFilterEntry> fieldCache = cache.get(reader.getFieldCacheKey());
        // publish the freshly built filter only if the field is still tracked for this reader
        if (fieldCache != null && fieldCache.containsKey(field)) {
          BloomFilterEntry filterEntry = new BloomFilterEntry(reader.numDocs(), filter);
          filterEntry.loading.set(false); // mark the entry as fully loaded
          fieldCache.put(field, filterEntry);
        }
      } catch (Exception e) {
        logger.warn("failed to load bloom filter for [{}]", e, field);
      } finally {
        try {
          if (termDocs != null) {
            termDocs.close();
          }
        } catch (IOException e) {
          // ignore
        }
        try {
          if (termEnum != null) {
            termEnum.close();
          }
        } catch (IOException e) {
          // ignore
        }
      }
    }
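
     // Read-side sketch (placeholder names keyBytes/keyLength; mirrors
     // loadCurrentVersionFromIndex below): the cached filter is consulted before any
     // term lookup. Bloom filters can return false positives but never false
     // negatives, so a miss is a definitive "not here":
     //
     //   BloomFilter filter = bloomCache.filter(reader, field, true);
     //   if (!filter.isPresent(keyBytes, 0, keyLength)) {
     //       return null; // the key is definitely not in this reader
     //   }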
 private UidField.DocIdAndVersion loadCurrentVersionFromIndex(
     BloomCache bloomCache, Engine.Searcher searcher, Term uid) {
   UnicodeUtil.UTF8Result utf8 = Unicode.fromStringAsUtf8(uid.text());
   for (IndexReader reader : searcher.searcher().subReaders()) {
     BloomFilter filter = bloomCache.filter(reader, UidFieldMapper.NAME, true);
      // a bloom filter never returns false negatives, so a miss means the uid is definitely not in this reader
     if (!filter.isPresent(utf8.result, 0, utf8.length)) {
       continue;
     }
     UidField.DocIdAndVersion docIdAndVersion = UidField.loadDocIdAndVersion(reader, uid);
      // either -2 (it's there, but has no version associated with it), or an actual version
     if (docIdAndVersion.docId != -1) {
       return docIdAndVersion;
     }
   }
   return null;
 }
 public static String buildScrollId(
     String type,
     Collection<? extends SearchPhaseResult> searchPhaseResults,
     @Nullable Map<String, String> attributes)
     throws IOException {
   StringBuilder sb = new StringBuilder().append(type).append(';');
   sb.append(searchPhaseResults.size()).append(';');
   for (SearchPhaseResult searchPhaseResult : searchPhaseResults) {
     sb.append(searchPhaseResult.id())
         .append(':')
         .append(searchPhaseResult.shardTarget().nodeId())
         .append(';');
   }
   if (attributes == null) {
     sb.append("0;");
   } else {
     sb.append(attributes.size()).append(";");
     for (Map.Entry<String, String> entry : attributes.entrySet()) {
       sb.append(entry.getKey()).append(':').append(entry.getValue()).append(';');
     }
   }
   return Base64.encodeBytes(Unicode.fromStringAsBytes(sb.toString()), Base64.URL_SAFE);
 }
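
  // For illustration (hypothetical values): two shard results and one attribute are
  // serialized to the plain-text form below, which is then URL-safe Base64 encoded:
  //
  //   queryThenFetch;2;101:node_1;102:node_2;1;total_hits:42;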
 public static ParsedScrollId parseScrollId(String scrollId) {
   try {
     scrollId = Unicode.fromBytes(Base64.decode(scrollId, Base64.URL_SAFE));
   } catch (IOException e) {
     throw new ElasticSearchIllegalArgumentException("Failed to decode scrollId", e);
   }
   String[] elements = Strings.splitStringToArray(scrollId, ';');
   int index = 0;
   String type = elements[index++];
   int contextSize = Integer.parseInt(elements[index++]);
   @SuppressWarnings({"unchecked"})
   Tuple<String, Long>[] context = new Tuple[contextSize];
   for (int i = 0; i < contextSize; i++) {
     String element = elements[index++];
     int sep = element.indexOf(':');
     if (sep == -1) {
       throw new ElasticSearchIllegalArgumentException("Malformed scrollId [" + scrollId + "]");
     }
      // element is "<id>:<nodeId>"; the tuple stores (nodeId, id)
      context[i] =
          new Tuple<String, Long>(
              element.substring(sep + 1), Long.parseLong(element.substring(0, sep)));
   }
   Map<String, String> attributes;
   int attributesSize = Integer.parseInt(elements[index++]);
   if (attributesSize == 0) {
     attributes = ImmutableMap.of();
   } else {
     attributes = Maps.newHashMapWithExpectedSize(attributesSize);
     for (int i = 0; i < attributesSize; i++) {
        String element = elements[index++];
        int sep = element.indexOf(':');
        if (sep == -1) {
          throw new ElasticSearchIllegalArgumentException("Malformed scrollId [" + scrollId + "]");
        }
        attributes.put(element.substring(0, sep), element.substring(sep + 1));
     }
   }
   return new ParsedScrollId(scrollId, type, context, attributes);
 }
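
  // A minimal round-trip sketch (hypothetical values; the ParsedScrollId accessor name
  // is assumed): whatever buildScrollId emits, parseScrollId must accept unchanged.
  //
  //   String id = buildScrollId("queryThenFetch", results, null);
  //   ParsedScrollId parsed = parseScrollId(id);
  //   assert "queryThenFetch".equals(parsed.getType());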
  @Test
  public void testBroadcastOperations() throws IOException {
    startNode("server1");

    client("server1").admin().indices().prepareCreate("test").execute().actionGet(5000);

    logger.info("Running Cluster Health");
    ClusterHealthResponse clusterHealth =
        client("server1")
            .admin()
            .cluster()
            .health(clusterHealthRequest().waitForYellowStatus())
            .actionGet();
    logger.info("Done Cluster Health, status " + clusterHealth.status());
    assertThat(clusterHealth.timedOut(), equalTo(false));
    assertThat(clusterHealth.status(), equalTo(ClusterHealthStatus.YELLOW));

    client("server1")
        .index(indexRequest("test").type("type1").id("1").source(source("1", "test")))
        .actionGet();
    FlushResponse flushResponse =
        client("server1").admin().indices().flush(flushRequest("test")).actionGet();
    // a single node with the default 5 shards / 1 replica: 10 shard copies in total,
    // but only the 5 primaries are active (hence the yellow cluster health above)
    assertThat(flushResponse.totalShards(), equalTo(10));
    assertThat(flushResponse.successfulShards(), equalTo(5));
    assertThat(flushResponse.failedShards(), equalTo(0));
    client("server1")
        .index(indexRequest("test").type("type1").id("2").source(source("2", "test")))
        .actionGet();
    RefreshResponse refreshResponse =
        client("server1").admin().indices().refresh(refreshRequest("test")).actionGet();
    assertThat(refreshResponse.totalShards(), equalTo(10));
    assertThat(refreshResponse.successfulShards(), equalTo(5));
    assertThat(refreshResponse.failedShards(), equalTo(0));

    logger.info("Count");
    // check count
    for (int i = 0; i < 5; i++) {
      // test successful counts across the three broadcast threading modes (NO_THREADS here)
      CountResponse countResponse =
          client("server1")
              .count(
                  countRequest("test")
                      .query(termQuery("_type", "type1"))
                      .operationThreading(BroadcastOperationThreading.NO_THREADS))
              .actionGet();
      assertThat(countResponse.count(), equalTo(2L));
      assertThat(countResponse.totalShards(), equalTo(5));
      assertThat(countResponse.successfulShards(), equalTo(5));
      assertThat(countResponse.failedShards(), equalTo(0));
    }

    for (int i = 0; i < 5; i++) {
      CountResponse countResponse =
          client("server1")
              .count(
                  countRequest("test")
                      .query(termQuery("_type", "type1"))
                      .operationThreading(BroadcastOperationThreading.SINGLE_THREAD))
              .actionGet();
      assertThat(countResponse.count(), equalTo(2L));
      assertThat(countResponse.totalShards(), equalTo(5));
      assertThat(countResponse.successfulShards(), equalTo(5));
      assertThat(countResponse.failedShards(), equalTo(0));
    }

    for (int i = 0; i < 5; i++) {
      CountResponse countResponse =
          client("server1")
              .count(
                  countRequest("test")
                      .query(termQuery("_type", "type1"))
                      .operationThreading(BroadcastOperationThreading.THREAD_PER_SHARD))
              .actionGet();
      assertThat(countResponse.count(), equalTo(2L));
      assertThat(countResponse.totalShards(), equalTo(5));
      assertThat(countResponse.successfulShards(), equalTo(5));
      assertThat(countResponse.failedShards(), equalTo(0));
    }

    for (int i = 0; i < 5; i++) {
      // test failure handling: the missing closing quote makes the query unparsable
      CountResponse countResponse =
          client("server1")
              .count(
                  countRequest("test")
                      .query(Unicode.fromStringAsBytes("{ term : { _type : \"type1 } }")))
              .actionGet();

      assertThat(countResponse.count(), equalTo(0L));
      assertThat(countResponse.totalShards(), equalTo(5));
      assertThat(countResponse.successfulShards(), equalTo(0));
      assertThat(countResponse.failedShards(), equalTo(5));
      for (ShardOperationFailedException exp : countResponse.shardFailures()) {
        assertThat(exp.reason(), containsString("QueryParsingException"));
      }
    }
  }