/** Populates a request object (pre-populated with defaults) from the given parser. */
 public static void parseRequest(TermVectorRequest termVectorRequest, XContentParser parser)
     throws IOException {
   XContentParser.Token token;
   String currentFieldName = null;
   List<String> fields = new ArrayList<>();
   while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
     if (token == XContentParser.Token.FIELD_NAME) {
       currentFieldName = parser.currentName();
     } else if (currentFieldName != null) {
       if (currentFieldName.equals("fields")) {
         if (token == XContentParser.Token.START_ARRAY) {
           while (parser.nextToken() != XContentParser.Token.END_ARRAY) {
             fields.add(parser.text());
           }
         } else {
            throw new ElasticsearchParseException(
                "The parameter fields must be given as an array! Use syntax: \"fields\" : [\"field1\", \"field2\", ...]");
         }
       } else if (currentFieldName.equals("offsets")) {
         termVectorRequest.offsets(parser.booleanValue());
       } else if (currentFieldName.equals("positions")) {
         termVectorRequest.positions(parser.booleanValue());
       } else if (currentFieldName.equals("payloads")) {
         termVectorRequest.payloads(parser.booleanValue());
       } else if (currentFieldName.equals("term_statistics")
           || currentFieldName.equals("termStatistics")) {
         termVectorRequest.termStatistics(parser.booleanValue());
       } else if (currentFieldName.equals("field_statistics")
           || currentFieldName.equals("fieldStatistics")) {
         termVectorRequest.fieldStatistics(parser.booleanValue());
       } else if ("_index"
           .equals(currentFieldName)) { // the following is important for multi request parsing.
         termVectorRequest.index = parser.text();
       } else if ("_type".equals(currentFieldName)) {
         termVectorRequest.type = parser.text();
       } else if ("_id".equals(currentFieldName)) {
         if (termVectorRequest.doc != null) {
            throw new ElasticsearchParseException(
                "Either \"_id\" or \"doc\" can be specified, but not both!");
         }
         termVectorRequest.id = parser.text();
       } else if ("doc".equals(currentFieldName)) {
         if (termVectorRequest.id != null) {
            throw new ElasticsearchParseException(
                "Either \"_id\" or \"doc\" can be specified, but not both!");
         }
         termVectorRequest.doc(jsonBuilder().copyCurrentStructure(parser));
       } else if ("_routing".equals(currentFieldName) || "routing".equals(currentFieldName)) {
         termVectorRequest.routing = parser.text();
       } else {
          throw new ElasticsearchParseException(
              "The parameter " + currentFieldName + " is not valid for a term vector request!");
       }
     }
   }
    if (!fields.isEmpty()) {
      termVectorRequest.selectedFields(fields.toArray(new String[fields.size()]));
    }
 }
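
  /*
   * Minimal usage sketch (illustrative, not part of the original class): parses a JSON body such
   * as {"fields": ["text"], "offsets": true, "term_statistics": true} into a pre-populated
   * request. XContentFactory/XContentType are assumed to be imported, and the exact createParser
   * call varies between Elasticsearch versions; parseExample and the index/type/id values are
   * hypothetical.
   */
  static TermVectorRequest parseExample(String jsonBody) throws IOException {
    TermVectorRequest request = new TermVectorRequest("my_index", "my_type", "1");
    XContentParser parser = XContentFactory.xContent(XContentType.JSON).createParser(jsonBody);
    try {
      // overrides the request defaults with whatever the body specifies
      parseRequest(request, parser);
    } finally {
      parser.close();
    }
    return request;
  }
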
  /**
   * Constructs a new term vector request as a copy of the given request: the index, type, id,
   * flags, preference, routing and selected fields are copied over.
   */
 public TermVectorRequest(TermVectorRequest other) {
   super(other.index());
   this.id = other.id();
   this.type = other.type();
   this.flagsEnum = other.getFlags().clone();
   this.preference = other.preference();
   this.routing = other.routing();
   if (other.selectedFields != null) {
     this.selectedFields = new HashSet<>(other.selectedFields);
   }
 }
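
  /*
   * Illustrative sketch (not part of the original class): the copy constructor takes a defensive
   * copy of the selected fields, so changing the field selection on the copy leaves the original
   * request untouched. copyWithFields is a hypothetical helper name.
   */
  static TermVectorRequest copyWithFields(TermVectorRequest original, String... fields) {
    TermVectorRequest copy = new TermVectorRequest(original);
    copy.selectedFields(fields); // replaces the selection on the copy only
    return copy;
  }
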
  /**
   * Gathers index-wide term and field statistics for the given term vector fields by executing a
   * dfs-only request and returning the aggregated result.
   */
  private AggregatedDfs getAggregatedDfs(Fields termVectorFields, TermVectorRequest request)
      throws IOException {
   DfsOnlyRequest dfsOnlyRequest =
       new DfsOnlyRequest(
           termVectorFields,
           new String[] {request.index()},
           new String[] {request.type()},
           request.selectedFields());
   DfsOnlyResponse response = dfsAction.execute(dfsOnlyRequest).actionGet();
   return response.getDfs();
 }