/**
  * Returns the name of a randomly selected field that exists in the document provided as an
  * argument. Names of nested fields are returned in dotted notation, e.g. {@code foo.bar}.
  */
 public static String randomExistingFieldName(Random random, IngestDocument ingestDocument) {
   Map<String, Object> source = new TreeMap<>(ingestDocument.getSourceAndMetadata());
   Map.Entry<String, Object> randomEntry = RandomPicks.randomFrom(random, source.entrySet());
   String key = randomEntry.getKey();
   while (randomEntry.getValue() instanceof Map) {
     @SuppressWarnings("unchecked")
     Map<String, Object> map = (Map<String, Object>) randomEntry.getValue();
     Map<String, Object> treeMap = new TreeMap<>(map);
     randomEntry = RandomPicks.randomFrom(random, treeMap.entrySet());
     key += "." + randomEntry.getKey();
   }
   assert ingestDocument.getFieldValue(key, Object.class) != null;
   return key;
 }
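 /*
  * For reference, RandomPicks.randomFrom (from the randomizedtesting library used throughout
  * these examples) selects one element uniformly at random from an array, a List, or any
  * Collection. A minimal standalone usage sketch; the class and field names below are
  * illustrative and not part of the original tests.
  */
 import java.util.Arrays;
 import java.util.List;
 import java.util.Random;

 import com.carrotsearch.randomizedtesting.generators.RandomPicks;

 class RandomPicksSketch {
   public static void main(String[] args) {
     Random random = new Random(42); // fixed seed so the pick is reproducible
     List<String> fields = Arrays.asList("title", "body", "tags");
     // Every element has the same probability of being chosen.
     String field = RandomPicks.randomFrom(random, fields);
     System.out.println(field);
   }
 }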
 public void testBuiltInAnalyzersAreCached() throws IOException {
   Settings settings =
       Settings.builder()
           .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString())
           .build();
   Settings indexSettings =
       Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT).build();
   IndexSettings idxSettings = IndexSettingsModule.newIndexSettings("index", indexSettings);
   IndexAnalyzers indexAnalyzers =
       new AnalysisRegistry(
               new Environment(settings),
               emptyMap(),
               emptyMap(),
               emptyMap(),
               emptyMap(),
               emptyMap())
           .build(idxSettings);
   IndexAnalyzers otherIndexAnalyzers =
       new AnalysisRegistry(
               new Environment(settings),
               emptyMap(),
               emptyMap(),
               emptyMap(),
               emptyMap(),
               emptyMap())
           .build(idxSettings);
   final int numIters = randomIntBetween(5, 20);
   for (int i = 0; i < numIters; i++) {
     PreBuiltAnalyzers preBuiltAnalyzers =
         RandomPicks.randomFrom(random(), PreBuiltAnalyzers.values());
     assertSame(
         indexAnalyzers.get(preBuiltAnalyzers.name()),
         otherIndexAnalyzers.get(preBuiltAnalyzers.name()));
   }
 }
 private String randomAnalyzer() {
   while (true) {
     PreBuiltAnalyzers preBuiltAnalyzers =
         RandomPicks.randomFrom(getRandom(), PreBuiltAnalyzers.values());
     if (preBuiltAnalyzers == PreBuiltAnalyzers.SORANI
         && compatibilityVersion().before(Version.V_1_3_0)) {
       continue; // SORANI was added in 1.3.0
     }
     return preBuiltAnalyzers.name().toLowerCase(Locale.ROOT);
   }
 }
 /**
  * Returns a random source containing a random number of fields, objects and arrays, with a
  * maximum depth of 5.
  *
  * @param random Random generator
  */
 public static BytesReference randomSource(Random random) {
   // the source can be stored in any format and eventually converted when retrieved depending on
   // the format of the response
   XContentType xContentType = RandomPicks.randomFrom(random, XContentType.values());
   try (XContentBuilder builder = XContentFactory.contentBuilder(xContentType)) {
     builder.startObject();
     addFields(random, builder, 0);
     builder.endObject();
     return builder.bytes();
   } catch (IOException e) {
     throw new RuntimeException(e);
   }
 }
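 /*
  * The addFields helper called above is not shown on this page. A hypothetical sketch of such a
  * recursive generator, reusing the randomizedtesting helpers (RandomInts, RandomStrings) that
  * appear elsewhere in these examples; the real implementation may differ.
  */
 private static void addFields(Random random, XContentBuilder builder, int currentDepth)
     throws IOException {
   int numFields = RandomInts.randomIntBetween(random, 1, 5);
   for (int i = 0; i < numFields; i++) {
     String fieldName = "field" + currentDepth + "_" + i;
     int choice = random.nextInt(3);
     if (choice == 0 && currentDepth < 5) {
       // Nested object: recurse until the maximum depth is reached.
       builder.startObject(fieldName);
       addFields(random, builder, currentDepth + 1);
       builder.endObject();
     } else if (choice == 1) {
       // Array of a few random string values.
       builder.startArray(fieldName);
       int numItems = RandomInts.randomIntBetween(random, 1, 3);
       for (int j = 0; j < numItems; j++) {
         builder.value(RandomStrings.randomAsciiOfLength(random, 10));
       }
       builder.endArray();
     } else {
       // Plain leaf value.
       builder.field(fieldName, RandomStrings.randomAsciiOfLength(random, 10));
     }
   }
 }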
  public void testDuelGlobalOrdinals() throws Exception {
    Random random = getRandom();
    final int numDocs = scaledRandomIntBetween(10, 1000);
    final int numValues = scaledRandomIntBetween(10, 500);
    final String[] values = new String[numValues];
    for (int i = 0; i < numValues; ++i) {
       values[i] = RandomStrings.randomAsciiOfLength(random, 10);
    }
    for (int i = 0; i < numDocs; i++) {
      Document d = new Document();
      final int numVals = randomInt(3);
      for (int j = 0; j < numVals; ++j) {
        final String value = RandomPicks.randomFrom(random, Arrays.asList(values));
        d.add(new StringField("string", value, Field.Store.NO));
        d.add(new SortedSetDocValuesField("bytes", new BytesRef(value)));
      }
      writer.addDocument(d);
      if (randomInt(10) == 0) {
        refreshReader();
      }
    }
    refreshReader();

     Map<FieldDataType, Type> typeMap = new HashMap<>();
    typeMap.put(
        new FieldDataType("string", ImmutableSettings.builder().put("format", "fst")), Type.Bytes);
    typeMap.put(
        new FieldDataType("string", ImmutableSettings.builder().put("format", "paged_bytes")),
        Type.Bytes);
    typeMap.put(
        new FieldDataType("string", ImmutableSettings.builder().put("format", "doc_values")),
        Type.Bytes);

    for (Map.Entry<FieldDataType, Type> entry : typeMap.entrySet()) {
      ifdService.clear();
      IndexOrdinalsFieldData fieldData =
          getForField(entry.getKey(), entry.getValue().name().toLowerCase(Locale.ROOT));
      RandomAccessOrds left = fieldData.load(readerContext).getOrdinalsValues();
      fieldData.clear();
      RandomAccessOrds right =
          fieldData
              .loadGlobal(topLevelReader)
              .load(topLevelReader.leaves().get(0))
              .getOrdinalsValues();
      assertEquals(left.getValueCount(), right.getValueCount());
      for (long ord = 0; ord < left.getValueCount(); ++ord) {
        assertEquals(left.lookupOrd(ord), right.lookupOrd(ord));
      }
    }
  }
 /** @return a {@link HasChildQueryBuilder} with random values all over the place */
 @Override
 protected HasChildQueryBuilder doCreateTestQueryBuilder() {
   int min = randomIntBetween(0, Integer.MAX_VALUE / 2);
   int max = randomIntBetween(min, Integer.MAX_VALUE);
   InnerHitsBuilder.InnerHit innerHit =
       new InnerHitsBuilder.InnerHit().setSize(100).addSort(STRING_FIELD_NAME, SortOrder.ASC);
   return new HasChildQueryBuilder(
       CHILD_TYPE,
       RandomQueryBuilder.createQuery(random()),
       max,
       min,
       RandomPicks.randomFrom(random(), ScoreMode.values()),
       randomBoolean() ? null : new QueryInnerHits("inner_hits_name", innerHit));
 }
 private FsDirectoryService randomDirectoryService(IndexStore indexStore, ShardPath path) {
   final IndexSettings indexSettings = indexStore.getIndexSettings();
   final IndexMetaData build =
       IndexMetaData.builder(indexSettings.getIndexMetaData())
           .settings(
               Settings.builder()
                   .put(indexSettings.getSettings())
                   .put(
                       IndexStoreModule.STORE_TYPE,
                       RandomPicks.randomFrom(random, IndexStoreModule.Type.values())
                           .getSettingsKey()))
           .build();
   final IndexSettings newIndexSettings =
        new IndexSettings(build, indexSettings.getNodeSettings(), Collections.emptyList());
   return new FsDirectoryService(newIndexSettings, indexStore, path);
 }
 private ShardRouting corruptRandomPrimaryFile(final boolean includePerCommitFiles)
     throws IOException {
   ClusterState state = client().admin().cluster().prepareState().get().getState();
   Index test = state.metaData().index("test").getIndex();
   GroupShardsIterator shardIterators =
       state.getRoutingTable().activePrimaryShardsGrouped(new String[] {"test"}, false);
   List<ShardIterator> iterators = iterableAsArrayList(shardIterators);
   ShardIterator shardIterator = RandomPicks.randomFrom(random(), iterators);
   ShardRouting shardRouting = shardIterator.nextOrNull();
   assertNotNull(shardRouting);
   assertTrue(shardRouting.primary());
   assertTrue(shardRouting.assignedToNode());
   String nodeId = shardRouting.currentNodeId();
    NodesStatsResponse nodeStats =
        client().admin().cluster().prepareNodesStats(nodeId).setFs(true).get();
    Set<Path> files = new TreeSet<>(); // TreeSet keeps iteration order deterministic
    for (FsInfo.Path info : nodeStats.getNodes().get(0).getFs()) {
     String path = info.getPath();
     Path file =
         PathUtils.get(path)
             .resolve("indices")
             .resolve(test.getUUID())
             .resolve(Integer.toString(shardRouting.getId()))
             .resolve("index");
     if (Files.exists(file)) { // multi data path might only have one path in use
       try (DirectoryStream<Path> stream = Files.newDirectoryStream(file)) {
         for (Path item : stream) {
           if (Files.isRegularFile(item)
               && "write.lock".equals(item.getFileName().toString()) == false) {
             if (includePerCommitFiles || isPerSegmentFile(item.getFileName().toString())) {
               files.add(item);
             }
           }
         }
       }
     }
   }
   pruneOldDeleteGenerations(files);
   CorruptionUtils.corruptFile(random(), files.toArray(new Path[0]));
   return shardRouting;
 }
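 /*
  * The isPerSegmentFile and pruneOldDeleteGenerations helpers referenced above are not shown on
  * this page. A hypothetical sketch of the former, assuming the per-commit files are the commit
  * point (segments_N) and the live-docs (.liv) files; the real implementation may differ.
  */
 private static boolean isPerSegmentFile(String fileName) {
   // Everything that is neither a commit point nor a live-docs file belongs to a single segment.
   return fileName.startsWith("segments") == false && fileName.endsWith(".liv") == false;
 }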
 /** @return a {@link NestedQueryBuilder} with random values all over the place */
 @Override
 protected NestedQueryBuilder doCreateTestQueryBuilder() {
   QueryBuilder innerQueryBuilder = RandomQueryBuilder.createQuery(random());
   if (randomBoolean()) {
     requiresRewrite = true;
     innerQueryBuilder = new WrapperQueryBuilder(innerQueryBuilder.toString());
   }
   NestedQueryBuilder nqb =
       new NestedQueryBuilder(
           "nested1", innerQueryBuilder, RandomPicks.randomFrom(random(), ScoreMode.values()));
   nqb.ignoreUnmapped(randomBoolean());
   if (randomBoolean()) {
     nqb.innerHit(
         new InnerHitBuilder()
             .setName(randomAsciiOfLengthBetween(1, 10))
             .setSize(randomIntBetween(0, 100))
             .addSort(new FieldSortBuilder(INT_FIELD_NAME).order(SortOrder.ASC)),
         nqb.ignoreUnmapped());
   }
   return nqb;
 }
  public void testRangeQuery() throws Exception {
    Settings indexSettings =
        Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT).build();
    IndexSettings idxSettings =
        IndexSettingsModule.newIndexSettings(randomAsciiOfLengthBetween(1, 10), indexSettings);
    QueryShardContext context =
        new QueryShardContext(
            0, idxSettings, null, null, null, null, null, null, null, null, () -> nowInMillis);
    RangeFieldMapper.RangeFieldType ft = new RangeFieldMapper.RangeFieldType(type);
    ft.setName(FIELDNAME);
    ft.setIndexOptions(IndexOptions.DOCS);

    ShapeRelation relation = RandomPicks.randomFrom(random(), ShapeRelation.values());
    boolean includeLower = random().nextBoolean();
    boolean includeUpper = random().nextBoolean();
    Object from = nextFrom();
    Object to = nextTo(from);

    assertEquals(
        getExpectedRangeQuery(relation, from, to, includeLower, includeUpper),
        ft.rangeQuery(from, to, includeLower, includeUpper, relation, context));
  }
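  /*
   * The nextFrom and nextTo helpers used above are not shown on this page. A hypothetical sketch
   * for the INTEGER range type, which only has to guarantee that the upper bound is not below
   * the lower bound; the real helpers also cover the other range types.
   */
  private Object nextFrom() {
    return randomIntBetween(Integer.MIN_VALUE / 2, 0);
  }

  private Object nextTo(Object from) {
    // Pick an upper bound at or above the previously generated lower bound.
    return randomIntBetween((Integer) from, Integer.MAX_VALUE / 2);
  }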
 @Before
 public void setupProperties() {
   type = RandomPicks.randomFrom(random(), RangeType.values());
   nowInMillis = randomPositiveLong();
   if (type == RangeType.DATE) {
     addModifier(
         new Modifier("format", true) {
           @Override
           public void modify(MappedFieldType ft) {
             ((RangeFieldMapper.RangeFieldType) ft)
                 .setDateTimeFormatter(Joda.forPattern("basic_week_date", Locale.ROOT));
           }
         });
     addModifier(
         new Modifier("locale", true) {
           @Override
           public void modify(MappedFieldType ft) {
             ((RangeFieldMapper.RangeFieldType) ft)
                 .setDateTimeFormatter(Joda.forPattern("date_optional_time", Locale.CANADA));
           }
         });
   }
 }
  @Nightly
  public void testBigDocuments() throws IOException {
    // "big" as "much bigger than the chunk size"
    // for this test we force a FS dir
    // we can't just use newFSDirectory, because this test doesn't really index anything.
    // so if we get NRTCachingDir+SimpleText, we make massive stored fields and OOM (LUCENE-4484)
    Directory dir =
        new MockDirectoryWrapper(
            random(), new MMapDirectory(_TestUtil.getTempDir("testBigDocuments")));
    IndexWriterConfig iwConf =
        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()));
    iwConf.setMaxBufferedDocs(RandomInts.randomIntBetween(random(), 2, 30));
    RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwConf);

    if (dir instanceof MockDirectoryWrapper) {
      ((MockDirectoryWrapper) dir).setThrottling(Throttling.NEVER);
    }

     final Document emptyDoc = new Document(); // no fields at all
    final Document bigDoc1 = new Document(); // lot of small fields
    final Document bigDoc2 = new Document(); // 1 very big field

    final Field idField = new StringField("id", "", Store.NO);
    emptyDoc.add(idField);
    bigDoc1.add(idField);
    bigDoc2.add(idField);

    final FieldType onlyStored = new FieldType(StringField.TYPE_STORED);
    onlyStored.setIndexed(false);

    final Field smallField =
        new Field("fld", randomByteArray(random().nextInt(10), 256), onlyStored);
    final int numFields = RandomInts.randomIntBetween(random(), 500000, 1000000);
    for (int i = 0; i < numFields; ++i) {
      bigDoc1.add(smallField);
    }

    final Field bigField =
        new Field(
            "fld",
            randomByteArray(RandomInts.randomIntBetween(random(), 1000000, 5000000), 2),
            onlyStored);
    bigDoc2.add(bigField);

    final int numDocs = atLeast(5);
    final Document[] docs = new Document[numDocs];
    for (int i = 0; i < numDocs; ++i) {
      docs[i] = RandomPicks.randomFrom(random(), Arrays.asList(emptyDoc, bigDoc1, bigDoc2));
    }
    for (int i = 0; i < numDocs; ++i) {
      idField.setStringValue("" + i);
      iw.addDocument(docs[i]);
      if (random().nextInt(numDocs) == 0) {
        iw.commit();
      }
    }
    iw.commit();
    iw.forceMerge(1); // look at what happens when big docs are merged
    final DirectoryReader rd = DirectoryReader.open(dir);
    final IndexSearcher searcher = new IndexSearcher(rd);
    for (int i = 0; i < numDocs; ++i) {
      final Query query = new TermQuery(new Term("id", "" + i));
      final TopDocs topDocs = searcher.search(query, 1);
      assertEquals("" + i, 1, topDocs.totalHits);
      final Document doc = rd.document(topDocs.scoreDocs[0].doc);
      assertNotNull(doc);
      final IndexableField[] fieldValues = doc.getFields("fld");
      assertEquals(docs[i].getFields("fld").length, fieldValues.length);
      if (fieldValues.length > 0) {
        assertEquals(docs[i].getFields("fld")[0].binaryValue(), fieldValues[0].binaryValue());
      }
    }
    rd.close();
    iw.close();
    dir.close();
  }
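  /*
   * The randomByteArray helper used above is not shown on this page. A hypothetical sketch: an
   * array of the requested length whose byte values stay below max, so bigDoc2's huge payload
   * (max = 2) is highly compressible; the real helper may differ.
   */
  private static byte[] randomByteArray(int length, int max) {
    final byte[] bytes = new byte[length];
    for (int i = 0; i < length; ++i) {
      bytes[i] = (byte) random().nextInt(max);
    }
    return bytes;
  }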
  /**
   * Test that allocates an index on one or more old nodes and then performs a rolling upgrade:
   * one node after another is shut down and restarted with a newer version, and we verify that
   * all documents are still present after each node's upgrade.
   */
  public void testIndexRollingUpgrade() throws Exception {
    String[] indices = new String[randomIntBetween(1, 3)];
    for (int i = 0; i < indices.length; i++) {
      indices[i] = "test" + i;
      assertAcked(
          prepareCreate(indices[i])
              .setSettings(
                  Settings.builder()
                      .put(
                          "index.routing.allocation.exclude._name",
                          backwardsCluster().newNodePattern())
                      .put(indexSettings())));
    }

    int numDocs = randomIntBetween(100, 150);
    IndexRequestBuilder[] docs = new IndexRequestBuilder[numDocs];
    String[] indexForDoc = new String[docs.length];
    for (int i = 0; i < numDocs; i++) {
      docs[i] =
          client()
              .prepareIndex(
                  indexForDoc[i] = RandomPicks.randomFrom(getRandom(), indices),
                  "type1",
                  String.valueOf(i))
              .setSource(
                  "field1",
                  English.intToEnglish(i),
                  "num_int",
                  randomInt(),
                  "num_double",
                  randomDouble());
    }
    indexRandom(true, docs);
    for (String index : indices) {
      assertAllShardsOnNodes(index, backwardsCluster().backwardsNodePattern());
    }
    disableAllocation(indices);
    backwardsCluster().allowOnAllNodes(indices);
    logClusterState();
    boolean upgraded;
    do {
      logClusterState();
      SearchResponse countResponse = client().prepareSearch().setSize(0).get();
      assertHitCount(countResponse, numDocs);
      assertSimpleSort("num_double", "num_int");
      upgraded = backwardsCluster().upgradeOneNode();
      ensureYellow();
      countResponse = client().prepareSearch().setSize(0).get();
      assertHitCount(countResponse, numDocs);
      for (int i = 0; i < numDocs; i++) {
        docs[i] =
            client()
                .prepareIndex(indexForDoc[i], "type1", String.valueOf(i))
                .setSource(
                    "field1",
                    English.intToEnglish(i),
                    "num_int",
                    randomInt(),
                    "num_double",
                    randomDouble());
      }
      indexRandom(true, docs);
    } while (upgraded);
    enableAllocation(indices);
    ensureYellow();
    SearchResponse countResponse = client().prepareSearch().setSize(0).get();
    assertHitCount(countResponse, numDocs);
    assertSimpleSort("num_double", "num_int");

    String[] newIndices = new String[randomIntBetween(1, 3)];

    for (int i = 0; i < newIndices.length; i++) {
      newIndices[i] = "new_index" + i;
      createIndex(newIndices[i]);
    }
    assertVersionCreated(
        Version.CURRENT, newIndices); // new indices are all created with the new version
    assertVersionCreated(compatibilityVersion(), indices);
  }