@SuppressWarnings("unchecked")
    void onShardResponse(ShardId shardId, TransportShardMultiPercolateAction.Response response) {
      logger.trace("{} Percolate shard response", shardId);
      try {
        for (TransportShardMultiPercolateAction.Response.Item item : response.items()) {
          AtomicReferenceArray shardResults = responsesByItemAndShard.get(item.slot());
          if (shardResults == null) {
            assert false : "shardResults can't be null";
            continue;
          }

          if (item.failed()) {
            shardResults.set(
                shardId.id(),
                new BroadcastShardOperationFailedException(shardId, item.error().string()));
          } else {
            shardResults.set(shardId.id(), item.response());
          }

          assert expectedOperationsPerItem.get(item.slot()).get() >= 1
              : "slot[" + item.slot() + "] can't be lower than one";
          if (expectedOperationsPerItem.get(item.slot()).decrementAndGet() == 0) {
            // Failure won't bubble up, since we fail the whole request now via the catch
            // clause below, so expectedOperationsPerItem will not be decremented twice.
            reduce(item.slot());
          }
        }
      } catch (Throwable e) {
        logger.error("{} Percolate original reduce error", e, shardId);
        finalListener.onFailure(e);
      }
    }
  /**
   * Execute the given {@link IndexRequest} on a primary shard, throwing a {@link
   * RetryOnPrimaryException} if the operation needs to be re-tried.
   */
  protected final WriteResult<IndexResponse> executeIndexRequestOnPrimary(
      BulkShardRequest shardRequest, IndexRequest request, IndexShard indexShard) throws Throwable {
    Engine.Index operation = prepareIndexOperationOnPrimary(shardRequest, request, indexShard);
    Mapping update = operation.parsedDoc().dynamicMappingsUpdate();
    final ShardId shardId = indexShard.shardId();
    if (update != null) {
      final String indexName = shardId.getIndex();
      mappingUpdatedAction.updateMappingOnMasterSynchronously(indexName, request.type(), update);
      operation = prepareIndexOperationOnPrimary(shardRequest, request, indexShard);
      update = operation.parsedDoc().dynamicMappingsUpdate();
      if (update != null) {
        throw new RetryOnPrimaryException(
            shardId, "Dynamic mappings are not available on the node that holds the primary yet");
      }
    }
    final boolean created = indexShard.index(operation);

    // update the version on request so it will happen on the replicas
    final long version = operation.version();
    request.version(version);
    request.versionType(request.versionType().versionTypeForReplicationAndRecovery());

    assert request.versionType().validateVersionForWrites(request.version());

    return new WriteResult<>(
        new IndexResponse(
            shardId.getIndex(), request.type(), request.id(), request.version(), created),
        operation.getTranslogLocation());
  }
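  // Illustrative sketch, not part of the original class: the Javadoc above states that
  // RetryOnPrimaryException signals the operation should be re-driven once the dynamic
  // mapping update has reached the primary. A hypothetical caller could retry a bounded
  // number of times; MAX_PRIMARY_RETRIES and indexWithRetry are assumed names.
  private static final int MAX_PRIMARY_RETRIES = 3; // illustrative bound

  protected final WriteResult<IndexResponse> indexWithRetry(
      BulkShardRequest shardRequest, IndexRequest request, IndexShard indexShard) throws Throwable {
    for (int attempt = 0; ; attempt++) {
      try {
        return executeIndexRequestOnPrimary(shardRequest, request, indexShard);
      } catch (RetryOnPrimaryException e) {
        if (attempt >= MAX_PRIMARY_RETRIES) {
          throw e; // give up and let the transport layer surface the failure
        }
        // fall through and retry; the mapping update propagates asynchronously
      }
    }
  }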
  @Override
  protected GetFieldMappingsResponse shardOperation(
      final GetFieldMappingsIndexRequest request, ShardId shardId) {
    assert shardId != null;
    IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex());
    Collection<String> typeIntersection;
    if (request.types().length == 0) {
      typeIntersection = indexService.mapperService().types();

    } else {
      typeIntersection =
          indexService
              .mapperService()
              .types()
              .stream()
              .filter(type -> Regex.simpleMatch(request.types(), type))
              .collect(Collectors.toCollection(ArrayList::new));
      if (typeIntersection.isEmpty()) {
        throw new TypeMissingException(shardId.getIndex(), request.types());
      }
    }

    MapBuilder<String, Map<String, FieldMappingMetaData>> typeMappings = new MapBuilder<>();
    for (String type : typeIntersection) {
      DocumentMapper documentMapper = indexService.mapperService().documentMapper(type);
      Map<String, FieldMappingMetaData> fieldMapping =
          findFieldMappingsByType(documentMapper, request);
      if (!fieldMapping.isEmpty()) {
        typeMappings.put(type, fieldMapping);
      }
    }

    return new GetFieldMappingsResponse(
        singletonMap(shardId.getIndexName(), typeMappings.immutableMap()));
  }
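  // Illustrative only: the type filtering above relies on Regex.simpleMatch, where '*' is the
  // sole wildcard and the call returns true as soon as any pattern matches the candidate.
  // Standalone sketch assuming org.elasticsearch.common.regex.Regex is on the classpath.
  static void simpleMatchExample() {
    String[] patterns = {"log-*", "user"};
    assert Regex.simpleMatch(patterns, "log-2023");         // matches "log-*"
    assert Regex.simpleMatch(patterns, "user");              // exact match
    assert Regex.simpleMatch(patterns, "metrics") == false;  // no pattern matches
  }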
    private void doFinish() {
      if (finished.compareAndSet(false, true)) {
        Releasables.close(indexShardReference);
        final ShardId shardId = shardIt.shardId();
        final ActionWriteResponse.ShardInfo.Failure[] failuresArray;
        if (!shardReplicaFailures.isEmpty()) {
          int slot = 0;
          failuresArray = new ActionWriteResponse.ShardInfo.Failure[shardReplicaFailures.size()];
          for (Map.Entry<String, Throwable> entry : shardReplicaFailures.entrySet()) {
            RestStatus restStatus = ExceptionsHelper.status(entry.getValue());
            failuresArray[slot++] =
                new ActionWriteResponse.ShardInfo.Failure(
                    shardId.getIndex(),
                    shardId.getId(),
                    entry.getKey(),
                    entry.getValue(),
                    restStatus,
                    false);
          }
        } else {
          failuresArray = ActionWriteResponse.EMPTY;
        }
        finalResponse.setShardInfo(
            new ActionWriteResponse.ShardInfo(totalShards, success.get(), failuresArray));

        listener.onResponse(finalResponse);
      }
    }
  @Override
  public synchronized IndexShard createShard(int sShardId) throws ElasticSearchException {
    ShardId shardId = new ShardId(index, sShardId);
    if (shardsInjectors.containsKey(shardId.id())) {
      throw new IndexShardAlreadyExistsException(shardId + " already exists");
    }

    indicesLifecycle.beforeIndexShardCreated(shardId);

    logger.debug("creating shard_id [{}]", shardId.id());

    ModulesBuilder modules = new ModulesBuilder();
    modules.add(new ShardsPluginsModule(indexSettings, pluginsService));
    modules.add(new IndexShardModule(shardId));
    modules.add(new StoreModule(indexSettings, injector.getInstance(IndexStore.class)));
    modules.add(new DeletionPolicyModule(indexSettings));
    modules.add(new MergePolicyModule(indexSettings));
    modules.add(new MergeSchedulerModule(indexSettings));
    modules.add(new TranslogModule(indexSettings));
    modules.add(new EngineModule(indexSettings));
    modules.add(new IndexShardGatewayModule(injector.getInstance(IndexGateway.class)));

    Injector shardInjector = modules.createChildInjector(injector);

    shardsInjectors =
        newMapBuilder(shardsInjectors).put(shardId.id(), shardInjector).immutableMap();

    IndexShard indexShard = shardInjector.getInstance(IndexShard.class);

    indicesLifecycle.afterIndexShardCreated(indexShard);

    shards = newMapBuilder(shards).put(shardId.id(), indexShard).immutableMap();

    return indexShard;
  }
 /**
  * Just like {@link #getLogger(Class, org.elasticsearch.common.settings.Settings, ShardId,
  * String...)} but takes a String loggerName instead of a Class.
  */
 public static ESLogger getLogger(
     String loggerName, Settings settings, ShardId shardId, String... prefixes) {
   return getLogger(
       loggerName,
       settings,
       asArrayList(shardId.getIndexName(), Integer.toString(shardId.id()), prefixes)
           .toArray(new String[0]));
 }
 public static ESLogger getLogger(
     Class clazz, Settings settings, ShardId shardId, String... prefixes) {
   return getLogger(
       clazz,
       settings,
       shardId.getIndex(),
       asArrayList(Integer.toString(shardId.id()), prefixes).toArray(new String[0]));
 }
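 // Hypothetical call site for the two helpers above; `settings` and `shardId` are assumed to be
 // in scope and the helpers are assumed to live in the usual Loggers utility class. The returned
 // logger prefixes every message with the index name, the shard id, and any extra prefixes,
 // e.g. "[my-index][0][recovery] starting file-based recovery".
 void loggerUsageSketch(Settings settings, ShardId shardId) {
   ESLogger logger = Loggers.getLogger(IndexShard.class, settings, shardId, "recovery");
   logger.trace("starting file-based recovery");
 }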
 /** Clears the post allocation flag for the specified shard */
 public Builder clearPostAllocationFlag(ShardId shardId) {
   assert this.index.equals(shardId.index().name());
   IndexShardRoutingTable indexShard = shards.get(shardId.id());
   shards.put(
       indexShard.shardId().id(),
       new IndexShardRoutingTable(indexShard.shardId(), indexShard.shards(), false));
   return this;
 }
 @Override
 public void writeTo(StreamOutput out) throws IOException {
   super.writeTo(out);
   if (out.getVersion().before(Version.V_1_4_0)) {
     // older nodes expect the concrete index as part of the request
     request.index(shardId.getIndex());
   }
   request.writeTo(out);
   if (out.getVersion().onOrAfter(Version.V_1_4_0)) {
     shardId.writeTo(out);
   } else {
     out.writeVInt(shardId.id());
   }
 }
  @Override
  public void readFrom(StreamInput in) throws IOException {
    shardId = ShardId.readShardId(in);
    int size = in.readVInt();
    phase1FileNames = new ArrayList<String>(size);
    for (int i = 0; i < size; i++) {
      phase1FileNames.add(in.readUTF());
    }

    size = in.readVInt();
    phase1FileSizes = new ArrayList<Long>(size);
    for (int i = 0; i < size; i++) {
      phase1FileSizes.add(in.readVLong());
    }

    size = in.readVInt();
    phase1ExistingFileNames = new ArrayList<String>(size);
    for (int i = 0; i < size; i++) {
      phase1ExistingFileNames.add(in.readUTF());
    }

    size = in.readVInt();
    phase1ExistingFileSizes = new ArrayList<Long>(size);
    for (int i = 0; i < size; i++) {
      phase1ExistingFileSizes.add(in.readVLong());
    }

    phase1TotalSize = in.readVLong();
    phase1ExistingTotalSize = in.readVLong();
  }
  @Override
  public void writeTo(StreamOutput out) throws IOException {
    shardId.writeTo(out);

    out.writeVInt(phase1FileNames.size());
    for (String phase1FileName : phase1FileNames) {
      out.writeUTF(phase1FileName);
    }

    out.writeVInt(phase1FileSizes.size());
    for (Long phase1FileSize : phase1FileSizes) {
      out.writeVLong(phase1FileSize);
    }

    out.writeVInt(phase1ExistingFileNames.size());
    for (String phase1ExistingFileName : phase1ExistingFileNames) {
      out.writeUTF(phase1ExistingFileName);
    }

    out.writeVInt(phase1ExistingFileSizes.size());
    for (Long phase1ExistingFileSize : phase1ExistingFileSizes) {
      out.writeVLong(phase1ExistingFileSize);
    }

    out.writeVLong(phase1TotalSize);
    out.writeVLong(phase1ExistingTotalSize);
  }
 @Override
 public void writeTo(StreamOutput out) throws IOException {
   super.writeTo(out);
   out.writeLong(tookInMillis);
   out.writeLong(ingestId);
   shardId.writeTo(out);
   out.writeVInt(successCount);
   out.writeVInt(quorumShards);
   out.writeVInt(actionRequests.size());
   for (ActionRequest actionRequest : actionRequests) {
     if (actionRequest == null) {
       out.writeBoolean(false);
     } else {
       out.writeBoolean(true);
       if (actionRequest instanceof IndexRequest) {
         out.writeBoolean(true);
       } else if (actionRequest instanceof DeleteRequest) {
         out.writeBoolean(false);
       } else {
         throw new ElasticsearchIllegalStateException(
             "action request not supported: " + actionRequest.getClass().getName());
       }
       actionRequest.writeTo(out);
     }
   }
   out.writeVInt(failures.size());
   for (IngestActionFailure f : failures) {
     f.writeTo(out);
   }
 }
 @Override
 public void readFrom(StreamInput in) throws IOException {
   super.readFrom(in);
   tookInMillis = in.readLong();
   ingestId = in.readLong();
   shardId = ShardId.readShardId(in);
   successCount = in.readVInt();
   quorumShards = in.readVInt();
   actionRequests = newLinkedList();
   int size = in.readVInt();
   for (int i = 0; i < size; i++) {
     boolean exists = in.readBoolean();
     if (exists) {
       boolean b = in.readBoolean();
       if (b) {
         IndexRequest indexRequest = new IndexRequest();
         indexRequest.readFrom(in);
         actionRequests.add(indexRequest);
       } else {
         DeleteRequest deleteRequest = new DeleteRequest();
         deleteRequest.readFrom(in);
         actionRequests.add(deleteRequest);
       }
     } else {
       actionRequests.add(null);
     }
   }
   failures = newLinkedList();
   size = in.readVInt();
   for (int i = 0; i < size; i++) {
     failures.add(IngestActionFailure.from(in));
   }
 }
 private void closeShard(
     String reason, ShardId sId, IndexShard indexShard, Store store, IndexEventListener listener) {
   final int shardId = sId.id();
   final Settings indexSettings = this.getIndexSettings().getSettings();
   try {
     try {
       listener.beforeIndexShardClosed(sId, indexShard, indexSettings);
     } finally {
        // this logic is tricky: we want to close the engine so we roll back the changes done
        // to it, and close the shard so no further operations are allowed on it
       if (indexShard != null) {
         try {
            // only flush if we are closed (closing the index or shutting down) and if we are not deleted
           final boolean flushEngine = deleted.get() == false && closed.get();
           indexShard.close(reason, flushEngine);
         } catch (Exception e) {
           logger.debug("[{}] failed to close index shard", e, shardId);
           // ignore
         }
       }
       // call this before we close the store, so we can release resources for it
       listener.afterIndexShardClosed(sId, indexShard, indexSettings);
     }
   } finally {
     try {
       store.close();
     } catch (Exception e) {
       logger.warn(
           "[{}] failed to close store on shard removal (reason: [{}])", e, shardId, reason);
     }
   }
 }
 @Override
 public int hashCode() {
   int result = shardId.hashCode();
   result = 31 * result + shards.hashCode();
   result = 31 * result + (primaryAllocatedPostApi ? 1 : 0);
   return result;
 }
 @Override
 public String toString() {
   if (shardId != null) {
     return shardId.toString();
   } else {
     return index;
   }
 }
 @Override
 public void writeTo(StreamOutput out) throws IOException {
   shardId.writeTo(out);
   out.writeVInt(shards.length);
   for (ShardStats stats : shards) {
     stats.writeTo(out);
   }
 }
 @Override
 public void readFrom(StreamInput in) throws IOException {
   shardId = ShardId.readShardId(in);
   int shardSize = in.readVInt();
   shards = new ShardStats[shardSize];
   for (int i = 0; i < shardSize; i++) {
     shards[i] = ShardStats.readShardStats(in);
   }
 }
 @Override
 public void onRemoval(ShardId shardId, String fieldName, boolean wasEvicted, long sizeInBytes) {
   if (shardId != null) {
     final IndexShard shard = indexService.getShardOrNull(shardId.id());
     if (shard != null) {
       shard.fieldData().onRemoval(shardId, fieldName, wasEvicted, sizeInBytes);
     }
   }
 }
 @Override
 public void onCache(ShardId shardId, String fieldName, Accountable ramUsage) {
   if (shardId != null) {
     final IndexShard shard = indexService.getShardOrNull(shardId.id());
     if (shard != null) {
       shard.fieldData().onCache(shardId, fieldName, ramUsage);
     }
   }
 }
 private StoreFilesMetaData listStoreMetaData(ShardId shardId) throws IOException {
   logger.trace("listing store meta data for {}", shardId);
   long startTimeNS = System.nanoTime();
   boolean exists = false;
   try {
     IndexService indexService = indicesService.indexService(shardId.index().name());
     if (indexService != null) {
       IndexShard indexShard = indexService.shard(shardId.id());
       if (indexShard != null) {
         final Store store = indexShard.store();
         store.incRef();
         try {
           exists = true;
           return new StoreFilesMetaData(true, shardId, store.getMetadataOrEmpty());
         } finally {
           store.decRef();
         }
       }
     }
      // try and see if we can list unallocated
     IndexMetaData metaData = clusterService.state().metaData().index(shardId.index().name());
     if (metaData == null) {
       return new StoreFilesMetaData(false, shardId, Store.MetadataSnapshot.EMPTY);
     }
     String storeType = metaData.getSettings().get(IndexStoreModule.STORE_TYPE, "fs");
     if (!storeType.contains("fs")) {
       return new StoreFilesMetaData(false, shardId, Store.MetadataSnapshot.EMPTY);
     }
     final ShardPath shardPath =
         ShardPath.loadShardPath(logger, nodeEnv, shardId, metaData.getSettings());
     if (shardPath == null) {
       return new StoreFilesMetaData(false, shardId, Store.MetadataSnapshot.EMPTY);
     }
     return new StoreFilesMetaData(
         false, shardId, Store.readMetadataSnapshot(shardPath.resolveIndex(), logger));
   } finally {
     TimeValue took = new TimeValue(System.nanoTime() - startTimeNS, TimeUnit.NANOSECONDS);
     if (exists) {
       logger.debug("{} loaded store meta data (took [{}])", shardId, took);
     } else {
       logger.trace("{} didn't find any store meta data to load (took [{}])", shardId, took);
     }
   }
 }
  public static void main(String[] args) throws Exception {
    ShardId shardId = new ShardId(new Index("index"), 1);
    Settings settings = EMPTY_SETTINGS;

    //        Store store = new RamStore(shardId, settings);
    Store store = new ByteBufferStore(shardId, settings, null, new ByteBufferCache(settings));
    //        Store store = new NioFsStore(shardId, settings);

    store.deleteContent();

    ThreadPool threadPool = new ScalingThreadPool();
    SnapshotDeletionPolicy deletionPolicy =
        new SnapshotDeletionPolicy(new KeepOnlyLastDeletionPolicy(shardId, settings));
    Engine engine =
        new RobinEngine(
            shardId,
            settings,
            store,
            deletionPolicy,
            new MemoryTranslog(shardId, settings),
            new LogByteSizeMergePolicyProvider(store),
            new ConcurrentMergeSchedulerProvider(shardId, settings),
            new AnalysisService(shardId.index()),
            new SimilarityService(shardId.index()));
    engine.start();

    SimpleEngineBenchmark benchmark =
        new SimpleEngineBenchmark(store, engine)
            .numberOfContentItems(1000)
            .searcherThreads(50)
            .searcherIterations(10000)
            .writerThreads(10)
            .writerIterations(10000)
            .refreshSchedule(new TimeValue(1, TimeUnit.SECONDS))
            .flushSchedule(new TimeValue(1, TimeUnit.MINUTES))
            .create(false)
            .build();

    benchmark.run();

    engine.close();
    store.close();
    threadPool.shutdown();
  }
 /**
  * Checks whether we can perform a write based on the required active shard count setting. Returns
  * {@code null} if OK to proceed, or a string describing the reason to stop.
  */
 protected String checkActiveShardCount() {
   final ShardId shardId = primary.routingEntry().shardId();
   final String indexName = shardId.getIndexName();
   final ClusterState state = clusterStateSupplier.get();
   assert state != null : "replication operation must have access to the cluster state";
   final ActiveShardCount waitForActiveShards = request.waitForActiveShards();
   if (waitForActiveShards == ActiveShardCount.NONE) {
     return null; // not waiting for any shards
   }
   IndexRoutingTable indexRoutingTable = state.getRoutingTable().index(indexName);
   if (indexRoutingTable == null) {
     logger.trace("[{}] index not found in the routing table", shardId);
     return "Index " + indexName + " not found in the routing table";
   }
   IndexShardRoutingTable shardRoutingTable = indexRoutingTable.shard(shardId.getId());
   if (shardRoutingTable == null) {
     logger.trace("[{}] shard not found in the routing table", shardId);
     return "Shard " + shardId + " not found in the routing table";
   }
   if (waitForActiveShards.enoughShardsActive(shardRoutingTable)) {
     return null;
   } else {
     final String resolvedShards =
         waitForActiveShards == ActiveShardCount.ALL
             ? Integer.toString(shardRoutingTable.shards().size())
             : waitForActiveShards.toString();
     logger.trace(
         "[{}] not enough active copies to meet shard count of [{}] (have {}, needed {}), scheduling a retry. op [{}], "
             + "request [{}]",
         shardId,
         waitForActiveShards,
         shardRoutingTable.activeShards().size(),
         resolvedShards,
         opType,
         request);
     return "Not enough active copies to meet shard count of ["
         + waitForActiveShards
         + "] (have "
         + shardRoutingTable.activeShards().size()
         + ", needed "
         + resolvedShards
         + ").";
   }
 }
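 // Hypothetical caller sketch, based on the null-or-reason contract documented above:
 // proceed when enough shard copies are active, otherwise record the reason and retry later.
 // performOnPrimary() and retryBecause() are illustrative names, not part of the original class.
 void runWriteSketch() {
   final String activeShardCountFailure = checkActiveShardCount();
   if (activeShardCountFailure == null) {
     performOnPrimary();
   } else {
     retryBecause(activeShardCountFailure);
   }
 }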
 @Override
 public void onRemoval(ShardId shardId, Accountable accountable) {
   if (shardId != null) {
     final IndexShard shard = indexService.getShardOrNull(shardId.id());
     if (shard != null) {
       long ramBytesUsed = accountable != null ? accountable.ramBytesUsed() : 0L;
       shard.shardBitsetFilterCache().onRemoval(ramBytesUsed);
     }
   }
 }
 @Before
 public void setUp() throws Exception {
   super.setUp();
   defaultSettings =
       ImmutableSettings.builder()
           .put(RobinEngine.INDEX_COMPOUND_ON_FLUSH, getRandom().nextBoolean())
           .build(); // TODO randomize more settings
   threadPool = new ThreadPool();
   store = createStore();
   store.deleteContent();
   storeReplica = createStoreReplica();
   storeReplica.deleteContent();
   engineSettingsService = new IndexSettingsService(shardId.index(), EMPTY_SETTINGS);
   engine = createEngine(engineSettingsService, store, createTranslog());
   engine.start();
   replicaSettingsService = new IndexSettingsService(shardId.index(), EMPTY_SETTINGS);
   replicaEngine = createEngine(replicaSettingsService, storeReplica, createTranslogReplica());
   replicaEngine.start();
 }
 @Override
 public void readFrom(StreamInput in) throws IOException {
   super.readFrom(in);
   if (in.readBoolean()) {
     shardId = ShardId.readShardId(in);
   } else {
     shardId = null;
   }
   consistencyLevel = WriteConsistencyLevel.fromId(in.readByte());
   timeout = TimeValue.readTimeValue(in);
   index = in.readString();
 }
 @Override
 public void readFrom(StreamInput in) throws IOException {
   super.readFrom(in);
   request = newRequest();
   request.readFrom(in);
   if (in.getVersion().onOrAfter(Version.V_1_4_0)) {
     shardId = ShardId.readShardId(in);
   } else {
     // older nodes will send the concrete index as part of the request
     shardId = new ShardId(request.index(), in.readVInt());
   }
 }
 protected Engine createEngine(
     IndexSettingsService indexSettingsService, Store store, Translog translog) {
   return new RobinEngine(
       shardId,
       defaultSettings,
       threadPool,
       indexSettingsService,
       new ShardIndexingService(
           shardId,
           EMPTY_SETTINGS,
           new ShardSlowLogIndexingService(shardId, EMPTY_SETTINGS, indexSettingsService)),
       null,
       store,
       createSnapshotDeletionPolicy(),
       translog,
       createMergePolicy(),
       createMergeScheduler(),
       new AnalysisService(shardId.index()),
       new SimilarityService(shardId.index()),
       new CodecService(shardId.index()));
 }
  @Override
  protected GetFieldMappingsResponse shardOperation(
      final GetFieldMappingsIndexRequest request, ShardId shardId) throws ElasticsearchException {
    assert shardId != null;
    IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex());
    Collection<String> typeIntersection;
    if (request.types().length == 0) {
      typeIntersection = indexService.mapperService().types();

    } else {
      typeIntersection =
          Collections2.filter(
              indexService.mapperService().types(),
              new Predicate<String>() {

                @Override
                public boolean apply(String type) {
                  return Regex.simpleMatch(request.types(), type);
                }
              });
      if (typeIntersection.isEmpty()) {
        throw new TypeMissingException(shardId.index(), request.types());
      }
    }

    MapBuilder<String, ImmutableMap<String, FieldMappingMetaData>> typeMappings =
        new MapBuilder<>();
    for (String type : typeIntersection) {
      DocumentMapper documentMapper = indexService.mapperService().documentMapper(type);
      ImmutableMap<String, FieldMappingMetaData> fieldMapping =
          findFieldMappingsByType(documentMapper, request);
      if (!fieldMapping.isEmpty()) {
        typeMappings.put(type, fieldMapping);
      }
    }

    return new GetFieldMappingsResponse(
        ImmutableMap.of(shardId.getIndex(), typeMappings.immutableMap()));
  }
  @Override
  public boolean equals(Object o) {
    if (this == o) return true;
    if (o == null || getClass() != o.getClass()) return false;

    IndexShardRoutingTable that = (IndexShardRoutingTable) o;

    if (primaryAllocatedPostApi != that.primaryAllocatedPostApi) return false;
    if (!shardId.equals(that.shardId)) return false;
    if (!shards.equals(that.shards)) return false;

    return true;
  }