Code example #1
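 // Test helper: returns the Lucene Directory backing shard [shardId] of [index].
 // Note that the node lookup is hardcoded to nodes holding the "test" index.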
 private Directory getStoreDirectory(String index, int shardId) {
   Set<String> nodes = cluster().nodesInclude("test");
   assertThat(nodes.isEmpty(), equalTo(false));
   IndicesService indicesService =
       cluster().getInstance(IndicesService.class, nodes.iterator().next());
   InternalIndexShard indexShard =
       (InternalIndexShard) (indicesService.indexService(index).shard(shardId));
   return indexShard.store().directory();
 }
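A minimal usage sketch for the helper above, as it might appear in the same integration test class; the index name "test", shard id 0, and the Hamcrest greaterThan matcher are illustrative assumptions rather than part of the listing:

  // Hypothetical caller: assumes an index named "test" has already been created and
  // indexed into, and that the enclosing test method declares "throws Exception",
  // since Directory.listAll() can throw IOException.
  Directory dir = getStoreDirectory("test", 0);
  String[] files = dir.listAll();              // raw Lucene files backing the shard
  assertThat(files.length, greaterThan(0));    // expect at least a segments_N file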
Code example #2
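  // Shard-level handler for the termlist action: walks the shard's fields (skipping
  // internal ones and honoring an optional field filter) and collects the distinct
  // terms into a ShardTermlistResponse.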
  @Override
  protected ShardTermlistResponse shardOperation(ShardTermlistRequest request)
      throws ElasticSearchException {
    synchronized (termlistMutex) {
      InternalIndexShard indexShard =
          (InternalIndexShard)
              indicesService.indexServiceSafe(request.index()).shardSafe(request.shardId());
      indexShard.store().directory();
      Engine.Searcher searcher = indexShard.searcher();
      try {
        Set<String> set = new CompactHashSet();

        Fields fields = MultiFields.getFields(searcher.reader());
        if (fields != null) {
          for (Iterator<String> it = fields.iterator(); it.hasNext(); ) {
            String field = it.next();
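            // skip internal fields such as _uid, _source, _all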
            if (field.charAt(0) == '_') {
              continue;
            }
            if (request.getField() == null || field.equals(request.getField())) {
              Terms terms = fields.terms(field);
              if (terms != null) {
                TermsEnum termsEnum = terms.iterator(null);
                BytesRef text;
                while ((text = termsEnum.next()) != null) {
                  set.add(text.utf8ToString());
                  System.out.println("field=" + field + "; text=" + text.utf8ToString());
                }
              }
            }
          }
        }
        return new ShardTermlistResponse(request.index(), request.shardId(), set);
      } catch (IOException ex) {
        throw new ElasticSearchException(ex.getMessage(), ex);
      } finally {
        // always release the acquired searcher so the underlying index reader is not leaked
        searcher.release();
      }
    }
  }
Code example #3
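  // Shard-level status handler: gathers store size, document counts, translog state,
  // merge/refresh/flush stats and, when requested, recovery and gateway snapshot progress.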
  @Override
  protected ShardStatus shardOperation(IndexShardStatusRequest request)
      throws ElasticSearchException {
    InternalIndexService indexService =
        (InternalIndexService) indicesService.indexServiceSafe(request.index());
    InternalIndexShard indexShard = (InternalIndexShard) indexService.shardSafe(request.shardId());
    ShardStatus shardStatus = new ShardStatus(indexShard.routingEntry());
    shardStatus.state = indexShard.state();
    try {
      shardStatus.storeSize = indexShard.store().estimateSize();
    } catch (IOException e) {
      // failure to get the store size...
    }
    if (indexShard.state() == IndexShardState.STARTED) {
      // shardStatus.estimatedFlushableMemorySize = indexShard.estimateFlushableMemorySize();
      shardStatus.translogId = indexShard.translog().currentId();
      shardStatus.translogOperations = indexShard.translog().estimatedNumberOfOperations();
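      // acquire a searcher to read the document counts; released in the finally block below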
      Engine.Searcher searcher = indexShard.searcher();
      try {
        shardStatus.docs = new DocsStatus();
        shardStatus.docs.numDocs = searcher.reader().numDocs();
        shardStatus.docs.maxDoc = searcher.reader().maxDoc();
        shardStatus.docs.deletedDocs = searcher.reader().numDeletedDocs();
      } finally {
        searcher.release();
      }

      shardStatus.mergeStats = indexShard.mergeScheduler().stats();
      shardStatus.refreshStats = indexShard.refreshStats();
      shardStatus.flushStats = indexShard.flushStats();
    }

    if (request.recovery) {
      // check ongoing recovery (from peer or gateway)
      RecoveryStatus peerRecoveryStatus = indexShard.peerRecoveryStatus();
      if (peerRecoveryStatus == null) {
        peerRecoveryStatus = peerRecoveryTarget.peerRecoveryStatus(indexShard.shardId());
      }
      if (peerRecoveryStatus != null) {
        PeerRecoveryStatus.Stage stage;
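        // map the internal recovery stage onto the status API's enum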
        switch (peerRecoveryStatus.stage()) {
          case INIT:
            stage = PeerRecoveryStatus.Stage.INIT;
            break;
          case INDEX:
            stage = PeerRecoveryStatus.Stage.INDEX;
            break;
          case TRANSLOG:
            stage = PeerRecoveryStatus.Stage.TRANSLOG;
            break;
          case FINALIZE:
            stage = PeerRecoveryStatus.Stage.FINALIZE;
            break;
          case DONE:
            stage = PeerRecoveryStatus.Stage.DONE;
            break;
          default:
            stage = PeerRecoveryStatus.Stage.INIT;
        }
        shardStatus.peerRecoveryStatus =
            new PeerRecoveryStatus(
                stage,
                peerRecoveryStatus.startTime(),
                peerRecoveryStatus.time(),
                peerRecoveryStatus.phase1TotalSize(),
                peerRecoveryStatus.phase1ExistingTotalSize(),
                peerRecoveryStatus.currentFilesSize(),
                peerRecoveryStatus.currentTranslogOperations());
      }

      IndexShardGatewayService gatewayService =
          indexService.shardInjector(request.shardId()).getInstance(IndexShardGatewayService.class);
      org.elasticsearch.index.gateway.RecoveryStatus gatewayRecoveryStatus =
          gatewayService.recoveryStatus();
      if (gatewayRecoveryStatus != null) {
        GatewayRecoveryStatus.Stage stage;
        switch (gatewayRecoveryStatus.stage()) {
          case INIT:
            stage = GatewayRecoveryStatus.Stage.INIT;
            break;
          case INDEX:
            stage = GatewayRecoveryStatus.Stage.INDEX;
            break;
          case TRANSLOG:
            stage = GatewayRecoveryStatus.Stage.TRANSLOG;
            break;
          case DONE:
            stage = GatewayRecoveryStatus.Stage.DONE;
            break;
          default:
            stage = GatewayRecoveryStatus.Stage.INIT;
        }
        shardStatus.gatewayRecoveryStatus =
            new GatewayRecoveryStatus(
                stage,
                gatewayRecoveryStatus.startTime(),
                gatewayRecoveryStatus.time(),
                gatewayRecoveryStatus.index().totalSize(),
                gatewayRecoveryStatus.index().reusedTotalSize(),
                gatewayRecoveryStatus.index().currentFilesSize(),
                gatewayRecoveryStatus.translog().currentTranslogOperations());
      }
    }

    if (request.snapshot) {
      IndexShardGatewayService gatewayService =
          indexService.shardInjector(request.shardId()).getInstance(IndexShardGatewayService.class);
      SnapshotStatus snapshotStatus = gatewayService.snapshotStatus();
      if (snapshotStatus != null) {
        GatewaySnapshotStatus.Stage stage;
        switch (snapshotStatus.stage()) {
          case DONE:
            stage = GatewaySnapshotStatus.Stage.DONE;
            break;
          case FAILURE:
            stage = GatewaySnapshotStatus.Stage.FAILURE;
            break;
          case TRANSLOG:
            stage = GatewaySnapshotStatus.Stage.TRANSLOG;
            break;
          case FINALIZE:
            stage = GatewaySnapshotStatus.Stage.FINALIZE;
            break;
          case INDEX:
            stage = GatewaySnapshotStatus.Stage.INDEX;
            break;
          default:
            stage = GatewaySnapshotStatus.Stage.NONE;
            break;
        }
        shardStatus.gatewaySnapshotStatus =
            new GatewaySnapshotStatus(
                stage,
                snapshotStatus.startTime(),
                snapshotStatus.time(),
                snapshotStatus.index().totalSize(),
                snapshotStatus.translog().expectedNumberOfOperations());
      }
    }

    return shardStatus;
  }
Code example #4
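  // Recovers the shard from the blob-store gateway: loads the persisted commit points,
  // then restores the index files and replays the translog from the first commit point
  // whose files are all available.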
  @Override
  public void recover(RecoveryStatus recoveryStatus) throws IndexShardGatewayRecoveryException {
    this.recoveryStatus = recoveryStatus;

    final ImmutableMap<String, BlobMetaData> blobs;
    try {
      blobs = blobContainer.listBlobs();
    } catch (IOException e) {
      throw new IndexShardGatewayRecoveryException(shardId, "Failed to list content of gateway", e);
    }

    List<CommitPoint> commitPointsList = Lists.newArrayList();
    boolean atLeastOneCommitPointExists = false;
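    // parse each persisted commit point blob; unreadable ones are logged and skipped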
    for (String name : blobs.keySet()) {
      if (name.startsWith("commit-")) {
        atLeastOneCommitPointExists = true;
        try {
          commitPointsList.add(CommitPoints.fromXContent(blobContainer.readBlobFully(name)));
        } catch (Exception e) {
          logger.warn("failed to read commit point [{}]", e, name);
        }
      }
    }
    if (atLeastOneCommitPointExists && commitPointsList.isEmpty()) {
      // commit points exist but none could be loaded; bail out rather than risk
      // corrupting the index (manual intervention will be required)
      throw new IndexShardGatewayRecoveryException(
          shardId, "Commit points exist but none could be loaded", null);
    }
    CommitPoints commitPoints = new CommitPoints(commitPointsList);

    if (commitPoints.commits().isEmpty()) {
      // no commit points, clean the store just so we won't recover wrong files
      try {
        indexShard.store().deleteContent();
      } catch (IOException e) {
        logger.warn("failed to clean store before starting shard", e);
      }
      recoveryStatus.index().startTime(System.currentTimeMillis());
      recoveryStatus.index().time(System.currentTimeMillis() - recoveryStatus.index().startTime());
      recoveryStatus.translog().startTime(System.currentTimeMillis());
      recoveryStatus
          .translog()
          .time(System.currentTimeMillis() - recoveryStatus.translog().startTime());
      return;
    }

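    // try each commit point in turn; ones with missing files are logged and skipped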
    for (CommitPoint commitPoint : commitPoints) {
      if (!commitPointExistsInBlobs(commitPoint, blobs)) {
        logger.warn(
            "listed commit_point [{}]/[{}], but not all files exist, ignoring",
            commitPoint.name(),
            commitPoint.version());
        continue;
      }
      try {
        recoveryStatus.index().startTime(System.currentTimeMillis());
        recoveryStatus.updateStage(RecoveryStatus.Stage.INDEX);
        recoverIndex(commitPoint, blobs);
        recoveryStatus
            .index()
            .time(System.currentTimeMillis() - recoveryStatus.index().startTime());

        recoveryStatus.translog().startTime(System.currentTimeMillis());
        recoveryStatus.updateStage(RecoveryStatus.Stage.TRANSLOG);
        recoverTranslog(commitPoint, blobs);
        recoveryStatus
            .translog()
            .time(System.currentTimeMillis() - recoveryStatus.translog().startTime());
        return;
      } catch (Exception e) {
        throw new IndexShardGatewayRecoveryException(
            shardId,
            "failed to recover commit_point ["
                + commitPoint.name()
                + "]/["
                + commitPoint.version()
                + "]",
            e);
      }
    }
    throw new IndexShardGatewayRecoveryException(
        shardId, "No commit point data is available in gateway", null);
  }