Example no. 1
1
  public String getHardwareAddress() {
    TransportAddress transportAddress = httpServerTransport.boundAddress().publishAddress();
    if (!(transportAddress instanceof InetSocketTransportAddress)) {
      return null;
    }

    String hardwareAddress = null;
    InetAddress inetAddress =
        ((InetSocketTransportAddress) transportAddress).address().getAddress();
    try {
      NetworkInterface networkInterface = NetworkInterface.getByInetAddress(inetAddress);
      if (networkInterface != null) {
        if (networkInterface.getName().equals("lo")) {
          hardwareAddress = "loopback device";
        } else {
          byte[] hardwareAddressBytes = networkInterface.getHardwareAddress();
          // getHardwareAddress() returns null for interfaces without a MAC address
          if (hardwareAddressBytes != null) {
            StringBuilder sb = new StringBuilder(18);
            for (byte b : hardwareAddressBytes) {
              if (sb.length() > 0) sb.append(':');
              sb.append(String.format("%02x", b));
            }
            hardwareAddress = sb.toString();
          }
        }
      }

    } catch (SocketException e) {
      if (logger.isTraceEnabled()) {
        logger.trace("Error getting network interface", e);
      }
    }
    return hardwareAddress;
  }
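For reference, a minimal self-contained sketch of the byte-to-MAC formatting idiom used above; the class and method names are hypothetical and not part of the original project:

  public class MacFormatSketch {

    // Formats raw hardware-address bytes as the usual colon-separated hex string.
    static String format(byte[] bytes) {
      StringBuilder sb = new StringBuilder(18);
      for (byte b : bytes) {
        if (sb.length() > 0) sb.append(':');
        sb.append(String.format("%02x", b));
      }
      return sb.toString();
    }

    public static void main(String[] args) {
      // Prints "00:1a:2b:3c:4d:5e"
      System.out.println(format(new byte[] {0x00, 0x1a, 0x2b, 0x3c, 0x4d, 0x5e}));
    }
  }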
  @Override
  public void sendResponse(final RestResponse response) {

    final User user = this.request.getFromContext("searchguard_authenticated_user");
    final Session _session =
        sessionStore.getSession(SecurityUtil.getSearchGuardSessionIdFromCookie(request));

    if (user != null) {
      if (_session == null) {
        final Session session = sessionStore.createSession(user);
        log.trace("Create session and set cookie for {}", user.getName());
        final CookieEncoder encoder = new CookieEncoder(true);
        final Cookie cookie = new DefaultCookie("es_searchguard_session", session.getId());

        // TODO FUTURE check cookie domain/path
        // cookie.setDomain(arg0);
        // cookie.setPath(arg0);

        cookie.setDiscard(true);
        cookie.setSecure(((NettyHttpRequest) request).request() instanceof DefaultHttpsRequest);
        cookie.setMaxAge(60 * 60); // 1h
        cookie.setHttpOnly(true);
        encoder.addCookie(cookie);
        response.addHeader("Set-Cookie", encoder.encode());
      } else {

        // Set-Cookie: token=deleted; path=/; expires=Thu, 01 Jan 1970 00:00:00 GMT
        log.trace("There is already a session");
        // TODO FUTURE check cookie session validity, expire, ...

      }
    }

    channel.sendResponse(response);
  }
  /**
   * Perform phase 3 of the recovery process
   *
   * <p>Phase3 again takes a snapshot of the translog; however, this time the snapshot is acquired
   * under a write lock. The translog operations are sent to the target node where they are
   * replayed.
   *
   * <p>{@code InternalEngine#recover} is responsible for taking the snapshot of the translog, and
   * after phase 3 completes the snapshots from all three phases are released.
   */
  @Override
  public void phase3(Translog.Snapshot snapshot) throws ElasticsearchException {
    if (shard.state() == IndexShardState.CLOSED) {
      throw new IndexShardClosedException(request.shardId());
    }
    cancellableThreads.checkForCancel();
    StopWatch stopWatch = new StopWatch().start();
    final int totalOperations;
    logger.trace(
        "[{}][{}] recovery [phase3] to {}: sending transaction log operations",
        indexName,
        shardId,
        request.targetNode());

    // Send the translog operations to the target node
    totalOperations = sendSnapshot(snapshot);

    cancellableThreads.execute(
        new Interruptable() {
          @Override
          public void run() throws InterruptedException {
            // Send the FINALIZE request to the target node. The finalize request
            // clears unreferenced translog files, refreshes the engine now that
            // new segments are available, and enables garbage collection of
            // tombstone files. The shard is also moved to the POST_RECOVERY phase
            // during this time
            transportService
                .submitRequest(
                    request.targetNode(),
                    RecoveryTarget.Actions.FINALIZE,
                    new RecoveryFinalizeRecoveryRequest(request.recoveryId(), request.shardId()),
                    TransportRequestOptions.options()
                        .withTimeout(recoverySettings.internalActionLongTimeout()),
                    EmptyTransportResponseHandler.INSTANCE_SAME)
                .txGet();
          }
        });

    if (request.markAsRelocated()) {
      // TODO what happens if the recovery process fails afterwards, we need to mark this back to
      // started
      try {
        shard.relocated("to " + request.targetNode());
      } catch (IllegalIndexShardStateException e) {
        // we can ignore this exception since, on the other node, when it moved to phase3
        // it will also send shard started, which might cause the index shard we work against
        // to be closed by the time we get to the relocated method
      }
    }
    stopWatch.stop();
    logger.trace(
        "[{}][{}] recovery [phase3] to {}: took [{}]",
        indexName,
        shardId,
        request.targetNode(),
        stopWatch.totalTime());
    response.phase3Time = stopWatch.totalTime().millis();
    response.phase3Operations = totalOperations;
  }
Example no. 4
0
  public static DigestBlob resumeTransfer(
      BlobContainer blobContainer, String digest, UUID transferId, long currentPos) {
    DigestBlob digestBlob = new DigestBlob(blobContainer, digest, transferId);
    digestBlob.file = getTmpFilePath(blobContainer, digest, transferId);

    try {
      logger.trace("Resuming DigestBlob {}. CurrentPos {}", digest, currentPos);

      digestBlob.headFileChannel = new FileOutputStream(digestBlob.file, false).getChannel();
      digestBlob.headLength = currentPos;
      digestBlob.headSize = new AtomicLong();
      digestBlob.headCatchedUpLatch = new CountDownLatch(1);

      RandomAccessFile raf = new RandomAccessFile(digestBlob.file, "rw");
      raf.setLength(currentPos);
      raf.close();

      FileOutputStream outputStream = new FileOutputStream(digestBlob.file, true);
      digestBlob.fileChannel = outputStream.getChannel();
    } catch (IOException ex) {
      logger.error("error resuming transfer of {}, id: {}", ex, digest, transferId);
      return null;
    }

    return digestBlob;
  }
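A minimal sketch of the truncate-then-append resume technique shown above, assuming the resume position has already been agreed with the sender; the names are illustrative, not from the original code:

  import java.io.File;
  import java.io.FileOutputStream;
  import java.io.IOException;
  import java.io.RandomAccessFile;
  import java.nio.channels.FileChannel;

  class ResumeSketch {

    // Truncates (or extends) the partially transferred file to the agreed position,
    // then returns a channel that appends from that position onwards.
    static FileChannel openForResume(File file, long currentPos) throws IOException {
      try (RandomAccessFile raf = new RandomAccessFile(file, "rw")) {
        raf.setLength(currentPos);
      }
      return new FileOutputStream(file, true).getChannel();
    }
  }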
  public void execute() throws Exception {
    final String activeShardCountFailure = checkActiveShardCount();
    final ShardRouting primaryRouting = primary.routingEntry();
    final ShardId primaryId = primaryRouting.shardId();
    if (activeShardCountFailure != null) {
      finishAsFailed(
          new UnavailableShardsException(
              primaryId,
              "{} Timeout: [{}], request: [{}]",
              activeShardCountFailure,
              request.timeout(),
              request));
      return;
    }

    totalShards.incrementAndGet();
    pendingShards.incrementAndGet();
    primaryResult = primary.perform(request);
    final ReplicaRequest replicaRequest = primaryResult.replicaRequest();
    assert replicaRequest.primaryTerm() > 0 : "replicaRequest doesn't have a primary term";
    if (logger.isTraceEnabled()) {
      logger.trace(
          "[{}] op [{}] completed on primary for request [{}]", primaryId, opType, request);
    }

    performOnReplicas(primaryId, replicaRequest);

    successfulShards.incrementAndGet();
    decPendingAndFinishIfNeeded();
  }
  private void addQueryToStream(
      final Operation operation,
      final Timestamp<?> currentTimestamp,
      final DBObject update,
      final String collection)
      throws InterruptedException {
    if (logger.isTraceEnabled()) {
      logger.trace(
          "addQueryToStream - operation [{}], currentTimestamp [{}], update [{}]",
          operation,
          currentTimestamp,
          update);
    }

    if (collection == null) {
      for (String name : slurpedDb.getCollectionNames()) {
        DBCollection slurpedCollection = slurpedDb.getCollection(name);
        for (DBObject item : slurpedCollection.find(update, findKeys)) {
          addToStream(operation, currentTimestamp, item, collection);
        }
      }
    } else {
      DBCollection slurpedCollection = slurpedDb.getCollection(collection);
      for (DBObject item : slurpedCollection.find(update, findKeys)) {
        addToStream(operation, currentTimestamp, item, collection);
      }
    }
  }
Example no. 7
0
  public static NodeOperation withDownstream(
      ExecutionPhase executionPhase,
      ExecutionPhase downstreamExecutionPhase,
      byte inputId,
      String localNodeId) {
    if (downstreamExecutionPhase.executionNodes().isEmpty()) {
      if (executionPhase instanceof UpstreamPhase
          && executionPhase.executionNodes().size() == 1
          && executionPhase.executionNodes().contains(localNodeId)) {
        ((UpstreamPhase) executionPhase).distributionType(DistributionType.SAME_NODE);
        LOGGER.trace(
            "Phase uses SAME_NODE downstream, reason: ON HANDLER, executionNodes: {}, phase: {}",
            executionPhase.executionNodes(),
            executionPhase);
        return new NodeOperation(
            executionPhase,
            ImmutableList.<String>of(),
            downstreamExecutionPhase.executionPhaseId(),
            inputId);
      }
      return new NodeOperation(
          executionPhase,
          ImmutableList.of(ExecutionPhase.DIRECT_RETURN_DOWNSTREAM_NODE),
          downstreamExecutionPhase.executionPhaseId(),
          inputId);
    } else {
      if (executionPhase instanceof UpstreamPhase) {
        if (executionPhase.executionNodes().size() == 1
            && executionPhase.executionNodes().equals(downstreamExecutionPhase.executionNodes())) {
          ((UpstreamPhase) executionPhase).distributionType(DistributionType.SAME_NODE);
          LOGGER.trace(
              "Phase uses SAME_NODE downstream, reason: ON DOWNSTRREAM NODE, executionNodes: {}, phase: {}",
              executionPhase.executionNodes(),
              executionPhase);
        }
      }

      return new NodeOperation(
          executionPhase,
          downstreamExecutionPhase.executionNodes(),
          downstreamExecutionPhase.executionPhaseId(),
          inputId);
    }
  }
 /**
  * Checks whether we can perform a write based on the required active shard count setting. Returns
  * {@code null} if OK to proceed, or a string describing the reason to stop
  */
 protected String checkActiveShardCount() {
   final ShardId shardId = primary.routingEntry().shardId();
   final String indexName = shardId.getIndexName();
   final ClusterState state = clusterStateSupplier.get();
   assert state != null : "replication operation must have access to the cluster state";
   final ActiveShardCount waitForActiveShards = request.waitForActiveShards();
   if (waitForActiveShards == ActiveShardCount.NONE) {
     return null; // not waiting for any shards
   }
   IndexRoutingTable indexRoutingTable = state.getRoutingTable().index(indexName);
   if (indexRoutingTable == null) {
     logger.trace("[{}] index not found in the routing table", shardId);
     return "Index " + indexName + " not found in the routing table";
   }
   IndexShardRoutingTable shardRoutingTable = indexRoutingTable.shard(shardId.getId());
   if (shardRoutingTable == null) {
     logger.trace("[{}] shard not found in the routing table", shardId);
     return "Shard " + shardId + " not found in the routing table";
   }
   if (waitForActiveShards.enoughShardsActive(shardRoutingTable)) {
     return null;
   } else {
     final String resolvedShards =
         waitForActiveShards == ActiveShardCount.ALL
             ? Integer.toString(shardRoutingTable.shards().size())
             : waitForActiveShards.toString();
     logger.trace(
         "[{}] not enough active copies to meet shard count of [{}] (have {}, needed {}), scheduling a retry. op [{}], "
             + "request [{}]",
         shardId,
         waitForActiveShards,
         shardRoutingTable.activeShards().size(),
         resolvedShards,
         opType,
         request);
     return "Not enough active copies to meet shard count of ["
         + waitForActiveShards
         + "] (have "
         + shardRoutingTable.activeShards().size()
         + ", needed "
         + resolvedShards
         + ").";
   }
 }
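A minimal sketch, not the Elasticsearch implementation, of the decision the method above describes: ALL requires every copy in the shard routing table to be active, otherwise the configured number of active copies must be met:

  final class ActiveShardCheckSketch {

    // required == null stands in for ActiveShardCount.ALL in this sketch.
    static boolean enoughShardsActive(int activeCopies, int totalCopies, Integer required) {
      int needed = (required == null) ? totalCopies : required;
      return activeCopies >= needed;
    }

    public static void main(String[] args) {
      System.out.println(enoughShardsActive(1, 2, null)); // false: ALL needs both copies active
      System.out.println(enoughShardsActive(1, 2, 1));    // true: one active copy is enough
    }
  }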
Example no. 9
0
 public @Nullable String getClusterId() {
   // wait until clusterId is available (master has been elected)
   try {
     return clusterIdService.clusterId().get().value().toString();
   } catch (InterruptedException | ExecutionException e) {
     if (logger.isTraceEnabled()) {
       logger.trace("Error getting cluster id", e);
     }
     return null;
   }
 }
  private void performOnReplica(final ShardRouting shard, final ReplicaRequest replicaRequest) {
    if (logger.isTraceEnabled()) {
      logger.trace(
          "[{}] sending op [{}] to replica {} for request [{}]",
          shard.shardId(),
          opType,
          shard,
          replicaRequest);
    }

    totalShards.incrementAndGet();
    pendingShards.incrementAndGet();
    replicasProxy.performOn(
        shard,
        replicaRequest,
        new ActionListener<TransportResponse.Empty>() {
          @Override
          public void onResponse(TransportResponse.Empty empty) {
            successfulShards.incrementAndGet();
            decPendingAndFinishIfNeeded();
          }

          @Override
          public void onFailure(Exception replicaException) {
            logger.trace(
                "[{}] failure while performing [{}] on replica {}, request [{}]",
                replicaException,
                shard.shardId(),
                opType,
                shard,
                replicaRequest);
            if (ignoreReplicaException(replicaException)) {
              decPendingAndFinishIfNeeded();
            } else {
              RestStatus restStatus = ExceptionsHelper.status(replicaException);
              shardReplicaFailures.add(
                  new ReplicationResponse.ShardInfo.Failure(
                      shard.shardId(), shard.currentNodeId(), replicaException, restStatus, false));
              String message =
                  String.format(Locale.ROOT, "failed to perform %s on replica %s", opType, shard);
              logger.warn("[{}] {}", replicaException, shard.shardId(), message);
              replicasProxy.failShard(
                  shard,
                  primary.routingEntry(),
                  message,
                  replicaException,
                  ReplicationOperation.this::decPendingAndFinishIfNeeded,
                  ReplicationOperation.this::onPrimaryDemoted,
                  throwable -> decPendingAndFinishIfNeeded());
            }
          }
        });
  }
  public boolean isAllow(RestRequest request) {
    RestRequest.Method method = request.method();
    if (log.isTraceEnabled()) {
      log.trace(String.format("Checking rules for %s request [%s]...", method, request.path()));
    }

    Set<String> allowRules = allowRulesByMethod.get(request.method());
    String path = request.path();
    if (allowRules != null) {
      for (String allowRule : allowRules) {
        if (path.startsWith(allowRule)) {
          if (log.isTraceEnabled()) {
            log.trace(
                String.format(
                    "Find matching rule [%s] for %s request [%s]: allow", allowRule, method, path));
          }
          return true;
        }
      }
    }

    log.trace(String.format("No matching rules for %s request [%s]: reject", method, path));
    return false;
  }
 private void processAdminCommandOplogEntry(
     final DBObject entry, final Timestamp<?> startTimestamp) throws InterruptedException {
   if (logger.isTraceEnabled()) {
     logger.trace("processAdminCommandOplogEntry - [{}]", entry);
   }
   DBObject object = (DBObject) entry.get(MongoDBRiver.OPLOG_OBJECT);
   if (definition.isImportAllCollections()) {
     if (object.containsField(MongoDBRiver.OPLOG_RENAME_COLLECTION_COMMAND_OPERATION)
         && object.containsField(MongoDBRiver.OPLOG_TO)) {
       String to = object.get(MongoDBRiver.OPLOG_TO).toString();
       if (to.startsWith(definition.getMongoDb())) {
         String newCollection = getCollectionFromNamespace(to);
         DBCollection coll = slurpedDb.getCollection(newCollection);
         doInitialImport(coll);
       }
     }
   }
 }
 /** Moves the index folder found in <code>source</code> to <code>target</code> */
 void upgrade(final Index index, final Path source, final Path target) throws IOException {
   boolean success = false;
   try {
     Files.move(source, target, StandardCopyOption.ATOMIC_MOVE);
     success = true;
   } catch (NoSuchFileException | FileNotFoundException exception) {
     // thrown when the source is non-existent because the folder was renamed
     // by another node (shared FS) after we checked if the target exists
     logger.error(
         "multiple nodes trying to upgrade [{}] in parallel, retry upgrading with single node",
         exception,
         target);
     throw exception;
   } finally {
     if (success) {
       logger.info("{} moved from [{}] to [{}]", index, source, target);
       logger.trace("{} syncing directory [{}]", index, target);
       IOUtils.fsync(target, true);
     }
   }
 }
Example no. 14
0
 public MockEngineSupport(EngineConfig config, Class<? extends FilterDirectoryReader> wrapper) {
   Settings indexSettings = config.getIndexSettings();
   shardId = config.getShardId();
   filterCache = config.getQueryCache();
   filterCachingPolicy = config.getQueryCachingPolicy();
    final long seed = indexSettings.getAsLong(ESIntegTestCase.SETTING_INDEX_SEED, 0L);
   Random random = new Random(seed);
   final double ratio =
       indexSettings.getAsDouble(
           WRAP_READER_RATIO, 0.0d); // DISABLED by default - AssertingDR is crazy slow
   boolean wrapReader = random.nextDouble() < ratio;
   if (logger.isTraceEnabled()) {
     logger.trace(
         "Using [{}] for shard [{}] seed: [{}] wrapReader: [{}]",
         this.getClass().getName(),
         shardId,
         seed,
         wrapReader);
   }
   mockContext = new MockContext(random, wrapReader, wrapper, indexSettings);
   this.searcherCloseable = new SearcherCloseable();
   LuceneTestCase.closeAfterSuite(searcherCloseable); // only one suite closeable per Engine
 }
  private void addToStream(
      final Operation operation,
      final Timestamp<?> currentTimestamp,
      final DBObject data,
      final String collection)
      throws InterruptedException {
    if (logger.isTraceEnabled()) {
      logger.trace(
          "addToStream - operation [{}], currentTimestamp [{}], data [{}], collection [{}]",
          operation,
          currentTimestamp,
          data,
          collection);
    }

    if (operation == Operation.DROP_DATABASE) {
      if (definition.isImportAllCollections()) {
        for (String name : slurpedDb.getCollectionNames()) {
          context
              .getStream()
              .put(
                  new MongoDBRiver.QueueEntry(
                      currentTimestamp, Operation.DROP_COLLECTION, data, name));
        }
      } else {
        context
            .getStream()
            .put(
                new MongoDBRiver.QueueEntry(
                    currentTimestamp, Operation.DROP_COLLECTION, data, collection));
      }
    } else {
      context
          .getStream()
          .put(new MongoDBRiver.QueueEntry(currentTimestamp, operation, data, collection));
    }
  }
Example no. 16
0
 public Context convert(
     WhereClause whereClause,
     MapperService mapperService,
     IndexFieldDataService indexFieldDataService,
     IndexCache indexCache)
     throws UnsupportedFeatureException {
   Context ctx = new Context(inputSymbolVisitor, mapperService, indexFieldDataService, indexCache);
   if (whereClause.noMatch()) {
     ctx.query = Queries.newMatchNoDocsQuery();
   } else if (!whereClause.hasQuery()) {
     ctx.query = Queries.newMatchAllQuery();
   } else {
     ctx.query = VISITOR.process(whereClause.query(), ctx);
   }
   if (LOGGER.isTraceEnabled()) {
     if (whereClause.hasQuery()) {
       LOGGER.trace(
           "WHERE CLAUSE [{}] -> LUCENE QUERY [{}] ",
           SymbolFormatter.format(whereClause.query()),
           ctx.query);
     }
   }
   return ctx;
 }
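A minimal sketch of the three-way mapping above using plain Lucene classes (an assumption; the original goes through the Queries helper and a symbol visitor, and MatchNoDocsQuery is only available in recent Lucene versions): a no-match clause becomes a match-no-docs query, a missing query becomes match-all, and anything else is the visitor's translation:

  import org.apache.lucene.search.MatchAllDocsQuery;
  import org.apache.lucene.search.MatchNoDocsQuery;
  import org.apache.lucene.search.Query;

  final class WhereClauseSketch {

    static Query toLuceneQuery(boolean noMatch, boolean hasQuery, Query translated) {
      if (noMatch) {
        return new MatchNoDocsQuery();  // the WHERE clause can never match
      }
      if (!hasQuery) {
        return new MatchAllDocsQuery(); // no restriction: match every document
      }
      return translated;                // result of visiting the symbol tree
    }
  }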
Example no. 17
0
  @Override
  public void fillRoles(final User user, final AuthCredentials optionalAuthCreds)
      throws AuthException {

    final String authenticatedUser = user.getName();

    if (optionalAuthCreds != null) {
      optionalAuthCreds.clear();
    }

    Entry entry = null;
    String dn = null;
    EntryCursor result = null;
    EntryCursor rolesResult = null;
    LdapConnection ldapConnection = null;

    try {

      ldapConnection = getConnection(settings);

      final String bindDn = settings.get(ConfigConstants.ARMOR_AUTHENTICATION_LDAP_BIND_DN, null);

      if (bindDn != null) {
        ldapConnection.bind(
            bindDn, settings.get(ConfigConstants.ARMOR_AUTHENTICATION_LDAP_PASSWORD, null));
      } else {
        ldapConnection.anonymousBind();
      }

      if (Dn.isValid(authenticatedUser)) {
        // assume dn
        log.trace("{} is a valid DN", authenticatedUser);
        entry = ldapConnection.lookup(authenticatedUser);

        if (entry == null) {
          throw new AuthException("No user '" + authenticatedUser + "' found");
        }

      } else {

        // TODO FUTURE all ldap searches: follow referrals
        result =
            ldapConnection.search(
                settings.get(ConfigConstants.ARMOR_AUTHENTICATION_LDAP_USERBASE, ""),
                settings
                    .get(
                        ConfigConstants.ARMOR_AUTHENTICATION_LDAP_USERSEARCH,
                        "(sAMAccountName={0})")
                    .replace("{0}", authenticatedUser),
                SearchScope.SUBTREE);

        if (!result.next()) {
          throw new AuthException("No user '" + authenticatedUser + "' found");
        }

        entry = result.get();

        if (result.next()) {
          throw new AuthException("More than user found");
        }
      }

      dn = entry.getDn().toString();

      log.trace("User found with DN {}", dn);

      final Set<String> userRolesDn = new HashSet<String>();

      // Roles as an attribute of the user entry
      // Role names may also be held as the values of an attribute in the user's directory entry.
      // Use userRoleName to specify the name of this attribute.
      final String userRoleName =
          settings.get(
              ConfigConstants.ARMOR_AUTHENTICATION_AUTHORIZATION_LDAP_USERROLENAME, "memberOf");
      if (entry.get(userRoleName) != null) {
        final Value[] userRoles =
            Iterators.toArray(entry.get(userRoleName).iterator(), Value.class);

        for (int i = 0; i < userRoles.length; i++) {
          final Value value = userRoles[i];
          final String possibleRoleDN = value.getString();
          if (Dn.isValid(possibleRoleDN)) {
            userRolesDn.add(possibleRoleDN);
          }
        }

        log.trace("User roles count: {}", userRolesDn.size());
      }

      final Map<Tuple<String, Dn>, Entry> roles = new HashMap<Tuple<String, Dn>, Entry>();
      final String roleName =
          settings.get(ConfigConstants.ARMOR_AUTHENTICATION_AUTHORIZATION_LDAP_ROLENAME, "name");

      // replace {2}
      final String userRoleAttribute =
          settings.get(
              ConfigConstants.ARMOR_AUTHENTICATION_AUTHORIZATION_LDAP_USERROLEATTRIBUTE, null);
      String userRoleAttributeValue = null;

      if (userRoleAttribute != null) {
        userRoleAttributeValue =
            entry.get(userRoleAttribute) == null ? null : entry.get(userRoleAttribute).getString();
      }

      rolesResult =
          ldapConnection.search(
              settings.get(ConfigConstants.ARMOR_AUTHENTICATION_AUTHORIZATION_LDAP_ROLEBASE, ""),
              settings
                  .get(
                      ConfigConstants.ARMOR_AUTHENTICATION_AUTHORIZATION_LDAP_ROLESEARCH,
                      "(member={0})")
                  .replace("{0}", dn)
                  .replace("{1}", authenticatedUser)
                  .replace("{2}", userRoleAttributeValue == null ? "{2}" : userRoleAttributeValue),
              SearchScope.SUBTREE);

      for (final Iterator iterator = rolesResult.iterator(); iterator.hasNext(); ) {
        final Entry searchResultEntry = (Entry) iterator.next();
        roles.put(
            new Tuple<String, Dn>(searchResultEntry.getDn().toString(), searchResultEntry.getDn()),
            searchResultEntry);
      }

      log.trace("non user roles count: {}", roles.size());

      for (final Iterator<String> it = userRolesDn.iterator(); it.hasNext(); ) {
        final String stringVal = it.next();
        // lookup
        final Entry userRole = ldapConnection.lookup(stringVal);
        roles.put(new Tuple<String, Dn>(stringVal, null), userRole);
      }

      // nested roles
      if (settings.getAsBoolean(
          ConfigConstants.ARMOR_AUTHENTICATION_AUTHORIZATION_LDAP_RESOLVE_NESTED_ROLES, false)) {

        log.trace("Evaluate nested roles");

        final Set<Entry> nestedReturn = new HashSet<Entry>(roles.values());

        for (final Iterator<java.util.Map.Entry<Tuple<String, Dn>, Entry>> iterator =
                roles.entrySet().iterator();
            iterator.hasNext(); ) {
          final java.util.Map.Entry<Tuple<String, Dn>, Entry> _entry = iterator.next();

          final Set<Entry> x = resolveNestedRoles(_entry.getKey(), ldapConnection, roleName);

          log.trace("{}. nested roles for {} {}", x.size(), _entry.getKey(), roleName);

          nestedReturn.addAll(x);
        }

        for (final Iterator iterator = nestedReturn.iterator(); iterator.hasNext(); ) {
          final Entry entry2 = (Entry) iterator.next();
          final String role = entry2.get(roleName).getString();
          user.addRole(role);
        }

        if (user instanceof LdapUser) {
          ((LdapUser) user).addRoleEntries(nestedReturn);
        }

      } else {

        for (final Iterator iterator = roles.values().iterator(); iterator.hasNext(); ) {
          final Entry entry2 = (Entry) iterator.next();
          final String role = entry2.get(roleName).getString();
          user.addRole(role);
        }

        if (user instanceof LdapUser) {
          ((LdapUser) user).addRoleEntries(roles.values());
        }
      }

    } catch (final Exception e) {
      log.error(e.toString(), e);
      throw new AuthException(e);
    } finally {
      if (result != null) {
        result.close();
      }

      if (rolesResult != null) {
        rolesResult.close();
      }

      SecurityUtil.unbindAndCloseSilently(ldapConnection);
    }
  }
  @SuppressWarnings("unchecked")
  public static synchronized MongoDBRiverDefinition parseSettings(
      String riverName,
      String riverIndexName,
      RiverSettings settings,
      ScriptService scriptService) {

    logger.trace("Parse river settings for {}", riverName);
    Preconditions.checkNotNull(riverName, "No riverName specified");
    Preconditions.checkNotNull(riverIndexName, "No riverIndexName specified");
    Preconditions.checkNotNull(settings, "No settings specified");

    Builder builder = new Builder();
    builder.riverName(riverName);
    builder.riverIndexName(riverIndexName);

    List<ServerAddress> mongoServers = new ArrayList<ServerAddress>();
    String mongoHost;
    int mongoPort;

    if (settings.settings().containsKey(MongoDBRiver.TYPE)) {
      Map<String, Object> mongoSettings =
          (Map<String, Object>) settings.settings().get(MongoDBRiver.TYPE);
      if (mongoSettings.containsKey(SERVERS_FIELD)) {
        Object mongoServersSettings = mongoSettings.get(SERVERS_FIELD);
        logger.trace("mongoServersSettings: " + mongoServersSettings);
        boolean array = XContentMapValues.isArray(mongoServersSettings);

        if (array) {
          ArrayList<Map<String, Object>> feeds =
              (ArrayList<Map<String, Object>>) mongoServersSettings;
          for (Map<String, Object> feed : feeds) {
            mongoHost = XContentMapValues.nodeStringValue(feed.get(HOST_FIELD), null);
            mongoPort = XContentMapValues.nodeIntegerValue(feed.get(PORT_FIELD), DEFAULT_DB_PORT);
            logger.trace("Server: " + mongoHost + " - " + mongoPort);
            try {
              mongoServers.add(new ServerAddress(mongoHost, mongoPort));
            } catch (UnknownHostException uhEx) {
              logger.warn("Cannot add mongo server {}:{}", uhEx, mongoHost, mongoPort);
            }
          }
        }
      } else {
        mongoHost =
            XContentMapValues.nodeStringValue(mongoSettings.get(HOST_FIELD), DEFAULT_DB_HOST);
        mongoPort =
            XContentMapValues.nodeIntegerValue(mongoSettings.get(PORT_FIELD), DEFAULT_DB_PORT);
        try {
          mongoServers.add(new ServerAddress(mongoHost, mongoPort));
        } catch (UnknownHostException uhEx) {
          logger.warn("Cannot add mongo server {}:{}", uhEx, mongoHost, mongoPort);
        }
      }
      builder.mongoServers(mongoServers);

      MongoClientOptions.Builder mongoClientOptionsBuilder =
          MongoClientOptions.builder().socketKeepAlive(true);

      // MongoDB options
      if (mongoSettings.containsKey(OPTIONS_FIELD)) {
        Map<String, Object> mongoOptionsSettings =
            (Map<String, Object>) mongoSettings.get(OPTIONS_FIELD);
        logger.trace("mongoOptionsSettings: " + mongoOptionsSettings);
        builder.mongoSecondaryReadPreference(
            XContentMapValues.nodeBooleanValue(
                mongoOptionsSettings.get(SECONDARY_READ_PREFERENCE_FIELD), false));
        builder.connectTimeout(
            XContentMapValues.nodeIntegerValue(
                mongoOptionsSettings.get(CONNECT_TIMEOUT), DEFAULT_CONNECT_TIMEOUT));
        builder.socketTimeout(
            XContentMapValues.nodeIntegerValue(
                mongoOptionsSettings.get(SOCKET_TIMEOUT), DEFAULT_SOCKET_TIMEOUT));
        builder.dropCollection(
            XContentMapValues.nodeBooleanValue(
                mongoOptionsSettings.get(DROP_COLLECTION_FIELD), false));
        String isMongos =
            XContentMapValues.nodeStringValue(mongoOptionsSettings.get(IS_MONGOS_FIELD), null);
        if (isMongos != null) {
          builder.isMongos(Boolean.valueOf(isMongos));
        }
        builder.mongoUseSSL(
            XContentMapValues.nodeBooleanValue(
                mongoOptionsSettings.get(SSL_CONNECTION_FIELD), false));
        builder.mongoSSLVerifyCertificate(
            XContentMapValues.nodeBooleanValue(
                mongoOptionsSettings.get(SSL_VERIFY_CERT_FIELD), true));
        builder.advancedTransformation(
            XContentMapValues.nodeBooleanValue(
                mongoOptionsSettings.get(ADVANCED_TRANSFORMATION_FIELD), false));
        builder.skipInitialImport(
            XContentMapValues.nodeBooleanValue(
                mongoOptionsSettings.get(SKIP_INITIAL_IMPORT_FIELD), false));
        builder.connectionsPerHost(
            XContentMapValues.nodeIntegerValue(
                mongoOptionsSettings.get(CONNECTIONS_PER_HOST), DEFAULT_CONNECTIONS_PER_HOST));
        builder.threadsAllowedToBlockForConnectionMultiplier(
            XContentMapValues.nodeIntegerValue(
                mongoOptionsSettings.get(THREADS_ALLOWED_TO_BLOCK_FOR_CONNECTION_MULTIPLIER),
                DEFAULT_THREADS_ALLOWED_TO_BLOCK_FOR_CONNECTION_MULTIPLIER));

        mongoClientOptionsBuilder
            .connectTimeout(builder.connectTimeout)
            .socketTimeout(builder.socketTimeout)
            .connectionsPerHost(builder.connectionsPerHost)
            .threadsAllowedToBlockForConnectionMultiplier(
                builder.threadsAllowedToBlockForConnectionMultiplier);

        if (builder.mongoSecondaryReadPreference) {
          mongoClientOptionsBuilder.readPreference(ReadPreference.secondaryPreferred());
        }

        if (builder.mongoUseSSL) {
          mongoClientOptionsBuilder.socketFactory(getSSLSocketFactory());
        }

        if (mongoOptionsSettings.containsKey(PARENT_TYPES_FIELD)) {
          Set<String> parentTypes = new HashSet<String>();
          Object parentTypesSettings = mongoOptionsSettings.get(PARENT_TYPES_FIELD);
          logger.trace("parentTypesSettings: " + parentTypesSettings);
          boolean array = XContentMapValues.isArray(parentTypesSettings);

          if (array) {
            ArrayList<String> fields = (ArrayList<String>) parentTypesSettings;
            for (String field : fields) {
              logger.trace("Field: " + field);
              parentTypes.add(field);
            }
          }

          builder.parentTypes(parentTypes);
        }

        if (mongoOptionsSettings.containsKey(STORE_STATISTICS_FIELD)) {
          Object storeStatistics = mongoOptionsSettings.get(STORE_STATISTICS_FIELD);
          boolean object = XContentMapValues.isObject(storeStatistics);
          if (object) {
            Map<String, Object> storeStatisticsSettings = (Map<String, Object>) storeStatistics;
            builder.storeStatistics(true);
            builder.statisticsIndexName(
                XContentMapValues.nodeStringValue(
                    storeStatisticsSettings.get(INDEX_OBJECT), riverName + "-stats"));
            builder.statisticsTypeName(
                XContentMapValues.nodeStringValue(
                    storeStatisticsSettings.get(TYPE_FIELD), "stats"));
          } else {
            builder.storeStatistics(XContentMapValues.nodeBooleanValue(storeStatistics, false));
            if (builder.storeStatistics) {
              builder.statisticsIndexName(riverName + "-stats");
              builder.statisticsTypeName("stats");
            }
          }
        }
        // builder.storeStatistics(XContentMapValues.nodeBooleanValue(mongoOptionsSettings.get(STORE_STATISTICS_FIELD),
        // false));
        builder.importAllCollections(
            XContentMapValues.nodeBooleanValue(
                mongoOptionsSettings.get(IMPORT_ALL_COLLECTIONS_FIELD), false));
        builder.disableIndexRefresh(
            XContentMapValues.nodeBooleanValue(
                mongoOptionsSettings.get(DISABLE_INDEX_REFRESH_FIELD), false));
        builder.includeCollection(
            XContentMapValues.nodeStringValue(
                mongoOptionsSettings.get(INCLUDE_COLLECTION_FIELD), ""));

        if (mongoOptionsSettings.containsKey(INCLUDE_FIELDS_FIELD)) {
          Set<String> includeFields = new HashSet<String>();
          Object includeFieldsSettings = mongoOptionsSettings.get(INCLUDE_FIELDS_FIELD);
          logger.trace("includeFieldsSettings: " + includeFieldsSettings);
          boolean array = XContentMapValues.isArray(includeFieldsSettings);

          if (array) {
            ArrayList<String> fields = (ArrayList<String>) includeFieldsSettings;
            for (String field : fields) {
              logger.trace("Field: " + field);
              includeFields.add(field);
            }
          }

          if (!includeFields.contains(MongoDBRiver.MONGODB_ID_FIELD)) {
            includeFields.add(MongoDBRiver.MONGODB_ID_FIELD);
          }
          builder.includeFields(includeFields);
        } else if (mongoOptionsSettings.containsKey(EXCLUDE_FIELDS_FIELD)) {
          Set<String> excludeFields = new HashSet<String>();
          Object excludeFieldsSettings = mongoOptionsSettings.get(EXCLUDE_FIELDS_FIELD);
          logger.trace("excludeFieldsSettings: " + excludeFieldsSettings);
          boolean array = XContentMapValues.isArray(excludeFieldsSettings);

          if (array) {
            ArrayList<String> fields = (ArrayList<String>) excludeFieldsSettings;
            for (String field : fields) {
              logger.trace("Field: " + field);
              excludeFields.add(field);
            }
          }

          builder.excludeFields(excludeFields);
        }

        if (mongoOptionsSettings.containsKey(INITIAL_TIMESTAMP_FIELD)) {
          BSONTimestamp timeStamp = null;
          try {
            Map<String, Object> initalTimestampSettings =
                (Map<String, Object>) mongoOptionsSettings.get(INITIAL_TIMESTAMP_FIELD);
            String scriptType = "js";
            if (initalTimestampSettings.containsKey(INITIAL_TIMESTAMP_SCRIPT_TYPE_FIELD)) {
              scriptType =
                  initalTimestampSettings.get(INITIAL_TIMESTAMP_SCRIPT_TYPE_FIELD).toString();
            }
            if (initalTimestampSettings.containsKey(INITIAL_TIMESTAMP_SCRIPT_FIELD)) {

              ExecutableScript scriptExecutable =
                  scriptService.executable(
                      scriptType,
                      initalTimestampSettings.get(INITIAL_TIMESTAMP_SCRIPT_FIELD).toString(),
                      ScriptService.ScriptType.INLINE,
                      Maps.newHashMap());
              Object ctx = scriptExecutable.run();
              logger.trace("initialTimestamp script returned: {}", ctx);
              if (ctx != null) {
                long timestamp = Long.parseLong(ctx.toString());
                timeStamp = new BSONTimestamp((int) (new Date(timestamp).getTime() / 1000), 1);
              }
            }
          } catch (Throwable t) {
            logger.error("Could not set initial timestamp", t);
          } finally {
            builder.initialTimestamp(timeStamp);
          }
        }
      }
      builder.mongoClientOptions(mongoClientOptionsBuilder.build());

      // Credentials
      if (mongoSettings.containsKey(CREDENTIALS_FIELD)) {
        String dbCredential;
        String mau = "";
        String map = "";
        String maad = "";
        String mlu = "";
        String mlp = "";
        String mlad = "";
        // String mdu = "";
        // String mdp = "";
        Object mongoCredentialsSettings = mongoSettings.get(CREDENTIALS_FIELD);
        boolean array = XContentMapValues.isArray(mongoCredentialsSettings);

        if (array) {
          ArrayList<Map<String, Object>> credentials =
              (ArrayList<Map<String, Object>>) mongoCredentialsSettings;
          for (Map<String, Object> credential : credentials) {
            dbCredential = XContentMapValues.nodeStringValue(credential.get(DB_FIELD), null);
            if (ADMIN_DB_FIELD.equals(dbCredential)) {
              mau = XContentMapValues.nodeStringValue(credential.get(USER_FIELD), null);
              map = XContentMapValues.nodeStringValue(credential.get(PASSWORD_FIELD), null);
              maad = XContentMapValues.nodeStringValue(credential.get(AUTH_FIELD), null);
            } else if (LOCAL_DB_FIELD.equals(dbCredential)) {
              mlu = XContentMapValues.nodeStringValue(credential.get(USER_FIELD), null);
              mlp = XContentMapValues.nodeStringValue(credential.get(PASSWORD_FIELD), null);
              mlad = XContentMapValues.nodeStringValue(credential.get(AUTH_FIELD), null);
              // } else {
              // mdu = XContentMapValues.nodeStringValue(
              // credential.get(USER_FIELD), null);
              // mdp = XContentMapValues.nodeStringValue(
              // credential.get(PASSWORD_FIELD), null);
            }
          }
        }
        builder.mongoAdminUser(mau);
        builder.mongoAdminPassword(map);
        builder.mongoAdminAuthDatabase(maad);
        builder.mongoLocalUser(mlu);
        builder.mongoLocalPassword(mlp);
        builder.mongoLocalAuthDatabase(mlad);
        // mongoDbUser = mdu;
        // mongoDbPassword = mdp;
      }

      builder.mongoDb(XContentMapValues.nodeStringValue(mongoSettings.get(DB_FIELD), riverName));
      builder.mongoCollection(
          XContentMapValues.nodeStringValue(mongoSettings.get(COLLECTION_FIELD), riverName));
      builder.mongoGridFS(
          XContentMapValues.nodeBooleanValue(mongoSettings.get(GRIDFS_FIELD), false));
      if (mongoSettings.containsKey(FILTER_FIELD)) {
        String filter = XContentMapValues.nodeStringValue(mongoSettings.get(FILTER_FIELD), "");
        filter = removePrefix("o.", filter);
        builder.mongoCollectionFilter(convertToBasicDBObject(filter));
        // DBObject bsonObject = (DBObject) JSON.parse(filter);
        // builder.mongoOplogFilter(convertToBasicDBObject(addPrefix("o.",
        // filter)));
        builder.mongoOplogFilter(convertToBasicDBObject(removePrefix("o.", filter)));
        // } else {
        // builder.mongoOplogFilter("");
      }

      if (mongoSettings.containsKey(SCRIPT_FIELD)) {
        String scriptType = "js";
        builder.script(mongoSettings.get(SCRIPT_FIELD).toString());
        if (mongoSettings.containsKey("scriptType")) {
          scriptType = mongoSettings.get("scriptType").toString();
        } else if (mongoSettings.containsKey(SCRIPT_TYPE_FIELD)) {
          scriptType = mongoSettings.get(SCRIPT_TYPE_FIELD).toString();
        }
        builder.scriptType(scriptType);
      }
    } else {
      mongoHost = DEFAULT_DB_HOST;
      mongoPort = DEFAULT_DB_PORT;
      try {
        mongoServers.add(new ServerAddress(mongoHost, mongoPort));
        builder.mongoServers(mongoServers);
      } catch (UnknownHostException e) {
        e.printStackTrace();
      }
      builder.mongoDb(riverName);
      builder.mongoCollection(riverName);
    }

    if (settings.settings().containsKey(INDEX_OBJECT)) {
      Map<String, Object> indexSettings =
          (Map<String, Object>) settings.settings().get(INDEX_OBJECT);
      builder.indexName(
          XContentMapValues.nodeStringValue(indexSettings.get(NAME_FIELD), builder.mongoDb));
      builder.typeName(
          XContentMapValues.nodeStringValue(indexSettings.get(TYPE_FIELD), builder.mongoDb));

      Bulk.Builder bulkBuilder = new Bulk.Builder();
      if (indexSettings.containsKey(BULK_FIELD)) {
        Map<String, Object> bulkSettings = (Map<String, Object>) indexSettings.get(BULK_FIELD);
        int bulkActions =
            XContentMapValues.nodeIntegerValue(
                bulkSettings.get(ACTIONS_FIELD), DEFAULT_BULK_ACTIONS);
        bulkBuilder.bulkActions(bulkActions);
        String size =
            XContentMapValues.nodeStringValue(
                bulkSettings.get(SIZE_FIELD), DEFAULT_BULK_SIZE.toString());
        bulkBuilder.bulkSize(ByteSizeValue.parseBytesSizeValue(size));
        bulkBuilder.concurrentRequests(
            XContentMapValues.nodeIntegerValue(
                bulkSettings.get(CONCURRENT_REQUESTS_FIELD),
                EsExecutors.boundedNumberOfProcessors(ImmutableSettings.EMPTY)));
        bulkBuilder.flushInterval(
            XContentMapValues.nodeTimeValue(
                bulkSettings.get(FLUSH_INTERVAL_FIELD), DEFAULT_FLUSH_INTERVAL));
        builder.throttleSize(
            XContentMapValues.nodeIntegerValue(
                indexSettings.get(THROTTLE_SIZE_FIELD), bulkActions * 5));
      } else {
        int bulkActions =
            XContentMapValues.nodeIntegerValue(
                indexSettings.get(BULK_SIZE_FIELD), DEFAULT_BULK_ACTIONS);
        bulkBuilder.bulkActions(bulkActions);
        bulkBuilder.bulkSize(DEFAULT_BULK_SIZE);
        bulkBuilder.flushInterval(
            XContentMapValues.nodeTimeValue(
                indexSettings.get(BULK_TIMEOUT_FIELD), DEFAULT_FLUSH_INTERVAL));
        bulkBuilder.concurrentRequests(
            XContentMapValues.nodeIntegerValue(
                indexSettings.get(CONCURRENT_BULK_REQUESTS_FIELD),
                EsExecutors.boundedNumberOfProcessors(ImmutableSettings.EMPTY)));
        builder.throttleSize(
            XContentMapValues.nodeIntegerValue(
                indexSettings.get(THROTTLE_SIZE_FIELD), bulkActions * 5));
      }
      builder.bulk(bulkBuilder.build());
    } else {
      builder.indexName(builder.mongoDb);
      builder.typeName(builder.mongoDb);
      builder.bulk(new Bulk.Builder().build());
    }
    return builder.build();
  }
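The parser above applies one pattern throughout: read an optional value from the nested settings map and fall back to a default. A minimal sketch of that pattern; the helper names are hypothetical and not part of XContentMapValues:

  import java.util.Map;

  final class SettingsReadSketch {

    // Returns the string value stored under key, or the default when the key is absent.
    static String stringSetting(Map<String, Object> settings, String key, String defaultValue) {
      Object value = settings.get(key);
      return value == null ? defaultValue : value.toString();
    }

    // Returns the boolean value stored under key, or the default when the key is absent.
    static boolean boolSetting(Map<String, Object> settings, String key, boolean defaultValue) {
      Object value = settings.get(key);
      return value == null ? defaultValue : Boolean.parseBoolean(value.toString());
    }
  }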
  private Timestamp<?> processOplogEntry(final DBObject entry, final Timestamp<?> startTimestamp)
      throws InterruptedException {
    // To support transactions, TokuMX wraps one or more operations in a single oplog entry, in a
    // list.
    // As long as clients are not transaction-aware, we can pretty safely assume there will only be
    // one operation in the list.
    // Supporting genuine multi-operation transactions will require a bit more logic here.
    flattenOps(entry);

    if (!isValidOplogEntry(entry, startTimestamp)) {
      return startTimestamp;
    }
    Operation operation = Operation.fromString(entry.get(MongoDBRiver.OPLOG_OPERATION).toString());
    String namespace = entry.get(MongoDBRiver.OPLOG_NAMESPACE).toString();
    String collection = null;
    Timestamp<?> oplogTimestamp = Timestamp.on(entry);
    DBObject object = (DBObject) entry.get(MongoDBRiver.OPLOG_OBJECT);

    if (definition.isImportAllCollections()) {
      if (namespace.startsWith(definition.getMongoDb()) && !namespace.equals(cmdOplogNamespace)) {
        collection = getCollectionFromNamespace(namespace);
      }
    } else {
      collection = definition.getMongoCollection();
    }

    if (namespace.equals(cmdOplogNamespace)) {
      if (object.containsField(MongoDBRiver.OPLOG_DROP_COMMAND_OPERATION)) {
        operation = Operation.DROP_COLLECTION;
        if (definition.isImportAllCollections()) {
          collection = object.get(MongoDBRiver.OPLOG_DROP_COMMAND_OPERATION).toString();
          if (collection.startsWith("tmp.mr.")) {
            return startTimestamp;
          }
        }
      }
      if (object.containsField(MongoDBRiver.OPLOG_DROP_DATABASE_COMMAND_OPERATION)) {
        operation = Operation.DROP_DATABASE;
      }
    }

    logger.trace("namespace: {} - operation: {}", namespace, operation);
    if (namespace.equals(MongoDBRiver.OPLOG_ADMIN_COMMAND)) {
      if (operation == Operation.COMMAND) {
        processAdminCommandOplogEntry(entry, startTimestamp);
        return startTimestamp;
      }
    }

    if (logger.isTraceEnabled()) {
      logger.trace("MongoDB object deserialized: {}", object.toString());
      logger.trace("collection: {}", collection);
      logger.trace("oplog entry - namespace [{}], operation [{}]", namespace, operation);
      logger.trace("oplog processing item {}", entry);
    }

    String objectId = getObjectIdFromOplogEntry(entry);
    if (operation == Operation.DELETE) {
      // Include only _id in data, as vanilla MongoDB does, so transformation scripts won't be
      // broken by Toku
      if (object.containsField(MongoDBRiver.MONGODB_ID_FIELD)) {
        if (object.keySet().size() > 1) {
          entry.put(
              MongoDBRiver.OPLOG_OBJECT,
              object = new BasicDBObject(MongoDBRiver.MONGODB_ID_FIELD, objectId));
        }
      } else {
        throw new NullPointerException(MongoDBRiver.MONGODB_ID_FIELD);
      }
    }

    if (definition.isMongoGridFS()
        && namespace.endsWith(MongoDBRiver.GRIDFS_FILES_SUFFIX)
        && (operation == Operation.INSERT || operation == Operation.UPDATE)) {
      if (objectId == null) {
        throw new NullPointerException(MongoDBRiver.MONGODB_ID_FIELD);
      }
      GridFS grid = new GridFS(mongo.getDB(definition.getMongoDb()), collection);
      GridFSDBFile file = grid.findOne(new ObjectId(objectId));
      if (file != null) {
        logger.info("Caught file: {} - {}", file.getId(), file.getFilename());
        object = file;
      } else {
        logger.warn("Cannot find file from id: {}", objectId);
      }
    }

    if (object instanceof GridFSDBFile) {
      if (objectId == null) {
        throw new NullPointerException(MongoDBRiver.MONGODB_ID_FIELD);
      }
      if (logger.isTraceEnabled()) {
        logger.trace("Add attachment: {}", objectId);
      }
      addToStream(operation, oplogTimestamp, applyFieldFilter(object), collection);
    } else {
      if (operation == Operation.UPDATE) {
        DBObject update = (DBObject) entry.get(MongoDBRiver.OPLOG_UPDATE);
        logger.debug("Updated item: {}", update);
        addQueryToStream(operation, oplogTimestamp, update, collection);
      } else {
        if (operation == Operation.INSERT) {
          addInsertToStream(oplogTimestamp, applyFieldFilter(object), collection);
        } else {
          addToStream(operation, oplogTimestamp, applyFieldFilter(object), collection);
        }
      }
    }
    return oplogTimestamp;
  }
Example no. 20
0
  public void close() {
    if (lifecycle.started()) {
      stop();
    }
    if (!lifecycle.moveToClosed()) {
      return;
    }

    ESLogger logger = Loggers.getLogger(Node.class, settings.get("name"));
    logger.info("{{}}[{}]: closing ...", Version.full(), JvmInfo.jvmInfo().pid());

    StopWatch stopWatch = new StopWatch("node_close");
    stopWatch.start("http");
    if (settings.getAsBoolean("http.enabled", true)) {
      injector.getInstance(HttpServer.class).close();
    }
    stopWatch.stop().start("client");
    injector.getInstance(Client.class).close();
    stopWatch.stop().start("routing");
    injector.getInstance(RoutingService.class).close();
    stopWatch.stop().start("cluster");
    injector.getInstance(ClusterService.class).close();
    stopWatch.stop().start("discovery");
    injector.getInstance(DiscoveryService.class).close();
    stopWatch.stop().start("monitor");
    injector.getInstance(MonitorService.class).close();
    stopWatch.stop().start("gateway");
    injector.getInstance(GatewayService.class).close();
    stopWatch.stop().start("search");
    injector.getInstance(SearchService.class).close();
    stopWatch.stop().start("indexers");
    injector.getInstance(RiversManager.class).close();
    stopWatch.stop().start("indices_cluster");
    injector.getInstance(IndicesClusterStateService.class).close();
    stopWatch.stop().start("indices");
    injector.getInstance(IndicesService.class).close();
    stopWatch.stop().start("rest");
    injector.getInstance(RestController.class).close();
    stopWatch.stop().start("transport");
    injector.getInstance(TransportService.class).close();

    for (Class<? extends LifecycleComponent> plugin : pluginsService.services()) {
      stopWatch.stop().start("plugin(" + plugin.getName() + ")");
      injector.getInstance(plugin).close();
    }

    stopWatch.stop().start("node_cache");
    injector.getInstance(NodeCache.class).close();

    stopWatch.stop().start("script");
    injector.getInstance(ScriptService.class).close();

    stopWatch.stop().start("timer");
    injector.getInstance(TimerService.class).close();
    stopWatch.stop().start("thread_pool");
    injector.getInstance(ThreadPool.class).shutdown();
    try {
      injector.getInstance(ThreadPool.class).awaitTermination(10, TimeUnit.SECONDS);
    } catch (InterruptedException e) {
      // ignore
    }
    stopWatch.stop().start("thread_pool_force_shutdown");
    try {
      injector.getInstance(ThreadPool.class).shutdownNow();
    } catch (Exception e) {
      // ignore
    }
    stopWatch.stop();

    ThreadLocals.clearReferencesThreadLocals();

    if (logger.isTraceEnabled()) {
      logger.trace("Close times for each service:\n{}", stopWatch.prettyPrint());
    }

    injector.getInstance(NodeEnvironment.class).close();
    Injectors.close(injector);

    logger.info("{{}}[{}]: closed", Version.full(), JvmInfo.jvmInfo().pid());
  }
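The shutdown sequence above repeats one pattern: close each service and time the step under a named StopWatch section. A minimal sketch of the same idea with plain JDK timing; the class and method are hypothetical stand-ins, not the Elasticsearch StopWatch:

  final class TimedCloseSketch {

    // Closes a service, swallowing failures (shutdown is best effort), and reports the time taken.
    static void closeTimed(String name, AutoCloseable service) {
      long start = System.nanoTime();
      try {
        service.close();
      } catch (Exception e) {
        // ignore: closing is best effort during shutdown
      }
      long tookMillis = (System.nanoTime() - start) / 1_000_000L;
      System.out.println("closed " + name + " in " + tookMillis + "ms");
    }
  }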
Example no. 21
0
  // During concurrent close() calls we want to make sure that all of them return after the node
  // has completed its shutdown cycle.
  // If not, the hook that is added in Bootstrap#setup() will be useless: close() might not be
  // executed if another call to close() (for example an api call) has already set some lifecycles
  // to stopped. In this case the process will be terminated even if the first call to close() has
  // not finished yet.
  public synchronized void close() {
    if (lifecycle.started()) {
      stop();
    }
    if (!lifecycle.moveToClosed()) {
      return;
    }

    ESLogger logger = Loggers.getLogger(Node.class, settings.get("name"));
    logger.info("closing ...");

    StopWatch stopWatch = new StopWatch("node_close");
    stopWatch.start("tribe");
    injector.getInstance(TribeService.class).close();
    stopWatch.stop().start("http");
    if (settings.getAsBoolean("http.enabled", true)) {
      injector.getInstance(HttpServer.class).close();
    }

    stopWatch.stop().start("rivers");
    injector.getInstance(RiversManager.class).close();

    stopWatch.stop().start("snapshot_service");
    injector.getInstance(SnapshotsService.class).close();
    stopWatch.stop().start("client");
    Releasables.close(injector.getInstance(Client.class));
    stopWatch.stop().start("indices_cluster");
    injector.getInstance(IndicesClusterStateService.class).close();
    stopWatch.stop().start("indices");
    injector.getInstance(IndicesFilterCache.class).close();
    injector.getInstance(IndicesFieldDataCache.class).close();
    injector.getInstance(IndexingMemoryController.class).close();
    injector.getInstance(IndicesTTLService.class).close();
    injector.getInstance(IndicesService.class).close();
    stopWatch.stop().start("routing");
    injector.getInstance(RoutingService.class).close();
    stopWatch.stop().start("cluster");
    injector.getInstance(ClusterService.class).close();
    stopWatch.stop().start("discovery");
    injector.getInstance(DiscoveryService.class).close();
    stopWatch.stop().start("monitor");
    injector.getInstance(MonitorService.class).close();
    stopWatch.stop().start("gateway");
    injector.getInstance(GatewayService.class).close();
    stopWatch.stop().start("search");
    injector.getInstance(SearchService.class).close();
    stopWatch.stop().start("rest");
    injector.getInstance(RestController.class).close();
    stopWatch.stop().start("transport");
    injector.getInstance(TransportService.class).close();
    stopWatch.stop().start("percolator_service");
    injector.getInstance(PercolatorService.class).close();

    for (Class<? extends LifecycleComponent> plugin : pluginsService.services()) {
      stopWatch.stop().start("plugin(" + plugin.getName() + ")");
      injector.getInstance(plugin).close();
    }

    stopWatch.stop().start("script");
    injector.getInstance(ScriptService.class).close();

    stopWatch.stop().start("thread_pool");
    // TODO this should really use ThreadPool.terminate()
    injector.getInstance(ThreadPool.class).shutdown();
    try {
      injector.getInstance(ThreadPool.class).awaitTermination(10, TimeUnit.SECONDS);
    } catch (InterruptedException e) {
      // ignore
    }
    stopWatch.stop().start("thread_pool_force_shutdown");
    try {
      injector.getInstance(ThreadPool.class).shutdownNow();
    } catch (Exception e) {
      // ignore
    }
    stopWatch.stop();

    if (logger.isTraceEnabled()) {
      logger.trace("Close times for each service:\n{}", stopWatch.prettyPrint());
    }

    injector.getInstance(NodeEnvironment.class).close();
    injector.getInstance(PageCacheRecycler.class).close();
    Injectors.close(injector);

    CachedStreams.clear();

    logger.info("closed");
  }
  /**
   * Perform phase2 of the recovery process
   *
   * <p>Phase2 takes a snapshot of the current translog *without* acquiring the write lock (however,
   * the translog snapshot is a point-in-time view of the translog). It then sends each translog
   * operation to the target node so it can be replayed into the new shard.
   *
   * <p>{@code InternalEngine#recover} is responsible for taking the snapshot of the translog and
   * releasing it once all 3 phases of recovery are complete
   */
  @Override
  public void phase2(Translog.Snapshot snapshot) throws ElasticsearchException {
    if (shard.state() == IndexShardState.CLOSED) {
      throw new IndexShardClosedException(request.shardId());
    }
    cancellableThreads.checkForCancel();
    logger.trace("{} recovery [phase2] to {}: start", request.shardId(), request.targetNode());
    StopWatch stopWatch = new StopWatch().start();
    cancellableThreads.execute(
        new Interruptable() {
          @Override
          public void run() throws InterruptedException {
            // Send a request preparing the new shard's translog to receive
            // operations. This ensures the shard engine is started and disables
            // garbage collection (not the JVM's GC!) of tombstone deletes
            transportService
                .submitRequest(
                    request.targetNode(),
                    RecoveryTarget.Actions.PREPARE_TRANSLOG,
                    new RecoveryPrepareForTranslogOperationsRequest(
                        request.recoveryId(),
                        request.shardId(),
                        shard.translog().estimatedNumberOfOperations()),
                    TransportRequestOptions.options()
                        .withTimeout(recoverySettings.internalActionTimeout()),
                    EmptyTransportResponseHandler.INSTANCE_SAME)
                .txGet();
          }
        });

    stopWatch.stop();
    response.startTime = stopWatch.totalTime().millis();
    logger.trace(
        "{} recovery [phase2] to {}: start took [{}]",
        request.shardId(),
        request.targetNode(),
        stopWatch.totalTime());

    logger.trace(
        "{} recovery [phase2] to {}: updating current mapping to master",
        request.shardId(),
        request.targetNode());
    // Ensure that the mappings are synced with the master node
    updateMappingOnMaster();

    logger.trace(
        "{} recovery [phase2] to {}: sending transaction log operations",
        request.shardId(),
        request.targetNode());
    stopWatch = new StopWatch().start();
    // Send all the snapshot's translog operations to the target
    int totalOperations = sendSnapshot(snapshot);
    stopWatch.stop();
    logger.trace(
        "{} recovery [phase2] to {}: took [{}]",
        request.shardId(),
        request.targetNode(),
        stopWatch.totalTime());
    response.phase2Time = stopWatch.totalTime().millis();
    response.phase2Operations = totalOperations;
  }
  @Override
  public void run() {
    while (context.getStatus() == Status.RUNNING) {
      try {
        if (!assignCollections()) {
          break; // failed to assign oplogCollection or slurpedCollection
        }

        Timestamp<?> startTimestamp = null;
        if (!definition.isSkipInitialImport()) {
          if (!riverHasIndexedFromOplog() && definition.getInitialTimestamp() == null) {
            if (!isIndexEmpty()) {
              MongoDBRiverHelper.setRiverStatus(
                  client, definition.getRiverName(), Status.INITIAL_IMPORT_FAILED);
              break;
            }
            if (definition.isImportAllCollections()) {
              for (String name : slurpedDb.getCollectionNames()) {
                DBCollection collection = slurpedDb.getCollection(name);
                startTimestamp = doInitialImport(collection);
              }
            } else {
              DBCollection collection = slurpedDb.getCollection(definition.getMongoCollection());
              startTimestamp = doInitialImport(collection);
            }
          }
        } else {
          logger.info("Skip initial import from collection {}", definition.getMongoCollection());
        }

        // Slurp from oplog
        DBCursor cursor = null;
        try {
          cursor = oplogCursor(startTimestamp);
          if (cursor == null) {
            cursor = processFullOplog();
          }
          while (cursor.hasNext()) {
            DBObject item = cursor.next();
            startTimestamp = processOplogEntry(item, startTimestamp);
          }
          logger.debug("Before waiting for 500 ms");
          Thread.sleep(500);
        } catch (MongoException.CursorNotFound e) {
          logger.info(
              "Cursor {} has been closed. About to open a new cusor.", cursor.getCursorId());
          logger.debug("Total document inserted [{}]", totalDocuments.get());
        } catch (SlurperException sEx) {
          logger.warn("Exception in slurper", sEx);
          break;
        } catch (Exception ex) {
          logger.warn("Exception while looping in cursor", ex);
          Thread.currentThread().interrupt();
          break;
        } finally {
          if (cursor != null) {
            logger.trace("Closing oplog cursor");
            cursor.close();
          }
        }
      } catch (MongoInterruptedException mIEx) {
        logger.warn("Mongo driver has been interrupted", mIEx);
        if (mongo != null) {
          mongo.close();
          mongo = null;
        }
        Thread.currentThread().interrupt();
        break;
      } catch (MongoException e) {
        logger.error("Mongo gave an exception", e);
        try {
          Thread.sleep(5000);
        } catch (InterruptedException iEx) {
          Thread.currentThread().interrupt(); // restore the interrupt status instead of swallowing it
        }
      } catch (NoSuchElementException e) {
        logger.warn("A mongoDB cursor bug ?", e);
      } catch (InterruptedException e) {
        logger.info("river-mongodb slurper interrupted");
        Thread.currentThread().interrupt();
        break;
      }
    }
  }
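
The loop above tails the MongoDB oplog: it opens a cursor positioned after the last processed timestamp, drains it, sleeps briefly, and reopens it when the server drops it. A minimal sketch of the underlying technique, a tailable cursor on local.oplog.rs with the legacy 2.x Java driver, follows; the host, the resume timestamp, and the printed fields are illustrative, and the driver version bundled with the river may differ.

import com.mongodb.BasicDBObject;
import com.mongodb.Bytes;
import com.mongodb.DBCollection;
import com.mongodb.DBCursor;
import com.mongodb.DBObject;
import com.mongodb.MongoClient;
import org.bson.types.BSONTimestamp;

public class OplogTailExample {
  public static void main(String[] args) throws Exception {
    MongoClient mongo = new MongoClient("localhost", 27017); // assumed replica-set member
    DBCollection oplog = mongo.getDB("local").getCollection("oplog.rs");

    // Resume from a known timestamp; in the river this comes from the last indexed entry.
    BSONTimestamp last = new BSONTimestamp(0, 0);
    DBObject query = new BasicDBObject("ts", new BasicDBObject("$gt", last));

    DBCursor cursor = oplog.find(query)
        .addOption(Bytes.QUERYOPTION_TAILABLE)   // keep the cursor open at the end of the capped collection
        .addOption(Bytes.QUERYOPTION_AWAITDATA); // block briefly waiting for new entries
    try {
      // hasNext() blocks while awaiting data and returns false once the cursor is invalidated
      while (cursor.hasNext()) {
        DBObject entry = cursor.next();
        System.out.println(entry.get("op") + " " + entry.get("ns"));
      }
    } finally {
      cursor.close();
      mongo.close();
    }
  }
}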
  /**
   * Send the given snapshot's operations to this handler's target node.
   *
   * <p>Operations are bulked into a single request depending on an operation count limit or
   * size-in-bytes limit
   *
   * @return the total number of translog operations that were sent
   */
  protected int sendSnapshot(Translog.Snapshot snapshot) throws ElasticsearchException {
    int ops = 0;
    long size = 0;
    int totalOperations = 0;
    final List<Translog.Operation> operations = Lists.newArrayList();
    Translog.Operation operation = snapshot.next();

    final TransportRequestOptions recoveryOptions =
        TransportRequestOptions.options()
            .withCompress(recoverySettings.compress())
            .withType(TransportRequestOptions.Type.RECOVERY)
            .withTimeout(recoverySettings.internalActionLongTimeout());

    if (operation == null) {
      logger.trace(
          "[{}][{}] no translog operations (id: [{}]) to send to {}",
          indexName,
          shardId,
          snapshot.translogId(),
          request.targetNode());
    }
    while (operation != null) {
      if (shard.state() == IndexShardState.CLOSED) {
        throw new IndexShardClosedException(request.shardId());
      }
      cancellableThreads.checkForCancel();
      operations.add(operation);
      ops += 1;
      size += operation.estimateSize();
      totalOperations++;

      // Check if this batch is past the operation-count or size-in-bytes
      // threshold, and if so, send it off
      if (ops >= recoverySettings.translogOps()
          || size >= recoverySettings.translogSize().bytes()) {

        // don't throttle translog, since we lock for phase3 indexing,
        // so we need to move it as fast as possible. Note, since we
        // index docs to replicas while the index files are recovered
        // the lock can potentially be removed, in which case, it might
        // make sense to re-enable throttling in this phase
        //                if (recoverySettings.rateLimiter() != null) {
        //                    recoverySettings.rateLimiter().pause(size);
        //                }

        if (logger.isTraceEnabled()) {
          logger.trace(
              "[{}][{}] sending batch of [{}][{}] (total: [{}], id: [{}]) translog operations to {}",
              indexName,
              shardId,
              ops,
              new ByteSizeValue(size),
              shard.translog().estimatedNumberOfOperations(),
              snapshot.translogId(),
              request.targetNode());
        }
        cancellableThreads.execute(
            new Interruptable() {
              @Override
              public void run() throws InterruptedException {
                final RecoveryTranslogOperationsRequest translogOperationsRequest =
                    new RecoveryTranslogOperationsRequest(
                        request.recoveryId(),
                        request.shardId(),
                        operations,
                        shard.translog().estimatedNumberOfOperations());
                transportService
                    .submitRequest(
                        request.targetNode(),
                        RecoveryTarget.Actions.TRANSLOG_OPS,
                        translogOperationsRequest,
                        recoveryOptions,
                        EmptyTransportResponseHandler.INSTANCE_SAME)
                    .txGet();
              }
            });

        ops = 0;
        size = 0;
        operations.clear();
      }
      operation = snapshot.next();
    }
    // send the leftover
    if (logger.isTraceEnabled()) {
      logger.trace(
          "[{}][{}] sending final batch of [{}][{}] (total: [{}], id: [{}]) translog operations to {}",
          indexName,
          shardId,
          ops,
          new ByteSizeValue(size),
          shard.translog().estimatedNumberOfOperations(),
          snapshot.translogId(),
          request.targetNode());
    }
    if (!operations.isEmpty()) {
      cancellableThreads.execute(
          new Interruptable() {
            @Override
            public void run() throws InterruptedException {
              RecoveryTranslogOperationsRequest translogOperationsRequest =
                  new RecoveryTranslogOperationsRequest(
                      request.recoveryId(),
                      request.shardId(),
                      operations,
                      shard.translog().estimatedNumberOfOperations());
              transportService
                  .submitRequest(
                      request.targetNode(),
                      RecoveryTarget.Actions.TRANSLOG_OPS,
                      translogOperationsRequest,
                      recoveryOptions,
                      EmptyTransportResponseHandler.INSTANCE_SAME)
                  .txGet();
            }
          });
    }
    return totalOperations;
  }
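
sendSnapshot flushes a batch whenever either the operation-count threshold (recoverySettings.translogOps()) or the size-in-bytes threshold (recoverySettings.translogSize()) is crossed, then ships whatever is left over at the end. A minimal standalone sketch of that batching pattern follows; the BatchSender class, its thresholds, and the Op interface are illustrative stand-ins, not the Elasticsearch types.

import java.util.ArrayList;
import java.util.List;

public class BatchSender {
  private static final int MAX_OPS = 1000;          // assumed count threshold
  private static final long MAX_BYTES = 512 * 1024; // assumed size threshold

  interface Op { long estimateSize(); }

  static void send(List<Op> batch) { /* ship the batch over the wire */ }

  public static int sendAll(Iterable<Op> ops) {
    List<Op> batch = new ArrayList<>();
    long size = 0;
    int total = 0;
    for (Op op : ops) {
      batch.add(op);
      size += op.estimateSize();
      total++;
      if (batch.size() >= MAX_OPS || size >= MAX_BYTES) {
        send(batch); // flush as soon as either threshold is crossed
        batch.clear();
        size = 0;
      }
    }
    if (!batch.isEmpty()) {
      send(batch);   // send the leftover partial batch
    }
    return total;
  }
}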
  @Override
  public User authenticate(final AuthCredentials authCreds) throws AuthException {

    LdapConnection ldapConnection = null;
    final String user = authCreds.getUsername();

    final char[] password = authCreds.getPassword();
    authCreds.clear();

    EntryCursor result = null;

    try {

      ldapConnection = LDAPAuthorizator.getConnection(settings);

      final String bindDn = settings.get(ConfigConstants.ARMOR_AUTHENTICATION_LDAP_BIND_DN, null);

      if (bindDn != null) {
        ldapConnection.bind(
            bindDn, settings.get(ConfigConstants.ARMOR_AUTHENTICATION_LDAP_PASSWORD, null));
      } else {
        ldapConnection.anonymousBind();
      }

      result =
          ldapConnection.search(
              settings.get(ConfigConstants.ARMOR_AUTHENTICATION_LDAP_USERBASE, ""),
              settings
                  .get(ConfigConstants.ARMOR_AUTHENTICATION_LDAP_USERSEARCH, "(sAMAccountName={0})")
                  .replace("{0}", user),
              SearchScope.SUBTREE);

      if (!result.next()) {
        throw new AuthException("No user " + user + " found");
      }

      final Entry entry = result.get();
      final String dn = entry.getDn().toString();

      if (result.next()) {
        throw new AuthException("More than one user found");
      }

      log.trace("Disconnect {}", bindDn == null ? "anonymous" : bindDn);

      SecurityUtil.unbindAndCloseSilently(ldapConnection);
      ldapConnection = LDAPAuthorizator.getConnection(settings);

      log.trace("Try to authenticate dn {}", dn);

      ldapConnection.bind(dn, new String(password));

      final String usernameAttribute =
          settings.get(ConfigConstants.ARMOR_AUTHENTICATION_LDAP_USERNAME_ATTRIBUTE, null);
      String username = dn;

      if (usernameAttribute != null && entry.get(usernameAttribute) != null) {
        username = entry.get(usernameAttribute).getString();
      }

      log.debug("Authenticated username {}", username);

      return new LdapUser(username, entry);

    } catch (final Exception e) {
      log.error(e.toString(), e);
      throw new AuthException(e);
    } finally {
      if (result != null) {
        result.close();
      }

      SecurityUtil.unbindAndCloseSilently(ldapConnection);
    }
  }
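
authenticate() follows the classic search-then-bind flow: bind with a service account (or anonymously), search the user base to resolve the user's DN, then bind again as that DN to verify the supplied password. The sketch below shows the same flow directly against the Apache Directory LDAP API; the host, user base, and filter are placeholders, it re-binds on the same connection instead of opening a second one as the code above does, and the import paths follow the 1.0 API layout, which may differ from the version used here.

import org.apache.directory.api.ldap.model.cursor.EntryCursor;
import org.apache.directory.api.ldap.model.message.SearchScope;
import org.apache.directory.ldap.client.api.LdapConnection;
import org.apache.directory.ldap.client.api.LdapNetworkConnection;

public class SearchThenBindExample {

  /** Returns the resolved DN if the credentials are valid, otherwise throws. */
  public static String authenticate(String username, String password) throws Exception {
    LdapConnection connection = new LdapNetworkConnection("ldap.example.com", 389);
    try {
      connection.connect();
      connection.anonymousBind(); // or bind with a dedicated service account

      // 1) Resolve the user's DN via a subtree search under the user base
      String dn;
      EntryCursor cursor =
          connection.search(
              "ou=people,dc=example,dc=com",       // illustrative user base
              "(sAMAccountName=" + username + ")", // illustrative user search filter
              SearchScope.SUBTREE);
      try {
        if (!cursor.next()) {
          throw new IllegalArgumentException("No user " + username + " found");
        }
        dn = cursor.get().getDn().toString();
      } finally {
        cursor.close();
      }

      // 2) Bind again as the resolved DN; a failed bind throws, meaning bad credentials
      connection.bind(dn, password);
      return dn;
    } finally {
      connection.close();
    }
  }
}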
Example no. 26
0
  public static LdapConnection getConnection(final Settings settings)
      throws KeyStoreException, NoSuchAlgorithmException, CertificateException,
          FileNotFoundException, IOException, LdapException {
    final boolean useSSL =
        settings.getAsBoolean(ConfigConstants.ARMOR_AUTHENTICATION_LDAP_LDAPS_SSL_ENABLED, false);
    final boolean useStartSSL =
        settings.getAsBoolean(
            ConfigConstants.ARMOR_AUTHENTICATION_LDAP_LDAPS_STARTTLS_ENABLED, false);
    final LdapConnectionConfig config = new LdapConnectionConfig();

    if (useSSL || useStartSSL) {
      // ## Truststore ##
      final KeyStore ts =
          KeyStore.getInstance(
              settings.get(ConfigConstants.ARMOR_AUTHENTICATION_LDAP_LDAPS_TRUSTSTORE_TYPE, "JKS"));
      ts.load(
          new FileInputStream(
              new File(
                  settings.get(
                      ConfigConstants.ARMOR_AUTHENTICATION_LDAP_LDAPS_TRUSTSTORE_FILEPATH, null))),
          settings
              .get(ConfigConstants.ARMOR_AUTHENTICATION_LDAP_LDAPS_TRUSTSTORE_PASSWORD, "changeit")
              .toCharArray());

      final TrustManagerFactory tmf =
          TrustManagerFactory.getInstance(TrustManagerFactory.getDefaultAlgorithm());
      tmf.init(ts);

      config.setSslProtocol("TLS");
      config.setEnabledCipherSuites(SecurityUtil.ENABLED_SSL_CIPHERS);
      config.setTrustManagers(tmf.getTrustManagers());
    }

    config.setUseSsl(useSSL);
    config.setUseTls(useStartSSL);
    config.setTimeout(5000L); // 5 sec

    final String[] ldapHosts =
        settings.getAsArray(
            ConfigConstants.ARMOR_AUTHENTICATION_LDAP_HOST, new String[] {"localhost"});

    LdapConnection ldapConnection = null;

    for (int i = 0; i < ldapHosts.length; i++) {
      log.trace("Connect to {}", ldapHosts[i]);

      try {

        final String[] split = ldapHosts[i].split(":");

        config.setLdapHost(split[0]);

        if (split.length > 1) {
          config.setLdapPort(Integer.parseInt(split[1]));
        } else {
          config.setLdapPort(useSSL ? 636 : 389);
        }

        ldapConnection = new LdapNetworkConnection(config);
        ldapConnection.connect();
        if (!ldapConnection.isConnected()) {
          continue;
        } else {
          break;
        }

      } catch (final Exception e) {
        continue;
      }
    }

    if (ldapConnection == null || !ldapConnection.isConnected()) {
      throw new LdapException(
          "Unable to connect to any of those ldap servers " + Arrays.toString(ldapHosts));
    }

    return ldapConnection;
  }
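
getConnection() walks the configured host list, accepting either a bare hostname or a host:port pair and defaulting the port to 636 for LDAPS and 389 for plain LDAP, then keeps the first connection that succeeds. The parsing rule on its own, as a tiny standalone sketch (the LdapHostParser and HostPort names are illustrative):

public class LdapHostParser {

  /** Simple holder for the parsed result; illustrative, not part of the original code. */
  public static final class HostPort {
    public final String host;
    public final int port;

    HostPort(String host, int port) {
      this.host = host;
      this.port = port;
    }
  }

  /** Parse "host" or "host:port", defaulting to 636 for LDAPS and 389 for plain LDAP. */
  public static HostPort parse(String entry, boolean useSsl) {
    String[] split = entry.split(":");
    int port = split.length > 1 ? Integer.parseInt(split[1]) : (useSsl ? 636 : 389);
    return new HostPort(split[0], port);
  }

  public static void main(String[] args) {
    HostPort a = parse("ldap1.example.com", true);        // -> port 636
    HostPort b = parse("ldap2.example.com:10389", false); // -> port 10389
    System.out.println(a.host + ":" + a.port + ", " + b.host + ":" + b.port);
  }
}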
Example no. 27
0
  protected Set<Entry> resolveNestedRoles(
      final Tuple<String, Dn> role, final LdapConnection ldapConnection, final String roleName)
      throws AuthException, LdapException {

    EntryCursor rolesResult = null;
    EntryCursor _result = null;
    try {

      final Set<Entry> result = new HashSet<Entry>();
      Dn roleDn = null;
      final boolean isRoleStringValidDn = Dn.isValid(role.v1());

      if (role.v2() != null) {
        roleDn = role.v2();
      } else {
        // lookup role
        if (isRoleStringValidDn) {
          roleDn = ldapConnection.lookup(role.v1()).getDn();
        } else {

          try {

            // search
            _result =
                ldapConnection.search(
                    settings.get(
                        ConfigConstants.ARMOR_AUTHENTICATION_AUTHORIZATION_LDAP_ROLEBASE, ""),
                    settings
                        .get(
                            ConfigConstants.ARMOR_AUTHENTICATION_AUTHORIZATION_LDAP_ROLESEARCH,
                            "(member={0})")
                        .replace("{1}", role.v1()),
                    SearchScope.SUBTREE);

            // one
            if (!_result.next()) {
              log.warn("Cannot resolve role '{}' (NOT FOUND)", role.v1());
            } else {

              //
              final Entry entry = _result.get();
              roleDn = entry.getDn();

              if (_result.next()) {
                log.warn("Cannot resolve role '{}' (MORE THAN ONE FOUND)", role.v1());
              }
            }
          } catch (final CursorException e) {
            log.warn("Cannot resolve role '{}' (EXCEPTION: {})", e, role.v1(), e.toString());
          } finally {
            if (_result != null) {
              _result.close();
            }
          }
        }
      }

      log.trace("role dn resolved to {}", roleDn);

      rolesResult =
          ldapConnection.search(
              settings.get(ConfigConstants.ARMOR_AUTHENTICATION_AUTHORIZATION_LDAP_ROLEBASE, ""),
              settings
                  .get(
                      ConfigConstants.ARMOR_AUTHENTICATION_AUTHORIZATION_LDAP_ROLESEARCH,
                      "(member={0})")
                  .replace("{0}", roleDn == null ? role.v1() : roleDn.toString())
                  .replace("{1}", role.v1()),
              SearchScope.SUBTREE);

      for (final Iterator<Entry> iterator = rolesResult.iterator(); iterator.hasNext(); ) {
        final Entry searchResultEntry = iterator.next();
        final String _role = searchResultEntry.get(roleName).getString();
        log.trace("nested l1 {}", searchResultEntry.getDn());
        final Set<Entry> in =
            resolveNestedRoles(
                new Tuple<String, Dn>(_role, searchResultEntry.getDn()), ldapConnection, roleName);

        for (final Iterator<Entry> iterator2 = in.iterator(); iterator2.hasNext(); ) {
          final Entry entry = iterator2.next();
          result.add(entry);
          log.trace("nested l2 {}", entry.getDn());
        }

        result.add(searchResultEntry);
      }

      return result;
    } finally {

      if (rolesResult != null) {
        rolesResult.close();
      }
    }
  }
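
resolveNestedRoles() expands group membership recursively: for each role it searches for entries whose member attribute points at the role's DN, then recurses into every hit and accumulates the results. The sketch below shows the same recursion over a generic membership lookup; the Function-based lookup and the visited-set cycle guard are illustrative additions, not taken from the code above.

import java.util.HashSet;
import java.util.LinkedHashSet;
import java.util.Set;
import java.util.function.Function;

public class NestedRoleResolver {

  /**
   * Recursively collect every group reachable from {@code role} via the given membership lookup.
   * The visited set is an illustrative addition that keeps membership cycles from recursing forever.
   */
  public static Set<String> resolve(String role, Function<String, Set<String>> directGroupsOf) {
    Set<String> result = new LinkedHashSet<>();
    collect(role, directGroupsOf, result, new HashSet<>());
    return result;
  }

  private static void collect(
      String role,
      Function<String, Set<String>> directGroupsOf,
      Set<String> result,
      Set<String> visited) {
    if (!visited.add(role)) {
      return; // already expanded this role: stop to avoid cycles
    }
    for (String parent : directGroupsOf.apply(role)) {
      result.add(parent);
      collect(parent, directGroupsOf, result, visited); // expand the nested level too
    }
  }
}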
  /**
   * Does an initial sync the same way MongoDB does. See
   * https://groups.google.com/forum/?fromgroups=#!topic/mongodb-user/sOKlhD_E2ns
   *
   * @return the last oplog timestamp before the import began
   * @throws InterruptedException if the blocking queue stream is interrupted while waiting
   */
  protected Timestamp<?> doInitialImport(DBCollection collection) throws InterruptedException {
    // TODO: ensure the index type is empty
    // DBCollection slurpedCollection =
    // slurpedDb.getCollection(definition.getMongoCollection());

    logger.info("MongoDBRiver is beginning initial import of " + collection.getFullName());
    Timestamp<?> startTimestamp = getCurrentOplogTimestamp();
    boolean inProgress = true;
    String lastId = null;
    while (inProgress) {
      DBCursor cursor = null;
      try {
        if (definition.isDisableIndexRefresh()) {
          updateIndexRefresh(definition.getIndexName(), -1L);
        }
        if (!definition.isMongoGridFS()) {
          logger.info("Collection {} - count: {}", collection.getName(), collection.count());
          long count = 0;
          cursor =
              collection.find(
                  getFilterForInitialImport(definition.getMongoCollectionFilter(), lastId));
          while (cursor.hasNext()) {
            DBObject object = cursor.next();
            count++;
            if (cursor.hasNext()) {
              lastId = addInsertToStream(null, applyFieldFilter(object), collection.getName());
            } else {
              logger.debug("Last entry for initial import - add timestamp: {}", startTimestamp);
              lastId =
                  addInsertToStream(startTimestamp, applyFieldFilter(object), collection.getName());
            }
          }
          inProgress = false;
          logger.info("Number documents indexed: {}", count);
        } else {
          // TODO: To be optimized.
          // https://github.com/mongodb/mongo-java-driver/pull/48#issuecomment-25241988
          // Possible option: get the object id list from the .fs collection,
          // then call GridFS.findOne
          GridFS grid =
              new GridFS(mongo.getDB(definition.getMongoDb()), definition.getMongoCollection());

          cursor = grid.getFileList();
          while (cursor.hasNext()) {
            DBObject object = cursor.next();
            if (object instanceof GridFSDBFile) {
              GridFSDBFile file =
                  grid.findOne(new ObjectId(object.get(MongoDBRiver.MONGODB_ID_FIELD).toString()));
              if (cursor.hasNext()) {
                lastId = addInsertToStream(null, file);
              } else {
                logger.debug("Last entry for initial import - add timestamp: {}", startTimestamp);
                lastId = addInsertToStream(startTimestamp, file);
              }
            }
          }
          inProgress = false;
        }
      } catch (MongoException.CursorNotFound e) {
        logger.info(
            "Initial import - Cursor {} has been closed. About to open a new cusor.",
            cursor.getCursorId());
        logger.debug("Total document inserted [{}]", totalDocuments.get());
      } finally {
        if (cursor != null) {
          logger.trace("Closing initial import cursor");
          cursor.close();
        }
        if (definition.isDisableIndexRefresh()) {
          updateIndexRefresh(definition.getIndexName(), TimeValue.timeValueSeconds(1));
        }
      }
    }
    return startTimestamp;
  }
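
doInitialImport() restarts its while (inProgress) loop whenever the server drops the cursor and resumes from lastId. A common way to make such a scan resumable, and presumably what getFilterForInitialImport builds on top of the configured collection filter, is an _id range query with a matching sort; a minimal sketch with the legacy driver follows, where ResumableScanExample and its parameters are illustrative.

import com.mongodb.BasicDBObject;
import com.mongodb.DBCollection;
import com.mongodb.DBCursor;
import com.mongodb.DBObject;

public class ResumableScanExample {

  /** Scan a collection in _id order, resuming after lastId if a previous cursor was lost. */
  public static Object scanFrom(DBCollection collection, Object lastId) {
    DBObject filter =
        lastId == null
            ? new BasicDBObject()
            : new BasicDBObject("_id", new BasicDBObject("$gt", lastId));
    DBCursor cursor = collection.find(filter).sort(new BasicDBObject("_id", 1));
    try {
      while (cursor.hasNext()) {
        DBObject doc = cursor.next();
        lastId = doc.get("_id"); // remember where we are so we can resume after a failure
      }
    } finally {
      cursor.close();
    }
    return lastId;
  }
}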
  @Override
  public void testFinished(Description description) throws Exception {
    logger.trace("Test {} finished", description.getDisplayName());
  }
  /**
   * Perform phase1 of the recovery operations. Once this {@link SnapshotIndexCommit} snapshot has
   * been taken, no commit operations (files being fsync'd) are effectively allowed on this index
   * until all recovery phases are done.
   *
   * <p>Phase1 examines the segment files on the target node and copies over the segments that are
   * missing. Only segments that have the same size and checksum can be reused
   *
   * <p>{@code InternalEngine#recover} is responsible for snapshotting the index and releasing the
   * snapshot once all 3 phases of recovery are complete
   */
  @Override
  public void phase1(final SnapshotIndexCommit snapshot) throws ElasticsearchException {
    cancellableThreads.checkForCancel();
    // Total size of segment files that are recovered
    long totalSize = 0;
    // Total size of segment files that were able to be re-used
    long existingTotalSize = 0;
    final Store store = shard.store();
    store.incRef();
    try {
      StopWatch stopWatch = new StopWatch().start();
      final Store.MetadataSnapshot recoverySourceMetadata = store.getMetadata(snapshot);
      for (String name : snapshot.getFiles()) {
        final StoreFileMetaData md = recoverySourceMetadata.get(name);
        if (md == null) {
          logger.info(
              "Snapshot differs from actual index for file: {} meta: {}",
              name,
              recoverySourceMetadata.asMap());
          throw new CorruptIndexException(
              "Snapshot differs from actual index - maybe index was removed metadata has "
                  + recoverySourceMetadata.asMap().size()
                  + " files");
        }
      }
      String recoverySourceSyncId = recoverySourceMetadata.getSyncId();
      String recoveryTargetSyncId = request.metadataSnapshot().getSyncId();
      final boolean recoverWithSyncId =
          recoverySourceSyncId != null && recoverySourceSyncId.equals(recoveryTargetSyncId);
      if (recoverWithSyncId) {
        final long numDocsTarget = request.metadataSnapshot().getNumDocs();
        final long numDocsSource = recoverySourceMetadata.getNumDocs();
        if (numDocsTarget != numDocsSource) {
          throw new IllegalStateException(
              "try to recover "
                  + request.shardId()
                  + " from primary shard with sync id but number of docs differ: "
                  + numDocsSource
                  + " ("
                  + request.sourceNode().getName()
                  + ", primary) vs "
                  + numDocsTarget
                  + " ("
                  + request.targetNode().getName()
                  + ")");
        }
        // we shortcut recovery here because we have nothing to copy. but we must still start the
        // engine on the target.
        // so we don't return here
        logger.trace(
            "[{}][{}] skipping [phase1] to {} - identical sync id [{}] found on both source and target",
            indexName,
            shardId,
            request.targetNode(),
            recoverySourceSyncId);
      } else {

        // Generate a "diff" of all the identical, different, and missing
        // segment files on the target node, using the existing files on
        // the source node
        final Store.RecoveryDiff diff =
            recoverySourceMetadata.recoveryDiff(request.metadataSnapshot());
        for (StoreFileMetaData md : diff.identical) {
          response.phase1ExistingFileNames.add(md.name());
          response.phase1ExistingFileSizes.add(md.length());
          existingTotalSize += md.length();
          if (logger.isTraceEnabled()) {
            logger.trace(
                "[{}][{}] recovery [phase1] to {}: not recovering [{}], exists in local store and has checksum [{}], size [{}]",
                indexName,
                shardId,
                request.targetNode(),
                md.name(),
                md.checksum(),
                md.length());
          }
          totalSize += md.length();
        }
        for (StoreFileMetaData md : Iterables.concat(diff.different, diff.missing)) {
          if (request.metadataSnapshot().asMap().containsKey(md.name())) {
            logger.trace(
                "[{}][{}] recovery [phase1] to {}: recovering [{}], exists in local store, but is different: remote [{}], local [{}]",
                indexName,
                shardId,
                request.targetNode(),
                md.name(),
                request.metadataSnapshot().get(md.name()),
                md);
          } else {
            logger.trace(
                "[{}][{}] recovery [phase1] to {}: recovering [{}], does not exists in remote",
                indexName,
                shardId,
                request.targetNode(),
                md.name());
          }
          response.phase1FileNames.add(md.name());
          response.phase1FileSizes.add(md.length());
          totalSize += md.length();
        }
        response.phase1TotalSize = totalSize;
        response.phase1ExistingTotalSize = existingTotalSize;

        logger.trace(
            "[{}][{}] recovery [phase1] to {}: recovering_files [{}] with total_size [{}], reusing_files [{}] with total_size [{}]",
            indexName,
            shardId,
            request.targetNode(),
            response.phase1FileNames.size(),
            new ByteSizeValue(totalSize),
            response.phase1ExistingFileNames.size(),
            new ByteSizeValue(existingTotalSize));
        cancellableThreads.execute(
            new Interruptable() {
              @Override
              public void run() throws InterruptedException {
                RecoveryFilesInfoRequest recoveryInfoFilesRequest =
                    new RecoveryFilesInfoRequest(
                        request.recoveryId(),
                        request.shardId(),
                        response.phase1FileNames,
                        response.phase1FileSizes,
                        response.phase1ExistingFileNames,
                        response.phase1ExistingFileSizes,
                        shard.translog().estimatedNumberOfOperations(),
                        response.phase1TotalSize,
                        response.phase1ExistingTotalSize);
                transportService
                    .submitRequest(
                        request.targetNode(),
                        RecoveryTarget.Actions.FILES_INFO,
                        recoveryInfoFilesRequest,
                        TransportRequestOptions.options()
                            .withTimeout(recoverySettings.internalActionTimeout()),
                        EmptyTransportResponseHandler.INSTANCE_SAME)
                    .txGet();
              }
            });

        // This latch will be used to wait until all files have been transferred to the target node
        final CountDownLatch latch = new CountDownLatch(response.phase1FileNames.size());
        final CopyOnWriteArrayList<Throwable> exceptions = new CopyOnWriteArrayList<>();
        final AtomicReference<Throwable> corruptedEngine = new AtomicReference<>();
        int fileIndex = 0;
        ThreadPoolExecutor pool;

        // How many bytes we've copied since we last called RateLimiter.pause
        final AtomicLong bytesSinceLastPause = new AtomicLong();

        for (final String name : response.phase1FileNames) {
          long fileSize = response.phase1FileSizes.get(fileIndex);

          // Files are split into two categories: files that are "small"
          // (under 5 MB) and other files. Small files are transferred
          // using a separate thread pool dedicated to small files.
          //
          // The idea behind this is that while we are transferring an
          // older, large index, a user may create a new index, but that
          // index will not be able to recover until the large index
          // finishes, by using two different thread pools we can allow
          // tiny files (like segments for a brand new index) to be
          // recovered while ongoing large segment recoveries are
          // happening. It also allows these pools to be configured
          // separately.
          if (fileSize > RecoverySettings.SMALL_FILE_CUTOFF_BYTES) {
            pool = recoverySettings.concurrentStreamPool();
          } else {
            pool = recoverySettings.concurrentSmallFileStreamPool();
          }

          pool.execute(
              new AbstractRunnable() {
                @Override
                public void onFailure(Throwable t) {
                  // we either got rejected, the store could not be incremented, or we were canceled
                  logger.debug("Failed to transfer file [" + name + "] on recovery");
                }

                @Override
                public void onAfter() {
                  // Signify this file has completed by decrementing the latch
                  latch.countDown();
                }

                @Override
                protected void doRun() {
                  cancellableThreads.checkForCancel();
                  store.incRef();
                  final StoreFileMetaData md = recoverySourceMetadata.get(name);
                  try (final IndexInput indexInput =
                      store.directory().openInput(name, IOContext.READONCE)) {
                    final int BUFFER_SIZE = (int) recoverySettings.fileChunkSize().bytes();
                    final byte[] buf = new byte[BUFFER_SIZE];
                    boolean shouldCompressRequest = recoverySettings.compress();
                    if (CompressorFactory.isCompressed(indexInput)) {
                      shouldCompressRequest = false;
                    }

                    final long len = indexInput.length();
                    long readCount = 0;
                    final TransportRequestOptions requestOptions =
                        TransportRequestOptions.options()
                            .withCompress(shouldCompressRequest)
                            .withType(TransportRequestOptions.Type.RECOVERY)
                            .withTimeout(recoverySettings.internalActionTimeout());

                    while (readCount < len) {
                      if (shard.state()
                          == IndexShardState.CLOSED) { // check if the shard got closed on us
                        throw new IndexShardClosedException(shard.shardId());
                      }
                      int toRead =
                          readCount + BUFFER_SIZE > len ? (int) (len - readCount) : BUFFER_SIZE;
                      final long position = indexInput.getFilePointer();

                      // Pause using the rate limiter, if desired, to throttle the recovery
                      RateLimiter rl = recoverySettings.rateLimiter();
                      long throttleTimeInNanos = 0;
                      if (rl != null) {
                        long bytes = bytesSinceLastPause.addAndGet(toRead);
                        if (bytes > rl.getMinPauseCheckBytes()) {
                          // Time to pause
                          bytesSinceLastPause.addAndGet(-bytes);
                          throttleTimeInNanos = rl.pause(bytes);
                          shard.recoveryStats().addThrottleTime(throttleTimeInNanos);
                        }
                      }
                      indexInput.readBytes(buf, 0, toRead, false);
                      final BytesArray content = new BytesArray(buf, 0, toRead);
                      readCount += toRead;
                      final boolean lastChunk = readCount == len;
                      final RecoveryFileChunkRequest fileChunkRequest =
                          new RecoveryFileChunkRequest(
                              request.recoveryId(),
                              request.shardId(),
                              md,
                              position,
                              content,
                              lastChunk,
                              shard.translog().estimatedNumberOfOperations(),
                              throttleTimeInNanos);
                      cancellableThreads.execute(
                          new Interruptable() {
                            @Override
                            public void run() throws InterruptedException {
                              // Actually send the file chunk to the target node, waiting for it to
                              // complete
                              transportService
                                  .submitRequest(
                                      request.targetNode(),
                                      RecoveryTarget.Actions.FILE_CHUNK,
                                      fileChunkRequest,
                                      requestOptions,
                                      EmptyTransportResponseHandler.INSTANCE_SAME)
                                  .txGet();
                            }
                          });
                    }
                  } catch (Throwable e) {
                    final Throwable corruptIndexException;
                    if ((corruptIndexException = ExceptionsHelper.unwrapCorruption(e)) != null) {
                      if (store.checkIntegrity(md)
                          == false) { // we are corrupted on the primary -- fail!
                        logger.warn(
                            "{} Corrupted file detected {} checksum mismatch", shard.shardId(), md);
                        if (corruptedEngine.compareAndSet(null, corruptIndexException) == false) {
                          // if we are not the first exception, add ourselves as suppressed to the
                          // main one:
                          corruptedEngine.get().addSuppressed(e);
                        }
                      } else { // corruption has happened on the way to replica
                        RemoteTransportException exception =
                            new RemoteTransportException(
                                "File corruption occurred on recovery but checksums are ok", null);
                        exception.addSuppressed(e);
                        exceptions.add(0, exception); // last exception first
                        logger.warn(
                            "{} Remote file corruption on node {}, recovering {}. local checksum OK",
                            corruptIndexException,
                            shard.shardId(),
                            request.targetNode(),
                            md);
                      }
                    } else {
                      exceptions.add(0, e); // last exception first
                    }
                  } finally {
                    store.decRef();
                  }
                }
              });
          fileIndex++;
        }

        cancellableThreads.execute(
            new Interruptable() {
              @Override
              public void run() throws InterruptedException {
                // Wait for all files that need to be transferred to finish transferring
                latch.await();
              }
            });

        if (corruptedEngine.get() != null) {
          throw corruptedEngine.get();
        } else {
          ExceptionsHelper.rethrowAndSuppress(exceptions);
        }

        cancellableThreads.execute(
            new Interruptable() {
              @Override
              public void run() throws InterruptedException {
                // Send the CLEAN_FILES request, which takes all of the files that
                // were transferred and renames them from their temporary file
                // names to the actual file names. It also writes checksums for
                // the files after they have been renamed.
                //
                // Once the files have been renamed, any other files that are not
                // related to this recovery (out of date segments, for example)
                // are deleted
                try {
                  transportService
                      .submitRequest(
                          request.targetNode(),
                          RecoveryTarget.Actions.CLEAN_FILES,
                          new RecoveryCleanFilesRequest(
                              request.recoveryId(),
                              shard.shardId(),
                              recoverySourceMetadata,
                              shard.translog().estimatedNumberOfOperations()),
                          TransportRequestOptions.options()
                              .withTimeout(recoverySettings.internalActionTimeout()),
                          EmptyTransportResponseHandler.INSTANCE_SAME)
                      .txGet();
                } catch (RemoteTransportException remoteException) {
                  final IOException corruptIndexException;
                  // we realized that after the index was copied and we wanted to finalize the
                  // recovery, the index was corrupted:
                  //   - maybe due to a broken segments file on an empty index (transferred
                  //     with no checksum)
                  //   - maybe due to old segments without checksums or length-only checks
                  if ((corruptIndexException = ExceptionsHelper.unwrapCorruption(remoteException))
                      != null) {
                    try {
                      final Store.MetadataSnapshot recoverySourceMetadata =
                          store.getMetadata(snapshot);
                      StoreFileMetaData[] metadata =
                          Iterables.toArray(recoverySourceMetadata, StoreFileMetaData.class);
                      ArrayUtil.timSort(
                          metadata,
                          new Comparator<StoreFileMetaData>() {
                            @Override
                            public int compare(StoreFileMetaData o1, StoreFileMetaData o2) {
                              return Long.compare(
                                  o1.length(), o2.length()); // check small files first
                            }
                          });
                      for (StoreFileMetaData md : metadata) {
                        logger.debug(
                            "{} checking integrity for file {} after remove corruption exception",
                            shard.shardId(),
                            md);
                        if (store.checkIntegrity(md)
                            == false) { // we are corrupted on the primary -- fail!
                          logger.warn(
                              "{} Corrupted file detected {} checksum mismatch",
                              shard.shardId(),
                              md);
                          throw corruptIndexException;
                        }
                      }
                    } catch (IOException ex) {
                      remoteException.addSuppressed(ex);
                      throw remoteException;
                    }
                    // corruption has happened on the way to replica
                    RemoteTransportException exception =
                        new RemoteTransportException(
                            "File corruption occurred on recovery but checksums are ok", null);
                    exception.addSuppressed(remoteException);
                    logger.warn(
                        "{} Remote file corruption during finalization on node {}, recovering {}. local checksum OK",
                        corruptIndexException,
                        shard.shardId(),
                        request.targetNode());
                    throw exception;
                  } else {
                    throw remoteException;
                  }
                }
              }
            });
      }
      stopWatch.stop();
      logger.trace(
          "[{}][{}] recovery [phase1] to {}: took [{}]",
          indexName,
          shardId,
          request.targetNode(),
          stopWatch.totalTime());
      response.phase1Time = stopWatch.totalTime().millis();
    } catch (Throwable e) {
      throw new RecoverFilesRecoveryException(
          request.shardId(), response.phase1FileNames.size(), new ByteSizeValue(totalSize), e);
    } finally {
      store.decRef();
    }
  }
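
Phase1's decision of what to copy comes from Store.recoveryDiff: a segment file is reused only when the target already has an entry with the same name, length, and checksum; otherwise it lands in the different or missing bucket and is transferred. A minimal standalone sketch of that categorization, with a simplified FileMeta type standing in for StoreFileMetaData:

import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.Objects;

public class RecoveryDiffExample {

  /** Simplified stand-in for StoreFileMetaData: name, length and checksum only. */
  public static final class FileMeta {
    final String name;
    final long length;
    final String checksum;

    public FileMeta(String name, long length, String checksum) {
      this.name = name;
      this.length = length;
      this.checksum = checksum;
    }
  }

  public static final class Diff {
    public final List<FileMeta> identical = new ArrayList<>();
    public final List<FileMeta> different = new ArrayList<>();
    public final List<FileMeta> missing = new ArrayList<>();
  }

  /** Compare the source's files against the target's metadata map, as phase1 does. */
  public static Diff diff(Iterable<FileMeta> source, Map<String, FileMeta> target) {
    Diff diff = new Diff();
    for (FileMeta md : source) {
      FileMeta other = target.get(md.name);
      if (other == null) {
        diff.missing.add(md);   // target has never seen this file: copy it
      } else if (other.length == md.length && Objects.equals(other.checksum, md.checksum)) {
        diff.identical.add(md); // same size and checksum: safe to reuse
      } else {
        diff.different.add(md); // same name but different content: copy it again
      }
    }
    return diff;
  }
}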