public void testDisabledUpdateIndexedScriptsOnly() {
   assertAcked(
       client()
           .admin()
           .cluster()
           .preparePutStoredScript()
           .setScriptLang(GroovyScriptEngineService.NAME)
           .setId("script1")
           .setSource(new BytesArray("{\"script\":\"2\"}")));
   client().prepareIndex("test", "scriptTest", "1").setSource("{\"theField\":\"foo\"}").get();
   try {
     client()
         .prepareUpdate("test", "scriptTest", "1")
         .setScript(
             new Script(
                 "script1", ScriptService.ScriptType.STORED, GroovyScriptEngineService.NAME, null))
         .get();
     fail("update script should have been rejected");
   } catch (Exception e) {
     assertThat(e.getMessage(), containsString("failed to execute script"));
     assertThat(
         ExceptionsHelper.detailedMessage(e),
         containsString(
             "scripts of type [stored], operation [update] and lang [groovy] are disabled"));
   }
 }
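Every example in this listing asserts on or reports the output of ExceptionsHelper.detailedMessage. As a rough illustration only (not the actual Elasticsearch implementation), a helper of this kind walks the cause chain and concatenates each exception's class name and message, which is why the assertions above can match text that originates from a nested cause:

// Illustrative approximation; the real ExceptionsHelper.detailedMessage may differ in detail.
final class ExceptionMessages {
  static String detailedMessage(Throwable t) {
    if (t == null) {
      return "Unknown";
    }
    StringBuilder sb = new StringBuilder();
    while (t != null) {
      // append the exception class and its message, then descend into the cause
      sb.append(t.getClass().getSimpleName());
      if (t.getMessage() != null) {
        sb.append('[').append(t.getMessage()).append(']');
      }
      t = t.getCause();
      if (t != null) {
        sb.append("; nested: ");
      }
    }
    return sb.toString();
  }
}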
  /**
   * Helper for unzipping downloaded plugin zips.
   *
   * @param environment the node environment, used to locate the home and plugins directories
   * @param zipFile the downloaded zip archive to extract
   * @param targetFile the directory the archive is extracted into
   * @param targetPath the plugin path name, used for the bin and temporary directories
   * @throws IOException if an I/O error occurs while extracting
   */
 private void unzip(Environment environment, ZipFile zipFile, File targetFile, String targetPath)
     throws IOException {
   String baseDirSuffix = null;
   try {
     Enumeration<? extends ZipEntry> zipEntries = zipFile.entries();
      if (!zipEntries.hasMoreElements()) {
        logger.error("the zip archive has no entries");
        return;
      }
     ZipEntry firstEntry = zipEntries.nextElement();
     if (firstEntry.isDirectory()) {
       baseDirSuffix = firstEntry.getName();
     } else {
       zipEntries = zipFile.entries();
     }
     while (zipEntries.hasMoreElements()) {
       ZipEntry zipEntry = zipEntries.nextElement();
       if (zipEntry.isDirectory()) {
         continue;
       }
       String zipEntryName = zipEntry.getName();
       zipEntryName = zipEntryName.replace('\\', '/');
       if (baseDirSuffix != null && zipEntryName.startsWith(baseDirSuffix)) {
         zipEntryName = zipEntryName.substring(baseDirSuffix.length());
       }
       File target = new File(targetFile, zipEntryName);
       FileSystemUtils.mkdirs(target.getParentFile());
       Streams.copy(zipFile.getInputStream(zipEntry), new FileOutputStream(target));
     }
   } catch (IOException e) {
     logger.error(
         "failed to extract zip ["
             + zipFile.getName()
             + "]: "
             + ExceptionsHelper.detailedMessage(e));
     return;
   } finally {
     try {
       zipFile.close();
     } catch (IOException e) {
       // ignore
     }
   }
   File binFile = new File(targetFile, "bin");
   if (binFile.exists() && binFile.isDirectory()) {
     File toLocation = new File(new File(environment.homeFile(), "bin"), targetPath);
     logger.info("found bin, moving to " + toLocation.getAbsolutePath());
     FileSystemUtils.deleteRecursively(toLocation);
     binFile.renameTo(toLocation);
   }
   if (!new File(targetFile, "_site").exists()) {
     if (!FileSystemUtils.hasExtensions(targetFile, ".class", ".jar")) {
       logger.info("identified as a _site plugin, moving to _site structure ...");
       File site = new File(targetFile, "_site");
       File tmpLocation = new File(environment.pluginsFile(), targetPath + ".tmp");
       targetFile.renameTo(tmpLocation);
       FileSystemUtils.mkdirs(targetFile);
       tmpLocation.renameTo(site);
     }
   }
 }
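The loop above builds extraction targets with new File(targetFile, zipEntryName) without validating the entry name, so an archive entry containing ../ segments could escape the target directory (the classic zip-slip problem). A minimal hardening sketch, using a hypothetical helper that is not part of the original code:

// Hypothetical guard: resolve the entry name against the extraction root and
// refuse anything whose canonical path falls outside of it.
private static File safeTarget(File targetDir, String entryName) throws IOException {
  File target = new File(targetDir, entryName);
  String rootPath = targetDir.getCanonicalPath() + File.separator;
  if (!target.getCanonicalPath().startsWith(rootPath)) {
    throw new IOException("zip entry [" + entryName + "] resolves outside of [" + targetDir + "]");
  }
  return target;
}

With such a helper, File target = new File(targetFile, zipEntryName); in the loop would become File target = safeTarget(targetFile, zipEntryName);.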
  public void testUnsupportedFeatures() throws IOException {
    XContentBuilder mapping =
        XContentBuilder.builder(JsonXContent.jsonXContent)
            .startObject()
            .startObject("type")
            .startObject(FieldNamesFieldMapper.NAME)
             // by randomly setting index to "no" we also test the pre-1.3 behavior
            .field("index", randomFrom("no", "not_analyzed"))
            .field("store", randomFrom("no", "yes"))
            .endObject()
            .endObject()
            .endObject();

    try {
      assertAcked(
          prepareCreate("test")
              .setSettings(
                  Settings.builder()
                      .put(
                          "index.routing.allocation.exclude._name",
                          backwardsCluster().newNodePattern())
                      .put(indexSettings()))
              .addMapping("type", mapping));
    } catch (MapperParsingException ex) {
      assertThat(ex.getCause(), instanceOf(IllegalArgumentException.class));
      assertThat(
          ExceptionsHelper.detailedMessage(ex)
              .contains(
                  "type=_field_names is not supported on indices created before version 1.3.0"),
          equalTo(true));
    }
  }
  public void verify(
      String repository, String verificationToken, final ActionListener<VerifyResponse> listener) {
    final DiscoveryNodes discoNodes = clusterService.state().nodes();
    final DiscoveryNode localNode = discoNodes.localNode();

    final ObjectContainer<DiscoveryNode> masterAndDataNodes =
        discoNodes.masterAndDataNodes().values();
    final List<DiscoveryNode> nodes = newArrayList();
    for (ObjectCursor<DiscoveryNode> cursor : masterAndDataNodes) {
      DiscoveryNode node = cursor.value;
      Version version = node.getVersion();
      // Verification wasn't supported before v1.4.0 - no reason to send verification request to
      // these nodes
      if (version != null && version.onOrAfter(Version.V_1_4_0)) {
        nodes.add(node);
      }
    }
    final CopyOnWriteArrayList<VerificationFailure> errors = new CopyOnWriteArrayList<>();
    final AtomicInteger counter = new AtomicInteger(nodes.size());
    for (final DiscoveryNode node : nodes) {
      if (node.equals(localNode)) {
        try {
          doVerify(repository, verificationToken);
        } catch (Throwable t) {
          logger.warn("[{}] failed to verify repository", t, repository);
          errors.add(new VerificationFailure(node.id(), ExceptionsHelper.detailedMessage(t)));
        }
        if (counter.decrementAndGet() == 0) {
          finishVerification(listener, nodes, errors);
        }
      } else {
        transportService.sendRequest(
            node,
            ACTION_NAME,
            new VerifyNodeRepositoryRequest(repository, verificationToken),
            new EmptyTransportResponseHandler(ThreadPool.Names.SAME) {
              @Override
              public void handleResponse(TransportResponse.Empty response) {
                if (counter.decrementAndGet() == 0) {
                  finishVerification(listener, nodes, errors);
                }
              }

              @Override
              public void handleException(TransportException exp) {
                errors.add(
                    new VerificationFailure(node.id(), ExceptionsHelper.detailedMessage(exp)));
                if (counter.decrementAndGet() == 0) {
                  finishVerification(listener, nodes, errors);
                }
              }
            });
      }
    }
  }
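The completion check counter.decrementAndGet() == 0 followed by finishVerification(...) appears three times in the method above (local verification, handleResponse and handleException). Purely as an illustrative refactoring sketch, and not part of this codebase, the pattern can be isolated in a small helper that invokes a completion callback exactly once after the expected number of acknowledgements:

// Illustrative countdown helper: run the completion callback exactly once,
// after the expected number of per-node acknowledgements has arrived.
final class Countdown {
  private final java.util.concurrent.atomic.AtomicInteger remaining;
  private final Runnable onComplete;

  Countdown(int expected, Runnable onComplete) {
    this.remaining = new java.util.concurrent.atomic.AtomicInteger(expected);
    this.onComplete = onComplete;
  }

  void acknowledge() {
    if (remaining.decrementAndGet() == 0) {
      onComplete.run();
    }
  }
}

Each response and failure handler would then call acknowledge() instead of repeating the decrement-and-compare logic.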
  @Override
  protected MultiGetShardResponse shardOperation(MultiGetShardRequest request, int shardId)
      throws ElasticSearchException {
    IndexService indexService = indicesService.indexServiceSafe(request.index());
    IndexShard indexShard = indexService.shardSafe(shardId);

    if (request.refresh() && !request.realtime()) {
      indexShard.refresh(
          new Engine.Refresh("refresh_flag_mget").force(TransportGetAction.REFRESH_FORCE));
    }

    MultiGetShardResponse response = new MultiGetShardResponse();
    for (int i = 0; i < request.locations.size(); i++) {
      String type = request.types.get(i);
      String id = request.ids.get(i);
      String[] fields = request.fields.get(i);

      long version = request.versions.get(i);
      VersionType versionType = request.versionTypes.get(i);
      if (versionType == null) {
        versionType = VersionType.INTERNAL;
      }

      FetchSourceContext fetchSourceContext = request.fetchSourceContexts.get(i);
      try {
        GetResult getResult =
            indexShard
                .getService()
                .get(
                    type, id, fields, request.realtime(), version, versionType, fetchSourceContext);
        response.add(request.locations.get(i), new GetResponse(getResult));
      } catch (Throwable t) {
        if (TransportActions.isShardNotAvailableException(t)) {
          throw (ElasticSearchException) t;
        } else {
          logger.debug(
              "[{}][{}] failed to execute multi_get for [{}]/[{}]",
              t,
              request.index(),
              shardId,
              type,
              id);
          response.add(
              request.locations.get(i),
              new MultiGetResponse.Failure(
                  request.index(), type, id, ExceptionsHelper.detailedMessage(t)));
        }
      }
    }

    return response;
  }
  /**
   * @param type the ec2 hostname type to discover.
   * @param warnOnFailure whether failures to fetch the meta-data are logged at warn level rather
   *     than debug.
   * @return the appropriate host resolved from ec2 meta-data, or null if it cannot be obtained.
   * @see CustomNameResolver#resolveIfPossible(String)
   */
  public InetAddress resolve(Ec2HostnameType type, boolean warnOnFailure) {
    URLConnection urlConnection = null;
    InputStream in = null;
    try {
      URL url = new URL(AwsEc2Service.EC2_METADATA_URL + type.ec2Name);
      logger.debug("obtaining ec2 hostname from ec2 meta-data url {}", url);
      urlConnection = url.openConnection();
      urlConnection.setConnectTimeout(2000);
      in = urlConnection.getInputStream();
      BufferedReader urlReader = new BufferedReader(new InputStreamReader(in));

      String metadataResult = urlReader.readLine();
      if (metadataResult == null || metadataResult.length() == 0) {
        logger.error("no ec2 metadata returned from {}", url);
        return null;
      }
      return InetAddress.getByName(metadataResult);
    } catch (IOException e) {
      if (warnOnFailure) {
        logger.warn(
            "failed to get metadata for ["
                + type.configName
                + "]: "
                + ExceptionsHelper.detailedMessage(e));
      } else {
        logger.debug(
            "failed to get metadata for ["
                + type.configName
                + "]: "
                + ExceptionsHelper.detailedMessage(e));
      }
      return null;
    } finally {
      IOUtils.closeWhileHandlingException(in);
    }
  }
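Note that the connection above sets only a connect timeout; if the meta-data endpoint accepts the connection but then stalls, the subsequent read can still block. A minimal, hypothetical variant of the same java.net calls (the URL literal in the usage comment is the conventional EC2 meta-data endpoint, shown only for illustration) that also bounds the read:

// Hypothetical sketch, not the plugin's code: bound both connect and read time so a
// stalled meta-data response cannot block the caller indefinitely.
static String fetchFirstLine(String metadataUrl) throws java.io.IOException {
  java.net.URLConnection connection = new java.net.URL(metadataUrl).openConnection();
  connection.setConnectTimeout(2000);
  connection.setReadTimeout(2000);
  try (java.io.BufferedReader reader =
      new java.io.BufferedReader(new java.io.InputStreamReader(connection.getInputStream()))) {
    return reader.readLine();
  }
}

// example: fetchFirstLine("http://169.254.169.254/latest/meta-data/public-hostname")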
  @Override
  protected ShardResponse processRequestItems(
      ShardId shardId, ShardUpsertRequest request, AtomicBoolean killed) {
    ShardResponse shardResponse = new ShardResponse();
    DocTableInfo tableInfo = schemas.getWritableTable(TableIdent.fromIndexName(request.index()));
    for (int i = 0; i < request.itemIndices().size(); i++) {
      int location = request.itemIndices().get(i);
      ShardUpsertRequest.Item item = request.items().get(i);
      if (killed.get()) {
        throw new CancellationException();
      }
      try {
        indexItem(
            tableInfo,
            request,
            item,
            shardId,
            item.insertValues() != null, // try insert first
            0);
        shardResponse.add(location);
      } catch (Throwable t) {
        if (!TransportActions.isShardNotAvailableException(t) && !request.continueOnError()) {
          throw t;
        } else {
          logger.debug(
              "{} failed to execute upsert for [{}]/[{}]",
              t,
              request.shardId(),
              request.type(),
              item.id());
          shardResponse.add(
              location,
              new ShardResponse.Failure(
                  item.id(),
                  ExceptionsHelper.detailedMessage(t),
                  (t instanceof VersionConflictEngineException)));
        }
      }
    }

    return shardResponse;
  }
  @Override
  protected MultiTermVectorsShardResponse shardOperation(
      MultiTermVectorsShardRequest request, int shardId) throws ElasticsearchException {

    MultiTermVectorsShardResponse response = new MultiTermVectorsShardResponse();
    for (int i = 0; i < request.locations.size(); i++) {
      TermVectorRequest termVectorRequest = request.requests.get(i);

      try {
        IndexService indexService = indicesService.indexServiceSafe(request.index());
        IndexShard indexShard = indexService.shardSafe(shardId);
        TermVectorResponse termVectorResponse =
            indexShard.termVectorService().getTermVector(termVectorRequest);
        response.add(request.locations.get(i), termVectorResponse);
      } catch (Throwable t) {
        if (TransportActions.isShardNotAvailableException(t)) {
          throw (ElasticsearchException) t;
        } else {
          logger.debug(
              "[{}][{}] failed to execute multi term vectors for [{}]/[{}]",
              t,
              request.index(),
              shardId,
              termVectorRequest.type(),
              termVectorRequest.id());
          response.add(
              request.locations.get(i),
              new MultiTermVectorsResponse.Failure(
                  request.index(),
                  termVectorRequest.type(),
                  termVectorRequest.id(),
                  ExceptionsHelper.detailedMessage(t)));
        }
      }
    }

    return response;
  }
 @Override
 protected Response shardOperation(Request request, ShardId shardId) {
   // TODO: Look into combining the shard req's docs into one in memory index.
   Response response = new Response();
   response.items = new ArrayList<>(request.items.size());
   for (Request.Item item : request.items) {
     Response.Item responseItem;
     int slot = item.slot;
     try {
       responseItem = new Response.Item(slot, percolatorService.percolate(item.request));
     } catch (Throwable t) {
       if (TransportActions.isShardNotAvailableException(t)) {
         throw (ElasticsearchException) t;
       } else {
         logger.debug("{} failed to multi percolate", t, request.shardId());
         responseItem =
             new Response.Item(slot, new StringText(ExceptionsHelper.detailedMessage(t)));
       }
     }
     response.items.add(responseItem);
   }
   return response;
 }
 // test to make sure expressions are not allowed to be used as mapping scripts
 public void testInvalidMappingScript() throws Exception {
   try {
     createIndex("test_index");
     ensureGreen("test_index");
     XContentBuilder builder = XContentFactory.jsonBuilder().startObject();
     builder.startObject("transform");
     builder.field("script", "1.0");
     builder.field("lang", ExpressionScriptEngineService.NAME);
     builder.endObject();
     builder.startObject("properties");
     builder.startObject("double_field");
     builder.field("type", "double");
     builder.endObject();
     builder.endObject();
     builder.endObject();
     client()
         .admin()
         .indices()
         .preparePutMapping("test_index")
         .setType("trans_test")
         .setSource(builder)
         .get();
     client().prepareIndex("test_index", "trans_test", "1").setSource("double_field", 0.0).get();
     fail("Expression scripts should not be allowed to run as mapping scripts.");
   } catch (Exception e) {
     String message = ExceptionsHelper.detailedMessage(e);
     assertThat(
         message + " should have contained failed to parse",
         message.contains("failed to parse"),
         equalTo(true));
     assertThat(
         message + " should have contained not supported",
         message.contains("not supported"),
         equalTo(true));
   }
 }
  private void extract(PluginHandle pluginHandle, Terminal terminal, Path pluginFile)
      throws IOException {
    // unzip plugin to a staging temp dir, named for the plugin
    Path tmp = Files.createTempDirectory(environment.tmpFile(), null);
    Path root = tmp.resolve(pluginHandle.name);
    unzipPlugin(pluginFile, root);

    // find the actual root (in case it's unzipped with extra directory wrapping)
    root = findPluginRoot(root);

    // read and validate the plugin descriptor
    PluginInfo info = PluginInfo.readFromProperties(root);
    terminal.println(VERBOSE, "%s", info);

    // update name in handle based on 'name' property found in descriptor file
    pluginHandle = new PluginHandle(info.getName(), pluginHandle.version, pluginHandle.user);
    final Path extractLocation = pluginHandle.extractedDir(environment);
    if (Files.exists(extractLocation)) {
      throw new IOException(
          "plugin directory "
              + extractLocation.toAbsolutePath()
              + " already exists. To update the plugin, uninstall it first using 'remove "
              + pluginHandle.name
              + "' command");
    }

    // check for jar hell before any copying
    if (info.isJvm()) {
      jarHellCheck(root, info.isIsolated());
    }

    // install plugin
    FileSystemUtils.copyDirectoryRecursively(root, extractLocation);
    terminal.println("Installed %s into %s", pluginHandle.name, extractLocation.toAbsolutePath());

    // cleanup
    tryToDeletePath(terminal, tmp, pluginFile);

    // take care of bin/ by moving and applying permissions if needed
    Path sourcePluginBinDirectory = extractLocation.resolve("bin");
    Path destPluginBinDirectory = pluginHandle.binDir(environment);
    boolean needToCopyBinDirectory = Files.exists(sourcePluginBinDirectory);
    if (needToCopyBinDirectory) {
      if (Files.exists(destPluginBinDirectory) && !Files.isDirectory(destPluginBinDirectory)) {
        tryToDeletePath(terminal, extractLocation);
        throw new IOException(
            "plugin bin directory " + destPluginBinDirectory + " is not a directory");
      }

      try {
        copyBinDirectory(
            sourcePluginBinDirectory, destPluginBinDirectory, pluginHandle.name, terminal);
      } catch (IOException e) {
        // roll back and remove any leftovers that may have been installed before the failure
        terminal.printError(
            "Error copying bin directory [%s] to [%s], cleaning up, reason: %s",
            sourcePluginBinDirectory, destPluginBinDirectory, ExceptionsHelper.detailedMessage(e));
        tryToDeletePath(terminal, extractLocation, pluginHandle.binDir(environment));
        throw e;
      }
    }

    Path sourceConfigDirectory = extractLocation.resolve("config");
    Path destConfigDirectory = pluginHandle.configDir(environment);
    boolean needToCopyConfigDirectory = Files.exists(sourceConfigDirectory);
    if (needToCopyConfigDirectory) {
      if (Files.exists(destConfigDirectory) && !Files.isDirectory(destConfigDirectory)) {
        tryToDeletePath(terminal, extractLocation, destPluginBinDirectory);
        throw new IOException(
            "plugin config directory " + destConfigDirectory + " is not a directory");
      }

      try {
        terminal.println(
            VERBOSE, "Found config, moving to %s", destConfigDirectory.toAbsolutePath());
        moveFilesWithoutOverwriting(sourceConfigDirectory, destConfigDirectory, ".new");

        if (Environment.getFileStore(destConfigDirectory)
            .supportsFileAttributeView(PosixFileAttributeView.class)) {
          // We copy owner, group and permissions from the parent ES_CONFIG directory, assuming they
          // were properly set depending
          // on how es was installed in the first place: can be root:elasticsearch (750) if es was
          // installed from rpm/deb packages
          // or most likely elasticsearch:elasticsearch if installed from tar/zip. As for
          // permissions we don't rely on umask.
          final PosixFileAttributes parentDirAttributes =
              Files.getFileAttributeView(
                      destConfigDirectory.getParent(), PosixFileAttributeView.class)
                  .readAttributes();
          // for files though, we make sure not to copy execute permissions from the parent dir and
          // leave them untouched
          final Set<PosixFilePermission> baseFilePermissions = new HashSet<>();
          for (PosixFilePermission posixFilePermission : parentDirAttributes.permissions()) {
            switch (posixFilePermission) {
              case OWNER_EXECUTE:
              case GROUP_EXECUTE:
              case OTHERS_EXECUTE:
                break;
              default:
                baseFilePermissions.add(posixFilePermission);
            }
          }
          Files.walkFileTree(
              destConfigDirectory,
              new SimpleFileVisitor<Path>() {
                @Override
                public FileVisitResult visitFile(Path file, BasicFileAttributes attrs)
                    throws IOException {
                  if (attrs.isRegularFile()) {
                    Set<PosixFilePermission> newFilePermissions =
                        new HashSet<>(baseFilePermissions);
                    Set<PosixFilePermission> currentFilePermissions =
                        Files.getPosixFilePermissions(file);
                    for (PosixFilePermission posixFilePermission : currentFilePermissions) {
                      switch (posixFilePermission) {
                        case OWNER_EXECUTE:
                        case GROUP_EXECUTE:
                        case OTHERS_EXECUTE:
                          newFilePermissions.add(posixFilePermission);
                      }
                    }
                    setPosixFileAttributes(
                        file,
                        parentDirAttributes.owner(),
                        parentDirAttributes.group(),
                        newFilePermissions);
                  }
                  return FileVisitResult.CONTINUE;
                }

                @Override
                public FileVisitResult preVisitDirectory(Path dir, BasicFileAttributes attrs)
                    throws IOException {
                  setPosixFileAttributes(
                      dir,
                      parentDirAttributes.owner(),
                      parentDirAttributes.group(),
                      parentDirAttributes.permissions());
                  return FileVisitResult.CONTINUE;
                }
              });
        } else {
          terminal.println(
              VERBOSE, "Skipping posix permissions - filestore doesn't support posix permission");
        }

        terminal.println(
            VERBOSE,
            "Installed %s into %s",
            pluginHandle.name,
            destConfigDirectory.toAbsolutePath());
      } catch (IOException e) {
        terminal.printError(
            "Error copying config directory [%s] to [%s], cleaning up, reason: %s",
            sourceConfigDirectory, destConfigDirectory, ExceptionsHelper.detailedMessage(e));
        tryToDeletePath(terminal, extractLocation, destPluginBinDirectory, destConfigDirectory);
        throw e;
      }
    }
  }
  private Path download(PluginHandle pluginHandle, Terminal terminal) throws IOException {
    Path pluginFile = pluginHandle.newDistroFile(environment);

    HttpDownloadHelper downloadHelper = new HttpDownloadHelper();
    boolean downloaded = false;
    boolean verified = false;
    HttpDownloadHelper.DownloadProgress progress;
    if (outputMode == OutputMode.SILENT) {
      progress = new HttpDownloadHelper.NullProgress();
    } else {
      progress = new HttpDownloadHelper.VerboseProgress(terminal.writer());
    }

    // first, try directly from the URL provided
    if (url != null) {
      URL pluginUrl = url;
      boolean isSecureProtocol = "https".equalsIgnoreCase(pluginUrl.getProtocol());
      boolean isAuthInfoSet = !Strings.isNullOrEmpty(pluginUrl.getUserInfo());
      if (isAuthInfoSet && !isSecureProtocol) {
        throw new IOException("Basic auth is only supported for HTTPS!");
      }

      terminal.println("Trying %s ...", pluginUrl.toExternalForm());
      try {
        downloadHelper.download(pluginUrl, pluginFile, progress, this.timeout);
        downloaded = true;
        terminal.println("Verifying %s checksums if available ...", pluginUrl.toExternalForm());
        Tuple<URL, Path> sha1Info =
            pluginHandle.newChecksumUrlAndFile(environment, pluginUrl, "sha1");
        verified =
            downloadHelper.downloadAndVerifyChecksum(
                sha1Info.v1(),
                pluginFile,
                sha1Info.v2(),
                progress,
                this.timeout,
                HttpDownloadHelper.SHA1_CHECKSUM);
        Tuple<URL, Path> md5Info =
            pluginHandle.newChecksumUrlAndFile(environment, pluginUrl, "md5");
        verified =
            verified
                || downloadHelper.downloadAndVerifyChecksum(
                    md5Info.v1(),
                    pluginFile,
                    md5Info.v2(),
                    progress,
                    this.timeout,
                    HttpDownloadHelper.MD5_CHECKSUM);
      } catch (ElasticsearchTimeoutException | ElasticsearchCorruptionException e) {
        throw e;
      } catch (Exception e) {
        // ignore
        terminal.println("Failed: %s", ExceptionsHelper.detailedMessage(e));
      }
    } else {
      if (PluginHandle.isOfficialPlugin(
          pluginHandle.name, pluginHandle.user, pluginHandle.version)) {
        checkForOfficialPlugins(pluginHandle.name);
      }
    }

    if (!downloaded && url == null) {
      // We try all possible locations
      for (URL url : pluginHandle.urls()) {
        terminal.println("Trying %s ...", url.toExternalForm());
        try {
          downloadHelper.download(url, pluginFile, progress, this.timeout);
          downloaded = true;
          terminal.println("Verifying %s checksums if available ...", url.toExternalForm());
          Tuple<URL, Path> sha1Info = pluginHandle.newChecksumUrlAndFile(environment, url, "sha1");
          verified =
              downloadHelper.downloadAndVerifyChecksum(
                  sha1Info.v1(),
                  pluginFile,
                  sha1Info.v2(),
                  progress,
                  this.timeout,
                  HttpDownloadHelper.SHA1_CHECKSUM);
          Tuple<URL, Path> md5Info = pluginHandle.newChecksumUrlAndFile(environment, url, "md5");
          verified =
              verified
                  || downloadHelper.downloadAndVerifyChecksum(
                      md5Info.v1(),
                      pluginFile,
                      md5Info.v2(),
                      progress,
                      this.timeout,
                      HttpDownloadHelper.MD5_CHECKSUM);
          break;
        } catch (ElasticsearchTimeoutException | ElasticsearchCorruptionException e) {
          throw e;
        } catch (Exception e) {
          terminal.println(VERBOSE, "Failed: %s", ExceptionsHelper.detailedMessage(e));
        }
      }
    }

    if (!downloaded) {
      // try to cleanup what we downloaded
      IOUtils.deleteFilesIgnoringExceptions(pluginFile);
      throw new IOException(
          "failed to download out of all possible locations..., use --verbose to get detailed information");
    }

    if (verified == false) {
      terminal.println(
          "NOTE: Unable to verify checksum for downloaded plugin (unable to find .sha1 or .md5 file to verify)");
    }
    return pluginFile;
  }
 @Override
 public void onRecoveryFailure(
     RecoveryState state, RecoveryFailedException e, boolean sendShardFailure) {
   fail(ExceptionsHelper.detailedMessage(e));
 }
 @Override
 protected PrimaryResponse<IngestShardResponse, IngestShardRequest> shardOperationOnPrimary(
     ClusterState clusterState, PrimaryOperationRequest shardRequest) {
   final IngestShardRequest request = shardRequest.request;
   IndexShard indexShard =
       indicesService
           .indexServiceSafe(shardRequest.request.index())
           .shardSafe(shardRequest.shardId);
   int successSize = 0;
   List<IngestItemFailure> failure = newLinkedList();
   int size = request.items().size();
   long[] versions = new long[size];
   Set<Tuple<String, String>> mappingsToUpdate = newHashSet();
   for (int i = 0; i < size; i++) {
     IngestItemRequest item = request.items().get(i);
     if (item.request() instanceof IndexRequest) {
       IndexRequest indexRequest = (IndexRequest) item.request();
       Engine.IndexingOperation op = null;
       try {
         // validate, if routing is required, that we got routing
         MappingMetaData mappingMd =
             clusterState.metaData().index(request.index()).mappingOrDefault(indexRequest.type());
         if (mappingMd != null && mappingMd.routing().required()) {
           if (indexRequest.routing() == null) {
             throw new RoutingMissingException(
                 indexRequest.index(), indexRequest.type(), indexRequest.id());
           }
         }
         SourceToParse sourceToParse =
             SourceToParse.source(SourceToParse.Origin.PRIMARY, indexRequest.source())
                 .type(indexRequest.type())
                 .id(indexRequest.id())
                 .routing(indexRequest.routing())
                 .parent(indexRequest.parent())
                 .timestamp(indexRequest.timestamp())
                 .ttl(indexRequest.ttl());
         long version;
         if (indexRequest.opType() == IndexRequest.OpType.INDEX) {
           Engine.Index index =
               indexShard
                   .prepareIndex(sourceToParse)
                   .version(indexRequest.version())
                   .versionType(indexRequest.versionType())
                   .origin(Engine.Operation.Origin.PRIMARY);
           op = index;
           indexShard.index(index);
           version = index.version();
         } else {
           Engine.Create create =
               indexShard
                   .prepareCreate(sourceToParse)
                   .version(indexRequest.version())
                   .versionType(indexRequest.versionType())
                   .origin(Engine.Operation.Origin.PRIMARY);
           op = create;
           indexShard.create(create);
           version = create.version();
         }
         versions[i] = indexRequest.version();
         // update the version on request so it will happen on the replicas
         indexRequest.version(version);
         successSize++;
       } catch (Throwable e) {
          // rethrow the failure if we are going to retry on the primary and let the parent
          // handle it
         if (retryPrimaryException(e)) {
           // restore updated versions...
           for (int j = 0; j < i; j++) {
             applyVersion(request.items().get(j), versions[j]);
           }
           logger.error(e.getMessage(), e);
           throw new ElasticsearchException(e.getMessage());
         }
         if (e instanceof ElasticsearchException
             && ((ElasticsearchException) e).status() == RestStatus.CONFLICT) {
            logger.trace(
               "[{}][{}] failed to execute bulk item (index) {}",
               e,
               shardRequest.request.index(),
               shardRequest.shardId,
               indexRequest);
         } else {
            logger.debug(
               "[{}][{}] failed to execute bulk item (index) {}",
               e,
               shardRequest.request.index(),
               shardRequest.shardId,
               indexRequest);
         }
         failure.add(new IngestItemFailure(item.id(), ExceptionsHelper.detailedMessage(e)));
         // nullify the request so it won't execute on the replicas
         request.items().set(i, null);
       } finally {
          // update mapping on master if needed; we won't update changes to the same type, since
          // once it's changed, it won't have mappers added
         if (op != null && op.parsedDoc().mappingsModified()) {
           mappingsToUpdate.add(Tuple.tuple(indexRequest.index(), indexRequest.type()));
         }
       }
     } else if (item.request() instanceof DeleteRequest) {
       DeleteRequest deleteRequest = (DeleteRequest) item.request();
       try {
         Engine.Delete delete =
             indexShard
                 .prepareDelete(deleteRequest.type(), deleteRequest.id(), deleteRequest.version())
                 .versionType(deleteRequest.versionType())
                 .origin(Engine.Operation.Origin.PRIMARY);
         indexShard.delete(delete);
          // update the request with the version so it will go to the replicas
         deleteRequest.version(delete.version());
         successSize++;
       } catch (Throwable e) {
          // rethrow the failure if we are going to retry on the primary and let the parent
          // handle it
         if (retryPrimaryException(e)) {
           // restore updated versions...
           for (int j = 0; j < i; j++) {
             applyVersion(request.items().get(j), versions[j]);
           }
           logger.error(e.getMessage(), e);
           throw new ElasticsearchException(e.getMessage());
         }
         if (e instanceof ElasticsearchException
             && ((ElasticsearchException) e).status() == RestStatus.CONFLICT) {
           logger.trace(
               "[{}][{}] failed to execute bulk item (delete) {}",
               e,
               shardRequest.request.index(),
               shardRequest.shardId,
               deleteRequest);
         } else {
           logger.debug(
               "[{}][{}] failed to execute bulk item (delete) {}",
               e,
               shardRequest.request.index(),
               shardRequest.shardId,
               deleteRequest);
         }
         failure.add(new IngestItemFailure(item.id(), ExceptionsHelper.detailedMessage(e)));
         // nullify the request so it won't execute on the replicas
         request.items().set(i, null);
       }
     }
   }
   if (!mappingsToUpdate.isEmpty()) {
     for (Tuple<String, String> mappingToUpdate : mappingsToUpdate) {
       logger.info("mapping update {} {}", mappingToUpdate.v1(), mappingToUpdate.v2());
       updateMappingOnMaster(mappingToUpdate.v1(), mappingToUpdate.v2());
     }
   }
   IngestShardResponse response =
       new IngestShardResponse(
           new ShardId(request.index(), request.shardId()), successSize, failure);
   return new PrimaryResponse<IngestShardResponse, IngestShardRequest>(
       shardRequest.request, response, null);
 }