Example No. 1
 /**
  * Helper for unzipping downloaded zips
  *
  * @param environment the node environment, used to resolve the bin directory
  * @param zipFile the downloaded zip archive
  * @param targetFile the directory to extract the archive into
  * @param targetPath the plugin's path name, used for the bin subdirectory and temp locations
  * @throws IOException if an entry cannot be read or written
  */
 private void unzip(Environment environment, ZipFile zipFile, File targetFile, String targetPath)
     throws IOException {
   String baseDirSuffix = null;
   try {
     Enumeration<? extends ZipEntry> zipEntries = zipFile.entries();
      if (!zipEntries.hasMoreElements()) {
        logger.error("the zip archive has no entries");
        return;
      }
      ZipEntry firstEntry = zipEntries.nextElement();
     if (firstEntry.isDirectory()) {
       baseDirSuffix = firstEntry.getName();
     } else {
       zipEntries = zipFile.entries();
     }
     while (zipEntries.hasMoreElements()) {
       ZipEntry zipEntry = zipEntries.nextElement();
       if (zipEntry.isDirectory()) {
         continue;
       }
       String zipEntryName = zipEntry.getName();
       zipEntryName = zipEntryName.replace('\\', '/');
       if (baseDirSuffix != null && zipEntryName.startsWith(baseDirSuffix)) {
         zipEntryName = zipEntryName.substring(baseDirSuffix.length());
       }
       File target = new File(targetFile, zipEntryName);
       FileSystemUtils.mkdirs(target.getParentFile());
       Streams.copy(zipFile.getInputStream(zipEntry), new FileOutputStream(target));
     }
   } catch (IOException e) {
     logger.error(
         "failed to extract zip ["
             + zipFile.getName()
             + "]: "
             + ExceptionsHelper.detailedMessage(e));
     return;
   } finally {
     try {
       zipFile.close();
     } catch (IOException e) {
       // ignore
     }
   }
   File binFile = new File(targetFile, "bin");
   if (binFile.exists() && binFile.isDirectory()) {
     File toLocation = new File(new File(environment.homeFile(), "bin"), targetPath);
     logger.info("found bin, moving to " + toLocation.getAbsolutePath());
     FileSystemUtils.deleteRecursively(toLocation);
     binFile.renameTo(toLocation);
   }
   if (!new File(targetFile, "_site").exists()) {
     if (!FileSystemUtils.hasExtensions(targetFile, ".class", ".jar")) {
       logger.info("identified as a _site plugin, moving to _site structure ...");
       File site = new File(targetFile, "_site");
       File tmpLocation = new File(environment.pluginsFile(), targetPath + ".tmp");
       targetFile.renameTo(tmpLocation);
       FileSystemUtils.mkdirs(targetFile);
       tmpLocation.renameTo(site);
     }
   }
 }
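Note that the extraction loop above builds each target path straight from the archive's entry names, so a crafted entry such as ../../evil can escape targetFile (the classic "zip slip" issue). Below is a minimal traversal-safe sketch using only java.util.zip and java.nio.file; the name safeUnzip and the rejection policy are illustrative additions, not part of the original code.

import java.io.IOException;
import java.io.InputStream;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;
import java.util.Enumeration;
import java.util.zip.ZipEntry;
import java.util.zip.ZipFile;

// Extracts a zip archive, rejecting entries that would resolve outside the target directory.
static void safeUnzip(ZipFile zipFile, Path targetDir) throws IOException {
  Path root = targetDir.toAbsolutePath().normalize();
  Enumeration<? extends ZipEntry> entries = zipFile.entries();
  while (entries.hasMoreElements()) {
    ZipEntry entry = entries.nextElement();
    Path target = root.resolve(entry.getName().replace('\\', '/')).normalize();
    if (!target.startsWith(root)) {
      // ".."-style entry names would land outside the extraction root
      throw new IOException("zip entry outside target dir: " + entry.getName());
    }
    if (entry.isDirectory()) {
      Files.createDirectories(target);
      continue;
    }
    Files.createDirectories(target.getParent());
    try (InputStream in = zipFile.getInputStream(entry)) {
      Files.copy(in, target, StandardCopyOption.REPLACE_EXISTING);
    }
  }
}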
Example No. 2
 @Override
 public void fullDelete() throws IOException {
   FileSystemUtils.deleteRecursively(fsDirectory().getFile());
   // if we are the last ones, delete also the actual index
   String[] list = fsDirectory().getFile().getParentFile().list();
   if (list == null || list.length == 0) {
     FileSystemUtils.deleteRecursively(fsDirectory().getFile().getParentFile());
   }
 }
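FileSystemUtils.deleteRecursively appears throughout these examples. For reference, a hedged JDK-only equivalent built on Files.walkFileTree (the method name here is illustrative, not the actual implementation):

import java.io.IOException;
import java.nio.file.FileVisitResult;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.SimpleFileVisitor;
import java.nio.file.attribute.BasicFileAttributes;

// Deletes a directory tree: files first, then the directories that contained them.
static void deleteRecursively(Path root) throws IOException {
  if (!Files.exists(root)) {
    return;
  }
  Files.walkFileTree(root, new SimpleFileVisitor<Path>() {
    @Override
    public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException {
      Files.delete(file);
      return FileVisitResult.CONTINUE;
    }

    @Override
    public FileVisitResult postVisitDirectory(Path dir, IOException exc) throws IOException {
      if (exc != null) {
        throw exc; // a file below could not be visited; stop instead of deleting a half-full dir
      }
      Files.delete(dir);
      return FileVisitResult.CONTINUE;
    }
  });
}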
Example No. 3
  /**
   * Loads the hunspell dictionary for the given locale.
   *
   * @param locale The locale of the hunspell dictionary to be loaded.
   * @param nodeSettings The node level settings
   * @param env The node environment (from which the conf path will be resolved)
   * @return The loaded Hunspell dictionary
   * @throws Exception when loading fails (due to IO errors or malformed dictionary files)
   */
  private Dictionary loadDictionary(String locale, Settings nodeSettings, Environment env)
      throws Exception {
    if (logger.isDebugEnabled()) {
      logger.debug("Loading hunspell dictionary [{}]...", locale);
    }
    Path dicDir = hunspellDir.resolve(locale);
    if (FileSystemUtils.isAccessibleDirectory(dicDir, logger) == false) {
      throw new ElasticsearchException(
          String.format(Locale.ROOT, "Could not find hunspell dictionary [%s]", locale));
    }

    // merging node settings with hunspell dictionary specific settings
    Settings dictSettings = HUNSPELL_DICTIONARY_OPTIONS.get(nodeSettings);
    nodeSettings = loadDictionarySettings(dicDir, dictSettings.getByPrefix(locale));

    boolean ignoreCase = nodeSettings.getAsBoolean("ignore_case", defaultIgnoreCase);

    Path[] affixFiles = FileSystemUtils.files(dicDir, "*.aff");
    if (affixFiles.length == 0) {
      throw new ElasticsearchException(
          String.format(Locale.ROOT, "Missing affix file for hunspell dictionary [%s]", locale));
    }
    if (affixFiles.length != 1) {
      throw new ElasticsearchException(
          String.format(
              Locale.ROOT, "Too many affix files exist for hunspell dictionary [%s]", locale));
    }
    InputStream affixStream = null;

    Path[] dicFiles = FileSystemUtils.files(dicDir, "*.dic");
    List<InputStream> dicStreams = new ArrayList<>(dicFiles.length);
    try {

      for (int i = 0; i < dicFiles.length; i++) {
        dicStreams.add(Files.newInputStream(dicFiles[i]));
      }

      affixStream = Files.newInputStream(affixFiles[0]);

      try (Directory tmp = new SimpleFSDirectory(env.tmpFile())) {
        return new Dictionary(tmp, "hunspell", affixStream, dicStreams, ignoreCase);
      }

    } catch (Exception e) {
      logger.error("Could not load hunspell dictionary [{}]", e, locale);
      throw e;
    } finally {
      IOUtils.close(affixStream);
      IOUtils.close(dicStreams);
    }
  }
Example No. 4
 /**
  * Delete a plugin from the legacy location
  *
  * @param name the plugin name
   * @throws IOException if an I/O error occurs while deleting the plugin files
  */
 public void removePlugin(String name) throws IOException {
   File pluginToDelete = new File(environment.pluginsFile(), name);
   if (pluginToDelete.exists()) {
     FileSystemUtils.deleteRecursively(pluginToDelete, true);
   }
   pluginToDelete = new File(environment.pluginsFile(), name + ".zip");
   if (pluginToDelete.exists()) {
     pluginToDelete.delete();
   }
   File binLocation = new File(new File(environment.homeFile(), "bin"), name);
   if (binLocation.exists()) {
     FileSystemUtils.deleteRecursively(binLocation);
   }
 }
Example No. 5
 private void copyPlugin(File p1) throws IOException {
   FileSystemUtils.mkdirs(p1);
   // copy plugin
   File dir = new File(p1, "org/elasticsearch/plugins/isolation/");
   FileSystemUtils.mkdirs(dir);
   copyFile(
       getClass().getResourceAsStream("isolation/DummyClass.class"),
       new File(dir, "DummyClass.class"));
   copyFile(
       getClass().getResourceAsStream("isolation/IsolatedPlugin.class"),
       new File(dir, "IsolatedPlugin.class"));
   copyFile(
       getClass().getResourceAsStream("isolation/es-plugin.properties"),
       new File(p1, "es-plugin.properties"));
 }
Example No. 6
  /**
   * Fetches a list of words from the given settings. The list should either be available inline
   * at the key specified by settingPrefix, or in a file specified by settingPrefix + "_path".
   *
   * @throws IllegalArgumentException If the word list cannot be found at either key.
   */
  public static List<String> getWordList(Environment env, Settings settings, String settingPrefix) {
    String wordListPath = settings.get(settingPrefix + "_path", null);

    if (wordListPath == null) {
      String[] explicitWordList = settings.getAsArray(settingPrefix, null);
      if (explicitWordList == null) {
        return null;
      } else {
        return Arrays.asList(explicitWordList);
      }
    }

    final Path wordListFile = env.configFile().resolve(wordListPath);

    try (BufferedReader reader =
        FileSystemUtils.newBufferedReader(wordListFile.toUri().toURL(), StandardCharsets.UTF_8)) {
      return loadWordList(reader, "#");
    } catch (IOException ioe) {
      String message =
          String.format(
              Locale.ROOT,
              "IOException while reading %s_path: %s",
              settingPrefix,
              ioe.getMessage());
      throw new IllegalArgumentException(message);
    }
  }
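The loadWordList(reader, "#") helper is referenced here but not shown. A plausible reconstruction, under the assumption that it collects non-blank lines and skips lines starting with the comment marker:

import java.io.BufferedReader;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

// Hypothetical sketch of the loadWordList helper used above.
static List<String> loadWordList(BufferedReader reader, String comment) throws IOException {
  List<String> words = new ArrayList<>();
  String line;
  while ((line = reader.readLine()) != null) {
    line = line.trim();
    if (line.isEmpty() || line.startsWith(comment)) {
      continue; // skip blank lines and comments such as "# ..."
    }
    words.add(line);
  }
  return words;
}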
Example No. 7
  /** check a candidate plugin for jar hell before installing it */
  private void jarHellCheck(Path candidate, boolean isolated) throws IOException {
    // create list of current jars in classpath
    final List<URL> jars = new ArrayList<>(Arrays.asList(JarHell.parseClassPath()));

    // read existing bundles. this does some checks on the installation too.
    List<Bundle> bundles = PluginsService.getPluginBundles(environment.pluginsFile());

    // if we aren't isolated, we need to jar-hell check against any other non-isolated plugins;
    // that's always the first bundle
    if (isolated == false) {
      jars.addAll(bundles.get(0).urls);
    }

    // add plugin jars to the list
    Path[] pluginJars = FileSystemUtils.files(candidate, "*.jar");
    for (Path jar : pluginJars) {
      jars.add(jar.toUri().toURL());
    }

    // check combined (current classpath + new jars to-be-added)
    try {
      JarHell.checkJarHell(jars.toArray(new URL[jars.size()]));
    } catch (Exception ex) {
      throw new RuntimeException(ex);
    }
  }
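JarHell.checkJarHell is internal to Elasticsearch; the core idea is to index every class file across the combined jar list and fail as soon as the same class appears in two different jars. A hedged, JDK-only sketch of that kind of duplicate scan (checkForDuplicateClasses is an illustrative name, and the real check performs additional validation):

import java.io.IOException;
import java.nio.file.Path;
import java.util.Enumeration;
import java.util.HashMap;
import java.util.Map;
import java.util.jar.JarEntry;
import java.util.jar.JarFile;

// Fails if any .class entry occurs in more than one of the given jars.
static void checkForDuplicateClasses(Iterable<Path> jars) throws IOException {
  Map<String, Path> seen = new HashMap<>();
  for (Path jar : jars) {
    try (JarFile jarFile = new JarFile(jar.toFile())) {
      Enumeration<JarEntry> entries = jarFile.entries();
      while (entries.hasMoreElements()) {
        JarEntry entry = entries.nextElement();
        if (!entry.getName().endsWith(".class")) {
          continue;
        }
        Path previous = seen.putIfAbsent(entry.getName(), jar);
        if (previous != null && !previous.equals(jar)) {
          throw new IllegalStateException(
              "class " + entry.getName() + " found in both " + previous + " and " + jar);
        }
      }
    }
  }
}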
Example No. 8
   String loadIndex(String indexFile) throws Exception {
    Path unzipDir = createTempDir();
    Path unzipDataDir = unzipDir.resolve("data");
    String indexName =
        indexFile.replace(".zip", "").toLowerCase(Locale.ROOT).replace("unsupported-", "index-");

    // decompress the index
    Path backwardsIndex = getDataPath(indexFile);
    try (InputStream stream = Files.newInputStream(backwardsIndex)) {
      TestUtil.unzip(stream, unzipDir);
    }

    // check it is unique
    assertTrue(Files.exists(unzipDataDir));
    Path[] list = FileSystemUtils.files(unzipDataDir);
    if (list.length != 1) {
      throw new IllegalStateException("Backwards index must contain exactly one cluster");
    }

    // the bwc script packs the indices under this path
    Path src = list[0].resolve("nodes/0/indices/" + indexName);
    assertTrue("[" + indexFile + "] missing index dir: " + src.toString(), Files.exists(src));

    if (randomBoolean()) {
      logger.info("--> injecting index [{}] into single data path", indexName);
      copyIndex(logger, src, indexName, singleDataPath);
    } else {
      logger.info("--> injecting index [{}] into multi data path", indexName);
      copyIndex(logger, src, indexName, multiDataPath);
    }
    return indexName;
  }
Example No. 9
  private Path getNodeDir(String indexFile) throws IOException {
    Path unzipDir = createTempDir();
    Path unzipDataDir = unzipDir.resolve("data");

    // decompress the index
    Path backwardsIndex = getBwcIndicesPath().resolve(indexFile);
    try (InputStream stream = Files.newInputStream(backwardsIndex)) {
      TestUtil.unzip(stream, unzipDir);
    }

    // check it is unique
    assertTrue(Files.exists(unzipDataDir));
    Path[] list = FileSystemUtils.files(unzipDataDir);
    if (list.length != 1) {
      throw new IllegalStateException("Backwards index must contain exactly one cluster");
    }

    int zipIndex = indexFile.indexOf(".zip");
    final Version version = Version.fromString(indexFile.substring("index-".length(), zipIndex));
    if (version.before(Version.V_5_0_0_alpha1)) {
      // the bwc script packs the indices under this path
      return list[0].resolve("nodes/0/");
    } else {
      // after 5.0.0, data folders do not include the cluster name
      return list[0].resolve("0");
    }
  }
Example No. 10
  private void writeIndex(
      String reason, IndexMetaData indexMetaData, @Nullable IndexMetaData previousIndexMetaData)
      throws Exception {
    logger.trace("[{}] writing state, reason [{}]", indexMetaData.index(), reason);
    XContentBuilder builder = XContentFactory.contentBuilder(format, new BytesStreamOutput());
    builder.startObject();
    IndexMetaData.Builder.toXContent(indexMetaData, builder, formatParams);
    builder.endObject();
    builder.flush();

    String stateFileName = "state-" + indexMetaData.version();
    Exception lastFailure = null;
    boolean wroteAtLeastOnce = false;
    for (File indexLocation : nodeEnv.indexLocations(new Index(indexMetaData.index()))) {
      File stateLocation = new File(indexLocation, "_state");
      FileSystemUtils.mkdirs(stateLocation);
      File stateFile = new File(stateLocation, stateFileName);

      FileOutputStream fos = null;
      try {
        fos = new FileOutputStream(stateFile);
        BytesReference bytes = builder.bytes();
        fos.write(bytes.array(), bytes.arrayOffset(), bytes.length());
        fos.getChannel().force(true);
        fos.close();
        wroteAtLeastOnce = true;
      } catch (Exception e) {
        lastFailure = e;
      } finally {
        IOUtils.closeWhileHandlingException(fos);
      }
    }

    if (!wroteAtLeastOnce) {
      logger.warn("[{}]: failed to state", lastFailure, indexMetaData.index());
      throw new IOException(
          "failed to write state for [" + indexMetaData.index() + "]", lastFailure);
    }

    // delete the old files
    if (previousIndexMetaData != null
        && previousIndexMetaData.version() != indexMetaData.version()) {
      for (File indexLocation : nodeEnv.indexLocations(new Index(indexMetaData.index()))) {
        File[] files = new File(indexLocation, "_state").listFiles();
        if (files == null) {
          continue;
        }
        for (File file : files) {
          if (!file.getName().startsWith("state-")) {
            continue;
          }
          if (file.getName().equals(stateFileName)) {
            continue;
          }
          file.delete();
        }
      }
    }
  }
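Both this method and writeGlobalState below stream directly into the final state file, so a crash mid-write can leave a truncated state-N file behind (mitigated here by writing to every data location). A common alternative is write-temp-then-rename; a hedged sketch, with writeAtomically as an illustrative name:

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;
import java.nio.file.StandardOpenOption;

// Writes bytes to a temp sibling, fsyncs, then atomically renames onto the final name.
static void writeAtomically(Path finalFile, byte[] bytes) throws IOException {
  Path tmp = finalFile.resolveSibling(finalFile.getFileName() + ".tmp");
  try (FileChannel ch =
      FileChannel.open(tmp, StandardOpenOption.CREATE_NEW, StandardOpenOption.WRITE)) {
    ByteBuffer buf = ByteBuffer.wrap(bytes);
    while (buf.hasRemaining()) {
      ch.write(buf);
    }
    ch.force(true); // flush contents to disk before the rename makes them visible
  }
  // on POSIX filesystems an atomic rename replaces any existing file in one step
  Files.move(tmp, finalFile, StandardCopyOption.ATOMIC_MOVE);
}

With this pattern readers only ever observe either the previous complete file or the new complete file, never a partial write.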
Example No. 11
  private void writeGlobalState(
      String reason, MetaData metaData, @Nullable MetaData previousMetaData) throws Exception {
    logger.trace("[_global] writing state, reason [{}]", reason);
    // create metadata to write with just the global state
    MetaData globalMetaData = MetaData.builder().metaData(metaData).removeAllIndices().build();

    XContentBuilder builder = XContentFactory.contentBuilder(format);
    builder.startObject();
    MetaData.Builder.toXContent(globalMetaData, builder, formatParams);
    builder.endObject();
    builder.flush();

    String globalFileName = "global-" + globalMetaData.version();
    Exception lastFailure = null;
    boolean wroteAtLeastOnce = false;
    for (File dataLocation : nodeEnv.nodeDataLocations()) {
      File stateLocation = new File(dataLocation, "_state");
      FileSystemUtils.mkdirs(stateLocation);
      File stateFile = new File(stateLocation, globalFileName);

      FileOutputStream fos = null;
      try {
        fos = new FileOutputStream(stateFile);
        BytesReference bytes = builder.bytes();
        fos.write(bytes.array(), bytes.arrayOffset(), bytes.length());
        fos.getChannel().force(true);
        fos.close();
        wroteAtLeastOnce = true;
      } catch (Exception e) {
        lastFailure = e;
      } finally {
        IOUtils.closeWhileHandlingException(fos);
      }
    }

    if (!wroteAtLeastOnce) {
      logger.warn("[_global]: failed to write global state", lastFailure);
      throw new IOException("failed to write global state", lastFailure);
    }

    // delete the old files
    for (File dataLocation : nodeEnv.nodeDataLocations()) {
      File[] files = new File(dataLocation, "_state").listFiles();
      if (files == null) {
        continue;
      }
      for (File file : files) {
        if (!file.getName().startsWith("global-")) {
          continue;
        }
        if (file.getName().equals(globalFileName)) {
          continue;
        }
        file.delete();
      }
    }
  }
Example No. 12
 public static String randomNodeName(URL nodeNames) {
   try {
     int numberOfNames = 0;
     try (BufferedReader reader = FileSystemUtils.newBufferedReader(nodeNames, Charsets.UTF_8)) {
       while (reader.readLine() != null) {
         numberOfNames++;
       }
     }
      if (numberOfNames == 0) {
        // guard against nextInt(0), which throws IllegalArgumentException
        return null;
      }
      try (BufferedReader reader = FileSystemUtils.newBufferedReader(nodeNames, Charsets.UTF_8)) {
        int number = ThreadLocalRandom.current().nextInt(numberOfNames);
       for (int i = 0; i < number; i++) {
         reader.readLine();
       }
       return reader.readLine();
     }
   } catch (IOException e) {
     return null;
   }
 }
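The method above reads the file twice: once to count lines and once to pick one. A single-pass alternative is reservoir sampling, where the n-th line read replaces the current pick with probability 1/n; this is a sketch, not the original implementation:

import java.io.BufferedReader;
import java.io.IOException;
import java.util.concurrent.ThreadLocalRandom;

// Picks a uniformly random line in one pass (reservoir sampling with k = 1).
static String randomLine(BufferedReader reader) throws IOException {
  String pick = null;
  String line;
  int seen = 0;
  while ((line = reader.readLine()) != null) {
    seen++;
    if (ThreadLocalRandom.current().nextInt(seen) == 0) {
      pick = line; // replace the current pick with probability 1/seen
    }
  }
  return pick; // null if the file was empty
}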
Example No. 13
 private void deleteIndex(String index) {
   logger.trace("[{}] delete index state", index);
   File[] indexLocations = nodeEnv.indexLocations(new Index(index));
   for (File indexLocation : indexLocations) {
     if (!indexLocation.exists()) {
       continue;
     }
     FileSystemUtils.deleteRecursively(new File(indexLocation, "_state"));
   }
 }
Example No. 14
 private void wipeDataDirectories() {
   if (!dataDirToClean.isEmpty()) {
     logger.info("Wipe data directory for all nodes locations: {}", this.dataDirToClean);
     try {
       FileSystemUtils.deleteRecursively(dataDirToClean.toArray(new File[dataDirToClean.size()]));
     } finally {
       this.dataDirToClean.clear();
     }
   }
 }
Example No. 15
 @Test
 public void testResolveFileResource() throws IOException {
   Environment environment = newEnvironment();
   URL url = environment.resolveConfig("org/elasticsearch/common/cli/tool.help");
   assertNotNull(url);
   try (BufferedReader reader = FileSystemUtils.newBufferedReader(url, Charsets.UTF_8)) {
     String string = Streams.copyToString(reader);
      assertEquals("tool help", string);
   }
 }
Example No. 16
 @Override
 public void run() {
   synchronized (danglingMutex) {
     DanglingIndex remove = danglingIndices.remove(index);
     // no longer there...
     if (remove == null) {
       return;
     }
     logger.info("[{}] deleting dangling index", index);
     FileSystemUtils.deleteRecursively(nodeEnv.indexLocations(new Index(index)));
   }
 }
Example No. 17
  @Before
  public void beforeTest() throws Exception {
    props = new Properties();
    props.putAll(System.getProperties());

    logger.info("Installing plugins into folder {}", PLUGIN_DIR);
    FileSystemUtils.mkdirs(PLUGIN_DIR);

    // copy plugin
    copyPlugin(new File(PLUGIN_DIR, "plugin-v1"));
    copyPlugin(new File(PLUGIN_DIR, "plugin-v2"));
  }
Example No. 18
 @Test
 public void testResolveJaredResource() throws IOException {
   Environment environment = newEnvironment();
    // this works because there is one jar in the classpath having this file
    URL url = environment.resolveConfig("META-INF/MANIFEST.MF");
   assertNotNull(url);
   try (BufferedReader reader = FileSystemUtils.newBufferedReader(url, Charsets.UTF_8)) {
     String string = Streams.copyToString(reader);
     assertTrue(string, string.contains("Manifest-Version"));
   }
 }
Example No. 19
  @SuppressForbidden(reason = "discover nested jar")
  private void discoverJars(URI libPath, List<URL> cp, boolean optional) {
    try {
      Path[] jars = FileSystemUtils.files(PathUtils.get(libPath), "*.jar");

      for (Path path : jars) {
        cp.add(path.toUri().toURL());
      }
    } catch (IOException ex) {
      if (!optional) {
        throw new IllegalStateException("Cannot compute plugin classpath", ex);
      }
    }
  }
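The collected URL list would typically feed a classloader for the plugin. A minimal sketch of that assumed usage (the surrounding code does not show it):

import java.net.URL;
import java.net.URLClassLoader;
import java.util.List;

// Builds an isolated classloader over the discovered jar URLs.
static ClassLoader pluginClassLoader(List<URL> jarUrls, ClassLoader parent) {
  return new URLClassLoader(jarUrls.toArray(new URL[0]), parent);
}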
Example No. 20
 /**
  * We check whether we need to remove a top-level folder while extracting; sometimes (e.g. with
  * GitHub archives) the downloaded zip contains a top-level folder which needs to be stripped.
  */
 private Path findPluginRoot(Path dir) throws IOException {
   if (Files.exists(dir.resolve(PluginInfo.ES_PLUGIN_PROPERTIES))) {
     return dir;
   } else {
     final Path[] topLevelFiles = FileSystemUtils.files(dir);
     if (topLevelFiles.length == 1 && Files.isDirectory(topLevelFiles[0])) {
       Path subdir = topLevelFiles[0];
       if (Files.exists(subdir.resolve(PluginInfo.ES_PLUGIN_PROPERTIES))) {
         return subdir;
       }
     }
   }
   throw new RuntimeException(
       "Could not find plugin descriptor '" + PluginInfo.ES_PLUGIN_PROPERTIES + "' in plugin zip");
 }
Example No. 21
   private void setupElasticsearchServer() throws Exception {
    logger.debug("*** setupElasticsearchServer ***");
    try {
      Tuple<Settings, Environment> initialSettings =
          InternalSettingsPreparer.prepareSettings(settings, true);
      if (!initialSettings.v2().configFile().exists()) {
        FileSystemUtils.mkdirs(initialSettings.v2().configFile());
      }

      if (!initialSettings.v2().logsFile().exists()) {
        FileSystemUtils.mkdirs(initialSettings.v2().logsFile());
      }

      if (!initialSettings.v2().pluginsFile().exists()) {
        FileSystemUtils.mkdirs(initialSettings.v2().pluginsFile());
        if (settings.getByPrefix("plugins") != null) {
          PluginManager pluginManager = new PluginManager(initialSettings.v2(), null);

          Map<String, String> plugins = settings.getByPrefix("plugins").getAsMap();
          for (String key : plugins.keySet()) {
            pluginManager.downloadAndExtract(plugins.get(key), false);
          }
        }
      } else {
        logger.info(
            "Plugin {} has already been installed.", settings.get("plugins.mapper-attachments"));
        logger.info(
            "Plugin {} has already been installed.", settings.get("plugins.lang-javascript"));
      }

      node = nodeBuilder().local(true).settings(settings).node();
    } catch (Exception ex) {
      logger.error("setupElasticsearchServer failed", ex);
      throw ex;
    }
  }
Example No. 22
 @Override
 public void run() {
   while (running) {
     estimatedTimeInMillis = System.currentTimeMillis();
     try {
       Thread.sleep(interval);
     } catch (InterruptedException e) {
       running = false;
       return;
     }
     try {
       FileSystemUtils.checkMkdirsStall(estimatedTimeInMillis);
     } catch (Exception e) {
       // ignore
     }
   }
 }
Example No. 23
 /**
  * Deletes a shard data directory. Note: this method assumes that the shard lock is acquired. This
  * method will also attempt to acquire the write locks for the shard's paths before deleting the
  * data, but this is best effort, as the lock is released before the deletion happens in order to
  * allow the folder to be deleted
  *
  * @param lock the shards lock
  * @throws IOException if an IOException occurs
  * @throws ElasticsearchException if the write.lock is not acquirable
  */
 public void deleteShardDirectoryUnderLock(ShardLock lock, IndexSettings indexSettings)
     throws IOException {
   final ShardId shardId = lock.getShardId();
   assert isShardLocked(shardId) : "shard " + shardId + " is not locked";
   final Path[] paths = availableShardPaths(shardId);
   logger.trace("acquiring locks for {}, paths: [{}]", shardId, paths);
   acquireFSLockForPaths(indexSettings, paths);
   IOUtils.rm(paths);
   if (indexSettings.hasCustomDataPath()) {
     Path customLocation = resolveCustomLocation(indexSettings, shardId);
     logger.trace("acquiring lock for {}, custom path: [{}]", shardId, customLocation);
     acquireFSLockForPaths(indexSettings, customLocation);
     logger.trace("deleting custom shard {} directory [{}]", shardId, customLocation);
     IOUtils.rm(customLocation);
   }
   logger.trace("deleted shard {} directory, paths: [{}]", shardId, paths);
   assert FileSystemUtils.exists(paths) == false;
 }
Example No. 24
 void restart(RestartCallback callback) throws Exception {
   assert callback != null;
   if (!node.isClosed()) {
     node.close();
   }
   Settings newSettings = callback.onNodeStopped(name);
   if (newSettings == null) {
     newSettings = ImmutableSettings.EMPTY;
   }
   if (callback.clearData(name)) {
     NodeEnvironment nodeEnv = getInstanceFromNode(NodeEnvironment.class, node);
     if (nodeEnv.hasNodeFile()) {
       FileSystemUtils.deleteRecursively(nodeEnv.nodeDataLocations());
     }
   }
   node = (InternalNode) nodeBuilder().settings(node.settings()).settings(newSettings).node();
   resetClient();
 }
Example No. 25
  /**
   * @return a Reader over the file configured at settingPrefix, or <code>null</code> if the
   *     setting is not present
   * @throws IllegalArgumentException If the Reader cannot be instantiated.
   */
  public static Reader getReaderFromFile(Environment env, Settings settings, String settingPrefix) {
    String filePath = settings.get(settingPrefix, null);

    if (filePath == null) {
      return null;
    }

    final Path path = env.configFile().resolve(filePath);

    try {
      return FileSystemUtils.newBufferedReader(path.toUri().toURL(), StandardCharsets.UTF_8);
    } catch (IOException ioe) {
      String message =
          String.format(
              Locale.ROOT,
              "IOException while reading %s_path: %s",
              settingPrefix,
              ioe.getMessage());
      throw new IllegalArgumentException(message);
    }
  }
Example No. 26
  @Override
  public void clusterChanged(ClusterChangedEvent event) {
    if (event.state().blocks().disableStatePersistence()) {
      // reset the current metadata, we need to start fresh...
      this.currentMetaData = null;
      return;
    }

    MetaData newMetaData = event.state().metaData();

    // delete indices that were there before, but are deleted now
    // we need to do it so they won't be detected as dangling
    if (currentMetaData != null) {
      // only delete indices when we already received a state (currentMetaData != null)
      for (IndexMetaData current : currentMetaData) {
        if (!newMetaData.hasIndex(current.index())) {
          logger.debug(
              "[{}] deleting index that is no longer part of the metadata (indices: [{}])",
              current.index(),
              newMetaData.indices().keys());
          if (nodeEnv.hasNodeFile()) {
            FileSystemUtils.deleteRecursively(nodeEnv.indexLocations(new Index(current.index())));
          }
          try {
            nodeIndexDeletedAction.nodeIndexStoreDeleted(
                event.state(), current.index(), event.state().nodes().localNodeId());
          } catch (Exception e) {
            logger.debug(
                "[{}] failed to notify master on local index store deletion", e, current.index());
          }
        }
      }
    }

    currentMetaData = newMetaData;
  }
Example No. 27
   private void testLoad(boolean fullRecovery) {
     logger.info("Running with fullRecovery [{}]", fullRecovery);

    startNode("server1");

    logger.info("Running Cluster Health (waiting for node to startup properly)");
    ClusterHealthResponse clusterHealth =
        client("server1")
            .admin()
            .cluster()
            .health(clusterHealthRequest().waitForGreenStatus())
            .actionGet();
    logger.info("Done Cluster Health, status " + clusterHealth.getStatus());
    assertThat(clusterHealth.isTimedOut(), equalTo(false));
    assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));

    // get the environment, so we can clear the work dir when needed
    Environment environment =
        ((InternalNode) node("server1")).injector().getInstance(Environment.class);

    logger.info("--> creating test index ...");
    client("server1").admin().indices().prepareCreate("test").execute().actionGet();

    logger.info("Running Cluster Health (wait for the shards to startup)");
    clusterHealth =
        client("server1")
            .admin()
            .cluster()
            .health(clusterHealthRequest().waitForYellowStatus().waitForActiveShards(1))
            .actionGet();
    logger.info("Done Cluster Health, status " + clusterHealth.getStatus());
    assertThat(clusterHealth.isTimedOut(), equalTo(false));
    assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.YELLOW));

    logger.info("--> refreshing and checking count");
    client("server1").admin().indices().prepareRefresh().execute().actionGet();
    assertThat(
        client("server1").prepareCount().setQuery(matchAllQuery()).execute().actionGet().getCount(),
        equalTo(0L));

    logger.info("--> indexing 1234 docs");
    for (long i = 0; i < 1234; i++) {
      client("server1")
          .prepareIndex("test", "type1", Long.toString(i))
          .setCreate(true) // make sure we use create, so if we recover wrongly, we will get increments...
          .setSource(MapBuilder.<String, Object>newMapBuilder().put("test", "value" + i).map())
          .execute()
          .actionGet();

      // snapshot every once in a while so we get some actions going on in the gateway
      if ((i % 11) == 0) {
        client("server1").admin().indices().prepareGatewaySnapshot().execute().actionGet();
      }
      // flush every once in a while, so we get different data
      if ((i % 55) == 0) {
        client("server1").admin().indices().prepareFlush().execute().actionGet();
      }
    }

    logger.info("--> refreshing and checking count");
    client("server1").admin().indices().prepareRefresh().execute().actionGet();
    assertThat(
        client("server1").prepareCount().setQuery(matchAllQuery()).execute().actionGet().getCount(),
        equalTo(1234L));

    logger.info("--> closing the server");
    closeNode("server1");
    if (fullRecovery) {
      logger.info("Clearing cluster data dir, so there will be a full recovery from the gateway");
      FileSystemUtils.deleteRecursively(environment.dataWithClusterFiles());
      logger.info(
          "Starting the server, should recover from the gateway (both index and translog) without reusing work dir");
    }

    startNode("server1");

    logger.info("--> running Cluster Health (wait for the shards to startup)");
    clusterHealth =
        client("server1")
            .admin()
            .cluster()
            .health(clusterHealthRequest().waitForYellowStatus().waitForActiveShards(1))
            .actionGet();
    logger.info("--> done Cluster Health, status " + clusterHealth.getStatus());
    assertThat(clusterHealth.isTimedOut(), equalTo(false));
    assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.YELLOW));

    logger.info("--> checking count");
    assertThat(
        client("server1").prepareCount().setQuery(matchAllQuery()).execute().actionGet().getCount(),
        equalTo(1234L));

    logger.info("--> checking reuse / recovery status");
    IndicesStatusResponse statusResponse =
        client("server1").admin().indices().prepareStatus().setRecovery(true).execute().actionGet();
    for (IndexShardStatus indexShardStatus : statusResponse.getIndex("test")) {
      for (ShardStatus shardStatus : indexShardStatus) {
        if (shardStatus.getShardRouting().primary()) {
          if (fullRecovery || !isPersistentStorage()) {
            assertThat(
                shardStatus.getGatewayRecoveryStatus().getReusedIndexSize().bytes(), equalTo(0L));
          } else {
            assertThat(
                shardStatus.getGatewayRecoveryStatus().getReusedIndexSize().bytes(),
                greaterThan(
                    shardStatus.getGatewayRecoveryStatus().getIndexSize().bytes()
                        - 8196 /* segments file and others */));
          }
        }
      }
    }
  }
Example No. 28
   @Test
  @Slow
  public void testSnapshotOperations() throws Exception {
    startNode("server1", getClassDefaultSettings());

    // get the environment, so we can clear the work dir when needed
    Environment environment =
        ((InternalNode) node("server1")).injector().getInstance(Environment.class);

    logger.info("Running Cluster Health (waiting for node to startup properly)");
    ClusterHealthResponse clusterHealth =
        client("server1")
            .admin()
            .cluster()
            .health(clusterHealthRequest().waitForGreenStatus())
            .actionGet();
    logger.info("Done Cluster Health, status " + clusterHealth.getStatus());
    assertThat(clusterHealth.isTimedOut(), equalTo(false));
    assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));

    // Translog tests

    logger.info("Creating index [{}]", "test");
    client("server1").admin().indices().prepareCreate("test").execute().actionGet();

    // create a mapping
    PutMappingResponse putMappingResponse =
        client("server1")
            .admin()
            .indices()
            .preparePutMapping("test")
            .setType("type1")
            .setSource(mappingSource())
            .execute()
            .actionGet();
    assertThat(putMappingResponse.isAcknowledged(), equalTo(true));

    // verify that mapping is there
    ClusterStateResponse clusterState =
        client("server1").admin().cluster().state(clusterStateRequest()).actionGet();
    assertThat(clusterState.getState().metaData().index("test").mapping("type1"), notNullValue());

    // create two and delete the first
    logger.info("Indexing #1");
    client("server1")
        .index(Requests.indexRequest("test").type("type1").id("1").source(source("1", "test")))
        .actionGet();
    logger.info("Indexing #2");
    client("server1")
        .index(Requests.indexRequest("test").type("type1").id("2").source(source("2", "test")))
        .actionGet();

    // perform snapshot to the index
    logger.info("Gateway Snapshot");
    client("server1").admin().indices().gatewaySnapshot(gatewaySnapshotRequest("test")).actionGet();

    logger.info("Deleting #1");
    client("server1").delete(deleteRequest("test").type("type1").id("1")).actionGet();

    // perform snapshot to the index
    logger.info("Gateway Snapshot");
    client("server1").admin().indices().gatewaySnapshot(gatewaySnapshotRequest("test")).actionGet();
    logger.info("Gateway Snapshot (should be a no op)");
    // do it again, it should be a no op
    client("server1").admin().indices().gatewaySnapshot(gatewaySnapshotRequest("test")).actionGet();

    logger.info("Closing the server");
    closeNode("server1");
    logger.info(
        "Starting the server, should recover from the gateway (only translog should be populated)");
    startNode("server1");

    logger.info("Running Cluster Health (wait for the shards to startup)");
    clusterHealth =
        client("server1")
            .admin()
            .cluster()
            .health(clusterHealthRequest().waitForYellowStatus().waitForActiveShards(1))
            .actionGet();
    logger.info("Done Cluster Health, status " + clusterHealth.getStatus());
    assertThat(clusterHealth.isTimedOut(), equalTo(false));
    assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.YELLOW));

    // verify that mapping is there
    clusterState = client("server1").admin().cluster().state(clusterStateRequest()).actionGet();
    assertThat(clusterState.getState().metaData().index("test").mapping("type1"), notNullValue());

    logger.info("Getting #1, should not exists");
    GetResponse getResponse =
        client("server1").get(getRequest("test").type("type1").id("1")).actionGet();
    assertThat(getResponse.isExists(), equalTo(false));
    logger.info("Getting #2");
    getResponse = client("server1").get(getRequest("test").type("type1").id("2")).actionGet();
    assertThat(getResponse.getSourceAsString(), equalTo(source("2", "test")));

    // Now flush and add some data (so we have index recovery as well)
    logger.info(
        "Flushing, so we have actual content in the index files (#2 should be in the index)");
    client("server1").admin().indices().flush(flushRequest("test")).actionGet();
    logger.info("Indexing #3, so we have something in the translog as well");
    client("server1")
        .index(Requests.indexRequest("test").type("type1").id("3").source(source("3", "test")))
        .actionGet();

    logger.info("Gateway Snapshot");
    client("server1").admin().indices().gatewaySnapshot(gatewaySnapshotRequest("test")).actionGet();
    logger.info("Gateway Snapshot (should be a no op)");
    client("server1").admin().indices().gatewaySnapshot(gatewaySnapshotRequest("test")).actionGet();

    logger.info("Closing the server");
    closeNode("server1");
    logger.info(
        "Starting the server, should recover from the gateway (both index and translog) and reuse work dir");
    startNode("server1");

    logger.info("Running Cluster Health (wait for the shards to startup)");
    clusterHealth =
        client("server1")
            .admin()
            .cluster()
            .health(clusterHealthRequest().waitForYellowStatus().waitForActiveShards(1))
            .actionGet();
    logger.info("Done Cluster Health, status " + clusterHealth.getStatus());
    assertThat(clusterHealth.isTimedOut(), equalTo(false));
    assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.YELLOW));

    logger.info("Getting #1, should not exists");
    getResponse = client("server1").get(getRequest("test").type("type1").id("1")).actionGet();
    assertThat(getResponse.isExists(), equalTo(false));
    logger.info("Getting #2 (not from the translog, but from the index)");
    getResponse = client("server1").get(getRequest("test").type("type1").id("2")).actionGet();
    assertThat(getResponse.getSourceAsString(), equalTo(source("2", "test")));
    logger.info("Getting #3 (from the translog)");
    getResponse = client("server1").get(getRequest("test").type("type1").id("3")).actionGet();
    assertThat(getResponse.getSourceAsString(), equalTo(source("3", "test")));

    logger.info("Closing the server");
    closeNode("server1");
    logger.info("Clearing cluster data dir, so there will be a full recovery from the gateway");
    FileSystemUtils.deleteRecursively(environment.dataWithClusterFiles());
    logger.info(
        "Starting the server, should recover from the gateway (both index and translog) without reusing work dir");
    startNode("server1");

    logger.info("Running Cluster Health (wait for the shards to startup)");
    clusterHealth =
        client("server1")
            .admin()
            .cluster()
            .health(clusterHealthRequest().waitForYellowStatus().waitForActiveShards(1))
            .actionGet();
    logger.info("Done Cluster Health, status " + clusterHealth.getStatus());
    assertThat(clusterHealth.isTimedOut(), equalTo(false));
    assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.YELLOW));

    logger.info("Getting #1, should not exists");
    getResponse = client("server1").get(getRequest("test").type("type1").id("1")).actionGet();
    assertThat(getResponse.isExists(), equalTo(false));
    logger.info("Getting #2 (not from the translog, but from the index)");
    getResponse = client("server1").get(getRequest("test").type("type1").id("2")).actionGet();
    assertThat(getResponse.getSourceAsString(), equalTo(source("2", "test")));
    logger.info("Getting #3 (from the translog)");
    getResponse = client("server1").get(getRequest("test").type("type1").id("3")).actionGet();
    assertThat(getResponse.getSourceAsString(), equalTo(source("3", "test")));

    logger.info(
        "Flushing, so we have actual content in the index files (#3 should be in the index now as well)");
    client("server1").admin().indices().flush(flushRequest("test")).actionGet();

    logger.info("Gateway Snapshot");
    client("server1").admin().indices().gatewaySnapshot(gatewaySnapshotRequest("test")).actionGet();
    logger.info("Gateway Snapshot (should be a no op)");
    client("server1").admin().indices().gatewaySnapshot(gatewaySnapshotRequest("test")).actionGet();

    logger.info("Closing the server");
    closeNode("server1");
    logger.info(
        "Starting the server, should recover from the gateway (just from the index, nothing in the translog)");
    startNode("server1");

    logger.info("Running Cluster Health (wait for the shards to startup)");
    clusterHealth =
        client("server1")
            .admin()
            .cluster()
            .health(clusterHealthRequest().waitForYellowStatus().waitForActiveShards(1))
            .actionGet();
    logger.info("Done Cluster Health, status " + clusterHealth.getStatus());
    assertThat(clusterHealth.isTimedOut(), equalTo(false));
    assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.YELLOW));

    logger.info("Getting #1, should not exists");
    getResponse = client("server1").get(getRequest("test").type("type1").id("1")).actionGet();
    assertThat(getResponse.isExists(), equalTo(false));
    logger.info("Getting #2 (not from the translog, but from the index)");
    getResponse = client("server1").get(getRequest("test").type("type1").id("2")).actionGet();
    assertThat(getResponse.getSourceAsString(), equalTo(source("2", "test")));
    logger.info("Getting #3 (not from the translog, but from the index)");
    getResponse = client("server1").get(getRequest("test").type("type1").id("3")).actionGet();
    assertThat(getResponse.getSourceAsString(), equalTo(source("3", "test")));

    logger.info("Deleting the index");
    client("server1").admin().indices().delete(deleteIndexRequest("test")).actionGet();
  }
Example No. 29
  private void copyBinDirectory(
      Path sourcePluginBinDirectory,
      Path destPluginBinDirectory,
      String pluginName,
      Terminal terminal)
      throws IOException {
    boolean canCopyFromSource =
        Files.exists(sourcePluginBinDirectory)
            && Files.isReadable(sourcePluginBinDirectory)
            && Files.isDirectory(sourcePluginBinDirectory);
    if (canCopyFromSource) {
      terminal.println(VERBOSE, "Found bin, moving to %s", destPluginBinDirectory.toAbsolutePath());
      if (Files.exists(destPluginBinDirectory)) {
        IOUtils.rm(destPluginBinDirectory);
      }
      try {
        Files.createDirectories(destPluginBinDirectory.getParent());
        FileSystemUtils.move(sourcePluginBinDirectory, destPluginBinDirectory);
      } catch (IOException e) {
        throw new IOException(
            "Could not move [" + sourcePluginBinDirectory + "] to [" + destPluginBinDirectory + "]",
            e);
      }
      if (Environment.getFileStore(destPluginBinDirectory)
          .supportsFileAttributeView(PosixFileAttributeView.class)) {
        final PosixFileAttributes parentDirAttributes =
            Files.getFileAttributeView(
                    destPluginBinDirectory.getParent(), PosixFileAttributeView.class)
                .readAttributes();
        // copy permissions from parent bin directory
        final Set<PosixFilePermission> filePermissions = new HashSet<>();
        for (PosixFilePermission posixFilePermission : parentDirAttributes.permissions()) {
          switch (posixFilePermission) {
            case OWNER_EXECUTE:
            case GROUP_EXECUTE:
            case OTHERS_EXECUTE:
              break;
            default:
              filePermissions.add(posixFilePermission);
          }
        }
        // add file execute permissions to existing perms, so execution will work.
        filePermissions.add(PosixFilePermission.OWNER_EXECUTE);
        filePermissions.add(PosixFilePermission.GROUP_EXECUTE);
        filePermissions.add(PosixFilePermission.OTHERS_EXECUTE);
        Files.walkFileTree(
            destPluginBinDirectory,
            new SimpleFileVisitor<Path>() {
              @Override
              public FileVisitResult visitFile(Path file, BasicFileAttributes attrs)
                  throws IOException {
                if (attrs.isRegularFile()) {
                  setPosixFileAttributes(
                      file,
                      parentDirAttributes.owner(),
                      parentDirAttributes.group(),
                      filePermissions);
                }
                return FileVisitResult.CONTINUE;
              }

              @Override
              public FileVisitResult preVisitDirectory(Path dir, BasicFileAttributes attrs)
                  throws IOException {
                setPosixFileAttributes(
                    dir,
                    parentDirAttributes.owner(),
                    parentDirAttributes.group(),
                    parentDirAttributes.permissions());
                return FileVisitResult.CONTINUE;
              }
            });
      } else {
        terminal.println(
            VERBOSE, "Skipping posix permissions - filestore doesn't support posix permission");
      }
      terminal.println(
          VERBOSE, "Installed %s into %s", pluginName, destPluginBinDirectory.toAbsolutePath());
    }
  }
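The switch above copies the parent bin directory's permission bits minus its execute bits, then re-adds execute so installed scripts can run. The same derivation as a compact standalone helper, assuming a POSIX filestore (scriptPermissionsFrom is an illustrative name):

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.attribute.PosixFilePermission;
import java.util.EnumSet;
import java.util.Set;

// Derives script permissions from a parent directory: keep its read/write bits,
// drop its execute bits, then add execute back explicitly.
static Set<PosixFilePermission> scriptPermissionsFrom(Path parentDir) throws IOException {
  Set<PosixFilePermission> perms = EnumSet.noneOf(PosixFilePermission.class);
  for (PosixFilePermission p : Files.getPosixFilePermissions(parentDir)) {
    switch (p) {
      case OWNER_EXECUTE:
      case GROUP_EXECUTE:
      case OTHERS_EXECUTE:
        break; // don't inherit the directory's execute bits
      default:
        perms.add(p);
    }
  }
  perms.add(PosixFilePermission.OWNER_EXECUTE);
  perms.add(PosixFilePermission.GROUP_EXECUTE);
  perms.add(PosixFilePermission.OTHERS_EXECUTE);
  return perms;
}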
Example No. 30
  private void extract(PluginHandle pluginHandle, Terminal terminal, Path pluginFile)
      throws IOException {
    // unzip plugin to a staging temp dir, named for the plugin
    Path tmp = Files.createTempDirectory(environment.tmpFile(), null);
    Path root = tmp.resolve(pluginHandle.name);
    unzipPlugin(pluginFile, root);

    // find the actual root (in case it's unzipped with extra directory wrapping)
    root = findPluginRoot(root);

    // read and validate the plugin descriptor
    PluginInfo info = PluginInfo.readFromProperties(root);
    terminal.println(VERBOSE, "%s", info);

    // update name in handle based on 'name' property found in descriptor file
    pluginHandle = new PluginHandle(info.getName(), pluginHandle.version, pluginHandle.user);
    final Path extractLocation = pluginHandle.extractedDir(environment);
    if (Files.exists(extractLocation)) {
      throw new IOException(
          "plugin directory "
              + extractLocation.toAbsolutePath()
              + " already exists. To update the plugin, uninstall it first using 'remove "
              + pluginHandle.name
              + "' command");
    }

    // check for jar hell before any copying
    if (info.isJvm()) {
      jarHellCheck(root, info.isIsolated());
    }

    // install plugin
    FileSystemUtils.copyDirectoryRecursively(root, extractLocation);
    terminal.println("Installed %s into %s", pluginHandle.name, extractLocation.toAbsolutePath());

    // cleanup
    tryToDeletePath(terminal, tmp, pluginFile);

    // take care of bin/ by moving and applying permissions if needed
    Path sourcePluginBinDirectory = extractLocation.resolve("bin");
    Path destPluginBinDirectory = pluginHandle.binDir(environment);
    boolean needToCopyBinDirectory = Files.exists(sourcePluginBinDirectory);
    if (needToCopyBinDirectory) {
      if (Files.exists(destPluginBinDirectory) && !Files.isDirectory(destPluginBinDirectory)) {
        tryToDeletePath(terminal, extractLocation);
        throw new IOException(
            "plugin bin directory " + destPluginBinDirectory + " is not a directory");
      }

      try {
        copyBinDirectory(
            sourcePluginBinDirectory, destPluginBinDirectory, pluginHandle.name, terminal);
      } catch (IOException e) {
        // roll back and remove any leftovers that were installed before the failure
        terminal.printError(
            "Error copying bin directory [%s] to [%s], cleaning up, reason: %s",
            sourcePluginBinDirectory, destPluginBinDirectory, ExceptionsHelper.detailedMessage(e));
        tryToDeletePath(terminal, extractLocation, pluginHandle.binDir(environment));
        throw e;
      }
    }

    Path sourceConfigDirectory = extractLocation.resolve("config");
    Path destConfigDirectory = pluginHandle.configDir(environment);
    boolean needToCopyConfigDirectory = Files.exists(sourceConfigDirectory);
    if (needToCopyConfigDirectory) {
      if (Files.exists(destConfigDirectory) && !Files.isDirectory(destConfigDirectory)) {
        tryToDeletePath(terminal, extractLocation, destPluginBinDirectory);
        throw new IOException(
            "plugin config directory " + destConfigDirectory + " is not a directory");
      }

      try {
        terminal.println(
            VERBOSE, "Found config, moving to %s", destConfigDirectory.toAbsolutePath());
        moveFilesWithoutOverwriting(sourceConfigDirectory, destConfigDirectory, ".new");

        if (Environment.getFileStore(destConfigDirectory)
            .supportsFileAttributeView(PosixFileAttributeView.class)) {
          // We copy owner, group and permissions from the parent ES_CONFIG directory, assuming
          // they were properly set depending on how es was installed in the first place: can be
          // root:elasticsearch (750) if es was installed from rpm/deb packages, or most likely
          // elasticsearch:elasticsearch if installed from tar/zip. As for permissions we don't
          // rely on umask.
          final PosixFileAttributes parentDirAttributes =
              Files.getFileAttributeView(
                      destConfigDirectory.getParent(), PosixFileAttributeView.class)
                  .readAttributes();
          // for files though, we make sure not to copy execute permissions from the parent dir and
          // leave them untouched
          final Set<PosixFilePermission> baseFilePermissions = new HashSet<>();
          for (PosixFilePermission posixFilePermission : parentDirAttributes.permissions()) {
            switch (posixFilePermission) {
              case OWNER_EXECUTE:
              case GROUP_EXECUTE:
              case OTHERS_EXECUTE:
                break;
              default:
                baseFilePermissions.add(posixFilePermission);
            }
          }
          Files.walkFileTree(
              destConfigDirectory,
              new SimpleFileVisitor<Path>() {
                @Override
                public FileVisitResult visitFile(Path file, BasicFileAttributes attrs)
                    throws IOException {
                  if (attrs.isRegularFile()) {
                    Set<PosixFilePermission> newFilePermissions =
                        new HashSet<>(baseFilePermissions);
                    Set<PosixFilePermission> currentFilePermissions =
                        Files.getPosixFilePermissions(file);
                    for (PosixFilePermission posixFilePermission : currentFilePermissions) {
                      switch (posixFilePermission) {
                        case OWNER_EXECUTE:
                        case GROUP_EXECUTE:
                        case OTHERS_EXECUTE:
                          newFilePermissions.add(posixFilePermission);
                      }
                    }
                    setPosixFileAttributes(
                        file,
                        parentDirAttributes.owner(),
                        parentDirAttributes.group(),
                        newFilePermissions);
                  }
                  return FileVisitResult.CONTINUE;
                }

                @Override
                public FileVisitResult preVisitDirectory(Path dir, BasicFileAttributes attrs)
                    throws IOException {
                  setPosixFileAttributes(
                      dir,
                      parentDirAttributes.owner(),
                      parentDirAttributes.group(),
                      parentDirAttributes.permissions());
                  return FileVisitResult.CONTINUE;
                }
              });
        } else {
          terminal.println(
              VERBOSE, "Skipping posix permissions - filestore doesn't support posix permission");
        }

        terminal.println(
            VERBOSE,
            "Installed %s into %s",
            pluginHandle.name,
            destConfigDirectory.toAbsolutePath());
      } catch (IOException e) {
        terminal.printError(
            "Error copying config directory [%s] to [%s], cleaning up, reason: %s",
            sourceConfigDirectory, destConfigDirectory, ExceptionsHelper.detailedMessage(e));
        tryToDeletePath(terminal, extractLocation, destPluginBinDirectory, destConfigDirectory);
        throw e;
      }
    }
  }
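moveFilesWithoutOverwriting, used for the config directory above, preserves user-edited files and places incoming ones alongside them with a suffix such as ".new". A hedged reconstruction of that idea; the helper below is a sketch, not the original implementation:

import java.io.IOException;
import java.nio.file.DirectoryStream;
import java.nio.file.Files;
import java.nio.file.Path;

// Moves every file from source into dest; when a file already exists in dest,
// the existing file is kept and the incoming one is written next to it with the suffix.
static void moveWithoutOverwriting(Path source, Path dest, String suffix) throws IOException {
  Files.createDirectories(dest);
  try (DirectoryStream<Path> stream = Files.newDirectoryStream(source)) {
    for (Path file : stream) {
      if (Files.isDirectory(file)) {
        // recurse into subdirectories; empty source dirs are left for the caller to clean up
        moveWithoutOverwriting(file, dest.resolve(file.getFileName().toString()), suffix);
        continue;
      }
      Path target = dest.resolve(file.getFileName().toString());
      if (Files.exists(target)) {
        target = dest.resolve(file.getFileName() + suffix);
      }
      if (!Files.exists(target)) {
        Files.move(file, target);
      }
    }
  }
}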