private void closeShard(
    String reason, ShardId sId, IndexShard indexShard, Store store, IndexEventListener listener) {
  final int shardId = sId.id();
  final Settings indexSettings = this.getIndexSettings().getSettings();
  try {
    try {
      listener.beforeIndexShardClosed(sId, indexShard, indexSettings);
    } finally {
      // this logic is tricky: we want to close the engine so we roll back the changes done to it,
      // and close the shard so no further operations are allowed on it
      if (indexShard != null) {
        try {
          // only flush if we are closed (closed index or shutdown) and if we are not deleted
          final boolean flushEngine = deleted.get() == false && closed.get();
          indexShard.close(reason, flushEngine);
        } catch (Exception e) {
          logger.debug("[{}] failed to close index shard", e, shardId);
          // ignore
        }
      }
      // call this before we close the store, so we can release resources for it
      listener.afterIndexShardClosed(sId, indexShard, indexSettings);
    }
  } finally {
    try {
      store.close();
    } catch (Exception e) {
      logger.warn(
          "[{}] failed to close store on shard removal (reason: [{}])", e, shardId, reason);
    }
  }
}
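// Hedged sketch, not verbatim from this class: closeShard is typically reached from a
// removeShard-style caller that first unpublishes the shard from the shards map and then hands
// it off for closing. The method name, the log messages, Collections.unmodifiableMap (assumed
// imported), and indexShard.store() returning the Store created in createShard below are all
// illustrative assumptions; only the closeShard signature above and the shards map used
// elsewhere in this class are taken as given.
public synchronized void removeShard(int shardId, String reason) {
  final IndexShard indexShard = shards.get(shardId);
  if (indexShard == null) {
    return; // shard was never registered here or has already been removed
  }
  logger.debug("[{}] closing... (reason: [{}])", shardId, reason);
  // unpublish the shard before closing it so no new operations can find it
  HashMap<Integer, IndexShard> newShards = new HashMap<>(shards);
  newShards.remove(shardId);
  shards = Collections.unmodifiableMap(newShards);
  closeShard(reason, indexShard.shardId(), indexShard, indexShard.store(), eventListener);
  logger.debug("[{}] closed (reason: [{}])", shardId, reason);
}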
private void onShardClose(ShardLock lock, boolean ownsShard) {
  if (deleted.get()) { // we only remove the shard's content if this index has been deleted
    try {
      if (ownsShard) {
        try {
          eventListener.beforeIndexShardDeleted(lock.getShardId(), indexSettings.getSettings());
        } finally {
          shardStoreDeleter.deleteShardStore("delete index", lock, indexSettings);
          eventListener.afterIndexShardDeleted(lock.getShardId(), indexSettings.getSettings());
        }
      }
    } catch (IOException e) {
      shardStoreDeleter.addPendingDelete(lock.getShardId(), indexSettings);
      logger.debug(
          "[{}] failed to delete shard content - scheduled a retry", e, lock.getShardId().id());
    }
  }
}
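// Hedged sketch of the StoreCloseListener wired up in createShard below; the exact shape of
// Store.OnClose is an assumption, not confirmed code. The idea it illustrates: once the last
// reference to the reference-counted Store is released, the store hands its ShardLock to this
// callback, which forwards it to onShardClose above (deleting the contents if this node owns a
// deleted shard) and then closes any extra resources, such as the
// () -> eventListener.onStoreClosed(shardId) lambda passed in from createShard.
private class StoreCloseListener implements Store.OnClose {
  private final boolean ownsShard;
  private final Closeable[] onClose;

  StoreCloseListener(ShardId shardId, boolean ownsShard, Closeable... onClose) {
    // shardId is accepted for symmetry with the call site; this sketch only needs ownsShard
    this.ownsShard = ownsShard;
    this.onClose = onClose;
  }

  @Override
  public void handle(ShardLock lock) {
    try {
      onShardClose(lock, ownsShard);
    } finally {
      try {
        IOUtils.close(onClose);
      } catch (IOException ex) {
        logger.debug("failed to close resource", ex);
      }
    }
  }
}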
public synchronized IndexShard createShard(ShardRouting routing) throws IOException {
  final boolean primary = routing.primary();
  /*
   * TODO: we execute this in parallel but it's a synced method. Yet, we might
   * be able to serialize the execution via the cluster state in the future. For now we just
   * keep it synced.
   */
  if (closed.get()) {
    throw new IllegalStateException("Can't create shard " + routing.shardId() + ", closed");
  }
  final Settings indexSettings = this.indexSettings.getSettings();
  final ShardId shardId = routing.shardId();
  boolean success = false;
  Store store = null;
  IndexShard indexShard = null;
  ShardLock lock = null;
  try {
    lock = nodeEnv.shardLock(shardId, TimeUnit.SECONDS.toMillis(5));
    eventListener.beforeIndexShardCreated(shardId, indexSettings);
    ShardPath path;
    try {
      path = ShardPath.loadShardPath(logger, nodeEnv, shardId, this.indexSettings);
    } catch (IllegalStateException ex) {
      logger.warn("{} failed to load shard path, trying to remove leftover", shardId);
      try {
        ShardPath.deleteLeftoverShardDirectory(logger, nodeEnv, lock, this.indexSettings);
        path = ShardPath.loadShardPath(logger, nodeEnv, shardId, this.indexSettings);
      } catch (Exception inner) {
        ex.addSuppressed(inner);
        throw ex;
      }
    }

    if (path == null) {
      // TODO: we should, instead, hold a "bytes reserved" of how large we anticipate this
      // shard will be, e.g. for a shard that's being relocated/replicated we know how large
      // it will become once it's done copying.
      // Count up how many shards are currently on each data path:
      Map<Path, Integer> dataPathToShardCount = new HashMap<>();
      for (IndexShard shard : this) {
        Path dataPath = shard.shardPath().getRootStatePath();
        Integer curCount = dataPathToShardCount.get(dataPath);
        if (curCount == null) {
          curCount = 0;
        }
        dataPathToShardCount.put(dataPath, curCount + 1);
      }
      path =
          ShardPath.selectNewPathForShard(
              nodeEnv,
              shardId,
              this.indexSettings,
              routing.getExpectedShardSize() == ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE
                  ? getAvgShardSizeInBytes()
                  : routing.getExpectedShardSize(),
              dataPathToShardCount);
      logger.debug("{} creating using a new path [{}]", shardId, path);
    } else {
      logger.debug("{} creating using an existing path [{}]", shardId, path);
    }

    if (shards.containsKey(shardId.id())) {
      throw new IndexShardAlreadyExistsException(shardId + " already exists");
    }

    logger.debug("creating shard_id {}", shardId);
    // if we are on a shared FS we only own the shard (i.e. we can safely delete it) if we are
    // the primary.
    final boolean canDeleteShardContent =
        IndexMetaData.isOnSharedFilesystem(indexSettings) == false
            || (primary && IndexMetaData.isOnSharedFilesystem(indexSettings));
    final Engine.Warmer engineWarmer =
        (searcher) -> {
          IndexShard shard = getShardOrNull(shardId.getId());
          if (shard != null) {
            warmer.warm(searcher, shard, IndexService.this.indexSettings);
          }
        };
    store =
        new Store(
            shardId,
            this.indexSettings,
            indexStore.newDirectoryService(path),
            lock,
            new StoreCloseListener(
                shardId, canDeleteShardContent, () -> eventListener.onStoreClosed(shardId)));
    if (useShadowEngine(primary, indexSettings)) {
      indexShard =
          new ShadowIndexShard(
              routing,
              this.indexSettings,
              path,
              store,
              indexCache,
              mapperService,
              similarityService,
              indexFieldData,
              engineFactory,
              eventListener,
              searcherWrapper,
              threadPool,
              bigArrays,
              engineWarmer,
              searchOperationListeners); // no indexing listeners - shadow engines don't index
    } else {
      indexShard =
          new IndexShard(
              routing,
              this.indexSettings,
              path,
              store,
              indexCache,
              mapperService,
              similarityService,
              indexFieldData,
              engineFactory,
              eventListener,
              searcherWrapper,
              threadPool,
              bigArrays,
              engineWarmer,
              searchOperationListeners,
              indexingOperationListeners);
    }
    eventListener.indexShardStateChanged(indexShard, null, indexShard.state(), "shard created");
    eventListener.afterIndexShardCreated(indexShard);
    shards = newMapBuilder(shards).put(shardId.id(), indexShard).immutableMap();
    success = true;
    return indexShard;
  } catch (ShardLockObtainFailedException e) {
    throw new IOException("failed to obtain in-memory shard lock", e);
  } finally {
    if (success == false) {
      if (lock != null) {
        IOUtils.closeWhileHandlingException(lock);
      }
      closeShard("initialization failed", shardId, indexShard, store, eventListener);
    }
  }
}
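// Hedged sketch of the getAvgShardSizeInBytes() fallback used above when the routing carries no
// expected shard size (ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE). A plausible implementation
// averages the on-disk size of the shards already registered with this index service; the
// indexShard.store().stats().sizeInBytes() chain is an assumption about the Store API rather
// than confirmed code.
public long getAvgShardSizeInBytes() throws IOException {
  long sum = 0;
  int count = 0;
  for (IndexShard indexShard : this) {
    sum += indexShard.store().stats().sizeInBytes();
    count++;
  }
  if (count == 0) {
    return -1L; // no shards registered yet, so there is nothing to average
  }
  return sum / count;
}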