@Inject
public LocalGatewayMetaState(Settings settings, ThreadPool threadPool, NodeEnvironment nodeEnv,
                             TransportNodesListGatewayMetaState nodesListGatewayMetaState,
                             LocalAllocateDangledIndices allocateDangledIndices,
                             NodeIndexDeletedAction nodeIndexDeletedAction) throws Exception {
    super(settings);
    this.nodeEnv = nodeEnv;
    this.threadPool = threadPool;
    this.format = XContentType.fromRestContentType(settings.get("format", "smile"));
    this.allocateDangledIndices = allocateDangledIndices;
    this.nodeIndexDeletedAction = nodeIndexDeletedAction;
    nodesListGatewayMetaState.init(this);

    // pre-build the ToXContent params used when serializing state; SMILE is a binary format
    if (this.format == XContentType.SMILE) {
        Map<String, String> params = Maps.newHashMap();
        params.put("binary", "true");
        formatParams = new ToXContent.MapParams(params);
        Map<String, String> globalOnlyParams = Maps.newHashMap();
        globalOnlyParams.put("binary", "true");
        globalOnlyParams.put(MetaData.PERSISTENT_ONLY_PARAM, "true");
        globalOnlyParams.put(MetaData.GLOBAL_ONLY_PARAM, "true");
        globalOnlyFormatParams = new ToXContent.MapParams(globalOnlyParams);
    } else {
        formatParams = ToXContent.EMPTY_PARAMS;
        Map<String, String> globalOnlyParams = Maps.newHashMap();
        globalOnlyParams.put(MetaData.PERSISTENT_ONLY_PARAM, "true");
        globalOnlyParams.put(MetaData.GLOBAL_ONLY_PARAM, "true");
        globalOnlyFormatParams = new ToXContent.MapParams(globalOnlyParams);
    }

    this.autoImportDangled = AutoImportDangledState.fromString(
            settings.get("gateway.local.auto_import_dangled", AutoImportDangledState.YES.toString()));
    this.danglingTimeout = settings.getAsTime("gateway.local.dangling_timeout", TimeValue.timeValueHours(2));

    logger.debug("using gateway.local.auto_import_dangled [{}], with gateway.local.dangling_timeout [{}]",
            this.autoImportDangled, this.danglingTimeout);

    // only master eligible nodes load the on-disk state on startup
    if (DiscoveryNode.masterNode(settings)) {
        try {
            pre019Upgrade(); // upgrade the on-disk state layout from pre-0.19 versions if needed
            long start = System.currentTimeMillis();
            loadState();
            logger.debug("took {} to load state", TimeValue.timeValueMillis(System.currentTimeMillis() - start));
        } catch (Exception e) {
            logger.error("failed to read local state, exiting...", e);
            throw e;
        }
    }
}
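/**
 * Applies cluster state changes to the locally persisted state: on the master,
 * writes the global state and any index state whose version changed; on any node
 * with a node file, deletes index directories that are no longer part of the
 * metadata; and handles "dangling" indices that exist on the local file system
 * but not in the cluster metadata, scheduling them for deletion after
 * gateway.local.dangling_timeout and/or importing them back into the cluster
 * state depending on gateway.local.auto_import_dangled.
 */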
@Override
public void clusterChanged(ClusterChangedEvent event) {
    if (event.state().blocks().disableStatePersistence()) {
        // reset the current metadata, we need to start fresh...
        this.currentMetaData = null;
        return;
    }

    MetaData newMetaData = event.state().metaData();
    // we don't check if metaData changed, since we might be called several times
    // and we need to check dangling...

    boolean success = true;
    // only applied on the master node, writing the global and index level states
    if (event.state().nodes().localNode().masterNode()) {
        // check if the global state changed?
        if (currentMetaData == null || !MetaData.isGlobalStateEquals(currentMetaData, newMetaData)) {
            try {
                writeGlobalState("changed", newMetaData, currentMetaData);
            } catch (Exception e) {
                success = false;
            }
        }

        // check and write changes in indices
        for (IndexMetaData indexMetaData : newMetaData) {
            String writeReason = null;
            IndexMetaData currentIndexMetaData;
            if (currentMetaData == null) {
                // a new event..., check from the state stored
                currentIndexMetaData = loadIndex(indexMetaData.index());
            } else {
                currentIndexMetaData = currentMetaData.index(indexMetaData.index());
            }
            if (currentIndexMetaData == null) {
                writeReason = "freshly created";
            } else if (currentIndexMetaData.version() != indexMetaData.version()) {
                writeReason = "version changed from [" + currentIndexMetaData.version() + "] to [" + indexMetaData.version() + "]";
            }

            // we set the writeReason only if we really need to write it
            if (writeReason == null) {
                continue;
            }

            try {
                writeIndex(writeReason, indexMetaData, currentIndexMetaData);
            } catch (Exception e) {
                success = false;
            }
        }
    }

    // delete indices that were there before, but are deleted now
    // we need to do it so they won't be detected as dangling
    if (nodeEnv.hasNodeFile()) {
        if (currentMetaData != null) {
            // only delete indices when we already received a state (currentMetaData != null)
            // and we had a go at processing dangling indices at least once
            // this will also delete the _state of the index itself
            for (IndexMetaData current : currentMetaData) {
                if (danglingIndices.containsKey(current.index())) {
                    continue;
                }
                if (!newMetaData.hasIndex(current.index())) {
                    logger.debug("[{}] deleting index that is no longer part of the metadata (indices: [{}])",
                            current.index(), newMetaData.indices().keySet());
                    FileSystemUtils.deleteRecursively(nodeEnv.indexLocations(new Index(current.index())));
                }
            }
        }
    }

    // handle dangling indices, we handle those for all nodes that have a node file (data or master)
    if (nodeEnv.hasNodeFile()) {
        if (danglingTimeout.millis() >= 0) {
            synchronized (danglingMutex) {
                for (String danglingIndex : danglingIndices.keySet()) {
                    if (newMetaData.hasIndex(danglingIndex)) {
                        logger.debug("[{}] no longer dangling (created), removing", danglingIndex);
                        DanglingIndex removed = danglingIndices.remove(danglingIndex);
                        removed.future.cancel(false);
                    }
                }
                // delete indices that are no longer part of the metadata
                try {
                    for (String indexName : nodeEnv.findAllIndices()) {
                        // if we have the index in the metadata, don't delete it
                        if (newMetaData.hasIndex(indexName)) {
                            continue;
                        }
                        if (danglingIndices.containsKey(indexName)) {
                            // already dangling, continue
                            continue;
                        }
                        IndexMetaData indexMetaData = loadIndex(indexName);
                        if (indexMetaData != null) {
                            if (danglingTimeout.millis() == 0) {
                                logger.info("[{}] dangling index, exists on local file system, but not in cluster metadata, timeout set to 0, deleting now", indexName);
                                FileSystemUtils.deleteRecursively(nodeEnv.indexLocations(new Index(indexName)));
                            } else {
                                logger.info("[{}] dangling index, exists on local file system, but not in cluster metadata, scheduling to delete in [{}], auto import to cluster state [{}]",
                                        indexName, danglingTimeout, autoImportDangled);
                                danglingIndices.put(indexName,
                                        new DanglingIndex(indexName,
                                                threadPool.schedule(danglingTimeout, ThreadPool.Names.SAME, new RemoveDanglingIndex(indexName))));
                            }
                        }
                    }
                } catch (Exception e) {
                    logger.warn("failed to find dangling indices", e);
                }
            }
        }
        if (autoImportDangled.shouldImport() && !danglingIndices.isEmpty()) {
            final List<IndexMetaData> dangled = Lists.newArrayList();
            for (String indexName : danglingIndices.keySet()) {
                IndexMetaData indexMetaData = loadIndex(indexName);
                if (indexMetaData == null) {
                    logger.debug("failed to find state for dangling index [{}]", indexName);
                    continue;
                }
                // we might have someone copying over an index, renaming the directory, handle that
                if (!indexMetaData.index().equals(indexName)) {
                    logger.info("dangled index directory name is [{}], state name is [{}], renaming to directory name",
                            indexName, indexMetaData.index());
                    indexMetaData = IndexMetaData.newIndexMetaDataBuilder(indexMetaData).index(indexName).build();
                }
                if (autoImportDangled == AutoImportDangledState.CLOSED) {
                    indexMetaData = IndexMetaData.newIndexMetaDataBuilder(indexMetaData).state(IndexMetaData.State.CLOSE).build();
                }
                dangled.add(indexMetaData);
            }
            IndexMetaData[] dangledIndices = dangled.toArray(new IndexMetaData[dangled.size()]);
            try {
                allocateDangledIndices.allocateDangled(dangledIndices, new LocalAllocateDangledIndices.Listener() {
                    @Override
                    public void onResponse(LocalAllocateDangledIndices.AllocateDangledResponse response) {
                        logger.trace("allocated dangled");
                    }

                    @Override
                    public void onFailure(Throwable e) {
                        logger.info("failed to send allocated dangled", e);
                    }
                });
            } catch (Exception e) {
                logger.warn("failed to send allocate dangled", e);
            }
        }
    }

    if (success) {
        currentMetaData = newMetaData;
    }
}