public CoreDescriptor getCoreDescriptor(String coreName) { // TODO make this less hideous! for (CoreDescriptor cd : getCoreDescriptors()) { if (cd.getName().equals(coreName)) return cd; } return null; }
/**
 * Recreates a SolrCore. While the replacement core is loading, requests continue to be
 * dispatched to and processed by the existing core.
 *
 * @param name the name of the SolrCore to reload
 * @throws SolrException if no core with that name exists, or if the reload fails
 */
public void reload(String name) {
  SolrCore existing = solrCores.getCoreFromAnyList(name, false);
  if (existing == null) {
    throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "No such core: " + name);
  }

  CoreDescriptor descriptor = existing.getCoreDescriptor();
  try {
    // Block concurrent create/close operations on this name while the cores are swapped.
    solrCores.waitAddPendingCoreOps(name);
    ConfigSet config = coreConfigService.getConfig(descriptor);
    log.info(
        "Reloading SolrCore '{}' using configuration from {}",
        descriptor.getName(),
        config.getName());
    registerCore(name, existing.reload(config), false);
  } catch (SolrCoreState.CoreIsClosedException e) {
    // The core was closed out from under us; surface that condition unchanged.
    throw e;
  } catch (Exception e) {
    coreInitFailures.put(descriptor.getName(), new CoreLoadFailure(descriptor, e));
    throw new SolrException(
        ErrorCode.SERVER_ERROR, "Unable to reload core [" + descriptor.getName() + "]", e);
  } finally {
    solrCores.removeFromPendingOps(name);
  }
}
@Override public String getDataHome(CoreDescriptor cd) throws IOException { if (hdfsDataDir == null) { throw new SolrException( ErrorCode.SERVER_ERROR, "You must set the " + this.getClass().getSimpleName() + " param " + HDFS_HOME + " for relative dataDir paths to work"); } // by default, we go off the instance directory String path; if (cd.getCloudDescriptor() != null) { path = URLEncoder.encode(cd.getCloudDescriptor().getCollectionName(), "UTF-8") + "/" + URLEncoder.encode(cd.getCloudDescriptor().getCoreNodeName(), "UTF-8"); } else { path = cd.getName(); } return normalize( SolrResourceLoader.normalizeDir( ZkController.trimLeadingAndTrailingSlashes(hdfsDataDir) + "/" + path + "/" + cd.getDataDir())); }
protected SolrCore registerCore(String name, SolrCore core, boolean registerInZk) { if (core == null) { throw new RuntimeException("Can not register a null core."); } // We can register a core when creating them via the admin UI, so we need to ensure that the // dynamic descriptors // are up to date CoreDescriptor cd = core.getCoreDescriptor(); if ((cd.isTransient() || !cd.isLoadOnStartup()) && solrCores.getDynamicDescriptor(name) == null) { // Store it away for later use. includes non-transient but not // loaded at startup cores. solrCores.putDynamicDescriptor(name, cd); } SolrCore old; if (isShutDown) { core.close(); throw new IllegalStateException("This CoreContainer has been closed"); } if (cd.isTransient()) { old = solrCores.putTransientCore(cfg, name, core, loader); } else { old = solrCores.putCore(name, core); } /* * set both the name of the descriptor and the name of the * core, since the descriptors name is used for persisting. */ core.setName(name); coreInitFailures.remove(name); if (old == null || old == core) { log.info("registering core: " + name); if (registerInZk) { zkSys.registerInZk(core, false); } return null; } else { log.info("replacing core: " + name); old.close(); if (registerInZk) { zkSys.registerInZk(core, false); } return old; } }
/**
 * Verifies that no two descriptors in the list share a core name.
 *
 * @param cds the discovered core descriptors to validate
 * @throws SolrException if two descriptors carry the same core name
 */
private static void checkForDuplicateCoreNames(List<CoreDescriptor> cds) {
  Map<String, Path> seen = Maps.newHashMap();
  for (CoreDescriptor cd : cds) {
    String name = cd.getName();
    // put() returns the previous mapping, which doubles as the duplicate check
    Path previous = seen.put(name, cd.getInstanceDir());
    if (previous != null) {
      throw new SolrException(
          ErrorCode.SERVER_ERROR,
          String.format(
              Locale.ROOT,
              "Found multiple cores with the name [%s], with instancedirs [%s] and [%s]",
              name,
              previous,
              cd.getInstanceDir()));
    }
  }
}
/** * Creates a new core based on a CoreDescriptor. * * @param dcore a core descriptor * @param publishState publish core state to the cluster if true * @return the newly created core */ private SolrCore create(CoreDescriptor dcore, boolean publishState) { if (isShutDown) { throw new SolrException(ErrorCode.SERVICE_UNAVAILABLE, "Solr has been shutdown."); } SolrCore core = null; try { MDCLoggingContext.setCore(core); SolrIdentifierValidator.validateCoreName(dcore.getName()); if (zkSys.getZkController() != null) { zkSys.getZkController().preRegister(dcore); } ConfigSet coreConfig = coreConfigService.getConfig(dcore); log.info( "Creating SolrCore '{}' using configuration from {}", dcore.getName(), coreConfig.getName()); core = new SolrCore(dcore, coreConfig); // always kick off recovery if we are in non-Cloud mode if (!isZooKeeperAware() && core.getUpdateHandler().getUpdateLog() != null) { core.getUpdateHandler().getUpdateLog().recoverFromLog(); } registerCore(dcore.getName(), core, publishState); return core; } catch (Exception e) { coreInitFailures.put(dcore.getName(), new CoreLoadFailure(dcore, e)); log.error("Error creating core [{}]: {}", dcore.getName(), e.getMessage(), e); final SolrException solrException = new SolrException( ErrorCode.SERVER_ERROR, "Unable to create core [" + dcore.getName() + "]", e); if (core != null && !core.isClosed()) IOUtils.closeQuietly(core); throw solrException; } catch (Throwable t) { SolrException e = new SolrException( ErrorCode.SERVER_ERROR, "JVM Error creating core [" + dcore.getName() + "]: " + t.getMessage(), t); log.error("Error creating core [{}]: {}", dcore.getName(), t.getMessage(), t); coreInitFailures.put(dcore.getName(), new CoreLoadFailure(dcore, e)); if (core != null && !core.isClosed()) IOUtils.closeQuietly(core); throw t; } finally { MDCLoggingContext.clear(); } }
/**
 * Handle 'CREATE' action.
 *
 * <p>Builds a {@link CoreDescriptor} from the request parameters, applies any optional
 * config/schema/dataDir overrides and all {@code property.*} request parameters as core
 * properties, then creates and registers the core with the container.
 *
 * @param req the admin request carrying the CREATE parameters
 * @param rsp the response to which the new core's name is added under the key "core"
 * @return true if a modification has resulted that requires persistence of the CoreContainer
 *     configuration.
 * @throws SolrException in case of a configuration error.
 */
protected boolean handleCreateAction(SolrQueryRequest req, SolrQueryResponse rsp)
    throws SolrException {
  try {
    SolrParams params = req.getParams();
    String name = params.get(CoreAdminParams.NAME);
    CoreDescriptor dcore =
        new CoreDescriptor(coreContainer, name, params.get(CoreAdminParams.INSTANCE_DIR));

    // fillup optional parameters
    String opts = params.get(CoreAdminParams.CONFIG);
    if (opts != null) dcore.setConfigName(opts);

    opts = params.get(CoreAdminParams.SCHEMA);
    if (opts != null) dcore.setSchemaName(opts);

    opts = params.get(CoreAdminParams.DATA_DIR);
    if (opts != null) dcore.setDataDir(opts);

    // Process all property.name=value parameters and set them as name=value core properties
    Properties coreProperties = new Properties();
    Iterator<String> parameterNamesIterator = params.getParameterNamesIterator();
    while (parameterNamesIterator.hasNext()) {
      String parameterName = parameterNamesIterator.next();
      if (parameterName.startsWith(CoreAdminParams.PROPERTY_PREFIX)) {
        String parameterValue = params.get(parameterName);
        String propertyName =
            parameterName.substring(CoreAdminParams.PROPERTY_PREFIX.length()); // skip prefix
        coreProperties.put(propertyName, parameterValue);
      }
    }
    dcore.setCoreProperties(coreProperties);

    SolrCore core = coreContainer.create(dcore);
    coreContainer.register(name, core, false);
    rsp.add("core", core.getName());
    return coreContainer.isPersistent();
  } catch (Exception ex) {
    // NOTE(review): every failure — including server-side errors during core creation — is
    // reported to the client as BAD_REQUEST here; consider letting SolrExceptions propagate
    // with their original error codes.
    throw new SolrException(
        SolrException.ErrorCode.BAD_REQUEST,
        "Error executing default implementation of CREATE",
        ex);
  }
}
/** * Creates a new core in a specified instance directory, publishing the core state to the cluster * * @param coreName the core name * @param instancePath the instance directory * @param parameters the core parameters * @return the newly created core */ public SolrCore create(String coreName, Path instancePath, Map<String, String> parameters) { CoreDescriptor cd = new CoreDescriptor(this, coreName, instancePath, parameters); // TODO: There's a race here, isn't there? if (getAllCoreNames().contains(coreName)) { log.warn("Creating a core with existing name is not allowed"); // TODO: Shouldn't this be a BAD_REQUEST? throw new SolrException( ErrorCode.SERVER_ERROR, "Core with name '" + coreName + "' already exists."); } boolean preExisitingZkEntry = false; try { if (getZkController() != null) { if (!Overseer.isLegacy(getZkController().getZkStateReader())) { if (cd.getCloudDescriptor().getCoreNodeName() == null) { throw new SolrException( ErrorCode.SERVER_ERROR, "non legacy mode coreNodeName missing " + parameters.toString()); } } preExisitingZkEntry = getZkController().checkIfCoreNodeNameAlreadyExists(cd); } SolrCore core = create(cd, true); // only write out the descriptor if the core is successfully created coresLocator.create(this, cd); return core; } catch (Exception ex) { if (isZooKeeperAware() && !preExisitingZkEntry) { try { getZkController().unregister(coreName, cd); } catch (InterruptedException e) { Thread.currentThread().interrupt(); SolrException.log(log, null, e); } catch (KeeperException e) { SolrException.log(log, null, e); } } Throwable tc = ex; Throwable c = null; do { tc = tc.getCause(); if (tc != null) { c = tc; } } while (tc != null); String rootMsg = ""; if (c != null) { rootMsg = " Caused by: " + c.getMessage(); } throw new SolrException( SolrException.ErrorCode.BAD_REQUEST, "Error CREATEing SolrCore '" + coreName + "': " + ex.getMessage() + rootMsg, ex); } }
/**
 * Load the cores defined for this CoreContainer.
 *
 * <p>Startup sequence: extend the shared classloader with sharedLib, construct the container
 * services (shard handlers, transient-core cache, log watcher, ZooKeeper integration, security
 * plugins, admin handlers), then discover core descriptors and load the startup cores on a
 * parallel executor. In async mode, waiting on the load futures is handed off to the
 * container's work executor so this method can return promptly.
 */
public void load() {
  log.info("Loading cores into CoreContainer [instanceDir={}]", loader.getInstancePath());

  // add the sharedLib to the shared resource loader before initializing cfg based plugins
  String libDir = cfg.getSharedLibDirectory();
  if (libDir != null) {
    Path libPath = loader.getInstancePath().resolve(libDir);
    try {
      loader.addToClassLoader(SolrResourceLoader.getURLs(libPath));
      loader.reloadLuceneSPI();
    } catch (IOException e) {
      // best-effort: a missing or unreadable sharedLib does not abort container startup
      log.warn("Couldn't add files from {} to classpath: {}", libPath, e.getMessage());
    }
  }

  shardHandlerFactory =
      ShardHandlerFactory.newInstance(cfg.getShardHandlerFactoryPluginInfo(), loader);

  updateShardHandler = new UpdateShardHandler(cfg.getUpdateShardHandlerConfig());

  // sizes the cache used for transient (lazily loaded/unloaded) cores
  solrCores.allocateLazyCores(cfg.getTransientCacheSize(), loader);

  logging = LogWatcher.newRegisteredLogWatcher(cfg.getLogWatcherConfig(), loader);

  hostName = cfg.getNodeName();

  zkSys.initZooKeeper(this, solrHome, cfg.getCloudConfig());
  // PKI auth is only meaningful for inter-node requests, i.e. in cloud mode
  if (isZooKeeperAware())
    pkiAuthenticationPlugin =
        new PKIAuthenticationPlugin(this, zkSys.getZkController().getNodeName());

  // In standalone mode there is no ZK security config; fall back to an empty config
  ZkStateReader.ConfigData securityConfig =
      isZooKeeperAware()
          ? getZkController().getZkStateReader().getSecurityProps(false)
          : new ZkStateReader.ConfigData(EMPTY_MAP, -1);
  initializeAuthorizationPlugin((Map<String, Object>) securityConfig.data.get("authorization"));
  initializeAuthenticationPlugin((Map<String, Object>) securityConfig.data.get("authentication"));

  this.backupRepoFactory = new BackupRepositoryFactory(cfg.getBackupRepositoryPlugins());

  // Register the container-level (non-core) request handlers at their admin paths.
  containerHandlers.put(ZK_PATH, new ZookeeperInfoHandler(this));
  securityConfHandler = new SecurityConfHandler(this);
  collectionsHandler = createHandler(cfg.getCollectionsHandlerClass(), CollectionsHandler.class);
  containerHandlers.put(COLLECTIONS_HANDLER_PATH, collectionsHandler);
  infoHandler = createHandler(cfg.getInfoHandlerClass(), InfoHandler.class);
  containerHandlers.put(INFO_HANDLER_PATH, infoHandler);
  coreAdminHandler = createHandler(cfg.getCoreAdminHandlerClass(), CoreAdminHandler.class);
  containerHandlers.put(CORES_HANDLER_PATH, coreAdminHandler);
  configSetsHandler = createHandler(cfg.getConfigSetsHandlerClass(), ConfigSetsHandler.class);
  containerHandlers.put(CONFIGSETS_HANDLER_PATH, configSetsHandler);
  // the same SecurityConfHandler serves both the authorization and authentication endpoints
  containerHandlers.put(AUTHZ_PATH, securityConfHandler);
  containerHandlers.put(AUTHC_PATH, securityConfHandler);
  if (pkiAuthenticationPlugin != null)
    containerHandlers.put(
        PKIAuthenticationPlugin.PATH, pkiAuthenticationPlugin.getRequestHandler());

  coreConfigService = ConfigSetService.createConfigSetService(cfg, loader, zkSys.zkController);

  containerProperties.putAll(cfg.getSolrProperties());

  // setup executor to load cores in parallel
  ExecutorService coreLoadExecutor =
      ExecutorUtil.newMDCAwareFixedThreadPool(
          cfg.getCoreLoadThreadCount(
              isZooKeeperAware()
                  ? DEFAULT_CORE_LOAD_THREADS_IN_CLOUD
                  : DEFAULT_CORE_LOAD_THREADS),
          new DefaultSolrThreadFactory("coreLoadExecutor"));
  final List<Future<SolrCore>> futures = new ArrayList<>();
  try {
    List<CoreDescriptor> cds = coresLocator.discover(this);
    if (isZooKeeperAware()) {
      // sort the cores if it is in SolrCloud. In standalone node the order does not matter
      CoreSorter coreComparator = new CoreSorter().init(this);
      cds = new ArrayList<>(cds); // make a copy
      Collections.sort(cds, coreComparator::compare);
    }
    checkForDuplicateCoreNames(cds);

    for (final CoreDescriptor cd : cds) {
      if (cd.isTransient() || !cd.isLoadOnStartup()) {
        // not loaded now; remember the descriptor so the core can be created on demand
        solrCores.putDynamicDescriptor(cd.getName(), cd);
      } else if (asyncSolrCoreLoad) {
        solrCores.markCoreAsLoading(cd);
      }
      if (cd.isLoadOnStartup()) {
        futures.add(
            coreLoadExecutor.submit(
                () -> {
                  SolrCore core;
                  try {
                    if (zkSys.getZkController() != null) {
                      zkSys.getZkController().throwErrorIfReplicaReplaced(cd);
                    }
                    core = create(cd, false);
                  } finally {
                    // clear the loading flag whether creation succeeded or failed
                    if (asyncSolrCoreLoad) {
                      solrCores.markCoreAsNotLoading(cd);
                    }
                  }
                  try {
                    zkSys.registerInZk(core, true);
                  } catch (RuntimeException e) {
                    // registration failure is logged but does not fail the core load
                    SolrException.log(log, "Error registering SolrCore", e);
                  }
                  return core;
                }));
      }
    }

    // Start the background thread
    backgroundCloser = new CloserThread(this, solrCores, cfg);
    backgroundCloser.start();

  } finally {
    // NOTE(review): 'futures != null' is always true here — futures is assigned above and
    // never reassigned; the null check is redundant.
    if (asyncSolrCoreLoad && futures != null) {
      // async mode: hand off waiting (and executor shutdown) so load() can return now
      coreContainerWorkExecutor.submit(
          (Runnable)
              () -> {
                try {
                  for (Future<SolrCore> future : futures) {
                    try {
                      future.get();
                    } catch (InterruptedException e) {
                      Thread.currentThread().interrupt();
                    } catch (ExecutionException e) {
                      log.error("Error waiting for SolrCore to be created", e);
                    }
                  }
                } finally {
                  ExecutorUtil.shutdownAndAwaitTermination(coreLoadExecutor);
                }
              });
    } else {
      // sync mode: block until every startup core has finished loading
      ExecutorUtil.shutdownAndAwaitTermination(coreLoadExecutor);
    }
  }

  if (isZooKeeperAware()) {
    zkSys.getZkController().checkOverseerDesignate();
  }
}