/**
 * Check all ledger directories (and index directories, if managed separately)
 * for disk errors and available space.
 */
private void checkDiskSpace() throws NoWritableLedgerDirException, DiskChecker.DiskErrorException {
    ledgerDirsManager.checkAllDirs();
    if (indexDirsManager != ledgerDirsManager) {
        indexDirsManager.checkAllDirs();
    }
}
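// --- Illustrative sketch, not part of the original class ---
// Shows how checkDiskSpace() could be polled on a schedule. In the bookie
// itself, periodic checking is handled by the DiskChecker thread started via
// ledgerDirsManager.start() (see start() below); the executor, method name and
// interval here are hypothetical. Assumes java.util.concurrent.Executors,
// ScheduledExecutorService and TimeUnit are imported.
private void startDiskCheckSketch(long intervalSeconds) {
    ScheduledExecutorService checker = Executors.newSingleThreadScheduledExecutor();
    checker.scheduleAtFixedRate(new Runnable() {
        @Override
        public void run() {
            try {
                checkDiskSpace();
            } catch (Exception e) {
                // Catch broadly so the sketch does not depend on the exact
                // hierarchy of NoWritableLedgerDirException and
                // DiskChecker.DiskErrorException.
                LOG.warn("Periodic disk check failed", e);
            }
        }
    }, intervalSeconds, intervalSeconds, TimeUnit.SECONDS);
}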
// Internal shutdown method to shut down the bookie gracefully
// when an exception is encountered.
synchronized int shutdown(int exitCode) {
    try {
        if (running) { // avoid shutting down twice
            // the exitCode is only set on the first shutdown, usually due to an exception
            this.exitCode = exitCode;
            // mark bookie as being in the process of shutting down
            shuttingdown = true;

            // Shutdown Sync thread
            syncThread.shutdown();

            // Shutdown disk checker
            ledgerDirsManager.shutdown();
            if (indexDirsManager != ledgerDirsManager) {
                indexDirsManager.shutdown();
            }

            // Shutdown journal
            journal.shutdown();
            this.join();

            // Shutdown the EntryLogger which has the GarbageCollector Thread running
            ledgerStorage.shutdown();

            // close Ledger Manager
            try {
                activeLedgerManager.close();
                activeLedgerManagerFactory.uninitialize();
            } catch (IOException ie) {
                LOG.error("Failed to close active ledger manager : ", ie);
            }

            // Shutdown the ZK client
            if (zk != null) {
                zk.close();
            }

            // Shutdown State Service
            stateService.shutdown();

            // Set running to false here, so the watch thread in the bookie server
            // only learns about it after the bookie has shut down.
            running = false;
        }
    } catch (InterruptedException ie) {
        LOG.error("Interrupted during shutting down bookie : ", ie);
    }
    return this.exitCode;
}
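// --- Illustrative sketch, not part of the original class ---
// Shows one way the package-private shutdown(int) above could be triggered:
// from a JVM shutdown hook. The hook registration and method name are a
// hypothetical usage example, not the bookie's actual wiring; ExitCode.OK is
// assumed to exist alongside the ExitCode constants already referenced here.
private void registerShutdownHookSketch() {
    Runtime.getRuntime().addShutdownHook(new Thread("BookieShutdownHook") {
        @Override
        public void run() {
            // A hook-driven shutdown is a normal exit, hence ExitCode.OK.
            shutdown(ExitCode.OK);
        }
    });
}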
@Override
public synchronized void start() {
    setDaemon(true);
    LOG.info("I'm starting a bookie with journal directory {}", journalDirectory.getName());

    // Start DiskChecker thread
    ledgerDirsManager.start();
    if (indexDirsManager != ledgerDirsManager) {
        indexDirsManager.start();
    }

    // Start the sync thread first, so that during journal replay we can checkpoint.
    // This reduces the chance of having to replay the journals again if the bookie
    // is restarted before the replay has finished.
    syncThread.start();

    // replay journals
    try {
        readJournal();
    } catch (IOException ioe) {
        LOG.error("Exception while replaying journals, shutting down", ioe);
        shutdown(ExitCode.BOOKIE_EXCEPTION);
        return;
    } catch (BookieException be) {
        LOG.error("Exception while replaying journals, shutting down", be);
        shutdown(ExitCode.BOOKIE_EXCEPTION);
        return;
    }

    // Do a full flush after journal replay
    try {
        syncThread.requestFlush().get();
    } catch (InterruptedException e) {
        LOG.warn("Interrupted during the full flush after replaying journals : ", e);
        Thread.currentThread().interrupt();
    } catch (ExecutionException e) {
        LOG.error("Error executing a full flush after replaying journals.");
        shutdown(ExitCode.BOOKIE_EXCEPTION);
    }

    // start bookie thread
    super.start();

    // After successful bookie startup, register listeners for disk
    // error/full notifications.
    ledgerDirsManager.addLedgerDirsListener(getLedgerDirsListener());
    if (indexDirsManager != ledgerDirsManager) {
        indexDirsManager.addLedgerDirsListener(getLedgerDirsListener());
    }

    ledgerStorage.start();

    // Set running here: the bookie server uses running as a flag to tell whether
    // the bookie is alive. If it were set in the bookie thread, the watcher might
    // run before the bookie thread does.
    running = true;
    try {
        registerBookie(true).get();
    } catch (Exception ie) {
        LOG.error("Couldn't register bookie with zookeeper, shutting down : ", ie);
        shutdown(ExitCode.ZK_REG_FAIL);
    }
}
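// --- Illustrative sketch, not part of the original class ---
// A minimal caller-side view of the lifecycle implemented by start() and
// shutdown(int): start the bookie thread, bail out if startup already failed
// (start() shuts the bookie down itself on journal-replay or registration
// errors), then wait for the bookie thread to exit. The method name is
// hypothetical; a real server wraps this with request handling and networking.
static void runBookieSketch(Bookie bookie) throws InterruptedException {
    bookie.start();        // replays the journal, registers with ZK, sets running = true
    if (!bookie.running) {
        return;            // start() already invoked shutdown(...) on failure
    }
    bookie.join();         // Bookie extends Thread, so join() waits for it to exit
}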
/**
 * Check that the environment for the bookie is correct. This means that the
 * configuration has stayed the same as on the first run and the filesystem
 * structure is up to date.
 */
private void checkEnvironment(ZooKeeper zk) throws BookieException, IOException {
    List<File> allLedgerDirs = new ArrayList<File>(ledgerDirsManager.getAllLedgerDirs().size()
            + indexDirsManager.getAllLedgerDirs().size());
    allLedgerDirs.addAll(ledgerDirsManager.getAllLedgerDirs());
    if (indexDirsManager != ledgerDirsManager) {
        allLedgerDirs.addAll(indexDirsManager.getAllLedgerDirs());
    }
    if (zk == null) { // exists only for testing, just make sure directories are correct
        checkDirectoryStructure(journalDirectory);
        for (File dir : allLedgerDirs) {
            checkDirectoryStructure(dir);
        }
        return;
    }
    try {
        String instanceId = getInstanceId(zk);
        boolean newEnv = false;
        Cookie masterCookie = Cookie.generateCookie(conf);
        if (null != instanceId) {
            masterCookie.setInstanceId(instanceId);
        }
        try {
            Cookie zkCookie = Cookie.readFromZooKeeper(zk, conf);
            masterCookie.verify(zkCookie);
        } catch (KeeperException.NoNodeException nne) {
            newEnv = true;
        }
        List<File> missedCookieDirs = new ArrayList<File>();
        checkDirectoryStructure(journalDirectory);

        // try to read the cookie from the journal directory
        try {
            Cookie journalCookie = Cookie.readFromDirectory(journalDirectory);
            journalCookie.verify(masterCookie);
        } catch (FileNotFoundException fnf) {
            missedCookieDirs.add(journalDirectory);
        }
        for (File dir : allLedgerDirs) {
            checkDirectoryStructure(dir);
            try {
                Cookie c = Cookie.readFromDirectory(dir);
                c.verify(masterCookie);
            } catch (FileNotFoundException fnf) {
                missedCookieDirs.add(dir);
            }
        }

        if (!newEnv && missedCookieDirs.size() > 0) {
            LOG.error("Cookie exists in zookeeper, but not in all local directories. "
                    + "Directories missing cookie file are " + missedCookieDirs);
            throw new BookieException.InvalidCookieException();
        }
        if (newEnv) {
            if (missedCookieDirs.size() > 0) {
                LOG.debug("Directories missing cookie file are {}", missedCookieDirs);
                masterCookie.writeToDirectory(journalDirectory);
                for (File dir : allLedgerDirs) {
                    masterCookie.writeToDirectory(dir);
                }
            }
            masterCookie.writeToZooKeeper(zk, conf);
        }
    } catch (KeeperException ke) {
        LOG.error("Couldn't access cookie in zookeeper", ke);
        throw new BookieException.InvalidCookieException(ke);
    } catch (UnknownHostException uhe) {
        LOG.error("Couldn't check cookies, networking is broken", uhe);
        throw new BookieException.InvalidCookieException(uhe);
    } catch (IOException ioe) {
        LOG.error("Error accessing cookie on disks", ioe);
        throw new BookieException.InvalidCookieException(ioe);
    } catch (InterruptedException ie) {
        LOG.error("Thread interrupted while checking cookies, exiting", ie);
        throw new BookieException.InvalidCookieException(ie);
    }
}
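// --- Illustrative sketch, not part of the original class ---
// Condenses the cookie decision in checkEnvironment(...) above: newEnv is true
// only when no cookie node can be read from ZooKeeper (NoNodeException), and
// missedCookieDirs collects local directories whose cookie file is missing.
// The helper name is hypothetical and only restates the two branches.
private static void resolveCookieStateSketch(boolean newEnv, List<File> missedCookieDirs)
        throws BookieException.InvalidCookieException {
    if (!newEnv && !missedCookieDirs.isEmpty()) {
        // ZooKeeper already knows this bookie, but some local directories lost
        // their cookie: refuse to start rather than risk serving with replaced
        // or partially wiped disks.
        throw new BookieException.InvalidCookieException();
    }
    // Otherwise this is a brand new environment: checkEnvironment(...) writes
    // the master cookie to the journal and ledger directories first, then to
    // ZooKeeper, so that later restarts can verify the same environment.
}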