/**
 * Runs the disk checker across every ledger directory, and across the index
 * directories as well when they are managed separately.
 *
 * @throws NoWritableLedgerDirException if no writable directory remains.
 * @throws DiskChecker.DiskErrorException if a disk-level error is detected.
 */
private void checkDiskSpace() throws NoWritableLedgerDirException, DiskChecker.DiskErrorException {
    ledgerDirsManager.checkAllDirs();
    // The two managers may be the same object; skip the duplicate pass then.
    final boolean separateIndexDirs = (indexDirsManager != ledgerDirsManager);
    if (separateIndexDirs) {
        indexDirsManager.checkAllDirs();
    }
}
public IndexPersistenceMgr( int pageSize, int entriesPerPage, ServerConfiguration conf, SnapshotMap<Long, Boolean> activeLedgers, LedgerDirsManager ledgerDirsManager, StatsLogger statsLogger) throws IOException { this.openFileLimit = conf.getOpenFileLimit(); this.activeLedgers = activeLedgers; this.ledgerDirsManager = ledgerDirsManager; this.pageSize = pageSize; this.entriesPerPage = entriesPerPage; LOG.info("openFileLimit = {}", openFileLimit); // Retrieve all of the active ledgers. getActiveLedgers(); ledgerDirsManager.addLedgerDirsListener(getLedgerDirsListener()); // Expose Stats evictedLedgersCounter = statsLogger.getCounter(LEDGER_CACHE_NUM_EVICTED_LEDGERS); statsLogger.registerGauge( NUM_OPEN_LEDGERS, new Gauge<Integer>() { @Override public Integer getDefaultValue() { return 0; } @Override public Integer getSample() { return getNumOpenLedgers(); } }); }
/**
 * Moves the ledger's index file off its current directory when that directory
 * is full, then flushes the file header in either case.
 *
 * @param ledger ledger id whose index file may need relocation.
 * @param fi file info backing the ledger's index file.
 * @throws IOException if the move or the header flush fails.
 */
private void relocateIndexFileAndFlushHeader(long ledger, FileInfo fi) throws IOException {
    final File currentLocation = getLedgerDirForLedger(fi);
    final boolean locationFull = ledgerDirsManager.isDirFull(currentLocation);
    if (locationFull) {
        moveLedgerIndexFile(ledger, fi);
    }
    // The header is flushed regardless of whether a relocation happened.
    fi.flushHeader();
}
// internal shutdown method to let shutdown bookie gracefully // when encountering exception synchronized int shutdown(int exitCode) { try { if (running) { // avoid shutdown twice // the exitCode only set when first shutdown usually due to exception found this.exitCode = exitCode; // mark bookie as in shutting down progress shuttingdown = true; // Shutdown Sync thread syncThread.shutdown(); // Shutdown disk checker ledgerDirsManager.shutdown(); if (indexDirsManager != ledgerDirsManager) { indexDirsManager.shutdown(); } // Shutdown journal journal.shutdown(); this.join(); // Shutdown the EntryLogger which has the GarbageCollector Thread running ledgerStorage.shutdown(); // close Ledger Manager try { activeLedgerManager.close(); activeLedgerManagerFactory.uninitialize(); } catch (IOException ie) { LOG.error("Failed to close active ledger manager : ", ie); } // Shutdown the ZK client if (zk != null) zk.close(); // Shutdown State Service stateService.shutdown(); // setting running to false here, so watch thread in bookie server know it only after bookie // shut down running = false; } } catch (InterruptedException ie) { LOG.error("Interrupted during shutting down bookie : ", ie); } return this.exitCode; }
/**
 * Searches every ledger directory for the ledger's index file.
 *
 * @param ledgerId id of the ledger being looked up.
 * @return the first existing index file found, or {@code null} if the ledger
 *     has no index file in any directory.
 * @throws IOException declared for callers; no I/O error is raised here.
 */
private File findIndexFile(long ledgerId) throws IOException {
    final String indexFileName = getLedgerName(ledgerId);
    for (File dir : ledgerDirsManager.getAllLedgerDirs()) {
        final File candidate = new File(dir, indexFileName);
        if (candidate.exists()) {
            return candidate;
        }
    }
    // Not present in any ledger directory.
    return null;
}
/** * This method will look within the ledger directories for the ledger index files. That will * comprise the set of active ledgers this particular BookieServer knows about that have not yet * been deleted by the BookKeeper Client. This is called only once during initialization. */ private void getActiveLedgers() throws IOException { // Ledger index files are stored in a file hierarchy with a parent and // grandParent directory. We'll have to go two levels deep into these // directories to find the index files. for (File ledgerDirectory : ledgerDirsManager.getAllLedgerDirs()) { File[] grandParents = ledgerDirectory.listFiles(); if (grandParents == null) { continue; } for (File grandParent : grandParents) { if (grandParent.isDirectory()) { File[] parents = grandParent.listFiles(); if (parents == null) { continue; } for (File parent : parents) { if (parent.isDirectory()) { File[] indexFiles = parent.listFiles(); if (indexFiles == null) { continue; } for (File index : indexFiles) { if (!index.isFile() || (!index.getName().endsWith(IDX) && !index.getName().endsWith(RLOC))) { continue; } // We've found a ledger index file. The file // name is the HexString representation of the // ledgerId. String ledgerIdInHex = index.getName().replace(RLOC, "").replace(IDX, ""); if (index.getName().endsWith(RLOC)) { if (findIndexFile(Long.parseLong(ledgerIdInHex)) != null) { if (!index.delete()) { LOG.warn("Deleting the rloc file " + index + " failed"); } continue; } else { File dest = new File(index.getParentFile(), ledgerIdInHex + IDX); if (!index.renameTo(dest)) { throw new IOException( "Renaming rloc file " + index + " to index file has failed"); } } } activeLedgers.put(Long.parseLong(ledgerIdInHex, 16), true); } } } } } } }
/**
 * Picks a fresh location for a ledger's index file, excluding the directory
 * <code>excludedDir</code>.
 *
 * @param ledger Ledger id.
 * @param excludedDir The ledger directory to exclude.
 * @return new index file object in a randomly chosen writable directory.
 * @throws NoWritableLedgerDirException if there is no writable dir available.
 */
private File getNewLedgerIndexFile(Long ledger, File excludedDir)
        throws NoWritableLedgerDirException {
    final String indexFileName = getLedgerName(ledger);
    final File targetDir = ledgerDirsManager.pickRandomWritableDir(excludedDir);
    return new File(targetDir, indexFileName);
}
@Override public synchronized void start() { setDaemon(true); LOG.info("I'm starting a bookie with journal directory {}", journalDirectory.getName()); // Start DiskChecker thread ledgerDirsManager.start(); if (indexDirsManager != ledgerDirsManager) { indexDirsManager.start(); } // start sync thread first, so during replaying journals, we could do checkpoint // which reduce the chance that we need to replay journals again if bookie restarted // again before finished journal replays. syncThread.start(); // replay journals try { readJournal(); } catch (IOException ioe) { LOG.error("Exception while replaying journals, shutting down", ioe); shutdown(ExitCode.BOOKIE_EXCEPTION); return; } catch (BookieException be) { LOG.error("Exception while replaying journals, shutting down", be); shutdown(ExitCode.BOOKIE_EXCEPTION); return; } // Do a fully flush after journal replay try { syncThread.requestFlush().get(); } catch (InterruptedException e) { LOG.warn("Interrupting the fully flush after replaying journals : ", e); Thread.currentThread().interrupt(); } catch (ExecutionException e) { LOG.error("Error on executing a fully flush after replaying journals."); shutdown(ExitCode.BOOKIE_EXCEPTION); } // start bookie thread super.start(); // After successful bookie startup, register listener for disk // error/full notifications. ledgerDirsManager.addLedgerDirsListener(getLedgerDirsListener()); if (indexDirsManager != ledgerDirsManager) { indexDirsManager.addLedgerDirsListener(getLedgerDirsListener()); } ledgerStorage.start(); // set running here. // since bookie server use running as a flag to tell bookie server whether it is alive // if setting it in bookie thread, the watcher might run before bookie thread. running = true; try { registerBookie(true).get(); } catch (Exception ie) { LOG.error("Couldn't register bookie with zookeeper, shutting down : ", ie); shutdown(ExitCode.ZK_REG_FAIL); } }
/** * Check that the environment for the bookie is correct. This means that the configuration has * stayed the same as the first run and the filesystem structure is up to date. */ private void checkEnvironment(ZooKeeper zk) throws BookieException, IOException { List<File> allLedgerDirs = new ArrayList<File>( ledgerDirsManager.getAllLedgerDirs().size() + indexDirsManager.getAllLedgerDirs().size()); allLedgerDirs.addAll(ledgerDirsManager.getAllLedgerDirs()); if (indexDirsManager != ledgerDirsManager) { allLedgerDirs.addAll(indexDirsManager.getAllLedgerDirs()); } if (zk == null) { // exists only for testing, just make sure directories are correct checkDirectoryStructure(journalDirectory); for (File dir : allLedgerDirs) { checkDirectoryStructure(dir); } return; } try { String instanceId = getInstanceId(zk); boolean newEnv = false; Cookie masterCookie = Cookie.generateCookie(conf); if (null != instanceId) { masterCookie.setInstanceId(instanceId); } try { Cookie zkCookie = Cookie.readFromZooKeeper(zk, conf); masterCookie.verify(zkCookie); } catch (KeeperException.NoNodeException nne) { newEnv = true; } List<File> missedCookieDirs = new ArrayList<File>(); checkDirectoryStructure(journalDirectory); // try to read cookie from journal directory try { Cookie journalCookie = Cookie.readFromDirectory(journalDirectory); journalCookie.verify(masterCookie); } catch (FileNotFoundException fnf) { missedCookieDirs.add(journalDirectory); } for (File dir : allLedgerDirs) { checkDirectoryStructure(dir); try { Cookie c = Cookie.readFromDirectory(dir); c.verify(masterCookie); } catch (FileNotFoundException fnf) { missedCookieDirs.add(dir); } } if (!newEnv && missedCookieDirs.size() > 0) { LOG.error( "Cookie exists in zookeeper, but not in all local directories. 
" + " Directories missing cookie file are " + missedCookieDirs); throw new BookieException.InvalidCookieException(); } if (newEnv) { if (missedCookieDirs.size() > 0) { LOG.debug("Directories missing cookie file are {}", missedCookieDirs); masterCookie.writeToDirectory(journalDirectory); for (File dir : allLedgerDirs) { masterCookie.writeToDirectory(dir); } } masterCookie.writeToZooKeeper(zk, conf); } } catch (KeeperException ke) { LOG.error("Couldn't access cookie in zookeeper", ke); throw new BookieException.InvalidCookieException(ke); } catch (UnknownHostException uhe) { LOG.error("Couldn't check cookies, networking is broken", uhe); throw new BookieException.InvalidCookieException(uhe); } catch (IOException ioe) { LOG.error("Error accessing cookie on disks", ioe); throw new BookieException.InvalidCookieException(ioe); } catch (InterruptedException ie) { LOG.error("Thread interrupted while checking cookies, exiting", ie); throw new BookieException.InvalidCookieException(ie); } }