@Test
public void testFsUriSetProperly() throws Exception {
  HMaster master = UTIL.getMiniHBaseCluster().getMaster();
  MasterFileSystem fs = master.getMasterFileSystem();
  Path masterRoot = FSUtils.getRootDir(fs.conf);
  Path rootDir = FSUtils.getRootDir(fs.getFileSystem().getConf());
  // make sure the fs and the found root dir have the same scheme
  LOG.debug("from fs uri: " + FileSystem.getDefaultUri(fs.getFileSystem().getConf()));
  LOG.debug("from configuration uri: " + FileSystem.getDefaultUri(fs.conf));
  // the root dir derived from the master's configuration must match the one
  // derived from the filesystem's own configuration
  assertEquals(masterRoot, rootDir);
}
@Test
public void testRemoveStaleRecoveringRegionsDuringMasterInitialization() throws Exception {
  // this test applies only when distributed log replay is enabled
  if (!UTIL.getConfiguration().getBoolean(HConstants.DISTRIBUTED_LOG_REPLAY_KEY, false)) return;
  LOG.info("Starting testRemoveStaleRecoveringRegionsDuringMasterInitialization");
  HMaster master = UTIL.getMiniHBaseCluster().getMaster();
  MasterFileSystem fs = master.getMasterFileSystem();
  String failedRegion = "failedRegion1";
  String staleRegion = "staleRegion";
  ServerName inRecoveryServerName = ServerName.valueOf("mgr,1,1");
  ServerName previouslyFailedServerName = ServerName.valueOf("previous,1,1");
  String walPath = "/hbase/data/.logs/" + inRecoveryServerName.getServerName()
      + "-splitting/test";

  // Create a ZKW to use in the test
  ZooKeeperWatcher zkw = HBaseTestingUtility.getZooKeeperWatcher(UTIL);
  zkw.getRecoverableZooKeeper().create(
      ZKSplitLog.getEncodedNodeName(zkw, walPath),
      new SplitLogTask.Owned(inRecoveryServerName, fs.getLogRecoveryMode()).toByteArray(),
      Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);

  String staleRegionPath = ZKUtil.joinZNode(zkw.recoveringRegionsZNode, staleRegion);
  ZKUtil.createWithParents(zkw, staleRegionPath);

  String inRecoveringRegionPath = ZKUtil.joinZNode(zkw.recoveringRegionsZNode, failedRegion);
  inRecoveringRegionPath =
      ZKUtil.joinZNode(inRecoveringRegionPath, inRecoveryServerName.getServerName());
  ZKUtil.createWithParents(zkw, inRecoveringRegionPath);

  Set<ServerName> servers = new HashSet<ServerName>();
  servers.add(previouslyFailedServerName);
  fs.removeStaleRecoveringRegionsFromZK(servers);

  // verification: the stale znode is gone, the in-recovery znode survives
  assertEquals(-1, ZKUtil.checkExists(zkw, staleRegionPath));
  assertTrue(ZKUtil.checkExists(zkw, inRecoveringRegionPath) != -1);

  ZKUtil.deleteChildrenRecursively(zkw, zkw.recoveringRegionsZNode);
  ZKUtil.deleteChildrenRecursively(zkw, zkw.splitLogZNode);
  zkw.close();
}
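/*
 * For reference, the znode layout the test above builds, assuming the default
 * zookeeper.znode.parent of "/hbase" (paths derived from the test code, not from
 * HBase documentation):
 *
 *   /hbase/recovering-regions/staleRegion             <- no live recovery owns it; must be removed
 *   /hbase/recovering-regions/failedRegion1/mgr,1,1   <- mgr,1,1 is still recovering; must survive
 *   /hbase/splitlog/<encoded wal path>                <- split-log task marking mgr,1,1 as in recovery
 */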
/** Removes the table from hbase:meta and archives the HDFS files. */
protected void removeTableData(final List<HRegionInfo> regions)
    throws IOException, CoordinatedStateException {
  try {
    // 1. Remove regions from META
    LOG.debug("Deleting regions from META");
    MetaTableAccessor.deleteRegions(this.server.getConnection(), regions);

    // -----------------------------------------------------------------------
    // NOTE: At this point we still have data on disk, but nothing in hbase:meta.
    // If the rename below fails, hbck will report an inconsistency.
    // -----------------------------------------------------------------------

    // 2. Move the table to /hbase/.tmp
    MasterFileSystem mfs = this.masterServices.getMasterFileSystem();
    Path tempTableDir = mfs.moveTableToTemp(tableName);

    // 3. Archive regions from FS (temp directory)
    FileSystem fs = mfs.getFileSystem();
    for (HRegionInfo hri : regions) {
      LOG.debug("Archiving region " + hri.getRegionNameAsString() + " from FS");
      HFileArchiver.archiveRegion(fs, mfs.getRootDir(), tempTableDir,
          HRegion.getRegionDir(tempTableDir, hri.getEncodedName()));
    }

    // 4. Delete table directory from FS (temp directory)
    if (!fs.delete(tempTableDir, true)) {
      LOG.error("Couldn't delete " + tempTableDir);
    }

    LOG.debug("Table '" + tableName + "' archived!");
  } finally {
    cleanupTableState();
  }
}
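/*
 * For reference, the directory layout the four steps above walk through, assuming
 * the default temp (".tmp") and archive ("archive") directory names under the
 * HBase root (a sketch of the default layout, not guaranteed for every version):
 *
 *   {rootDir}/data/{ns}/{table}          -- live table dir, renamed away in step 2
 *   {rootDir}/.tmp/data/{ns}/{table}     -- temp location whose regions are archived in step 3
 *   {rootDir}/archive/data/{ns}/{table}  -- where HFileArchiver moves the store files,
 *                                           so step 4 deletes an already-emptied temp dir
 */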
/**
 * Called at startup to verify that snapshot operations are supported, and to avoid starting the
 * master if snapshots are present but the cleaners needed are missing. Otherwise we can end up
 * with snapshot data loss.
 *
 * @param conf The {@link Configuration} object to use
 * @param mfs The MasterFileSystem to use
 * @throws IOException in case of file-system operation failure
 * @throws UnsupportedOperationException in case cleaners are missing and there are snapshots in
 *   the system
 */
private void checkSnapshotSupport(final Configuration conf, final MasterFileSystem mfs)
    throws IOException, UnsupportedOperationException {
  // Verify if snapshot is disabled by the user
  String enabled = conf.get(HBASE_SNAPSHOT_ENABLED);
  boolean snapshotEnabled = conf.getBoolean(HBASE_SNAPSHOT_ENABLED, false);
  boolean userDisabled = (enabled != null && enabled.trim().length() > 0 && !snapshotEnabled);

  // Extract cleaners from conf
  Set<String> hfileCleaners = new HashSet<String>();
  String[] cleaners = conf.getStrings(HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS);
  if (cleaners != null) Collections.addAll(hfileCleaners, cleaners);

  Set<String> logCleaners = new HashSet<String>();
  cleaners = conf.getStrings(HConstants.HBASE_MASTER_LOGCLEANER_PLUGINS);
  if (cleaners != null) Collections.addAll(logCleaners, cleaners);

  // check if an older version of the snapshot directory is present
  Path oldSnapshotDir = new Path(mfs.getRootDir(), HConstants.OLD_SNAPSHOT_DIR_NAME);
  FileSystem fs = mfs.getFileSystem();
  List<SnapshotDescription> ss = getCompletedSnapshots(oldSnapshotDir);
  if (ss != null && !ss.isEmpty()) {
    LOG.error("Snapshots from an earlier release were found under: " + oldSnapshotDir);
    LOG.error("Please rename the directory as " + HConstants.SNAPSHOT_DIR_NAME);
  }

  // If the user has enabled snapshots, we force the cleaners to be present;
  // otherwise we still need to check whether cleaners are enabled and verify
  // that there are no snapshots in the .snapshot folder.
  if (snapshotEnabled) {
    // Inject snapshot cleaners, if snapshot.enable is true
    hfileCleaners.add(SnapshotHFileCleaner.class.getName());
    hfileCleaners.add(HFileLinkCleaner.class.getName());

    // Set cleaners conf
    conf.setStrings(HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS,
        hfileCleaners.toArray(new String[hfileCleaners.size()]));
    conf.setStrings(HConstants.HBASE_MASTER_LOGCLEANER_PLUGINS,
        logCleaners.toArray(new String[logCleaners.size()]));
  } else {
    // Verify if cleaners are present
    snapshotEnabled = hfileCleaners.contains(SnapshotHFileCleaner.class.getName())
        && hfileCleaners.contains(HFileLinkCleaner.class.getName());

    // Warn if the cleaners are enabled but the snapshot.enabled property is false/not set.
    if (snapshotEnabled) {
      LOG.warn("Snapshot log and hfile cleaners are present in the configuration, but the '"
          + HBASE_SNAPSHOT_ENABLED + "' property "
          + (userDisabled ? "is set to 'false'." : "is not set."));
    }
  }

  // Mark the snapshot feature as enabled if cleaners are present and the user has not disabled it.
  this.isSnapshotSupported = snapshotEnabled && !userDisabled;

  // If cleaners are not enabled, verify that there are no snapshots in the .snapshot folder;
  // otherwise we end up with snapshot data loss.
  if (!snapshotEnabled) {
    LOG.info("Snapshot feature is not enabled, missing log and hfile cleaners.");
    Path snapshotDir = SnapshotDescriptionUtils.getSnapshotsDir(mfs.getRootDir());
    if (fs.exists(snapshotDir)) {
      FileStatus[] snapshots = FSUtils.listStatus(fs, snapshotDir,
          new SnapshotDescriptionUtils.CompletedSnaphotDirectoriesFilter(fs));
      if (snapshots != null) {
        LOG.error("Snapshots are present, but cleaners are not enabled.");
        // isSnapshotSupported is false at this point, so the no-arg overload
        // below throws UnsupportedOperationException and aborts master startup.
        checkSnapshotSupport();
      }
    }
  }
}
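/*
 * For context, a minimal sketch of the no-arg checkSnapshotSupport() overload invoked
 * above: it rejects snapshot use whenever the support flag computed by the two-arg
 * method is false. The exception message here is illustrative, not the exact text
 * from the SnapshotManager source.
 */
public void checkSnapshotSupport() throws UnsupportedOperationException {
  if (!this.isSnapshotSupported) {
    throw new UnsupportedOperationException(
        "Snapshot feature is not enabled: set '" + HBASE_SNAPSHOT_ENABLED
            + "' to true in the hbase-site.xml of the HBase Master to use snapshots.");
  }
}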
/**
 * Create the namespace directory.
 *
 * @param env MasterProcedureEnv
 * @param nsDescriptor NamespaceDescriptor
 * @throws IOException in case of file-system operation failure
 */
protected static void createDirectory(final MasterProcedureEnv env,
    final NamespaceDescriptor nsDescriptor) throws IOException {
  MasterFileSystem mfs = env.getMasterServices().getMasterFileSystem();
  mfs.getFileSystem().mkdirs(
      FSUtils.getNamespaceDir(mfs.getRootDir(), nsDescriptor.getName()));
}
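/*
 * Minimal usage sketch (hypothetical call site, not taken from the procedure itself):
 * a create-namespace step would invoke the helper with the procedure's environment and
 * the descriptor, creating {rootDir}/data/{namespace} on the underlying filesystem.
 * "example_ns" and the local variable names are illustrative.
 */
NamespaceDescriptor nsDescriptor = NamespaceDescriptor.create("example_ns").build();
createDirectory(env, nsDescriptor); // env: the MasterProcedureEnv passed to the procedure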