public static void baseSetup() throws Exception {
    MiniDFSShim dfs = ShimLoader.getHadoopShims().getMiniDfs(conf, 4, true, null);
    fs = dfs.getFileSystem();
    baseDfsDir = new Path(new Path(fs.getUri()), "/base");
    fs.mkdirs(baseDfsDir);
    warehouseDir = new Path(baseDfsDir, "warehouse");
    fs.mkdirs(warehouseDir);
    conf.setVar(ConfVars.METASTOREWAREHOUSE, warehouseDir.toString());

    // On Windows the tests are assumed to run from the C: or D: drive;
    // strip backslashes and the drive letter so the path is usable as an HDFS path.
    dataFileDir =
        conf.get("test.data.files")
            .replace('\\', '/')
            .replace("c:", "")
            .replace("C:", "")
            .replace("D:", "")
            .replace("d:", "");
    dataFilePath = new Path(dataFileDir, "kv1.txt");

    // Set up scratch directory
    Path scratchDir = new Path(baseDfsDir, "scratchdir");
    conf.setVar(HiveConf.ConfVars.SCRATCHDIR, scratchDir.toString());

    // set hive conf vars
    conf.setBoolVar(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY, false);
    conf.setBoolVar(HiveConf.ConfVars.HIVE_WAREHOUSE_SUBDIR_INHERIT_PERMS, true);
    conf.setVar(HiveConf.ConfVars.DYNAMICPARTITIONINGMODE, "nonstrict");
    int port = MetaStoreUtils.findFreePort();
    MetaStoreUtils.startMetaStore(port, ShimLoader.getHadoopThriftAuthBridge());

    SessionState.start(new CliSessionState(conf));
    driver = new Driver(conf);
    setupDataTable();
  }
  @BeforeClass
  public static void clusterSetupAtBeginning()
      throws IOException, LoginException, URISyntaxException {
    SupportsBlocks = true;
    CONF.setBoolean(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);

    cluster =
        new MiniDFSCluster.Builder(CONF)
            .nnTopology(MiniDFSNNTopology.simpleFederatedTopology(2))
            .numDataNodes(2)
            .build();
    cluster.waitClusterUp();

    fHdfs = cluster.getFileSystem(0);
    fHdfs2 = cluster.getFileSystem(1);
    fHdfs
        .getConf()
        .set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY, FsConstants.VIEWFS_URI.toString());
    fHdfs2
        .getConf()
        .set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY, FsConstants.VIEWFS_URI.toString());

    defaultWorkingDirectory =
        fHdfs.makeQualified(
            new Path("/user/" + UserGroupInformation.getCurrentUser().getShortUserName()));
    defaultWorkingDirectory2 =
        fHdfs2.makeQualified(
            new Path("/user/" + UserGroupInformation.getCurrentUser().getShortUserName()));

    fHdfs.mkdirs(defaultWorkingDirectory);
    fHdfs2.mkdirs(defaultWorkingDirectory2);
  }
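  // Presumably a matching @AfterClass hook tears the federated mini-cluster
  // down again; a minimal sketch (the method name is an assumption):
  @AfterClass
  public static void clusterShutdownAtEnd() throws Exception {
    if (cluster != null) {
      cluster.shutdown();
    }
  }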
  @org.junit.Test
  public void testPermissions() throws Exception {

    Path myFile = new Path("filePerm.txt");
    fs.create(myFile).close();
    short perm = 0777;
    fs.setPermission(myFile, new FsPermission(perm));
    assertEquals(perm, fs.getFileStatus(myFile).getPermission().toShort());

    perm = 0700;
    fs.setPermission(myFile, new FsPermission(perm));
    assertEquals(perm, fs.getFileStatus(myFile).getPermission().toShort());

    fs.delete(myFile, false);
    assertFalse(fs.exists(myFile));

    /* directory permissions */
    Path directory = new Path("aa/bb/cc");
    perm = 0700;
    fs.mkdirs(directory, new FsPermission(perm));
    assertEquals(perm, fs.getFileStatus(directory).getPermission().toShort());
    fs.delete(new Path("aa"), true);
    assertFalse(fs.exists(directory));

    perm = 0777;
    fs.mkdirs(directory, new FsPermission(perm));
    assertEquals(perm, fs.getFileStatus(directory).getPermission().toShort());
    fs.delete(new Path("aa"), true);
    assertFalse(fs.exists(directory));
  }
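  // FsPermission interprets the short as octal rwx bits (0777 = rwxrwxrwx,
  // 0700 = rwx------). A minimal round-trip sketch (test name is illustrative):
  @org.junit.Test
  public void testPermissionOctalRoundTrip() {
    FsPermission p = new FsPermission((short) 0644);
    assertEquals("rw-r--r--", p.toString());
    assertEquals((short) 0644, p.toShort());
  }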
  private void testMovingFiles(boolean useAcl) throws Exception {
    // Create a tmp directory with wide-open permissions and sticky bit
    Path tmpPath = new Path("/tmp");
    Path tmpPath2 = new Path("/tmp2");
    hdfs.mkdirs(tmpPath);
    hdfs.mkdirs(tmpPath2);
    hdfs.setPermission(tmpPath, new FsPermission((short) 01777));
    if (useAcl) {
      applyAcl(tmpPath);
    }
    hdfs.setPermission(tmpPath2, new FsPermission((short) 01777));
    if (useAcl) {
      applyAcl(tmpPath2);
    }

    // Write a file to the new tmp directory as a regular user
    Path file = new Path(tmpPath, "foo");

    writeFile(hdfsAsUser1, file);

    // Log onto cluster as another user and attempt to move the file
    try {
      hdfsAsUser2.rename(file, new Path(tmpPath2, "renamed"));
      fail("Shouldn't be able to rename someone else's file with SB on");
    } catch (IOException ioe) {
      assertTrue(ioe instanceof AccessControlException);
      assertTrue(ioe.getMessage().contains("sticky bit"));
    }
  }
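  // testMovingFiles(boolean) is a private helper; presumably it is driven by
  // two public entry points along these lines (the names are assumptions):
  @org.junit.Test
  public void testMovingFilesWithStickyBit() throws Exception {
    testMovingFiles(false);
  }

  @org.junit.Test
  public void testMovingFilesWithAclAndStickyBit() throws Exception {
    testMovingFiles(true);
  }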
  @Override
  public void start(CoprocessorEnvironment env) {
    this.env = (RegionCoprocessorEnvironment) env;
    random = new SecureRandom();
    conf = env.getConfiguration();
    baseStagingDir = SecureBulkLoadUtil.getBaseStagingDir(conf);
    this.userProvider = UserProvider.instantiate(conf);

    try {
      fs = FileSystem.get(conf);
      fs.mkdirs(baseStagingDir, PERM_HIDDEN);
      fs.setPermission(baseStagingDir, PERM_HIDDEN);
      // hadoop-1.0 has no sticky bit, so keep the directory non-empty so it is never erased
      fs.mkdirs(new Path(baseStagingDir, "DONOTERASE"), PERM_HIDDEN);
      FileStatus status = fs.getFileStatus(baseStagingDir);
      if (status == null) {
        throw new IllegalStateException("Failed to create staging directory");
      }
      if (!status.getPermission().equals(PERM_HIDDEN)) {
        throw new IllegalStateException(
            "Directory already exists but permissions aren't set to '-rwx--x--x' ");
      }
    } catch (IOException e) {
      throw new IllegalStateException("Failed to get FileSystem instance", e);
    }
  }
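  // PERM_HIDDEN is not shown in this snippet; given the "-rwx--x--x" message
  // above, it is presumably an immutable octal-711 permission, e.g.:
  private static final FsPermission PERM_HIDDEN =
      FsPermission.createImmutable((short) 0711);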
  @Test
  public void errorRenameTest() throws Exception {
    // Rename /dirA to /dirA/dirB should fail
    {
      Path dirA = new Path("/dirA");
      Path finalDst = new Path("/dirA/dirB");

      sTFS.mkdirs(dirA);

      Assert.assertFalse(sTFS.rename(dirA, finalDst));

      Assert.assertFalse(sTFS.exists(finalDst));
      Assert.assertTrue(sTFS.exists(dirA));
      cleanup(sTFS);
    }
    // Rename /fileA to /fileB should fail if /fileB exists
    {
      Path fileA = new Path("/fileA");
      Path fileB = new Path("/fileB");

      create(sTFS, fileA);
      create(sTFS, fileB);

      Assert.assertFalse(sTFS.rename(fileA, fileB));

      Assert.assertTrue(sTFS.exists(fileA));
      Assert.assertTrue(sTFS.exists(fileB));
      cleanup(sTFS);
    }
    // Rename /fileA to /dirA/fileA should fail if /dirA/fileA exists
    {
      Path fileA = new Path("/fileA");
      Path dirA = new Path("/dirA");
      Path finalDst = new Path("/dirA/fileA");

      create(sTFS, fileA);
      create(sTFS, finalDst);
      sTFS.mkdirs(dirA);

      Assert.assertFalse(sTFS.rename(fileA, dirA));

      Assert.assertTrue(sTFS.exists(fileA));
      Assert.assertTrue(sTFS.exists(dirA));
      Assert.assertTrue(sTFS.exists(finalDst));
      cleanup(sTFS);
    }
    // Rename /fileA to a nonexistent path should fail
    {
      Path fileA = new Path("/fileA");
      Path nonexistentPath = new Path("/doesNotExist/fileA");

      create(sTFS, fileA);

      Assert.assertFalse(sTFS.rename(fileA, nonexistentPath));

      Assert.assertTrue(sTFS.exists(fileA));
      cleanup(sTFS);
    }
  }
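  // create() and cleanup() are helpers assumed by the test above; minimal
  // sketches consistent with how they are called:
  private static void create(FileSystem fs, Path path) throws IOException {
    fs.create(path).close();
  }

  private static void cleanup(FileSystem fs) throws IOException {
    // Wipe everything under the root so each block starts from a clean slate.
    for (FileStatus status : fs.listStatus(new Path("/"))) {
      fs.delete(status.getPath(), true);
    }
  }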
 @Override
 protected synchronized void startInternal() throws Exception {
   // create filesystem only now, as part of service-start. By this time, RM is
   // authenticated with kerberos so we are good to create a file-system
   // handle.
   fs = fsWorkingPath.getFileSystem(getConfig());
   fs.mkdirs(rmDTSecretManagerRoot);
   fs.mkdirs(rmAppRoot);
 }
  @Override
  public void visit(State state, Properties props) throws Exception {
    Connector conn = state.getConnector();

    Random rand = (Random) state.get("rand");

    @SuppressWarnings("unchecked")
    List<String> tableNames = (List<String>) state.get("tables");

    String tableName = tableNames.get(rand.nextInt(tableNames.size()));

    Configuration conf = CachedConfiguration.getInstance();
    FileSystem fs = FileSystem.get(conf);

    String bulkDir = "/tmp/concurrent_bulk/b_" + String.format("%016x", Math.abs(rand.nextLong()));

    fs.mkdirs(new Path(bulkDir));
    fs.mkdirs(new Path(bulkDir + "_f"));

    try {
      BatchWriter bw = new RFileBatchWriter(conf, fs, bulkDir + "/file01.rf");
      try {
        TreeSet<Long> rows = new TreeSet<Long>();
        int numRows = rand.nextInt(100000);
        for (int i = 0; i < numRows; i++) {
          rows.add(Math.abs(rand.nextLong()));
        }

        for (Long row : rows) {
          Mutation m = new Mutation(String.format("%016x", row));
          long val = Math.abs(rand.nextLong());
          for (int j = 0; j < 10; j++) {
            m.put("cf", "cq" + j, new Value(String.format("%016x", val).getBytes()));
          }

          bw.addMutation(m);
        }
      } finally {
        bw.close();
      }

      conn.tableOperations()
          .importDirectory(tableName, bulkDir, bulkDir + "_f", rand.nextBoolean());

      log.debug("BulkImported to " + tableName);
    } catch (TableNotFoundException e) {
      log.debug("BulkImport " + tableName + " failed, doesnt exist");
    } catch (TableOfflineException toe) {
      log.debug("BulkImport " + tableName + " failed, offline");
    } finally {
      fs.delete(new Path(bulkDir), true);
      fs.delete(new Path(bulkDir + "_f"), true);
    }
  }
  @Test
  public void testGeneralSBBehavior() throws Exception {
    Path baseDir = new Path("/mcgann");
    hdfs.mkdirs(baseDir);

    // Create a tmp directory with wide-open permissions and sticky bit
    Path p = new Path(baseDir, "tmp");

    hdfs.mkdirs(p);
    hdfs.setPermission(p, new FsPermission((short) 01777));

    confirmCanAppend(conf, p);

    baseDir = new Path("/eccleston");
    hdfs.mkdirs(baseDir);
    p = new Path(baseDir, "roguetraders");

    hdfs.mkdirs(p);
    confirmSettingAndGetting(hdfs, p, baseDir);

    baseDir = new Path("/tennant");
    hdfs.mkdirs(baseDir);
    p = new Path(baseDir, "contemporary");
    hdfs.mkdirs(p);
    hdfs.setPermission(p, new FsPermission((short) 01777));
    confirmDeletingFiles(conf, p);

    baseDir = new Path("/smith");
    hdfs.mkdirs(baseDir);
    p = new Path(baseDir, "scissorsisters");

    // Turn on its sticky bit
    hdfs.mkdirs(p, new FsPermission((short) 01666));
    confirmStickyBitDoesntPropagate(hdfs, baseDir);
  }
  public void createDirectories(
      Configuration conf, FileSystem fs, String inputDir, String failedDir) throws Exception {

    Path input = new Path(inputDir);
    Path failed = new Path(failedDir);
    fs.mkdirs(input);
    fs.mkdirs(failed);

    // Tweak permissions if needed
    // fs.setPermission(input, FsPermission.createImmutable((short) 0777));
    // fs.setOwner(input, "accumulo", "supergroup");
    // fs.setOwner(output, "accumulo", "supergroup");
  }
 public void setUp() throws Exception {
   readMessages = new ArrayList<Message>();
   fs = rootDir.getFileSystem(conf);
   defaultConf = new JobConf(new Configuration());
   dataFiles = new String[] {TestUtil.files[0], TestUtil.files[1]};
   setUpCluster();
   fs.delete(new Path(cluster.getRootDir()), true);
   Path streamDir = new Path(cluster.getDataDir(), testStream);
   fs.delete(streamDir, true);
   fs.mkdirs(streamDir);
   collectorDir = new Path(streamDir, collectors);
   fs.delete(collectorDir, true);
   fs.mkdirs(collectorDir);
   TestUtil.setUpFiles(cluster, collectors, dataFiles, null, null, 1, 0);
 }
  /**
   * Assert that getSplitEditFilesSorted returns files in expected order and that it skips
   * moved-aside files.
   *
   * @throws IOException
   */
  @Test
  public void testGetSplitEditFilesSorted() throws IOException {
    FileSystem fs = FileSystem.get(util.getConfiguration());
    Path regiondir = util.getDataTestDir("regiondir");
    fs.delete(regiondir, true);
    fs.mkdirs(regiondir);
    Path recoverededits = WALSplitter.getRegionDirRecoveredEditsDir(regiondir);
    String first = WALSplitter.formatRecoveredEditsFileName(-1);
    createFile(fs, recoverededits, first);
    createFile(fs, recoverededits, WALSplitter.formatRecoveredEditsFileName(0));
    createFile(fs, recoverededits, WALSplitter.formatRecoveredEditsFileName(1));
    createFile(fs, recoverededits, WALSplitter.formatRecoveredEditsFileName(11));
    createFile(fs, recoverededits, WALSplitter.formatRecoveredEditsFileName(2));
    createFile(fs, recoverededits, WALSplitter.formatRecoveredEditsFileName(50));
    String last = WALSplitter.formatRecoveredEditsFileName(Long.MAX_VALUE);
    createFile(fs, recoverededits, last);
    createFile(
        fs, recoverededits, Long.toString(Long.MAX_VALUE) + "." + System.currentTimeMillis());

    final Configuration walConf = new Configuration(util.getConfiguration());
    FSUtils.setRootDir(walConf, regiondir);
    (new WALFactory(walConf, null, "dummyLogName")).getWAL(new byte[] {}, null);

    NavigableSet<Path> files = WALSplitter.getSplitEditFilesSorted(fs, regiondir);
    assertEquals(7, files.size());
    assertEquals(first, files.pollFirst().getName());
    assertEquals(last, files.pollLast().getName());
    assertEquals(WALSplitter.formatRecoveredEditsFileName(0), files.pollFirst().getName());
    assertEquals(WALSplitter.formatRecoveredEditsFileName(1), files.pollFirst().getName());
    assertEquals(WALSplitter.formatRecoveredEditsFileName(2), files.pollFirst().getName());
    assertEquals(WALSplitter.formatRecoveredEditsFileName(11), files.pollFirst().getName());
  }
 @Test
 public void testVanishingTaskZNode() throws Exception {
   LOG.info("testVanishingTaskZNode");
   conf.setInt("hbase.splitlog.manager.unassigned.timeout", 0);
   slm = new SplitLogManager(zkw, conf, stopper, "dummy-master", null);
   slm.finishInitialization();
   FileSystem fs = TEST_UTIL.getTestFileSystem();
   final Path logDir = new Path(fs.getWorkingDirectory(), UUID.randomUUID().toString());
   fs.mkdirs(logDir);
   Path logFile = new Path(logDir, UUID.randomUUID().toString());
   fs.createNewFile(logFile);
   new Thread() {
     public void run() {
       try {
         // this call will block because there are no SplitLogWorkers
         slm.splitLogDistributed(logDir);
       } catch (Exception e) {
         LOG.warn("splitLogDistributed failed", e);
         fail();
       }
     }
   }.start();
   waitForCounter(tot_mgr_node_create_result, 0, 1, 10000);
   String znode = ZKSplitLog.getEncodedNodeName(zkw, logFile.toString());
   // remove the task znode
   ZKUtil.deleteNode(zkw, znode);
   waitForCounter(tot_mgr_get_data_nonode, 0, 1, 30000);
   waitForCounter(tot_mgr_log_split_batch_success, 0, 1, 1000);
   assertTrue(fs.exists(logFile));
   fs.delete(logDir, true);
 }
  public void testLease() throws Exception {
    Configuration conf = new Configuration();
    MiniDFSCluster cluster = new MiniDFSCluster(conf, 2, true, null);
    try {
      FileSystem fs = cluster.getFileSystem();
      assertTrue(fs.mkdirs(dir));

      Path a = new Path(dir, "a");
      Path b = new Path(dir, "b");

      DataOutputStream a_out = fs.create(a);
      a_out.writeBytes("something");

      assertTrue(hasLease(cluster, a));
      assertTrue(!hasLease(cluster, b));

      DataOutputStream b_out = fs.create(b);
      b_out.writeBytes("something");

      assertTrue(hasLease(cluster, a));
      assertTrue(hasLease(cluster, b));

      a_out.close();
      b_out.close();

      assertTrue(!hasLease(cluster, a));
      assertTrue(!hasLease(cluster, b));

      fs.delete(dir, true);
    } finally {
      if (cluster != null) {
        cluster.shutdown();
      }
    }
  }
 private void touch(FileSystem fs, String path, boolean generateFiles) throws Exception {
   if (generateFiles) {
     fs.create(new Path(path)).close();
   } else {
     fs.mkdirs(new Path(path));
   }
 }
  /**
   * Generate random data, compress it, index and md5 hash the data. Then read it all back and md5
   * that too, to verify that it all went ok.
   *
   * @param testWithIndex Should we index or not?
   * @param charsToOutput How many characters of random data should we output?
   * @throws IOException
   * @throws NoSuchAlgorithmException
   * @throws InterruptedException
   */
  private void runTest(boolean testWithIndex, int charsToOutput)
      throws IOException, NoSuchAlgorithmException, InterruptedException {

    Configuration conf = new Configuration();
    conf.setLong("fs.local.block.size", charsToOutput / 2);
    // reducing block size to force a split of the tiny file
    conf.set("io.compression.codecs", LzopCodec.class.getName());

    Assume.assumeTrue(CoreTestUtil.okToRunLzoTests(conf));

    FileSystem.getLocal(conf).close(); // remove cached filesystem (if any)
    FileSystem localFs = FileSystem.getLocal(conf);
    localFs.delete(outputDir_, true);
    localFs.mkdirs(outputDir_);

    Job job = new Job(conf);
    TextOutputFormat.setCompressOutput(job, true);
    TextOutputFormat.setOutputCompressorClass(job, LzopCodec.class);
    TextOutputFormat.setOutputPath(job, outputDir_);

    TaskAttemptContext attemptContext =
        new TaskAttemptContext(job.getConfiguration(), new TaskAttemptID("123", 0, false, 1, 2));

    // create some input data
    byte[] expectedMd5 = createTestInput(outputDir_, localFs, attemptContext, charsToOutput);

    if (testWithIndex) {
      Path lzoFile = new Path(outputDir_, lzoFileName_);
      LzoIndex.createIndex(localFs, lzoFile);
    }

    LzoTextInputFormat inputFormat = new LzoTextInputFormat();
    TextInputFormat.setInputPaths(job, outputDir_);

    List<InputSplit> is = inputFormat.getSplits(job);
    // verify we have the right number of lzo chunks
    if (testWithIndex && OUTPUT_BIG == charsToOutput) {
      assertEquals(3, is.size());
    } else {
      assertEquals(1, is.size());
    }

    // let's read it all and calculate the md5 hash
    for (InputSplit inputSplit : is) {
      RecordReader<LongWritable, Text> rr =
          inputFormat.createRecordReader(inputSplit, attemptContext);
      rr.initialize(inputSplit, attemptContext);

      while (rr.nextKeyValue()) {
        Text value = rr.getCurrentValue();

        md5_.update(value.getBytes(), 0, value.getLength());
      }

      rr.close();
    }

    localFs.close();
    assertTrue(Arrays.equals(expectedMd5, md5_.digest()));
  }
 public static boolean mkdir(final String outPathName) throws IOException {
   Configuration conf = new Configuration();
   Path path = new Path(outPathName);
   FileSystem fs = path.getFileSystem(conf);
   // Return the result of mkdirs rather than a hard-coded false.
   return fs.mkdirs(path);
 }
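 // Hedged usage of the helper above (the URI is illustrative only):
 public static void main(String[] args) throws IOException {
   boolean created = mkdir("hdfs://namenode:8020/tmp/out");
   System.out.println("mkdirs returned: " + created);
 }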
 @Before
 public void setup() throws IOException {
   configuration = new YarnConfiguration();
   configuration.set(YarnConfiguration.RM_SCHEDULER, CapacityScheduler.class.getCanonicalName());
   fs = FileSystem.get(configuration);
   workingPath =
       new Path(
           new File("target", this.getClass().getSimpleName() + "-remoteDir").getAbsolutePath());
   configuration.set(YarnConfiguration.FS_BASED_RM_CONF_STORE, workingPath.toString());
   tmpDir =
       new Path(new File("target", this.getClass().getSimpleName() + "-tmpDir").getAbsolutePath());
   fs.delete(workingPath, true);
   fs.delete(tmpDir, true);
   fs.mkdirs(workingPath);
   fs.mkdirs(tmpDir);
 }
 /*
  * Write some entries to the log file:
  * 7 different tables named "testtb-%d",
  * 10 regions per table named "tableName-region-%d",
  * 50 entries per region with row key "row-%d".
  */
 private void writeTestLog(final Path logFile) throws IOException {
   fs.mkdirs(logFile.getParent());
   HLog.Writer writer = HLog.createWriter(fs, logFile, conf);
   try {
     for (int i = 0; i < 7; ++i) {
       byte[] tableName = getTableName(i);
       for (int j = 0; j < 10; ++j) {
         byte[] regionName = getRegionName(tableName, j);
         for (int k = 0; k < 50; ++k) {
           byte[] rowkey = Bytes.toBytes("row-" + k);
           HLogKey key =
               new HLogKey(
                   regionName,
                   tableName,
                   (long) k,
                   System.currentTimeMillis(),
                   HConstants.DEFAULT_CLUSTER_ID);
           WALEdit edit = new WALEdit();
           edit.add(new KeyValue(rowkey, TEST_FAMILY, TEST_QUALIFIER, rowkey));
           writer.append(new HLog.Entry(key, edit));
         }
       }
     }
   } finally {
     writer.close();
   }
 }
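 // Sanity check for the layout documented above: 7 tables x 10 regions x 50
 // rows = 3,500 entries. A hedged reader sketch against the same-era HLog API:
 private int countTestLogEntries(final Path logFile) throws IOException {
   HLog.Reader reader = HLog.getReader(fs, logFile, conf);
   int count = 0;
   try {
     while (reader.next() != null) {
       count++;
     }
   } finally {
     reader.close();
   }
   return count; // expected: 7 * 10 * 50 = 3500
 }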
 private void createDirIfNotExist(Path path) throws IOException {
   if (!fs.exists(path)) {
     if (!fs.mkdirs(path)) {
       throw new IOException("Unable to create: " + path);
     }
   }
 }
  @Before
  public void setUp() throws Exception {
    fsTarget = fHdfs;
    fsTarget2 = fHdfs2;
    targetTestRoot = fileSystemTestHelper.getAbsoluteTestRootPath(fsTarget);
    targetTestRoot2 = fileSystemTestHelper.getAbsoluteTestRootPath(fsTarget2);

    fsTarget.delete(targetTestRoot, true);
    fsTarget2.delete(targetTestRoot2, true);
    fsTarget.mkdirs(targetTestRoot);
    fsTarget2.mkdirs(targetTestRoot2);

    fsViewConf = ViewFileSystemTestSetup.createConfig();
    setupMountPoints();
    fsView = FileSystem.get(FsConstants.VIEWFS_URI, fsViewConf);
  }
 /**
  * Copies image files from the local file system to HDFS.
  *
  * @param conf the job configuration
  * @param sourcePath local source path
  * @param destinationPath HDFS destination path
  * @throws Exception if the copy fails
  */
 public static void copyFromLocal(JobConf conf, String sourcePath, String destinationPath)
     throws Exception {
   FileSystem fs = FileSystem.get(conf);
   Path interPath = new Path(destinationPath);
   fs.mkdirs(interPath);
   fs.copyFromLocalFile(new Path(sourcePath), interPath);
 }
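 // Hedged usage (paths illustrative only): copies /data/images from the local
 // filesystem into the pre-created HDFS directory /user/hadoop/images.
 //   copyFromLocal(jobConf, "/data/images", "/user/hadoop/images");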
  // Create temp directory in HDFS to store logsearch logs before sorting
  public void tmpDirHDFS(
      boolean quiet, boolean silent, FileSystem fs, Configuration conf, String tmp, boolean log) {
    logConsole(quiet, silent, info, "Creating new Temp Directory in HDFS: " + tmp);

    try {
      Path path = new Path(tmp);
      if (!(fs.exists(path))) {
        // Create directory
        fs.mkdirs(path);
        if (!log) {
          fs.deleteOnExit(path);
        }
      }
    } catch (IOException e) {
      if (e.toString().contains("Failed to find any Kerberos")) {
        logConsole(true, true, error, "No/bad Kerberos ticket - please authenticate.");
        System.exit(1);
      } else if (e.toString().contains("quota") && e.toString().contains("exceeded")) {
        logConsole(true, true, error, "Disk quota Exceeded.");
        System.exit(1);
      }
      e.printStackTrace();
      System.exit(1);
    }
  }
  public static void addTxt(
      HttpServletRequest request, HttpServletResponse response, OutputStreamWriter outStream)
      throws Exception {
    SimpleDateFormat fmt = new SimpleDateFormat("yyyyMMdd");
    String day = fmt.format(new Date());
    Map stormconf = Utils.readStormConfig();
    String store =
        (String) stormconf.get("higo.download.offline.store")
            + "/"
            + day
            + "/upload_"
            + java.util.UUID.randomUUID().toString();

    Configuration conf = getConf(stormconf);
    FileSystem fs = FileSystem.get(conf);
    if (!fs.exists(new Path(store))) {
      fs.mkdirs(new Path(store));
    }

    HashMap<String, String> params = new HashMap<String, String>();
    Path outpath = new Path(store, String.valueOf(System.currentTimeMillis()));
    FSDataOutputStream out = fs.create(outpath);
    Upload up = new Upload();
    up.mergerTo(request, response, "utf-8", out, params);
    out.close();
    String rtn = addTxt(params.get("tableName"), store, params.get("callback"));

    outStream.append(rtn);
  }
 /**
  * Creates the directory if it does not already exist.
  *
  * @param directory the directory to create
  * @param conf Hadoop Configuration
  * @throws java.io.IOException if the HDFS operation fails
  */
 public static void makeDirectoryIfNotExists(String directory, Configuration conf)
     throws IOException {
   FileSystem fileSystem = FileSystem.get(conf);
   if (!isExist(conf, directory) && !isDirectory(fileSystem, directory)) {
     fileSystem.mkdirs(new Path(directory));
   }
 }
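 // isExist() and isDirectory() are helpers assumed above; minimal sketches:
 public static boolean isExist(Configuration conf, String path) throws IOException {
   return FileSystem.get(conf).exists(new Path(path));
 }

 public static boolean isDirectory(FileSystem fs, String path) throws IOException {
   Path p = new Path(path);
   return fs.exists(p) && fs.getFileStatus(p).isDirectory();
 }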
 @BeforeClass
 public static void setUpBeforeClass() throws Exception {
   File minidfsDir = new File("target/minidfs-" + UUID.randomUUID()).getAbsoluteFile();
   minidfsDir.mkdirs();
   Assert.assertTrue(minidfsDir.exists());
   System.setProperty(MiniDFSCluster.PROP_TEST_BUILD_DATA, minidfsDir.getPath());
   Configuration conf = new HdfsConfiguration();
   conf.set("dfs.namenode.fs-limits.min-block-size", String.valueOf(32));
   EditLogFileOutputStream.setShouldSkipFsyncForTesting(true);
   miniDFS = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
   dir = new Path(miniDFS.getURI() + "/dir");
   FileSystem fs = miniDFS.getFileSystem();
   fs.mkdirs(dir);
   writeFile(fs, new Path(dir + "/forAllTests/" + "path"), 1000);
   dummyEtc = new File(minidfsDir, "dummy-etc");
   dummyEtc.mkdirs();
   Assert.assertTrue(dummyEtc.exists());
   Configuration dummyConf = new Configuration(false);
   for (String file : new String[] {"core", "hdfs", "mapred", "yarn"}) {
     File siteXml = new File(dummyEtc, file + "-site.xml");
     FileOutputStream out = new FileOutputStream(siteXml);
     dummyConf.writeXml(out);
     out.close();
   }
   resourcesDir = minidfsDir.getAbsolutePath();
   hadoopConfDir = dummyEtc.getName();
   System.setProperty("sdc.resources.dir", resourcesDir);
   ;
 }
  public void sequenceFileRenameRetryCoreTest(int numberOfRetriesRequired, boolean closeSucceed)
      throws Exception {
    String hdfsPath =
        "file:///tmp/flume-test."
            + Calendar.getInstance().getTimeInMillis()
            + "."
            + Thread.currentThread().getId();

    Context context = new Context();
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    Path dirPath = new Path(hdfsPath);
    fs.delete(dirPath, true);
    fs.mkdirs(dirPath);
    context.put("hdfs.path", hdfsPath);
    context.put("hdfs.closeTries", String.valueOf(numberOfRetriesRequired));
    context.put("hdfs.rollCount", "1");
    context.put("hdfs.retryInterval", "1");
    context.put("hdfs.callTimeout", Long.toString(1000));
    MockFileSystem mockFs = new MockFileSystem(fs, numberOfRetriesRequired, closeSucceed);
    BucketWriter bucketWriter =
        new BucketWriter(
            0,
            0,
            1,
            1,
            ctx,
            hdfsPath,
            hdfsPath,
            "singleBucket",
            ".tmp",
            null,
            null,
            null,
            new MockDataStream(mockFs),
            timedRollerPool,
            proxy,
            new SinkCounter("test-bucket-writer-" + System.currentTimeMillis()),
            0,
            null,
            null,
            30000,
            Executors.newSingleThreadExecutor(),
            1,
            numberOfRetriesRequired);

    bucketWriter.setFileSystem(mockFs);
    // At this point we have already checked whether isFileClosed is available
    // in this JVM, so make it check again.
    Event event = EventBuilder.withBody("test", Charsets.UTF_8);
    bucketWriter.append(event);
    // This is what triggers the close, so a 2nd append is required :/
    bucketWriter.append(event);

    TimeUnit.SECONDS.sleep(numberOfRetriesRequired + 2);

    Assert.assertEquals(
        "Unexpected number of rename tries",
        numberOfRetriesRequired,
        bucketWriter.renameTries.get());
  }
  @SuppressWarnings("rawtypes")
  static Path createConfigurationFileInFs(
      FileSystem fs, String appHome, Map stormConf, YarnConfiguration yarnConf) throws IOException {
    // Dump the Storm config into <appHome>/conf/storm.yaml on the target FS.
    Path confDst =
        new Path(fs.getHomeDirectory(), appHome + Path.SEPARATOR + STORM_CONF_PATH_STRING);
    Path dirDst = confDst.getParent();
    fs.mkdirs(dirDst);

    // storm.yaml
    FSDataOutputStream out = fs.create(confDst);
    Yaml yaml = new Yaml();
    OutputStreamWriter writer = new OutputStreamWriter(out);
    rmNulls(stormConf);
    yaml.dump(stormConf, writer);
    writer.close();
    out.close();

    // yarn-site.xml
    Path yarn_site_xml = new Path(dirDst, "yarn-site.xml");
    out = fs.create(yarn_site_xml);
    writer = new OutputStreamWriter(out);
    yarnConf.writeXml(writer);
    writer.close();
    out.close();

    // logback.xml
    Path logback_xml = new Path(dirDst, "logback.xml");
    out = fs.create(logback_xml);
    CreateLogbackXML(out);
    out.close();

    return dirDst;
  }
  public HdfsDirectory(Path hdfsDirPath, LockFactory lockFactory, Configuration configuration)
      throws IOException {
    super(lockFactory);
    this.hdfsDirPath = hdfsDirPath;
    this.configuration = configuration;
    fileSystem = FileSystem.get(hdfsDirPath.toUri(), configuration);
    fileContext = FileContext.getFileContext(hdfsDirPath.toUri(), configuration);

    if (fileSystem instanceof DistributedFileSystem) {
      // Make sure dfs is not in safe mode
      while (((DistributedFileSystem) fileSystem).setSafeMode(SafeModeAction.SAFEMODE_GET, true)) {
        LOG.warn("The NameNode is in SafeMode - Solr will wait 5 seconds and try again.");
        try {
          Thread.sleep(5000);
        } catch (InterruptedException e) {
          Thread.interrupted(); // clear the interrupt flag and keep waiting
        }
      }
    }

    try {
      if (!fileSystem.exists(hdfsDirPath)) {
        boolean success = fileSystem.mkdirs(hdfsDirPath);
        if (!success) {
          throw new RuntimeException("Could not create directory: " + hdfsDirPath);
        }
      }
    } catch (Exception e) {
      org.apache.solr.common.util.IOUtils.closeQuietly(fileSystem);
      throw new RuntimeException("Problem creating directory: " + hdfsDirPath, e);
    }
  }
  @Override
  public boolean initializeReader(Properties configuration) {
    try {
      hdfsConfiguration = new org.apache.hadoop.conf.Configuration(false);
      hdfsConfiguration.set("fs.defaultFS", configuration.getProperty("hdfs.url"));
      hdfsConfiguration.set(
          "fs.hdfs.impl", org.apache.hadoop.hdfs.DistributedFileSystem.class.getName());
      hdfsConfiguration.set("fs.file.impl", org.apache.hadoop.fs.LocalFileSystem.class.getName());

      hdfsConfiguration.set("hdfs.url", configuration.getProperty("hdfs.url"));

      hdfsConfiguration.set("prefix", configuration.getProperty("prefix"));
      fileSystem =
          FileSystem.get(
              getDefaultUri(hdfsConfiguration),
              hdfsConfiguration,
              configuration.getProperty("hdfs.user"));

      if (!fileSystem.exists(basePath)) {
        log.info("Creating base path on HDFS " + configuration.getProperty("hdfs.url"));
        fileSystem.mkdirs(basePath);
      }

    } catch (Exception e) {
      log.error(
          "Could not create HDFS remote FileSystem using \n" + configuration.toString() + "\n", e);
      return false;
    }
    return true;
  }
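  // Hedged usage sketch: the Properties keys consumed above are "hdfs.url",
  // "prefix", and "hdfs.user" (the values below are illustrative only):
  public static Properties exampleReaderConfiguration() {
    Properties props = new Properties();
    props.setProperty("hdfs.url", "hdfs://namenode:8020");
    props.setProperty("prefix", "/user/example/");
    props.setProperty("hdfs.user", "hdfs");
    return props;
  }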