Example #1
 /**
  * Copy a directory to a new FS - both paths must be qualified.
  *
  * @param conf configuration
  * @param srcDirPath src dir
  * @param destDirPath dest dir
  * @return number of files copied
  */
 public static int copyDirectory(Configuration conf, Path srcDirPath, Path destDirPath)
     throws IOException {
   FileSystem srcFS = FileSystem.get(srcDirPath.toUri(), conf);
   FileSystem destFS = FileSystem.get(destDirPath.toUri(), conf);
   // list all paths in the src.
   if (!srcFS.exists(srcDirPath)) {
     throw new FileNotFoundException("Source dir not found " + srcDirPath);
   }
   if (!srcFS.isDirectory(srcDirPath)) {
     throw new FileNotFoundException("Source dir not a directory " + srcDirPath);
   }
   FileStatus[] entries = srcFS.listStatus(srcDirPath);
   int srcFileCount = entries.length;
   if (srcFileCount == 0) {
     return 0;
   }
   if (!destFS.exists(destDirPath)) {
     destFS.mkdirs(destDirPath);
   }
   Path[] sourcePaths = new Path[srcFileCount];
   for (int i = 0; i < srcFileCount; i++) {
     FileStatus e = entries[i];
     Path srcFile = e.getPath();
     if (srcFS.isDirectory(srcFile)) {
       throw new IOException(
           "Configuration dir " + srcDirPath + " contains a directory " + srcFile);
     }
     log.debug("copying src conf file {}", srcFile);
     sourcePaths[i] = srcFile;
   }
   log.debug("Copying {} files from to {} to dest {}", srcFileCount, srcDirPath, destDirPath);
   FileUtil.copy(srcFS, sourcePaths, destFS, destDirPath, false, true, conf);
   return srcFileCount;
 }
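
A minimal usage sketch for the helper above; the configuration and the qualified paths are hypothetical, not from the original source:

 public static void main(String[] args) throws IOException {
   Configuration conf = new Configuration();
   Path src = new Path("hdfs://namenode:8020/conf/src"); // hypothetical qualified path
   Path dest = new Path("hdfs://namenode:8020/conf/dest"); // hypothetical qualified path
   int copied = copyDirectory(conf, src, dest);
   System.out.println("Copied " + copied + " files");
 }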
Example #2
  @Override
  public boolean nextKeyValue() throws IOException, InterruptedException {
    FileSystem fileSystem = FileSystem.get(configuration);

    if (fileSystem.isDirectory(split.getPath())) {
      return false;
    }

    if (fileProcessed) {
      return false;
    }

     // note: the int cast assumes the whole file fits in memory and is smaller than 2 GB
     int fileLength = (int) split.getLength();
    byte[] result = new byte[fileLength];

    FSDataInputStream inputStream = null;

    try {
      inputStream = fileSystem.open(split.getPath());
      IOUtils.readFully(inputStream, result, 0, fileLength);
      currentValue.set(result, 0, fileLength);
    } finally {
      IOUtils.closeStream(inputStream);
    }
    fileProcessed = true;
    return true;
  }
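
For context, a whole-file reader like nextKeyValue() above is normally paired with an InputFormat that disables splitting, so each split covers exactly one file. A sketch under that assumption (the class name, the key/value types, and WholeFileRecordReader are illustrative, not from the original source):

 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.io.BytesWritable;
 import org.apache.hadoop.io.NullWritable;
 import org.apache.hadoop.mapreduce.InputSplit;
 import org.apache.hadoop.mapreduce.JobContext;
 import org.apache.hadoop.mapreduce.RecordReader;
 import org.apache.hadoop.mapreduce.TaskAttemptContext;
 import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;

 public class WholeFileInputFormat extends FileInputFormat<NullWritable, BytesWritable> {
   @Override
   protected boolean isSplitable(JobContext context, Path file) {
     return false; // one split per file, so the reader can load the file whole
   }

   @Override
   public RecordReader<NullWritable, BytesWritable> createRecordReader(
       InputSplit split, TaskAttemptContext context) {
     return new WholeFileRecordReader(); // hypothetical reader built around nextKeyValue()
   }
 }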
Example #3
 public void getStats(Path segment, final SegmentReaderStats stats) throws Exception {
   SequenceFile.Reader[] readers =
       SequenceFileOutputFormat.getReaders(
           getConf(), new Path(segment, CrawlDatum.GENERATE_DIR_NAME));
   long cnt = 0L;
   Text key = new Text();
   for (int i = 0; i < readers.length; i++) {
     while (readers[i].next(key)) cnt++;
     readers[i].close();
   }
   stats.generated = cnt;
   Path fetchDir = new Path(segment, CrawlDatum.FETCH_DIR_NAME);
   if (fs.exists(fetchDir) && fs.isDirectory(fetchDir)) {
     cnt = 0L;
     long start = Long.MAX_VALUE;
     long end = Long.MIN_VALUE;
     CrawlDatum value = new CrawlDatum();
     MapFile.Reader[] mreaders = MapFileOutputFormat.getReaders(fs, fetchDir, getConf());
     for (int i = 0; i < mreaders.length; i++) {
       while (mreaders[i].next(key, value)) {
         cnt++;
         if (value.getFetchTime() < start) start = value.getFetchTime();
         if (value.getFetchTime() > end) end = value.getFetchTime();
       }
       mreaders[i].close();
     }
     stats.start = start;
     stats.end = end;
     stats.fetched = cnt;
   }
   Path parseDir = new Path(segment, ParseData.DIR_NAME);
    if (fs.exists(parseDir) && fs.isDirectory(parseDir)) {
     cnt = 0L;
     long errors = 0L;
     ParseData value = new ParseData();
     MapFile.Reader[] mreaders = MapFileOutputFormat.getReaders(fs, parseDir, getConf());
     for (int i = 0; i < mreaders.length; i++) {
       while (mreaders[i].next(key, value)) {
         cnt++;
         if (!value.getStatus().isSuccess()) errors++;
       }
       mreaders[i].close();
     }
     stats.parsed = cnt;
     stats.parseErrors = errors;
   }
 }
Example #4
  @Test
  public final void testCtasWithoutTableDefinition() throws Exception {
    ResultSet res = executeQuery();
    res.close();

    String tableName = CatalogUtil.normalizeIdentifier("testCtasWithoutTableDefinition");
    CatalogService catalog = testBase.getTestingCluster().getMaster().getCatalog();
    String qualifiedTableName = buildFQName(DEFAULT_DATABASE_NAME, tableName);
    TableDesc desc = catalog.getTableDesc(qualifiedTableName);
    assertTrue(catalog.existsTable(qualifiedTableName));

    assertTrue(desc.getSchema().contains("default.testctaswithouttabledefinition.col1"));
    PartitionMethodDesc partitionDesc = desc.getPartitionMethod();
     assertEquals(CatalogProtos.PartitionType.COLUMN, partitionDesc.getPartitionType());
    assertEquals(
        "key", partitionDesc.getExpressionSchema().getRootColumns().get(0).getSimpleName());

    FileSystem fs = FileSystem.get(testBase.getTestingCluster().getConfiguration());
    Path path = new Path(desc.getUri());
    assertTrue(fs.isDirectory(path));
    assertTrue(fs.isDirectory(new Path(path.toUri() + "/key=17.0")));
    assertTrue(fs.isDirectory(new Path(path.toUri() + "/key=36.0")));
    assertTrue(fs.isDirectory(new Path(path.toUri() + "/key=38.0")));
    assertTrue(fs.isDirectory(new Path(path.toUri() + "/key=45.0")));
    assertTrue(fs.isDirectory(new Path(path.toUri() + "/key=49.0")));
    if (!testingCluster.isHiveCatalogStoreRunning()) {
      assertEquals(5, desc.getStats().getNumRows().intValue());
    }

    ResultSet res2 = executeFile("check1.sql");

    Map<Double, int[]> resultRows1 = Maps.newHashMap();
    resultRows1.put(45.0d, new int[] {3, 2});
    resultRows1.put(38.0d, new int[] {2, 2});

    int i = 0;
    while (res2.next()) {
      assertEquals(resultRows1.get(res2.getDouble(3))[0], res2.getInt(1));
      assertEquals(resultRows1.get(res2.getDouble(3))[1], res2.getInt(2));
      i++;
    }
    res2.close();
    assertEquals(2, i);
  }
Example #5
  @BeforeClass
  public static void setUpBeforeClass() throws Exception {
    // Start up our mini cluster on top of a 0.92 root.dir that has data from
    // a 0.92 hbase run -- it has a table with 100 rows in it -- and see if
    // we can migrate from 0.92
    TEST_UTIL.startMiniZKCluster();
    TEST_UTIL.startMiniDFSCluster(1);
    Path testdir = TEST_UTIL.getDataTestDir("TestMetaMigrationConvertToPB");
    // Untar our test dir.
    File untar = untar(new File(testdir.toString()));
    // Now copy the untar up into hdfs so when we start hbase, we'll run from it.
    Configuration conf = TEST_UTIL.getConfiguration();
    FsShell shell = new FsShell(conf);
    FileSystem fs = FileSystem.get(conf);
    // find where hbase will root itself, so we can copy filesystem there
    Path hbaseRootDir = TEST_UTIL.getDefaultRootDirPath();
    if (!fs.isDirectory(hbaseRootDir.getParent())) {
      // mkdir at first
      fs.mkdirs(hbaseRootDir.getParent());
    }
    doFsCommand(shell, new String[] {"-put", untar.toURI().toString(), hbaseRootDir.toString()});

    // Windows fix: the tgz file has the .META. directory renamed as -META- since the
    // original is an illegal name under Windows. So we rename it back. See
    // src/test/data//TestMetaMigrationConvertingToPB.README and
    // https://issues.apache.org/jira/browse/HBASE-6821
    doFsCommand(
        shell,
        new String[] {
          "-mv",
          new Path(hbaseRootDir, "-META-").toString(),
          new Path(hbaseRootDir, ".META.").toString()
        });
    // See what's in minihdfs.
    doFsCommand(shell, new String[] {"-lsr", "/"});
    TEST_UTIL.startMiniHBaseCluster(1, 1);
    // Assert we are running against the copied-up filesystem.  The copied-up
    // rootdir should have had a table named 'TestTable' in it.  Assert it
    // present.
    HTable t = new HTable(TEST_UTIL.getConfiguration(), TESTTABLE);
    ResultScanner scanner = t.getScanner(new Scan());
    int count = 0;
    while (scanner.next() != null) {
      count++;
    }
    // Assert that we find all 100 rows that are in the data we loaded. If
    // so then the migration must have succeeded.
    Assert.assertEquals(ROW_COUNT, count);
    scanner.close();
    t.close();
  }
Example #6
  @org.junit.Test
  public void testZDirs() throws Exception {
    final Path subDir1 = new Path("td_dir.1");
    final Path baseDir = new Path("td_testDirs1");
    final Path test1 = new Path("td_test1");
    final Path test2 = new Path("td_test/dir.2");

    assertFalse(fs.exists(baseDir));
    assertFalse(fs.isDirectory(baseDir));

    // make the dir
    fs.mkdirs(baseDir);

    assertTrue(fs.isDirectory(baseDir));
    // fs.setWorkingDirectory(baseDir);

    fs.mkdirs(subDir1);

    assertTrue(fs.isDirectory(subDir1));

    assertFalse(fs.exists(test1));

    assertFalse(fs.isDirectory(test2));

    fs.create(new Path(baseDir, "dummyfile"));
    FileStatus[] p = fs.listStatus(baseDir);
     assertEquals(1, p.length);

    fs.delete(baseDir, true);
    assertFalse(fs.exists(baseDir));

    fs.delete(subDir1, true);
    assertFalse(fs.exists(subDir1));

     fs.delete(baseDir, true);
     fs.delete(test1, true);
     fs.delete(test2, true);
  }
Example #7
   /**
    * Get the rootdir. Make sure it's wholesome and exists before returning.
    * @param rd
    * @param c
    * @param fs
    * @return hbase.rootdir (after checks for existence and bootstrapping if
    * needed, populating the directory with necessary bootup files).
    * @throws IOException
    */
  private Path checkRootDir(final Path rd, final Configuration c,
    final FileSystem fs)
  throws IOException {
    // If FS is in safe mode wait till out of it.
    FSUtils.waitOnSafeMode(c, c.getInt(HConstants.THREAD_WAKE_FREQUENCY,
        10 * 1000));
    // Filesystem is good. Go ahead and check for hbase.rootdir.
    try {
      if (!fs.exists(rd)) {
        fs.mkdirs(rd);
        // DFS leaves safe mode with 0 DNs when there are 0 blocks.
        // We used to handle this by checking the current DN count and waiting until
        // it is nonzero. With security, the check for datanode count doesn't work --
        // it is a privileged op. So instead we adopt the strategy of the jobtracker
        // and simply retry file creation during bootstrap indefinitely. As soon as
        // there is one datanode it will succeed. Permission problems should have
        // already been caught by mkdirs above.
        FSUtils.setVersion(fs, rd,
            c.getInt(HConstants.THREAD_WAKE_FREQUENCY, 10 * 1000),
            c.getInt(HConstants.VERSION_FILE_WRITE_ATTEMPTS,
                HConstants.DEFAULT_VERSION_FILE_WRITE_ATTEMPTS));
      } else {
        if (!fs.isDirectory(rd)) {
          throw new IllegalArgumentException(rd.toString() + " is not a directory");
        }
        // as above
        FSUtils.checkVersion(fs, rd, true,
            c.getInt(HConstants.THREAD_WAKE_FREQUENCY, 10 * 1000),
            c.getInt(HConstants.VERSION_FILE_WRITE_ATTEMPTS,
                HConstants.DEFAULT_VERSION_FILE_WRITE_ATTEMPTS));
      }
    } catch (IllegalArgumentException iae) {
      LOG.fatal("Please fix invalid configuration for "
        + HConstants.HBASE_DIR + " " + rd.toString(), iae);
      throw iae;
    }
    // Make sure cluster ID exists
    if (!FSUtils.checkClusterIdExists(fs, rd, c.getInt(
        HConstants.THREAD_WAKE_FREQUENCY, 10 * 1000))) {
      FSUtils.setClusterId(fs, rd, UUID.randomUUID().toString(), c.getInt(
          HConstants.THREAD_WAKE_FREQUENCY, 10 * 1000));
    }
    clusterId = FSUtils.getClusterId(fs, rd);

    // Make sure the root region directory exists!
    if (!FSUtils.rootRegionExists(fs, rd)) {
      bootstrap(rd, c);
    }
    createRootTableInfo(rd);
    return rd;
  }
Example #8
 private static void validateOrMakeDir(FileSystem fs, Path dir, String dirDescription) {
   try {
     if (fs.exists(dir)) {
       if (!fs.isDirectory(dir)) {
         LOG.error(dirDescription + " directory is a file, not a dir. " + dir);
         throw new RuntimeException(dirDescription + " directory is a file, not a dir. " + dir);
       }
     } else if (!fs.mkdirs(dir)) {
       LOG.error("Unable to create " + dirDescription + " directory " + dir);
       throw new RuntimeException("Unable to create " + dirDescription + " directory " + dir);
     }
   } catch (IOException e) {
     LOG.error("Unable to create " + dirDescription + " directory " + dir, e);
     throw new RuntimeException("Unable to create " + dirDescription + " directory " + dir, e);
   }
 }
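
A usage sketch for validateOrMakeDir; the path and the description string are hypothetical:

 public static void main(String[] args) throws IOException {
   FileSystem fs = FileSystem.get(new Configuration());
   validateOrMakeDir(fs, new Path("/app/storage"), "Storage");
 }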
Example #9
 public static boolean chmod(Path path, String chmodStr, boolean recursive) {
   boolean result = false;
   try {
     fs.setPermission(path, FsPermission.valueOf(chmodStr));
     if (recursive) {
       FileStatus[] stats = fs.listStatus(path);
       for (FileStatus stat : stats) {
         Path subPath = stat.getPath();
         fs.setPermission(subPath, FsPermission.valueOf(chmodStr));
         if (fs.isDirectory(subPath)) {
           chmod(subPath, chmodStr, recursive);
         }
       }
     }
     result = true; // only report success if no exception was thrown
   } catch (Exception e) {
     LOG.error("Error changing file permissions: " + e.getMessage());
     e.printStackTrace();
   }
   return result;
 }
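
Note that FsPermission.valueOf expects an ls-style symbolic string including the leading file-type character, e.g. "-rwxr-xr-x" (10 characters), not a bare "rwxr-xr-x". A usage sketch with a hypothetical path:

 // recursively apply rwxr-xr-x to /data/shared (hypothetical path)
 boolean ok = chmod(new Path("/data/shared"), "-rwxr-xr-x", true);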
Example #10
  @org.junit.Test
  public void testFiles() throws Exception {

    Path subDir1 = new Path("tf_dir.1");
    Path baseDir = new Path("tf_testDirs1");
    Path file1 = new Path("tf_dir.1/foo.1");
    Path file2 = new Path("tf_dir.1/foo.2");

    fs.mkdirs(baseDir);
    assertTrue(fs.isDirectory(baseDir));
    // fs.setWorkingDirectory(baseDir);

    fs.mkdirs(subDir1);

    FSDataOutputStream s1 = fs.create(file1, true, 4096, (short) 1, (long) 4096, null);
    FSDataOutputStream s2 = fs.create(file2, true, 4096, (short) 1, (long) 4096, null);

    s1.close();
    s2.close();

    FileStatus[] p = fs.listStatus(subDir1);
     assertEquals(2, p.length);

    fs.delete(file1, true);
    p = fs.listStatus(subDir1);
     assertEquals(1, p.length);

    fs.delete(file2, true);
    p = fs.listStatus(subDir1);
     assertEquals(0, p.length);

    fs.delete(baseDir, true);
    assertFalse(fs.exists(baseDir));

     fs.delete(subDir1, true);
     fs.delete(file1, true);
     fs.delete(file2, true);
  }
Example #11
  public BufferedReader loadDataFromFile(String filepath, long offset) {
    try {
      Path pt = new Path(filepath);
      FileSystem fs = FileSystem.get(fsConf);
      InputStreamReader isr;
      if (fs.isDirectory(pt)) { // multiple parts
        isr = new InputStreamReader(OpenMultiplePartsWithOffset(fs, pt, offset));
      } else { // single file
        FSDataInputStream fileHandler = fs.open(pt);
        if (offset > 0) fileHandler.seek(offset);
        isr = new InputStreamReader(fileHandler);
      }

      BufferedReader reader = new BufferedReader(isr);
      if (offset > 0) reader.readLine(); // skip first line in case of seek
      return reader;
    } catch (FileNotFoundException e) {
      e.printStackTrace();
    } catch (IOException e) {
      e.printStackTrace();
    }
    assert false : "Should not reach here!";
    return null;
  }
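
A usage sketch for loadDataFromFile; the path is hypothetical and the reader is drained line by line:

 BufferedReader reader = loadDataFromFile("/data/output/part-00000", 0L);
 String line;
 while ((line = reader.readLine()) != null) {
   System.out.println(line);
 }
 reader.close();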
Example #12
  /**
   * @param srcFileName
   * @param destFileName
   * @param rlen
   * @param clen
   * @throws IOException
   */
  @SuppressWarnings("unchecked")
  public final void addHeaderToCSV(String srcFileName, String destFileName, long rlen, long clen)
      throws IOException {
    Configuration conf = new Configuration(ConfigurationManager.getCachedJobConf());

    Path srcFilePath = new Path(srcFileName);
    Path destFilePath = new Path(destFileName);
    FileSystem hdfs = FileSystem.get(conf);

    if (!_props.hasHeader()) {
      // simply move srcFile to destFile

      /*
       * TODO: Remove this roundabout way!
       * For example: destFilePath = /user/biadmin/csv/temp/out/file.csv
       *              & the only path that exists already on HDFS is /user/biadmin/csv/.
       * In this case: the directory structure /user/biadmin/csv/temp/out must be created.
       * Simple hdfs.rename() does not seem to create this directory structure.
       */
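      // A simpler alternative (a sketch, not verified against this codebase) would be
      // to create the missing parent directories explicitly and then rename directly:
      //   hdfs.mkdirs(destFilePath.getParent());
      //   hdfs.rename(srcFilePath, destFilePath);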

      // delete the destination file, if exists already
      // boolean ret1 =
      hdfs.delete(destFilePath, true);

      // Create /user/biadmin/csv/temp/out/file.csv so that ..../temp/out/ is created.
      // boolean ret2 =
      hdfs.createNewFile(destFilePath);

      // delete the file "file.csv" but preserve the directory structure /user/biadmin/csv/temp/out/
      // boolean ret3 =
      hdfs.delete(destFilePath, true);

      // finally, move the data to destFilePath = /user/biadmin/csv/temp/out/file.csv
      // boolean ret4 =
      hdfs.rename(srcFilePath, destFilePath);

      // System.out.println("Return values = del:" + ret1 + ", createNew:" + ret2 + ", del:" + ret3
      // + ", rename:" + ret4);
      return;
    }

    // construct the header line
    StringBuilder sb = new StringBuilder();
    for (int i = 0; i < clen; i++) {
      sb.append("C" + (i + 1));
      if (i < clen - 1) sb.append(_props.getDelim());
    }
    sb.append('\n');

    if (hdfs.isDirectory(srcFilePath)) {

      // compute sorted order among part files
      ArrayList<Path> files = new ArrayList<Path>();
      for (FileStatus stat : hdfs.listStatus(srcFilePath, CSVReblockMR.hiddenFileFilter))
        files.add(stat.getPath());
      Collections.sort(files);

      // first part file path
      Path firstpart = files.get(0);

      // create a temp file, and add header and contents of first part
      Path tmp = new Path(firstpart.toString() + ".tmp");
      OutputStream out = hdfs.create(tmp, true);
      out.write(sb.toString().getBytes());
      sb.setLength(0);

      // copy rest of the data from firstpart
      InputStream in = null;
      try {
        in = hdfs.open(firstpart);
        IOUtils.copyBytes(in, out, conf, true);
      } finally {
        IOUtilFunctions.closeSilently(in);
        IOUtilFunctions.closeSilently(out);
      }

      // rename tmp to firstpart
      hdfs.delete(firstpart, true);
      hdfs.rename(tmp, firstpart);

      // rename srcfile to destFile
      hdfs.delete(destFilePath, true);
      hdfs.createNewFile(destFilePath); // force the creation of directory structure
      hdfs.delete(destFilePath, true); // delete the file, but preserve the directory structure
      hdfs.rename(srcFilePath, destFilePath); // move the data

    } else if (hdfs.isFile(srcFilePath)) {
      // create destination file
      OutputStream out = hdfs.create(destFilePath, true);

      // write header
      out.write(sb.toString().getBytes());
      sb.setLength(0);

      // copy the data from srcFile
      InputStream in = null;
      try {
        in = hdfs.open(srcFilePath);
        IOUtils.copyBytes(in, out, conf, true);
      } finally {
        IOUtilFunctions.closeSilently(in);
        IOUtilFunctions.closeSilently(out);
      }
    } else {
      throw new IOException(srcFilePath.toString() + ": No such file or directory");
    }
  }
Example #13
  /**
   * Method to merge multiple CSV part files on HDFS into a single CSV file on HDFS. The part files
   * are created by CSV_WRITE MR job.
   *
   * <p>This method is invoked from CP-write instruction.
   *
   * @param srcFileName
   * @param destFileName
   * @param csvprop
   * @param rlen
   * @param clen
   * @throws IOException
   */
  public final void mergeCSVPartFiles(
      String srcFileName,
      String destFileName,
      CSVFileFormatProperties csvprop,
      long rlen,
      long clen)
      throws IOException {
    Configuration conf = new Configuration(ConfigurationManager.getCachedJobConf());

    Path srcFilePath = new Path(srcFileName);
    Path mergedFilePath = new Path(destFileName);
    FileSystem hdfs = FileSystem.get(conf);

    if (hdfs.exists(mergedFilePath)) {
      hdfs.delete(mergedFilePath, true);
    }
    OutputStream out = hdfs.create(mergedFilePath, true);

    // write out the header, if needed
    if (csvprop.hasHeader()) {
      StringBuilder sb = new StringBuilder();
      for (int i = 0; i < clen; i++) {
        sb.append("C" + (i + 1));
        if (i < clen - 1) sb.append(csvprop.getDelim());
      }
      sb.append('\n');
      out.write(sb.toString().getBytes());
      sb.setLength(0);
    }

    // if the source is a directory
    if (hdfs.isDirectory(srcFilePath)) {
      try {
        FileStatus[] contents = hdfs.listStatus(srcFilePath);
        // collect part files into a list so that subdirectories do not leave
        // null gaps that would make Arrays.sort() throw a NullPointerException
        ArrayList<Path> partPathList = new ArrayList<Path>();
        for (int i = 0; i < contents.length; i++) {
          if (!contents[i].isDirectory()) {
            partPathList.add(contents[i].getPath());
          }
        }
        Path[] partPaths = partPathList.toArray(new Path[partPathList.size()]);
        Arrays.sort(partPaths);
        int numPartFiles = partPaths.length;

        for (int i = 0; i < numPartFiles; i++) {
          InputStream in = hdfs.open(partPaths[i]);
          try {
            IOUtils.copyBytes(in, out, conf, false);
            if (i < numPartFiles - 1) out.write('\n');
          } finally {
            IOUtilFunctions.closeSilently(in);
          }
        }
      } finally {
        IOUtilFunctions.closeSilently(out);
      }
    } else if (hdfs.isFile(srcFilePath)) {
      InputStream in = null;
      try {
        in = hdfs.open(srcFilePath);
        IOUtils.copyBytes(in, out, conf, true);
      } finally {
        IOUtilFunctions.closeSilently(in);
        IOUtilFunctions.closeSilently(out);
      }
    } else {
      throw new IOException(srcFilePath.toString() + ": No such file or directory");
    }
  }
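
For comparison, Hadoop 2.x ships a built-in merge helper, FileUtil.copyMerge (removed in Hadoop 3.x). A minimal sketch; unlike mergeCSVPartFiles it cannot prepend a header row:

 // Merge all files under srcFilePath into mergedFilePath; the final String
 // argument is appended after each file (null appends nothing).
 FileUtil.copyMerge(hdfs, srcFilePath, hdfs, mergedFilePath, false, conf, null);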
Example #14
  /** Tests file status operations. */
  public void fileStatusTest(int mod) throws IOException {
    Configuration conf = new Configuration();
    conf.setStrings("dfs.namenode.port.list", namenodestr);
    MiniMNDFSCluster cluster = new MiniMNDFSCluster(conf, 1, 1, true, null);
    cluster.waitDatanodeDie();

    FileSystem fs = cluster.getFileSystem(0);
    final DFSClient dfsClient;
    if (mod == 1) dfsClient = new DFSClient(cluster.getNameNode(0).getNameNodeAddress(), conf);
    else dfsClient = new DFSClient(cluster.getNameNode(1).getNameNodeAddress(), conf);
    try {

      //
      // check that / exists
      //
      Path path = new Path("/");
      System.out.println("Path : \"" + path.toString() + "\"");
      System.out.println(fs.isDirectory(path));
      System.out.println(fs.getFileStatus(path).isDir());
      assertTrue("/ should be a directory", fs.getFileStatus(path).isDir() == true);

      // make sure getFileInfo returns null for files which do not exist
      FileStatus fileInfo = dfsClient.getFileInfo("/noSuchFile");
      assertTrue(fileInfo == null);

      // create a file in home directory
      //
      Path file1 = new Path("filestatus.dat");
      writeFile(fs, file1, 1, fileSize, blockSize);
      System.out.println("Created file filestatus.dat with one " + " replicas.");
      checkFile(fs, file1, 1);
       assertTrue(file1 + " should be a file", !fs.getFileStatus(file1).isDir());
      assertTrue(fs.getFileStatus(file1).getBlockSize() == blockSize);
      assertTrue(fs.getFileStatus(file1).getReplication() == 1);
      assertTrue(fs.getFileStatus(file1).getLen() == fileSize);
      System.out.println("Path : \"" + file1 + "\"");

      // create an empty directory
      //
      Path parentDir = new Path("/test");
      Path dir = new Path("/test/mkdirs");
      assertTrue(fs.mkdirs(dir));
      assertTrue(fs.exists(dir));
       assertTrue(dir + " should be a directory", fs.getFileStatus(dir).isDir());
      assertTrue(dir + " should be zero size ", fs.getContentSummary(dir).getLength() == 0);
      assertTrue(dir + " should be zero size ", fs.getFileStatus(dir).getLen() == -1);
      System.out.println("Dir : \"" + dir + "\"");

      // create another file that is smaller than a block.
      //
      Path file2 = new Path("/test/mkdirs/filestatus2.dat");
      writeFile(fs, file2, 1, blockSize / 4, blockSize);
      System.out.println("Created file filestatus2.dat with one " + " replicas.");
      checkFile(fs, file2, 1);
      System.out.println("Path : \"" + file2 + "\"");

      // verify file attributes
      assertTrue(fs.getFileStatus(file2).getBlockSize() == blockSize);
      assertTrue(fs.getFileStatus(file2).getReplication() == 1);

      // create another file in the same directory
      Path file3 = new Path("/test/mkdirs/filestatus3.dat");
      writeFile(fs, file3, 1, blockSize / 4, blockSize);
      System.out.println("Created file filestatus3.dat with one " + " replicas.");
      checkFile(fs, file3, 1);

      // verify that the size of the directory increased by the size
      // of the two files
      assertTrue(
          dir + " size should be " + (blockSize / 2),
          blockSize / 2 == fs.getContentSummary(dir).getLength());
    } finally {
      fs.close();
      cluster.shutdown();
    }
  }
Example #15
  public void testFileIO() throws Exception {

    Path subDir1 = new Path("tfio_dir.1");
    Path file1 = new Path("tfio_dir.1/foo.1");
    Path baseDir = new Path("tfio_testDirs1");

    fs.mkdirs(baseDir);
    assertTrue(fs.isDirectory(baseDir));
    // fs.setWorkingDirectory(baseDir);

    fs.mkdirs(subDir1);

    FSDataOutputStream s1 = fs.create(file1, true, 4096, (short) 1, (long) 4096, null);

    int bufsz = 4096;
    byte[] data = new byte[bufsz];

    for (int i = 0; i < data.length; i++) data[i] = (byte) (i % 16);

    // write 4 bytes and read them back; read API should return a byte per
    // call
    s1.write(32);
    s1.write(32);
    s1.write(32);
    s1.write(32);
    // write some data
    s1.write(data, 0, data.length);
    // flush out the changes
    s1.close();

    // Read the stuff back and verify it is correct
    FSDataInputStream s2 = fs.open(file1, 4096);
    int v;

     v = s2.read();
     assertEquals(32, v);
     v = s2.read();
     assertEquals(32, v);
     v = s2.read();
     assertEquals(32, v);
     v = s2.read();
     assertEquals(32, v);

     assertEquals(data.length, s2.available());

    byte[] buf = new byte[bufsz];
     s2.readFully(buf, 0, buf.length); // readFully guarantees the whole buffer is filled, unlike read()
    for (int i = 0; i < data.length; i++) assertEquals(data[i], buf[i]);

     assertEquals(0, s2.available());

    s2.close();

    fs.delete(file1, true);
    assertFalse(fs.exists(file1));
    fs.delete(subDir1, true);
    assertFalse(fs.exists(subDir1));
    fs.delete(baseDir, true);
    assertFalse(fs.exists(baseDir));

     fs.delete(subDir1, true);
     fs.delete(file1, true);
     fs.delete(baseDir, true);
  }
Example #16
  @SuppressWarnings("deprecation")
  public void testHive2Action() throws Exception {
    setupHiveServer2();
    Path inputDir = new Path(getFsTestCaseDir(), INPUT_DIRNAME);
    Path outputDir = new Path(getFsTestCaseDir(), OUTPUT_DIRNAME);
    FileSystem fs = getFileSystem();

    {
      String query = getHive2Script(inputDir.toString(), outputDir.toString());
      Writer dataWriter = new OutputStreamWriter(fs.create(new Path(inputDir, DATA_FILENAME)));
      dataWriter.write(SAMPLE_DATA_TEXT);
      dataWriter.close();
      Context context = createContext(getQueryActionXml(query));
      final RunningJob launcherJob =
          submitAction(context, Namespace.getNamespace("uri:oozie:hive2-action:0.2"));
      String launcherId = context.getAction().getExternalId();
      waitFor(
          200 * 1000,
          new Predicate() {
            @Override
            public boolean evaluate() throws Exception {
              return launcherJob.isComplete();
            }
          });
      assertTrue(launcherJob.isSuccessful());
      Configuration conf = new XConfiguration();
      conf.set("user.name", getTestUser());
      Map<String, String> actionData =
          LauncherMapperHelper.getActionData(getFileSystem(), context.getActionDir(), conf);
      assertFalse(LauncherMapperHelper.hasIdSwap(actionData));
      Hive2ActionExecutor ae = new Hive2ActionExecutor();
      ae.check(context, context.getAction());
      assertTrue(launcherId.equals(context.getAction().getExternalId()));
      assertEquals("SUCCEEDED", context.getAction().getExternalStatus());
      ae.end(context, context.getAction());
      assertEquals(WorkflowAction.Status.OK, context.getAction().getStatus());
      // Disabled external child id check until Hive version is upgraded to 0.14+
      // assertNotNull(context.getExternalChildIDs());
      assertTrue(fs.exists(outputDir));
      assertTrue(fs.isDirectory(outputDir));
    }
    {
      Path script = new Path(getAppPath(), HIVE_SCRIPT_FILENAME);
      Writer scriptWriter = new OutputStreamWriter(fs.create(script));
      scriptWriter.write(getHive2Script(inputDir.toString(), outputDir.toString()));
      scriptWriter.close();

      Writer dataWriter = new OutputStreamWriter(fs.create(new Path(inputDir, DATA_FILENAME)));
      dataWriter.write(SAMPLE_DATA_TEXT);
      dataWriter.close();
      Context context = createContext(getScriptActionXml());
      final RunningJob launcherJob =
          submitAction(context, Namespace.getNamespace("uri:oozie:hive2-action:0.1"));
      String launcherId = context.getAction().getExternalId();
      waitFor(
          200 * 1000,
          new Predicate() {
            @Override
            public boolean evaluate() throws Exception {
              return launcherJob.isComplete();
            }
          });
      assertTrue(launcherJob.isSuccessful());
      Configuration conf = new XConfiguration();
      conf.set("user.name", getTestUser());
      Map<String, String> actionData =
          LauncherMapperHelper.getActionData(getFileSystem(), context.getActionDir(), conf);
      assertFalse(LauncherMapperHelper.hasIdSwap(actionData));
      Hive2ActionExecutor ae = new Hive2ActionExecutor();
      ae.check(context, context.getAction());
      assertTrue(launcherId.equals(context.getAction().getExternalId()));
      assertEquals("SUCCEEDED", context.getAction().getExternalStatus());
      ae.end(context, context.getAction());
      assertEquals(WorkflowAction.Status.OK, context.getAction().getStatus());
      // Disabled external child id check until Hive version is upgraded to 0.14+
      // assertNotNull(context.getExternalChildIDs());
      assertTrue(fs.exists(outputDir));
      assertTrue(fs.isDirectory(outputDir));
    }
    // Negative testcase with incorrect hive-query.
    {
      String query = getHive2BadScript(inputDir.toString(), outputDir.toString());
      Writer dataWriter = new OutputStreamWriter(fs.create(new Path(inputDir, DATA_FILENAME)));
      dataWriter.write(SAMPLE_DATA_TEXT);
      dataWriter.close();
      Context context = createContext(getQueryActionXml(query));
      final RunningJob launcherJob =
          submitAction(context, Namespace.getNamespace("uri:oozie:hive2-action:0.2"));
      String launcherId = context.getAction().getExternalId();
      waitFor(
          200 * 1000,
          new Predicate() {
            @Override
            public boolean evaluate() throws Exception {
              return launcherJob.isComplete();
            }
          });
      assertTrue(launcherJob.isSuccessful());
      Configuration conf = new XConfiguration();
      conf.set("user.name", getTestUser());
      Map<String, String> actionData =
          LauncherMapperHelper.getActionData(getFileSystem(), context.getActionDir(), conf);
      assertFalse(LauncherMapperHelper.hasIdSwap(actionData));
      Hive2ActionExecutor ae = new Hive2ActionExecutor();
      ae.check(context, context.getAction());
      assertTrue(launcherId.equals(context.getAction().getExternalId()));
      assertEquals("FAILED/KILLED", context.getAction().getExternalStatus());
      ae.end(context, context.getAction());
      assertEquals(WorkflowAction.Status.ERROR, context.getAction().getStatus());
      assertNull(context.getExternalChildIDs());
    }
  }