/** Test deleteOnExit */
  public void testDeleteOnExit() throws IOException {
    Configuration conf = new Configuration();
    if (simulatedStorage) {
      conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
    }
    MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null);
    FileSystem fs = cluster.getFileSystem();
    FileSystem localfs = FileSystem.getLocal(conf);

    try {

      // Creates files in HDFS and local file system.
      //
      Path file1 = new Path("filestatus.dat");
      Path file2 = new Path("filestatus2.dat");
      Path file3 = new Path("filestatus3.dat");
      FSDataOutputStream stm1 = createFile(fs, file1, 1);
      FSDataOutputStream stm2 = createFile(fs, file2, 1);
      FSDataOutputStream stm3 = createFile(localfs, file3, 1);
      System.out.println("DeleteOnExit: Created files.");

      // write to files and close. Purposely, do not write to file2.
      writeFile(stm1);
      writeFile(stm3);
      stm1.close();
      stm2.close();
      stm3.close();

      // set delete on exit flag on files.
      fs.deleteOnExit(file1);
      fs.deleteOnExit(file2);
      localfs.deleteOnExit(file3);

      // close the file system. This should make the above files
      // disappear.
      fs.close();
      localfs.close();
      fs = null;
      localfs = null;

      // reopen file system and verify that the files do not exist.
      fs = cluster.getFileSystem();
      localfs = FileSystem.getLocal(conf);

      assertTrue(file1 + " still exists despite deleteOnExit being set.", !fs.exists(file1));
      assertTrue(file2 + " still exists despite deleteOnExit being set.", !fs.exists(file2));
      assertTrue(file3 + " still exists despite deleteOnExit being set.", !localfs.exists(file3));
      System.out.println("DeleteOnExit successful.");

    } finally {
      IOUtils.closeStream(fs);
      IOUtils.closeStream(localfs);
      cluster.shutdown();
    }
  }
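A note on semantics: deleteOnExit registers the path with that FileSystem instance, and the actual delete happens inside close(). The registration can also be withdrawn first; a minimal hedged sketch, reusing the createFile helper from this test:

      // Hedged sketch: cancelDeleteOnExit removes a pending registration, so the
      // file survives the close() that would otherwise delete it.
      Path keeper = new Path("keepme.dat");
      createFile(fs, keeper, 1).close();
      fs.deleteOnExit(keeper);
      fs.cancelDeleteOnExit(keeper);
      fs.close(); // keeper still exists afterwards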
Example 2
  public void readFile(String file) throws IOException {
    Configuration conf = new Configuration();
    conf.addResource(new Path("/opt/hadoop-0.20.0/conf/core-site.xml"));

    FileSystem fileSystem = FileSystem.get(conf);

    Path path = new Path(file);
    if (!fileSystem.exists(path)) {
      System.out.println("File " + file + " does not exists");
      return;
    }

    FSDataInputStream in = fileSystem.open(path);

    String filename = file.substring(file.lastIndexOf('/') + 1);

    OutputStream out = new BufferedOutputStream(new FileOutputStream(new File(filename)));

    byte[] b = new byte[1024];
    int numBytes = 0;
    while ((numBytes = in.read(b)) > 0) {
      out.write(b, 0, numBytes);
    }

    in.close();
    out.close();
    fileSystem.close();
  }
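The copy loop above leaks both streams if read or write throws. A hedged alternative for the same method body, using Hadoop's IOUtils.copyBytes, which closes both streams even on failure:

    FSDataInputStream in = fileSystem.open(path);
    OutputStream out = new BufferedOutputStream(new FileOutputStream(new File(filename)));
    // copyBytes(in, out, bufferSize, close=true): both streams are closed in a finally block.
    org.apache.hadoop.io.IOUtils.copyBytes(in, out, 4096, true);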
Example 3
  @Override
  public int run(String[] args) throws Exception {
    if (this.conf == null) {
      this.conf = new Configuration();
    }

    String points = args[0];
    String centers = args[1];
    String result = args[2];
    int maxIterations = Integer.parseInt(args[3]);
    String centersSeqFile = centers + "_seq";
    String pointsSeqFile = points + "_seq";

    conf.set(CENTERS_CONF_KEY, centersSeqFile);

    FileSystem fs = FileSystem.get(conf);

    createCentersSequenceFile(conf, fs, centers, centersSeqFile);

    initializeCenters(conf, fs, points, pointsSeqFile);

    kmeans(conf, fs, pointsSeqFile, result, maxIterations);

    convertPointsSequenceFileToText(conf, fs, result, result + "_text");

    fs.close();
    return 0;
  }
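Since run(String[]) is the Tool entry point, this driver would normally be launched via ToolRunner so that generic options (-D, -fs, ...) are parsed into the Configuration first. A hedged sketch; the class name KMeansJob is assumed:

  public static void main(String[] args) throws Exception {
    // args: <points> <centers> <result> <maxIterations>, per the run() method above.
    int exitCode = ToolRunner.run(new Configuration(), new KMeansJob(), args);
    System.exit(exitCode);
  }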
Example 4
  public static void downloadHdfs(String srcfilePath, String destFilePath) {
    try {
      Configuration conf = new Configuration();
      FileSystem fs = FileSystem.get(URI.create(srcfilePath), conf);
      FSDataInputStream hdfsInStream = fs.open(new Path(srcfilePath));
      File dstFile = new File(destFilePath);
      if (!dstFile.getParentFile().exists()) {
        dstFile.getParentFile().mkdirs();
      }
      OutputStream out = new FileOutputStream(destFilePath);
      byte[] ioBuffer = new byte[1024];
      int readLen = hdfsInStream.read(ioBuffer);

      while (-1 != readLen) {
        out.write(ioBuffer, 0, readLen);
        readLen = hdfsInStream.read(ioBuffer);
      }
      out.close();
      hdfsInStream.close();
      fs.close();
    } catch (IOException e) { // also covers FileNotFoundException, a subclass
      LOG.error("[downloadHdfs]", e);
    }
  }
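The same download can be written without a manual copy loop by delegating to FileSystem.copyToLocalFile; a hedged sketch under the same imports and LOG field:

  public static void downloadHdfsViaCopy(String srcFilePath, String destFilePath) {
    try {
      Configuration conf = new Configuration();
      FileSystem fs = FileSystem.get(URI.create(srcFilePath), conf);
      // copyToLocalFile performs the buffered copy to the local file system.
      fs.copyToLocalFile(new Path(srcFilePath), new Path(destFilePath));
      fs.close();
    } catch (IOException e) {
      LOG.error("[downloadHdfsViaCopy]", e);
    }
  }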
  /**
   * Generate random data, compress it, optionally index it, and MD5-hash the data. Then read it
   * all back and MD5-hash that too, to verify that the round trip went ok.
   *
   * @param testWithIndex Should we index or not?
   * @param charsToOutput How many characters of random data should we output?
   * @throws IOException
   * @throws NoSuchAlgorithmException
   * @throws InterruptedException
   */
  private void runTest(boolean testWithIndex, int charsToOutput)
      throws IOException, NoSuchAlgorithmException, InterruptedException {

    Configuration conf = new Configuration();
    conf.setLong("fs.local.block.size", charsToOutput / 2);
    // reducing block size to force a split of the tiny file
    conf.set("io.compression.codecs", LzopCodec.class.getName());

    Assume.assumeTrue(CoreTestUtil.okToRunLzoTests(conf));

    FileSystem.getLocal(conf).close(); // remove cached filesystem (if any)
    FileSystem localFs = FileSystem.getLocal(conf);
    localFs.delete(outputDir_, true);
    localFs.mkdirs(outputDir_);

    Job job = new Job(conf);
    TextOutputFormat.setCompressOutput(job, true);
    TextOutputFormat.setOutputCompressorClass(job, LzopCodec.class);
    TextOutputFormat.setOutputPath(job, outputDir_);

    TaskAttemptContext attemptContext =
        new TaskAttemptContext(job.getConfiguration(), new TaskAttemptID("123", 0, false, 1, 2));

    // create some input data
    byte[] expectedMd5 = createTestInput(outputDir_, localFs, attemptContext, charsToOutput);

    if (testWithIndex) {
      Path lzoFile = new Path(outputDir_, lzoFileName_);
      LzoIndex.createIndex(localFs, lzoFile);
    }

    LzoTextInputFormat inputFormat = new LzoTextInputFormat();
    TextInputFormat.setInputPaths(job, outputDir_);

    List<InputSplit> is = inputFormat.getSplits(job);
    // verify we have the right number of lzo chunks
    if (testWithIndex && OUTPUT_BIG == charsToOutput) {
      assertEquals(3, is.size());
    } else {
      assertEquals(1, is.size());
    }

    // let's read it all and calculate the md5 hash
    for (InputSplit inputSplit : is) {
      RecordReader<LongWritable, Text> rr =
          inputFormat.createRecordReader(inputSplit, attemptContext);
      rr.initialize(inputSplit, attemptContext);

      while (rr.nextKeyValue()) {
        Text value = rr.getCurrentValue();

        md5_.update(value.getBytes(), 0, value.getLength());
      }

      rr.close();
    }

    localFs.close();
    assertTrue(Arrays.equals(expectedMd5, md5_.digest()));
  }
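For context, a test class around runTest would presumably drive it once per scenario; a hedged sketch that reuses the OUTPUT_BIG constant referenced in the split assertion above:

  public void testBigFileWithIndex() throws Exception {
    runTest(true, OUTPUT_BIG); // indexed: the assertion above expects 3 splits
  }

  public void testBigFileWithoutIndex() throws Exception {
    runTest(false, OUTPUT_BIG); // unindexed: the whole file comes back as 1 split
  }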
Example 6
 // Delete a file or directory
 public void rmr(String folder) throws IOException {
   Path path = new Path(folder);
   FileSystem fs = FileSystem.get(URI.create(hdfsPath), conf);
   fs.delete(path, true); // delete immediately and recursively; deleteOnExit would only defer removal to close()
   log.debug("Delete: " + folder);
   fs.close();
 }
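Permanent deletes bypass the HDFS trash. Where trash is enabled on the cluster, a hedged alternative sketch (the method name rmrToTrash is ours, and Trash.moveToAppropriateTrash assumes a Hadoop version that provides it):

 public void rmrToTrash(String folder) throws IOException {
   Path path = new Path(folder);
   FileSystem fs = FileSystem.get(URI.create(hdfsPath), conf);
   // moveToAppropriateTrash returns false when trash is disabled; fall back to a hard delete.
   if (!Trash.moveToAppropriateTrash(fs, path, conf)) {
     fs.delete(path, true);
   }
   log.debug("Delete: " + folder);
   fs.close();
 }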
Example 7
 public void copyFile(String local, String remote) throws IOException {
   FileSystem fs = FileSystem.get(URI.create(hdfsPath), conf);
   // remote --- a file or directory under /user/<username>
   fs.copyFromLocalFile(new Path(local), new Path(remote));
   log.debug("copy from: " + local + " to " + remote);
   fs.close();
 }
Example 8
 // Download a file to the local file system
 public void download(String remote, String local) throws IOException {
   Path path = new Path(remote);
   FileSystem fs = FileSystem.get(URI.create(hdfsPath), conf);
   fs.copyToLocalFile(path, new Path(local));
   log.debug("download: from" + remote + " to " + local);
   fs.close();
 }
Example 9
 @Override
 public void close() throws IOException {
   if (mContext != FileSystemContext.INSTANCE) {
     mContext.close();
   }
   super.close();
 }
Example 10
 @SuppressWarnings("deprecation")
 private void testDelegationTokenWithFS(Class fileSystemClass) throws Exception {
   createHttpFSServer();
   Configuration conf = new Configuration();
   conf.set("fs.webhdfs.impl", fileSystemClass.getName());
   conf.set("fs.hdfs.impl.disable.cache", "true");
   URI uri = new URI("webhdfs://" + TestJettyHelper.getJettyURL().toURI().getAuthority());
   FileSystem fs = FileSystem.get(uri, conf);
   Token<?>[] tokens = fs.addDelegationTokens("foo", null);
   fs.close();
   Assert.assertEquals(1, tokens.length);
   fs = FileSystem.get(uri, conf);
   ((DelegationTokenRenewer.Renewable) fs).setDelegationToken(tokens[0]);
   fs.listStatus(new Path("/"));
   fs.close();
 }
Example 11
 public void testFsckNonExistent() throws Exception {
   DFSTestUtil util = new DFSTestUtil("TestFsck", 20, 3, 8 * 1024);
   MiniDFSCluster cluster = null;
   FileSystem fs = null;
   try {
     Configuration conf = new Configuration();
     conf.setLong("dfs.blockreport.intervalMsec", 10000L);
     cluster = new MiniDFSCluster(conf, 4, true, null);
     fs = cluster.getFileSystem();
     util.createFiles(fs, "/srcdat");
     util.waitReplication(fs, "/srcdat", (short) 3);
     String outStr = runFsck(conf, 0, true, "/non-existent");
      assertFalse(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
     System.out.println(outStr);
     util.cleanup(fs, "/srcdat");
   } finally {
     if (fs != null) {
       try {
         fs.close();
        } catch (Exception e) {
          // ignore close failures during cleanup
        }
     }
     if (cluster != null) {
       cluster.shutdown();
     }
   }
 }
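The close-and-swallow idiom in this finally block repeats in the next example too; a small hedged helper (the name closeQuietly is ours) would centralize it:

  private static void closeQuietly(FileSystem fs) {
    if (fs != null) {
      try {
        fs.close();
      } catch (Exception e) {
        // ignore: callers are only cleaning up test state
      }
    }
  }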
Example 12
  /** Run fsck on a healthy cluster, then on one restarted without datanodes. */
  public void testFsck() throws Exception {
    DFSTestUtil util = new DFSTestUtil("TestFsck", 20, 3, 8 * 1024);
    MiniDFSCluster cluster = null;
    FileSystem fs = null;
    try {
      Configuration conf = new Configuration();
      conf.setLong("dfs.blockreport.intervalMsec", 10000L);
      cluster = new MiniDFSCluster(conf, 4, true, null);
      fs = cluster.getFileSystem();
      util.createFiles(fs, "/srcdat");
      util.waitReplication(fs, "/srcdat", (short) 3);
      String outStr = runFsck(conf, 0, true, "/");
      assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
      System.out.println(outStr);
      if (fs != null) {
        try {
          fs.close();
        } catch (Exception e) {
          // ignore close failures; the cluster is about to be restarted
        }
      }
      cluster.shutdown();

      // restart the cluster; bring up namenode but not the data nodes
      cluster = new MiniDFSCluster(conf, 0, false, null);
      outStr = runFsck(conf, 1, true, "/");
      // expect the result to be corrupt
      assertTrue(outStr.contains(NamenodeFsck.CORRUPT_STATUS));
      System.out.println(outStr);

      // bring up data nodes & cleanup cluster
      cluster.startDataNodes(conf, 4, true, null, null);
      cluster.waitActive();
      cluster.waitClusterUp();
      fs = cluster.getFileSystem();
      util.cleanup(fs, "/srcdat");
    } finally {
      if (fs != null) {
        try {
          fs.close();
        } catch (Exception e) {
          // ignore close failures during cleanup
        }
      }
      if (cluster != null) {
        cluster.shutdown();
      }
    }
  }
Example 13
 public void myShutDown() throws Exception {
   if (fileSys != null) {
     fileSys.close();
   }
   if (dfs != null) {
     dfs.shutdown();
   }
 }
Example 14
 @After
 @Override
 public void tearDown() throws Exception {
   if (null != fs) fs.close();
   dfsCluster.shutdown();
   Thread.sleep(2000);
   super.tearDown();
 }
Example 15
 public void close() throws IOException {
   if (searchBean != null) {
     searchBean.close();
   }
   if (fs != null) {
     fs.close();
   }
 }
 protected void closeCluster() throws IOException {
   if (null != fs) {
     fs.close();
   }
   if (null != cluster) {
     cluster.shutdown();
   }
 }
Example 17
 @Override
 public void closeFileSystem() {
   try {
     fileSystem.close();
    } catch (IOException e) {
      // ignore: the file system reference is discarded below
    }
   fileSystem = null;
 }
Example 18
 // Create a directory under the root directory
 public void mkdirs(String folder) throws IOException {
   Path path = new Path(folder);
   FileSystem fs = FileSystem.get(URI.create(hdfsPath), conf);
   if (!fs.exists(path)) {
     fs.mkdirs(path);
     log.debug("Create: " + folder);
   }
   fs.close();
 }
Example 19
 @Override
 public void teardown() {
   super.teardown();
   try {
     fs.close();
   } catch (IOException e) {
     throw new RuntimeException(e);
   }
 }
Example 20
 public boolean close() {
   if (fileSystem != null)
     try {
       fileSystem.close();
       return true;
     } catch (IOException e) {
       logger.error(e.getMessage(), e);
     }
   return false;
 }
Example 21
 public static void deleteHdfs(String hdfsFile) {
   try {
     Configuration conf = new Configuration();
     FileSystem fs = FileSystem.get(URI.create(hdfsFile), conf);
      fs.deleteOnExit(new Path(hdfsFile)); // the delete is carried out when fs.close() runs below
     fs.close();
   } catch (IOException e) {
     LOG.error("[deleteHdfs]", e);
   }
 }
Example 22
  /**
   * Test that appends to files at random offsets.
   *
   * @throws IOException an exception might be thrown
   */
  public void testComplexAppend() throws IOException {
    fileContents = AppendTestUtil.initBuffer(AppendTestUtil.FILE_SIZE);
    Configuration conf = new HdfsConfiguration();
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 2000);
    conf.setInt("dfs.heartbeat.interval", 2);
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, 2);
    conf.setInt(DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, 30000);
    conf.setInt("dfs.datanode.socket.write.timeout", 30000);
    conf.setInt("dfs.datanode.handler.count", 50);
    conf.setBoolean("dfs.support.append", true);

    MiniDFSCluster cluster = new MiniDFSCluster(conf, numDatanodes, true, null);
    cluster.waitActive();
    FileSystem fs = cluster.getFileSystem();

    try {
      // create a bunch of test files with random replication factors.
      // Insert them into a linked list.
      //
      for (int i = 0; i < numberOfFiles; i++) {
        short replication = (short) (AppendTestUtil.nextInt(numDatanodes) + 1);
        Path testFile = new Path("/" + i + ".dat");
        FSDataOutputStream stm = AppendTestUtil.createFile(fs, testFile, replication);
        stm.close();
        testFiles.add(testFile);
      }

      // Create threads and make them run workload concurrently.
      workload = new Workload[numThreads];
      for (int i = 0; i < numThreads; i++) {
        workload[i] = new Workload(cluster, i);
        workload[i].start();
      }

      // wait for all transactions to get over
      for (int i = 0; i < numThreads; i++) {
        try {
          System.out.println("Waiting for thread " + i + " to complete...");
          workload[i].join();
          System.out.println("Waiting for thread " + i + " complete.");
        } catch (InterruptedException e) {
          i--; // interrupted while waiting; retry the join for this thread
        }
      }
    } finally {
      fs.close();
      cluster.shutdown();
    }

    // If any of the worker thread failed in their job, indicate that
    // this test failed.
    //
    assertTrue("testComplexAppend Worker encountered exceptions.", globalStatus);
  }
Example 23
 /** @see org.apache.commons.vfs2.provider.AbstractFileSystem#close() */
 @Override
 public void close() {
   try {
     if (null != fs) {
       fs.close();
     }
   } catch (final IOException e) {
     throw new RuntimeException("Error closing HDFS client", e);
   }
   super.close();
 }
Example 24
 @Override
 protected void doClose() throws ElasticSearchException {
   super.doClose();
   if (closeFileSystem) {
     try {
       fileSystem.close();
     } catch (IOException e) {
       // ignore
     }
   }
   concurrentStreamPool.shutdown();
 }
Example 25
 /** Tests if the seek bug exists in FSDataInputStream in LocalFS. */
 @Test
 public void testSeekBugLocalFS() throws IOException {
   Configuration conf = new HdfsConfiguration();
   FileSystem fileSys = FileSystem.getLocal(conf);
   try {
     Path file1 = new Path("build/test/data", "seektest.dat");
     writeFile(fileSys, file1);
     seekReadFile(fileSys, file1);
     cleanupFile(fileSys, file1);
   } finally {
     fileSys.close();
   }
 }
Example 26
  /**
   * Test a simple flush on a simple HDFS file.
   *
   * @throws IOException an exception might be thrown
   */
  @Test
  public void testSimpleFlush() throws IOException {
    Configuration conf = new HdfsConfiguration();
    if (simulatedStorage) {
      conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
    }
    fileContents = AppendTestUtil.initBuffer(AppendTestUtil.FILE_SIZE);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
    FileSystem fs = cluster.getFileSystem();
    try {

      // create a new file.
      Path file1 = new Path("/simpleFlush.dat");
      FSDataOutputStream stm = AppendTestUtil.createFile(fs, file1, 1);
      System.out.println("Created file simpleFlush.dat");

      // write to file
      int mid = AppendTestUtil.FILE_SIZE / 2;
      stm.write(fileContents, 0, mid);
      stm.hflush();
      System.out.println("Wrote and Flushed first part of file.");

      // write the remainder of the file
      stm.write(fileContents, mid, AppendTestUtil.FILE_SIZE - mid);
      System.out.println("Written second part of file");
      stm.hflush();
      stm.hflush();
      System.out.println("Wrote and Flushed second part of file.");

      // verify that full blocks are sane
      checkFile(fs, file1, 1);

      stm.close();
      System.out.println("Closed file.");

      // verify that entire file is good
      AppendTestUtil.checkFullFile(fs, file1, AppendTestUtil.FILE_SIZE, fileContents, "Read 2");

    } catch (IOException e) {
      System.out.println("Exception :" + e);
      throw e;
    } catch (Throwable e) {
      System.out.println("Throwable :" + e);
      e.printStackTrace();
      throw new IOException("Throwable : " + e);
    } finally {
      fs.close();
      cluster.shutdown();
    }
  }
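Note that hflush only guarantees visibility to new readers; the bytes may still sit in datanode memory. Where durability is required and the Hadoop version exposes Syncable.hsync, the write path above could be hardened like this hedged sketch:

      // Hedged sketch: hsync additionally asks the datanodes to persist the current
      // block to disk, where hflush stops at pipeline memory.
      stm.write(fileContents, 0, mid);
      stm.hsync();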
Example 27
  @Override
  public synchronized void stop() {
    try {
      accepted.close().awaitUninterruptibly(10, TimeUnit.SECONDS);
      ServerBootstrap bootstrap = new ServerBootstrap(selector);
      bootstrap.releaseExternalResources();
      pipelineFact.destroy();

      localFS.close();
    } catch (Throwable t) {
      LOG.error(t);
    } finally {
      super.stop();
    }
  }
Example 28
  public static void main(String[] args) throws IOException {
    Path f = new Path(args[0]);
    System.out.println("javaaction test testjava3" + args[0]);
    Configuration conf = new Configuration();
    FileSystem hdfs = null;
    try {
      hdfs = FileSystem.get(conf);
      hdfs.deleteOnExit(f);
    } catch (IOException e) {
      e.printStackTrace();
      System.exit(1);
    } finally {
      if (null != hdfs) hdfs.close();
    }
  }
Example 29
 /**
  * FileNotFoundException is expected when appending to a non-existing file.
  *
  * @throws FileNotFoundException as the expected result
  */
 @Test(expected = FileNotFoundException.class)
 public void testFileNotFound() throws IOException {
   Configuration conf = new HdfsConfiguration();
   if (simulatedStorage) {
     conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
   }
   MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
   FileSystem fs = cluster.getFileSystem();
   try {
     Path file1 = new Path("/nonexistingfile.dat");
     fs.append(file1);
   } finally {
     fs.close();
     cluster.shutdown();
   }
 }
Example 30
 /** Test if the seek bug exists in FSDataInputStream in DFS. */
 @Test
 public void testSeekBugDFS() throws IOException {
   Configuration conf = new HdfsConfiguration();
   MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
   FileSystem fileSys = cluster.getFileSystem();
   try {
     Path file1 = new Path("seektest.dat");
     writeFile(fileSys, file1);
     seekReadFile(fileSys, file1);
     smallReadSeek(fileSys, file1);
     cleanupFile(fileSys, file1);
   } finally {
     fileSys.close();
     cluster.shutdown();
   }
 }