Example #1
0
  /**
   * Recursively deletes every child of {@code folder}, leaving the folder itself in place.
   * A non-existent folder is silently ignored.
   *
   * @param folder directory whose contents should be removed
   * @throws RuntimeException if any child file or directory cannot be deleted
   */
  public static void clearFolder(File folder) {
    if (folder.exists()) {
      // listFiles() returns null when the path is not a directory or an I/O
      // error occurs; iterating over null would throw a NullPointerException.
      File[] children = folder.listFiles();
      if (children == null) return;
      for (File child : children) {
        if (child.isDirectory())
          clearFolder(child); // empty the subdirectory first so delete() can succeed

        if (!child.delete())
          throw new RuntimeException("Cannot delete " + child);
      }
    }
  }
Example #2
0
 static {
   // Build the Hadoop configuration exactly once and publish it via CONF.
   Configuration conf = new Configuration();
   if (H2O.OPT_ARGS.hdfs_config != null) {
     // An explicit configuration file was supplied on the command line.
     File cfg = new File(H2O.OPT_ARGS.hdfs_config);
     if (!cfg.exists()) Log.die("Unable to open hdfs configuration file " + cfg.getAbsolutePath());
     conf.addResource(new Path(cfg.getAbsolutePath()));
     Log.debug(Sys.HDFS_, "resource ", cfg.getAbsolutePath(), " added to the hadoop configuration");
   } else if (!Strings.isNullOrEmpty(H2O.OPT_ARGS.hdfs)) {
     // setup default remote Filesystem - for version 0.21 and higher
     conf.set("fs.defaultFS", H2O.OPT_ARGS.hdfs);
     // To provide compatibility with version 0.20.0 it is necessary to setup the property
     // fs.default.name which was in newer version renamed to 'fs.defaultFS'
     conf.set("fs.default.name", H2O.OPT_ARGS.hdfs);
   }
   CONF = conf;
 }
Example #3
0
 /**
  * Copies the given CSV stream to a file on the local disk at {@code path}.
  *
  * @param csv source stream; read to exhaustion, but not closed here (caller owns it)
  * @throws IOException if reading the stream or writing the file fails
  * @throws IllegalArgumentException if the target file already exists and {@code force} is not set
  */
 private void serveLocalDisk(InputStream csv) throws IOException {
   _local = true;
   File f = new File(path);
   if (!force && f.exists())
     throw new IllegalArgumentException("File " + path + " already exists.");
   // try-with-resources closes the output even if a read/write fails,
   // replacing the manual null-checked finally block.
   try (OutputStream output = new FileOutputStream(path.toString())) {
     byte[] buffer = new byte[1024];
     int len;
     while ((len = csv.read(buffer)) > 0) {
       output.write(buffer, 0, len);
     }
     Log.info(
         "Key '"
             + src_key.toString()
             + "' was written to "
             + (_local && H2O.CLOUD.size() > 1 ? H2O.SELF_ADDRESS + ":" : "")
             + path.toString()
             + ".");
   }
 }