Example #1
  public static void clearFolder(File folder) {
    if (folder.exists()) {
      // listFiles() may return null (path is not a directory, or an I/O error), so guard first
      File[] children = folder.listFiles();
      if (children == null) return;
      for (File child : children) {
        if (child.isDirectory())
          clearFolder(child);

        if (!child.delete())
          throw new RuntimeException("Cannot delete " + child);
      }
    }
  }
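
A minimal usage sketch (hypothetical path; not part of the original class): the method empties the folder recursively but leaves the folder itself in place.

  // Wipe a scratch directory under the system temp folder before reusing it
  File scratch = new File(System.getProperty("java.io.tmpdir"), "h2o-scratch");
  clearFolder(scratch); // children are deleted; "h2o-scratch" itself remains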
Example #2
 /**
  * Returns the system temporary folder, e.g. /tmp
  */
 public static File tmp() {
   try {
     // Create a throwaway temp file just to discover which directory it lands in
     File probe = File.createTempFile("h2o", null);
     probe.deleteOnExit(); // don't leak the probe file
     return probe.getParentFile();
   } catch( IOException e ) {
     throw new RuntimeException(e);
   }
 }
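
As an aside (a sketch, not from the original utility): the JVM exposes the same folder directly as a system property; the createTempFile trick above merely discovers it without hard-coding the property name.

  // Direct lookup of the system temp folder
  File tmpDir = new File(System.getProperty("java.io.tmpdir"));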
Example #3
 public static File writeFile(String content) {
   try {
     // Delegate to the (File, String) overload, writing into a fresh temp file
     return writeFile(File.createTempFile("h2o", null), content);
   } catch( IOException e ) {
     throw Log.errRTExcept(e);
   }
 }
Example #4
 static {
    // Both paths below populate a fresh Hadoop Configuration
    Configuration conf = new Configuration();
    if (H2O.OPT_ARGS.hdfs_config != null) {
     File p = new File(H2O.OPT_ARGS.hdfs_config);
     if (!p.exists()) Log.die("Unable to open hdfs configuration file " + p.getAbsolutePath());
     conf.addResource(new Path(p.getAbsolutePath()));
     Log.debug(Sys.HDFS_, "resource ", p.getAbsolutePath(), " added to the hadoop configuration");
    } else {
     if (!Strings.isNullOrEmpty(H2O.OPT_ARGS.hdfs)) {
       // setup default remote Filesystem - for version 0.21 and higher
       conf.set("fs.defaultFS", H2O.OPT_ARGS.hdfs);
        // For compatibility with version 0.20.0, also set fs.default.name,
        // the property that newer versions renamed to 'fs.defaultFS'
       conf.set("fs.default.name", H2O.OPT_ARGS.hdfs);
     }
   }
   CONF = conf;
 }
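
A hedged usage sketch for the resulting CONF: Hadoop's standard entry point FileSystem.get(Configuration) resolves whichever default filesystem the block above configured (the probe below is illustrative and not from the original class; FileSystem.get throws IOException, so callers must handle it).

  // Resolve the default filesystem named by fs.defaultFS / fs.default.name
  FileSystem fs = FileSystem.get(CONF);
  boolean reachable = fs.exists(new Path("/")); // simple connectivity probe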
Example #5
 public static String readFile(File file) {
   FileReader r = null;
   try {
     r = new FileReader(file);
     // file.length() counts bytes, and read() need not fill the buffer in one
     // call, so loop until EOF and only keep the chars actually decoded
     char[] data = new char[(int) file.length()];
     int off = 0, n;
     while (off < data.length && (n = r.read(data, off, data.length - off)) > 0)
       off += n;
     return new String(data, 0, off);
   } catch(IOException e) {
     throw Log.errRTExcept(e);
   } finally {
     close(r);
   }
 }
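
The helpers from Example #3 and Example #5 compose into a simple round trip (a sketch with arbitrary ASCII content):

  // Write a string to a fresh temp file, then read it back
  File f = writeFile("a,b\n1,2\n");
  String back = readFile(f);
  assert back.equals("a,b\n1,2\n");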
Example #6
 // Store Value v to disk.
 @Override
 public void store(Value v) {
   // Only the home node does persistence on NFS
   if (!v._key.home()) return;
   // A perhaps useless cutout: the upper layers should test this first.
   if (v.isPersisted()) return;
   // Never store arraylets on NFS, instead we'll store the entire array.
   assert !v.isArray();
   try {
     File f = getFileForKey(v._key);
      // Create missing parent directories; mkdirs() on f itself would create a
      // directory at the file's own path and make the FileOutputStream below fail
      f.getParentFile().mkdirs();
     FileOutputStream s = new FileOutputStream(f);
     try {
       byte[] m = v.memOrLoad();
       assert (m == null || m.length == v._max); // Assert not saving partial files
       if (m != null) new AutoBuffer(s.getChannel(), false, Value.NFS).putA1(m, m.length).close();
       v.setdsk(); // Set as write-complete to disk
     } finally {
       s.close();
     }
   } catch (IOException e) {
     H2O.ignore(e);
   }
 }
Example #7
 private void serveLocalDisk(InputStream csv) throws IOException {
   _local = true;
   OutputStream output = null;
   try {
     File f = new File(path);
     if (!force && f.exists())
       throw new IllegalArgumentException("File " + path + " already exists.");
      output = new FileOutputStream(f); // write to the same File that was just checked
     byte[] buffer = new byte[1024];
     int len;
     while ((len = csv.read(buffer)) > 0) {
       output.write(buffer, 0, len);
     }
     Log.info(
         "Key '"
             + src_key.toString()
             + "' was written to "
             + (_local && H2O.CLOUD.size() > 1 ? H2O.SELF_ADDRESS + ":" : "")
             + path.toString()
             + ".");
   } finally {
     if (output != null) output.close();
   }
 }
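
The manual buffer loop above predates java.nio.file; on Java 7+ the same copy can be written in one call (a sketch, not how the original class is implemented; REPLACE_EXISTING stands in for the force flag):

  // Stream the whole InputStream into the target path
  java.nio.file.Files.copy(csv, java.nio.file.Paths.get(path),
      java.nio.file.StandardCopyOption.REPLACE_EXISTING);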
Example #8
 // file implementation -------------------------------------------------------
 public static Key decodeFile(File f) {
   String kname = KEY_PREFIX + File.separator + f.toString();
    assert (kname.length() <= 512); // key names have a hard length cap (512)
   // all NFS keys are NFS-kind keys
   return Key.make(kname.getBytes());
 }
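
A short usage sketch (hypothetical path): the key name is simply KEY_PREFIX, a separator, and the file's own path, so every file maps to a deterministic key.

  // Build the NFS-backed key for a local file
  Key k = decodeFile(new File("/data/iris.csv"));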