Example #1
 /**
  * Stores the stream's data into its chunk using an atomic union. Once the
  * data has been handed off, the stream's buffer is freed.
  */
 public void store() {
   assert _ab.eof();
   Key k = ValueArray.getChunkKey(_chunkIndex, _resultKey);
   AtomicUnion u = new AtomicUnion(_ab.bufClose(), _chunkOffset);
   alsoBlockFor(u.fork(k));
   _ab = null; // free mem
 }
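
For reference, the same read-modify-write handoff can be sketched with the generic Atomic task that Example #7 uses. This is a rough sketch, not the AtomicUnion itself: writeBytes, targetChunk, payload, and off are hypothetical names, while memOrLoad(), new Value(Key, byte[]), and invoke()/fork() are taken from the examples in this listing.

 static void writeBytes(final Key targetChunk, final byte[] payload, final int off) {
   new Atomic() {
     @Override
     public Value atomic(Value val) {
       // Merge payload into the chunk's existing bytes (if any) at offset off
       byte[] old = (val == null) ? new byte[0] : val.memOrLoad();
       byte[] bits = new byte[Math.max(old.length, off + payload.length)];
       System.arraycopy(old, 0, bits, 0, old.length);
       System.arraycopy(payload, 0, bits, off, payload.length);
       return new Value(targetChunk, bits);
     }
   }.invoke(targetChunk); // invoke() blocks; fork(), as in store(), returns a future
 }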
Example #2
 // User-Weak-Get a Key from the distributed cloud.
 // Right now, just gets chunk#0 from a ValueArray, or a normal Value otherwise.
 public static Value get(Key key) {
   Value val = DKV.get(key);
   if (val != null && val._isArray != 0) {
     Key k2 = ValueArray.getChunkKey(0, key);
     Value vchunk0 = DKV.get(k2);
     assert vchunk0 != null : "missed looking for key " + k2 + " from " + key;
     return vchunk0; // Return chunk #0 in place of the array header
   }
   return val; // Else just return the plain Value asked for
 }
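
A minimal usage sketch; the key name "some.hex" is hypothetical, and Key.make(String) is assumed to exist as in the rest of the H2O-2 API.

 static void demoGet() {
   Key k = Key.make("some.hex"); // hypothetical key name
   Value v = get(k); // chunk #0 if k names a ValueArray, else the plain Value
   if (v != null) System.out.println("fetched " + v._key);
 }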
Example #3
 // Recursively remove, gathering all the pending remote key-deletes
 private static void remove(Key key, Futures fs) {
   Value val = DKV.get(key, 32); // Get the existing Value, if any
   if (val == null) return; // Trivial delete
   if (val._isArray != 0) { // See if this is an Array
     ValueArray ary = ValueArray.value(val);
     for (long i = 0; i < ary.chunks(); i++) // Delete all the chunks
       remove(ary.getChunkKey(i), fs);
   }
   if (key._kb[0] == Key.KEY_OF_KEYS) // Key-of-keys?
     for (Key k : val.flatten()) // Then recursively delete
       remove(k, fs);
   DKV.remove(key, fs);
 }
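
Because each delete is only queued on the shared Futures rather than awaited one at a time, a caller issues the whole recursive delete and blocks once at the end. A minimal caller sketch, assuming Futures.blockForPending() as in H2O-2; removeAll is a hypothetical name.

 static void removeAll(Key key) {
   Futures fs = new Futures();
   remove(key, fs); // queue deletes for the key, its chunks, and any sub-keys
   fs.blockForPending(); // wait for all pending remote deletes to land
 }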
Example #4
 public static void put(Key key, Value val, Futures fs) {
   Value res = DKV.put(key, val, fs);
   // If the old Value was a large array, we need to delete the leftover
   // chunks - they are unrelated to the new Value which might be either
   // bigger or smaller than the old Value.
   if (res != null && res._isArray != 0) {
     ValueArray ary = ValueArray.value(res);
     for (long i = 0; i < ary.chunks(); i++) // Delete all the chunks
       DKV.remove(ary.getChunkKey(i), fs);
   }
   if (res != null && key._kb[0] == Key.KEY_OF_KEYS) // Key-of-keys?
     for (Key k : res.flatten()) // Then recursively delete
       remove(k, fs);
   if (res != null) res.freeMem();
 }
Example #5
  public static String store2Hdfs(Key srcKey) {
    assert srcKey._kb[0] != Key.ARRAYLET_CHUNK;
    assert PersistHdfs.getPathForKey(srcKey) != null; // Validate key name
    Value v = DKV.get(srcKey);
    if (v == null) return "Key " + srcKey + " not found";
    if (v._isArray == 0) { // Simple chunk?
      v.setHdfs(); // Set to HDFS and be done
      return null; // Success
    }

    // For ValueArrays, make the .hex header
    ValueArray ary = ValueArray.value(v);
    String err = PersistHdfs.freeze(srcKey, ary);
    if (err != null) return err;

    // Create the task that tracks which chunk to write next,
    // and store it under a known key
    TaskStore2HDFS ts = new TaskStore2HDFS(srcKey);
    Key selfKey = ts.selfKey();
    UKV.put(selfKey, ts);

    // Then start writing chunks in order, beginning with chunk zero,
    // on the node where that chunk lives
    RPC.call(ts.chunkHome(), ts);

    // Watch the progress key until it gets removed or an error appears
    long idx = 0;
    while (UKV.get(selfKey, ts) != null) {
      if (ts._indexFrom != idx) {
        System.out.print(" " + idx + "/" + ary.chunks());
        idx = ts._indexFrom;
      }
      if (ts._err != null) { // Found an error?
        UKV.remove(selfKey); // Cleanup & report
        return ts._err;
      }
      try {
        Thread.sleep(100);
      } catch (InterruptedException e) {
        // Ignore interrupts; keep polling until the task key goes away
      }
    }
    System.out.println(" " + ary.chunks() + "/" + ary.chunks());

    return null;
  }
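
store2Hdfs() reports failure through its return value (an error String) and success as null, so a caller simply branches on the result. A minimal caller sketch; the key name and the helper name exportToHdfs are hypothetical.

 static void exportToHdfs() {
   String err = store2Hdfs(Key.make("some.hex"));
   if (err != null) System.err.println("HDFS export failed: " + err);
   else System.out.println("HDFS export complete");
 }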
Example #6
  @Override
  public void compute() {
    ValueArray ary = ValueArray.value(_arykey);
    Key self = selfKey();

    while (_indexFrom < ary.chunks()) {
      Key ckey = ary.getChunkKey(_indexFrom++);
      if (!ckey.home()) { // Next chunk not At Home?
        RPC.call(chunkHome(), this); // Hand the baton off to the next node/chunk
        return;
      }
      Value val = DKV.get(ckey); // It IS home, so get the data
      _err = PersistHdfs.appendChunk(_arykey, val);
      if (_err != null) return;
      UKV.put(self, this); // Update the progress/self key
    }
    // We did the last chunk. Removing the selfKey is the signal to the web
    // thread that we are all done.
    UKV.remove(self);
  }
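
compute() hands the baton from node to node and finally deletes its self key, so that key doubles as the completion signal; this is exactly what the polling loop in Example #5 relies on. A minimal client-side sketch, assuming a single-argument UKV.get(Key) exists alongside the two-argument form used in Example #5; waitForDone is a hypothetical name.

 static void waitForDone(Key selfKey) throws InterruptedException {
   while (UKV.get(selfKey) != null) // key disappears once the last chunk is written
     Thread.sleep(100);
 }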
Example #7
 private void updateClusters(
     int[] clusters, int count, long chunk, long numrows, int rpc, long updatedRow) {
   final int offset = (int) (updatedRow - (rpc * chunk));
   final Key chunkKey = ValueArray.getChunkKey(chunk, _job.dest());
   final int[] message;
   if (count == clusters.length) message = clusters;
   else {
     message = new int[count];
     System.arraycopy(clusters, 0, message, 0, message.length);
   }
   final int rows = ValueArray.rpc(chunk, rpc, numrows);
   new Atomic() {
     @Override
     public Value atomic(Value val) {
       assert val == null || val._key.equals(chunkKey);
       // Size the buffer for the whole chunk, then copy in the chunk's
       // existing bytes, if it already exists
       AutoBuffer b = new AutoBuffer(rows * ROW_SIZE);
       if (val != null) b._bb.put(val.memOrLoad());
       // Overwrite the 4-byte cluster assignment of each updated row
       for (int i = 0; i < message.length; i++) b.put4((offset + i) * 4, message[i]);
       b.position(b.limit()); // Mark the buffer as fully written
       return new Value(chunkKey, b.buf());
     }
   }.invoke(chunkKey);
 }
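
The offset arithmetic above maps a global row index to a byte position inside its chunk: with rpc rows per chunk, row updatedRow falls at local row updatedRow - rpc * chunk, and each cluster assignment occupies 4 bytes. A worked sketch with hypothetical numbers; localRow is a hypothetical helper name.

 static int localRow(long globalRow, int rpc, long chunk) {
   return (int) (globalRow - (long) rpc * chunk); // row index within the chunk
 }
 // e.g. rpc = 10000, chunk = 3, globalRow = 30042 -> local row 42,
 // written at byte offset 42 * 4 in the chunk's buffer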
Example #8
 private H2ONode chunkHome() {
   return ValueArray.getChunkKey(_indexFrom, _arykey).home_node();
 }