// Read up to 'len' bytes of Value. Value should already be persisted to
// disk. A racing delete can trigger a failure where we get a null return,
// but no crash (although one could argue that a racing load&delete is a bug
// no matter what).
@Override public byte[] load(Value v) {
  long skip = 0;
  Key k = v._key;
  // Convert an arraylet chunk into a long-offset from the base file.
  if (k._kb[0] == Key.ARRAYLET_CHUNK) {
    skip = ValueArray.getChunkOffset(k); // The offset
    k = ValueArray.getArrayKey(k);       // From the base file key
  }
  if (k._kb[0] == Key.DVEC) {
    skip = water.fvec.NFSFileVec.chunkOffset(k); // The offset
  }
  try {
    FileInputStream s = null;
    try {
      s = new FileInputStream(getFileForKey(k));
      FileChannel fc = s.getChannel();
      fc.position(skip);
      AutoBuffer ab = new AutoBuffer(fc, true, Value.NFS);
      byte[] b = ab.getA1(v._max);
      ab.close();
      assert v.isPersisted();
      return b;
    } finally {
      if (s != null) s.close();
    }
  } catch (IOException e) { // Broken disk / short-file???
    H2O.ignore(e);
    return null;
  }
}
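// A minimal standalone sketch of the same seek-then-bounded-read pattern
// using only java.nio, independent of H2O's Key/Value/AutoBuffer types.
// The path, offset, and length parameters are hypothetical stand-ins for the
// values computed from the key above; this is an illustration, not H2O code.
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.file.Paths;
import java.nio.file.StandardOpenOption;

class OffsetReadSketch {
  static byte[] readChunk(String path, long offset, int len) {
    try (FileChannel fc = FileChannel.open(Paths.get(path), StandardOpenOption.READ)) {
      fc.position(offset);                      // Seek to the chunk's offset in the base file
      ByteBuffer buf = ByteBuffer.allocate(len);
      while (buf.hasRemaining() && fc.read(buf) >= 0) ; // Read up to 'len' bytes
      byte[] b = new byte[buf.position()];
      buf.flip();
      buf.get(b);
      return b;
    } catch (IOException e) {
      return null;                              // Mirror load(): broken disk / short file => null
    }
  }
}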
public static Compression guessCompressionMethod(byte[] bits) {
  AutoBuffer ab = new AutoBuffer(bits);
  // Look for ZIP magic
  if( bits.length > ZipFile.LOCHDR && ab.get4(0) == ZipFile.LOCSIG )
    return Compression.ZIP;
  if( bits.length > 2 && ab.get2(0) == GZIPInputStream.GZIP_MAGIC )
    return Compression.GZIP;
  return Compression.NONE;
}
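// A hedged usage sketch: once guessCompressionMethod has classified the
// leading bytes, the matching java.util.zip wrapper can be applied to the
// raw stream. The decompressionStream helper below is hypothetical and not
// part of H2O; it only shows how the three enum cases map onto JDK stream types.
import java.io.IOException;
import java.io.InputStream;
import java.util.zip.GZIPInputStream;
import java.util.zip.ZipInputStream;

class DecompressSketch {
  static InputStream decompressionStream(InputStream raw, Compression c) throws IOException {
    switch (c) {
      case ZIP:
        ZipInputStream zis = new ZipInputStream(raw);
        zis.getNextEntry();              // Position at the first entry's data
        return zis;
      case GZIP:
        return new GZIPInputStream(raw);
      default:
        return raw;                      // NONE: pass the stream through untouched
    }
  }
}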
@Override public IcedHashMap<K,V> read(AutoBuffer bb) {
  int n = bb.get4();                     // Number of entries written by write()
  for( int i = 0; i < n; ++i )
    put(bb.<K>get(), bb.<V>get());       // Deserialize key, then value
  return this;
}
@Override public AutoBuffer write(AutoBuffer bb) {
  bb.put4(size());                       // Entry count, consumed by read()
  for( Map.Entry<K,V> e : entrySet() )
    bb.put(e.getKey()).put(e.getValue());
  return bb;
}
@Override public IcedArrayList<T> read(AutoBuffer bb) {
  int n = bb.get4();                     // Number of elements written by write()
  for( int i = 0; i < n; ++i )
    add(bb.<T>get());
  return this;
}
@Override public AutoBuffer write(AutoBuffer bb) {
  bb.put4(size());                       // Element count, consumed by read()
  for( T t : this )
    bb.put(t);
  return bb;
}
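// Both collections above use the same count-prefixed layout: a 4-byte size
// followed by the elements in order, which is what lets read() loop exactly
// size() times. A hedged illustration of that layout with plain JDK streams
// (not H2O's AutoBuffer), for a list of strings:
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

class CountPrefixedSketch {
  static byte[] write(List<String> xs) throws IOException {
    ByteArrayOutputStream bos = new ByteArrayOutputStream();
    DataOutputStream out = new DataOutputStream(bos);
    out.writeInt(xs.size());             // 4-byte count, like put4(size())
    for (String x : xs) out.writeUTF(x); // Elements in iteration order
    return bos.toByteArray();
  }
  static List<String> read(byte[] bits) throws IOException {
    DataInputStream in = new DataInputStream(new ByteArrayInputStream(bits));
    int n = in.readInt();                // Count, like get4()
    List<String> xs = new ArrayList<>();
    for (int i = 0; i < n; ++i) xs.add(in.readUTF());
    return xs;
  }
}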