@Override
public void write(SnapshotRecord... records) throws IOException {
  for (SnapshotRecord rec : records) {
    writer.snapshotEntry(rec);
    bytes += rec.getSize();
  }
}
private void importOnMember(File snapshot, SnapshotFormat format, SnapshotOptions<K, V> options)
    throws IOException, ClassNotFoundException {
  final LocalRegion local = getLocalRegion(region);

  if (getLoggerI18n().infoEnabled()) {
    getLoggerI18n().info(LocalizedStrings.Snapshot_IMPORT_BEGIN_0, region.getName());
  }

  long count = 0;
  long bytes = 0;
  long start = CachePerfStats.getStatTime();

  // Would be interesting to use a PriorityQueue ordered on isDone()
  // but this is probably close enough in practice.
  LinkedList<Future<?>> puts = new LinkedList<Future<?>>();
  GFSnapshotImporter in = new GFSnapshotImporter(snapshot);

  try {
    int bufferSize = 0;
    Map<K, V> buffer = new HashMap<K, V>();

    SnapshotRecord record;
    while ((record = in.readSnapshotRecord()) != null) {
      bytes += record.getSize();
      K key = record.getKeyObject();

      // Until we modify the semantics of put/putAll to allow null values we
      // have to subvert the API by using Token.INVALID. Alternatively we could
      // invoke create/invalidate directly but that prevents us from using
      // bulk operations. The ugly type coercion below is necessary to allow
      // strong typing elsewhere.
      V val = (V) Token.INVALID;
      if (record.hasValue()) {
        byte[] data = record.getValue();
        // If the underlying object is a byte[], we can't wrap it in a
        // CachedDeserializable. Somewhere along the line the header bytes
        // get lost and we start seeing serialization problems.
        if (data.length > 0 && data[0] == DSCODE.BYTE_ARRAY) {
          // It would be faster to use System.arraycopy() directly but since
          // the length field is variable it's probably safest and simplest to
          // keep the logic in the InternalDataSerializer.
          val = record.getValueObject();
        } else {
          val = (V) CachedDeserializableFactory.create(record.getValue());
        }
      }

      if (includeEntry(options, key, val)) {
        buffer.put(key, val);
        bufferSize += record.getSize();
        count++;

        // Push entries into the cache using putAll on a separate thread so we
        // can keep the disk busy. Throttle puts so we don't overwhelm the cache.
        if (bufferSize > BUFFER_SIZE) {
          if (puts.size() == IMPORT_CONCURRENCY) {
            puts.removeFirst().get();
          }

          final Map<K, V> copy = new HashMap<K, V>(buffer);
          Future<?> f = GemFireCacheImpl.getExisting("Importing region from snapshot")
              .getDistributionManager()
              .getWaitingThreadPool()
              .submit(new Runnable() {
                @Override
                public void run() {
                  local.basicImportPutAll(copy, true);
                }
              });

          puts.addLast(f);
          buffer.clear();
          bufferSize = 0;
        }
      }
    }

    // Send off any remaining entries.
    if (!buffer.isEmpty()) {
      local.basicImportPutAll(buffer, true);
    }

    // Wait for completion and check for errors.
    while (!puts.isEmpty()) {
      puts.removeFirst().get();
    }

    if (getLoggerI18n().infoEnabled()) {
      getLoggerI18n().info(LocalizedStrings.Snapshot_IMPORT_END_0_1_2_3,
          new Object[] {count, bytes, region.getName(), snapshot});
    }
  } catch (InterruptedException e) {
    while (!puts.isEmpty()) {
      puts.removeFirst().cancel(true);
    }
    Thread.currentThread().interrupt();
    throw (IOException) new InterruptedIOException().initCause(e);
  } catch (ExecutionException e) {
    while (!puts.isEmpty()) {
      puts.removeFirst().cancel(true);
    }
    throw new IOException(e);
  } finally {
    in.close();
    local.getCachePerfStats().endImport(count, start);
  }
}
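// The import above batches entries into a bounded buffer and flushes each full buffer to a
// worker thread, capping the number of in-flight flushes so reading from disk never gets too
// far ahead of the cache. The helper below is a minimal, hypothetical sketch of that same
// buffer-and-throttle pattern using only the JDK; BatchSink, flushThrottled, and maxInFlight
// are illustrative names, not part of the snapshot API, and it assumes
// java.util.concurrent.ExecutorService is imported alongside the imports this file already uses.

/** Hypothetical callback applied to each flushed batch; stands in for basicImportPutAll. */
private interface BatchSink<K, V> {
  void apply(Map<K, V> batch);
}

/** Sketch: flush the current buffer asynchronously, allowing at most maxInFlight pending flushes. */
private static <K, V> void flushThrottled(
    ExecutorService pool,
    LinkedList<Future<?>> inFlight,
    Map<K, V> buffer,
    int maxInFlight,
    final BatchSink<K, V> sink) throws InterruptedException, ExecutionException {
  // Block on the oldest outstanding flush once the concurrency cap is reached; this FIFO wait
  // mirrors the puts.removeFirst().get() throttling in importOnMember.
  if (inFlight.size() == maxInFlight) {
    inFlight.removeFirst().get();
  }

  // Copy the buffer so the caller can keep reading records while the flush runs.
  final Map<K, V> batch = new HashMap<K, V>(buffer);
  inFlight.addLast(pool.submit(new Runnable() {
    @Override
    public void run() {
      sink.apply(batch);
    }
  }));

  buffer.clear();
}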