/** Create a native VoltDB execution engine */
ExecutionEngine initializeEE(String serializedCatalog, final long timestamp) {
    String hostname = CoreUtils.getHostnameOrAddress();
    ExecutionEngine eeTemp = null;
    try {
        if (m_backend == BackendTarget.NATIVE_EE_JNI) {
            eeTemp = new ExecutionEngineJNI(
                    m_context.cluster.getRelativeIndex(),
                    m_siteId,
                    m_partitionId,
                    CoreUtils.getHostIdFromHSId(m_siteId),
                    hostname,
                    m_context
                            .cluster
                            .getDeployment()
                            .get("deployment")
                            .getSystemsettings()
                            .get("systemsettings")
                            .getMaxtemptablesize(),
                    m_numberOfPartitions);
            eeTemp.loadCatalog(timestamp, serializedCatalog);
        } else {
            // set up the EE over IPC
            eeTemp = new ExecutionEngineIPC(
                    m_context.cluster.getRelativeIndex(),
                    m_siteId,
                    m_partitionId,
                    CoreUtils.getHostIdFromHSId(m_siteId),
                    hostname,
                    m_context
                            .cluster
                            .getDeployment()
                            .get("deployment")
                            .getSystemsettings()
                            .get("systemsettings")
                            .getMaxtemptablesize(),
                    m_backend,
                    VoltDB.instance().getConfig().m_ipcPorts.remove(0),
                    m_numberOfPartitions);
            eeTemp.loadCatalog(timestamp, serializedCatalog);
        }
    }
    // just print error info and bail if we run into an error here
    catch (final Exception ex) {
        hostLog.l7dlog(
                Level.FATAL,
                LogKeys.host_ExecutionSite_FailedConstruction.name(),
                new Object[] {m_siteId, m_siteIndex},
                ex);
        VoltDB.crashLocalVoltDB(ex.getMessage(), true, ex);
    }
    return eeTemp;
}
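/*
 * A minimal, generic sketch of the fail-fast construction policy above: if the
 * engine cannot be built or the catalog cannot load, terminate the process
 * rather than continue with a half-initialized site. Everything below
 * (Engine, initEngineOrDie) is illustrative, not VoltDB API; the real code
 * crashes via VoltDB.crashLocalVoltDB after logging.
 */
final class FailFastInitSketch {
    interface Engine {
        void loadCatalog(long timestamp, String serializedCatalog);
    }

    static Engine initEngineOrDie(String serializedCatalog, long timestamp) {
        try {
            // Stand-in for native engine construction (JNI or IPC in the real code)
            Engine engine = (ts, catalog) -> { /* native initialization would happen here */ };
            engine.loadCatalog(timestamp, serializedCatalog);
            return engine;
        } catch (Exception ex) {
            // Log, then halt: a site without a working engine cannot make progress.
            System.err.println("Failed to construct execution engine: " + ex.getMessage());
            Runtime.getRuntime().halt(-1);
            return null; // unreachable
        }
    }
}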
@Override
public Callable<Boolean> createSetup(
        String file_path,
        String file_nonce,
        long txnId,
        Map<Integer, Long> partitionTransactionIds,
        JSONObject jsData,
        SystemProcedureExecutionContext context,
        VoltTable result,
        Map<String, Map<Integer, Pair<Long, Long>>> exportSequenceNumbers,
        SiteTracker tracker,
        HashinatorSnapshotData hashinatorData,
        long timestamp) {
    assert SnapshotSiteProcessor.ExecutionSitesCurrentlySnapshotting.isEmpty();

    final IndexSnapshotRequestConfig config =
            new IndexSnapshotRequestConfig(jsData, context.getDatabase());
    final Map<Integer, Long> pidToLocalHSIds = findLocalSources(config.partitionRanges, tracker);

    // mark snapshot start in registry
    final AtomicInteger numTables = new AtomicInteger(config.tables.length);
    m_snapshotRecord =
            SnapshotRegistry.startSnapshot(
                    txnId,
                    context.getHostId(),
                    file_path,
                    file_nonce,
                    SnapshotFormat.INDEX,
                    config.tables);

    // create table tasks
    for (Table table : config.tables) {
        createTasksForTable(
                table, config.partitionRanges, pidToLocalHSIds, numTables, m_snapshotRecord);
        result.addRow(
                context.getHostId(),
                CoreUtils.getHostnameOrAddress(),
                table.getTypeName(),
                "SUCCESS",
                "");
    }

    return null;
}
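/*
 * Hedged sketch of the completion-counting pattern used above: a shared
 * AtomicInteger sized to the table count is handed to each per-table task,
 * and (by assumption) the last task to decrement it to zero performs the
 * snapshot-finalization step. Names here are illustrative, not the VoltDB
 * task implementation.
 */
import java.util.concurrent.atomic.AtomicInteger;

class CompletionCounterSketch {
    public static void main(String[] args) {
        final AtomicInteger remaining = new AtomicInteger(3); // e.g. three tables
        Runnable onTableDone = () -> {
            if (remaining.decrementAndGet() == 0) {
                System.out.println("last table finished; finalize snapshot");
            }
        };
        onTableDone.run();
        onTableDone.run();
        onTableDone.run(); // prints the finalize message exactly once
    }
}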
public DefaultSnapshotDataTarget(
        final File file,
        final int hostId,
        final String clusterName,
        final String databaseName,
        final String tableName,
        final int numPartitions,
        final boolean isReplicated,
        final List<Integer> partitionIds,
        final VoltTable schemaTable,
        final long txnId,
        final long timestamp,
        int version[])
        throws IOException {
    String hostname = CoreUtils.getHostnameOrAddress();
    m_file = file;
    m_tableName = tableName;
    m_fos = new FileOutputStream(file);
    m_channel = m_fos.getChannel();
    m_needsFinalClose = !isReplicated;
    final FastSerializer fs = new FastSerializer();
    fs.writeInt(0); // CRC
    fs.writeInt(0); // Header length placeholder
    // Indicate the snapshot was not completed; set to true for the CRC calculation, false later
    fs.writeByte(1);
    for (int ii = 0; ii < 4; ii++) {
        fs.writeInt(version[ii]); // version
    }
    JSONStringer stringer = new JSONStringer();
    byte jsonBytes[] = null;
    try {
        stringer.object();
        stringer.key("txnId").value(txnId);
        stringer.key("hostId").value(hostId);
        stringer.key("hostname").value(hostname);
        stringer.key("clusterName").value(clusterName);
        stringer.key("databaseName").value(databaseName);
        stringer.key("tableName").value(tableName.toUpperCase());
        stringer.key("isReplicated").value(isReplicated);
        stringer.key("isCompressed").value(true);
        stringer.key("checksumType").value("CRC32C");
        stringer.key("timestamp").value(timestamp);
        /*
         * The timestamp string is for human consumption; automated tools should use
         * the actual timestamp
         */
        stringer.key("timestampString").value(SnapshotUtil.formatHumanReadableDate(timestamp));
        if (!isReplicated) {
            stringer.key("partitionIds").array();
            for (int partitionId : partitionIds) {
                stringer.value(partitionId);
            }
            stringer.endArray();
            stringer.key("numPartitions").value(numPartitions);
        }
        stringer.endObject();
        String jsonString = stringer.toString();
        JSONObject jsonObj = new JSONObject(jsonString);
        jsonString = jsonObj.toString(4);
        jsonBytes = jsonString.getBytes("UTF-8");
    } catch (Exception e) {
        throw new IOException(e);
    }
    fs.writeInt(jsonBytes.length);
    fs.write(jsonBytes);

    final BBContainer container = fs.getBBContainer();
    container.b.position(4);
    container.b.putInt(container.b.remaining() - 4);
    container.b.position(0);

    final byte schemaBytes[] = PrivateVoltTableFactory.getSchemaBytes(schemaTable);

    final PureJavaCrc32 crc = new PureJavaCrc32();
    ByteBuffer aggregateBuffer = ByteBuffer.allocate(container.b.remaining() + schemaBytes.length);
    aggregateBuffer.put(container.b);
    aggregateBuffer.put(schemaBytes);
    aggregateBuffer.flip();
    crc.update(aggregateBuffer.array(), 4, aggregateBuffer.capacity() - 4);

    final int crcValue = (int) crc.getValue();
    aggregateBuffer.putInt(crcValue).position(8);
    aggregateBuffer.put((byte) 0).position(0); // Haven't actually finished writing the file

    if (m_simulateFullDiskWritingHeader) {
        m_writeException = new IOException("Disk full");
        m_writeFailed = true;
        m_fos.close();
        throw m_writeException;
    }

    /*
     * Be completely sure the write succeeded. If it didn't,
     * the disk is probably full or the path is bunk, etc.
     */
    m_acceptOneWrite = true;
    ListenableFuture<?> writeFuture =
            write(Callables.returning((BBContainer) DBBPool.wrapBB(aggregateBuffer)), false);
    try {
        writeFuture.get();
    } catch (InterruptedException e) {
        m_fos.close();
        throw new java.io.InterruptedIOException();
    } catch (ExecutionException e) {
        m_fos.close();
        throw m_writeException;
    }
    if (m_writeFailed) {
        m_fos.close();
        throw m_writeException;
    }

    ScheduledFuture<?> syncTask = null;
    syncTask =
            m_syncService.scheduleAtFixedRate(
                    new Runnable() {
                        @Override
                        public void run() {
                            // Only sync once at least 4 megabytes of data have accumulated,
                            // enough to amortize the cost of seeking on ye olden platters.
                            // Since we are appending to a file it's actually 2 seeks.
                            while (m_bytesWrittenSinceLastSync.get() > (1024 * 1024 * 4)) {
                                final int bytesSinceLastSync =
                                        m_bytesWrittenSinceLastSync.getAndSet(0);
                                try {
                                    m_channel.force(false);
                                } catch (IOException e) {
                                    if (!(e instanceof java.nio.channels.AsynchronousCloseException)) {
                                        SNAP_LOG.error("Error syncing snapshot", e);
                                    } else {
                                        SNAP_LOG.debug(
                                                "Asynchronous close while syncing snapshot data, presumably graceful",
                                                e);
                                    }
                                }
                                m_bytesAllowedBeforeSync.release(bytesSinceLastSync);
                            }
                        }
                    },
                    SNAPSHOT_SYNC_FREQUENCY,
                    SNAPSHOT_SYNC_FREQUENCY,
                    TimeUnit.MILLISECONDS);
    m_syncTask = syncTask;
}
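/*
 * A standalone sketch (assumption) that reads back the header exactly as laid
 * out by the constructor above: a 4-byte CRC32C over everything that follows,
 * a 4-byte header length, the 1-byte completion flag (written as 0 on disk
 * while the snapshot is still in flight), 4 version ints, and a
 * length-prefixed UTF-8 JSON blob, with the table schema bytes after it.
 * Big-endian is assumed, matching the ByteBuffer default used by the
 * serializer. The class and method names are illustrative, not part of VoltDB.
 */
import java.io.DataInputStream;
import java.io.FileInputStream;
import java.io.IOException;

class SnapshotHeaderReaderSketch {
    static void dumpHeader(String path) throws IOException {
        try (DataInputStream in = new DataInputStream(new FileInputStream(path))) {
            final int crc = in.readInt();          // CRC32C over all bytes after these 4
            final int headerLength = in.readInt(); // header bytes remaining after this field
            final byte completed = in.readByte();  // 0 while the snapshot is still being written
            final int[] version = new int[4];
            for (int ii = 0; ii < 4; ii++) {
                version[ii] = in.readInt();
            }
            final byte[] jsonBytes = new byte[in.readInt()];
            in.readFully(jsonBytes);
            System.out.printf(
                    "crc=%08x headerLength=%d completed=%d json=%s%n",
                    crc, headerLength, completed, new String(jsonBytes, "UTF-8"));
        }
    }
}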