Code example #1
File: FastSerializer.java  Project: hnwyllmm/voltdb
  /** Resizes the internal byte buffer with a simple doubling policy, if needed. */
  private final void growIfNeeded(int minimumDesired) {
    if (buffer.b().remaining() < minimumDesired) {
      // Compute the size of the new buffer
      int newCapacity = buffer.b().capacity();
      int newRemaining = newCapacity - buffer.b().position();
      while (newRemaining < minimumDesired) {
        newRemaining += newCapacity;
        newCapacity *= 2;
      }

      // Allocate and copy
      BBContainer next;
      if (isDirect) {
        next = DBBPool.allocateDirect(newCapacity);
      } else {
        next = DBBPool.wrapBB(ByteBuffer.allocate(newCapacity));
      }
      buffer.b().flip();
      next.b().put(buffer.b());
      assert next.b().remaining() == newRemaining;
      buffer.discard();
      buffer = next;
      if (callback != null) callback.onBufferGrow(this);
      assert (buffer.b().order() == ByteOrder.BIG_ENDIAN);
    }
  }
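The loop above keeps the invariant newRemaining == newCapacity - position while doubling, so the replacement buffer always has room for the pending write plus everything already serialized. A standalone sketch of just that capacity calculation, using a hypothetical helper name and no VoltDB types:

// Hypothetical helper mirroring the doubling policy of growIfNeeded above.
public final class GrowthPolicySketch {
  static int newCapacityFor(int capacity, int position, int minimumDesired) {
    int newCapacity = capacity;
    int newRemaining = newCapacity - position;
    while (newRemaining < minimumDesired) {
      newRemaining += newCapacity; // add the pre-doubling capacity, keeping newRemaining == newCapacity - position
      newCapacity *= 2;
    }
    return newCapacity;
  }

  public static void main(String[] args) {
    // A 1024-byte buffer at position 900 needing 500 more bytes doubles once to 2048.
    System.out.println(newCapacityFor(1024, 900, 500)); // prints 2048
  }
}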
Code example #2
File: FastSerializer.java  Project: hnwyllmm/voltdb
 /** Constructor that sets the callback object. */
 public FastSerializer(
     boolean bigEndian, boolean isDirect, BufferGrowCallback callback, int initialAllocation) {
   assert (initialAllocation > 0);
   this.isDirect = isDirect;
   if (isDirect) {
     buffer = DBBPool.allocateDirect(initialAllocation);
   } else {
     buffer = DBBPool.wrapBB(ByteBuffer.allocate(initialAllocation));
   }
   this.callback = callback;
   buffer.b().order(bigEndian ? ByteOrder.BIG_ENDIAN : ByteOrder.LITTLE_ENDIAN);
 }
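A brief usage sketch of this constructor, not taken from the project: it assumes FastSerializer lives in org.voltdb.messaging and that BufferGrowCallback is a single-method interface nested in FastSerializer, exposing the onBufferGrow(FastSerializer) hook that growIfNeeded above invokes.

import org.voltdb.messaging.FastSerializer; // package path assumed

public class FastSerializerUsageSketch {
  public static void main(String[] args) throws Exception {
    // Grow callback: fires whenever growIfNeeded replaces the backing buffer.
    // Written as a lambda on the assumption that BufferGrowCallback is a functional interface.
    FastSerializer.BufferGrowCallback callback =
        fs -> System.out.println("serializer buffer grew");
    // bigEndian = true, heap-backed (isDirect = false), 512-byte initial allocation (arbitrary).
    FastSerializer out = new FastSerializer(true, false, callback, 512);
    out.writeInt(42); // writeInt/write are used the same way in code example #4
  }
}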
Code example #3
  public static synchronized void pushDRBuffer(int partitionId, ByteBuffer buf) {
    if (logDebug) {
      System.out.println("Received DR buffer size " + buf.remaining());
      AtomicLong haveOpenTransaction = haveOpenTransactionLocal.get();
      buf.order(ByteOrder.LITTLE_ENDIAN);
      // Magic header space for Java for implementing zero copy stuff
      buf.position(8);
      while (buf.hasRemaining()) {
        int startPosition = buf.position();
        byte version = buf.get();
        int type = buf.get();

        int checksum = 0;
        if (version != 0) System.out.println("Remaining is " + buf.remaining());

        switch (DRRecordType.valueOf(type)) {
          case INSERT:
            {
              // Insert
              if (haveOpenTransaction.get() == -1) {
                System.out.println("Have insert but no open transaction");
                System.exit(-1);
              }
              final long tableHandle = buf.getLong();
              final int lengthPrefix = buf.getInt();
              buf.position(buf.position() + lengthPrefix);
              checksum = buf.getInt();
              System.out.println(
                  "Version "
                      + version
                      + " type INSERT table handle "
                      + tableHandle
                      + " length "
                      + lengthPrefix
                      + " checksum "
                      + checksum);
              break;
            }
          case DELETE:
            {
              // Delete
              if (haveOpenTransaction.get() == -1) {
                System.out.println("Have insert but no open transaction");
                System.exit(-1);
              }
              final long tableHandle = buf.getLong();
              final int lengthPrefix = buf.getInt();
              buf.position(buf.position() + lengthPrefix);
              checksum = buf.getInt();
              System.out.println(
                  "Version "
                      + version
                      + " type DELETE table handle "
                      + tableHandle
                      + " length "
                      + lengthPrefix
                      + " checksum "
                      + checksum);
              break;
            }
          case UPDATE:
            // Update
            // System.out.println("Version " + version + " type UPDATE " + checksum " + checksum);
            break;
          case BEGIN_TXN:
            {
              // Begin txn
              final long txnId = buf.getLong();
              final long spHandle = buf.getLong();
              if (haveOpenTransaction.get() != -1) {
                System.out.println(
                    "Have open transaction txnid "
                        + txnId
                        + " spHandle "
                        + spHandle
                        + " but already open transaction");
                System.exit(-1);
              }
              haveOpenTransaction.set(spHandle);
              checksum = buf.getInt();
              System.out.println(
                  "Version "
                      + version
                      + " type BEGIN_TXN "
                      + " txnid "
                      + txnId
                      + " spHandle "
                      + spHandle
                      + " checksum "
                      + checksum);
              break;
            }
          case END_TXN:
            {
              // End txn
              final long spHandle = buf.getLong();
              if (haveOpenTransaction.get() == -1) {
                System.out.println(
                    "Have end transaction spHandle "
                        + spHandle
                        + " but no open transaction and its less then last committed "
                        + lastCommittedSpHandle.get().get());
                //                    checksum = buf.getInt();
                //                    break;
                System.exit(-1);
              }
              haveOpenTransaction.set(-1);
              lastCommittedSpHandle.get().set(spHandle);
              checksum = buf.getInt();
              System.out.println(
                  "Version "
                      + version
                      + " type END_TXN "
                      + " spHandle "
                      + spHandle
                      + " checksum "
                      + checksum);
              break;
            }
        }
        int calculatedChecksum =
            DBBPool.getBufferCRC32C(buf, startPosition, buf.position() - startPosition - 4);
        if (calculatedChecksum != checksum) {
          System.out.println("Checksum " + calculatedChecksum + " didn't match " + checksum);
          System.exit(-1);
        }
      }
    }
    final BBContainer cont = DBBPool.wrapBB(buf);
    DBBPool.registerUnsafeMemory(cont.address());
    cont.discard();
  }
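Each record's trailing 4-byte checksum covers everything from the record's first byte up to, but not including, the checksum field itself, which is why the length passed to DBBPool.getBufferCRC32C is buf.position() - startPosition - 4. A minimal sketch of the same span calculation using java.util.zip.CRC32C (Java 9+) as a stand-in, assuming DBBPool.getBufferCRC32C computes the standard CRC32C over that range:

import java.nio.ByteBuffer;
import java.util.zip.CRC32C;

// Sketch: recompute a DR record's checksum over [recordStart, recordEnd - 4),
// i.e. every record byte except the trailing checksum field itself.
public class DrRecordCrcSketch {
  static int recordCrc(ByteBuffer buf, int recordStart, int recordEnd) {
    ByteBuffer view = buf.duplicate();
    view.limit(recordEnd - 4);   // stop before the checksum field
    view.position(recordStart);  // start at the record's version byte
    CRC32C crc = new CRC32C();
    crc.update(view);            // consumes only the view; buf's position is untouched
    return (int) crc.getValue();
  }
}

Called at the same point as the check above, recordCrc(buf, startPosition, buf.position()) would cover the identical byte range.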
Code example #4
  public DefaultSnapshotDataTarget(
      final File file,
      final int hostId,
      final String clusterName,
      final String databaseName,
      final String tableName,
      final int numPartitions,
      final boolean isReplicated,
      final List<Integer> partitionIds,
      final VoltTable schemaTable,
      final long txnId,
      final long timestamp,
      int version[])
      throws IOException {
    String hostname = CoreUtils.getHostnameOrAddress();
    m_file = file;
    m_tableName = tableName;
    m_fos = new FileOutputStream(file);
    m_channel = m_fos.getChannel();
    m_needsFinalClose = !isReplicated;
    final FastSerializer fs = new FastSerializer();
    fs.writeInt(0); // CRC
    fs.writeInt(0); // Header length placeholder
    // Completion flag: indicate the snapshot was not completed; set to 1 (true) for the CRC
    // calculation, rewritten to 0 (false) later.
    fs.writeByte(1);
    for (int ii = 0; ii < 4; ii++) {
      fs.writeInt(version[ii]); // version
    }
    JSONStringer stringer = new JSONStringer();
    byte jsonBytes[] = null;
    try {
      stringer.object();
      stringer.key("txnId").value(txnId);
      stringer.key("hostId").value(hostId);
      stringer.key("hostname").value(hostname);
      stringer.key("clusterName").value(clusterName);
      stringer.key("databaseName").value(databaseName);
      stringer.key("tableName").value(tableName.toUpperCase());
      stringer.key("isReplicated").value(isReplicated);
      stringer.key("isCompressed").value(true);
      stringer.key("checksumType").value("CRC32C");
      stringer.key("timestamp").value(timestamp);
      /*
       * The timestamp string is for human consumption, automated stuff should use
       * the actual timestamp
       */
      stringer.key("timestampString").value(SnapshotUtil.formatHumanReadableDate(timestamp));
      if (!isReplicated) {
        stringer.key("partitionIds").array();
        for (int partitionId : partitionIds) {
          stringer.value(partitionId);
        }
        stringer.endArray();

        stringer.key("numPartitions").value(numPartitions);
      }
      stringer.endObject();
      String jsonString = stringer.toString();
      JSONObject jsonObj = new JSONObject(jsonString);
      jsonString = jsonObj.toString(4);
      jsonBytes = jsonString.getBytes("UTF-8");
    } catch (Exception e) {
      throw new IOException(e);
    }
    fs.writeInt(jsonBytes.length);
    fs.write(jsonBytes);

    final BBContainer container = fs.getBBContainer();
    container.b.position(4);
    container.b.putInt(container.b.remaining() - 4);
    container.b.position(0);

    final byte schemaBytes[] = PrivateVoltTableFactory.getSchemaBytes(schemaTable);

    final PureJavaCrc32 crc = new PureJavaCrc32();
    ByteBuffer aggregateBuffer = ByteBuffer.allocate(container.b.remaining() + schemaBytes.length);
    aggregateBuffer.put(container.b);
    aggregateBuffer.put(schemaBytes);
    aggregateBuffer.flip();
    crc.update(aggregateBuffer.array(), 4, aggregateBuffer.capacity() - 4);

    final int crcValue = (int) crc.getValue();
    aggregateBuffer.putInt(crcValue).position(8);
    aggregateBuffer.put((byte) 0).position(0); // Haven't actually finished writing file

    if (m_simulateFullDiskWritingHeader) {
      m_writeException = new IOException("Disk full");
      m_writeFailed = true;
      m_fos.close();
      throw m_writeException;
    }

    /*
     * Be completely sure the write succeeded. If it didn't
     * the disk is probably full or the path is bunk etc.
     */
    m_acceptOneWrite = true;
    ListenableFuture<?> writeFuture =
        write(Callables.returning((BBContainer) DBBPool.wrapBB(aggregateBuffer)), false);
    try {
      writeFuture.get();
    } catch (InterruptedException e) {
      m_fos.close();
      throw new java.io.InterruptedIOException();
    } catch (ExecutionException e) {
      m_fos.close();
      throw m_writeException;
    }
    if (m_writeFailed) {
      m_fos.close();
      throw m_writeException;
    }

    ScheduledFuture<?> syncTask = null;
    syncTask =
        m_syncService.scheduleAtFixedRate(
            new Runnable() {
              @Override
              public void run() {
                // Only sync once at least 4 megabytes of data have been written, enough to amortize
                // the cost of seeking on ye olden platters. Since we are appending to a file it's
                // actually 2 seeks.
                while (m_bytesWrittenSinceLastSync.get() > (1024 * 1024 * 4)) {
                  final int bytesSinceLastSync = m_bytesWrittenSinceLastSync.getAndSet(0);
                  try {
                    m_channel.force(false);
                  } catch (IOException e) {
                    if (!(e instanceof java.nio.channels.AsynchronousCloseException)) {
                      SNAP_LOG.error("Error syncing snapshot", e);
                    } else {
                      SNAP_LOG.debug(
                          "Asynchronous close syncing snasphot data, presumably graceful", e);
                    }
                  }
                  m_bytesAllowedBeforeSync.release(bytesSinceLastSync);
                }
              }
            },
            SNAPSHOT_SYNC_FREQUENCY,
            SNAPSHOT_SYNC_FREQUENCY,
            TimeUnit.MILLISECONDS);
    m_syncTask = syncTask;
  }
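The header this constructor writes is laid out as: a 4-byte CRC, a 4-byte length covering every later header byte, a 1-byte completion flag, four 4-byte version ints, then a length-prefixed JSON metadata blob; the CRC is computed over everything after the CRC field, including the appended schema bytes. A rough sketch of that framing with a plain ByteBuffer, using java.util.zip.CRC32 as a stand-in for PureJavaCrc32 on the assumption that both produce the standard CRC-32 value:

import java.nio.ByteBuffer;
import java.util.zip.CRC32;

// Sketch: reproduce the snapshot header layout written by the constructor above.
public class SnapshotHeaderSketch {
  static ByteBuffer buildHeader(int[] version, byte[] jsonBytes, byte[] schemaBytes) {
    ByteBuffer b =
        ByteBuffer.allocate(4 + 4 + 1 + 16 + 4 + jsonBytes.length + schemaBytes.length);
    b.putInt(0);                   // CRC placeholder
    b.putInt(0);                   // header-length placeholder
    b.put((byte) 1);               // completion flag, 1 (true) while computing the CRC
    for (int ii = 0; ii < 4; ii++) {
      b.putInt(version[ii]);       // 4-int version
    }
    b.putInt(jsonBytes.length);    // length-prefixed JSON metadata blob
    b.put(jsonBytes);
    b.putInt(4, b.position() - 8); // header length = bytes after the length field (schema excluded)
    b.put(schemaBytes);            // schema is appended and covered by the CRC
    b.flip();
    CRC32 crc = new CRC32();       // stand-in for PureJavaCrc32 (assumed equivalent)
    crc.update(b.array(), 4, b.capacity() - 4);
    b.putInt(0, (int) crc.getValue());
    b.put(8, (byte) 0);            // flag rewritten to 0 (incomplete) before the header goes to disk
    return b;
  }
}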