Example #1
0
  /** Resizes the internal byte buffer with a simple doubling policy, if needed. */
  private final void growIfNeeded(int minimumDesired) {
    if (buffer.b().remaining() < minimumDesired) {
      // Compute the size of the new buffer
      int newCapacity = buffer.b().capacity();
      int newRemaining = newCapacity - buffer.b().position();
      while (newRemaining < minimumDesired) {
        newRemaining += newCapacity;
        newCapacity *= 2;
      }

      // Allocate and copy
      BBContainer next;
      if (isDirect) {
        next = DBBPool.allocateDirect(newCapacity);
      } else {
        next = DBBPool.wrapBB(ByteBuffer.allocate(newCapacity));
      }
      buffer.b().flip();
      next.b().put(buffer.b());
      assert next.b().remaining() == newRemaining;
      buffer.discard();
      buffer = next;
      if (callback != null) callback.onBufferGrow(this);
      assert (buffer.b().order() == ByteOrder.BIG_ENDIAN);
    }
  }
Example #2
0
    private void convertChunks() throws IOException, InterruptedException {
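      // Pull snapshot chunks, convert each VoltTable to CSV, and enqueue the resulting bytes.
      // Comparing m_availableBytes with m_maxAvailableBytes throttles conversion so the queue
      // of CSV bytes stays bounded.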
      int lastNumCharacters = 1024 * 64;
      while (!Thread.interrupted() && m_saveFile.hasMoreChunks()) {
        if (m_availableBytes.get() > m_maxAvailableBytes) {
          Thread.sleep(5);
          continue;
        }

        BBContainer c = m_saveFile.getNextChunk();
        if (c == null) {
          return;
        }

        try {
          final VoltTable vt = PrivateVoltTableFactory.createVoltTableFromBuffer(c.b(), true);
          Pair<Integer, byte[]> p = VoltTableUtil.toCSV(vt, m_delimiter, null, lastNumCharacters);
          lastNumCharacters = p.getFirst();
          byte[] csvBytes = p.getSecond();
          // An empty byte[] must not be enqueued unless this is the last ConverterThread
          if (csvBytes.length > 0) {
            m_availableBytes.addAndGet(csvBytes.length);
            m_available.offer(csvBytes);
          }
        } finally {
          c.discard();
        }
      }
    }
Example #3
0
 /**
  * Get an ASCII-string-safe version of the binary value using hex encoding.
  *
  * @return A hex-encoded string value representing the serialized objects.
  */
 public String getHexEncodedBytes() {
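   // Drains the buffer and discards its container, so the hex value can only be read out once.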
   buffer.b().flip();
   byte[] bytes = new byte[buffer.b().remaining()];
   buffer.b().get(bytes);
   String hex = Encoder.hexEncode(bytes);
   buffer.discard();
   return hex;
 }
Example #4
0
 @Override
 public void discard() {
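   // Once the last chunk has been handed out, release the original allocation;
   // otherwise put this container back into m_buffers so it can be reused.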
   if (!m_hasMoreChunks) {
     m_origin.discard();
   } else {
     m_buffers.add(this);
   }
 }
Example #5
0
 @Override
 public ListenableFuture<?> write(Callable<BBContainer> tupleData, int tableId) {
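   // This target drops all data: materialize the container (if any) and release it immediately.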
   try {
     BBContainer container = tupleData.call();
     if (container != null) {
       container.discard();
     }
   } catch (Exception e) {
     // Ignore failures from the tuple data source; this target discards the data anyway.
   }
   return null;
 }
Example #6
0
  public void close() throws IOException {
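    // Stop the chunk reader thread first so no new chunks arrive while we drain and free the queues.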
    if (m_chunkReaderThread != null) {
      m_chunkReaderThread.interrupt();
      try {
        m_chunkReaderThread.join();
      } catch (InterruptedException e) {
        throw new IOException(e);
      }
    }
    synchronized (this) {
      while (!m_availableChunks.isEmpty()) {
        m_availableChunks.poll().discard();
      }
      notifyAll();
    }

    /*
     * Free buffers used to pull snapshot data in process
     */
    BBContainer cont;
    while ((cont = m_buffers.poll()) != null) {
      cont.discard();
    }
  }
Example #7
0
  public static synchronized void pushDRBuffer(int partitionId, ByteBuffer buf) {
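    // Debug-only verification: walk the DR buffer record by record, print each record, and
    // validate its CRC32C checksum. The buffer is wrapped and discarded at the end either way.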
    if (logDebug) {
      System.out.println("Received DR buffer size " + buf.remaining());
      AtomicLong haveOpenTransaction = haveOpenTransactionLocal.get();
      buf.order(ByteOrder.LITTLE_ENDIAN);
      // Magic header space reserved for Java's zero-copy implementation
      buf.position(8);
      while (buf.hasRemaining()) {
        int startPosition = buf.position();
        byte version = buf.get();
        int type = buf.get();

        int checksum = 0;
        if (version != 0) System.out.println("Remaining is " + buf.remaining());

        switch (DRRecordType.valueOf(type)) {
          case INSERT:
            {
              // Insert
              if (haveOpenTransaction.get() == -1) {
                System.out.println("Have insert but no open transaction");
                System.exit(-1);
              }
              final long tableHandle = buf.getLong();
              final int lengthPrefix = buf.getInt();
              buf.position(buf.position() + lengthPrefix);
              checksum = buf.getInt();
              System.out.println(
                  "Version "
                      + version
                      + " type INSERT table handle "
                      + tableHandle
                      + " length "
                      + lengthPrefix
                      + " checksum "
                      + checksum);
              break;
            }
          case DELETE:
            {
              // Delete
              if (haveOpenTransaction.get() == -1) {
                System.out.println("Have insert but no open transaction");
                System.exit(-1);
              }
              final long tableHandle = buf.getLong();
              final int lengthPrefix = buf.getInt();
              buf.position(buf.position() + lengthPrefix);
              checksum = buf.getInt();
              System.out.println(
                  "Version "
                      + version
                      + " type DELETE table handle "
                      + tableHandle
                      + " length "
                      + lengthPrefix
                      + " checksum "
                      + checksum);
              break;
            }
          case UPDATE:
            // Update
            // System.out.println("Version " + version + " type UPDATE " + checksum " + checksum);
            break;
          case BEGIN_TXN:
            {
              // Begin txn
              final long txnId = buf.getLong();
              final long spHandle = buf.getLong();
              if (haveOpenTransaction.get() != -1) {
                System.out.println(
                    "Have open transaction txnid "
                        + txnId
                        + " spHandle "
                        + spHandle
                        + " but already open transaction");
                System.exit(-1);
              }
              haveOpenTransaction.set(spHandle);
              checksum = buf.getInt();
              System.out.println(
                  "Version "
                      + version
                      + " type BEGIN_TXN "
                      + " txnid "
                      + txnId
                      + " spHandle "
                      + spHandle
                      + " checksum "
                      + checksum);
              break;
            }
          case END_TXN:
            {
              // End txn
              final long spHandle = buf.getLong();
              if (haveOpenTransaction.get() == -1) {
                System.out.println(
                    "Have end transaction spHandle "
                        + spHandle
                        + " but no open transaction and its less then last committed "
                        + lastCommittedSpHandle.get().get());
                //                    checksum = buf.getInt();
                //                    break;
                System.exit(-1);
              }
              haveOpenTransaction.set(-1);
              lastCommittedSpHandle.get().set(spHandle);
              checksum = buf.getInt();
              System.out.println(
                  "Version "
                      + version
                      + " type END_TXN "
                      + " spHandle "
                      + spHandle
                      + " checksum "
                      + checksum);
              break;
            }
        }
        int calculatedChecksum =
            DBBPool.getBufferCRC32C(buf, startPosition, buf.position() - startPosition - 4);
        if (calculatedChecksum != checksum) {
          System.out.println("Checksum " + calculatedChecksum + " didn't match " + checksum);
          System.exit(-1);
        }
      }
    }
    final BBContainer cont = DBBPool.wrapBB(buf);
    DBBPool.registerUnsafeMemory(cont.address());
    cont.discard();
  }
Example #8
0
 public void discard() {
   buffer.discard();
 }
  /*
   * Prepending the length is basically synonymous with writing actual tuple data
   * rather than the header.
   */
  private ListenableFuture<?> write(
      final Callable<BBContainer> tupleDataC, final boolean prependLength) {
    /*
     * Unwrap the data to be written. For the traditional
     * snapshot data target this should be a noop.
     */
    BBContainer tupleDataTemp;
    try {
      tupleDataTemp = tupleDataC.call();
      /*
       * Can be null if the dedupe filter nulled out the buffer
       */
      if (tupleDataTemp == null) {
        return Futures.immediateFuture(null);
      }
    } catch (Throwable t) {
      return Futures.immediateFailedFuture(t);
    }
    final BBContainer tupleData = tupleDataTemp;

    if (m_writeFailed) {
      tupleData.discard();
      return null;
    }

    m_outstandingWriteTasks.incrementAndGet();

    Future<BBContainer> compressionTask = null;
    if (prependLength) {
      BBContainer cont =
          DBBPool.allocateDirectAndPool(SnapshotSiteProcessor.m_snapshotBufferCompressedLen);
      // Skip 4 bytes so the partition ID is not compressed.
      // That way, if we detect corruption, we know which partition is bad.
      tupleData.b.position(tupleData.b.position() + 4);
      /*
       * Leave 12 bytes for the header: a 4-byte length prefix, a 4-byte partition id,
       * and a 4-byte CRC32C of just the header bytes. With the compressed payload's CRC
       * the header totals 16 bytes, but that last 4-byte CRC is written by CompressionService.
       */
      cont.b.position(12);
      compressionTask = CompressionService.compressAndCRC32cBufferAsync(tupleData.b, cont);
    }
    final Future<BBContainer> compressionTaskFinal = compressionTask;

    ListenableFuture<?> writeTask =
        m_es.submit(
            new Callable<Object>() {
              @Override
              public Object call() throws Exception {
                try {
                  if (m_acceptOneWrite) {
                    m_acceptOneWrite = false;
                  } else {
                    if (m_simulateBlockedWrite != null) {
                      m_simulateBlockedWrite.await();
                    }
                    if (m_simulateFullDiskWritingChunk) {
                      throw new IOException("Disk full");
                    }
                  }

                  int totalWritten = 0;
                  if (prependLength) {
                    BBContainer payloadContainer = compressionTaskFinal.get();
                    try {
                      final ByteBuffer payloadBuffer = payloadContainer.b;
                      payloadBuffer.position(0);

                      ByteBuffer lengthPrefix = ByteBuffer.allocate(12);
                      m_bytesAllowedBeforeSync.acquire(payloadBuffer.remaining());
                      // The length prefix does not include the 4 header items, just the
                      // compressed payload that follows
                      lengthPrefix.putInt(payloadBuffer.remaining() - 16); // length prefix
                      lengthPrefix.putInt(tupleData.b.getInt(0)); // partitionId

                      /*
                       * Checksum the header and put it in the payload buffer
                       */
                      PureJavaCrc32C crc = new PureJavaCrc32C();
                      crc.update(lengthPrefix.array(), 0, 8);
                      lengthPrefix.putInt((int) crc.getValue());
                      lengthPrefix.flip();
                      payloadBuffer.put(lengthPrefix);
                      payloadBuffer.position(0);

                      /*
                       * Write payload to file
                       */
                      while (payloadBuffer.hasRemaining()) {
                        totalWritten += m_channel.write(payloadBuffer);
                      }
                    } finally {
                      payloadContainer.discard();
                    }
                  } else {
                    while (tupleData.b.hasRemaining()) {
                      totalWritten += m_channel.write(tupleData.b);
                    }
                  }
                  m_bytesWritten += totalWritten;
                  m_bytesWrittenSinceLastSync.addAndGet(totalWritten);
                } catch (IOException e) {
                  m_writeException = e;
                  SNAP_LOG.error(
                      "Error while attempting to write snapshot data to file " + m_file, e);
                  m_writeFailed = true;
                  throw e;
                } finally {
                  try {
                    tupleData.discard();
                  } finally {
                    m_outstandingWriteTasksLock.lock();
                    try {
                      if (m_outstandingWriteTasks.decrementAndGet() == 0) {
                        m_noMoreOutstandingWriteTasksCondition.signalAll();
                      }
                    } finally {
                      m_outstandingWriteTasksLock.unlock();
                    }
                  }
                }
                return null;
              }
            });
    return writeTask;
  }
Example #10
0
  /**
   * Process a message pulled off the network thread, and discard the container once it's
   * processed.
   *
   * @param msg A pair of <sourceHSId, <targetId, blockContainer>>
   * @return The restore work, or null if there's no data block to return to the site.
   */
  private RestoreWork processMessage(
      Pair<Long, Pair<Long, BBContainer>> msg, CachedByteBufferAllocator resultBufferAllocator) {
    if (msg == null) {
      return null;
    }

    RestoreWork restoreWork = null;
    long hsId = msg.getFirst();
    long targetId = msg.getSecond().getFirst();
    BBContainer container = msg.getSecond().getSecond();
    try {
      ByteBuffer block = container.b();
      byte typeByte = block.get(StreamSnapshotDataTarget.typeOffset);
      final int blockIndex = block.getInt(StreamSnapshotDataTarget.blockIndexOffset);
      StreamSnapshotMessageType type = StreamSnapshotMessageType.values()[typeByte];
      if (type == StreamSnapshotMessageType.FAILURE) {
        VoltDB.crashLocalVoltDB("Rejoin source sent failure message.", false, null);

        // for test code only
        if (m_expectedEOFs.decrementAndGet() == 0) {
          m_EOF = true;
        }
      } else if (type == StreamSnapshotMessageType.END) {
        if (rejoinLog.isTraceEnabled()) {
          rejoinLog.trace("Got END message " + blockIndex);
        }

        // End of stream, no need to ack this buffer
        if (m_expectedEOFs.decrementAndGet() == 0) {
          m_EOF = true;
        }
      } else if (type == StreamSnapshotMessageType.SCHEMA) {
        rejoinLog.trace("Got SCHEMA message");

        block.position(StreamSnapshotDataTarget.contentOffset);
        byte[] schemaBytes = new byte[block.remaining()];
        block.get(schemaBytes);
        m_schemas.put(block.getInt(StreamSnapshotDataTarget.tableIdOffset), schemaBytes);
      } else if (type == StreamSnapshotMessageType.HASHINATOR) {
        block.position(StreamSnapshotDataTarget.contentOffset);
        long version = block.getLong();
        byte[] hashinatorConfig = new byte[block.remaining()];
        block.get(hashinatorConfig);

        restoreWork = new HashinatorRestoreWork(version, hashinatorConfig);
      } else {
        // It's normal snapshot data afterwards

        final int tableId = block.getInt(StreamSnapshotDataTarget.tableIdOffset);

        if (!m_schemas.containsKey(tableId)) {
          VoltDB.crashLocalVoltDB("No schema for table with ID " + tableId, false, null);
        }

        // Get the byte buffer ready to be consumed
        block.position(StreamSnapshotDataTarget.contentOffset);
        ByteBuffer nextChunk = getNextChunk(m_schemas.get(tableId), block, resultBufferAllocator);
        m_bytesReceived += nextChunk.remaining();

        restoreWork = new TableRestoreWork(tableId, nextChunk);
      }

      // Queue ack to this block
      m_ack.ack(hsId, m_EOF, targetId, blockIndex);

      return restoreWork;
    } finally {
      container.discard();
    }
  }
Example #11
0
 void clear() {
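   // Drain the pool, releasing every buffer still held.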
   BBContainer cont = null;
   while ((cont = m_buffers.poll()) != null) {
     cont.discard();
   }
 }