Example #1
  public ExportDataSource(Runnable onDrain, File adFile) throws IOException {
    /*
     * No more data will arrive if this source is being restored from disk.
     */
    m_endOfStream = true;
    m_onDrain = onDrain;
    String overflowPath = adFile.getParent();
    byte data[] = new byte[(int) adFile.length()];
    // Use try-with-resources so the stream is closed even if the read fails
    try (FileInputStream fis = new FileInputStream(adFile)) {
      int read = fis.read(data);
      if (read != data.length) {
        throw new IOException("Failed to read ad file " + adFile);
      }
    }
    FastDeserializer fds = new FastDeserializer(data);

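    // Deserialize the metadata fields recorded in the ad file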
    m_HSId = fds.readLong();
    m_database = fds.readString();
    m_generation = fds.readLong();
    m_partitionId = fds.readInt();
    m_signature = fds.readString();
    m_tableName = fds.readString();
    fds.readLong(); // timestamp of JVM startup can be ignored
    int numColumns = fds.readInt();
    for (int ii = 0; ii < numColumns; ++ii) {
      m_columnNames.add(fds.readString());
      int columnType = fds.readInt();
      m_columnTypes.add(columnType);
    }

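    // Nonce built from the signature, HSId, and partition id that names this source's stream block queue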
    String nonce = m_signature + "_" + m_HSId + "_" + m_partitionId;
    m_committedBuffers = new StreamBlockQueue(overflowPath, nonce);

    // compute the number of bytes necessary to hold one bit per
    // schema column
    m_nullArrayLength = ((m_columnTypes.size() + 7) & -8) >> 3;
  }
Example #2
  public void initFromBuffer(ByteBuffer buf) throws IOException {
    FastDeserializer in = new FastDeserializer(buf);
    byte version = in.readByte(); // version number also embeds the type
    type = ProcedureInvocationType.typeFromByte(version);

    /*
     * If it's a replicated invocation, there should be two txn IDs
     * following the version byte. The first txn ID is the new txn ID, the
     * second one is the original txn ID.
     */
    if (ProcedureInvocationType.isDeprecatedInternalDRType(type)) {
      originalTxnId = in.readLong();
      originalUniqueId = in.readLong();
    }

    if (version >= BatchTimeoutOverrideType.BATCH_TIMEOUT_VERSION) {
      BatchTimeoutOverrideType batchTimeoutType =
          BatchTimeoutOverrideType.typeFromByte(in.readByte());
      if (batchTimeoutType == BatchTimeoutOverrideType.NO_OVERRIDE_FOR_BATCH_TIMEOUT) {
        batchTimeout = BatchTimeoutOverrideType.NO_TIMEOUT;
      } else {
        batchTimeout = in.readInt();
        // The client side has already checked the batchTimeout value, but the
        // server should verify again that it is non-negative in case a
        // non-standard client is being used.
        if (batchTimeout < 0) {
          throw new IllegalArgumentException("Timeout value can't be negative.");
        }
      }
    }

    procName = in.readString().intern();
    clientHandle = in.readLong();
    // do not deserialize parameters in ClientInterface context
    serializedParams = in.remainder();
    final ByteBuffer duplicate = serializedParams.duplicate();
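    // Defer parameter deserialization: the FutureTask parses the duplicated buffer only when it is run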
    params =
        new FutureTask<ParameterSet>(
            new Callable<ParameterSet>() {
              @Override
              public ParameterSet call() throws Exception {
                return ParameterSet.fromByteBuffer(duplicate);
              }
            });
  }
Example #3
  // XXX maybe consider an IOException subclass at some point
  public TableSaveFile(
      FileChannel dataIn,
      int readAheadChunks,
      Integer[] relevantPartitionIds,
      boolean continueOnCorruptedChunk)
      throws IOException {
    try {
      EELibraryLoader.loadExecutionEngineLibrary(true);
      if (relevantPartitionIds == null) {
        m_relevantPartitionIds = null;
      } else {
        m_relevantPartitionIds = new HashSet<Integer>();
        for (Integer i : relevantPartitionIds) {
          m_relevantPartitionIds.add(i);
        }
      }
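      // The semaphore bounds how many chunks may be read ahead and buffered at once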
      m_chunkReads = new Semaphore(readAheadChunks);
      m_saveFile = dataIn;
      m_continueOnCorruptedChunk = continueOnCorruptedChunk;

      final PureJavaCrc32 crc = new PureJavaCrc32();
      /*
       * A second CRC computed as if the "completed" flag byte were set,
       * used to tell whether a CRC mismatch is only because the snapshot
       * was never completed.
       */
      final PureJavaCrc32 secondCRC = new PureJavaCrc32();

      /*
       * Get the header with the save restore specific information
       */
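      // The first 8 bytes hold the stored header CRC followed by the header length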
      final ByteBuffer lengthBuffer = ByteBuffer.allocate(8);
      while (lengthBuffer.hasRemaining()) {
        final int read = m_saveFile.read(lengthBuffer);
        if (read == -1) {
          throw new EOFException();
        }
      }
      lengthBuffer.flip();
      final int originalCRC = lengthBuffer.getInt();
      int length = lengthBuffer.getInt();
      crc.update(lengthBuffer.array(), 4, 4);
      secondCRC.update(lengthBuffer.array(), 4, 4);

      if (length < 0) {
        throw new IOException("Corrupted save file has negative header length");
      }

      if (length > 2097152) {
        throw new IOException("Corrupted save file has unreasonable header length > 2 megs");
      }

      final ByteBuffer saveRestoreHeader = ByteBuffer.allocate(length);
      while (saveRestoreHeader.hasRemaining()) {
        final int read = m_saveFile.read(saveRestoreHeader);
        // A partial read is not an error; the enclosing loop keeps reading
        // until the buffer is full
        if (read == -1) {
          throw new EOFException();
        }
      }
      saveRestoreHeader.flip();
      crc.update(saveRestoreHeader.array());
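      // Update the second CRC as if the first header byte (the completed flag) were 1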
      secondCRC.update(new byte[] {1});
      secondCRC.update(saveRestoreHeader.array(), 1, saveRestoreHeader.array().length - 1);

      /*
       *  Get the template for the VoltTable serialization header.
       *  It will have an extra length value prepended to it so that
       *  it can be read straight into a buffer. This will not
       *  contain a row count since that varies from chunk to chunk
       *  and is supplied by the chunk.
       */
      lengthBuffer.clear();
      lengthBuffer.limit(4);
      // Read the 4-byte length prefix of the VoltTable header template
      while (lengthBuffer.hasRemaining()) {
        final int read = m_saveFile.read(lengthBuffer);
        if (read == -1) {
          throw new EOFException();
        }
      }
      crc.update(lengthBuffer.array(), 0, 4);
      secondCRC.update(lengthBuffer.array(), 0, 4);
      lengthBuffer.flip();
      length = lengthBuffer.getInt();

      if (length < 4) {
        throw new IOException(
            "Corrupted save file has negative length or too small length for VoltTable header");
      }

      if (length > 2097152) {
        throw new IOException(
            "Corrupted save file has unreasonable VoltTable header length > 2 megs");
      }

      m_tableHeader = ByteBuffer.allocate(length + 4);
      m_tableHeader.putInt(length);
      while (m_tableHeader.hasRemaining()) {
        final int read = m_saveFile.read(m_tableHeader);
        if (read == -1) {
          throw new EOFException();
        }
      }
      crc.update(m_tableHeader.array(), 4, length);
      secondCRC.update(m_tableHeader.array(), 4, length);

      boolean failedCRCDueToNotCompleted = false;

      final int actualCRC = (int) crc.getValue();
      if (originalCRC != actualCRC) {
        /*
         * Check if the CRC mismatch is due to the snapshot not being completed
         */
        final int secondCRCValue = (int) secondCRC.getValue();
        if (secondCRCValue == originalCRC) {
          failedCRCDueToNotCompleted = true;
        } else {
          throw new IOException("Checksum mismatch");
        }
      }

      FastDeserializer fd = new FastDeserializer(saveRestoreHeader);
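      // The first header byte is the snapshot's completed flag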
      byte completedByte = fd.readByte();
      m_completed = !failedCRCDueToNotCompleted && completedByte == 1;
      for (int ii = 0; ii < 4; ii++) {
        m_versionNum[ii] = fd.readInt();
      }

      /*
       * Support the original pre 1.3 header format as well as a new JSON format.
       * JSON will make it possible to add info to a snapshot header without
       * breaking backwards compatibility.
       */
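      // A fourth version component of 0 indicates the legacy pre-1.3 binary header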
      if (m_versionNum[3] == 0) {
        m_txnId = fd.readLong();
        m_timestamp = TransactionIdManager.getTimestampFromTransactionId(m_txnId);
        m_hostId = fd.readInt();
        m_hostname = fd.readString();
        m_clusterName = fd.readString();
        m_databaseName = fd.readString();
        m_tableName = fd.readString();
        m_isReplicated = fd.readBoolean();
        m_isCompressed = false;
        m_checksumType = ChecksumType.CRC32;
        if (!m_isReplicated) {
          m_partitionIds = (int[]) fd.readArray(int.class);
          if (!m_completed) {
            for (Integer partitionId : m_partitionIds) {
              m_corruptedPartitions.add(partitionId);
            }
          }
          m_totalPartitions = fd.readInt();
        } else {
          m_partitionIds = new int[] {0};
          m_totalPartitions = 1;
          if (!m_completed) {
            m_corruptedPartitions.add(0);
          }
        }
        m_hasVersion2FormatChunks = false;
      } else {
        assert (m_versionNum[3] == 1 || m_versionNum[3] == 2);
        m_hasVersion2FormatChunks = m_versionNum[3] >= 2;
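        // Newer header versions store the remaining fields as a length-prefixed UTF-8 JSON object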
        int numJSONBytes = fd.readInt();
        byte jsonBytes[] = new byte[numJSONBytes];
        fd.readFully(jsonBytes);
        String jsonString = new String(jsonBytes, "UTF-8");
        JSONObject obj = new JSONObject(jsonString);

        m_txnId = obj.getLong("txnId");
        // Timestamp field added for 3.0, might not be there
        if (obj.has("timestamp")) {
          m_timestamp = obj.getLong("timestamp");
        } else {
          // Pre 3.0/IV2 the timestamp was in the transactionid
          m_timestamp = TransactionIdManager.getTimestampFromTransactionId(m_txnId);
        }
        m_hostId = obj.getInt("hostId");
        m_hostname = obj.getString("hostname");
        m_clusterName = obj.getString("clusterName");
        m_databaseName = obj.getString("databaseName");
        m_tableName = obj.getString("tableName");
        m_isReplicated = obj.getBoolean("isReplicated");
        m_isCompressed = obj.optBoolean("isCompressed", false);
        m_checksumType = ChecksumType.valueOf(obj.optString("checksumType", "CRC32"));
        if (!m_isReplicated) {
          JSONArray partitionIds = obj.getJSONArray("partitionIds");
          m_partitionIds = new int[partitionIds.length()];
          for (int ii = 0; ii < m_partitionIds.length; ii++) {
            m_partitionIds[ii] = partitionIds.getInt(ii);
          }

          if (!m_completed) {
            for (Integer partitionId : m_partitionIds) {
              m_corruptedPartitions.add(partitionId);
            }
          }
          m_totalPartitions = obj.getInt("numPartitions");
        } else {
          m_partitionIds = new int[] {0};
          m_totalPartitions = 1;
          if (!m_completed) {
            m_corruptedPartitions.add(0);
          }
        }
      }
      /*
       * Several runtime exceptions can be thrown in valid failure cases where
       * a corrupt save file is being detected.
       */
    } catch (BufferUnderflowException e) {
      throw new IOException(e);
    } catch (BufferOverflowException e) {
      throw new IOException(e);
    } catch (IndexOutOfBoundsException e) {
      throw new IOException(e);
    } catch (JSONException e) {
      throw new IOException(e);
    }
  }