Example #1
  @Test
  public void testDataFrameTooMuchPadding() throws Exception {
    http2Connect();

    byte[] dataFrame = new byte[10];

    // Header
    // length
    ByteUtil.setThreeBytes(dataFrame, 0, 1);
    // type 0 (data)
    // flags 8 (padded)
    dataFrame[4] = 0x08;
    // stream 3
    ByteUtil.set31Bits(dataFrame, 5, 3);
    // payload (pad length of 1)
    dataFrame[9] = 1;

    os.write(dataFrame);
    os.flush();

    parser.readFrame(true);

    String trace = output.getTrace();
    Assert.assertTrue(trace, trace.startsWith("0-Goaway-[1]-[1]-["));
  }
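For reference, the two ByteUtil helpers used above pack big-endian integers into the 9-byte HTTP/2 frame header (3-byte length, 1-byte type, 1-byte flags, 31-bit stream ID). A minimal stand-alone sketch of their assumed semantics:

  // Sketch of the Tomcat ByteUtil helpers used above (assumed big-endian semantics).
  static void setThreeBytes(byte[] dest, int pos, int value) {
    dest[pos] = (byte) ((value >>> 16) & 0xFF);
    dest[pos + 1] = (byte) ((value >>> 8) & 0xFF);
    dest[pos + 2] = (byte) (value & 0xFF);
  }

  static void set31Bits(byte[] dest, int pos, int value) {
    // The top bit of the stream-ID field is reserved in HTTP/2, so mask it off.
    dest[pos] = (byte) ((value >>> 24) & 0x7F);
    dest[pos + 1] = (byte) ((value >>> 16) & 0xFF);
    dest[pos + 2] = (byte) ((value >>> 8) & 0xFF);
    dest[pos + 3] = (byte) (value & 0xFF);
  }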
Example #2
 public static Column readColumn(DataInputStream in) throws IOException {
   BytesBuilder bb = Bytes.builder();
   Bytes family = ByteUtil.read(bb, in);
   Bytes qualifier = ByteUtil.read(bb, in);
   Bytes visibility = ByteUtil.read(bb, in);
   return new Column(family, qualifier, visibility);
 }
Example #3
  /**
   * Writes the given bytes to a file.
   *
   * @param bytes the data to write
   * @param filename path of the target file
   * @param action one of OVERWRITE or APPEND
   * @throws IOException if the file cannot be read or written
   */
  public static void bytes2file(byte[] bytes, String filename, int action) throws IOException {
    File file;
    byte[] data;
    switch (action) {
      case OVERWRITE:
        file = new File(filename);
        if (file.exists()) {
          file.delete();
        }
        // Delegates to the raw-write overload taking (filename, bytes).
        bytes2file(filename, bytes);
        break;
        break;

      case APPEND:
        file = new File(filename);
        if (file.exists()) {
          // Read the existing content in full; available() is not a reliable
          // file length, so size the buffer from the file and use readFully.
          FileInputStream fileInputStream = new FileInputStream(file);
          data = new byte[(int) file.length()];
          new DataInputStream(fileInputStream).readFully(data);
          data = ByteUtil.merge(data, bytes);
          fileInputStream.close();

          FileOutputStream fileOutputStream = new FileOutputStream(file);
          fileOutputStream.write(data);
          fileOutputStream.flush();
          fileOutputStream.close();
        } else {
          // Appending to a missing file degenerates to a plain write.
          bytes2file(filename, bytes);
        }
        break;

      default:
        bytes2file(bytes, filename, OVERWRITE);
    }
  }
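For comparison, on Java 7+ the APPEND branch above can be written against java.nio.file directly, avoiding the read-merge-rewrite; a minimal sketch (appendBytes is a hypothetical helper name):

  import java.io.IOException;
  import java.nio.file.Files;
  import java.nio.file.Paths;
  import java.nio.file.StandardOpenOption;

  static void appendBytes(byte[] bytes, String filename) throws IOException {
    // CREATE makes the file if it is missing; APPEND writes at the end.
    Files.write(Paths.get(filename), bytes,
        StandardOpenOption.CREATE, StandardOpenOption.APPEND);
  }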
Example #4
 /**
  * Get the key used in the Phoenix metadata row for a table definition
  *
  * @param tenantId the tenant ID bytes (empty for the global tenant)
  * @param schemaName the schema name bytes
  * @param tableName the table name bytes
  */
 public static byte[] getTableKey(byte[] tenantId, byte[] schemaName, byte[] tableName) {
   return ByteUtil.concat(
       tenantId,
       QueryConstants.SEPARATOR_BYTE_ARRAY,
       schemaName,
       QueryConstants.SEPARATOR_BYTE_ARRAY,
       tableName);
 }
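ByteUtil.concat joins the pieces back-to-back, so getTableKey(tenant, schema, table) yields tenant + 0x00 + schema + 0x00 + table. A minimal sketch of a concat with the same assumed varargs signature:

  // Sketch: copy the input arrays back-to-back into one new array.
  static byte[] concat(byte[] first, byte[]... rest) {
    int size = first.length;
    for (byte[] array : rest) {
      size += array.length;
    }
    byte[] result = java.util.Arrays.copyOf(first, size);
    int offset = first.length;
    for (byte[] array : rest) {
      System.arraycopy(array, 0, result, offset, array.length);
      offset += array.length;
    }
    return result;
  }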
Example #5
 public static byte[] getTableKey(String tenantId, String schemaName, String tableName) {
   return ByteUtil.concat(
       tenantId == null ? ByteUtil.EMPTY_BYTE_ARRAY : Bytes.toBytes(tenantId),
       QueryConstants.SEPARATOR_BYTE_ARRAY,
       schemaName == null ? ByteUtil.EMPTY_BYTE_ARRAY : Bytes.toBytes(schemaName),
       QueryConstants.SEPARATOR_BYTE_ARRAY,
       Bytes.toBytes(tableName));
 }
Example #6
 @Override
 public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) {
   // This serializes the Map. The format is as follows
   // Map size(VInt ie. 1 to 5 bytes) +
   // ( key length [VInt ie. 1 to 5 bytes] + key bytes + value [VInt ie. 1 to 5 bytes] )*
   buffer = new byte[countMapSerializationSize()];
   int offset = 0;
   offset += ByteUtil.vintToBytes(buffer, offset, this.valueVsCount.size());
   for (Entry<ImmutableBytesPtr, Integer> entry : this.valueVsCount.entrySet()) {
     ImmutableBytesPtr key = entry.getKey();
     offset += ByteUtil.vintToBytes(buffer, offset, key.getLength());
     System.arraycopy(key.get(), key.getOffset(), buffer, offset, key.getLength());
     offset += key.getLength();
     offset += ByteUtil.vintToBytes(buffer, offset, entry.getValue().intValue());
   }
   ptr.set(buffer, 0, offset);
   return true;
 }
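A reader for the serialized map would mirror the format in the comment. A hypothetical sketch, assuming the vints are Hadoop WritableUtils-style (which appears to be the encoding ByteUtil.vintToBytes emits):

  import java.io.ByteArrayInputStream;
  import java.io.DataInputStream;
  import java.io.IOException;
  import java.util.LinkedHashMap;
  import java.util.Map;
  import org.apache.hadoop.io.WritableUtils;

  static Map<byte[], Integer> readCountMap(byte[] buf, int off, int len) throws IOException {
    DataInputStream in = new DataInputStream(new ByteArrayInputStream(buf, off, len));
    int size = WritableUtils.readVInt(in);                  // map size
    Map<byte[], Integer> map = new LinkedHashMap<>(size);
    for (int i = 0; i < size; i++) {
      byte[] key = new byte[WritableUtils.readVInt(in)];    // key length
      in.readFully(key);                                    // key bytes
      map.put(key, WritableUtils.readVInt(in));             // count
    }
    return map;
  }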
Example #7
 private static byte[] getNameAsBytes(byte[] nameOne, byte[] nameTwo) {
   if (nameOne == null || nameOne.length == 0) {
     return nameTwo;
   } else if ((nameTwo == null || nameTwo.length == 0)) {
     return nameOne;
   } else {
     return ByteUtil.concat(nameOne, QueryConstants.NAME_SEPARATOR_BYTES, nameTwo);
   }
 }
Example #8
 // Start/stop row must be swapped if scan is being done in reverse
 public static void setupReverseScan(Scan scan) {
   if (isReversed(scan)) {
     byte[] startRow = scan.getStartRow();
     byte[] stopRow = scan.getStopRow();
     byte[] newStartRow = startRow;
     byte[] newStopRow = stopRow;
     if (startRow.length != 0) {
       /*
        * Must get previous key because this is going from an inclusive start key to an exclusive stop key, and
        * we need the start key to be included. We get the previous key by decrementing the last byte by one.
        * However, with variable length data types, we need to fill with the max byte value, otherwise, if the
        * start key is 'ab', we lower it to 'aa' which would cause 'aab' to be included (which isn't correct).
        * So we fill with a 0xFF byte to prevent this. A single 0xFF would be enough for our primitive types (as
        * that byte wouldn't occur), but for an arbitrary VARBINARY key we can't know how many bytes to tack
        * on. It's lame of HBase to force us to do this.
        */
       newStartRow =
           Arrays.copyOf(startRow, startRow.length + MAX_FILL_LENGTH_FOR_PREVIOUS_KEY.length);
       if (ByteUtil.previousKey(newStartRow, startRow.length)) {
         System.arraycopy(
             MAX_FILL_LENGTH_FOR_PREVIOUS_KEY,
             0,
             newStartRow,
             startRow.length,
             MAX_FILL_LENGTH_FOR_PREVIOUS_KEY.length);
       } else {
         newStartRow = HConstants.EMPTY_START_ROW;
       }
     }
     if (stopRow.length != 0) {
       // Must add null byte because we need the start to be exclusive while it was inclusive
       newStopRow = ByteUtil.concat(stopRow, QueryConstants.SEPARATOR_BYTE_ARRAY);
     }
     scan.setStartRow(newStopRow);
     scan.setStopRow(newStartRow);
     scan.setReversed(true);
   }
 }
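The previousKey call above treats the key as an unsigned big-endian number and decrements it; a sketch of the assumed semantics:

  // Sketch: decrement key[0..length) by one, borrowing through 0x00 bytes.
  // Returns false when the key is all zeros, i.e. no previous key exists.
  static boolean previousKey(byte[] key, int length) {
    for (int i = length - 1; i >= 0; i--) {
      if (key[i] != 0) {
        key[i]--;
        return true;
      }
      key[i] = (byte) 0xFF;  // borrow: 0x00 becomes 0xFF, keep moving left
    }
    return false;
  }

The 0xFF fill copied in afterwards then restores "largest key strictly below the original" for variable-length types, as the long comment explains.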
Example #9
 // Go through each slot in the schema and try to match it with the split byte array. If the
 // split does not conform to the schema, extend its length to match the schema.
 private static byte[] processSplit(byte[] split, LinkedHashSet<PColumn> pkColumns) {
   int pos = 0, offset = 0, maxOffset = split.length;
   Iterator<PColumn> iterator = pkColumns.iterator();
   while (pos < pkColumns.size()) {
     PColumn column = iterator.next();
     if (column.getDataType().isFixedWidth()) { // Fixed width
       int length = SchemaUtil.getFixedByteSize(column);
       if (maxOffset - offset < length) {
         // The split truncates the field. Fill in the rest of the part and any fields that
         // are missing after this field.
         int fillInLength = length - (maxOffset - offset);
         fillInLength += estimatePartLength(pos + 1, iterator);
         return ByteUtil.fillKey(split, split.length + fillInLength);
       }
       // Account for this field, move to next position;
       offset += length;
       pos++;
     } else { // Variable length
       // If we are the last slot, then we are done. Nothing needs to be filled in.
       if (pos == pkColumns.size() - 1) {
         break;
       }
       while (offset < maxOffset && split[offset] != QueryConstants.SEPARATOR_BYTE) {
         offset++;
       }
       if (offset == maxOffset) {
         // The var-length field does not end with a separator and it's not the last field.
         int fillInLength = 1; // SEPARATOR byte for the current var-length slot.
         fillInLength += estimatePartLength(pos + 1, iterator);
         return ByteUtil.fillKey(split, split.length + fillInLength);
       }
       // Move to the next position;
       offset += 1; // skip separator;
       pos++;
     }
   }
   return split;
 }
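ByteUtil.fillKey pads the split out to the requested length. Assuming the pad byte is 0x00 (the separator byte), a minimal equivalent would be:

  // Sketch assuming zero-padding; Arrays.copyOf zero-fills the extended tail.
  static byte[] fillKey(byte[] key, int length) {
    return java.util.Arrays.copyOf(key, length);
  }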
Example #10
 protected void encodeSpecial(Special s, byte[] buf, int offset) {
   if (s == null) s = new Special();
   ByteUtil.encodeByte(s.specialNumber, buf, offset + 0);
   ByteUtil.encodeByte(s.arg0, buf, offset + 1);
   ByteUtil.encodeByte(s.arg1, buf, offset + 2);
   ByteUtil.encodeByte(s.arg2, buf, offset + 3);
   ByteUtil.encodeByte(s.arg3, buf, offset + 4);
   ByteUtil.encodeByte(s.arg4, buf, offset + 5);
 }
Example #11
 protected Special decodeSpecial(byte[] buf, int offset) {
   Special s = new Special();
   s.specialNumber = ByteUtil.leUByte(buf, offset + 0);
   s.arg0 = ByteUtil.leUByte(buf, offset + 1);
   s.arg1 = ByteUtil.leUByte(buf, offset + 2);
   s.arg2 = ByteUtil.leUByte(buf, offset + 3);
   s.arg3 = ByteUtil.leUByte(buf, offset + 4);
   s.arg4 = ByteUtil.leUByte(buf, offset + 5);
   return Special.internIfZero(s);
 }
Example #12
 private static byte[] nextKey(byte[] key, RowKeySchema schema, ImmutableBytesWritable ptr) {
   int pos = 0;
   int maxOffset = schema.iterator(key, ptr);
   while (schema.next(ptr, pos, maxOffset) != null) {
     pos++;
   }
   Field field = schema.getField(pos - 1);
   if (!field.getDataType().isFixedWidth()) {
     byte[] newLowerRange = new byte[key.length + 1];
     System.arraycopy(key, 0, newLowerRange, 0, key.length);
     newLowerRange[key.length] =
         SchemaUtil.getSeparatorByte(schema.rowKeyOrderOptimizable(), key.length == 0, field);
     key = newLowerRange;
   } else {
     key = Arrays.copyOf(key, key.length);
   }
   ByteUtil.nextKey(key, key.length);
   return key;
 }
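ByteUtil.nextKey is the inverse of the previousKey sketch from Example #8: it increments the key as an unsigned big-endian number. A sketch of the assumed behavior:

  // Sketch: add one to key[0..length), carrying through 0xFF bytes.
  // Returns false when every byte was 0xFF, i.e. the key overflowed.
  static boolean nextKey(byte[] key, int length) {
    for (int i = length - 1; i >= 0; i--) {
      if (++key[i] != 0) {
        return true;  // no carry needed past this byte
      }
      // the byte wrapped 0xFF -> 0x00; carry into the byte to the left
    }
    return false;
  }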
Example #13
  @Test
  public void testDataFrameOnStreamZero() throws Exception {
    http2Connect();

    byte[] dataFrame = new byte[10];

    // Header
    // length
    ByteUtil.setThreeBytes(dataFrame, 0, 1);
    // type (0 for data)
    // flags (0)
    // stream (0)
    // payload (0)

    os.write(dataFrame);
    os.flush();

    parser.readFrame(true);

    String trace = output.getTrace();
    Assert.assertTrue(trace, trace.startsWith("0-Goaway-[1]-[1]-["));
  }
Example #14
 /**
  * @param channel the database file.
  * @return The Jet Format represented in the passed-in file
  * @throws IOException if the database file format is unsupported.
  */
 public static JetFormat getFormat(FileChannel channel) throws IOException {
   ByteBuffer buffer = ByteBuffer.allocate(HEADER_LENGTH);
   int bytesRead = channel.read(buffer, 0L);
   if (bytesRead < HEADER_LENGTH) {
     throw new IOException("Empty database file");
   }
   buffer.flip();
   byte version = buffer.get(OFFSET_VERSION);
   if (version == CODE_VERSION_3) {
     return VERSION_3;
   } else if (version == CODE_VERSION_4) {
     if (ByteUtil.matchesRange(buffer, OFFSET_ENGINE_NAME, MSISAM_ENGINE)) {
       return VERSION_MSISAM;
     }
     return VERSION_4;
   } else if (version == CODE_VERSION_12) {
     return VERSION_12;
   } else if (version == CODE_VERSION_14) {
     return VERSION_14;
   }
   throw new IOException(
       "Unsupported " + ((version < CODE_VERSION_3) ? "older" : "newer") + " version: " + version);
 }
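A typical call site (the file name is hypothetical):

  try (RandomAccessFile raf = new RandomAccessFile("example.mdb", "r")) {
    JetFormat format = JetFormat.getFormat(raf.getChannel());
  }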
Example #15
  public static int setKey(
      RowKeySchema schema,
      List<List<KeyRange>> slots,
      int[] slotSpan,
      int[] position,
      Bound bound,
      byte[] key,
      int byteOffset,
      int slotStartIndex,
      int slotEndIndex,
      int schemaStartIndex) {
    int offset = byteOffset;
    boolean lastInclusiveUpperSingleKey = false;
    boolean anyInclusiveUpperRangeKey = false;
    // The index used for slots should be incremented by 1,
    // but the index for the field it represents in the schema
    // should be incremented by 1 + value in the current slotSpan index
    // slotSpan stores the number of columns beyond one that the range spans
    Field field = null;
    int i = slotStartIndex, fieldIndex = ScanUtil.getRowKeyPosition(slotSpan, slotStartIndex);
    for (i = slotStartIndex; i < slotEndIndex; i++) {
      // Build up the key by appending the bound of each key range
      // from the current position of each slot.
      KeyRange range = slots.get(i).get(position[i]);
      // Use last slot in a multi-span column to determine if fixed width
      field = schema.getField(fieldIndex + slotSpan[i]);
      boolean isFixedWidth = field.getDataType().isFixedWidth();
      fieldIndex += slotSpan[i] + 1;
      /*
       * If the current slot is unbound then stop if:
       * 1) setting the upper bound. There's no value in
       *    continuing because nothing will be filtered.
       * 2) setting the lower bound when the type is fixed length
       *    for the same reason. However, if the type is variable width
       *    continue building the key because null values will be filtered
       *    since our separator byte will be appended and incremented.
       */
      if (range.isUnbound(bound) && (bound == Bound.UPPER || isFixedWidth)) {
        break;
      }
      byte[] bytes = range.getRange(bound);
      System.arraycopy(bytes, 0, key, offset, bytes.length);
      offset += bytes.length;
      /*
       * We must add a terminator to a variable length key even for the last PK column if
       * the lower key is non inclusive or the upper key is inclusive. Otherwise, we'd be
       * incrementing the key value itself, and thus bumping it up too much.
       */
      boolean inclusiveUpper = range.isInclusive(bound) && bound == Bound.UPPER;
      boolean exclusiveLower = !range.isInclusive(bound) && bound == Bound.LOWER;
      // If we are setting the upper bound with an inclusive single key, we remember
      // to increment the key if we exit the loop after this iteration.
      //
      // We remember to increment the last slot if we are setting the upper bound with an
      // inclusive range key.
      //
      // We cannot combine the two flags into one because of the case of a single
      // inclusive key followed by an exclusive range key: there we must not increment
      // at the end, but a combined flag would have been set to true by the single
      // inclusive key in the middle of the key slots.
      lastInclusiveUpperSingleKey = range.isSingleKey() && inclusiveUpper;
      anyInclusiveUpperRangeKey |= !range.isSingleKey() && inclusiveUpper;
      // A null or empty byte array is always represented as a zero byte
      byte sepByte =
          SchemaUtil.getSeparatorByte(schema.rowKeyOrderOptimizable(), bytes.length == 0, field);

      if (!isFixedWidth
          && (fieldIndex < schema.getMaxFields()
              || inclusiveUpper
              || exclusiveLower
              || sepByte == QueryConstants.DESC_SEPARATOR_BYTE)) {
        key[offset++] = sepByte;
        // Set lastInclusiveUpperSingleKey back to false if this is the last pk column
        // as we don't want to increment the null byte in this case
        lastInclusiveUpperSingleKey &= i < schema.getMaxFields() - 1;
      }
      // If we are setting the lower bound with an exclusive range key, we need to bump the
      // slot up for each key part. For an upper bound, we bump up an inclusive key, but
      // only after the last key part.
      if (exclusiveLower) {
        if (!ByteUtil.nextKey(key, offset)) {
          // Special case for not being able to increment.
          // In this case we return a negative byteOffset to
          // remove this part from the key being formed. Since the
          // key has overflowed, this means that we should not
          // have an end key specified.
          return -byteOffset;
        }
        // We're filtering on values being non null here, but we still need the 0xFF
        // terminator, since DESC keys ignore the last byte as it's expected to be
        // the terminator. Without this, we'd ignore the separator byte that was
        // just added and incremented.
        if (!isFixedWidth
            && bytes.length == 0
            && SchemaUtil.getSeparatorByte(schema.rowKeyOrderOptimizable(), false, field)
                == QueryConstants.DESC_SEPARATOR_BYTE) {
          key[offset++] = QueryConstants.DESC_SEPARATOR_BYTE;
        }
      }
    }
    if (lastInclusiveUpperSingleKey || anyInclusiveUpperRangeKey) {
      if (!ByteUtil.nextKey(key, offset)) {
        // Special case for not being able to increment.
        // In this case we return a negative byteOffset to
        // remove this part from the key being formed. Since the
        // key has overflowed, this means that we should not
        // have an end key specified.
        return -byteOffset;
      }
    }
    // Remove trailing separator bytes, since the columns may have been added
    // after the table has data, in which case there won't be a separator
    // byte.
    if (bound == Bound.LOWER) {
      while (--i >= schemaStartIndex
          && offset > byteOffset
          && !(field = schema.getField(--fieldIndex)).getDataType().isFixedWidth()
          && field.getSortOrder() == SortOrder.ASC
          && key[offset - 1] == QueryConstants.SEPARATOR_BYTE) {
        offset--;
        fieldIndex -= slotSpan[i];
      }
    }
    return offset - byteOffset;
  }
Example #16
 public static final void readVectorF(final byte[] buffer, final int offset, TelemVect3 vector) {
   vector.x = ByteUtil.readFloat(buffer, offset + 0 * ByteUtil.SIZE_FLOAT);
   vector.y = ByteUtil.readFloat(buffer, offset + 1 * ByteUtil.SIZE_FLOAT);
   vector.z = ByteUtil.readFloat(buffer, offset + 2 * ByteUtil.SIZE_FLOAT);
 }
Example #17
 public static final void readVectorD(final byte[] buffer, final int offset, TelemVect3 vector) {
   vector.x = (float) ByteUtil.readDouble(buffer, offset + 0 * ByteUtil.SIZE_DOUBLE);
   vector.y = (float) ByteUtil.readDouble(buffer, offset + 1 * ByteUtil.SIZE_DOUBLE);
   vector.z = (float) ByteUtil.readDouble(buffer, offset + 2 * ByteUtil.SIZE_DOUBLE);
 }
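Given the little-endian leUByte naming in Example #11, readFloat presumably assembles a little-endian 32-bit pattern; a hedged sketch:

  // Assumed little-endian float read; ByteUtil.SIZE_FLOAT would be 4.
  static float readFloat(byte[] buf, int offset) {
    int bits = (buf[offset] & 0xFF)
        | (buf[offset + 1] & 0xFF) << 8
        | (buf[offset + 2] & 0xFF) << 16
        | (buf[offset + 3] & 0xFF) << 24;
    return Float.intBitsToFloat(bits);
  }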
Example #18
 /**
  * Get the key used in the Phoenix function data row for a function definition
  *
  * @param tenantId the tenant ID bytes
  * @param functionName the function name bytes
  */
 public static byte[] getFunctionKey(byte[] tenantId, byte[] functionName) {
   return ByteUtil.concat(tenantId, QueryConstants.SEPARATOR_BYTE_ARRAY, functionName);
 }
Example #19
 @Override
 protected byte[] defineHeaderMask() {
   return ByteUtil.copyOf(BASE_HEADER_MASK, BASE_HEADER_MASK.length - 2);
 }
Example #20
  /** Returns the SHA-256 digest of the bytes read, encoded as base64. */
 public String getDigest() {
   return ByteUtil.encodeFSSafeBase64(mDigestCalculator.digest());
 }
Example #21
 public static void writeColumn(Column col, DataOutputStream out) throws IOException {
   ByteUtil.write(out, col.getFamily());
   ByteUtil.write(out, col.getQualifier());
   ByteUtil.write(out, col.getVisibility());
 }
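writeColumn is the mirror of readColumn from Example #2; a quick round-trip sketch (imports as in those examples, plus the byte-array streams):

  ByteArrayOutputStream baos = new ByteArrayOutputStream();
  writeColumn(col, new DataOutputStream(baos));
  Column copy = readColumn(new DataInputStream(new ByteArrayInputStream(baos.toByteArray())));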
Example #22
 public static Column convert(Key k) {
   Bytes f = ByteUtil.toBytes(k.getColumnFamilyData());
   Bytes q = ByteUtil.toBytes(k.getColumnQualifierData());
   Bytes v = ByteUtil.toBytes(k.getColumnVisibilityData());
   return new Column(f, q, v);
 }
Example #23
  public static Message decode(Session session, String text) {
    Logger logger = LoggerFactory.getLogger(KrakenMessageDecoder.class.getName());
    Charset utf8 = Charset.forName("utf-8");

    // remove potential control characters
    text = text.trim();
    if (text.length() == 0) return null;

    if (logger.isDebugEnabled())
      logger.debug(
          "kraken webconsole: debug websocket frame length {}, json [{}]", text.length(), text);

    if (text.equals("ping")) return null;

    // decrypt if msg is encrypted
    if (session.has("enc_key")) {
      try {
        JSONTokener tokenizer = new JSONTokener(new StringReader(text));
        JSONArray container = (JSONArray) tokenizer.nextValue();
        JSONObject header = container.getJSONObject(0);
        JSONObject body = container.getJSONObject(1);

        if (header.has("iv") && body.has("data")) {
          String data = body.getString("data");

          byte[] iv =
              ByteUtil.asArray(
                  Base64.decode(ChannelBuffers.wrappedBuffer(header.getString("iv").getBytes())));
          byte[] buf =
              ByteUtil.asArray(Base64.decode(ChannelBuffers.wrappedBuffer(data.getBytes())));
          byte[] key = ByteUtil.asByteArray(UUID.fromString(session.getString("enc_key")));
          SecretKeySpec secret = new SecretKeySpec(key, "AES");

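          // Note: the stock JDK providers register PKCS5Padding rather than
          // PKCS7Padding, so this transformation typically needs an extra
          // provider such as Bouncy Castle.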
          Cipher cipher = Cipher.getInstance("AES/CBC/PKCS7Padding");
          cipher.init(Cipher.DECRYPT_MODE, secret, new IvParameterSpec(iv));
          byte[] plain = new byte[cipher.getOutputSize(buf.length)];
          int plainLength = cipher.update(buf, 0, buf.length, plain, 0);
          plainLength += cipher.doFinal(plain, plainLength);

          text = new String(plain, 0, plainLength, utf8);
          logger.trace("kraken webconsole: decrypted msg [{}]", text);
        }
      } catch (Exception e) {
        logger.error("kraken webconsole: cannot decode encrypted msg [" + text + "]", e);
      }
    }

    try {
      JSONTokener tokenizer = new JSONTokener(new StringReader(text));
      JSONArray container = (JSONArray) tokenizer.nextValue();
      JSONObject header = container.getJSONObject(0);
      JSONObject body = container.getJSONObject(1);

      Message msg = new Message();

      msg.setGuid(header.getString("guid").trim());
      msg.setType(Message.Type.valueOf(header.getString("type").trim()));
      msg.setSource(header.getString("source"));
      msg.setTarget(header.getString("target"));
      msg.setMethod(header.getString("method").trim());
      msg.setParameters(parse(body));

      return msg;
    } catch (JSONException e) {
      logger.error("kraken webconsole: invalid json => " + text, e);
    }
    return null;
  }