/** Apply column family options such as Bloom filters, compression, and data block encoding. */
protected void applyColumnFamilyOptions(byte[] tableName, byte[][] columnFamilies)
    throws IOException {
  HBaseAdmin admin = new HBaseAdmin(conf);
  HTableDescriptor tableDesc = admin.getTableDescriptor(tableName);
  LOG.info("Disabling table " + Bytes.toString(tableName));
  admin.disableTable(tableName);
  for (byte[] cf : columnFamilies) {
    HColumnDescriptor columnDesc = tableDesc.getFamily(cf);
    boolean isNewCf = columnDesc == null;
    if (isNewCf) {
      columnDesc = new HColumnDescriptor(cf);
    }
    if (bloomType != null) {
      columnDesc.setBloomFilterType(bloomType);
    }
    if (compressAlgo != null) {
      columnDesc.setCompressionType(compressAlgo);
    }
    if (dataBlockEncodingAlgo != null) {
      columnDesc.setDataBlockEncoding(dataBlockEncodingAlgo);
      columnDesc.setEncodeOnDisk(!encodeInCacheOnly);
    }
    if (inMemoryCF) {
      columnDesc.setInMemory(inMemoryCF);
    }
    if (isNewCf) {
      admin.addColumn(tableName, columnDesc);
    } else {
      admin.modifyColumn(tableName, columnDesc);
    }
  }
  LOG.info("Enabling table " + Bytes.toString(tableName));
  admin.enableTable(tableName);
}
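/**
 * Hedged usage sketch (not part of the original source): how the method above might be invoked
 * from a load-test tool. The table and column-family names are hypothetical examples, and the
 * bloomType/compressAlgo/dataBlockEncodingAlgo fields are assumed to have been set beforehand.
 */
protected void applyColumnFamilyOptionsExample() throws IOException {
  byte[] table = Bytes.toBytes("usertable"); // hypothetical table name
  byte[][] families = {Bytes.toBytes("cf0"), Bytes.toBytes("cf1")}; // hypothetical families
  applyColumnFamilyOptions(table, families);
}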
@Override
protected void updateMeta(final byte[] oldRegion1, final byte[] oldRegion2, HRegion newRegion)
    throws IOException {
  byte[][] regionsToDelete = {oldRegion1, oldRegion2};
  for (int r = 0; r < regionsToDelete.length; r++) {
    Delete delete = new Delete(regionsToDelete[r]);
    delete.deleteColumns(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
    delete.deleteColumns(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER);
    delete.deleteColumns(HConstants.CATALOG_FAMILY, HConstants.STARTCODE_QUALIFIER);
    delete.deleteColumns(HConstants.CATALOG_FAMILY, HConstants.SPLITA_QUALIFIER);
    delete.deleteColumns(HConstants.CATALOG_FAMILY, HConstants.SPLITB_QUALIFIER);
    root.delete(delete, null, true);
    if (LOG.isDebugEnabled()) {
      LOG.debug("updated columns in row: " + Bytes.toStringBinary(regionsToDelete[r]));
    }
  }
  HRegionInfo newInfo = newRegion.getRegionInfo();
  newInfo.setOffline(true);
  Put put = new Put(newRegion.getRegionName());
  put.add(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER, Writables.getBytes(newInfo));
  root.put(put);
  if (LOG.isDebugEnabled()) {
    LOG.debug("updated columns in row: " + Bytes.toStringBinary(newRegion.getRegionName()));
  }
}
public AuthenticatedSocket(InetAddress ia, int port, MessageDigest md, byte[] secret)
    throws IOException, AuthenticationException {
  super(ia, port);
  try {
    OutputStream output = this.getOutputStream();
    // Wrap the input stream so each field can be read fully even if it arrives in pieces.
    DataInputStream input = new DataInputStream(this.getInputStream());
    // Get challenge length
    byte[] challengeSize = new byte[4];
    input.readFully(challengeSize);
    // Receive random challenge string
    byte[] challenge = new byte[Bytes.toInt(challengeSize)];
    input.readFully(challenge);
    // Generate MD5 hash of the challenge followed by the shared secret
    byte[] append = Bytes.append(challenge, secret);
    byte[] hash = md.digest(append);
    // Send the hash back to the server
    output.write(hash);
  } catch (Exception e) {
    throw new AuthenticationException("Authentication failed: " + e.getMessage());
  }
}
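/**
 * Hedged sketch (not from the original source) of the server side of the handshake above: send a
 * 4-byte big-endian challenge length, the random challenge bytes, then check that the client's
 * MD5(challenge + secret) matches. The method and helper names here are hypothetical.
 */
static boolean authenticateClient(Socket client, MessageDigest md, byte[] secret, byte[] challenge)
    throws IOException {
  DataOutputStream out = new DataOutputStream(client.getOutputStream());
  DataInputStream in = new DataInputStream(client.getInputStream());
  out.writeInt(challenge.length); // 4-byte challenge length, big-endian
  out.write(challenge);           // random challenge bytes
  out.flush();
  byte[] expected = md.digest(concat(challenge, secret));
  byte[] received = new byte[expected.length];
  in.readFully(received);         // client answers with its hash
  return MessageDigest.isEqual(expected, received);
}

/** Simple array-concatenation helper standing in for the project's Bytes.append. */
static byte[] concat(byte[] a, byte[] b) {
  byte[] out = new byte[a.length + b.length];
  System.arraycopy(a, 0, out, 0, a.length);
  System.arraycopy(b, 0, out, a.length, b.length);
  return out;
}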
void appendInstance(final Bytes bytes, final V value) {
  bytes.clear();
  if (generatedValueType) {
    ((BytesMarshallable) value).writeMarshallable(bytes);
  } else {
    bytes.writeInstance(vClass, value);
  }
  bytes.flip();
  appendValue(bytes);
}
private HRegionInfo nextRegion() throws IOException {
  try {
    Result results = getMetaRow();
    if (results == null) {
      return null;
    }
    byte[] regionInfoValue =
        results.getValue(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
    if (regionInfoValue == null || regionInfoValue.length == 0) {
      throw new NoSuchElementException(
          "meta region entry missing "
              + Bytes.toString(HConstants.CATALOG_FAMILY)
              + ":"
              + Bytes.toString(HConstants.REGIONINFO_QUALIFIER));
    }
    HRegionInfo region = Writables.getHRegionInfo(regionInfoValue);
    if (!Bytes.equals(region.getTableName(), this.tableName)) {
      return null;
    }
    return region;
  } catch (IOException e) {
    e = RemoteExceptionHandler.checkIOException(e);
    LOG.error("meta scanner error", e);
    metaScanner.close();
    throw e;
  }
}
@Override
protected void updateMeta(final byte[] oldRegion1, final byte[] oldRegion2, HRegion newRegion)
    throws IOException {
  byte[][] regionsToDelete = {oldRegion1, oldRegion2};
  for (int r = 0; r < regionsToDelete.length; r++) {
    if (Bytes.equals(regionsToDelete[r], latestRegion.getRegionName())) {
      latestRegion = null;
    }
    Delete delete = new Delete(regionsToDelete[r]);
    table.delete(delete);
    if (LOG.isDebugEnabled()) {
      LOG.debug("updated columns in row: " + Bytes.toStringBinary(regionsToDelete[r]));
    }
  }
  newRegion.getRegionInfo().setOffline(true);
  Put put = new Put(newRegion.getRegionName());
  put.add(
      HConstants.CATALOG_FAMILY,
      HConstants.REGIONINFO_QUALIFIER,
      Writables.getBytes(newRegion.getRegionInfo()));
  table.put(put);
  if (LOG.isDebugEnabled()) {
    LOG.debug("updated columns in row: " + Bytes.toStringBinary(newRegion.getRegionName()));
  }
}
static TableDescriptorModtime getTableDescriptorModtime(
    FileSystem fs, Path hbaseRootDir, String tableName) throws NullPointerException, IOException {
  // ignore both -ROOT- and .META. tables
  if (Bytes.compareTo(Bytes.toBytes(tableName), HConstants.ROOT_TABLE_NAME) == 0
      || Bytes.compareTo(Bytes.toBytes(tableName), HConstants.META_TABLE_NAME) == 0) {
    return null;
  }
  return getTableDescriptorModtime(fs, FSUtils.getTablePath(hbaseRootDir, tableName));
}
@Before
public void setUp() {
  failures = spy(new Failures());
  bytes = new Bytes();
  bytes.setFailures(failures);
  absValueComparisonStrategy =
      new ComparatorBasedComparisonStrategy(new AbsValueComparator<Byte>());
  bytesWithAbsValueComparisonStrategy = new Bytes(absValueComparisonStrategy);
  bytesWithAbsValueComparisonStrategy.failures = failures;
}
void appendValue(final Bytes value) {
  if (value.remaining() + 4 > tmpBytes.remaining()) {
    throw new IllegalArgumentException(
        "Value too large for entry was "
            + (value.remaining() + 4)
            + ", remaining: "
            + tmpBytes.remaining());
  }
  tmpBytes.writeStopBit(value.remaining());
  tmpBytes.position(align(tmpBytes.position()));
  tmpBytes.write(value);
}
public BPlusTreeIntToString60 read() throws IOException {
  Map<Integer, Node> nodes = new HashMap<Integer, Node>();
  Block block = new Block(file.read(0));
  int maxSize = Bytes.byteToInt(block.getByte(1));
  int rootBlock = Bytes.bytesToInt(block.getBytes(2, 4));
  // readNode(maxSize, rootBlock, blockToNode)
  file.close();
  return BPlusTreeIntToString60.fromBytes(block, nodes);
}
private long longHashCode(Bytes bytes) {
  long h = 0;
  int i = 0;
  long limit = bytes.limit();
  // clustering.
  for (; i < limit - 7; i += 8) h = 10191 * h + bytes.readLong(i);
  // for (; i < bytes.limit() - 3; i += 2)
  //   h = 10191 * h + bytes.readInt(i);
  for (; i < limit; i++) h = 57 * h + bytes.readByte(i);
  h ^= (h >>> 31) + (h << 31);
  h += (h >>> 21) + (h >>> 11);
  return h;
}
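/**
 * Hedged plain-array equivalent of the hash above (not part of the original source): it mixes
 * eight bytes at a time, then the remaining tail bytes one by one, and finishes with the same
 * bit-folding. Byte order is assumed to match the original Bytes view.
 */
static long longHashCode(byte[] b) {
  long h = 0;
  int i = 0;
  java.nio.ByteBuffer buf = java.nio.ByteBuffer.wrap(b).order(java.nio.ByteOrder.nativeOrder());
  for (; i + 8 <= b.length; i += 8) h = 10191 * h + buf.getLong(i); // 8 bytes per step
  for (; i < b.length; i++) h = 57 * h + b[i]; // tail bytes
  h ^= (h >>> 31) + (h << 31);
  h += (h >>> 21) + (h >>> 11);
  return h;
}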
/**
 * Get HTD from HDFS.
 *
 * @param fs filesystem to read from
 * @param hbaseRootDir hbase root directory
 * @param tableName name of the table
 * @return Descriptor or null if none found.
 * @throws IOException
 */
public static HTableDescriptor getTableDescriptor(
    FileSystem fs, Path hbaseRootDir, byte[] tableName) throws IOException {
  HTableDescriptor htd = null;
  try {
    TableDescriptorModtime tdmt =
        getTableDescriptorModtime(fs, hbaseRootDir, Bytes.toString(tableName));
    htd = tdmt == null ? null : tdmt.getTableDescriptor();
  } catch (NullPointerException e) {
    LOG.debug(
        "Exception during readTableDescriptor. Current table name = " + Bytes.toString(tableName),
        e);
  }
  return htd;
}
/**
 * Extracts the value of a cell containing a data point.
 *
 * @param values The contents of a cell in HBase.
 * @param value_idx The offset inside {@code values} at which the value starts.
 * @param flags The flags for this value.
 * @return The value of the cell.
 */
static long extractIntegerValue(final byte[] values, final int value_idx, final byte flags) {
  switch (flags & Const.LENGTH_MASK) {
    case 7: // 8-byte value
      return Bytes.getLong(values, value_idx);
    case 3: // 4-byte value
      return Bytes.getInt(values, value_idx);
    case 1: // 2-byte value
      return Bytes.getShort(values, value_idx);
    case 0: // 1-byte value
      return values[value_idx] & 0xFF;
  }
  throw new IllegalDataException(
      "Integer value @ " + value_idx + " not on 8/4/2/1 bytes in " + Arrays.toString(values));
}
/**
 * Extracts the value of a cell containing a data point.
 *
 * @param values The contents of a cell in HBase.
 * @param value_idx The offset inside {@code values} at which the value starts.
 * @param flags The flags for this value.
 * @return The value of the cell.
 */
static double extractFloatingPointValue(
    final byte[] values, final int value_idx, final byte flags) {
  switch (flags & Const.LENGTH_MASK) {
    case 7: // 8-byte double
      return Double.longBitsToDouble(Bytes.getLong(values, value_idx));
    case 3: // 4-byte float
      return Float.intBitsToFloat(Bytes.getInt(values, value_idx));
  }
  throw new IllegalDataException(
      "Floating point value @ "
          + value_idx
          + " not on 8 or 4 bytes in "
          + Arrays.toString(values));
}
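/**
 * Hedged illustration (not from the original source) of how the two extractors above read a
 * value: the low bits of {@code flags} encode the stored width minus one (0=1, 1=2, 3=4, 7=8
 * bytes). Here a 4-byte big-endian integer is written at offset 0 and read back with flags 0x3;
 * the big-endian assumption is based on the Bytes helpers used above.
 */
static void extractValueExample() {
  byte[] cell = java.nio.ByteBuffer.allocate(4).putInt(42).array(); // 4-byte value
  long v = extractIntegerValue(cell, 0, (byte) 0x3); // flags 0x3 => 4 bytes
  assert v == 42;
}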
@Test
public void testHTableDescriptors() throws IOException, InterruptedException {
  final String name = "testHTableDescriptors";
  FileSystem fs = FileSystem.get(UTIL.getConfiguration());
  // Cleanup old tests if any debris laying around.
  Path rootdir = new Path(UTIL.getDataTestDir(), name);
  final int count = 10;
  // Write out table infos.
  for (int i = 0; i < count; i++) {
    HTableDescriptor htd = new HTableDescriptor(name + i);
    createHTDInFS(fs, rootdir, htd);
  }
  FSTableDescriptors htds =
      new FSTableDescriptors(fs, rootdir) {
        @Override
        public HTableDescriptor get(byte[] tablename)
            throws TableExistsException, FileNotFoundException, IOException {
          LOG.info(Bytes.toString(tablename) + ", cachehits=" + this.cachehits);
          return super.get(tablename);
        }
      };
  for (int i = 0; i < count; i++) {
    assertTrue(htds.get(Bytes.toBytes(name + i)) != null);
  }
  for (int i = 0; i < count; i++) {
    assertTrue(htds.get(Bytes.toBytes(name + i)) != null);
  }
  // Update the table infos
  for (int i = 0; i < count; i++) {
    HTableDescriptor htd = new HTableDescriptor(name + i);
    htd.addFamily(new HColumnDescriptor("" + i));
    FSTableDescriptors.updateHTableDescriptor(fs, rootdir, htd);
  }
  // Wait a while so mod time we write is for sure different.
  Thread.sleep(100);
  for (int i = 0; i < count; i++) {
    assertTrue(htds.get(Bytes.toBytes(name + i)) != null);
  }
  for (int i = 0; i < count; i++) {
    assertTrue(htds.get(Bytes.toBytes(name + i)) != null);
  }
  assertEquals(count * 4, htds.invocations);
  assertTrue(
      "expected=" + (count * 2) + ", actual=" + htds.cachehits, htds.cachehits >= (count * 2));
  assertTrue(htds.get(HConstants.ROOT_TABLE_NAME) != null);
  assertEquals(htds.invocations, count * 4 + 1);
  assertTrue(
      "expected=" + ((count * 2) + 1) + ", actual=" + htds.cachehits,
      htds.cachehits >= ((count * 2) + 1));
}
@Override
public void add(HTableDescriptor htd) throws IOException {
  if (Bytes.equals(HConstants.ROOT_TABLE_NAME, htd.getName())) {
    throw new NotImplementedException();
  }
  if (Bytes.equals(HConstants.META_TABLE_NAME, htd.getName())) {
    throw new NotImplementedException();
  }
  if (HConstants.HBASE_NON_USER_TABLE_DIRS.contains(htd.getNameAsString())) {
    throw new NotImplementedException();
  }
  if (!this.fsreadonly) {
    updateHTableDescriptor(this.fs, this.rootdir, htd);
  }
  long modtime = getTableInfoModtime(this.fs, this.rootdir, htd.getNameAsString());
  this.cache.put(htd.getNameAsString(), new TableDescriptorModtime(modtime, htd));
}
/**
 * A lower-level API that returns the integer ID for raw value bytes. If the value is not found:
 *
 * <p>- if roundingFlag=0, an IllegalArgumentException is thrown; <br>
 * - if roundingFlag&lt;0, the closest smaller ID is returned, if one exists; <br>
 * - if roundingFlag&gt;0, the closest bigger ID is returned, if one exists. <br>
 *
 * <p>This bypasses the cache layer, so it can be significantly slower than getIdFromValue(T
 * value).
 *
 * @throws IllegalArgumentException if the value is not found in the dictionary and rounding is
 *     off, or if rounding cannot find a smaller or bigger ID
 */
public final int getIdFromValueBytes(byte[] value, int offset, int len, int roundingFlag)
    throws IllegalArgumentException {
  if (isNullByteForm(value, offset, len)) {
    return nullId();
  } else {
    int id = getIdFromValueBytesImpl(value, offset, len, roundingFlag);
    if (id == -1) {
      throw new IllegalArgumentException(
          "Value '"
              + Bytes.toString(value, offset, len)
              + "' ("
              + Bytes.toStringBinary(value, offset, len)
              + ") does not exist!");
    }
    return id;
  }
}
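/**
 * Hedged usage sketch (not part of the original source): how the rounding flag changes lookup
 * behaviour. The sample value is hypothetical and assumes it is present in, or bracketed by,
 * existing dictionary entries.
 */
private void roundingLookupExample() {
  byte[] v = "2015-01-01".getBytes(); // hypothetical sample value
  int exact = getIdFromValueBytes(v, 0, v.length, 0); // throws if the value is absent
  int floor = getIdFromValueBytes(v, 0, v.length, -1); // closest smaller ID if one exists
  int ceil = getIdFromValueBytes(v, 0, v.length, 1); // closest bigger ID if one exists
}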
/** Returns a human readable string representation of the object. */
public String toString() {
  // The argument passed to StringBuilder is a pretty good estimate of the
  // length of the final string based on the row key and number of elements.
  final String metric = metricName();
  final int size = size();
  final StringBuilder buf = new StringBuilder(80 + metric.length() + key.length * 4 + size * 16);
  final long base_time = baseTime();
  buf.append("RowSeq(")
      .append(key == null ? "<null>" : Arrays.toString(key))
      .append(" (metric=")
      .append(metric)
      .append("), base_time=")
      .append(base_time)
      .append(" (")
      .append(base_time > 0 ? new Date(base_time * 1000) : "no date")
      .append("), [");
  for (short i = 0; i < size; i++) {
    final short qual = Bytes.getShort(qualifiers, i * 2);
    buf.append('+').append((qual & 0xFFFF) >>> Const.FLAG_BITS);
    if (isInteger(i)) {
      buf.append(":long(").append(longValue(i));
    } else {
      buf.append(":float(").append(doubleValue(i));
    }
    buf.append(')');
    if (i != size - 1) {
      buf.append(", ");
    }
  }
  buf.append("])");
  return buf.toString();
}
public void write(int b) {
  int newcount = mCount + 1;
  if (newcount > mBuffer.length) {
    mBuffer = Bytes.copyOf(mBuffer, Math.max(mBuffer.length << 1, newcount));
  }
  mBuffer[mCount] = (byte) b;
  mCount = newcount;
}
public static void main(String[] args) throws IOException {
  OldSketches sketches = new OldSketches("./resources/", "5even");
  Random rand = new Random();
  long totalTime = 0;
  byte[] value = new byte[1024];
  byte[] key;
  for (long i = 0; i < 1000000; i++) {
    rand.nextBytes(value);
    key = Bytes.fromLong(i).toByteArray();
    long start = System.currentTimeMillis();
    if (i % 2 == 0) {
      sketches.put(key, value);
    } else {
      sketches.delete(key);
    }
    totalTime += System.currentTimeMillis() - start;
  }
  System.out.println("Total write time: " + totalTime);
  long start = System.currentTimeMillis();
  sketches.bomb();
  System.out.println("Bombing time: " + (System.currentTimeMillis() - start));
  sketches.shutdown();
}
void directPut(final Bytes key, final Bytes value, int hash2) {
  lock();
  try {
    hash2 = hashLookup.startSearch(hash2);
    while (true) {
      final int pos = hashLookup.nextPos();
      if (pos < 0) {
        directPutEntry(key, value, hash2);
        return;
      } else {
        final long offset = entriesOffset + pos * entrySize;
        tmpBytes.storePositionAndSize(bytes, offset, entrySize);
        if (!keyEquals(key, tmpBytes)) continue;
        final long keyLength = key.remaining();
        tmpBytes.skip(keyLength);
        appendValue(value);
        return;
      }
    }
  } finally {
    unlock();
  }
}
void directRemove(final Bytes keyBytes, int hash2) {
  lock();
  try {
    hash2 = hashLookup.startSearch(hash2);
    while (true) {
      final int pos = hashLookup.nextPos();
      if (pos < 0) {
        return;
      } else {
        final long offset = entriesOffset + pos * entrySize;
        tmpBytes.storePositionAndSize(bytes, offset, entrySize);
        if (!keyEquals(keyBytes, tmpBytes)) continue;
        final long keyLength =
            align(keyBytes.remaining() + tmpBytes.position()); // includes the stop bit length.
        tmpBytes.position(keyLength);
        hashLookup.remove(hash2, pos);
        decrementSize();
        freeList.clear(pos);
        if (pos < nextSet) nextSet = pos;
        return;
      }
    }
  } finally {
    unlock();
  }
}
public static ResponseBody readResponse(InputStream is, int expectBodyLenth) throws Exception {
  byte[] respHeader = new byte[10];
  int length = is.read(respHeader);
  // Check that the full header was read
  if (length != respHeader.length) {
    throw new IllegalStateException(
        "read responseHeader fail, length not enough, length: " + length);
  }
  // Check that the command byte marks a response
  if (respHeader[8] != TRACKER_PROTO_CMD_RESP) {
    throw new IllegalStateException("invalid responseHeader cmd, cmd: " + respHeader[8]);
  }
  // Status byte: 0 means success, non-zero is an error code
  if (respHeader[9] != 0) {
    return new ResponseBody(respHeader[9], null);
  }
  // Parse and validate the body length
  long bodyLength = Bytes.bytes2long(respHeader, 0);
  if (bodyLength < 0) {
    throw new IllegalStateException("invalid bodyLength, bodyLength: " + bodyLength);
  }
  byte[] body = new byte[(int) bodyLength];
  int readBodyLength = is.read(body);
  // Verify that the body was read completely
  if (bodyLength != readBodyLength) {
    throw new IllegalStateException(
        "invalid read body , bodyLength: " + bodyLength + ", readBodyLength: " + readBodyLength);
  }
  if (expectBodyLenth > 0 && readBodyLength != expectBodyLenth) {
    throw new IllegalStateException(
        "readBodyLength:" + readBodyLength + ", expectBodyLenth:" + expectBodyLenth);
  }
  return new ResponseBody(respHeader[9], body);
}
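/**
 * Hedged sketch (not part of the original source) of building a header with the same 10-byte
 * layout that readResponse expects: an 8-byte body length, a 1-byte command, and a 1-byte
 * status/error code. Big-endian order is an assumption based on Bytes.bytes2long above.
 */
static byte[] buildHeader(long bodyLength, byte cmd) {
  java.nio.ByteBuffer header = java.nio.ByteBuffer.allocate(10);
  header.putLong(bodyLength); // bytes 0-7: body length
  header.put(cmd);            // byte 8: command
  header.put((byte) 0);       // byte 9: status, 0 = OK
  return header.array();
}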
@Test(expected = IllegalArgumentException.class)
public void testEmptyNamespaceName() {
  for (String nn : emptyNames) {
    TableName.isLegalNamespaceName(Bytes.toBytes(nn));
    fail("invalid Namespace name " + nn + " should have failed with IllegalArgumentException");
  }
}
@Test(expected = IllegalArgumentException.class)
public void testEmptyTableName() {
  for (String tn : emptyNames) {
    TableName.isLegalFullyQualifiedTableName(Bytes.toBytes(tn));
    fail("invalid tablename " + tn + " should have failed with IllegalArgumentException");
  }
}
/**
 * Decodes the response of an RPC and triggers its {@link Deferred}.
 *
 * <p>This method is used by FrameDecoder when the channel gets disconnected. The buffer for that
 * channel is passed to this method in case there's anything left in it.
 *
 * @param ctx Unused.
 * @param chan The channel on which the response came.
 * @param buf The buffer containing the raw RPC response.
 * @return {@code null}, always.
 */
@Override
protected Object decodeLast(
    final ChannelHandlerContext ctx,
    final Channel chan,
    final ChannelBuffer buf,
    final VoidEnum unused) {
  // When we disconnect, decodeLast is called instead of decode.
  // We simply check whether there's any data left in the buffer, in which
  // case we attempt to process it. But if there's no data left, then we
  // don't even bother calling decode() as it'll complain that the buffer
  // doesn't contain enough data, which unnecessarily pollutes the logs.
  if (buf.readable()) {
    try {
      return decode(ctx, chan, buf, unused);
    } finally {
      if (buf.readable()) {
        LOG.error(
            getPeerUuidLoggingString()
                + "After decoding the last message on "
                + chan
                + ", there was still some undecoded bytes in the channel's"
                + " buffer (which are going to be lost): "
                + buf
                + '='
                + Bytes.pretty(buf));
      }
    }
  } else {
    return null;
  }
}
/**
 * Scans {@code bytes} for the next LF-terminated line starting at {@code start}. On success,
 * {@code line} is set to the range of the line excluding the terminator (a preceding CR is also
 * stripped) and the index just past the LF is returned; -1 is returned if no complete line is
 * found before {@code limit}.
 */
public static long parseLine(Bytes bytes, Range line, long start, long limit) {
  byte b0 = 0, b1 = 0;
  long ret = -1;
  long i;
  for (i = start; i < limit; i++) {
    b0 = b1;
    b1 = bytes.get(i);
    if (b1 == LF) {
      long len;
      if (b0 == CR) {
        len = i - start - 1;
      } else {
        len = i - start;
      }
      line.set(start, len);
      ret = i + 1;
      break;
    }
  }
  return ret;
}
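/**
 * Hedged plain-array equivalent of parseLine (not part of the original source), illustrating the
 * CR/LF handling with a {@code byte[]} and a two-element {offset, length} array instead of the
 * project's Bytes and Range types.
 */
static long parseLine(byte[] bytes, long[] line, int start, int limit) {
  byte b0 = 0, b1 = 0;
  for (int i = start; i < limit; i++) {
    b0 = b1;
    b1 = bytes[i];
    if (b1 == '\n') {
      long len = (b0 == '\r') ? i - start - 1 : i - start; // strip a trailing CR
      line[0] = start;
      line[1] = len;
      return i + 1; // index just past the LF
    }
  }
  return -1; // no complete line found
}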
/**
 * Set an encoded (inclusive) start partition key for the scan.
 *
 * @param partitionKey the encoded partition key
 * @return this instance
 */
@InterfaceAudience.LimitedPrivate("Impala")
public S lowerBoundPartitionKeyRaw(byte[] partitionKey) {
  if (Bytes.memcmp(partitionKey, lowerBoundPartitionKey) > 0) {
    this.lowerBoundPartitionKey = partitionKey;
  }
  return (S) this;
}
/**
 * Scans the table and merges two adjacent regions if they are small. This only happens when a
 * lot of rows are deleted.
 *
 * <p>When merging the META region, the HBase instance must be offline. When merging a normal
 * table, the HBase instance must be online, but the table must be disabled.
 *
 * @param conf - configuration object for HBase
 * @param fs - FileSystem where regions reside
 * @param tableName - Table to be merged
 * @param testMasterRunning True if we are to verify master is down before running merge
 * @throws IOException
 */
public static void merge(
    Configuration conf, FileSystem fs, final byte[] tableName, final boolean testMasterRunning)
    throws IOException {
  boolean masterIsRunning = false;
  if (testMasterRunning) {
    masterIsRunning =
        HConnectionManager.execute(
            new HConnectable<Boolean>(conf) {
              @Override
              public Boolean connect(HConnection connection) throws IOException {
                return connection.isMasterRunning();
              }
            });
  }
  if (Bytes.equals(tableName, HConstants.META_TABLE_NAME)) {
    if (masterIsRunning) {
      throw new IllegalStateException("Can not compact META table if instance is on-line");
    }
    new OfflineMerger(conf, fs).process();
  } else {
    if (!masterIsRunning) {
      throw new IllegalStateException("HBase instance must be running to merge a normal table");
    }
    HBaseAdmin admin = new HBaseAdmin(conf);
    if (!admin.isTableDisabled(tableName)) {
      throw new TableNotDisabledException(tableName);
    }
    new OnlineMerger(conf, fs, tableName).process();
  }
}
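/**
 * Hedged usage sketch (not part of the original source): merging small regions of an already
 * disabled user table via the static method above. The table name is a hypothetical example.
 */
static void mergeExample(Configuration conf) throws IOException {
  FileSystem fs = FileSystem.get(conf);
  // The table must already be disabled; pass true so the running master is verified first.
  merge(conf, fs, Bytes.toBytes("usertable"), true);
}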
/**
 * Scans {@code bytes} for consecutive LF-terminated lines starting at {@code start}, adding the
 * range of each non-empty line (CR/LF terminator excluded) to {@code lines}. Scanning stops at
 * the first empty line, and the index just past its LF is returned; -1 is returned if no empty
 * line is found before {@code limit}.
 */
public static long parseLines(Bytes bytes, Ranges lines, long start, long limit) {
  byte b0 = 0, b1 = 0;
  long ret = -1;
  long i;
  long from = start;
  for (i = start; i < limit; i++) {
    b0 = b1;
    b1 = bytes.get(i);
    if (b1 == LF) {
      long len;
      if (b0 == CR) {
        len = i - from - 1;
      } else {
        len = i - from;
      }
      if (len == 0) {
        ret = i + 1;
        break;
      }
      lines.add(from, len);
      from = i + 1;
    }
  }
  return ret;
}