protected static String a() {
    Info advertisingIdInfo = null;
    try {
        advertisingIdInfo = AdvertisingIdClient.getAdvertisingIdInfo(Chartboost.sharedChartboost().getContext());
    } catch (IOException e) {
        // fall through with a null result
    } catch (GooglePlayServicesRepairableException e2) {
        // fall through with a null result
    } catch (GooglePlayServicesNotAvailableException e3) {
        // fall through with a null result
    }
    if (advertisingIdInfo == null) {
        c.a(a.c);
        return null;
    }
    if (advertisingIdInfo.isLimitAdTrackingEnabled()) {
        c.a(a.e);
    } else {
        c.a(a.d);
    }
    try {
        // Pack the advertising ID's UUID into 16 big-endian bytes.
        UUID fromString = UUID.fromString(advertisingIdInfo.getId());
        ByteBuffer wrap = ByteBuffer.wrap(new byte[16]);
        wrap.putLong(fromString.getMostSignificantBits());
        wrap.putLong(fromString.getLeastSignificantBits());
        return b.b(wrap.array());
    } catch (IllegalArgumentException e4) {
        CBLogging.a("CBIdentityAdv", "Exception raised retrieveAdvertisingID", e4);
        return advertisingIdInfo.getId().replace("-", AdTrackerConstants.BLANK);
    }
}
public String asHexToken(Key key) {
    try {
        Cipher encodingCipher = Cipher.getInstance("AES");
        encodingCipher.init(Cipher.ENCRYPT_MODE, key);
        // Layout: 16-byte MD5 digest, 1-byte id, 8-byte expiry, 8-byte uoid, payload.
        ByteBuffer buffer = ByteBuffer.allocate(16 + 1 + 8 + 8 + getBufferSize());
        buffer.position(16);
        buffer.put(getId());
        buffer.putLong(getExpires().getTimeInMillis());
        buffer.putLong(getUoid());
        getBytes(buffer);
        if (buffer.position() != buffer.capacity()) {
            throw new RuntimeException("Buffer's position should be at the end "
                    + buffer.position() + "/" + buffer.capacity());
        }
        // Digest everything after the reserved 16 bytes, then store the digest up front.
        MessageDigest messageDigest = MessageDigest.getInstance("MD5");
        buffer.position(16);
        messageDigest.update(buffer);
        buffer.position(0);
        buffer.put(messageDigest.digest());
        byte[] encodedBytes = encodingCipher.doFinal(buffer.array());
        return new String(Hex.encodeHex(encodedBytes));
    } catch (Exception e) {
        LOGGER.error("", e);
    }
    return null;
}
public String uuidToBase64(String str) {
    UUID uuid = UUID.fromString(str);
    ByteBuffer bb = ByteBuffer.wrap(new byte[16]);
    bb.putLong(uuid.getMostSignificantBits());
    bb.putLong(uuid.getLeastSignificantBits());
    return Base64.encodeBase64URLSafeString(bb.array());
}
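The mapping is reversible. A minimal sketch of a hypothetical base64ToUuid counterpart, assuming the same org.apache.commons.codec.binary.Base64 helper (decodeBase64 accepts the URL-safe alphabet, so the round trip is lossless):

public UUID base64ToUuid(String str) {
    // Hypothetical inverse of uuidToBase64 above; not part of the original source.
    ByteBuffer bb = ByteBuffer.wrap(Base64.decodeBase64(str));
    long msb = bb.getLong();
    long lsb = bb.getLong();
    return new UUID(msb, lsb);
}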
@Override
public ByteBuffer write(ByteBuffer buff, Object obj) {
    if (!(obj instanceof Long)) {
        return super.write(buff, obj);
    }
    long x = (Long) obj;
    if (x < 0) {
        // -Long.MIN_VALUE overflows back to a negative value,
        // so it must take the fixed 8-byte path.
        if (-x < 0 || -x > DataUtils.COMPRESSED_VAR_LONG_MAX) {
            buff.put((byte) TAG_LONG_FIXED);
            buff.putLong(x);
        } else {
            buff.put((byte) TAG_LONG_NEGATIVE);
            DataUtils.writeVarLong(buff, -x);
        }
    } else if (x <= 7) {
        buff.put((byte) (TAG_LONG_0_7 + x));
    } else if (x <= DataUtils.COMPRESSED_VAR_LONG_MAX) {
        buff.put((byte) TYPE_LONG);
        DataUtils.writeVarLong(buff, x);
    } else {
        buff.put((byte) TAG_LONG_FIXED);
        buff.putLong(x);
    }
    return buff;
}
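For context, a hedged sketch of what the matching read path could look like, assuming the same tag constants and a DataUtils.readVarLong counterpart to writeVarLong; the real deserializer may differ:

// Sketch only: decodes the tagged formats produced by write() above.
private static long readTaggedLong(ByteBuffer buff, int tag) {
    switch (tag) {
    case TAG_LONG_FIXED:
        return buff.getLong();                // full 8-byte form
    case TAG_LONG_NEGATIVE:
        return -DataUtils.readVarLong(buff);  // magnitude was var-long encoded
    case TYPE_LONG:
        return DataUtils.readVarLong(buff);   // positive var-long
    default:
        return tag - TAG_LONG_0_7;            // values 0..7 packed into the tag byte
    }
}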
public static DataFileAccessorImpl create(final File file, final Date startDate, final Date endDate)
        throws Exception {
    logger.debug("Creating new file: {}", file);
    if (!file.createNewFile()) {
        throw new IllegalStateException(String.format("Unable to create file %s, already exists", file));
    }
    final FileOutputStream out = new FileOutputStream(file);
    try {
        final FileChannel channel = out.getChannel();
        final ByteBuffer buffer = ByteBuffer.allocate(100);
        buffer.putInt(0x1202); // magic marker
        buffer.putInt(0x0101); // version
        buffer.putLong(startDate.getTime()); // start timestamp
        buffer.putLong(endDate.getTime()); // end timestamp
        buffer.flip();
        while (buffer.hasRemaining()) {
            final int rc = channel.write(buffer);
            logger.debug("Header written - {} bytes", rc);
        }
        return new DataFileAccessorImpl(file);
    } finally {
        out.close();
    }
}
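A companion sketch that reads the header back and checks the magic marker; the 24-byte layout (two ints, two longs) comes from the writer above, while the method name and error handling are assumptions:

// Sketch only: validates a header written by create() above.
static void verifyHeader(final File file) throws Exception {
    try (FileInputStream in = new FileInputStream(file)) {
        final ByteBuffer buffer = ByteBuffer.allocate(4 + 4 + 8 + 8);
        while (buffer.hasRemaining() && in.getChannel().read(buffer) >= 0) {
            // loop until the full header has been read
        }
        buffer.flip();
        if (buffer.getInt() != 0x1202) {
            throw new IllegalStateException("Bad magic marker");
        }
        final int version = buffer.getInt();
        final long start = buffer.getLong();
        final long end = buffer.getLong();
        logger.debug("Header read - version=0x{} start={} end={}",
                Integer.toHexString(version), start, end);
    }
}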
private static byte[] createTCPMsg(int opcode) {
    ByteBuffer buffer = ByteBuffer.wrap(new byte[1]);
    switch (opcode) {
        case 0:
            buffer.put((byte) 0x00);
            break;
        case 1:
            String userName = "******";
            // Size by the encoded byte length, not the character count,
            // so non-ASCII names cannot overflow the buffer.
            byte[] nameBytes = userName.getBytes(StandardCharsets.UTF_8);
            buffer = ByteBuffer.wrap(new byte[nameBytes.length + 1]);
            buffer.put((byte) 0x01);
            buffer.put(nameBytes);
            break;
        case 2:
        case 3:
        case 4:
            // 1-byte opcode followed by the 16-byte UUID.
            buffer = ByteBuffer.wrap(new byte[17]);
            buffer.put((byte) opcode);
            buffer.putLong(userId.getMostSignificantBits());
            buffer.putLong(userId.getLeastSignificantBits());
            break;
    }
    return buffer.array();
}
/**
 * Fences a ledger. From this point on, clients will be unable to write to this ledger. Only
 * recoveryAddEntry will be able to add entries to the ledger. This method is idempotent. Once a
 * ledger is fenced, it can never be unfenced. Fencing a fenced ledger has no effect.
 */
public SettableFuture<Boolean> fenceLedger(long ledgerId, byte[] masterKey)
        throws IOException, BookieException {
    LedgerDescriptor handle = handles.getHandle(ledgerId, masterKey);
    boolean success;
    synchronized (handle) {
        success = handle.setFenced();
    }
    if (success) {
        // fenced for the first time; add the key to the journal to ensure we can rebuild
        ByteBuffer bb = ByteBuffer.allocate(8 + 8);
        bb.putLong(ledgerId);
        bb.putLong(METAENTRY_ID_FENCE_KEY);
        bb.flip();
        FutureWriteCallback fwc = new FutureWriteCallback();
        LOG.debug("record fenced state for ledger {} in journal.", ledgerId);
        journal.logAddEntry(bb, fwc, null);
        return fwc.getResult();
    } else {
        // already fenced
        SettableFuture<Boolean> successFuture = SettableFuture.create();
        successFuture.set(true);
        return successFuture;
    }
}
@Override
public void writeAfterController(long timestamp) {
    jointCommand.clear();
    jointCommand.putLong(estimatorTicksPerControlTick);
    jointCommand.putLong(timestamp);
    jointCommand.putLong(estimatorFrequencyInHz);
    for (int i = 0; i < joints.size(); i++) {
        OneDoFJoint joint = joints.get(i);
        if (fingerJointMap == null || !fingerJointMap.containsKey(joint.getName())) {
            if (joint.isUnderPositionControl())
                jointCommand.putDouble(joint.getqDesired());
            else
                jointCommand.putDouble(joint.getTau());
        } else {
            // fingers are always position controlled
            jointCommand.putDouble(fingerJointMap.get(joint.getName()).getqDesired());
        }
    }
    jointCommand.flip();
    try {
        while (jointCommand.hasRemaining()) {
            channel.write(jointCommand);
        }
    } catch (IOException e) {
        e.printStackTrace();
    }
}
@Override
void putMyBytes(ByteBuffer buffer) {
    buffer.putLong(goodsId);
    buffer.putInt(quantity);
    buffer.putLong(priceNQT);
    buffer.putInt(deliveryDeadlineTimestamp);
}
@Override
public ByteBuffer getPacket() {
    // int size = 100;
    ByteBuffer response = ByteBuffer.allocate(106);
    // response.put((byte)0x84);
    // response.put(Hex.intToBytes(player.getPacketCount(), 3),0,3);
    response.put((byte) 0x60); // Encapsulation ID
    response.put((byte) 0x03);
    response.put((byte) 0x00); // size of packet
    response.put(Hex.intToBytes(player.getDataCount(), 3), 0, 3);
    response.put((byte) 0x00); // MinecraftPE ID
    response.putInt(16);
    response.put((byte) 0x04);
    response.put((byte) 0x3f);
    response.put((byte) 0x57);
    response.put((byte) 0xfe);
    response.put((byte) 0xcd);
    response.putShort((short) player.clientPort);
    for (int i = 0; i < 10; i++) {
        response.put(Hex.intToBytes(4, 3));
        response.putInt(0xffffffff);
    }
    response.putShort((short) 0);
    response.putLong(unknown1);
    response.putLong(1L);
    return response;
}
@Override
public byte[] getBytes() {
    ByteBuffer byteBuffer = ByteBuffer.allocate(98);
    byteBuffer.order(ByteOrder.BIG_ENDIAN);
    byteBuffer.putLong(0, this.getConnectionId());
    byteBuffer.putInt(8, this.getAction().value());
    byteBuffer.putInt(12, this.getTransactionId());
    byteBuffer.position(16);
    byteBuffer.put(infoHash.getBytes());
    byteBuffer.position(36);
    byteBuffer.put(peerId.getBytes());
    byteBuffer.putLong(56, downloaded);
    byteBuffer.putLong(64, left);
    byteBuffer.putLong(72, uploaded);
    byteBuffer.putInt(80, this.getEvent().value());
    byteBuffer.putInt(84, peerInfo.getIpAddress());
    byteBuffer.putInt(88, key);
    byteBuffer.putInt(92, numWant);
    // putChar writes the port as an unsigned 16-bit big-endian value at offset 96.
    byteBuffer.putChar(96, (char) peerInfo.getPort());
    byteBuffer.flip();
    return byteBuffer.array();
}
@Override
public SessionID createIdentifier() {
    final UUID uuid = UUID.randomUUID();
    ByteBuffer bb = ByteBuffer.wrap(new byte[16]);
    bb.putLong(uuid.getMostSignificantBits());
    bb.putLong(uuid.getLeastSignificantBits());
    return SessionID.createSessionID(bb.array());
}
/** {@inheritDoc} */
@Override
public void writeBytes(final ByteBuffer buffer, final IRegistry<String> stringRegistry)
        throws BufferOverflowException {
    buffer.putLong(this.getTimestamp());
    buffer.putLong(this.getTraceId());
    buffer.putInt(this.getOrderIndex());
    buffer.putInt(this.getLockId());
}
@Override
public void aggregate(ByteBuffer buf, int position) {
    long time = timeSelector.get();
    long lastTime = buf.getLong(position);
    if (time >= lastTime) {
        // Keep the latest (timestamp, value) pair packed as two adjacent longs.
        buf.putLong(position, time);
        buf.putLong(position + Longs.BYTES, valueSelector.get());
    }
}
/**
 * Write the header.
 *
 * @param buff the target buffer
 */
void writeHeader(ByteBuffer buff) {
    buff.put((byte) 'c');
    buff.putInt(length);
    buff.putInt(id);
    buff.putInt(pageCount);
    buff.putLong(metaRootPos);
    buff.putLong(maxLength);
    buff.putLong(maxLengthLive);
}
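A hedged sketch of the corresponding reader, assuming the fields are consumed in exactly the order writeHeader emits them and that the same instance fields exist:

// Sketch only: parses a header produced by writeHeader() above.
void readHeader(ByteBuffer buff) {
    if (buff.get() != 'c') {
        throw new IllegalStateException("not a chunk header");
    }
    length = buff.getInt();
    id = buff.getInt();
    pageCount = buff.getInt();
    metaRootPos = buff.getLong();
    maxLength = buff.getLong();
    maxLengthLive = buff.getLong();
}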
/**
 * Appends a zip64 extended info record to the extras contained in {@code ze}. If {@code ze}
 * contains no extras, a new extras array is created.
 */
public static void insertZip64ExtendedInfoToExtras(ZipEntry ze) throws ZipException {
    final byte[] output;
    // We always write the size, uncompressed size and local rel header offset in all our
    // Zip64 extended info headers (in both the local file header as well as the central
    // directory). We always omit the disk number because we don't support spanned
    // archives anyway.
    //
    // 2 bytes : Zip64 Extended Info Header ID
    // 2 bytes : Zip64 Extended Info Field Size.
    // 8 bytes : Uncompressed size
    // 8 bytes : Compressed size
    // 8 bytes : Local header rel offset.
    // ----------
    // 28 bytes : total
    final int extendedInfoSize = 28;
    if (ze.extra == null) {
        output = new byte[extendedInfoSize];
    } else {
        // If the existing extras are already too big, we have no choice but to throw
        // an error.
        if (ze.extra.length + extendedInfoSize > 65535) {
            throw new ZipException("No space in extras for zip64 extended entry info");
        }
        // We copy existing extras over and put the zip64 extended info at the beginning. This
        // is to avoid breakages in the presence of "old style" extras which don't contain
        // headers and lengths. The spec is again silent about these inconsistencies.
        //
        // This means that, for ZipOutputStream users, the value of ZipEntry.getExtra after an
        // entry is written will differ from its value before. This shouldn't be an issue
        // in practice.
        output = new byte[ze.extra.length + extendedInfoSize];
        System.arraycopy(ze.extra, 0, output, extendedInfoSize, ze.extra.length);
    }
    ByteBuffer bb = ByteBuffer.wrap(output).order(ByteOrder.LITTLE_ENDIAN);
    bb.putShort(ZIP64_EXTENDED_INFO_HEADER_ID);
    // We subtract four because extendedInfoSize includes the ID and field
    // size itself.
    bb.putShort((short) (extendedInfoSize - 4));
    if (ze.getMethod() == ZipEntry.STORED) {
        bb.putLong(ze.size);
        bb.putLong(ze.compressedSize);
    } else {
        // Store these fields in the data descriptor instead.
        bb.putLong(0); // size.
        bb.putLong(0); // compressed size.
    }
    // The offset is only relevant in the central directory entry, but we write it out here
    // anyway, since we know what it is.
    bb.putLong(ze.localHeaderRelOffset);
    ze.extra = output;
}
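Since the record is always placed at offset 0 of ze.extra, parsing it back out is straightforward. A hedged sketch reusing the constants above; the method name is an assumption:

// Sketch only: reads back the 28-byte zip64 record inserted above.
public static void readZip64ExtendedInfoFromExtras(ZipEntry ze) throws ZipException {
    ByteBuffer bb = ByteBuffer.wrap(ze.extra).order(ByteOrder.LITTLE_ENDIAN);
    if (bb.getShort() != ZIP64_EXTENDED_INFO_HEADER_ID) {
        throw new ZipException("No zip64 extended info at offset 0");
    }
    int fieldSize = bb.getShort() & 0xffff; // 24: three 8-byte fields follow
    long size = bb.getLong();               // 0 for non-STORED entries
    long compressedSize = bb.getLong();     // 0 for non-STORED entries
    long localHeaderRelOffset = bb.getLong();
    // For non-STORED entries the real sizes live in the data descriptor.
}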
@Override
void putMyBytes(ByteBuffer buffer) {
    buffer.putLong(purchaseId);
    // The sign bit of the length field doubles as the "goods is text" flag.
    buffer.putInt(goodsIsText ? goods.getData().length | Integer.MIN_VALUE : goods.getData().length);
    buffer.put(goods.getData());
    buffer.put(goods.getNonce());
    buffer.putLong(discountNQT);
}
@Override
void putMyBytes(ByteBuffer buffer) {
    buffer.putLong(assetId);
    buffer.putLong(quantityQNT);
    if (getVersion() == 0 && comment != null) {
        byte[] commentBytes = Convert.toBytes(this.comment);
        buffer.putShort((short) commentBytes.length);
        buffer.put(commentBytes);
    }
}
public PacketEntityInformation(List<LivingEntity> entities) {
    // 4 bytes for entity id, 16 for uuid
    ByteBuffer tempbuffer = ByteBuffer.allocate(entities.size() * 20);
    for (Entity e : entities) {
        // Note: the least-significant half of the UUID is written first here.
        tempbuffer.putLong(e.getUniqueId().getLeastSignificantBits());
        tempbuffer.putLong(e.getUniqueId().getMostSignificantBits());
        tempbuffer.putInt(e.getEntityId());
    }
    data = tempbuffer.array();
}
private ByteBuffer genEntry(long ledger, long entry, int size) {
    ByteBuffer bb = ByteBuffer.wrap(new byte[size]);
    bb.putLong(ledger);
    bb.putLong(entry);
    while (bb.hasRemaining()) {
        bb.put((byte) 0xFF);
    }
    bb.flip();
    return bb;
}
public THandleIdentifier toTHandleIdentifier() {
    byte[] guid = new byte[16];
    byte[] secret = new byte[16];
    ByteBuffer guidBB = ByteBuffer.wrap(guid);
    ByteBuffer secretBB = ByteBuffer.wrap(secret);
    guidBB.putLong(publicId.getMostSignificantBits());
    guidBB.putLong(publicId.getLeastSignificantBits());
    secretBB.putLong(secretId.getMostSignificantBits());
    secretBB.putLong(secretId.getLeastSignificantBits());
    return new THandleIdentifier(ByteBuffer.wrap(guid), ByteBuffer.wrap(secret));
}
@JsonIgnore
public void generateApiKey() {
    Encoder e = Base64.getEncoder();
    // Concatenate two random UUIDs for 32 bytes of key material.
    ByteBuffer bb = ByteBuffer.wrap(new byte[16 * 2]);
    for (int i = 0; i < 2; i++) {
        UUID u = UUID.randomUUID();
        bb.putLong(u.getMostSignificantBits());
        bb.putLong(u.getLeastSignificantBits());
    }
    setApiKey(e.encodeToString(bb.array()));
}
@Override
public ByteBuffer write(ByteBuffer buff, Object obj) {
    if (!(obj instanceof UUID)) {
        return super.write(buff, obj);
    }
    buff.put((byte) TYPE_UUID);
    UUID a = (UUID) obj;
    buff.putLong(a.getMostSignificantBits());
    buff.putLong(a.getLeastSignificantBits());
    return buff;
}
public void encode(ByteBuffer buf) {
    buf.putInt(type.ordinal());
    buf.putInt(keySize);
    buf.putInt(valueSize);
    buf.putLong(keyCount);
    buf.putInt(order.ordinal());
    buf.putInt(distribution.ordinal());
    buf.putLong(start);
    buf.putLong(end);
    buf.putInt(scanBufferSize);
}
/**
 * Prepare a send buffer. The command identifier is taken from the {@code cmd} field.
 *
 * @param key the key (for keyed ops)
 * @param cas the CAS (compare-and-swap) value
 * @param val the data payload
 * @param extraHeaders any additional headers that need to be sent
 */
protected void prepareBuffer(String key, long cas, byte[] val, Object... extraHeaders) {
    int extraLen = 0;
    for (Object o : extraHeaders) {
        if (o instanceof Integer) {
            extraLen += 4;
        } else if (o instanceof byte[]) {
            extraLen += ((byte[]) o).length;
        } else if (o instanceof Long) {
            extraLen += 8;
        } else {
            assert false : "Unhandled extra header type: " + o.getClass();
        }
    }
    final byte[] keyBytes = KeyUtil.getKeyBytes(key);
    int bufSize = MIN_RECV_PACKET + keyBytes.length + val.length;

    // # magic, opcode, keylen, extralen, datatype, [reserved],
    // bodylen, opaque, cas
    // REQ_PKT_FMT=">BBHBBxxIIQ"

    // set up the initial header stuff
    ByteBuffer bb = ByteBuffer.allocate(bufSize + extraLen);
    assert bb.order() == ByteOrder.BIG_ENDIAN;
    bb.put(REQ_MAGIC);
    bb.put((byte) cmd);
    bb.putShort((short) keyBytes.length);
    bb.put((byte) extraLen);
    bb.put((byte) 0); // data type
    bb.putShort((short) 0); // reserved
    bb.putInt(keyBytes.length + val.length + extraLen);
    bb.putInt(opaque);
    bb.putLong(cas);

    // Add the extra headers.
    for (Object o : extraHeaders) {
        if (o instanceof Integer) {
            bb.putInt((Integer) o);
        } else if (o instanceof byte[]) {
            bb.put((byte[]) o);
        } else if (o instanceof Long) {
            bb.putLong((Long) o);
        } else {
            assert false : "Unhandled extra header type: " + o.getClass();
        }
    }

    // Add the normal stuff
    bb.put(keyBytes);
    bb.put(val);

    bb.flip();
    setBuffer(bb);
}
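To make the ">BBHBBxxIIQ" layout concrete, here is a hedged sketch that walks the same fixed 24-byte header (all multi-byte fields big-endian, ByteBuffer's default); the method is illustrative, not part of the original class:

// Sketch only: decodes the header fields laid out by prepareBuffer().
static void dumpHeader(ByteBuffer bb) {
    byte magic = bb.get();           // B  - REQ_MAGIC
    byte opcode = bb.get();          // B  - command
    short keyLength = bb.getShort(); // H  - key length
    byte extrasLength = bb.get();    // B  - extras length
    byte dataType = bb.get();        // B  - data type
    bb.getShort();                   // xx - reserved
    int totalBody = bb.getInt();     // I  - extras + key + value length
    int opaque = bb.getInt();        // I  - echoed back by the server
    long cas = bb.getLong();         // Q  - compare-and-swap token
    System.out.printf("magic=0x%02x op=0x%02x key=%d body=%d cas=%d%n",
            magic, opcode, keyLength, totalBody, cas);
}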
public void writeComplete(int rc, long ledgerId, long entryId, Object ctx) {
    Cnxn src = (Cnxn) ctx;
    ByteBuffer bb = ByteBuffer.allocate(24);
    bb.putInt(BookieProtocol.ADDENTRY);
    bb.putInt(rc);
    bb.putLong(ledgerId);
    bb.putLong(entryId);
    bb.flip();
    if (LOG.isTraceEnabled()) {
        LOG.trace("Add entry rc = " + rc + " for " + entryId + "@" + ledgerId);
    }
    src.sendResponse(new ByteBuffer[] { bb });
}
private void initializeFile() {
    try {
        int totalmapsize = 1 + smallEntriesCount + hashTableSize;
        sizeMap = write(0, 8 * totalmapsize);
        // I have no idea if a new mapped file is 0-initialized; a test
        // shows that it is, but the API docs are silent about this.
        // So, to be on the safe side, a quick put to 0...
        for (int i = 0; i < totalmapsize; i++)
            sizeMap.putLong(0);
        sizeMap.putLong(newBucketOffset, 8 * totalmapsize);
    } catch (Throwable t) {
        throw new HGException(t);
    }
}
@Test
public void shouldMoveFiles() throws IOException {
    // given
    final EphemeralFileSystemAbstraction fs = new EphemeralFileSystemAbstraction();
    fs.mkdirs(storeDir);
    fs.mkdirs(migrationDir);

    final Set<File> logsInStoreDir = new HashSet<>(Arrays.asList(
            new File(storeDir, getLegacyLogFilename(1)),
            new File(storeDir, getLegacyLogFilename(2))));

    final List<File> logsInMigrationDir = Arrays.asList(
            new File(migrationDir, getLegacyLogFilename(1)),
            new File(migrationDir, getLegacyLogFilename(2)));

    for (File file : logsInMigrationDir) {
        try (StoreChannel channel = fs.create(file)) {
            ByteBuffer buffer = ByteBuffer.allocate(8);
            buffer.putLong(42);
            buffer.flip();
            channel.write(buffer);
        }
    }

    // should override older files
    for (File file : logsInStoreDir) {
        try (StoreChannel channel = fs.create(file)) {
            ByteBuffer buffer = ByteBuffer.allocate(8);
            buffer.putLong(13);
            buffer.flip();
            channel.write(buffer);
        }
    }

    // when
    new LegacyLogs(fs, reader, writer).operate(FileOperation.MOVE, migrationDir, storeDir);

    // then
    assertEquals(logsInStoreDir, new HashSet<>(Arrays.asList(fs.listFiles(storeDir))));
    for (File file : logsInStoreDir) {
        try (StoreChannel channel = fs.open(file, "r")) {
            ByteBuffer buffer = ByteBuffer.allocate(8);
            channel.read(buffer);
            buffer.flip();
            assertEquals(42, buffer.getLong());
        }
    }
}
private void sendInitialState() throws IOException {
    jointCommand.clear();
    jointCommand.putLong(estimatorTicksPerControlTick * 3);
    jointCommand.putLong(0);
    jointCommand.putLong(1000);
    for (int i = 0; i < joints.size(); i++) {
        jointCommand.putDouble(0.0);
    }
    jointCommand.flip();
    while (jointCommand.hasRemaining()) {
        channel.write(jointCommand);
    }
}
protected ByteBuffer serializeHeader(final int contentSize) {
    final ByteBuffer buffer = ByteBuffer.allocate(40);
    buffer.put(getVersionInByte());
    buffer.put((byte) 0);
    buffer.putShort(getFlags());
    buffer.putLong(_seq);
    // The size here is uncompressed size, if the data is compressed.
    buffer.putInt(contentSize);
    buffer.putLong(_mgmtId);
    buffer.putLong(_agentId);
    buffer.putLong(_via);
    buffer.flip();
    return buffer;
}