protected void messageReceived(
    byte[] data,
    String action,
    LocalTransport sourceTransport,
    Version version,
    @Nullable final Long sendRequestId) {
  Transports.assertTransportThread();
  try {
    transportServiceAdapter.received(data.length);
    StreamInput stream = StreamInput.wrap(data);
    stream.setVersion(version);

    long requestId = stream.readLong();
    byte status = stream.readByte();
    boolean isRequest = TransportStatus.isRequest(status);
    if (isRequest) {
      ThreadContext threadContext = threadPool.getThreadContext();
      threadContext.readHeaders(stream);
      handleRequest(stream, requestId, data.length, sourceTransport, version);
    } else {
      final TransportResponseHandler handler =
          transportServiceAdapter.onResponseReceived(requestId);
      // ignore if it's null, the adapter logs it
      if (handler != null) {
        if (TransportStatus.isError(status)) {
          handleResponseError(stream, handler);
        } else {
          handleResponse(stream, sourceTransport, handler);
        }
      }
    }
  } catch (Throwable e) {
    if (sendRequestId != null) {
      TransportResponseHandler handler =
          sourceTransport.transportServiceAdapter.onResponseReceived(sendRequestId);
      if (handler != null) {
        RemoteTransportException error =
            new RemoteTransportException(nodeName(), localAddress, action, e);
        sourceTransport
            .workers()
            .execute(
                () -> {
                  ThreadContext threadContext = sourceTransport.threadPool.getThreadContext();
                  try (ThreadContext.StoredContext ignore = threadContext.stashContext()) {
                    sourceTransport.handleException(handler, error);
                  }
                });
      }
    } else {
      logger.warn("Failed to receive message for action [{}]", e, action);
    }
  }
}
@Test
public void testSerialization() throws Exception {
  int iterations = randomIntBetween(5, 20);
  for (int i = 0; i < iterations; i++) {
    IndicesOptions indicesOptions =
        IndicesOptions.fromOptions(
            randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean());
    ClusterStateRequest clusterStateRequest =
        new ClusterStateRequest()
            .routingTable(randomBoolean())
            .metaData(randomBoolean())
            .nodes(randomBoolean())
            .blocks(randomBoolean())
            .indices("testindex", "testindex2")
            .indicesOptions(indicesOptions);

    Version testVersion =
        VersionUtils.randomVersionBetween(
            random(), Version.CURRENT.minimumCompatibilityVersion(), Version.CURRENT);
    BytesStreamOutput output = new BytesStreamOutput();
    output.setVersion(testVersion);
    clusterStateRequest.writeTo(output);

    StreamInput streamInput = StreamInput.wrap(output.bytes());
    streamInput.setVersion(testVersion);
    ClusterStateRequest deserializedCSRequest = new ClusterStateRequest();
    deserializedCSRequest.readFrom(streamInput);

    assertThat(deserializedCSRequest.routingTable(), equalTo(clusterStateRequest.routingTable()));
    assertThat(deserializedCSRequest.metaData(), equalTo(clusterStateRequest.metaData()));
    assertThat(deserializedCSRequest.nodes(), equalTo(clusterStateRequest.nodes()));
    assertThat(deserializedCSRequest.blocks(), equalTo(clusterStateRequest.blocks()));
    assertThat(deserializedCSRequest.indices(), equalTo(clusterStateRequest.indices()));

    if (testVersion.onOrAfter(Version.V_1_5_0)) {
      assertOptionsMatch(
          deserializedCSRequest.indicesOptions(), clusterStateRequest.indicesOptions());
    } else {
      // versions before V_1_5_0 use IndicesOptions.lenientExpandOpen()
      assertOptionsMatch(
          deserializedCSRequest.indicesOptions(), IndicesOptions.lenientExpandOpen());
    }
  }
}
@Override
public void messageReceived(ChannelHandlerContext ctx, MessageEvent e) throws Exception {
  Object m = e.getMessage();
  if (!(m instanceof ChannelBuffer)) {
    ctx.sendUpstream(e);
    return;
  }
  ChannelBuffer buffer = (ChannelBuffer) m;
  int size = buffer.getInt(buffer.readerIndex() - 4);
  transportServiceAdapter.received(size + 6);

  // we have additional bytes to read, outside of the header
  boolean hasMessageBytesToRead = (size - (NettyHeader.HEADER_SIZE - 6)) != 0;

  int markedReaderIndex = buffer.readerIndex();
  int expectedIndexReader = markedReaderIndex + size;

  // netty always copies a buffer, either in NioWorker in its read handler, where it copies to a
  // fresh buffer, or in the cumulation buffer, which is cleaned each time
  StreamInput streamIn = ChannelBufferStreamInputFactory.create(buffer, size);

  long requestId = buffer.readLong();
  byte status = buffer.readByte();
  Version version = Version.fromId(buffer.readInt());

  StreamInput wrappedStream;
  if (TransportStatus.isCompress(status) && hasMessageBytesToRead && buffer.readable()) {
    Compressor compressor = CompressorFactory.compressor(buffer);
    if (compressor == null) {
      int maxToRead = Math.min(buffer.readableBytes(), 10);
      int offset = buffer.readerIndex();
      StringBuilder sb =
          new StringBuilder("stream marked as compressed, but no compressor found, first [")
              .append(maxToRead)
              .append("] content bytes out of [")
              .append(buffer.readableBytes())
              .append("] readable bytes with message size [")
              .append(size)
              .append("] are [");
      for (int i = 0; i < maxToRead; i++) {
        sb.append(buffer.getByte(offset + i)).append(",");
      }
      sb.append("]");
      throw new ElasticsearchIllegalStateException(sb.toString());
    }
    wrappedStream = CachedStreamInput.cachedHandlesCompressed(compressor, streamIn);
  } else {
    wrappedStream = CachedStreamInput.cachedHandles(streamIn);
  }
  wrappedStream.setVersion(version);

  if (TransportStatus.isRequest(status)) {
    String action = handleRequest(ctx.getChannel(), wrappedStream, requestId, version);
    if (buffer.readerIndex() != expectedIndexReader) {
      if (buffer.readerIndex() < expectedIndexReader) {
        logger.warn(
            "Message not fully read (request) for [{}] and action [{}], resetting",
            requestId,
            action);
      } else {
        logger.warn(
            "Message read past expected size (request) for [{}] and action [{}], resetting",
            requestId,
            action);
      }
      buffer.readerIndex(expectedIndexReader);
    }
  } else {
    TransportResponseHandler handler = transportServiceAdapter.remove(requestId);
    // ignore if it's null, the adapter logs it
    if (handler != null) {
      if (TransportStatus.isError(status)) {
        handlerResponseError(wrappedStream, handler);
      } else {
        handleResponse(ctx.getChannel(), wrappedStream, handler);
      }
    } else {
      // if it's null, skip those bytes
      buffer.readerIndex(markedReaderIndex + size);
    }
    if (buffer.readerIndex() != expectedIndexReader) {
      if (buffer.readerIndex() < expectedIndexReader) {
        logger.warn(
            "Message not fully read (response) for [{}] handler {}, error [{}], resetting",
            requestId,
            handler,
            TransportStatus.isError(status));
      } else {
        logger.warn(
            "Message read past expected size (response) for [{}] handler {}, error [{}], resetting",
            requestId,
            handler,
            TransportStatus.isError(status));
      }
      buffer.readerIndex(expectedIndexReader);
    }
  }
  wrappedStream.close();
}
public void testSerialization() throws IOException {
  MultiGetRequest multiGetRequest = new MultiGetRequest();
  if (randomBoolean()) {
    multiGetRequest.preference(randomAsciiOfLength(randomIntBetween(1, 10)));
  }
  if (randomBoolean()) {
    multiGetRequest.realtime(false);
  }
  if (randomBoolean()) {
    multiGetRequest.refresh(true);
  }
  multiGetRequest.ignoreErrorsOnGeneratedFields(randomBoolean());

  MultiGetShardRequest multiGetShardRequest =
      new MultiGetShardRequest(multiGetRequest, "index", 0);
  int numItems = iterations(10, 30);
  for (int i = 0; i < numItems; i++) {
    MultiGetRequest.Item item =
        new MultiGetRequest.Item(
            "alias-" + randomAsciiOfLength(randomIntBetween(1, 10)), "type", "id-" + i);
    if (randomBoolean()) {
      int numFields = randomIntBetween(1, 5);
      String[] fields = new String[numFields];
      for (int j = 0; j < fields.length; j++) {
        fields[j] = randomAsciiOfLength(randomIntBetween(1, 10));
      }
      item.fields(fields);
    }
    if (randomBoolean()) {
      item.version(randomIntBetween(1, Integer.MAX_VALUE));
      item.versionType(randomFrom(VersionType.values()));
    }
    if (randomBoolean()) {
      item.fetchSourceContext(new FetchSourceContext(randomBoolean()));
    }
    multiGetShardRequest.add(0, item);
  }

  BytesStreamOutput out = new BytesStreamOutput();
  out.setVersion(randomVersion(random()));
  multiGetShardRequest.writeTo(out);

  StreamInput in = StreamInput.wrap(out.bytes());
  in.setVersion(out.getVersion());
  MultiGetShardRequest multiGetShardRequest2 = new MultiGetShardRequest();
  multiGetShardRequest2.readFrom(in);

  assertThat(multiGetShardRequest2.index(), equalTo(multiGetShardRequest.index()));
  assertThat(multiGetShardRequest2.preference(), equalTo(multiGetShardRequest.preference()));
  assertThat(multiGetShardRequest2.realtime(), equalTo(multiGetShardRequest.realtime()));
  assertThat(multiGetShardRequest2.refresh(), equalTo(multiGetShardRequest.refresh()));
  assertThat(
      multiGetShardRequest2.ignoreErrorsOnGeneratedFields(),
      equalTo(multiGetShardRequest.ignoreErrorsOnGeneratedFields()));
  assertThat(multiGetShardRequest2.items.size(), equalTo(multiGetShardRequest.items.size()));
  for (int i = 0; i < multiGetShardRequest2.items.size(); i++) {
    MultiGetRequest.Item item = multiGetShardRequest.items.get(i);
    MultiGetRequest.Item item2 = multiGetShardRequest2.items.get(i);
    assertThat(item2.index(), equalTo(item.index()));
    assertThat(item2.type(), equalTo(item.type()));
    assertThat(item2.id(), equalTo(item.id()));
    assertThat(item2.fields(), equalTo(item.fields()));
    assertThat(item2.version(), equalTo(item.version()));
    assertThat(item2.versionType(), equalTo(item.versionType()));
    assertThat(item2.fetchSourceContext(), equalTo(item.fetchSourceContext()));
  }
  assertThat(multiGetShardRequest2.indices(), equalTo(multiGetShardRequest.indices()));
  assertThat(
      multiGetShardRequest2.indicesOptions(), equalTo(multiGetShardRequest.indicesOptions()));
}