@Override
 protected void parseCreateField(ParseContext context, List<Field> fields) throws IOException {
   if (!fieldType().stored()) {
     return;
   }
   byte[] value;
   if (context.parser().currentToken() == XContentParser.Token.VALUE_NULL) {
     return;
   } else {
     value = context.parser().binaryValue();
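      // compress the stored bytes when compression is enabled, the value is not already
      // compressed, and it exceeds the compression threshold (if one is set)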
     if (compress != null && compress && !CompressorFactory.isCompressed(value, 0, value.length)) {
       if (compressThreshold == -1 || value.length > compressThreshold) {
         BytesStreamOutput bStream = new BytesStreamOutput();
         StreamOutput stream = CompressorFactory.defaultCompressor().streamOutput(bStream);
         stream.writeBytes(value, 0, value.length);
         stream.close();
         value = bStream.bytes().toBytes();
       }
     }
   }
   if (value == null) {
     return;
   }
   fields.add(new Field(names.indexName(), value, fieldType));
 }
  @Override
  public BytesReference value(Object value) {
    if (value == null) {
      return null;
    }

    BytesReference bytes;
    if (value instanceof BytesRef) {
      bytes = new BytesArray((BytesRef) value);
    } else if (value instanceof BytesReference) {
      bytes = (BytesReference) value;
    } else if (value instanceof byte[]) {
      bytes = new BytesArray((byte[]) value);
    } else {
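      // otherwise interpret the value as a Base64 encoded string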
      try {
        bytes = new BytesArray(Base64.decode(value.toString()));
      } catch (IOException e) {
        throw new ElasticsearchParseException("failed to convert bytes", e);
      }
    }
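    // transparently uncompress the bytes if they were stored compressed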
    try {
      return CompressorFactory.uncompressIfNeeded(bytes);
    } catch (IOException e) {
      throw new ElasticsearchParseException("failed to decompress source", e);
    }
  }
Example No. 3
 @Override
 public IndexInput openInput(String name, IOContext context) throws IOException {
   ensureOpen();
   StoreFileMetaData metaData = filesMetadata.get(name);
   if (metaData == null) {
     throw new FileNotFoundException(name);
   }
   IndexInput in = metaData.directory().openInput(name, context);
   boolean success = false;
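   // make sure the inner input is closed if wrapping it with a compressor fails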
   try {
      // Only for backward compatibility, since we now use Lucene codec compression
     if (name.endsWith(".fdt") || name.endsWith(".tvf")) {
       Compressor compressor = CompressorFactory.compressor(in);
       if (compressor != null) {
         in = compressor.indexInput(in);
       }
     }
     success = true;
   } finally {
     if (!success) {
       IOUtils.closeWhileHandlingException(in);
     }
   }
   return in;
 }
  public void testCompressEnabled() throws Exception {
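    // mapping that enables compression of the _source field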
    String mapping =
        XContentFactory.jsonBuilder()
            .startObject()
            .startObject("type")
            .startObject("_source")
            .field("compress", true)
            .endObject()
            .endObject()
            .endObject()
            .string();

    DocumentMapper documentMapper =
        createIndex("test", settings).mapperService().documentMapperParser().parse(mapping);

    ParsedDocument doc =
        documentMapper.parse(
            "test",
            "type",
            "1",
            XContentFactory.jsonBuilder()
                .startObject()
                .field("field1", "value1")
                .field("field2", "value2")
                .endObject()
                .bytes());

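    // the stored _source should now be compressed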
    BytesRef bytes = doc.rootDoc().getBinaryValue("_source");
    assertThat(CompressorFactory.isCompressed(new BytesArray(bytes)), equalTo(true));
  }
Example No. 5
 /** Returns the bytes reference, uncompressing the source first if needed. */
 public BytesReference sourceRef() {
   try {
     this.source = CompressorFactory.uncompressIfNeeded(this.source);
     return this.source;
   } catch (IOException e) {
     throw new ElasticsearchParseException("failed to decompress source", e);
   }
 }
Example No. 6
 @Override
 public IndexInput openInput(String name, IOContext context) throws IOException {
   StoreFileMetaData metaData = filesMetadata.get(name);
   if (metaData == null) {
     throw new FileNotFoundException(name);
   }
   IndexInput in = metaData.directory().openInput(name, context);
    // Only for backward compatibility, since we now use Lucene codec compression
   if (name.endsWith(".fdt") || name.endsWith(".tvf")) {
     Compressor compressor = CompressorFactory.compressor(in);
     if (compressor != null) {
       in = compressor.indexInput(in);
     }
   }
   return in;
 }
Example No. 7
  @Override
  public void write(MetaData metaData) throws GatewayException {
    final String newMetaData = "metadata-" + (currentIndex + 1);
    CachedStreamOutput.Entry cachedEntry = CachedStreamOutput.popEntry();
    try {
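      // write the metadata as JSON into a pooled buffer, compressing it when enabled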
      StreamOutput streamOutput;
      if (compress) {
        streamOutput = cachedEntry.bytes(CompressorFactory.defaultCompressor());
      } else {
        streamOutput = cachedEntry.bytes();
      }
      XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON, streamOutput);
      builder.startObject();
      MetaData.Builder.toXContent(metaData, builder, ToXContent.EMPTY_PARAMS);
      builder.endObject();
      builder.close();
      metaDataBlobContainer.writeBlob(
          newMetaData,
          new ByteArrayInputStream(
              cachedEntry.bytes().underlyingBytes(), 0, cachedEntry.bytes().size()),
          cachedEntry.bytes().size());
    } catch (IOException e) {
      throw new GatewayException("Failed to write metadata [" + newMetaData + "]", e);
    } finally {
      CachedStreamOutput.pushEntry(cachedEntry);
    }

    currentIndex++;

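    // delete any older metadata-* blobs, keeping only the one just written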
    try {
      metaDataBlobContainer.deleteBlobsByFilter(
          new BlobContainer.BlobNameFilter() {
            @Override
            public boolean accept(String blobName) {
              return blobName.startsWith("metadata-") && !newMetaData.equals(blobName);
            }
          });
    } catch (IOException e) {
      logger.debug("Failed to delete old metadata, will do it next time", e);
    }
  }
  @Override
  public void messageReceived(ChannelHandlerContext ctx, MessageEvent e) throws Exception {
    Object m = e.getMessage();
    if (!(m instanceof ChannelBuffer)) {
      ctx.sendUpstream(e);
      return;
    }
    ChannelBuffer buffer = (ChannelBuffer) m;
    int size = buffer.getInt(buffer.readerIndex() - 4);
    transportServiceAdapter.received(size + 6);

    // we have additional bytes to read, outside of the header
    boolean hasMessageBytesToRead = (size - (NettyHeader.HEADER_SIZE - 6)) != 0;

    int markedReaderIndex = buffer.readerIndex();
    int expectedIndexReader = markedReaderIndex + size;

    // netty always copies a buffer, either in NioWorker in its read handler, where it copies to
    // a fresh buffer, or in the cumulation buffer, which is cleaned each time
    StreamInput streamIn = ChannelBufferStreamInputFactory.create(buffer, size);

    long requestId = buffer.readLong();
    byte status = buffer.readByte();
    Version version = Version.fromId(buffer.readInt());

    StreamInput wrappedStream;
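    // if the stream is marked as compressed, wrap it with a matching decompressing stream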
    if (TransportStatus.isCompress(status) && hasMessageBytesToRead && buffer.readable()) {
      Compressor compressor = CompressorFactory.compressor(buffer);
      if (compressor == null) {
        int maxToRead = Math.min(buffer.readableBytes(), 10);
        int offset = buffer.readerIndex();
        StringBuilder sb =
            new StringBuilder("stream marked as compressed, but no compressor found, first [")
                .append(maxToRead)
                .append("] content bytes out of [")
                .append(buffer.readableBytes())
                .append("] readable bytes with message size [")
                .append(size)
                .append("] ")
                .append("] are [");
        for (int i = 0; i < maxToRead; i++) {
          sb.append(buffer.getByte(offset + i)).append(",");
        }
        sb.append("]");
        throw new ElasticsearchIllegalStateException(sb.toString());
      }
      wrappedStream = CachedStreamInput.cachedHandlesCompressed(compressor, streamIn);
    } else {
      wrappedStream = CachedStreamInput.cachedHandles(streamIn);
    }
    wrappedStream.setVersion(version);

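    // dispatch either as an incoming request or as a response to an earlier request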
    if (TransportStatus.isRequest(status)) {
      String action = handleRequest(ctx.getChannel(), wrappedStream, requestId, version);
      if (buffer.readerIndex() != expectedIndexReader) {
        if (buffer.readerIndex() < expectedIndexReader) {
          logger.warn(
              "Message not fully read (request) for [{}] and action [{}], resetting",
              requestId,
              action);
        } else {
          logger.warn(
              "Message read past expected size (request) for [{}] and action [{}], resetting",
              requestId,
              action);
        }
        buffer.readerIndex(expectedIndexReader);
      }
    } else {
      TransportResponseHandler handler = transportServiceAdapter.remove(requestId);
      // ignore if it's null, the adapter logs it
      if (handler != null) {
        if (TransportStatus.isError(status)) {
          handlerResponseError(wrappedStream, handler);
        } else {
          handleResponse(ctx.getChannel(), wrappedStream, handler);
        }
      } else {
        // if it's null, skip those bytes
        buffer.readerIndex(markedReaderIndex + size);
      }
      if (buffer.readerIndex() != expectedIndexReader) {
        if (buffer.readerIndex() < expectedIndexReader) {
          logger.warn(
              "Message not fully read (response) for [{}] handler {}, error [{}], resetting",
              requestId,
              handler,
              TransportStatus.isError(status));
        } else {
          logger.warn(
              "Message read past expected size (response) for [{}] handler {}, error [{}], resetting",
              requestId,
              handler,
              TransportStatus.isError(status));
        }
        buffer.readerIndex(expectedIndexReader);
      }
    }
    wrappedStream.close();
  }
Example No. 9
  public InternalNode(Settings preparedSettings, boolean loadConfigSettings)
      throws ElasticsearchException {
    final Settings pSettings =
        settingsBuilder()
            .put(preparedSettings)
            .put(Client.CLIENT_TYPE_SETTING, CLIENT_TYPE)
            .build();
    Tuple<Settings, Environment> tuple =
        InternalSettingsPreparer.prepareSettings(pSettings, loadConfigSettings);
    tuple = new Tuple<>(TribeService.processSettings(tuple.v1()), tuple.v2());

    // The only place we can actually fake the version a node is running on:
    Version version = pSettings.getAsVersion("tests.mock.version", Version.CURRENT);

    ESLogger logger = Loggers.getLogger(Node.class, tuple.v1().get("name"));
    logger.info(
        "version[{}], pid[{}], build[{}/{}]",
        version,
        JvmInfo.jvmInfo().pid(),
        Build.CURRENT.hashShort(),
        Build.CURRENT.timestamp());

    logger.info("initializing ...");

    if (logger.isDebugEnabled()) {
      Environment env = tuple.v2();
      logger.debug(
          "using home [{}], config [{}], data [{}], logs [{}], work [{}], plugins [{}]",
          env.homeFile(),
          env.configFile(),
          Arrays.toString(env.dataFiles()),
          env.logsFile(),
          env.workFile(),
          env.pluginsFile());
    }

    this.pluginsService = new PluginsService(tuple.v1(), tuple.v2());
    this.settings = pluginsService.updatedSettings();
    // create the environment based on the finalized (processed) view of the settings
    this.environment = new Environment(this.settings());

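    // pick the default compressor implementation based on the node settings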
    CompressorFactory.configure(settings);
    final NodeEnvironment nodeEnvironment;
    try {
      nodeEnvironment = new NodeEnvironment(this.settings, this.environment);
    } catch (IOException ex) {
      throw new ElasticsearchIllegalStateException("Failed to create node environment", ex);
    }

    boolean success = false;
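    // wire up all modules and create the injector, closing the node environment on failure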
    try {
      ModulesBuilder modules = new ModulesBuilder();
      modules.add(new Version.Module(version));
      modules.add(new PageCacheRecyclerModule(settings));
      modules.add(new CircuitBreakerModule(settings));
      modules.add(new BigArraysModule(settings));
      modules.add(new PluginsModule(settings, pluginsService));
      modules.add(new SettingsModule(settings));
      modules.add(new NodeModule(this));
      modules.add(new NetworkModule());
      modules.add(new ScriptModule(settings));
      modules.add(new EnvironmentModule(environment));
      modules.add(new NodeEnvironmentModule(nodeEnvironment));
      modules.add(new ClusterNameModule(settings));
      modules.add(new ThreadPoolModule(settings));
      modules.add(new DiscoveryModule(settings));
      modules.add(new ClusterModule(settings));
      modules.add(new RestModule(settings));
      modules.add(new TransportModule(settings));
      if (settings.getAsBoolean(HTTP_ENABLED, true)) {
        modules.add(new HttpServerModule(settings));
      }
      modules.add(new RiversModule(settings));
      modules.add(new IndicesModule(settings));
      modules.add(new SearchModule());
      modules.add(new ActionModule(false));
      modules.add(new MonitorModule(settings));
      modules.add(new GatewayModule(settings));
      modules.add(new NodeClientModule());
      modules.add(new ShapeModule());
      modules.add(new PercolatorModule());
      modules.add(new ResourceWatcherModule());
      modules.add(new RepositoriesModule());
      modules.add(new TribeModule());
      modules.add(new BenchmarkModule(settings));

      injector = modules.createInjector();

      client = injector.getInstance(Client.class);
      success = true;
    } finally {
      if (!success) {
        nodeEnvironment.close();
      }
    }

    logger.info("initialized");
  }