/**
 * Because we're running with a security manager (most likely), we need to scan for the Lingo3G
 * license in ES configuration directories.
 */
private Path scanForLingo3GLicense(Environment environment, Path pluginConfigPath) {
  List<Path> licenses = new ArrayList<>();

  for (Path candidate :
      new Path[] {
        pluginConfigPath.resolve("license.xml"),
        pluginConfigPath.resolve(".license.xml"),
        environment.configFile().resolve("license.xml"),
        environment.configFile().resolve(".license.xml")
      }) {
    logger.debug(
        "Lingo3G license location scan: {} {}.",
        candidate.toAbsolutePath().normalize(),
        Files.isRegularFile(candidate) ? "(found)" : "(not found)");
    if (Files.isRegularFile(candidate)) {
      licenses.add(candidate);
    }
  }

  if (licenses.size() > 1) {
    throw new ElasticsearchException(
        "There should be exactly one Lingo3G license on scan paths: {}", licenses);
  }

  if (licenses.size() == 1) {
    return licenses.iterator().next();
  } else {
    return null;
  }
}
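/**
 * Usage sketch, not part of the original source: one way the scan result above might be consumed
 * at plugin startup. The method name {@code initLicense} and the log messages are assumptions for
 * illustration only.
 */
private void initLicense(Environment environment, Path pluginConfigPath) {
  Path license = scanForLingo3GLicense(environment, pluginConfigPath);
  if (license == null) {
    // Hypothetical handling: the scan legitimately returns null when no license file exists.
    logger.warn("No Lingo3G license found on any scan path.");
  } else {
    logger.info("Using Lingo3G license at {}", license.toAbsolutePath().normalize());
  }
}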
@Override
public void run() {
  try {
    URL url = buildPingUrl();
    if (logger.isDebugEnabled()) {
      logger.debug("Sending UDC information to {}...", url);
    }
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    conn.setConnectTimeout((int) HTTP_TIMEOUT.millis());
    conn.setReadTimeout((int) HTTP_TIMEOUT.millis());
    int responseCode = conn.getResponseCode();
    if (responseCode >= 300) {
      throw new Exception(
          String.format("%s responded with code %d", url.getHost(), responseCode));
    }
    if (logger.isDebugEnabled()) {
      BufferedReader reader = new BufferedReader(new InputStreamReader(conn.getInputStream()));
      String line = reader.readLine();
      while (line != null) {
        logger.debug(line);
        line = reader.readLine();
      }
      reader.close();
    } else {
      // Drain/close the stream so the connection can be reused.
      conn.getInputStream().close();
    }
    successCounter.incrementAndGet();
  } catch (Exception e) {
    if (logger.isDebugEnabled()) {
      logger.debug("Error sending UDC information", e);
    }
    failCounter.incrementAndGet();
  }
}
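/**
 * Scheduling sketch, not part of the original source: the ping task above is a plain
 * {@link Runnable}, so it can be driven by a standard JDK
 * {@link java.util.concurrent.ScheduledExecutorService}. The thread name and the 24-hour
 * interval are assumptions for illustration only.
 */
private ScheduledExecutorService startPinger(Runnable pingTask) {
  ScheduledExecutorService scheduler =
      Executors.newSingleThreadScheduledExecutor(
          r -> {
            Thread t = new Thread(r, "udc-pinger");
            t.setDaemon(true); // don't keep the JVM alive just for pings
            return t;
          });
  // Fixed delay (rather than fixed rate) so a slow HTTP round-trip cannot pile up pings.
  scheduler.scheduleWithFixedDelay(pingTask, 1, 24, TimeUnit.HOURS);
  return scheduler;
}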
/**
 * Start a river
 *
 * @param river the river to start
 */
public void start(T river) {
  if (logger.isDebugEnabled()) logger.debug("start({})", river);

  // We only start the river if it's flagged as started
  if (river == null || !river.isStart()) return;

  riverService.start(river, getHelper().toXContent(river));

  if (logger.isDebugEnabled()) logger.debug("/start({})", river);
}
protected String getJsonSettings(String jsonDefinition, Object... args) throws Exception {
  logger.debug("Get river setting");
  String setting = copyToStringFromClasspath(jsonDefinition);
  if (args != null) {
    setting = String.format(setting, args);
  }
  logger.debug("River setting: {}", setting);
  return setting;
}
/**
 * Stop a river
 *
 * @param river the river to stop
 */
public void stop(T river) {
  if (logger.isDebugEnabled()) logger.debug("stop({})", river);

  // We only stop the river if it's started
  if (river == null || !river.isStart()) return;

  riverService.stop(river);

  if (logger.isDebugEnabled()) logger.debug("/stop({})", river);
}
@Test
public void testRenameAttribute() throws Throwable {
  logger.debug("Start testRenameAttribute");
  try {
    logger.debug("Create river {}", getRiver());
    String script = "ctx.document.score2 = ctx.document.score; delete ctx.document.score;";
    super.createRiver(
        "/test/elasticsearch/plugin/river/mongodb/script/test-mongodb-river-with-script.json",
        getRiver(),
        String.valueOf(getMongoPort1()),
        String.valueOf(getMongoPort2()),
        String.valueOf(getMongoPort3()),
        getDatabase(),
        getCollection(),
        script,
        getIndex(),
        getDatabase());

    String mongoDocument =
        copyToStringFromClasspath(
            "/test/elasticsearch/plugin/river/mongodb/script/test-simple-mongodb-document.json");
    DBObject dbObject = (DBObject) JSON.parse(mongoDocument);
    WriteResult result = mongoCollection.insert(dbObject);
    Thread.sleep(wait);
    String id = dbObject.get("_id").toString();
    logger.info("WriteResult: {}", result.toString());
    refreshIndex();

    ActionFuture<IndicesExistsResponse> response =
        getNode().client().admin().indices().exists(new IndicesExistsRequest(getIndex()));
    assertThat(response.actionGet().isExists(), equalTo(true));

    SearchResponse sr =
        getNode()
            .client()
            .prepareSearch(getIndex())
            .setQuery(fieldQuery("_id", id))
            .execute()
            .actionGet();
    logger.debug("SearchResponse {}", sr.toString());
    long totalHits = sr.getHits().getTotalHits();
    logger.debug("TotalHits: {}", totalHits);
    assertThat(totalHits, equalTo(1L));
    // The script renamed "score" to "score2", so only the new attribute must be present.
    assertThat(sr.getHits().getHits()[0].sourceAsMap().containsKey("score2"), equalTo(true));

    mongoCollection.remove(dbObject);
  } catch (Throwable t) {
    logger.error("testRenameAttribute failed.", t);
    throw t;
  } finally {
    super.deleteRiver();
    super.deleteIndex();
  }
}
/**
 * Update (or add) a river
 *
 * @param river the river to index
 */
public void update(T river) {
  if (logger.isDebugEnabled()) logger.debug("update({})", river);

  XContentBuilder xb = getHelper().toXContent(river);
  client
      .prepareIndex(
          SMDSearchProperties.ES_META_INDEX, SMDSearchProperties.ES_META_RIVERS, river.getId())
      .setSource(xb)
      .setRefresh(true)
      .execute()
      .actionGet();

  if (logger.isDebugEnabled()) logger.debug("/update({})", river);
}
@Override
public void run() {
  if (logger.isDebugEnabled()) {
    logger.debug("Create task manager thread.");
  }

  do {
    logger.info("TaskManager: current task index: {}", currentTaskIndex);
    try {
      String output = currentTask.Run();
      logger.info("Task [{}] output: {}", currentTask.id(), output);
    } catch (IOException ex) {
      logger.error("TaskManager: IOException", ex);
    } catch (InterruptedException ex) {
      logger.error("TaskManager: InterruptedException", ex);
    }
    currentTask = GetNextTask();
  } while (null != currentTask);

  // All tasks done: remove this river's mapping from the _river index.
  DeleteMappingRequest req = new DeleteMappingRequest("_river");
  req.type(river.riverName().name());
  DeleteMappingResponse resp = client.admin().indices().deleteMapping(req).actionGet();
  logger.info("TaskManager: delete request: {}", resp.toString());
}
@Override
@SuppressWarnings("unchecked")
public synchronized void write(ArchivePacket packet) throws IOException {
  if (!isOpen()) {
    throw new IOException("not open");
  }
  if (out == null) {
    throw new IOException("no output stream found");
  }
  if (packet == null || packet.payload() == null) {
    throw new IOException("no payload to write for entry");
  }
  byte[] buf = packet.payload().toString().getBytes();
  String name = ArchiveUtils.encodeArchiveEntryName(packet);
  ArchiveEntry entry = out.newArchiveEntry();
  entry.setName(name);
  entry.setLastModified(new Date());
  entry.setEntrySize(buf.length);
  out.putArchiveEntry(entry);
  out.write(buf);
  out.closeArchiveEntry();
  packetCounter++;
  if (watcher.getBytesToTransfer() != 0
      && watcher.getBytesTransferred() > watcher.getBytesToTransfer()) {
    logger.debug(
        "bytes watcher: transferred = {}, rate {}",
        watcher.getBytesTransferred(),
        watcher.getRecentByteRatePerSecond());
    // Size threshold crossed: roll over to a fresh archive and reset the byte counters.
    switchToNextArchive();
    watcher.resetWatcher();
  }
}
/**
 * Detects whether we're connecting to a Found Elasticsearch cluster (using pre-configured host
 * suffixes) and adds an SSL handler at the beginning of the pipeline if we're connecting to an
 * SSL endpoint (using a list of pre-configured ports).
 */
@Override
public void connectRequested(final ChannelHandlerContext ctx, final ChannelStateEvent e)
    throws Exception {
  if (e.getValue() instanceof InetSocketAddress) {
    InetSocketAddress inetSocketAddress = (InetSocketAddress) e.getValue();
    for (String suffix : hostSuffixes) {
      isFoundCluster = isFoundCluster || inetSocketAddress.getHostString().endsWith(suffix);
    }
    if (isFoundCluster) {
      for (int sslPort : sslPorts) {
        if (inetSocketAddress.getPort() == sslPort) {
          logger.debug(
              "Enabling SSL on transport layer with unsafeAllowSelfSigned=[{}].",
              unsafeAllowSelfSigned);
          FoundSSLHandler handler =
              FoundSSLUtils.getSSLHandler(unsafeAllowSelfSigned, inetSocketAddress);
          ctx.getPipeline().addFirst("ssl", handler);
          break;
        }
      }
    } else {
      // Not a Found cluster: this handler has nothing to do, remove it from the pipeline.
      ctx.getPipeline().remove(this);
    }
  }
  super.connectRequested(ctx, e);
}
private boolean handleRevision1Response(ChannelHandlerContext ctx, int payloadLength)
    throws Exception {
  int code = buffered.readInt();
  int descriptionLength = buffered.readInt();
  byte[] descBytes = new byte[descriptionLength];
  buffered.readBytes(descBytes, 0, descBytes.length);
  String description = new String(descBytes, StandardCharsets.UTF_8);

  logger.debug(
      "Decoded payload with length:[{}], code:[{}], descriptionLength:[{}], description:[{}] on connection [{}]",
      payloadLength,
      code,
      descriptionLength,
      description,
      ctx.getChannel().getLocalAddress());

  if (200 <= code && code <= 299) {
    logger.info(
        "Connected to Found Elasticsearch: [{}]: [{}] on connection [{}]",
        code,
        description,
        ctx.getChannel().getLocalAddress());
    return true;
  } else {
    logger.error(
        "Unable to connect to Found Elasticsearch: [{}]: [{}] on connection [{}]",
        code,
        description,
        ctx.getChannel().getLocalAddress());
    return false;
  }
}
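/*
 * Wire layout of the revision 1 response consumed above (derived from the reads in
 * handleRevision1Response; added here as documentation, not from the original source):
 *
 *   int    status code (HTTP-like; 2xx means the connection was accepted)
 *   int    description length, in bytes
 *   byte[] UTF-8 encoded description
 */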
@Override
public BytesRef writeToBytes() {
  long start = System.nanoTime();
  int size = set.size();

  BytesRef bytes = new BytesRef(new byte[HEADER_SIZE + (int) bytesUsed.get()]);

  // Encode encoding type
  Bytes.writeInt(bytes, this.getEncoding().ordinal());

  // Encode flag
  bytes.bytes[bytes.offset++] = (byte) (this.isPruned() ? 1 : 0);

  // Encode size of the set
  Bytes.writeInt(bytes, size);

  // Encode longs
  BytesRef reusable = new BytesRef();
  for (int i = 0; i < this.set.size(); i++) {
    this.set.get(i, reusable);
    Bytes.writeBytesRef(reusable, bytes);
  }

  logger.debug(
      "Serialized {} terms - took {} ms", this.size(), (System.nanoTime() - start) / 1000000);

  bytes.length = bytes.offset;
  bytes.offset = 0;
  return bytes;
}
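/*
 * Serialized layout produced by writeToBytes() above (derived from the calls it makes; added
 * here as documentation, not from the original source):
 *
 *   int    encoding type ordinal        (Bytes.writeInt)
 *   byte   pruned flag, 1 or 0
 *   int    number of terms in the set   (Bytes.writeInt)
 *   n x    term bytes                   (Bytes.writeBytesRef)
 *
 * A reader must consume the fields in exactly this order.
 */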
private void add(String name, List<KnapsackState> values, KnapsackState targetValue)
    throws IOException {
  logger.debug("add: {} -> {}", name, values);
  put(
      name,
      generate(ImmutableList.<KnapsackState>builder().addAll(values).add(targetValue).build()));
}
private List<KnapsackState> get(String name) throws IOException {
  ImmutableList.Builder<KnapsackState> builder = ImmutableList.builder();
  try {
    logger.debug("get knapsack states: {}", name);
    final Client client = injector.getInstance(Client.class);
    createIndexIfNotExist(client);
    GetResponse getResponse =
        client.prepareGet(INDEX_NAME, MAPPING_NAME, name).execute().actionGet();
    if (!getResponse.isExists()) {
      return builder.build();
    }
    XContentParser parser = xContent(JSON).createParser(getResponse.getSourceAsBytes());
    // Skip forward to the start of the state array.
    while (parser.nextToken() != START_ARRAY) {
      // forward
    }
    while (parser.nextToken() != END_ARRAY) {
      KnapsackState state = new KnapsackState();
      builder.add(state.fromXContent(parser));
    }
    return builder.build();
  } catch (Throwable t) {
    logger.error("get settings failed", t);
    return null;
  }
}
public ACLRequest(RestRequest request, RestChannel channel) {
  this(
      request.uri(),
      getAddress(channel),
      request.header("X-Api-Key"),
      request.header("Auth"),
      request.content().length(),
      request.method(),
      getXForwardedForHeader(request));
  ESLogger logger = ESLoggerFactory.getLogger(ACLRequest.class.getName());
  logger.debug("Headers:");
  for (Map.Entry<String, String> header : request.headers()) {
    logger.debug("{}={}", header.getKey(), header.getValue());
  }
}
@Override
public void run() {
  try {
    socket = new DatagramSocket(port);
    while (!isClosed) {
      if (socket.isClosed()) return;
      byte[] buf = new byte[256];
      // Receive request
      DatagramPacket packet = new DatagramPacket(buf, buf.length);
      socket.receive(packet);
      ByteArrayInputStream bis = new ByteArrayInputStream(buf, 0, packet.getLength());
      BufferedReader in = new BufferedReader(new InputStreamReader(bis));
      String msg;
      while ((msg = in.readLine()) != null) {
        logger.debug("Read from socket: " + msg);
        content.add(msg.trim());
      }
      in.close();
    }
  } catch (IOException e) {
    logger.error("Error reading from datagram socket", e);
  }
}
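/**
 * Test sketch, not part of the original source: pushing a line into the UDP listener above using
 * only JDK classes. The helper name {@code sendTestDatagram} is an assumption for illustration.
 */
private static void sendTestDatagram(int port, String message) throws IOException {
  byte[] payload = message.getBytes(StandardCharsets.UTF_8);
  try (DatagramSocket sender = new DatagramSocket()) {
    // One datagram per message; the listener reads the payload line by line.
    sender.send(
        new DatagramPacket(payload, payload.length, InetAddress.getLoopbackAddress(), port));
  }
}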
/**
 * Remove a river
 *
 * @param river the river to remove
 */
public void remove(T river) {
  if (logger.isDebugEnabled()) logger.debug("remove({})", river);

  // We stop the river if it's running
  if (riverService.checkState(river)) {
    riverService.stop(river);
  }

  // We remove the river from the database
  client
      .prepareDelete(
          SMDSearchProperties.ES_META_INDEX, SMDSearchProperties.ES_META_RIVERS, river.getId())
      .execute()
      .actionGet();

  if (logger.isDebugEnabled()) logger.debug("/remove({})", river);
}
@Test
public void testIgnoreScript() throws Throwable {
  logger.debug("Start testIgnoreScript");
  try {
    logger.debug("Create river {}", getRiver());
    String script = "ctx.ignore = true;";
    super.createRiver(
        "/test/elasticsearch/plugin/river/mongodb/script/test-mongodb-river-with-script.json",
        getRiver(),
        String.valueOf(getMongoPort1()),
        String.valueOf(getMongoPort2()),
        String.valueOf(getMongoPort3()),
        getDatabase(),
        getCollection(),
        script,
        getIndex(),
        getDatabase());

    String mongoDocument =
        copyToStringFromClasspath(
            "/test/elasticsearch/plugin/river/mongodb/script/test-simple-mongodb-document.json");
    DBObject dbObject = (DBObject) JSON.parse(mongoDocument);
    WriteResult result = mongoCollection.insert(dbObject);
    Thread.sleep(wait);
    logger.info("WriteResult: {}", result.toString());
    refreshIndex();

    ActionFuture<IndicesExistsResponse> response =
        getNode().client().admin().indices().exists(new IndicesExistsRequest(getIndex()));
    assertThat(response.actionGet().isExists(), equalTo(true));

    CountResponse countResponse = getNode().client().count(countRequest(getIndex())).actionGet();
    logger.info("Document count: {}", countResponse.getCount());
    // The script sets ctx.ignore, so the document must not have been indexed.
    assertThat(countResponse.getCount(), equalTo(0L));

    mongoCollection.remove(dbObject);
  } catch (Throwable t) {
    logger.error("testIgnoreScript failed.", t);
    throw t;
  } finally {
    super.deleteRiver();
    super.deleteIndex();
  }
}
@Override
public BulkNodeClient flushIngest() {
  if (closed) {
    throw new ElasticsearchIllegalStateException("client is closed");
  }
  logger.debug("flushing bulk processor");
  BulkProcessorHelper.flush(bulkProcessor);
  return this;
}
protected long getCount(final String index, final String type) {
  logger.debug("getCount()");
  esSetup.client().admin().indices().refresh(new RefreshRequest()).actionGet();
  final CountResponse count =
      esSetup.client().count(new CountRequest(index).types(type)).actionGet();
  return count.getCount();
}
private void remove(final String name) {
  try {
    logger.debug("remove: {}", name);
    final Client client = injector.getInstance(Client.class);
    createIndexIfNotExist(client);
    client.prepareDelete(INDEX_NAME, MAPPING_NAME, name).setRefresh(true).execute().actionGet();
  } catch (Throwable t) {
    logger.error("remove failed", t);
  }
}
public void initCities() {
  if (log.isDebugEnabled()) {
    log.debug("Initializing all registry cities");
  }
  // File bulkFile = createCitiesBulkFile2();
  // Insert cities
  // bulkFromFile(bulkFile, INDEX_NAME, INDEX_TYPE);
}
private void shutdownMongoInstances() {
  logger.debug("*** shutdownMongoInstances ***");
  mongo.close();
  try {
    logger.debug("Start shutdown {}", mongod1);
    mongod1.stop();
  } catch (Throwable t) {
    // Ignore shutdown failures; this is best-effort cleanup.
  }
  try {
    logger.debug("Start shutdown {}", mongod2);
    mongod2.stop();
  } catch (Throwable t) {
    // Ignore shutdown failures; this is best-effort cleanup.
  }
  try {
    logger.debug("Start shutdown {}", mongod3);
    mongod3.stop();
  } catch (Throwable t) {
    // Ignore shutdown failures; this is best-effort cleanup.
  }
}
private void remove(String name, List<KnapsackState> values, KnapsackState targetValue)
    throws IOException {
  logger.debug("remove: {} -> {}", name, values);
  ImmutableList.Builder<KnapsackState> updatedValues = ImmutableList.builder();
  for (KnapsackState value : values) {
    if (!value.equals(targetValue)) {
      updatedValues.add(value);
    }
  }
  put(name, generate(updatedValues.build()));
}
@Override
public synchronized void shutdown() {
  try {
    if (bulkProcessor != null) {
      logger.debug("closing bulk processor...");
      bulkProcessor.close();
    }
    if (metric != null && metric.indices() != null && !metric.indices().isEmpty()) {
      logger.debug("stopping bulk mode for indices {}...", metric.indices());
      for (String index : ImmutableSet.copyOf(metric.indices())) {
        stopBulk(index);
      }
    }
    logger.debug("shutting down...");
    client.close();
    logger.debug("shutting down completed");
  } catch (Exception e) {
    logger.error(e.getMessage(), e);
  }
}
protected void deleteIndex(String name) {
  logger.info("Delete index [{}]", name);
  node.client().admin().indices().delete(deleteIndexRequest(name)).actionGet();

  logger.debug("Running Cluster Health");
  ClusterHealthResponse clusterHealth =
      node.client()
          .admin()
          .cluster()
          .health(clusterHealthRequest().waitForGreenStatus())
          .actionGet();
  logger.info("Done Cluster Health, status {}", clusterHealth.getStatus());
}
protected void deleteRiver(String name) {
  logger.info("Delete river [{}]", name);
  DeleteMappingRequest deleteMapping = new DeleteMappingRequest("_river").type(name);
  node.client().admin().indices().deleteMapping(deleteMapping).actionGet();

  logger.debug("Running Cluster Health");
  ClusterHealthResponse clusterHealth =
      node.client()
          .admin()
          .cluster()
          .health(clusterHealthRequest().waitForGreenStatus())
          .actionGet();
  logger.info("Done Cluster Health, status {}", clusterHealth.getStatus());
}
/**
 * Get the river definition by its name
 *
 * @param name the river name
 * @return the river, or null if it does not exist
 */
public T get(String name) {
  if (logger.isDebugEnabled()) logger.debug("get({})", name);

  T river = null;
  if (name != null) {
    GetRequestBuilder rb = new GetRequestBuilder(client, SMDSearchProperties.ES_META_INDEX);
    rb.setType(SMDSearchProperties.ES_META_RIVERS);
    rb.setId(name);
    try {
      GetResponse response = rb.execute().actionGet();
      if (response.isExists()) {
        river = getHelper().toRiver(buildInstance(), response.getSourceAsMap());
      }
    } catch (IndexMissingException e) {
      // The index does not exist, so the river does not exist either.
    }
  }

  if (logger.isDebugEnabled()) logger.debug("/get({})={}", name, river);
  return river;
}
/**
 * Get all active rivers
 *
 * @return the list of rivers managed by this service
 */
public List<T> get() {
  if (logger.isDebugEnabled()) logger.debug("get()");

  List<T> rivers = new ArrayList<T>();
  SearchRequestBuilder srb = new SearchRequestBuilder(client);
  try {
    srb.setIndices(SMDSearchProperties.ES_META_INDEX);
    srb.setTypes(SMDSearchProperties.ES_META_RIVERS);
    SearchResponse response = srb.execute().actionGet();

    if (response.getHits().totalHits() > 0) {
      for (int i = 0; i < response.getHits().hits().length; i++) {
        T river = buildInstance();
        SearchHit hit = response.getHits().hits()[i];

        // We only manage rivers of type getHelper().type()
        river = getHelper().toRiver(river, hit.sourceAsMap());
        if (river.getType().equals(getHelper().type())) {
          // For each river, we check whether it is started or not
          river.setStart(riverService.checkState(river));
          rivers.add(river);
        }
      }
    }
  } catch (IndexMissingException e) {
    // That's a common use case: we started with an empty index.
  }

  if (logger.isDebugEnabled()) logger.debug("/get()={}", rivers);
  return rivers;
}
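/**
 * Usage sketch, not part of the original source: dumping every river known to this service
 * together with its running state, using only the accessors already visible above. The method
 * name {@code logRivers} is an assumption for illustration.
 */
public void logRivers() {
  for (T river : get()) {
    // isStart() reflects the state set via riverService.checkState(river) in get().
    logger.info("river [{}] started [{}]", river.getId(), river.isStart());
  }
}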
private void put(final String name, final XContentBuilder builder) {
  try {
    logger.debug("put knapsack state: {} -> {}", name, builder.string());
    final Client client = injector.getInstance(Client.class);
    createIndexIfNotExist(client);
    client
        .prepareIndex(INDEX_NAME, MAPPING_NAME, name)
        .setSource(builder)
        .setRefresh(true)
        .execute()
        .actionGet();
  } catch (Throwable t) {
    logger.error("update settings failed", t);
  }
}