private void updateNodes(MemoryPoolAssignmentsRequest assignments)
{
    ImmutableSet.Builder<Node> builder = ImmutableSet.builder();
    Set<Node> aliveNodes = builder
            .addAll(nodeManager.getNodes(ACTIVE))
            .addAll(nodeManager.getNodes(SHUTTING_DOWN))
            .build();

    ImmutableSet<String> aliveNodeIds = aliveNodes.stream()
            .map(Node::getNodeIdentifier)
            .collect(toImmutableSet());

    // Remove nodes that don't exist anymore
    // Make a copy to materialize the set difference
    Set<String> deadNodes = ImmutableSet.copyOf(difference(nodes.keySet(), aliveNodeIds));
    nodes.keySet().removeAll(deadNodes);

    // Add new nodes
    for (Node node : aliveNodes) {
        if (!nodes.containsKey(node.getNodeIdentifier())) {
            nodes.put(node.getNodeIdentifier(), new RemoteNodeMemory(
                    httpClient,
                    memoryInfoCodec,
                    assignmentsRequestJsonCodec,
                    locationFactory.createMemoryInfoLocation(node)));
        }
    }

    // Schedule refresh
    for (RemoteNodeMemory node : nodes.values()) {
        node.asyncRefresh(assignments);
    }
}
@Override
public Connector create(String connectorId, Map<String, String> config)
{
    try {
        // Assemble the Guice modules for the connector, binding the services supplied by the engine
        Bootstrap app = new Bootstrap(
                new MBeanModule(),
                binder -> {
                    CurrentNodeId currentNodeId = new CurrentNodeId(nodeManager.getCurrentNode().getNodeIdentifier());
                    MBeanServer mbeanServer = new RebindSafeMBeanServer(getPlatformMBeanServer());
                    binder.bind(MBeanServer.class).toInstance(mbeanServer);
                    binder.bind(CurrentNodeId.class).toInstance(currentNodeId);
                    binder.bind(NodeManager.class).toInstance(nodeManager);
                    binder.bind(PageSorter.class).toInstance(pageSorter);
                    binder.bind(BlockEncodingSerde.class).toInstance(blockEncodingSerde);
                    binder.bind(TypeManager.class).toInstance(typeManager);
                },
                module,
                new StorageModule(),
                new RaptorModule(connectorId));

        Injector injector = app
                .strictConfig()
                .doNotInitializeLogging()
                .setRequiredConfigurationProperties(config)
                .setOptionalConfigurationProperties(optionalConfig)
                .initialize();

        return injector.getInstance(RaptorConnector.class);
    }
    catch (Exception e) {
        throw Throwables.propagate(e);
    }
}
@Override
public ConnectorSplitSource getSplits(
        ConnectorTransactionHandle transactionHandle,
        ConnectorSession session,
        ConnectorTableLayoutHandle layoutHandle)
{
    AtopTableLayoutHandle handle = checkType(layoutHandle, AtopTableLayoutHandle.class, "layoutHandle");
    AtopTableHandle table = handle.getTableHandle();

    List<ConnectorSplit> splits = new ArrayList<>();
    DateTime end = DateTime.now().withZone(timeZone);
    for (Node node : nodeManager.getActiveDatasourceNodes(connectorId.getId())) {
        // Generate one split per day within the history window for each node,
        // keeping only the days that overlap the layout's time constraints
        DateTime start = end.minusDays(maxHistoryDays - 1).withTimeAtStartOfDay();
        while (start.isBefore(end)) {
            DateTime splitEnd = start.withTime(23, 59, 59, 999);
            Domain splitDomain = Domain.create(
                    ValueSet.ofRanges(Range.range(TIMESTAMP, start.getMillis(), true, splitEnd.getMillis(), true)),
                    false);
            if (handle.getStartTimeConstraint().overlaps(splitDomain) && handle.getEndTimeConstraint().overlaps(splitDomain)) {
                splits.add(new AtopSplit(table.getTable(), node.getHostAndPort(), start));
            }
            start = start.plusDays(1).withTimeAtStartOfDay();
        }
    }

    return new FixedSplitSource(connectorId.getId(), splits);
}
@Override
public ConnectorSplitSource getSplits(ConnectorSession session, ConnectorTableLayoutHandle layout)
{
    TpchTableHandle tableHandle = checkType(layout, TpchTableLayoutHandle.class, "layout").getTable();

    Set<Node> nodes = nodeManager.getActiveDatasourceNodes(connectorId);
    checkState(!nodes.isEmpty(), "No TPCH nodes available");

    int totalParts = nodes.size() * splitsPerNode;
    int partNumber = 0;

    // Divide the table into totalParts parts, assigning splitsPerNode consecutive parts to each available node
    ImmutableList.Builder<ConnectorSplit> splits = ImmutableList.builder();
    for (Node node : nodes) {
        for (int i = 0; i < splitsPerNode; i++) {
            splits.add(new TpchSplit(tableHandle, partNumber, totalParts, ImmutableList.of(node.getHostAndPort())));
            partNumber++;
        }
    }

    return new FixedSplitSource(connectorId, splits.build());
}
@Override
public URI createLocalTaskLocation(TaskId taskId)
{
    return createTaskLocation(nodeManager.getCurrentNode(), taskId);
}