public CounterStat getOutputPositions()
{
    CounterStat stat = new CounterStat();
    for (PipelineContext pipelineContext : pipelineContexts) {
        if (pipelineContext.isOutputPipeline()) {
            stat.merge(pipelineContext.getOutputPositions());
        }
    }
    return stat;
}

public CounterStat getInputDataSize()
{
    CounterStat stat = new CounterStat();
    for (PipelineContext pipelineContext : pipelineContexts) {
        if (pipelineContext.isInputPipeline()) {
            stat.merge(pipelineContext.getInputDataSize());
        }
    }
    return stat;
}

public boolean tryReserveMemory(long bytes)
{
    if (pipelineContext.tryReserveMemory(bytes)) {
        memoryReservation.getAndAdd(bytes);
        return true;
    }
    return false;
}
private DriverSplitRunner createDriverRunner(@Nullable ScheduledSplit partitionedSplit)
{
    pendingCreation.incrementAndGet();
    // create the driver context immediately so that the driver's existence is recorded in the stats;
    // the number of drivers is used to balance work across nodes
    DriverContext driverContext = pipelineContext.addDriverContext();
    return new DriverSplitRunner(this, driverContext, partitionedSplit);
}
public void freeMemory(long bytes)
{
    if (bytes == 0) {
        return;
    }
    checkArgument(bytes >= 0, "bytes is negative");
    checkArgument(bytes <= memoryReservation.get(), "tried to free more memory than is reserved");
    pipelineContext.freeMemory(bytes);
    memoryReservation.getAndAdd(-bytes);
}
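// A minimal usage sketch of the reserve/free pairing (hedged: the operator code below is
// illustrative, not part of this class). Every successful tryReserveMemory(n) is matched by
// a freeMemory(n), which keeps memoryReservation non-negative and keeps the checkArgument
// above from ever firing:
//
//     if (driverContext.tryReserveMemory(pageSizeInBytes)) {
//         try {
//             bufferPage(page); // hypothetical work backed by the reserved memory
//         }
//         finally {
//             driverContext.freeMemory(pageSizeInBytes);
//         }
//     }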
public void startProcessTimer()
{
    if (startNanos.compareAndSet(0, System.nanoTime())) {
        pipelineContext.start();
        executionStartTime.set(DateTime.now());
    }

    intervalWallStart.set(System.nanoTime());
    intervalCpuStart.set(currentThreadCpuTime());
    intervalUserStart.set(currentThreadUserTime());
}

public void finished()
{
    if (!finished.compareAndSet(false, true)) {
        // already finished
        return;
    }
    executionEndTime.set(DateTime.now());
    endNanos.set(System.nanoTime());

    freeMemory(memoryReservation.get());

    pipelineContext.driverFinished(this);
}
private Driver createDriver(@Nullable ScheduledSplit partitionedSplit)
{
    Driver driver = driverFactory.createDriver(pipelineContext.addDriverContext());

    // record the driver so other threads adding unpartitioned sources can see it
    // NOTE: this MUST be done before reading unpartitionedSources, so we see a consistent view of
    // the unpartitioned sources
    drivers.add(new WeakReference<>(driver));

    if (partitionedSplit != null) {
        // TableScanOperator requires the partitioned split to be added before the first call to process
        driver.updateSource(new TaskSource(partitionedSourceId, ImmutableSet.of(partitionedSplit), true));
    }

    // add unpartitioned sources
    for (TaskSource source : unpartitionedSources.values()) {
        driver.updateSource(source);
    }

    pendingCreation.decrementAndGet();
    closeDriverFactoryIfFullyCreated();

    return driver;
}
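// Hedged sketch of the other half of the race the ordering above guards against (assumed
// code, not from this file): a thread registering an unpartitioned source is expected to
// publish it to the map first and then walk the recorded drivers, so every driver either
// reads the source from unpartitionedSources in createDriver or receives it here:
//
//     unpartitionedSources.put(source.getPlanNodeId(), source);
//     for (WeakReference<Driver> driverReference : drivers) {
//         Driver driver = driverReference.get();
//         if (driver != null) {
//             driver.updateSource(source);
//         }
//     }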
public boolean isVerboseStats()
{
    return pipelineContext.isVerboseStats();
}

public ListenableFuture<?> reserveSystemMemory(long bytes)
{
    checkArgument(bytes >= 0, "bytes is negative");
    ListenableFuture<?> future = pipelineContext.reserveSystemMemory(bytes);
    systemMemoryReservation.getAndAdd(bytes);
    return future;
}

public ListenableFuture<?> reserveMemory(long bytes)
{
    ListenableFuture<?> future = pipelineContext.reserveMemory(bytes);
    memoryReservation.getAndAdd(bytes);
    return future;
}
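// Hedged usage sketch: the returned future is assumed to complete once the reservation fits
// under the memory limit again, letting an operator report itself as blocked instead of
// tying up a thread (the names below are illustrative):
//
//     ListenableFuture<?> blocked = operatorContext.getDriverContext().reserveMemory(bytes);
//     if (!blocked.isDone()) {
//         return blocked; // surfaced as the operator's blocked future
//     }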
public void transferMemoryToTaskContext(long bytes)
{
    pipelineContext.transferMemoryToTaskContext(bytes);
    checkArgument(memoryReservation.addAndGet(-bytes) >= 0, "Tried to transfer more memory than is reserved");
}
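// Hedged sketch (illustrative, not from this file): transferring suits structures that
// outlive the driver, such as a build-side hash handed to the whole task. The bytes stay
// reserved at the task level while the driver's own count returns to zero:
//
//     driverContext.reserveMemory(hashSizeInBytes);
//     // ... populate the shared lookup source ...
//     driverContext.transferMemoryToTaskContext(hashSizeInBytes);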
public void failed(Throwable cause)
{
    pipelineContext.failed(cause);
    finished.set(true);

    freeMemory(memoryReservation.get());
}

public Session getSession()
{
    return pipelineContext.getSession();
}

public boolean isCpuTimerEnabled()
{
    return pipelineContext.isCpuTimerEnabled();
}

public boolean isDone()
{
    return finished.get() || pipelineContext.isDone();
}

public TaskId getTaskId()
{
    return pipelineContext.getTaskId();
}
private static LookupSourceSupplier buildHash(boolean parallelBuild, TaskContext taskContext, List<Integer> hashChannels, RowPagesBuilder buildPages)
{
    if (parallelBuild) {
        ParallelHashBuilder parallelHashBuilder = new ParallelHashBuilder(
                buildPages.getTypes(),
                hashChannels,
                buildPages.getHashChannel(),
                100,
                PARTITION_COUNT);

        // collect input data
        DriverContext collectDriverContext = taskContext.addPipelineContext(true, true).addDriverContext();
        ValuesOperatorFactory valuesOperatorFactory = new ValuesOperatorFactory(0, new PlanNodeId("test"), buildPages.getTypes(), buildPages.build());
        OperatorFactory collectOperatorFactory = parallelHashBuilder.getCollectOperatorFactory(1, new PlanNodeId("test"));
        Driver driver = new Driver(collectDriverContext,
                valuesOperatorFactory.createOperator(collectDriverContext),
                collectOperatorFactory.createOperator(collectDriverContext));

        while (!driver.isFinished()) {
            driver.process();
        }

        // build hash tables
        PipelineContext buildPipeline = taskContext.addPipelineContext(true, true);
        OperatorFactory buildOperatorFactory = parallelHashBuilder.getBuildOperatorFactory(new PlanNodeId("test"));
        for (int i = 0; i < PARTITION_COUNT; i++) {
            DriverContext buildDriverContext = buildPipeline.addDriverContext();
            Driver buildDriver = new Driver(buildDriverContext, buildOperatorFactory.createOperator(buildDriverContext));

            while (!buildDriver.isFinished()) {
                buildDriver.process();
            }
        }

        return parallelHashBuilder.getLookupSourceSupplier();
    }
    else {
        DriverContext driverContext = taskContext.addPipelineContext(true, true).addDriverContext();
        ValuesOperatorFactory valuesOperatorFactory = new ValuesOperatorFactory(0, new PlanNodeId("test"), buildPages.getTypes(), buildPages.build());
        HashBuilderOperatorFactory hashBuilderOperatorFactory = new HashBuilderOperatorFactory(
                1,
                new PlanNodeId("test"),
                buildPages.getTypes(),
                hashChannels,
                buildPages.getHashChannel(),
                100);

        Driver driver = new Driver(driverContext,
                valuesOperatorFactory.createOperator(driverContext),
                hashBuilderOperatorFactory.createOperator(driverContext));

        while (!driver.isFinished()) {
            driver.process();
        }

        return hashBuilderOperatorFactory.getLookupSourceSupplier();
    }
}
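// A minimal sketch of driving buildHash from a test (hedged: `executor`, `TEST_SESSION`,
// `createTaskContext`, and the page layout below are assumptions, not taken from this file):
//
//     TaskContext taskContext = createTaskContext(executor, TEST_SESSION);
//     RowPagesBuilder buildPages = rowPagesBuilder(ImmutableList.of(VARCHAR, BIGINT))
//             .addSequencePage(10, 20, 30);
//     LookupSourceSupplier lookupSource = buildHash(false, taskContext, Ints.asList(0), buildPages);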