Code example #1
File: Foreman.java (project: nitinware/drill)
  /**
   * Set up the root fragment (which will run locally), and submit it for execution.
   *
   * @param rootFragment the plan fragment to run on this (Foreman) node
   * @param rootOperator the root operator of the fragment's operator tree
   * @throws ExecutionSetupException if the root fragment cannot be set up
   */
  private void setupRootFragment(final PlanFragment rootFragment, final FragmentRoot rootOperator)
      throws ExecutionSetupException {
    @SuppressWarnings("resource")
    final FragmentContext rootContext =
        new FragmentContext(
            drillbitContext,
            rootFragment,
            queryContext,
            initiatingClient,
            drillbitContext.getFunctionImplementationRegistry());
    @SuppressWarnings("resource")
    final IncomingBuffers buffers = new IncomingBuffers(rootFragment, rootContext);
    rootContext.setBuffers(buffers);

    queryManager.addFragmentStatusTracker(rootFragment, true);

    final ControlTunnel tunnel =
        drillbitContext.getController().getTunnel(queryContext.getCurrentEndpoint());
    final FragmentExecutor rootRunner =
        new FragmentExecutor(
            rootContext,
            rootFragment,
            new FragmentStatusReporter(rootContext, tunnel),
            rootOperator);
    final RootFragmentManager fragmentManager =
        new RootFragmentManager(rootFragment.getHandle(), buffers, rootRunner);

    if (buffers.isDone()) {
      // if we don't have to wait for any incoming data, start the fragment runner.
      bee.addFragmentRunner(fragmentManager.getRunnable());
    } else {
      // if we do, record the fragment manager in the workBus.
      drillbitContext.getWorkBus().addFragmentManager(fragmentManager);
    }
  }
Code example #2
File: Foreman.java (project: xsnxj/drill)
  /**
   * Set up the root fragment (which will run locally), and submit it for execution.
   *
   * @param rootFragment the plan fragment to run on this (Foreman) node
   * @param rootOperator the root operator of the fragment's operator tree
   * @throws ExecutionSetupException if the root fragment cannot be set up
   */
  private void setupRootFragment(final PlanFragment rootFragment, final FragmentRoot rootOperator)
      throws ExecutionSetupException {
    @SuppressWarnings("resource")
    final FragmentContext rootContext =
        new FragmentContext(
            drillbitContext,
            rootFragment,
            queryContext,
            initiatingClient,
            drillbitContext.getFunctionImplementationRegistry());
    @SuppressWarnings("resource")
    final IncomingBuffers buffers = new IncomingBuffers(rootFragment, rootContext);
    rootContext.setBuffers(buffers);

    queryManager.addFragmentStatusTracker(rootFragment, true);

    rootRunner =
        new FragmentExecutor(
            rootContext,
            rootFragment,
            queryManager.newRootStatusHandler(rootContext, drillbitContext),
            rootOperator);
    final RootFragmentManager fragmentManager =
        new RootFragmentManager(rootFragment.getHandle(), buffers, rootRunner);

    if (buffers.isDone()) {
      // if we don't have to wait for any incoming data, start the fragment runner.
      bee.addFragmentRunner(fragmentManager.getRunnable());
    } else {
      // if we do, record the fragment manager in the workBus.
      // TODO: aren't we managing our own work? What does this do?
      // It looks like this will never get run.
      drillbitContext.getWorkBus().addFragmentManager(fragmentManager);
    }
  }
Code example #3
  @Test
  public void testSubstringNegative(
      @Injectable final DrillbitContext bitContext,
      @Injectable UserServer.UserClientConnection connection)
      throws Throwable {

    new NonStrictExpectations() {
      {
        bitContext.getMetrics();
        result = new MetricRegistry();
        bitContext.getAllocator();
        result = new TopLevelAllocator();
        bitContext.getOperatorCreatorRegistry();
        result = new OperatorCreatorRegistry(c);
        bitContext.getConfig();
        result = c;
      }
    };

    PhysicalPlanReader reader =
        new PhysicalPlanReader(
            c, c.getMapper(), CoordinationProtos.DrillbitEndpoint.getDefaultInstance());
    PhysicalPlan plan =
        reader.readPhysicalPlan(
            Files.toString(
                FileUtils.getResourceAsFile("/functions/testSubstringNegative.json"),
                Charsets.UTF_8));
    FunctionImplementationRegistry registry = new FunctionImplementationRegistry(c);
    FragmentContext context =
        new FragmentContext(bitContext, PlanFragment.getDefaultInstance(), connection, registry);
    SimpleRootExec exec =
        new SimpleRootExec(
            ImplCreator.getExec(
                context, (FragmentRoot) plan.getSortedOperators(false).iterator().next()));

    while (exec.next()) {
      NullableVarCharVector c1 =
          exec.getValueVectorById(
              new SchemaPath("col3", ExpressionPosition.UNKNOWN), NullableVarCharVector.class);
      final NullableVarCharVector.Accessor a1 = c1.getAccessor();

      int count = 0;
      for (int i = 0; i < a1.getValueCount(); i++) {
        if (!a1.isNull(i)) {
          NullableVarCharHolder holder = new NullableVarCharHolder();
          a1.get(i, holder);
          // when the offset is negative, substring returns an empty string.
          assertEquals("", holder.toString());
          ++count;
        }
      }
      assertEquals(50, count);
    }

    if (context.getFailureCause() != null) {
      throw context.getFailureCause();
    }
    assertFalse(context.isFailed());
  }
Code example #4
  public OrderedPartitionRecordBatch(
      OrderedPartitionSender pop, RecordBatch incoming, FragmentContext context)
      throws OutOfMemoryException {
    super(pop, context);
    this.incoming = incoming;
    this.partitions = pop.getDestinations().size();
    this.sendingMajorFragmentWidth = pop.getSendingWidth();
    this.recordsToSample = pop.getRecordsToSample();
    this.samplingFactor = pop.getSamplingFactor();
    this.completionFactor = pop.getCompletionFactor();

    DistributedCache cache = context.getDrillbitContext().getCache();
    this.mmap = cache.getMultiMap(MULTI_CACHE_CONFIG);
    this.tableMap = cache.getMap(SINGLE_CACHE_CONFIG);
    Preconditions.checkNotNull(tableMap);

    this.mapKey =
        String.format(
            "%s_%d", context.getHandle().getQueryId(), context.getHandle().getMajorFragmentId());
    this.minorFragmentSampleCount = cache.getCounter(mapKey);

    SchemaPath outputPath = popConfig.getRef();
    MaterializedField outputField =
        MaterializedField.create(outputPath, Types.required(TypeProtos.MinorType.INT));
    this.partitionKeyVector =
        (IntVector) TypeHelper.getNewVector(outputField, oContext.getAllocator());
  }
Code example #5
 public ExternalSortBatch(ExternalSort popConfig, FragmentContext context, RecordBatch incoming)
     throws OutOfMemoryException {
   super(popConfig, context, true);
   this.incoming = incoming;
   DrillConfig config = context.getConfig();
   Configuration conf = new Configuration();
   conf.set("fs.default.name", config.getString(ExecConstants.EXTERNAL_SORT_SPILL_FILESYSTEM));
   try {
     this.fs = FileSystem.get(conf);
   } catch (IOException e) {
     throw new RuntimeException(e);
   }
   SPILL_BATCH_GROUP_SIZE = config.getInt(ExecConstants.EXTERNAL_SORT_SPILL_GROUP_SIZE);
   SPILL_THRESHOLD = config.getInt(ExecConstants.EXTERNAL_SORT_SPILL_THRESHOLD);
   dirs = Iterators.cycle(config.getStringList(ExecConstants.EXTERNAL_SORT_SPILL_DIRS));
   oAllocator = oContext.getAllocator();
   copierAllocator =
       oAllocator.newChildAllocator(
           oAllocator.getName() + ":copier",
           PriorityQueueCopier.INITIAL_ALLOCATION,
           PriorityQueueCopier.MAX_ALLOCATION);
   FragmentHandle handle = context.getHandle();
   fileName =
       String.format(
           "%s_majorfragment%s_minorfragment%s_operator%s",
           QueryIdHelper.getQueryId(handle.getQueryId()),
           handle.getMajorFragmentId(),
           handle.getMinorFragmentId(),
           popConfig.getOperatorId());
 }
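
A quick aside: the spill file name built above resolves to a flat string. A minimal sketch with made-up IDs (the query id string and the fragment/operator numbers below are hypothetical, for illustration only):

  // Sketch only: hypothetical IDs plugged into the format string above.
  String fileName =
      String.format(
          "%s_majorfragment%s_minorfragment%s_operator%s",
          "2af4f9a8-0001", // stand-in for QueryIdHelper.getQueryId(handle.getQueryId())
          2, // major fragment id
          5, // minor fragment id
          3); // operator id
  // fileName is "2af4f9a8-0001_majorfragment2_minorfragment5_operator3"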
Code example #6
  public SingleBatchSorter createNewSorter(FragmentContext context, VectorAccessible batch)
      throws ClassTransformationException, IOException, SchemaChangeException {
    CodeGenerator<SingleBatchSorter> cg =
        CodeGenerator.get(
            SingleBatchSorter.TEMPLATE_DEFINITION,
            context.getFunctionRegistry(),
            context.getOptions());
    ClassGenerator<SingleBatchSorter> g = cg.getRoot();

    generateComparisons(g, batch);

    return context.getImplementationClass(cg);
  }
Code example #7
  private MSorter createNewMSorter(
      FragmentContext context,
      List<Ordering> orderings,
      VectorAccessible batch,
      MappingSet mainMapping,
      MappingSet leftMapping,
      MappingSet rightMapping)
      throws ClassTransformationException, IOException, SchemaChangeException {
    CodeGenerator<MSorter> cg =
        CodeGenerator.get(
            MSorter.TEMPLATE_DEFINITION, context.getFunctionRegistry(), context.getOptions());
    ClassGenerator<MSorter> g = cg.getRoot();
    g.setMappingSet(mainMapping);

    for (Ordering od : orderings) {
      // first, we rewrite the evaluation stack for each side of the comparison.
      ErrorCollector collector = new ErrorCollectorImpl();
      final LogicalExpression expr =
          ExpressionTreeMaterializer.materialize(
              od.getExpr(), batch, collector, context.getFunctionRegistry());
      if (collector.hasErrors()) {
        throw new SchemaChangeException(
            "Failure while materializing expression. " + collector.toErrorString());
      }
      g.setMappingSet(leftMapping);
      HoldingContainer left = g.addExpr(expr, ClassGenerator.BlkCreateMode.FALSE);
      g.setMappingSet(rightMapping);
      HoldingContainer right = g.addExpr(expr, ClassGenerator.BlkCreateMode.FALSE);
      g.setMappingSet(mainMapping);

      // next we wrap the two comparison sides and add the expression block for the comparison.
      LogicalExpression fh =
          FunctionGenerationHelper.getOrderingComparator(
              od.nullsSortHigh(), left, right, context.getFunctionRegistry());
      HoldingContainer out = g.addExpr(fh, ClassGenerator.BlkCreateMode.FALSE);
      JConditional jc = g.getEvalBlock()._if(out.getValue().ne(JExpr.lit(0)));

      if (od.getDirection() == Direction.ASCENDING) {
        jc._then()._return(out.getValue());
      } else {
        jc._then()._return(out.getValue().minus());
      }
      g.rotateBlock();
    }

    g.rotateBlock();
    g.getEvalBlock()._return(JExpr.lit(0));

    return context.getImplementationClass(cg);
  }
Code example #8
  /**
   * Create a FragmentExecutor where we already have a root operator in memory.
   *
   * @param context the fragment context for this execution
   * @param fragment the plan fragment to execute
   * @param statusReporter used to report fragment status back to the Foreman
   * @param rootOperator the already-materialized root operator of the fragment
   */
  public FragmentExecutor(
      final FragmentContext context,
      final PlanFragment fragment,
      final FragmentStatusReporter statusReporter,
      final FragmentRoot rootOperator) {
    this.fragmentContext = context;
    this.statusReporter = statusReporter;
    this.fragment = fragment;
    this.rootOperator = rootOperator;
    this.fragmentName = QueryIdHelper.getQueryIdentifier(context.getHandle());
    this.receiverExecutor = new ReceiverExecutor(fragmentName, fragmentContext.getExecutor());

    context.setExecutorState(new ExecutorStateImpl());
  }
Code example #9
  @Override
  public IterOutcome next() {
    try {
      RawFragmentBatch batch = fragProvider.getNext();

      // skip over empty batches. we do this since these are basically control messages.
      while (batch != null && batch.getHeader().getDef().getRecordCount() == 0) {
        batch = fragProvider.getNext();
      }

      if (batch == null) {
        batchLoader.clear();
        return IterOutcome.NONE;
      }

      //      logger.debug("Next received batch {}", batch);

      RecordBatchDef rbd = batch.getHeader().getDef();
      boolean schemaChanged = batchLoader.load(rbd, batch.getBody());
      //      System.out.println(rbd.getRecordCount());
      batch.release();
      if (schemaChanged) {
        this.schema = batchLoader.getSchema();
        return IterOutcome.OK_NEW_SCHEMA;
      } else {
        return IterOutcome.OK;
      }
    } catch (SchemaChangeException | IOException ex) {
      context.fail(ex);
      return IterOutcome.STOP;
    }
  }
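
For context, a downstream operator typically drives next() in a loop keyed on the returned IterOutcome. Below is a minimal consumption sketch; the drainIncoming helper is hypothetical (not Drill API), and only the outcome values used above are assumed:

  // Sketch: consume a batch source that follows the IterOutcome protocol above.
  void drainIncoming(final RecordBatch incoming) {
    while (true) {
      switch (incoming.next()) {
        case OK_NEW_SCHEMA:
          // schema changed: rebuild vectors/generated code, then fall through
        case OK:
          // process the records in this batch
          break;
        case NONE: // upstream exhausted normally
        case STOP: // upstream failed; context.fail() has already been called
        default:
          return;
      }
    }
  }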
Code example #10
 public void readAllFixedFields(long recordsToRead) throws IOException {
   boolean useAsyncColReader =
       fragmentContext.getOptions().getOption(ExecConstants.PARQUET_COLUMNREADER_ASYNC).bool_val;
   if (useAsyncColReader) {
     readAllFixedFieldsParallel(recordsToRead);
   } else {
     readAllFixedFieldsSerial(recordsToRead);
   }
 }
Code example #11
File: TextFormatPlugin.java (project: dorlaor/drill)
  @Override
  public RecordWriter getRecordWriter(FragmentContext context, EasyWriter writer)
      throws IOException {
    Map<String, String> options = Maps.newHashMap();

    options.put("location", writer.getLocation());

    FragmentHandle handle = context.getHandle();
    String fragmentId =
        String.format("%d_%d", handle.getMajorFragmentId(), handle.getMinorFragmentId());
    options.put("prefix", fragmentId);

    options.put("separator", ((TextFormatConfig) getConfig()).getDelimiter());
    options.put(
        FileSystem.FS_DEFAULT_NAME_KEY, ((FileSystemConfig) writer.getStorageConfig()).connection);

    options.put("extension", ((TextFormatConfig) getConfig()).getExtensions().get(0));

    RecordWriter recordWriter = new DrillTextRecordWriter(context.getAllocator());
    recordWriter.init(options);

    return recordWriter;
  }
Code example #12
File: DrillBuf.java (project: mdaparte/drill)
 public DrillBuf reallocIfNeeded(int size) {
   if (this.capacity() >= size) {
     return this;
   }
   if (context != null) {
     return context.replace(this, size);
   } else if (fContext != null) {
     return fContext.replace(this, size);
   } else if (bufManager != null) {
     return bufManager.replace(this, size);
   } else {
     throw new UnsupportedOperationException(
         "Realloc is only available in the context of an operator's UDFs");
   }
 }
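
Because reallocIfNeeded can hand back a different buffer, callers must always use the returned reference. A hedged usage sketch (the writeAt helper is hypothetical; setBytes is inherited from Netty's ByteBuf, which DrillBuf extends):

  // Sketch only: grow a scratch buffer before writing a payload into it.
  DrillBuf writeAt(DrillBuf buf, int offset, byte[] payload) {
    // reallocIfNeeded may return a new DrillBuf; never reuse the old reference.
    DrillBuf target = buf.reallocIfNeeded(offset + payload.length);
    target.setBytes(offset, payload);
    return target;
  }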
Code example #13
  private void closeOutResources() {

    // first close the operators and release all memory.
    try {
      // Say the executor was cancelled before setup. When the executor actually runs, root is
      // not initialized, but this method is called in a finally block, so root can be null.
      if (root != null) {
        root.close();
      }
    } catch (final Exception e) {
      fail(e);
    }

    // then close the fragment context.
    fragmentContext.close();
  }
Code example #14
  public RecordWriter getRecordWriter(FragmentContext context, ParquetWriter writer)
      throws IOException, OutOfMemoryException {
    Map<String, String> options = Maps.newHashMap();

    options.put("location", writer.getLocation());

    FragmentHandle handle = context.getHandle();
    String fragmentId =
        String.format("%d_%d", handle.getMajorFragmentId(), handle.getMinorFragmentId());
    options.put("prefix", fragmentId);

    options.put(
        FileSystem.FS_DEFAULT_NAME_KEY, ((FileSystemConfig) writer.getStorageConfig()).connection);

    options.put(
        ExecConstants.PARQUET_BLOCK_SIZE,
        context.getOptions().getOption(ExecConstants.PARQUET_BLOCK_SIZE).num_val.toString());
    options.put(
        ExecConstants.PARQUET_PAGE_SIZE,
        context.getOptions().getOption(ExecConstants.PARQUET_PAGE_SIZE).num_val.toString());
    options.put(
        ExecConstants.PARQUET_DICT_PAGE_SIZE,
        context.getOptions().getOption(ExecConstants.PARQUET_DICT_PAGE_SIZE).num_val.toString());

    options.put(
        ExecConstants.PARQUET_WRITER_COMPRESSION_TYPE,
        context.getOptions().getOption(ExecConstants.PARQUET_WRITER_COMPRESSION_TYPE).string_val);

    options.put(
        ExecConstants.PARQUET_WRITER_ENABLE_DICTIONARY_ENCODING,
        context
            .getOptions()
            .getOption(ExecConstants.PARQUET_WRITER_ENABLE_DICTIONARY_ENCODING)
            .bool_val
            .toString());

    RecordWriter recordWriter = new ParquetRecordWriter(context, writer);
    recordWriter.init(options);

    return recordWriter;
  }
Code example #15
 /**
  * Resume all the pauses within the current context. Note that this method will be called from
  * threads *other* than the one running this runnable(). Also, this method can be called multiple
  * times.
  */
 public synchronized void unpause() {
   fragmentContext.getExecutionControls().unpauseAll();
 }
Code example #16
File: TestSimpleSort.java (project: mdaparte/drill)
  @Test
  public void sortOneKeyAscending(
      @Injectable final DrillbitContext bitContext, @Injectable UserClientConnection connection)
      throws Throwable {
    new NonStrictExpectations() {
      {
        bitContext.getMetrics();
        result = new MetricRegistry();
        bitContext.getAllocator();
        result = new TopLevelAllocator();
        bitContext.getOperatorCreatorRegistry();
        result = new OperatorCreatorRegistry(c);
        bitContext.getConfig();
        result = c;
        bitContext.getCompiler();
        result = CodeCompiler.getTestCompiler(c);
      }
    };

    final PhysicalPlanReader reader =
        new PhysicalPlanReader(
            c, c.getMapper(), CoordinationProtos.DrillbitEndpoint.getDefaultInstance());
    final PhysicalPlan plan =
        reader.readPhysicalPlan(
            Files.toString(FileUtils.getResourceAsFile("/sort/one_key_sort.json"), Charsets.UTF_8));
    final FunctionImplementationRegistry registry = new FunctionImplementationRegistry(c);
    final FragmentContext context =
        new FragmentContext(bitContext, PlanFragment.getDefaultInstance(), connection, registry);
    final SimpleRootExec exec =
        new SimpleRootExec(
            ImplCreator.getExec(
                context, (FragmentRoot) plan.getSortedOperators(false).iterator().next()));

    int previousInt = Integer.MIN_VALUE;
    int recordCount = 0;
    int batchCount = 0;

    while (exec.next()) {
      batchCount++;
      final IntVector c1 =
          exec.getValueVectorById(
              new SchemaPath("blue", ExpressionPosition.UNKNOWN), IntVector.class);
      final IntVector c2 =
          exec.getValueVectorById(
              new SchemaPath("green", ExpressionPosition.UNKNOWN), IntVector.class);

      final IntVector.Accessor a1 = c1.getAccessor();
      final IntVector.Accessor a2 = c2.getAccessor();

      for (int i = 0; i < c1.getAccessor().getValueCount(); i++) {
        recordCount++;
        assertTrue(previousInt <= a1.get(i));
        previousInt = a1.get(i);
        assertEquals(previousInt, a2.get(i));
      }
    }

    System.out.println(String.format("Sorted %,d records in %d batches.", recordCount, batchCount));

    if (context.getFailureCause() != null) {
      throw context.getFailureCause();
    }
    assertFalse(context.isFailed());
  }
Code example #17
  @Override
  public void setup(OperatorContext operatorContext, OutputMutator output)
      throws ExecutionSetupException {
    this.operatorContext = operatorContext;
    if (!isStarQuery()) {
      columnsFound = new boolean[getColumns().size()];
      nullFilledVectors = new ArrayList<>();
    }
    columnStatuses = new ArrayList<>();
    //    totalRecords = footer.getBlocks().get(rowGroupIndex).getRowCount();
    List<ColumnDescriptor> columns = footer.getFileMetaData().getSchema().getColumns();
    allFieldsFixedLength = true;
    ColumnDescriptor column;
    ColumnChunkMetaData columnChunkMetaData;
    int columnsToScan = 0;
    mockRecordsRead = 0;

    MaterializedField field;
    //    ParquetMetadataConverter metaConverter = new ParquetMetadataConverter();
    FileMetaData fileMetaData;

    logger.debug(
        "Reading row group({}) with {} records in file {}.",
        rowGroupIndex,
        footer.getBlocks().get(rowGroupIndex).getRowCount(),
        hadoopPath.toUri().getPath());
    totalRecordsRead = 0;

    // TODO: figure out how to deal with this better once we add nested reading; note also where
    // this map is used below.
    // Store a map from column name to converted types if they are non-null.
    Map<String, SchemaElement> schemaElements =
        ParquetReaderUtility.getColNameToSchemaElementMapping(footer);

    // loop to add up the length of the fixed width columns and build the schema
    for (int i = 0; i < columns.size(); ++i) {
      column = columns.get(i);
      SchemaElement se = schemaElements.get(column.getPath()[0]);
      MajorType mt =
          ParquetToDrillTypeConverter.toMajorType(
              column.getType(),
              se.getType_length(),
              getDataMode(column),
              se,
              fragmentContext.getOptions());
      field = MaterializedField.create(toFieldName(column.getPath()), mt);
      if (!fieldSelected(field)) {
        continue;
      }
      columnsToScan++;
      int dataTypeLength = getDataTypeLength(column, se);
      if (dataTypeLength == -1) {
        allFieldsFixedLength = false;
      } else {
        bitWidthAllFixedFields += dataTypeLength;
      }
    }
    //    rowGroupOffset =
    // footer.getBlocks().get(rowGroupIndex).getColumns().get(0).getFirstDataPageOffset();

    if (columnsToScan != 0 && allFieldsFixedLength) {
      recordsPerBatch =
          (int)
              Math.min(
                  Math.min(
                      batchSize / bitWidthAllFixedFields,
                      footer.getBlocks().get(0).getColumns().get(0).getValueCount()),
                  65535);
    } else {
      recordsPerBatch = DEFAULT_RECORDS_TO_READ_IF_NOT_FIXED_WIDTH;
    }

    try {
      ValueVector vector;
      SchemaElement schemaElement;
      final ArrayList<VarLengthColumn<? extends ValueVector>> varLengthColumns = new ArrayList<>();
      // initialize all of the column read status objects
      boolean fieldFixedLength;
      // the column chunk metadata is not guaranteed to be in the same order as the columns in
      // the schema, so a map is constructed for fast access to the columnChunkMetaData that
      // corresponds to an element in the schema
      Map<String, Integer> columnChunkMetadataPositionsInList = new HashMap<>();
      BlockMetaData rowGroupMetadata = footer.getBlocks().get(rowGroupIndex);

      int colChunkIndex = 0;
      for (ColumnChunkMetaData colChunk : rowGroupMetadata.getColumns()) {
        columnChunkMetadataPositionsInList.put(
            Arrays.toString(colChunk.getPath().toArray()), colChunkIndex);
        colChunkIndex++;
      }
      for (int i = 0; i < columns.size(); ++i) {
        column = columns.get(i);
        columnChunkMetaData =
            rowGroupMetadata
                .getColumns()
                .get(columnChunkMetadataPositionsInList.get(Arrays.toString(column.getPath())));
        schemaElement = schemaElements.get(column.getPath()[0]);
        MajorType type =
            ParquetToDrillTypeConverter.toMajorType(
                column.getType(),
                schemaElement.getType_length(),
                getDataMode(column),
                schemaElement,
                fragmentContext.getOptions());
        field = MaterializedField.create(toFieldName(column.getPath()), type);
        // the field was not requested to be read
        if (!fieldSelected(field)) {
          continue;
        }

        fieldFixedLength = column.getType() != PrimitiveType.PrimitiveTypeName.BINARY;
        vector =
            output.addField(
                field,
                (Class<? extends ValueVector>)
                    TypeHelper.getValueVectorClass(type.getMinorType(), type.getMode()));
        if (column.getType() != PrimitiveType.PrimitiveTypeName.BINARY) {
          if (column.getMaxRepetitionLevel() > 0) {
            final RepeatedValueVector repeatedVector = RepeatedValueVector.class.cast(vector);
            ColumnReader<?> dataReader =
                ColumnReaderFactory.createFixedColumnReader(
                    this,
                    fieldFixedLength,
                    column,
                    columnChunkMetaData,
                    recordsPerBatch,
                    repeatedVector.getDataVector(),
                    schemaElement);
            varLengthColumns.add(
                new FixedWidthRepeatedReader(
                    this,
                    dataReader,
                    getTypeLengthInBits(column.getType()),
                    -1,
                    column,
                    columnChunkMetaData,
                    false,
                    repeatedVector,
                    schemaElement));
          } else {
            columnStatuses.add(
                ColumnReaderFactory.createFixedColumnReader(
                    this,
                    fieldFixedLength,
                    column,
                    columnChunkMetaData,
                    recordsPerBatch,
                    vector,
                    schemaElement));
          }
        } else {
          // create a reader and add it to the appropriate list
          varLengthColumns.add(
              ColumnReaderFactory.getReader(
                  this, -1, column, columnChunkMetaData, false, vector, schemaElement));
        }
      }
      varLengthReader = new VarLenBinaryReader(this, varLengthColumns);

      if (!isStarQuery()) {
        List<SchemaPath> projectedColumns = Lists.newArrayList(getColumns());
        SchemaPath col;
        for (int i = 0; i < columnsFound.length; i++) {
          col = projectedColumns.get(i);
          assert col != null;
          if (!columnsFound[i] && !col.equals(STAR_COLUMN)) {
            nullFilledVectors.add(
                (NullableIntVector)
                    output.addField(
                        MaterializedField.create(
                            col.getAsUnescapedPath(), Types.optional(TypeProtos.MinorType.INT)),
                        (Class<? extends ValueVector>)
                            TypeHelper.getValueVectorClass(
                                TypeProtos.MinorType.INT, DataMode.OPTIONAL)));
          }
        }
      }
    } catch (Exception e) {
      handleAndRaise("Failure in setting up reader", e);
    }
  }
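
To make the fixed-width batch sizing above concrete, here is the same arithmetic with assumed inputs (the batch budget, field widths, and row-group value count below are hypothetical):

  // Hypothetical numbers mirroring the recordsPerBatch computation above.
  long batchSize = 256 * 1024; // assumed per-batch budget
  int bitWidthAllFixedFields = 96; // e.g. three 32-bit INT columns
  long valueCountInRowGroup = 100_000; // values in the row group's first column
  int recordsPerBatch =
      (int)
          Math.min(
              Math.min(batchSize / bitWidthAllFixedFields, valueCountInRowGroup), // 2730
              65535);
  // recordsPerBatch == 2730 here: the width-derived bound dominates.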
Code example #18
File: TestAllocators.java (project: mdaparte/drill)
  @Test
  public void testAllocators() throws Exception {
    // Setup a drillbit (initializes a root allocator)
    final DrillConfig config = DrillConfig.create(TEST_CONFIGURATIONS);
    final RemoteServiceSet serviceSet = RemoteServiceSet.getLocalServiceSet();
    final Drillbit bit = new Drillbit(config, serviceSet);
    bit.run();
    final DrillbitContext bitContext = bit.getContext();
    FunctionImplementationRegistry functionRegistry =
        bitContext.getFunctionImplementationRegistry();
    StoragePluginRegistry storageRegistry = new StoragePluginRegistry(bitContext);

    // Create a few Fragment Contexts

    BitControl.PlanFragment.Builder pfBuilder1 = BitControl.PlanFragment.newBuilder();
    pfBuilder1.setMemInitial(1500000);
    BitControl.PlanFragment pf1 = pfBuilder1.build();
    BitControl.PlanFragment.Builder pfBuilder2 = BitControl.PlanFragment.newBuilder();
    pfBuilder2.setMemInitial(500000);
    BitControl.PlanFragment pf2 = pfBuilder2.build();

    FragmentContext fragmentContext1 = new FragmentContext(bitContext, pf1, null, functionRegistry);
    FragmentContext fragmentContext2 = new FragmentContext(bitContext, pf2, null, functionRegistry);

    // Get a few physical operators. Easiest way is to read a physical plan.
    PhysicalPlanReader planReader =
        new PhysicalPlanReader(
            config,
            config.getMapper(),
            CoordinationProtos.DrillbitEndpoint.getDefaultInstance(),
            storageRegistry);
    PhysicalPlan plan =
        planReader.readPhysicalPlan(
            Files.toString(FileUtils.getResourceAsFile(planFile), Charsets.UTF_8));
    List<PhysicalOperator> physicalOperators = plan.getSortedOperators();
    Iterator<PhysicalOperator> physicalOperatorIterator = physicalOperators.iterator();

    PhysicalOperator physicalOperator1 = physicalOperatorIterator.next();
    PhysicalOperator physicalOperator2 = physicalOperatorIterator.next();
    PhysicalOperator physicalOperator3 = physicalOperatorIterator.next();
    PhysicalOperator physicalOperator4 = physicalOperatorIterator.next();
    PhysicalOperator physicalOperator5 = physicalOperatorIterator.next();
    PhysicalOperator physicalOperator6 = physicalOperatorIterator.next();

    // Create some bogus Operator profile defs and stats to create operator contexts
    OpProfileDef def;
    OperatorStats stats;

    // Use some bogus operator type to create a new operator context.
    def =
        new OpProfileDef(
            physicalOperator1.getOperatorId(),
            UserBitShared.CoreOperatorType.MOCK_SUB_SCAN_VALUE,
            OperatorContext.getChildCount(physicalOperator1));
    stats = fragmentContext1.getStats().getOperatorStats(def, fragmentContext1.getAllocator());

    // Add a couple of Operator Contexts
    // Initial allocation = 1000000 bytes for all operators
    OperatorContext oContext11 = fragmentContext1.newOperatorContext(physicalOperator1, true);
    DrillBuf b11 = oContext11.getAllocator().buffer(1000000);

    OperatorContext oContext12 =
        fragmentContext1.newOperatorContext(physicalOperator2, stats, true);
    DrillBuf b12 = oContext12.getAllocator().buffer(500000);

    OperatorContext oContext21 = fragmentContext1.newOperatorContext(physicalOperator3, true);

    def =
        new OpProfileDef(
            physicalOperator4.getOperatorId(),
            UserBitShared.CoreOperatorType.TEXT_WRITER_VALUE,
            OperatorContext.getChildCount(physicalOperator4));
    stats = fragmentContext2.getStats().getOperatorStats(def, fragmentContext2.getAllocator());
    OperatorContext oContext22 =
        fragmentContext2.newOperatorContext(physicalOperator4, stats, true);
    DrillBuf b22 = oContext22.getAllocator().buffer(2000000);

    // New Fragment begins
    BitControl.PlanFragment.Builder pfBuilder3 = BitControl.PlanFragment.newBuilder();
    pfBuilder3.setMemInitial(1000000);
    BitControl.PlanFragment pf3 = pfBuilder3.build();

    FragmentContext fragmentContext3 = new FragmentContext(bitContext, pf3, null, functionRegistry);

    // New fragment starts an operator that allocates an amount within the limit
    def =
        new OpProfileDef(
            physicalOperator5.getOperatorId(),
            UserBitShared.CoreOperatorType.UNION_VALUE,
            OperatorContext.getChildCount(physicalOperator5));
    stats = fragmentContext3.getStats().getOperatorStats(def, fragmentContext3.getAllocator());
    OperatorContext oContext31 =
        fragmentContext3.newOperatorContext(physicalOperator5, stats, true);

    DrillBuf b31a = oContext31.getAllocator().buffer(200000);

    // Previously running operator completes
    b22.release();
    ((AutoCloseable) oContext22).close();

    // Fragment 3 asks for more and fails
    boolean outOfMem = false;
    try {
      DrillBuf b31b = oContext31.getAllocator().buffer(4400000);
      if (b31b != null) {
        b31b.release();
      } else {
        outOfMem = true;
      }
    } catch (Exception e) {
      outOfMem = true;
    }
    assertTrue(outOfMem);

    // Operator is Exempt from Fragment limits. Fragment 3 asks for more and succeeds
    outOfMem = false;
    OperatorContext oContext32 = fragmentContext3.newOperatorContext(physicalOperator6, false);
    DrillBuf b32 = null;
    try {
      b32 = oContext32.getAllocator().buffer(4400000);
    } catch (Exception e) {
      outOfMem = true;
    } finally {
      if (b32 != null) {
        b32.release();
      } else {
        outOfMem = true;
      }
      closeOp(oContext32);
    }
    assertFalse(outOfMem);

    b11.release();
    closeOp(oContext11);
    b12.release();
    closeOp(oContext12);
    closeOp(oContext21);
    b31a.release();
    closeOp(oContext31);

    fragmentContext1.close();
    fragmentContext2.close();
    fragmentContext3.close();

    bit.close();
    serviceSet.close();
  }
Code example #19
 public WireRecordBatch(FragmentContext context, RawFragmentBatchProvider fragProvider) {
   this.fragProvider = fragProvider;
   this.context = context;
   this.batchLoader = new RecordBatchLoader(context.getAllocator());
 }
Code example #20
  @Override
  public void run() {
    // if a cancel thread has already entered this executor, we have no reason to continue.
    if (!hasCloseoutThread.compareAndSet(false, true)) {
      return;
    }

    final Thread myThread = Thread.currentThread();
    myThreadRef.set(myThread);
    final String originalThreadName = myThread.getName();
    final FragmentHandle fragmentHandle = fragmentContext.getHandle();
    final DrillbitContext drillbitContext = fragmentContext.getDrillbitContext();
    final ClusterCoordinator clusterCoordinator = drillbitContext.getClusterCoordinator();
    final DrillbitStatusListener drillbitStatusListener = new FragmentDrillbitStatusListener();
    final String newThreadName = QueryIdHelper.getExecutorThreadName(fragmentHandle);

    try {

      myThread.setName(newThreadName);

      // if we didn't get the root operator when the executor was created, create it now.
      final FragmentRoot rootOperator =
          this.rootOperator != null
              ? this.rootOperator
              : drillbitContext.getPlanReader().readFragmentOperator(fragment.getFragmentJson());

      root = ImplCreator.getExec(fragmentContext, rootOperator);
      if (root == null) {
        return;
      }

      clusterCoordinator.addDrillbitStatusListener(drillbitStatusListener);
      updateState(FragmentState.RUNNING);

      acceptExternalEvents.countDown();
      injector.injectPause(fragmentContext.getExecutionControls(), "fragment-running", logger);

      final DrillbitEndpoint endpoint = drillbitContext.getEndpoint();
      logger.debug(
          "Starting fragment {}:{} on {}:{}",
          fragmentHandle.getMajorFragmentId(),
          fragmentHandle.getMinorFragmentId(),
          endpoint.getAddress(),
          endpoint.getUserPort());

      final UserGroupInformation queryUserUgi =
          fragmentContext.isImpersonationEnabled()
              ? ImpersonationUtil.createProxyUgi(fragmentContext.getQueryUserName())
              : ImpersonationUtil.getProcessUserUGI();

      queryUserUgi.doAs(
          new PrivilegedExceptionAction<Void>() {
            public Void run() throws Exception {
              injector.injectChecked(
                  fragmentContext.getExecutionControls(), "fragment-execution", IOException.class);
              /*
               * Run the query until root.next returns false OR we no longer need to continue.
               */
              while (shouldContinue() && root.next()) {
                // loop
              }

              return null;
            }
          });

    } catch (OutOfMemoryError | OutOfMemoryException e) {
      if (!(e instanceof OutOfMemoryError) || "Direct buffer memory".equals(e.getMessage())) {
        fail(UserException.memoryError(e).build(logger));
      } else {
        // we have a heap out-of-memory error. The JVM is unstable; exit.
        CatastrophicFailure.exit(
            e, "Unable to handle out of memory condition in FragmentExecutor.", -2);
      }
    } catch (AssertionError | Exception e) {
      fail(e);
    } finally {

      // No longer allow this thread to be interrupted. We synchronize here to make sure that
      // cancel can't set an interruption after we have moved beyond this block.
      synchronized (myThreadRef) {
        myThreadRef.set(null);
        Thread.interrupted();
      }

      // We need to be sure we countDown at least once. We'll do it here to guarantee that.
      acceptExternalEvents.countDown();

      // here we could be in FAILED, RUNNING, or CANCELLATION_REQUESTED
      cleanup(FragmentState.FINISHED);

      clusterCoordinator.removeDrillbitStatusListener(drillbitStatusListener);

      myThread.setName(originalThreadName);
    }
  }