public OrderedPartitionRecordBatch(
      OrderedPartitionSender pop, RecordBatch incoming, FragmentContext context)
      throws OutOfMemoryException {
    super(pop, context);
    this.incoming = incoming;
    this.partitions = pop.getDestinations().size();
    this.sendingMajorFragmentWidth = pop.getSendingWidth();
    this.recordsToSample = pop.getRecordsToSample();
    this.samplingFactor = pop.getSamplingFactor();
    this.completionFactor = pop.getCompletionFactor();

    DistributedCache cache = context.getDrillbitContext().getCache();
    this.mmap = cache.getMultiMap(MULTI_CACHE_CONFIG);
    this.tableMap = cache.getMap(SINGLE_CACHE_CONFIG);
    Preconditions.checkNotNull(tableMap);

    this.mapKey =
        String.format(
            "%s_%d", context.getHandle().getQueryId(), context.getHandle().getMajorFragmentId());
    this.minorFragmentSampleCount = cache.getCounter(mapKey);

    SchemaPath outputPath = popConfig.getRef();
    MaterializedField outputField =
        MaterializedField.create(outputPath, Types.required(TypeProtos.MinorType.INT));
    this.partitionKeyVector =
        (IntVector) TypeHelper.getNewVector(outputField, oContext.getAllocator());
  }
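
A minimal, self-contained illustration of the cache-key scheme above (the ids are invented for the sketch): the key is built from the query id and the major fragment id only, so every minor fragment of that major fragment computes the same key and therefore fetches the same distributed counter via cache.getCounter(mapKey).

public class CacheKeySketch {
  // Same "%s_%d" scheme as the constructor above; the ids below are hypothetical.
  static String mapKey(String queryId, int majorFragmentId) {
    return String.format("%s_%d", queryId, majorFragmentId);
  }

  public static void main(String[] args) {
    // Two minor fragments of major fragment 3 compute identical keys,
    // so they can share the same cache entries while sampling is coordinated.
    System.out.println(mapKey("2f1b3c", 3)); // prints 2f1b3c_3
    System.out.println(mapKey("2f1b3c", 3)); // prints 2f1b3c_3
  }
}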
Example #2
 public ExternalSortBatch(ExternalSort popConfig, FragmentContext context, RecordBatch incoming)
     throws OutOfMemoryException {
   super(popConfig, context, true);
   this.incoming = incoming;
   DrillConfig config = context.getConfig();
   Configuration conf = new Configuration();
   conf.set("fs.default.name", config.getString(ExecConstants.EXTERNAL_SORT_SPILL_FILESYSTEM));
   try {
     this.fs = FileSystem.get(conf);
   } catch (IOException e) {
     throw new RuntimeException(e);
   }
   SPILL_BATCH_GROUP_SIZE = config.getInt(ExecConstants.EXTERNAL_SORT_SPILL_GROUP_SIZE);
   SPILL_THRESHOLD = config.getInt(ExecConstants.EXTERNAL_SORT_SPILL_THRESHOLD);
   dirs = Iterators.cycle(config.getStringList(ExecConstants.EXTERNAL_SORT_SPILL_DIRS));
   oAllocator = oContext.getAllocator();
   copierAllocator =
       oAllocator.newChildAllocator(
           oAllocator.getName() + ":copier",
           PriorityQueueCopier.INITIAL_ALLOCATION,
           PriorityQueueCopier.MAX_ALLOCATION);
   FragmentHandle handle = context.getHandle();
   fileName =
       String.format(
           "%s_majorfragment%s_minorfragment%s_operator%s",
           QueryIdHelper.getQueryId(handle.getQueryId()),
           handle.getMajorFragmentId(),
           handle.getMinorFragmentId(),
           popConfig.getOperatorId());
 }
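
A minimal sketch of the spill-filesystem setup above, using only the Hadoop Configuration/FileSystem API; the fsUri parameter is a hypothetical stand-in for the value read from ExecConstants.EXTERNAL_SORT_SPILL_FILESYSTEM.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;

public class SpillFsSketch {
  // Opens the filesystem the sort will spill to, e.g. "file:///" or "hdfs://namenode:8020".
  static FileSystem openSpillFs(String fsUri) {
    Configuration conf = new Configuration();
    conf.set("fs.default.name", fsUri); // same (legacy) key the constructor above sets
    try {
      return FileSystem.get(conf);      // resolves the filesystem from the configured scheme
    } catch (IOException e) {
      throw new RuntimeException(e);    // mirrors the constructor: fail fast during setup
    }
  }
}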
Example #3
  /**
   * Create a FragmentExecutor where we already have a root operator in memory.
   *
   * @param context the fragment context for this execution
   * @param fragment the plan fragment to execute
   * @param statusReporter used to report fragment state changes
   * @param rootOperator the already-materialized root operator of the fragment
   */
  public FragmentExecutor(
      final FragmentContext context,
      final PlanFragment fragment,
      final FragmentStatusReporter statusReporter,
      final FragmentRoot rootOperator) {
    this.fragmentContext = context;
    this.statusReporter = statusReporter;
    this.fragment = fragment;
    this.rootOperator = rootOperator;
    this.fragmentName = QueryIdHelper.getQueryIdentifier(context.getHandle());
    this.receiverExecutor = new ReceiverExecutor(fragmentName, fragmentContext.getExecutor());

    context.setExecutorState(new ExecutorStateImpl());
  }
Example #4
  public RecordWriter getRecordWriter(FragmentContext context, ParquetWriter writer)
      throws IOException, OutOfMemoryException {
    Map<String, String> options = Maps.newHashMap();

    options.put("location", writer.getLocation());

    FragmentHandle handle = context.getHandle();
    String fragmentId =
        String.format("%d_%d", handle.getMajorFragmentId(), handle.getMinorFragmentId());
    options.put("prefix", fragmentId);

    options.put(
        FileSystem.FS_DEFAULT_NAME_KEY, ((FileSystemConfig) writer.getStorageConfig()).connection);

    options.put(
        ExecConstants.PARQUET_BLOCK_SIZE,
        context.getOptions().getOption(ExecConstants.PARQUET_BLOCK_SIZE).num_val.toString());
    options.put(
        ExecConstants.PARQUET_PAGE_SIZE,
        context.getOptions().getOption(ExecConstants.PARQUET_PAGE_SIZE).num_val.toString());
    options.put(
        ExecConstants.PARQUET_DICT_PAGE_SIZE,
        context.getOptions().getOption(ExecConstants.PARQUET_DICT_PAGE_SIZE).num_val.toString());

    options.put(
        ExecConstants.PARQUET_WRITER_COMPRESSION_TYPE,
        context.getOptions().getOption(ExecConstants.PARQUET_WRITER_COMPRESSION_TYPE).string_val);

    options.put(
        ExecConstants.PARQUET_WRITER_ENABLE_DICTIONARY_ENCODING,
        context
            .getOptions()
            .getOption(ExecConstants.PARQUET_WRITER_ENABLE_DICTIONARY_ENCODING)
            .bool_val
            .toString());

    RecordWriter recordWriter = new ParquetRecordWriter(context, writer);
    recordWriter.init(options);

    return recordWriter;
  }
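
The method above follows a simple pattern: typed session options are flattened into a string-keyed map and handed to RecordWriter.init. Below is a hedged, self-contained sketch of that pattern; the OptionLookup interface and the option names are hypothetical stand-ins for context.getOptions() and the ExecConstants keys.

import java.util.HashMap;
import java.util.Map;

public class WriterOptionsSketch {
  // Hypothetical lookup standing in for the session/option manager used above.
  interface OptionLookup {
    long getLong(String name);
    String getString(String name);
  }

  // Flattens typed options into the Map<String, String> a writer's init(...) expects.
  static Map<String, String> buildOptions(OptionLookup opts, String location, String prefix) {
    Map<String, String> options = new HashMap<>();
    options.put("location", location);
    options.put("prefix", prefix); // e.g. "<majorFragmentId>_<minorFragmentId>" as above
    options.put("block.size", Long.toString(opts.getLong("block.size")));
    options.put("compression", opts.getString("compression"));
    return options;
  }
}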
Example #5
  @Override
  public RecordWriter getRecordWriter(FragmentContext context, EasyWriter writer)
      throws IOException {
    Map<String, String> options = Maps.newHashMap();

    options.put("location", writer.getLocation());

    FragmentHandle handle = context.getHandle();
    String fragmentId =
        String.format("%d_%d", handle.getMajorFragmentId(), handle.getMinorFragmentId());
    options.put("prefix", fragmentId);

    options.put("separator", ((TextFormatConfig) getConfig()).getDelimiter());
    options.put(
        FileSystem.FS_DEFAULT_NAME_KEY, ((FileSystemConfig) writer.getStorageConfig()).connection);

    options.put("extension", ((TextFormatConfig) getConfig()).getExtensions().get(0));

    RecordWriter recordWriter = new DrillTextRecordWriter(context.getAllocator());
    recordWriter.init(options);

    return recordWriter;
  }
Example #6
  @Override
  public void run() {
    // if a cancel thread has already entered this executor, we have no reason to continue.
    if (!hasCloseoutThread.compareAndSet(false, true)) {
      return;
    }

    final Thread myThread = Thread.currentThread();
    myThreadRef.set(myThread);
    final String originalThreadName = myThread.getName();
    final FragmentHandle fragmentHandle = fragmentContext.getHandle();
    final DrillbitContext drillbitContext = fragmentContext.getDrillbitContext();
    final ClusterCoordinator clusterCoordinator = drillbitContext.getClusterCoordinator();
    final DrillbitStatusListener drillbitStatusListener = new FragmentDrillbitStatusListener();
    final String newThreadName = QueryIdHelper.getExecutorThreadName(fragmentHandle);

    try {

      myThread.setName(newThreadName);

      // if we didn't get the root operator when the executor was created, create it now.
      final FragmentRoot rootOperator =
          this.rootOperator != null
              ? this.rootOperator
              : drillbitContext.getPlanReader().readFragmentOperator(fragment.getFragmentJson());

      root = ImplCreator.getExec(fragmentContext, rootOperator);
      if (root == null) {
        return;
      }

      clusterCoordinator.addDrillbitStatusListener(drillbitStatusListener);
      updateState(FragmentState.RUNNING);

      acceptExternalEvents.countDown();
      injector.injectPause(fragmentContext.getExecutionControls(), "fragment-running", logger);

      final DrillbitEndpoint endpoint = drillbitContext.getEndpoint();
      logger.debug(
          "Starting fragment {}:{} on {}:{}",
          fragmentHandle.getMajorFragmentId(),
          fragmentHandle.getMinorFragmentId(),
          endpoint.getAddress(),
          endpoint.getUserPort());

      final UserGroupInformation queryUserUgi =
          fragmentContext.isImpersonationEnabled()
              ? ImpersonationUtil.createProxyUgi(fragmentContext.getQueryUserName())
              : ImpersonationUtil.getProcessUserUGI();

      queryUserUgi.doAs(
          new PrivilegedExceptionAction<Void>() {
            public Void run() throws Exception {
              injector.injectChecked(
                  fragmentContext.getExecutionControls(), "fragment-execution", IOException.class);
              /*
               * Run the query until root.next returns false OR we no longer need to continue.
               */
              while (shouldContinue() && root.next()) {
                // loop
              }

              return null;
            }
          });

    } catch (OutOfMemoryError | OutOfMemoryException e) {
      if (!(e instanceof OutOfMemoryError) || "Direct buffer memory".equals(e.getMessage())) {
        fail(UserException.memoryError(e).build(logger));
      } else {
        // we have a heap out-of-memory error. The JVM is unstable; exit.
        CatastrophicFailure.exit(
            e, "Unable to handle out of memory condition in FragmentExecutor.", -2);
      }
    } catch (AssertionError | Exception e) {
      fail(e);
    } finally {

      // no longer allow this thread to be interrupted. We synchronize here to make sure that
      // cancel can't set an interruption after we have moved beyond this block.
      synchronized (myThreadRef) {
        myThreadRef.set(null);
        Thread.interrupted();
      }

      // We need to be sure we countDown at least once. We'll do it here to guarantee that.
      acceptExternalEvents.countDown();

      // here we could be in FAILED, RUNNING, or CANCELLATION_REQUESTED
      cleanup(FragmentState.FINISHED);

      clusterCoordinator.removeDrillbitStatusListener(drillbitStatusListener);

      myThread.setName(originalThreadName);
    }
  }
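
A minimal sketch of the impersonation branch above, using the standard Hadoop UserGroupInformation API. The runAs helper and its parameters are hypothetical; only the proxy-user/doAs shape is taken from the method above, and getLoginUser() stands in for ImpersonationUtil.getProcessUserUGI().

import java.io.IOException;
import java.security.PrivilegedExceptionAction;
import org.apache.hadoop.security.UserGroupInformation;

public class DoAsSketch {
  // Runs a unit of work as the query user when impersonation is enabled,
  // otherwise as the process (login) user.
  static void runAs(String queryUser, boolean impersonationEnabled, Runnable work)
      throws IOException, InterruptedException {
    UserGroupInformation ugi =
        impersonationEnabled
            ? UserGroupInformation.createProxyUser(queryUser, UserGroupInformation.getLoginUser())
            : UserGroupInformation.getLoginUser();
    ugi.doAs(
        (PrivilegedExceptionAction<Void>)
            () -> {
              work.run(); // the real executor loops on root.next() here
              return null;
            });
  }
}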