@SuppressWarnings("unchecked") protected <T extends Number> Result genericGetNext(T number, byte dataType) throws ExecException { Result r = accumChild(null, number, dataType); if (r != null) { return r; } byte status; Result res; T left = null, right = null; res = lhs.getNext(left, dataType); status = res.returnStatus; if (status != POStatus.STATUS_OK || res.result == null) { return res; } left = (T) res.result; res = rhs.getNext(right, dataType); status = res.returnStatus; if (status != POStatus.STATUS_OK || res.result == null) { return res; } right = (T) res.result; res.result = add(left, right, dataType); return res; }
@Override
public Result getNextBigInteger() throws ExecException {
    Result res = expr.getNextBigInteger();
    if (res.returnStatus == POStatus.STATUS_OK && res.result != null) {
        res.result = ((BigInteger) res.result).negate();
    }
    return res;
}
@Override
public Result getNextLong() throws ExecException {
    Result res = expr.getNextLong();
    if (res.returnStatus == POStatus.STATUS_OK && res.result != null) {
        res.result = -1 * ((Long) res.result);
    }
    return res;
}
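// getNextBigInteger() and getNextLong() above follow the same pattern:
// pull a value from the child expression and negate it in place when the
// status is OK and the result is non-null. As a sketch, the analogous
// double variant (not part of the original excerpt) could look like this:
@Override
public Result getNextDouble() throws ExecException {
    Result res = expr.getNextDouble();
    if (res.returnStatus == POStatus.STATUS_OK && res.result != null) {
        res.result = -1 * ((Double) res.result);
    }
    return res;
}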
/**
 * From the inputs, constructs the output tuple for this co-group in the
 * required format, which is (key, {bag of tuples from input 1},
 * {bag of tuples from input 2}, ...).
 */
@Override
public Result getNext(Tuple t) throws ExecException {
    Tuple res;

    if (firstTime) {
        firstTime = false;
        if (PigMapReduce.sJobConf != null) {
            String bagType = PigMapReduce.sJobConf.get("pig.cachedbag.type");
            if (bagType != null && bagType.equalsIgnoreCase("default")) {
                useDefaultBag = true;
            }
        }
    }

    if (distinct) {
        // Only set the key, which is the whole tuple.
        res = mTupleFactory.newTuple(1);
        res.set(0, key);
    } else {
        // Create numInputs bags.
        DataBag[] dbs = new DataBag[numInputs];

        if (isAccumulative()) {
            // Create bag wrappers that pull tuples in many batches.
            // All bags share a reference to the sample tuple buffer,
            // which contains the tuples from one batch.
            POPackageTupleBuffer buffer = new POPackageTupleBuffer();
            for (int i = 0; i < numInputs; i++) {
                dbs[i] = new AccumulativeBag(buffer, i);
            }
        } else {
            // Create bags to pull all tuples out of the iterator.
            for (int i = 0; i < numInputs; i++) {
                dbs[i] = useDefaultBag ? BagFactory.getInstance().newDefaultBag()
                        // In the very rare case where there is a POStream after
                        // this POPackage in the pipeline and it is blocking the
                        // pipeline, the constructor argument should be
                        // 2 * numInputs. But we don't want to pay that penalty
                        // all the time for one obscure case.
                        : new InternalCachedBag(numInputs);
            }

            // For each indexed tuple in the input, sort it into its
            // corresponding bag based on the index.
            while (tupIter.hasNext()) {
                NullableTuple ntup = tupIter.next();
                int index = ntup.getIndex();
                Tuple copy = getValueTuple(ntup, index);

                if (numInputs == 1) {
                    // This is for multi-query merge, where numInputs is
                    // always 1 but the index (the position of the inner
                    // plan in the enclosing operator) may not be 1.
                    dbs[0].add(copy);
                } else {
                    dbs[index].add(copy);
                }
                if (reporter != null) {
                    reporter.progress();
                }
            }
        }

        // Construct the output tuple by appending the key and all the
        // bags constructed above, then return it.
        res = mTupleFactory.newTuple(numInputs + 1);
        res.set(0, key);
        int i = -1;
        for (DataBag bag : dbs) {
            i++;
            // An empty bag on an inner input means there is no output
            // tuple for this key.
            if (inner[i] && !isAccumulative() && bag.size() == 0) {
                detachInput();
                Result r = new Result();
                r.returnStatus = POStatus.STATUS_NULL;
                return r;
            }
            res.set(i + 1, bag);
        }
    }

    detachInput();
    Result r = new Result();
    r.result = res;
    r.returnStatus = POStatus.STATUS_OK;
    return r;
}
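// Illustrative, self-contained sketch of the bag-selection logic used in
// getNext() above. The "pig.cachedbag.type" property name and the two bag
// implementations are taken from the code; the helper itself (newBagFor)
// is hypothetical and exists only for illustration.
import org.apache.hadoop.conf.Configuration;
import org.apache.pig.data.BagFactory;
import org.apache.pig.data.DataBag;
import org.apache.pig.data.InternalCachedBag;

public class BagSelectionSketch {
    static DataBag newBagFor(Configuration conf, int numInputs) {
        String bagType = conf.get("pig.cachedbag.type");
        boolean useDefaultBag = bagType != null && bagType.equalsIgnoreCase("default");
        return useDefaultBag
                ? BagFactory.getInstance().newDefaultBag()  // plain in-memory bag
                : new InternalCachedBag(numInputs);         // bounds memory use, spills to disk
    }
}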