@Override
protected void setup(Context context) throws IOException, InterruptedException {
  super.setup(context);
  setupGCBins(context.getConfiguration());
  genomeAdmin = HBaseGenomeAdmin.getHBaseGenomeAdmin(context.getConfiguration());
  variationAdmin = VariationAdmin.getInstance(context.getConfiguration());

  genomeName = context.getConfiguration().get("genome");
  parentGenome = context.getConfiguration().get("parent");
  genome = genomeAdmin.getGenomeTable().getGenome(genomeName);
  // Report genomeName, not genome: genome is null on this path.
  if (genome == null) throw new IOException("Genome " + genomeName + " is missing.");

  try {
    SNVProbability table =
        (SNVProbability) variationAdmin.getTable(VariationTables.SNVP.getTableName());
    snvProbabilities = table.getProbabilities();

    SizeProbability sizeTable =
        (SizeProbability) variationAdmin.getTable(VariationTables.SIZE.getTableName());
    sizeProbabilities = sizeTable.getProbabilities();
    variationList = sizeTable.getVariationList();
    variationList.add("SNV");
  } catch (ProbabilityException e) {
    // Wrap in IOException with the cause attached; InterruptedException is reserved
    // for thread interruption and loses the original stack trace.
    throw new IOException("Failed to start mapper", e);
  }

  varTable = (VariationCountPerBin) variationAdmin.getTable(VariationTables.VPB.getTableName());
}
@Override
protected void setup(Context context) throws IOException {
  super.bindCurrentConfiguration(context.getConfiguration());

  cubeName = context.getConfiguration().get(BatchConstants.CFG_CUBE_NAME).toUpperCase();
  // Only used in the Build job, not in the Merge job.
  cuboidLevel = context.getConfiguration().getInt(BatchConstants.CFG_CUBE_CUBOID_LEVEL, 0);

  KylinConfig config = AbstractHadoopJob.loadKylinPropsAndMetadata();
  cubeDesc = CubeManager.getInstance(config).getCube(cubeName).getDescriptor();
  measuresDescs = cubeDesc.getMeasures();

  codec = new BufferedMeasureEncoder(measuresDescs);
  aggs = new MeasureAggregators(measuresDescs);

  input = new Object[measuresDescs.size()];
  result = new Object[measuresDescs.size()];
  needAggr = new boolean[measuresDescs.size()];

  if (cuboidLevel > 0) {
    for (int i = 0; i < measuresDescs.size(); i++) {
      needAggr[i] = !measuresDescs.get(i).getFunction().getMeasureType().onlyAggrInBaseCuboid();
    }
  }
}
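// A hedged sketch (not necessarily the project's actual reduce()) showing how the
// fields initialized above are typically used per key: decode each serialized
// measure record, aggregate, then encode the combined result. It assumes a
// reusable Text field named outputValue and java.nio.ByteBuffer on the classpath;
// the exact decode/encode/aggregate signatures are assumptions based on the types
// named in the setup() above.
@Override
protected void reduce(Text key, Iterable<Text> values, Context context)
    throws IOException, InterruptedException {
  aggs.reset();
  for (Text value : values) {
    codec.decode(ByteBuffer.wrap(value.getBytes(), 0, value.getLength()), input);
    if (cuboidLevel > 0) {
      aggs.aggregate(input, needAggr); // skip measures that only aggregate in the base cuboid
    } else {
      aggs.aggregate(input);
    }
  }
  aggs.collectStates(result);
  ByteBuffer buf = codec.encode(result);
  outputValue.set(buf.array(), 0, buf.position());
  context.write(key, outputValue);
}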
@Override
protected void setup(final Context context) {
  try {
    schema = Schema.fromJson(
        context.getConfiguration()
            .get(AddElementsFromHdfsJobFactory.SCHEMA)
            .getBytes(CommonConstants.UTF_8));
  } catch (final UnsupportedEncodingException e) {
    throw new SchemaException("Unable to deserialise schema from JSON");
  }

  try {
    final Class<?> elementConverterClass = Class.forName(
        context.getConfiguration().get(AccumuloStoreConstants.ACCUMULO_ELEMENT_CONVERTER_CLASS));
    elementConverter = (AccumuloElementConverter)
        elementConverterClass.getConstructor(Schema.class).newInstance(schema);
  } catch (ClassNotFoundException | InstantiationException | IllegalAccessException
      | IllegalArgumentException | InvocationTargetException | NoSuchMethodException
      | SecurityException e) {
    throw new IllegalArgumentException("Failed to create accumulo element converter from class", e);
  }
}
@SuppressWarnings("deprecation") @Override protected void cleanup(Context context) throws IOException, InterruptedException { super.cleanup(context); List<Cluster> newKMeansClusters = new ArrayList<Cluster>(); List<Cluster> newCanopyClusters = new ArrayList<Cluster>(); for (Cluster kMeansCluster : _clusters.keySet()) { Cluster canopyCluster = _kMeansToCanopyMap.get(kMeansCluster); // Set a new Cluster center Vector center = new Vector(); center.setElements(new double[kMeansCluster.getCenterVector().getElements().length]); List<Vector> vectors = new ArrayList<Vector>(); for (Vector currentVector : _clusters.get(kMeansCluster)) { vectors.add(new Vector(currentVector)); // Sums the vectors to a new vector in order to find the one that is the closest to all // others, it will be our new cluster center. for (int i = 0; i < currentVector.getElements().length; i++) center.getElements()[i] += currentVector.getElements()[i]; } // Divides the vector's elements in order to find its real location (it will be a fictive // vector) for (int i = 0; i < center.getElements().length; i++) center.getElements()[i] = center.getElements()[i] / vectors.size(); Cluster newKMeansCluster = new Cluster(center); canopyCluster.setIsCovered(newKMeansCluster.isConvergedWithOtherCluster(kMeansCluster)); newKMeansClusters.add(newKMeansCluster); newCanopyClusters.add(canopyCluster); // Adding the vectors to the new cluster center for (Vector vector : vectors) { context.write(newKMeansCluster, vector); } } Configuration conf = context.getConfiguration(); Path outPath = new Path(conf.get("centers.path")); FileSystem fs = FileSystem.get(conf); if (fs.exists(outPath)) fs.delete(outPath, true); SequenceFile.Writer writer = SequenceFile.createWriter( fs, context.getConfiguration(), outPath, Cluster.class, Cluster.class); context.getCounter(Counter.CONVERGED).setValue(0); for (int i = 0; i < newKMeansClusters.size(); i++) { writer.append(newCanopyClusters.get(i), newKMeansClusters.get(i)); if (newCanopyClusters.get(i).getIsCovered()) context.getCounter(Counter.CONVERGED).increment(1); } writer.close(); }
@Override
public void setup(Context job) {
  minkeylength = job.getConfiguration().getInt("cloudgen.minkeylength", 0);
  maxkeylength = job.getConfiguration().getInt("cloudgen.maxkeylength", 0);
  minvaluelength = job.getConfiguration().getInt("cloudgen.minvaluelength", 0);
  maxvaluelength = job.getConfiguration().getInt("cloudgen.maxvaluelength", 0);
  tableName = new Text(job.getConfiguration().get("cloudgen.tablename"));
}
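// A minimal driver-side sketch (hypothetical class and values) showing how the
// cloudgen.* keys read in the setup() above could be supplied. Only the key names
// are taken from the snippet; everything else is illustrative.
import org.apache.hadoop.conf.Configuration;

public class CloudGenDriverSketch {
  public static Configuration buildConf() {
    Configuration conf = new Configuration();
    conf.setInt("cloudgen.minkeylength", 10);     // shortest generated key
    conf.setInt("cloudgen.maxkeylength", 50);     // longest generated key
    conf.setInt("cloudgen.minvaluelength", 100);  // shortest generated value
    conf.setInt("cloudgen.maxvaluelength", 1000); // longest generated value
    conf.set("cloudgen.tablename", "cloudgen_test");
    return conf; // pass to Job.getInstance(conf, ...) in a real driver
  }
}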
protected void setup(Context context) throws IOException, InterruptedException {
  String[] termConfig = context.getConfiguration().get(XEBIA_TWITTER_TERMS).split("[\\s,]+");
  terms = new HashSet<String>(Arrays.asList(termConfig));
  outputKey = new Text();
  textSeperator = context.getConfiguration().get("mapred.textoutputformat.separator", "\t");
}
protected void setup(Context context) throws IOException, InterruptedException {
  nPartitions = 0;
  currentSize = 0;
  long outputMap = context.getConfiguration().getLong("estimatedSampleSize", 0);
  totalPartitions = context.getConfiguration().getInt("nPartitions", 0);
  if (totalPartitions <= 0) {
    // Guard against the 0 default: dividing by it would throw ArithmeticException.
    throw new IOException("nPartitions must be set to a positive value");
  }
  targetSize = outputMap / totalPartitions;
  log.debug("Target size: " + targetSize);
}
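// Hypothetical driver-side counterpart: derive estimatedSampleSize from a prior
// sampling pass and publish both keys this setup() expects. The class, method,
// and the 10% sampling assumption are illustrative, not from the source.
import org.apache.hadoop.conf.Configuration;

public class PartitionSizingSketch {
  public static void configure(Configuration conf, long sampledRecords, int reducers) {
    long estimatedSampleSize = sampledRecords * 10; // assume a 10% sample was counted
    conf.setLong("estimatedSampleSize", estimatedSampleSize);
    conf.setInt("nPartitions", reducers); // one partition per reducer
  }
}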
/** Configures the reduce plan, the POPackage operator, and the reporter thread. */
@SuppressWarnings("unchecked")
@Override
protected void setup(Context context) throws IOException, InterruptedException {
  super.setup(context);
  inIllustrator = inIllustrator(context);
  if (inIllustrator) pack = getPack(context);

  Configuration jConf = context.getConfiguration();
  SpillableMemoryManager.configure(ConfigurationUtil.toProperties(jConf));
  context.getConfiguration().set(
      PigConstants.TASK_INDEX,
      Integer.toString(context.getTaskAttemptID().getTaskID().getId()));
  sJobContext = context;
  sJobConfInternal.set(context.getConfiguration());
  sJobConf = context.getConfiguration();

  try {
    PigContext.setPackageImportList(
        (ArrayList<String>) ObjectSerializer.deserialize(jConf.get("udf.import.list")));
    pigContext = (PigContext) ObjectSerializer.deserialize(jConf.get("pig.pigContext"));

    // This attempts to fetch all of the generated code from the distributed cache
    // and resolve it.
    SchemaTupleBackend.initialize(jConf, pigContext);

    if (rp == null) rp = (PhysicalPlan) ObjectSerializer.deserialize(jConf.get("pig.reducePlan"));
    stores = PlanHelper.getPhysicalOperators(rp, POStore.class);

    if (!inIllustrator)
      pack = (POPackage) ObjectSerializer.deserialize(jConf.get("pig.reduce.package"));

    // To be removed.
    if (rp.isEmpty()) log.debug("Reduce Plan empty!");
    else {
      ByteArrayOutputStream baos = new ByteArrayOutputStream();
      rp.explain(baos);
      log.debug(baos.toString());
    }

    pigReporter = new ProgressableReporter();
    if (!(rp.isEmpty())) {
      roots = rp.getRoots().toArray(new PhysicalOperator[1]);
      leaf = rp.getLeaves().get(0);
    }

    // Get the UDF-specific context.
    MapRedUtil.setupUDFContext(jConf);
  } catch (IOException ioe) {
    String msg = "Problem while configuring reduce plan.";
    throw new RuntimeException(msg, ioe);
  }

  log.info("Aliases being processed per job phase (AliasName[line,offset]): "
      + jConf.get("pig.alias.location"));
  Utils.setDefaultTimeZone(PigMapReduce.sJobConfInternal.get());
}
@Override
public void setup(Context context) throws IOException {
  // Read lbfgs_l2_c and lbfgs_data_max_index from the job configuration.
  l2_c = context.getConfiguration().getFloat("lbfgs_l2_c", (float) 1.0);
  int max_index = context.getConfiguration().getInt("lbfgs_data_max_index", -2);
  if (max_index < 0) {
    // The -2 default would make the array size below negative.
    throw new IOException("lbfgs_data_max_index is not set");
  }
  weight = new float[max_index + 1];
  // Read weightFile from the distributed cache.
  FileOperator.readArrayLocal(new Path("weightFile"), context.getConfiguration(), weight);
}
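// FileOperator.readArrayLocal() is project-specific and not shown in this corpus.
// A hedged sketch of what such a helper could look like, assuming the cached
// weight file holds one float per line; the real file format may differ.
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public final class WeightFileReaderSketch {
  public static void readArrayLocal(Path path, Configuration conf, float[] out)
      throws IOException {
    FileSystem fs = FileSystem.getLocal(conf); // the file was localized by the distributed cache
    try (BufferedReader reader = new BufferedReader(new InputStreamReader(fs.open(path)))) {
      String line;
      int i = 0;
      while ((line = reader.readLine()) != null && i < out.length) {
        out[i++] = Float.parseFloat(line.trim()); // one weight per line (assumed)
      }
    }
  }
}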
/*
 * Setup gets called exactly once for each mapper, before map() is called the first
 * time. It is a good place to do configuration or setup that can be shared across
 * many calls to map().
 */
@Override
public void setup(Context context) {
  targetGram = context.getConfiguration().get("targetWord").toLowerCase();
  try {
    funcNum = Integer.parseInt(context.getConfiguration().get("funcNum"));
  } catch (NumberFormatException e) {
    /* Do nothing: keep the default funcNum. */
  }
}
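// Hypothetical driver fragment supplying the two keys the setup() above reads.
// The key names come from the snippet; the class name and values are examples.
import org.apache.hadoop.conf.Configuration;

public class TargetWordConfSketch {
  public static Configuration buildConf(String word, int function) {
    Configuration conf = new Configuration();
    conf.set("targetWord", word);                    // e.g. "hadoop"; lower-cased in setup()
    conf.set("funcNum", Integer.toString(function)); // parsed with Integer.parseInt in setup()
    return conf;
  }
}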
@Override
protected void setup(Context context) throws IOException, InterruptedException {
  super.setup(context);
  batchID = context.getConfiguration().get(ConfigConstants.BATCH_ID);
  commandPath = context.getConfiguration().get(HADOOP_CONVERTER_PATH);
  String outputFolder = context.getConfiguration().get(HADOOP_CONVERTER_OUTPUT_PATH);
  resultExtention = context.getConfiguration().get(HADOOP_CONVERTER_OUTPUT_EXTENSION_PATH);
  batchFolder = new File(outputFolder, batchID);
  // mkdirs() returns false when the directory could not be created; fail fast
  // instead of hitting an obscure error later in the job.
  if (!batchFolder.mkdirs() && !batchFolder.isDirectory()) {
    throw new IOException("Unable to create batch folder " + batchFolder);
  }
}
@Override
public void setup(Context context) throws IOException {
  // Read lbfgs_data_max_index from the job configuration.
  int max_index = context.getConfiguration().getInt("lbfgs_data_max_index", -2);
  if (max_index < 0) {
    // The -2 default would make the array sizes below negative.
    throw new IOException("lbfgs_data_max_index is not set");
  }
  weight = new float[max_index + 1];
  gradient = new double[max_index + 1];
  // Read weightFile from the distributed cache.
  FileOperator.readArrayLocal(new Path("weightFile"), context.getConfiguration(), weight);
  System.out.println("more memory lr");
}
@Override
protected void setup(Context ctx) throws IOException, InterruptedException {
  similarity = ClassUtils.instantiateAs(
      ctx.getConfiguration().get(SIMILARITY_CLASSNAME), VectorSimilarityMeasure.class);
  norms = new RandomAccessSparseVector(Integer.MAX_VALUE);
  nonZeroEntries = new RandomAccessSparseVector(Integer.MAX_VALUE);
  maxValues = new RandomAccessSparseVector(Integer.MAX_VALUE);
  threshold = Double.parseDouble(ctx.getConfiguration().get(THRESHOLD));
}
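// Hedged driver-side sketch populating the two keys this setup() reads. It takes
// the job's own SIMILARITY_CLASSNAME / THRESHOLD constants as arguments rather
// than guessing their values; the cosine measure class name is only an example
// of a VectorSimilarityMeasure implementation.
import org.apache.hadoop.conf.Configuration;

public class SimilarityJobConfSketch {
  public static Configuration buildConf(String similarityKey, String thresholdKey) {
    Configuration conf = new Configuration();
    // Any VectorSimilarityMeasure implementation; cosine is just an example.
    conf.set(similarityKey,
        "org.apache.mahout.math.hadoop.similarity.cooccurrence.measures.CosineSimilarity");
    conf.set(thresholdKey, String.valueOf(0.9)); // pairs scoring below this are pruned
    return conf;
  }
}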
@Override
public void setup(Context context) throws TableNotFoundException, IOException {
  taskId = context.getTaskAttemptID().getTaskID().getId();
  schemaFileLocation = context.getConfiguration().get(SCHEMA_FILE_LOCATION_CONF);
  FileSystem fs = FileSystem.get(context.getConfiguration());
  columns = generateColumnsFromSchemaFile(fs, schemaFileLocation);
  delimiter = context.getConfiguration().get(DELIMITER_CONF);
  rowKeyColumn = context.getConfiguration().get(ROW_KEY_COLUMN_CONF);
}
@SuppressWarnings("deprecation") private void loadDepartmentsMap(URI uriUncompressedFile, Context context) throws IOException { FileSystem dfs = FileSystem.get(context.getConfiguration()); try { deptMapReader = new MapFile.Reader(dfs, uriUncompressedFile.toString(), context.getConfiguration()); } catch (Exception e) { context.getCounter(MYCOUNTER.LOAD_MAP_ERROR).increment(1); e.printStackTrace(); } }
@Override
protected void setup(Context mapperContext) throws IOException, InterruptedException {
  String mapperAppContextXML = mapperContext.getConfiguration().get(MAPPER_APP_CONTEXT_XML);

  // Processor batch size.
  this.processorBatchSize =
      mapperContext.getConfiguration().getInt(MAPPER_DATA_PROCESSOR_BATCH_SIZE, 10000);

  // Our record length.
  this.myRecordLength = mapperContext.getConfiguration().getInt(MAPPER_RECORD_LENGTH, -1);
  if (myRecordLength == -1) {
    throw new IOException(
        "USPSDataFileMapper must have the config property " + MAPPER_RECORD_LENGTH + " set > 0");
  }

  // Per-task records-processed counter.
  this.myTotalProcessedCounter = ((MapContext) mapperContext).getCounter(
      USPS_COUNTERS_GROUP_NAME,
      MAPPER_RECORDS_PROCESSED_COUNTER + mapperContext.getTaskAttemptID().toString());

  // Overall job records-processed counter.
  this.overallProcessedCounter = ((MapContext) mapperContext)
      .getCounter(USPS_COUNTERS_GROUP_NAME, OVERALL_RECORDS_PROCESSED_COUNTER);

  LOG.info("USPSDataFileMapper configured: skipCopyrights=" + this.skipCopyrights
      + " processorBatchSize=" + processorBatchSize
      + " myRecordLength=" + this.myRecordLength
      + " mapperAppContextXML=" + mapperAppContextXML);

  // Initialize the shared static beans once per JVM (double-checked locking).
  if (!initialized) {
    synchronized (synchLock) {
      if (!initialized) {
        ApplicationContext context = new ClassPathXmlApplicationContext(mapperAppContextXML);
        classFinder = (ClassFinder) context.getBean("classFinder");
        uspsUtils = (USPSUtils) context.getBean("uspsUtils");
        idGenerator = (USPSIdGenerator) context.getBean("uspsIdGenerator");
        lineParser = (USPSRecordParser) context.getBean("uspsLineParser");
        dataProcessor = (USPSDataProcessor) context.getBean("uspsDataProcessor");
        initialized = true;
      }
    }
  }
}
@Override
protected void setup(Context context) throws IOException, InterruptedException {
  super.setup(context);
  filter = PcapFilters
      .valueOf(context.getConfiguration().get(PcapFilterConfigurator.PCAP_FILTER_NAME_CONF))
      .create();
  filter.configure(context.getConfiguration());
  start = Long.parseUnsignedLong(context.getConfiguration().get(START_TS_CONF));
  end = Long.parseUnsignedLong(context.getConfiguration().get(END_TS_CONF));
}
private static void WriteNewCentroids(Context context) throws IOException {
  FileSystem fs = FileSystem.get(context.getConfiguration());
  Path nextCFile = new Path(context.getConfiguration().get("NEXTCFILE"));
  DataOutputStream d = new DataOutputStream(fs.create(nextCFile, false));
  // try-with-resources ensures the stream is closed even if a write fails.
  try (BufferedWriter writer = new BufferedWriter(new OutputStreamWriter(d))) {
    for (String centroid : newCentroids) {
      writer.write(centroid + "\n");
    }
  }
}
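// Hypothetical counterpart for the next k-means iteration: read the centroid
// strings back out of NEXTCFILE. Only the key name and the one-centroid-per-line
// layout are taken from the writer above; the class and method are illustrative.
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class CentroidReaderSketch {
  public static List<String> readCentroids(Configuration conf) throws IOException {
    FileSystem fs = FileSystem.get(conf);
    Path cFile = new Path(conf.get("NEXTCFILE"));
    List<String> centroids = new ArrayList<>();
    try (BufferedReader reader = new BufferedReader(new InputStreamReader(fs.open(cFile)))) {
      String line;
      while ((line = reader.readLine()) != null) {
        centroids.add(line); // one centroid string per line, as written above
      }
    }
    return centroids;
  }
}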
@Override
public void setup(Context context) throws IOException, InterruptedException {
  super.setup(context);
  String fieldStrs = context.getConfiguration().get("higo.index.fields");
  split = context.getConfiguration().get("higo.column.split", split);
  String custfields = context.getConfiguration().get("higo.column.custfields", "");
  usedthedate = context.getConfiguration().getBoolean("higo.column.userthedate", usedthedate);
  this.thedate = null;

  if (usedthedate) {
    InputSplit inputSplit = context.getInputSplit();
    Path filepath = ((FileSplit) inputSplit).getPath();
    String inputbase = context.getConfiguration().get("higo.input.base");
    this.thedate = JobIndexPublic.parseThedate(new Path(inputbase), filepath);
    System.out.println("thedatepath: " + thedate + "@" + filepath.toString() + "@" + inputbase);
  }

  if (custfields == null || custfields.isEmpty()) {
    // Parse the full schema: each entry is fieldName:type:...:isStore.
    String[] fieldslist = fieldStrs.split(",");
    this.fields = new String[fieldslist.length];
    this.isDate = new Boolean[fieldslist.length];
    this.isString = new Boolean[fieldslist.length];
    this.isStore = new Boolean[fieldslist.length];
    for (int i = 0; i < fieldslist.length; i++) {
      String[] fieldSchema = fieldslist[i].split(":");
      String fieldName = fieldSchema[0].trim().toLowerCase();
      String type = fieldSchema[1];
      this.isStore[i] = Boolean.valueOf(fieldSchema[3]);
      this.fields[i] = fieldName;
      this.isDate[i] = type.equalsIgnoreCase("tdate");
      this.isString[i] = type.equalsIgnoreCase("string");
    }
  } else {
    // Custom field list: plain names, treated as non-stored string columns.
    String[] fieldslist = custfields.split(",");
    this.fields = new String[fieldslist.length];
    this.isDate = new Boolean[fieldslist.length];
    this.isString = new Boolean[fieldslist.length];
    this.isStore = new Boolean[fieldslist.length];
    for (int i = 0; i < fieldslist.length; i++) {
      this.isStore[i] = Boolean.FALSE;
      this.fields[i] = fieldslist[i];
      this.isDate[i] = false;
      this.isString[i] = true;
    }
  }
}
@Override
public void cleanup(Context context) throws IOException {
  Configuration conf = context.getConfiguration();
  String taskId = conf.get("mapred.task.id");
  String path = conf.get("PageRankMassPath");
  Preconditions.checkNotNull(taskId);
  Preconditions.checkNotNull(path);

  // Write to a file the amount of PageRank mass we've seen in this reducer.
  FileSystem fs = FileSystem.get(conf);
  FSDataOutputStream out = fs.create(new Path(path + "/" + taskId), false);
  out.writeFloat(totalMass);
  out.close();
}
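// Hedged driver-side sketch of the usual companion step: after the job finishes,
// sum the per-reducer mass files under PageRankMassPath, e.g. to redistribute the
// missing mass from dangling nodes in the next iteration. The one-float-per-file
// layout comes from the cleanup() above; the class and method are illustrative.
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class MassSummerSketch {
  public static float sumMass(Configuration conf, String massPath) throws IOException {
    FileSystem fs = FileSystem.get(conf);
    float total = 0.0f;
    for (FileStatus status : fs.listStatus(new Path(massPath))) {
      try (FSDataInputStream in = fs.open(status.getPath())) {
        total += in.readFloat(); // each reducer wrote exactly one float
      }
    }
    return total;
  }
}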
@Override
protected void setup(Context context) throws IOException, InterruptedException {
  type = context.getConfiguration().get("type");
  FileSplit fileSplit = (FileSplit) context.getInputSplit();
  filePath = fileSplit.getPath();
  dmPlatyRuleDAO = new DMPlatyRuleDAOImpl<String, Integer>();
  if (isLocalRunMode(context)) {
    String dmMobilePlayFilePath =
        context.getConfiguration().get(ConstantEnum.DM_MOBILE_PLATY_FILEPATH.name());
    dmPlatyRuleDAO.parseDMObj(new File(dmMobilePlayFilePath));
  } else {
    // Cluster mode: read the file by its local name, presumably the symlink
    // created when it was localized via the distributed cache.
    File dmMobilePlayFile = new File(ConstantEnum.DM_MOBILE_PLATY.name().toLowerCase());
    dmPlatyRuleDAO.parseDMObj(dmMobilePlayFile);
  }
}
@Override
protected void reduce(IntWritable row, Iterable<VectorWritable> partialVectors, Context ctx)
    throws IOException, InterruptedException {
  Vector partialVector = Vectors.merge(partialVectors);

  if (row.get() == NORM_VECTOR_MARKER) {
    Vectors.write(partialVector, normsPath, ctx.getConfiguration());
  } else if (row.get() == MAXVALUE_VECTOR_MARKER) {
    Vectors.write(partialVector, maxValuesPath, ctx.getConfiguration());
  } else if (row.get() == NUM_NON_ZERO_ENTRIES_VECTOR_MARKER) {
    Vectors.write(partialVector, numNonZeroEntriesPath, ctx.getConfiguration(), true);
  } else {
    ctx.write(row, new VectorWritable(partialVector));
  }
}
// Initialize the remaining-pairs counter for each decade.
@Override
public void setup(Context context) {
  int pairsNum = Integer.parseInt(context.getConfiguration().get("pairsPerDecade", "-1"));
  for (int i = 0; i < 12; i++) {
    pairsPerDecade[i] = pairsNum;
  }
}
public void reduce(CoordsTimestampTuple tuple, Iterable<IntWritable> timestamps, Context context)
    throws IOException, InterruptedException {
  Configuration conf = context.getConfiguration();
  int numberOfRecords = conf.getInt("number.of.records", 0);
  int lastTimestamp = 0;
  double gridDensity = 1.0;

  Iterator<IntWritable> it = timestamps.iterator();
  if (it.hasNext()) {
    lastTimestamp = it.next().get();
  }
  while (it.hasNext()) {
    int timestamp = it.next().get();
    gridDensity = updatedGridDensity(gridDensity, lastTimestamp, timestamp);
    lastTimestamp = timestamp;
  }
  // Account for the gap between the last observed timestamp and the end of the data.
  if (lastTimestamp < numberOfRecords) {
    gridDensity = updatedGridDensity(gridDensity, lastTimestamp, numberOfRecords);
  }

  String outputValue = new DecimalFormat("#0.00000", SYMBOLS).format(gridDensity);
  context.write(tuple.getCoords(), new Text(outputValue));
}
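// updatedGridDensity() is not shown in this corpus. In density-grid stream
// clustering (D-Stream style), a grid's density decays exponentially with the
// time gap between updates; a plausible sketch under that assumption, with
// LAMBDA a hypothetical decay constant in (0, 1). The real helper may also add
// a contribution for each newly arrived record.
private static final double LAMBDA = 0.998;

private static double updatedGridDensity(double density, int lastTimestamp, int timestamp) {
  return Math.pow(LAMBDA, timestamp - lastTimestamp) * density; // pure exponential decay
}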
/* (non-Javadoc)
 * @see org.apache.hadoop.mapreduce.Mapper#setup(org.apache.hadoop.mapreduce.Mapper.Context)
 */
protected void setup(Context context) throws IOException, InterruptedException {
  Configuration conf = context.getConfiguration();
  if (conf.getBoolean("debug.on", false)) {
    LOG.setLevel(Level.DEBUG);
    System.out.println("in debug mode");
  }

  fieldDelim = conf.get("field.delim", ",");
  subFieldDelim = conf.get("sub.field.delim", ":");

  String ratingFilePrefix = conf.get("utp.rating.file.prefix", "rating");
  isRatingFileSplit =
      ((FileSplit) context.getInputSplit()).getPath().getName().startsWith(ratingFilePrefix);
  String ratingStatFilePrefix = conf.get("utp.rating.stat.file.prefix", "stat");
  isRatingStatFileSplit =
      ((FileSplit) context.getInputSplit()).getPath().getName().startsWith(ratingStatFilePrefix);

  linearCorrelation = conf.getBoolean("utp.correlation.linear", true);
  int ratingTimeWindow = conf.getInt("utp.rating.time.window.hour", -1);
  ratingTimeCutoff = ratingTimeWindow > 0
      ? System.currentTimeMillis() / 1000 - ratingTimeWindow * 60L * 60L
      : -1;
  minInputRating = conf.getInt("utp.min.input.rating", -1);
  minCorrelation = conf.getInt("utp.min.correlation", -1);
  userRatingWithContext = conf.getBoolean("utp.user.rating.with.context", false);
  LOG.info("isRatingFileSplit:" + isRatingFileSplit);
}
@Override
public void setup(Context context) throws IOException {
  Configuration conf = context.getConfiguration();
  Path cMemMatrixPath = new Path(conf.get(RECONSTRUCTIONMATRIX));
  Path dMemMatrixPath = new Path(conf.get(MATRIXY2X));
  Path zmPath = new Path(conf.get(ZMPATH));
  Path meanPath = new Path(conf.get(YMPATH));
  int inMemMatrixNumRows = conf.getInt(YCOLS, 0);
  int inMemMatrixNumCols = conf.getInt(XCOLS, 0);
  ERR_SAMPLE_RATE = conf.getFloat(ERRSAMPLERATE, 1);

  Path tmpPath = cMemMatrixPath.getParent();
  DistributedRowMatrix distMatrix =
      new DistributedRowMatrix(cMemMatrixPath, tmpPath, inMemMatrixNumRows, inMemMatrixNumCols);
  distMatrix.setConf(conf);
  matrixC = PCACommon.toDenseMatrix(distMatrix);

  distMatrix =
      new DistributedRowMatrix(dMemMatrixPath, tmpPath, inMemMatrixNumRows, inMemMatrixNumCols);
  distMatrix.setConf(conf);
  matrixY2X = PCACommon.toDenseMatrix(distMatrix);

  try {
    zm = PCACommon.toDenseVector(zmPath, conf);
    ym = PCACommon.toDenseVector(meanPath, conf);
  } catch (IOException e) {
    e.printStackTrace();
  }

  xiCt = new DenseVector(matrixC.numRows());
  sumOfErr = new DenseVector(matrixC.numRows());
  sumOfyi = new DenseVector(matrixC.numRows());
  sumOfyc = new DenseVector(matrixC.numRows());
}
protected void setup(Context context) throws IOException, InterruptedException {
  Configuration conf = context.getConfiguration();
  this.reduceSleepCount = conf.getInt(REDUCE_SLEEP_COUNT, reduceSleepCount);
  this.reduceSleepDuration =
      reduceSleepCount == 0 ? 0 : conf.getLong(REDUCE_SLEEP_TIME, 100) / reduceSleepCount;
  vertexName = conf.get(org.apache.tez.mapreduce.hadoop.MRJobConfig.VERTEX_NAME);
}
public void setup(Context context) {
  Configuration conf = context.getConfiguration();
  minlat = conf.getDouble(MINLAT, 0);
  minlon = conf.getDouble(MINLON, 0);
  maxlat = conf.getDouble(MAXLAT, 0);
  maxlon = conf.getDouble(MAXLON, 0);
}
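// Hypothetical driver fragment setting the bounding box this setup() reads.
// MINLAT etc. are assumed to be String configuration-key constants on the
// enclosing class, so they are passed in rather than guessed; the coordinates
// are examples only.
import org.apache.hadoop.conf.Configuration;

public class BoundingBoxConfSketch {
  public static void setBoundingBox(Configuration conf, String minLatKey, String minLonKey,
      String maxLatKey, String maxLonKey) {
    conf.setDouble(minLatKey, 40.477);  // e.g. a box around New York City
    conf.setDouble(minLonKey, -74.259);
    conf.setDouble(maxLatKey, 40.917);
    conf.setDouble(maxLonKey, -73.700);
  }
}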
@Override
protected void setup(Context context) throws IOException, InterruptedException {
  // Get the source index (employee = 1, salary = 2), added to the configuration
  // by the driver and keyed by the split's file name.
  FileSplit fsFileSplit = (FileSplit) context.getInputSplit();
  intSrcIndex = Integer.parseInt(context.getConfiguration().get(fsFileSplit.getPath().getName()));

  // Initialize the list of attribute positions to emit, based on intSrcIndex.
  if (intSrcIndex == 1) { // employee
    lstRequiredAttribList.add(1); // FName
    lstRequiredAttribList.add(2); // LName
    lstRequiredAttribList.add(3); // Gender
  } else { // salary
    lstRequiredAttribList.add(1); // Salary
    lstRequiredAttribList.add(3); // Effective-to-date (9999-01-01 indicates the current salary)
  }
}
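// Hypothetical driver fragment matching the lookup above: the source index for
// each input file is stored in the configuration under the file's own name.
// The file names here are examples only.
import org.apache.hadoop.conf.Configuration;

public class SourceIndexConfSketch {
  public static void tagInputFiles(Configuration conf) {
    conf.set("employee.csv", "1"); // employee records
    conf.set("salary.csv", "2");   // salary records
  }
}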
/** Fix a stripe. */
@Override
public void map(LongWritable key, Text fileText, Context context)
    throws IOException, InterruptedException {
  BlockFixerHelper helper = new BlockFixerHelper(context.getConfiguration());
  String fileStr = fileText.toString();
  LOG.info("fixing " + fileStr);
  Path file = new Path(fileStr);
  try {
    boolean fixed = helper.fixFile(file, context);
    if (fixed) {
      context.getCounter(Counter.FILES_SUCCEEDED).increment(1L);
    } else {
      context.getCounter(Counter.FILES_NOACTION).increment(1L);
    }
  } catch (Exception e) {
    LOG.error(StringUtils.stringifyException(e));
    // Report the file as failed.
    context.getCounter(Counter.FILES_FAILED).increment(1L);
    String outkey = fileStr;
    String outval = "failed";
    context.write(new Text(outkey), new Text(outval));
  }
  context.progress();
}