public Writable call(Class<?> protocol, Writable param, long receivedTime) throws IOException { try { Invocation call = (Invocation) param; if (verbose) log("Call: " + call); Method method = protocol.getMethod(call.getMethodName(), call.getParameterClasses()); method.setAccessible(true); int qTime = (int) (System.currentTimeMillis() - receivedTime); long startNanoTime = System.nanoTime(); Object value = method.invoke(instance, call.getParameters()); long processingMicroTime = (System.nanoTime() - startNanoTime) / 1000; if (LOG.isDebugEnabled()) { LOG.debug( "Served: " + call.getMethodName() + " queueTime (millisec)= " + qTime + " processingTime (microsec)= " + processingMicroTime); } rpcMetrics.rpcQueueTime.inc(qTime); rpcMetrics.rpcProcessingTime.inc(processingMicroTime); MetricsTimeVaryingRate m = (MetricsTimeVaryingRate) rpcMetrics.registry.get(call.getMethodName()); if (m == null) { try { m = new MetricsTimeVaryingRate(call.getMethodName(), rpcMetrics.registry); } catch (IllegalArgumentException iae) { // the metric has already been registered; re-fetch the handle LOG.debug("Error registering " + call.getMethodName(), iae); m = (MetricsTimeVaryingRate) rpcMetrics.registry.get(call.getMethodName()); } } // record call time in microseconds m.inc(processingMicroTime); if (verbose) log("Return: " + value); return new ObjectWritable(method.getReturnType(), value); } catch (InvocationTargetException e) { Throwable target = e.getTargetException(); if (target instanceof IOException) { throw (IOException) target; } else { IOException ioe = new IOException(target.toString()); ioe.setStackTrace(target.getStackTrace()); throw ioe; } } catch (Throwable e) { if (!(e instanceof IOException)) { LOG.error("Unexpected throwable object ", e); } IOException ioe = new IOException(e.toString()); ioe.setStackTrace(e.getStackTrace()); throw ioe; } }
public Object invoke(Object proxy, Method method, Object[] args) throws Throwable { final boolean logDebug = LOG.isDebugEnabled(); long startTime = 0; if (logDebug) { startTime = System.currentTimeMillis(); } ObjectWritable value = null; try { value = (ObjectWritable) client.call( new Invocation(method, args), getAddress(), protocol, ticket, rpcTimeout); } catch (RemoteException re) { throw re; } catch (ConnectException ce) { needCheckDnsUpdate = true; throw ce; } catch (NoRouteToHostException nrhe) { needCheckDnsUpdate = true; throw nrhe; } catch (PortUnreachableException pue) { needCheckDnsUpdate = true; throw pue; } catch (UnknownHostException uhe) { needCheckDnsUpdate = true; throw uhe; } if (logDebug) { long callTime = System.currentTimeMillis() - startTime; LOG.debug("Call: " + method.getName() + " " + callTime); } return value.get(); }
/** * Get a proxy connection to a remote server * * @param protocol protocol class * @param clientVersion client version * @param addr remote address * @param conf configuration to use * @param timeout time in milliseconds before giving up * @param rpcTimeout timeout for each RPC * @return the proxy * @throws IOException if the far end throws a RemoteException */ static <T extends VersionedProtocol> ProtocolProxy<T> waitForProtocolProxy( Class<T> protocol, long clientVersion, InetSocketAddress addr, Configuration conf, long timeout, int rpcTimeout) throws IOException { long startTime = System.currentTimeMillis(); UserGroupInformation ugi = null; try { ugi = UserGroupInformation.login(conf); } catch (LoginException le) { throw new RuntimeException("Couldn't login!", le); } IOException ioe; while (true) { try { return getProtocolProxy( protocol, clientVersion, addr, ugi, conf, NetUtils.getDefaultSocketFactory(conf), rpcTimeout); } catch (ConnectException se) { // namenode has not been started LOG.info("Server at " + addr + " not available yet, Zzzzz..."); ioe = se; } catch (SocketTimeoutException te) { // namenode is busy LOG.info("Problem connecting to server: " + addr); ioe = te; } // check if timed out if (System.currentTimeMillis() - timeout >= startTime) { throw ioe; } // wait for retry try { Thread.sleep(1000); } catch (InterruptedException ie) { // IGNORE } } }
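// Usage sketch (illustrative, not from the original source) for the retry helper
// above. ClientProtocol, its versionID constant, and the namenode host/port are
// assumed names; the two arguments show the distinction between the overall wait
// (timeout) and the per-RPC timeout (rpcTimeout).
static ClientProtocol connectWithRetries(Configuration conf) throws IOException {
  InetSocketAddress addr = new InetSocketAddress("namenode.example.com", 8020);
  // Retries once a second on ConnectException/SocketTimeoutException for up to
  // 60 seconds, then rethrows the last failure; each individual RPC gets 10s.
  ProtocolProxy<ClientProtocol> proxy = waitForProtocolProxy(
      ClientProtocol.class, ClientProtocol.versionID, addr, conf, 60 * 1000L, 10 * 1000);
  return proxy.getProxy();
}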
/* * (non-Javadoc) * * @see * org.apache.mina.core.service.IoHandlerAdapter#sessionIdle(org.apache. * mina.core.session.IoSession, org.apache.mina.core.session.IdleStatus) */ public void sessionIdle(IoSession session, IdleStatus status) throws Exception { if (IdleStatus.BOTH_IDLE.equals(status)) { Long l = (Long) session.getAttribute("last"); if (l != null && System.currentTimeMillis() - l > 60 * 1000) { session.close(true); } } }
private synchronized InetSocketAddress getAddress() { if (needCheckDnsUpdate && address != null && address.getHostName() != null && System.currentTimeMillis() - this.timeLastDnsCheck > MIN_DNS_CHECK_INTERVAL_MSEC) { try { InetSocketAddress newAddr = NetUtils.resolveAddress(address); if (newAddr != null) { LOG.info("DNS change: " + newAddr); address = newAddr; } } finally { this.timeLastDnsCheck = System.currentTimeMillis(); } } needCheckDnsUpdate = false; return address; }
/** * Queries the tables to extract salient information (feature structure name, reference details) * for a transgene query */ @SuppressWarnings({"unchecked", "unused"}) public List<Record> getAll() { String query = "select * from fc_mapping"; LOG.debug("Query : " + query); long startTime = System.currentTimeMillis(); List<Record> results = this.jdbcTemplate.query( query, new Object[] {}, (RowMapper) new FCMappingResultSetExtractor()); return results; }
static { // Try to load native hadoop library and set fallback flag appropriately LOG.debug("Trying to load the custom-built native-hadoop library..."); try { System.loadLibrary("hadoop"); LOG.info("Loaded the native-hadoop library"); nativeCodeLoaded = true; } catch (Throwable t) { // Ignore failure to load LOG.debug("Failed to load native-hadoop with error: " + t); LOG.debug("java.library.path=" + System.getProperty("java.library.path")); } if (!nativeCodeLoaded) { LOG.warn( "Unable to load native-hadoop library for your platform... " + "using builtin-java classes where applicable"); } }
/** * Get the archive entries in classpath as an array of Path * * @param conf Configuration that contains the classpath setting * @return the archive classpath entries, or null if none are set */ public static Path[] getArchiveClassPaths(Configuration conf) { String classpath = conf.get("mapred.job.classpath.archives"); if (classpath == null) return null; ArrayList list = Collections.list(new StringTokenizer(classpath, System.getProperty("path.separator"))); Path[] paths = new Path[list.size()]; for (int i = 0; i < list.size(); i++) { paths[i] = new Path((String) list.get(i)); } return paths; }
/** * Adds a file path to the current set of classpath entries. It adds the file to the cache as well. * * @param file Path of the file to be added * @param conf Configuration that contains the classpath setting */ public static void addFileToClassPath(Path file, Configuration conf) throws IOException { String classpath = conf.get("mapred.job.classpath.files"); conf.set( "mapred.job.classpath.files", classpath == null ? file.toString() : classpath + System.getProperty("path.separator") + file.toString()); URI uri = file.makeQualified(file.getFileSystem(conf)).toUri(); addCacheFile(uri, conf); }
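// Hedged demo of the helper above: repeated calls accumulate entries in
// "mapred.job.classpath.files", joined by the platform path separator. The
// paths are made up, and this assumes the default FileSystem can qualify them.
public static void classPathDemo(Configuration conf) throws IOException {
  addFileToClassPath(new Path("/user/demo/a.jar"), conf);
  addFileToClassPath(new Path("/user/demo/b.jar"), conf);
  // On a ':'-separated platform this prints "/user/demo/a.jar:/user/demo/b.jar"
  System.out.println(conf.get("mapred.job.classpath.files"));
}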
/** * main() has some simple utility methods. * * @param argv Command line parameters. * @exception Exception if the filesystem does not exist. */ public static void main(String[] argv) throws Exception { StringUtils.startupShutdownMessage(SecondaryNameNode.class, argv, LOG); Configuration tconf = new Configuration(); if (argv.length >= 1) { SecondaryNameNode secondary = new SecondaryNameNode(tconf); int ret = secondary.processArgs(argv); System.exit(ret); } // Create a never-ending daemon Daemon checkpointThread = new Daemon(new SecondaryNameNode(tconf)); checkpointThread.start(); }
private static URI addArchiveToClassPathHelper(Path archive, Configuration conf) throws IOException { String classpath = conf.get("mapred.job.classpath.archives"); // the scheme/authority use ':' as a separator, so put the unqualified path in the classpath String archivePath = archive.toUri().getPath(); conf.set( "mapred.job.classpath.archives", classpath == null ? archivePath : classpath + System.getProperty("path.separator") + archivePath); return archive.makeQualified(archive.getFileSystem(conf)).toUri(); }
VerProtocolImpl[] getSupportedProtocolVersions(RPC.RpcKind rpcKind, String protocolName) { VerProtocolImpl[] resultk = new VerProtocolImpl[getProtocolImplMap(rpcKind).size()]; int i = 0; for (Map.Entry<ProtoNameVer, ProtoClassProtoImpl> pv : getProtocolImplMap(rpcKind).entrySet()) { if (pv.getKey().protocol.equals(protocolName)) { resultk[i++] = new VerProtocolImpl(pv.getKey().version, pv.getValue()); } } if (i == 0) { return null; } VerProtocolImpl[] result = new VerProtocolImpl[i]; System.arraycopy(resultk, 0, result, 0, i); return result; }
private static long deleteOldTempFiles(File file) { long size = 0; if (file.isDirectory()) { File[] files = file.listFiles(); if (files != null) { for (int i = 0; i < files.length; i++) { long modified = files[i].lastModified(); long age = System.currentTimeMillis() - modified; float elapsedmins = age / (60 * 1000F); if (elapsedmins > DELETE_WAIT) files[i].delete(); try { Thread.sleep(10); } catch (InterruptedException e) { /* ignore and keep scanning */ } } return size; } else return 0; } else return file.length(); }
// // The main work loop // public void run() { // // Poll the Namenode (once every 5 minutes) to find the size of the // pending edit log. // long period = 5 * 60; // 5 minutes long lastCheckpointTime = 0; if (checkpointPeriod < period) { period = checkpointPeriod; } while (shouldRun) { try { Thread.sleep(1000 * period); } catch (InterruptedException ie) { // do nothing } if (!shouldRun) { break; } try { long now = System.currentTimeMillis(); long size = namenode.getEditLogSize(); if (size >= checkpointSize || now >= lastCheckpointTime + 1000 * checkpointPeriod) { doCheckpoint(); lastCheckpointTime = now; } } catch (IOException e) { LOG.error("Exception in doCheckpoint: "); LOG.error(StringUtils.stringifyException(e)); e.printStackTrace(); checkpointImage.imageDigest = null; } catch (Throwable e) { LOG.error("Throwable Exception in doCheckpoint: "); LOG.error(StringUtils.stringifyException(e)); e.printStackTrace(); Runtime.getRuntime().exit(-1); } } }
private void logOut(String browserId) throws Exception { // ttt2 the right way to do it is to go through all the sessions of the current browser, which // would require a new field and a new index; // not sure if it's worth it, but this would work: A logs in, forgets to log out, B deletes the // cookies, logs in, A sees B is logged in, then B // restores the cookies and uses A's account if (browserId == null) { return; } List<LoginInfo> loginInfos = loginInfoDb.getLoginsForBrowser(browserId); long expireTarget = System.currentTimeMillis() - Utils.ONE_DAY; for (LoginInfo loginInfo : loginInfos) { if (loginInfo.expiresOn <= expireTarget) { LOG.info(String.format("LoginInfo %s is far enough in the past", loginInfo)); } else { LOG.info(String.format("Logging out: %s", loginInfo)); loginInfoDb.updateExpireTime(browserId, loginInfo.sessionId, expireTarget); } } }
public void testcheckOutputSpecsForbidRecordCompression() throws IOException { Job job = Job.getInstance(new Configuration(), "testcheckOutputSpecsForbidRecordCompression"); FileSystem fs = FileSystem.getLocal(job.getConfiguration()); Path outputdir = new Path(System.getProperty("test.build.data", "/tmp") + "/output"); fs.delete(outputdir, true); // Without an output path, FileOutputFormat.checkOutputSpecs will throw // InvalidJobConfException FileOutputFormat.setOutputPath(job, outputdir); // SequenceFileAsBinaryOutputFormat doesn't support record compression // It should throw an exception when checked by checkOutputSpecs SequenceFileAsBinaryOutputFormat.setCompressOutput(job, true); SequenceFileAsBinaryOutputFormat.setOutputCompressionType(job, CompressionType.BLOCK); try { new SequenceFileAsBinaryOutputFormat().checkOutputSpecs(job); } catch (Exception e) { fail( "Block compression should be allowed for " + "SequenceFileAsBinaryOutputFormat: Caught " + e.getClass().getName()); } SequenceFileAsBinaryOutputFormat.setOutputCompressionType(job, CompressionType.RECORD); try { new SequenceFileAsBinaryOutputFormat().checkOutputSpecs(job); fail("Record compression should not be allowed for " + "SequenceFileAsBinaryOutputFormat"); } catch (InvalidJobConfException ie) { // expected } catch (Exception e) { fail( "Expected " + InvalidJobConfException.class.getName() + " but caught " + e.getClass().getName()); } }
/** * Normally sets the path and a few attributes that the JSPs are likely to need. Also verifies the * login information. If necessary, just redirects to the login page. * * @param target * @param request * @param httpServletResponse * @param secured * @return true if the request is already handled so the .jsp shouldn't get called * @throws Exception */ private boolean prepareForJspGet( String target, Request request, HttpServletResponse httpServletResponse, boolean secured) throws Exception { LoginInfo.SessionInfo sessionInfo = UserHelpers.getSessionInfo(request); LOG.info( String.format( "hndl - %s ; %s; %s ; %s", target, request.getPathInfo(), request.getMethod(), secured ? "secured" : "not secured")); String path = request.getUri().getDecodedPath(); boolean redirectToLogin = path.equals(PATH_LOGOUT); LoginInfo loginInfo = null; if (sessionInfo.isNull()) { redirectToLogin = true; LOG.info("Null session info. Logging in again."); } else { loginInfo = loginInfoDb.get( sessionInfo.browserId, sessionInfo.sessionId); // ttt2 use a cache, to avoid going to DB if (loginInfo == null || loginInfo.expiresOn < System.currentTimeMillis()) { LOG.info("Session has expired. Logging in again. Info: " + loginInfo); redirectToLogin = true; } } if (!path.equals(PATH_LOGIN) && !path.equals(PATH_SIGNUP) && !path.equals(PATH_ERROR)) { if (redirectToLogin) { // ttt2 perhaps store URI, to return to it after login logOut(sessionInfo.browserId); addLoginParams(request, loginInfo); httpServletResponse.sendRedirect(PATH_LOGIN); return true; } User user = userDb.get(loginInfo.userId); if (user == null) { WebUtils.redirectToError("Unknown user", request, httpServletResponse); return true; } if (!user.active) { WebUtils.redirectToError("Account is not active", request, httpServletResponse); return true; } request.setAttribute(VAR_FEED_DB, feedDb); request.setAttribute(VAR_USER_DB, userDb); request.setAttribute(VAR_ARTICLE_DB, articleDb); request.setAttribute(VAR_READ_ARTICLES_COLL_DB, readArticlesCollDb); request.setAttribute(VAR_USER, user); request.setAttribute(VAR_LOGIN_INFO, loginInfo); MultiMap<String> params = new MultiMap<>(); params.put(PARAM_PATH, path); request.setParameters(params); } if (path.equals(PATH_LOGIN)) { addLoginParams(request, loginInfo); } return false; }
public void testFormat() throws Exception { JobConf job = new JobConf(conf); FileSystem fs = FileSystem.getLocal(conf); Path dir = new Path(System.getProperty("test.build.data", ".") + "/mapred"); Path file = new Path(dir, "test.seq"); Reporter reporter = Reporter.NULL; int seed = new Random().nextInt(); // LOG.info("seed = "+seed); Random random = new Random(seed); fs.delete(dir, true); FileInputFormat.setInputPaths(job, dir); // for a variety of lengths for (int length = 0; length < MAX_LENGTH; length += random.nextInt(MAX_LENGTH / 10) + 1) { // LOG.info("creating; entries = " + length); // create a file with length entries SequenceFile.Writer writer = SequenceFile.createWriter(fs, conf, file, IntWritable.class, BytesWritable.class); try { for (int i = 0; i < length; i++) { IntWritable key = new IntWritable(i); byte[] data = new byte[random.nextInt(10)]; random.nextBytes(data); BytesWritable value = new BytesWritable(data); writer.append(key, value); } } finally { writer.close(); } // try splitting the file in a variety of sizes InputFormat<IntWritable, BytesWritable> format = new SequenceFileInputFormat<IntWritable, BytesWritable>(); IntWritable key = new IntWritable(); BytesWritable value = new BytesWritable(); for (int i = 0; i < 3; i++) { int numSplits = random.nextInt(MAX_LENGTH / (SequenceFile.SYNC_INTERVAL / 20)) + 1; // LOG.info("splitting: requesting = " + numSplits); InputSplit[] splits = format.getSplits(job, numSplits); // LOG.info("splitting: got = " + splits.length); // check each split BitSet bits = new BitSet(length); for (int j = 0; j < splits.length; j++) { RecordReader<IntWritable, BytesWritable> reader = format.getRecordReader(splits[j], job, reporter); try { int count = 0; while (reader.next(key, value)) { // if (bits.get(key.get())) { // LOG.info("splits["+j+"]="+splits[j]+" : " + // key.get()); // LOG.info("@"+reader.getPos()); // } assertFalse("Key in multiple partitions.", bits.get(key.get())); bits.set(key.get()); count++; } // LOG.info("splits["+j+"]="+splits[j]+" count=" + // count); } finally { reader.close(); } } assertEquals("Some keys in no partition.", length, bits.cardinality()); } } }
public void testBinary() throws IOException, InterruptedException { Configuration conf = new Configuration(); Job job = new Job(conf); Path outdir = new Path(System.getProperty("test.build.data", "/tmp"), "outseq"); Random r = new Random(); long seed = r.nextLong(); r.setSeed(seed); FileOutputFormat.setOutputPath(job, outdir); SequenceFileAsBinaryOutputFormat.setSequenceFileOutputKeyClass(job, IntWritable.class); SequenceFileAsBinaryOutputFormat.setSequenceFileOutputValueClass(job, DoubleWritable.class); SequenceFileAsBinaryOutputFormat.setCompressOutput(job, true); SequenceFileAsBinaryOutputFormat.setOutputCompressionType(job, CompressionType.BLOCK); BytesWritable bkey = new BytesWritable(); BytesWritable bval = new BytesWritable(); TaskAttemptContext context = MapReduceTestUtil.createDummyMapTaskAttemptContext(job.getConfiguration()); OutputFormat<BytesWritable, BytesWritable> outputFormat = new SequenceFileAsBinaryOutputFormat(); OutputCommitter committer = outputFormat.getOutputCommitter(context); committer.setupJob(job); RecordWriter<BytesWritable, BytesWritable> writer = outputFormat.getRecordWriter(context); IntWritable iwritable = new IntWritable(); DoubleWritable dwritable = new DoubleWritable(); DataOutputBuffer outbuf = new DataOutputBuffer(); LOG.info("Creating data by SequenceFileAsBinaryOutputFormat"); try { for (int i = 0; i < RECORDS; ++i) { iwritable = new IntWritable(r.nextInt()); iwritable.write(outbuf); bkey.set(outbuf.getData(), 0, outbuf.getLength()); outbuf.reset(); dwritable = new DoubleWritable(r.nextDouble()); dwritable.write(outbuf); bval.set(outbuf.getData(), 0, outbuf.getLength()); outbuf.reset(); writer.write(bkey, bval); } } finally { writer.close(context); } committer.commitTask(context); committer.commitJob(job); InputFormat<IntWritable, DoubleWritable> iformat = new SequenceFileInputFormat<IntWritable, DoubleWritable>(); int count = 0; r.setSeed(seed); SequenceFileInputFormat.setInputPaths(job, outdir); LOG.info("Reading data by SequenceFileInputFormat"); for (InputSplit split : iformat.getSplits(job)) { RecordReader<IntWritable, DoubleWritable> reader = iformat.createRecordReader(split, context); MapContext<IntWritable, DoubleWritable, BytesWritable, BytesWritable> mcontext = new MapContextImpl<IntWritable, DoubleWritable, BytesWritable, BytesWritable>( job.getConfiguration(), context.getTaskAttemptID(), reader, null, null, MapReduceTestUtil.createDummyReporter(), split); reader.initialize(split, mcontext); try { int sourceInt; double sourceDouble; while (reader.nextKeyValue()) { sourceInt = r.nextInt(); sourceDouble = r.nextDouble(); iwritable = reader.getCurrentKey(); dwritable = reader.getCurrentValue(); assertEquals( "Keys don't match: " + "*" + iwritable.get() + ":" + sourceInt + "*", sourceInt, iwritable.get()); assertTrue( "Vals don't match: " + "*" + dwritable.get() + ":" + sourceDouble + "*", Double.compare(dwritable.get(), sourceDouble) == 0); ++count; } } finally { reader.close(); } } assertEquals("Some records not found", RECORDS, count); }
@Override public final void run() { try { // before preparing the job localize // all the archives TaskAttemptID taskid = t.getTaskID(); LocalDirAllocator lDirAlloc = new LocalDirAllocator("mapred.local.dir"); File jobCacheDir = null; if (conf.getJar() != null) { jobCacheDir = new File(new Path(conf.getJar()).getParent().toString()); } File workDir = new File( lDirAlloc .getLocalPathToRead( TaskTracker.getJobCacheSubdir() + Path.SEPARATOR + t.getJobID() + Path.SEPARATOR + t.getTaskID() + Path.SEPARATOR + MRConstants.WORKDIR, conf) .toString()); URI[] archives = DistributedCache.getCacheArchives(conf); URI[] files = DistributedCache.getCacheFiles(conf); FileStatus fileStatus; FileSystem fileSystem; Path localPath; String baseDir; if ((archives != null) || (files != null)) { if (archives != null) { String[] archivesTimestamps = DistributedCache.getArchiveTimestamps(conf); Path[] p = new Path[archives.length]; for (int i = 0; i < archives.length; i++) { fileSystem = FileSystem.get(archives[i], conf); fileStatus = fileSystem.getFileStatus(new Path(archives[i].getPath())); String cacheId = DistributedCache.makeRelative(archives[i], conf); String cachePath = TaskTracker.getCacheSubdir() + Path.SEPARATOR + cacheId; if (lDirAlloc.ifExists(cachePath, conf)) { localPath = lDirAlloc.getLocalPathToRead(cachePath, conf); } else { localPath = lDirAlloc.getLocalPathForWrite(cachePath, fileStatus.getLen(), conf); } baseDir = localPath.toString().replace(cacheId, ""); p[i] = DistributedCache.getLocalCache( archives[i], conf, new Path(baseDir), fileStatus, true, Long.parseLong(archivesTimestamps[i]), new Path(workDir.getAbsolutePath()), false); } DistributedCache.setLocalArchives(conf, stringifyPathArray(p)); } if ((files != null)) { String[] fileTimestamps = DistributedCache.getFileTimestamps(conf); Path[] p = new Path[files.length]; for (int i = 0; i < files.length; i++) { fileSystem = FileSystem.get(files[i], conf); fileStatus = fileSystem.getFileStatus(new Path(files[i].getPath())); String cacheId = DistributedCache.makeRelative(files[i], conf); String cachePath = TaskTracker.getCacheSubdir() + Path.SEPARATOR + cacheId; if (lDirAlloc.ifExists(cachePath, conf)) { localPath = lDirAlloc.getLocalPathToRead(cachePath, conf); } else { localPath = lDirAlloc.getLocalPathForWrite(cachePath, fileStatus.getLen(), conf); } baseDir = localPath.toString().replace(cacheId, ""); p[i] = DistributedCache.getLocalCache( files[i], conf, new Path(baseDir), fileStatus, false, Long.parseLong(fileTimestamps[i]), new Path(workDir.getAbsolutePath()), false); } DistributedCache.setLocalFiles(conf, stringifyPathArray(p)); } Path localTaskFile = new Path(t.getJobFile()); FileSystem localFs = FileSystem.getLocal(conf); localFs.delete(localTaskFile, true); OutputStream out = localFs.create(localTaskFile); try { conf.writeXml(out); } finally { out.close(); } } if (!prepare()) { return; } String sep = System.getProperty("path.separator"); StringBuffer classPath = new StringBuffer(); // start with same classpath as parent process classPath.append(System.getProperty("java.class.path")); classPath.append(sep); if (!workDir.mkdirs()) { if (!workDir.isDirectory()) { LOG.fatal("Mkdirs failed to create " + workDir.toString()); } } String jar = conf.getJar(); if (jar != null) { // if the job jar exists, add its lib/, classes/ and the cache dir itself to the classpath File[] libs = new File(jobCacheDir, "lib").listFiles(); if (libs != null) { for (int i = 0; i < libs.length; i++) { classPath.append(sep); // add libs from jar to classpath classPath.append(libs[i]); } } classPath.append(sep); 
classPath.append(new File(jobCacheDir, "classes")); classPath.append(sep); classPath.append(jobCacheDir); } // include the user specified classpath // archive paths Path[] archiveClasspaths = DistributedCache.getArchiveClassPaths(conf); if (archiveClasspaths != null && archives != null) { Path[] localArchives = DistributedCache.getLocalCacheArchives(conf); if (localArchives != null) { for (int i = 0; i < archives.length; i++) { for (int j = 0; j < archiveClasspaths.length; j++) { if (archives[i].getPath().equals(archiveClasspaths[j].toString())) { classPath.append(sep); classPath.append(localArchives[i].toString()); } } } } } // file paths Path[] fileClasspaths = DistributedCache.getFileClassPaths(conf); if (fileClasspaths != null && files != null) { Path[] localFiles = DistributedCache.getLocalCacheFiles(conf); if (localFiles != null) { for (int i = 0; i < files.length; i++) { for (int j = 0; j < fileClasspaths.length; j++) { if (files[i].getPath().equals(fileClasspaths[j].toString())) { classPath.append(sep); classPath.append(localFiles[i].toString()); } } } } } classPath.append(sep); classPath.append(workDir); // Build exec child jvm args. Vector<String> vargs = new Vector<String>(8); File jvm = // use same jvm as parent new File(new File(System.getProperty("java.home"), "bin"), "java"); vargs.add(jvm.toString()); // Add child (task) java-vm options. // // The following symbols if present in mapred.child.java.opts value are // replaced: // + @taskid@ is interpolated with value of TaskID. // Other occurrences of @ will not be altered. // // Example with multiple arguments and substitutions, showing // jvm GC logging, and start of a passwordless JVM JMX agent so one can // connect with jconsole and the like to watch child memory, threads // and get thread dumps. // // <property> // <name>mapred.child.java.opts</name> // <value>-verbose:gc -Xloggc:/tmp/@taskid@.gc \ // -Dcom.sun.management.jmxremote.authenticate=false \ // -Dcom.sun.management.jmxremote.ssl=false \ // </value> // </property> // String javaOpts = conf.get("mapred.child.java.opts", "-Xmx200m"); javaOpts = javaOpts.replace("@taskid@", taskid.toString()); String[] javaOptsSplit = javaOpts.split(" "); // Add java.library.path; necessary for loading native libraries. // // 1. To support native-hadoop library i.e. libhadoop.so, we add the // parent processes' java.library.path to the child. // 2. We also add the 'cwd' of the task to its java.library.path to help // users distribute native libraries via the DistributedCache. // 3. The user can also specify extra paths to be added to the // java.library.path via mapred.child.java.opts. // String libraryPath = System.getProperty("java.library.path"); if (libraryPath == null) { libraryPath = workDir.getAbsolutePath(); } else { libraryPath += sep + workDir; } boolean hasUserLDPath = false; for (int i = 0; i < javaOptsSplit.length; i++) { if (javaOptsSplit[i].startsWith("-Djava.library.path=")) { javaOptsSplit[i] += sep + libraryPath; hasUserLDPath = true; break; } } if (!hasUserLDPath) { vargs.add("-Djava.library.path=" + libraryPath); } for (int i = 0; i < javaOptsSplit.length; i++) { vargs.add(javaOptsSplit[i]); } // add java.io.tmpdir given by mapred.child.tmp String tmp = conf.get("mapred.child.tmp", "./tmp"); Path tmpDir = new Path(tmp); // if temp directory path is not absolute // prepend it with workDir. 
if (!tmpDir.isAbsolute()) { tmpDir = new Path(workDir.toString(), tmp); } FileSystem localFs = FileSystem.getLocal(conf); if (!localFs.mkdirs(tmpDir) && !localFs.getFileStatus(tmpDir).isDir()) { throw new IOException("Mkdirs failed to create " + tmpDir.toString()); } vargs.add("-Djava.io.tmpdir=" + tmpDir.toString()); // Add classpath. vargs.add("-classpath"); vargs.add(classPath.toString()); // Set up the log4j properties long logSize = TaskLog.getTaskLogLength(conf); vargs.add( "-Dhadoop.log.dir=" + new File(System.getProperty("hadoop.log.dir")).getAbsolutePath()); vargs.add("-Dhadoop.root.logger=INFO,TLA"); vargs.add("-Dhadoop.tasklog.taskid=" + taskid); vargs.add("-Dhadoop.tasklog.totalLogFileSize=" + logSize); if (conf.getProfileEnabled()) { if (conf.getProfileTaskRange(t.isMapTask()).isIncluded(t.getPartition())) { File prof = TaskLog.getTaskLogFile(taskid, TaskLog.LogName.PROFILE); vargs.add(String.format(conf.getProfileParams(), prof.toString())); } } // Add main class and its arguments vargs.add(Child.class.getName()); // main of Child // pass umbilical address InetSocketAddress address = tracker.getTaskTrackerReportAddress(); vargs.add(address.getAddress().getHostAddress()); vargs.add(Integer.toString(address.getPort())); vargs.add(taskid.toString()); // pass task identifier String pidFile = null; if (tracker.isTaskMemoryManagerEnabled()) { pidFile = lDirAlloc .getLocalPathForWrite( (TaskTracker.getPidFilesSubdir() + Path.SEPARATOR + taskid), this.conf) .toString(); } // set memory limit using ulimit if feasible and necessary ... String[] ulimitCmd = Shell.getUlimitMemoryCommand(conf); List<String> setup = null; if (ulimitCmd != null) { setup = new ArrayList<String>(); for (String arg : ulimitCmd) { setup.add(arg); } } // Set up the redirection of the task's stdout and stderr streams File stdout = TaskLog.getTaskLogFile(taskid, TaskLog.LogName.STDOUT); File stderr = TaskLog.getTaskLogFile(taskid, TaskLog.LogName.STDERR); stdout.getParentFile().mkdirs(); tracker.getTaskTrackerInstrumentation().reportTaskLaunch(taskid, stdout, stderr); Map<String, String> env = new HashMap<String, String>(); StringBuffer ldLibraryPath = new StringBuffer(); ldLibraryPath.append(workDir.toString()); String oldLdLibraryPath = System.getenv("LD_LIBRARY_PATH"); if (oldLdLibraryPath != null) { ldLibraryPath.append(sep); ldLibraryPath.append(oldLdLibraryPath); } env.put("LD_LIBRARY_PATH", ldLibraryPath.toString()); jvmManager.launchJvm( this, jvmManager.constructJvmEnv( setup, vargs, stdout, stderr, logSize, workDir, env, pidFile, conf)); synchronized (lock) { while (!done) { lock.wait(); } } tracker.getTaskTrackerInstrumentation().reportTaskEnd(t.getTaskID()); if (exitCodeSet) { if (!killed && exitCode != 0) { if (exitCode == 65) { tracker.getTaskTrackerInstrumentation().taskFailedPing(t.getTaskID()); } throw new IOException("Task process exited with nonzero status of " + exitCode + "."); } } } catch (FSError e) { LOG.fatal("FSError", e); try { tracker.fsError(t.getTaskID(), e.getMessage()); } catch (IOException ie) { LOG.fatal(t.getTaskID() + " reporting FSError", ie); } } catch (Throwable throwable) { LOG.warn(t.getTaskID() + " Child Error", throwable); ByteArrayOutputStream baos = new ByteArrayOutputStream(); throwable.printStackTrace(new PrintStream(baos)); try { tracker.reportDiagnosticInfo(t.getTaskID(), baos.toString()); } catch (IOException e) { LOG.warn(t.getTaskID() + " Reporting Diagnostics", e); } } finally { try { URI[] archives = DistributedCache.getCacheArchives(conf); 
URI[] files = DistributedCache.getCacheFiles(conf); if (archives != null) { for (int i = 0; i < archives.length; i++) { DistributedCache.releaseCache(archives[i], conf); } } if (files != null) { for (int i = 0; i < files.length; i++) { DistributedCache.releaseCache(files[i], conf); } } } catch (IOException ie) { LOG.warn("Error releasing caches: cache files might not have been cleaned up"); } tracker.reportTaskFinished(t.getTaskID(), false); if (t.isMapTask()) { tracker.addFreeMapSlot(); } else { tracker.addFreeReduceSlot(); } } }
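// Small illustration of the @taskid@ interpolation performed on
// mapred.child.java.opts in the run() method above; the option string and
// attempt id here are made up for demonstration.
static String expandTaskId() {
  String javaOpts = "-Xmx200m -verbose:gc -Xloggc:/tmp/@taskid@.gc";
  String taskid = "attempt_200707121733_0003_m_000005_0";
  // Only the @taskid@ token is replaced; other '@' characters are untouched.
  return javaOpts.replace("@taskid@", taskid);
}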
/** * Service. * * @param o the received buffer * @param session the session */ void service(IoBuffer o, IoSession session) { try { // System.out.println(o.remaining() + "/" + o.capacity()); session.setAttribute("last", System.currentTimeMillis()); SimpleIoBuffer in = (SimpleIoBuffer) session.getAttribute("buf"); if (in == null) { in = SimpleIoBuffer.create(4096); session.setAttribute("buf", in); } byte[] data = new byte[o.remaining()]; o.get(data); in.append(data); // log.debug("recv: " + data.length + ", " + // session.getRemoteAddress()); while (in.length() > 5) { in.mark(); /** * Byte 1: head of the package<br> * bit 7-6: "01", indicator of MDC<br> * bit 5: encrypt indicator, "0": no; "1": encrypted<br> * bit 4: zip indicator, "0": no, "1": zipped<br> * bit 0-3: reserved<br> * Byte 2-5: length of data<br> * Byte[…]: data array<br> */ byte head = in.read(); /** test the head indicator; if not correct, close the session */ if ((head & 0xC0) != 0x40) { log.info("flag is not correct! flag:" + head + ",from: " + session.getRemoteAddress()); session.close(true); return; } int len = in.getInt(); if (len <= 0 || len > MAX_SIZE) { log.error( "mdcconnector.Wrong length: " + len + "/" + MAX_SIZE + " - " + session.getRemoteAddress()); session.close(true); break; } if (in.length() < len) { in.reset(); break; } else { // do it // log.info("stub.package.size: " + len); byte[] b = new byte[len]; in.read(b); if (TConn.DEBUG) { log.debug("recv: " + Bean.toString(b)); } /** test the zip flag */ if ((head & 0x10) > 0) { b = Zip.unzip(b); } final TConn d = (TConn) session.getAttribute("conn"); if (d != null) { /** test the encrypted flag */ if ((head & 0x20) > 0) { b = DES.decode(b, d.deskey); } final byte[] bb = b; /** test if the packet is for mdc or app */ new WorkerTask() { @Override public void onExecute() { d.process(bb); } }.schedule(0); session.setAttribute("last", System.currentTimeMillis()); } } } } catch (Throwable e) { log.error("closing stub: " + session.getRemoteAddress(), e); session.close(true); } }
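// For reference, a minimal encoder sketch matching the frame layout the decoder
// above expects: a head byte with bits 7-6 = "01" (0x40), bit 5 = encrypted,
// bit 4 = zipped, then a four-byte length and the payload. This plain-Java
// sketch assumes the length is big-endian (ByteBuffer's default, which
// SimpleIoBuffer.getInt is presumed to mirror) and leaves the zip/DES branches out.
static byte[] encodeFrame(byte[] payload) {
  java.nio.ByteBuffer buf = java.nio.ByteBuffer.allocate(5 + payload.length);
  buf.put((byte) 0x40);       // "01" in bits 7-6; not encrypted, not zipped
  buf.putInt(payload.length); // Byte 2-5: length of data
  buf.put(payload);           // data array
  return buf.array();
}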
public TempFile(File file) { this.file = file; created = System.currentTimeMillis(); }
public boolean getOld() { long age = System.currentTimeMillis() - created; float elapsedmins = age / (60 * 1000F); return elapsedmins > DELETE_WAIT; }
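// Illustrative cleanup sweep built on getOld() above; the collection of
// TempFile instances is assumed, and the 'file' field is assumed accessible
// (e.g. package-private) from the caller.
static void purgeOldTempFiles(java.util.List<TempFile> tempFiles) {
  java.util.Iterator<TempFile> it = tempFiles.iterator();
  while (it.hasNext()) {
    TempFile t = it.next();
    if (t.getOld()) { // older than DELETE_WAIT minutes
      t.file.delete();
      it.remove();
    }
  }
}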
private void handleLoginPost( Request request, HttpServletResponse httpServletResponse, boolean secured) throws Exception { String userId = request.getParameter(PARAM_USER_ID); String password = request.getParameter(PARAM_PASSWORD); String rememberAccountStr = request.getParameter(PARAM_REMEMBER_ACCOUNT); boolean rememberAccount = Boolean.parseBoolean(rememberAccountStr); LoginInfo.SessionInfo sessionInfo = UserHelpers.getSessionInfo(request); logOut(sessionInfo.browserId); User user = userDb.get(userId); if (user == null) { WebUtils.redirectToError("User " + userId + " not found", request, httpServletResponse); return; } if (!user.checkPassword(password)) { WebUtils.redirectToError("Invalid password", request, httpServletResponse); return; } if (!user.active) { WebUtils.redirectToError( "Account for User " + userId + " needs to be activated", request, httpServletResponse); return; } LOG.info("Logged in user " + userId); sessionInfo.sessionId = null; if (sessionInfo.browserId == null) { sessionInfo.browserId = getRandomId(); } else { for (LoginInfo loginInfo : loginInfoDb.getLoginsForBrowser(sessionInfo.browserId)) { if (userId.equals(loginInfo.userId)) { sessionInfo.sessionId = loginInfo.sessionId; break; } } } long expireOn = System.currentTimeMillis() + Config.getConfig().loginExpireInterval; if (sessionInfo.sessionId == null) { sessionInfo.sessionId = getRandomId(); Config config = Config.getConfig(); loginInfoDb.add( new LoginInfo( sessionInfo.browserId, sessionInfo.sessionId, userId, expireOn, rememberAccount, config.defaultStyle, config.defaultItemsPerPage, config.defaultFeedDateFormat)); LOG.info(String.format("Logging in with a new session. User: %s", user)); } else { loginInfoDb.updateExpireTime(sessionInfo.browserId, sessionInfo.sessionId, expireOn); LOG.info(String.format("Logging in with an existing session. User: %s", user)); } WebUtils.saveCookies( httpServletResponse, secured, sessionInfo.browserId, sessionInfo.sessionId); httpServletResponse.sendRedirect("/"); }