@Override
public boolean preHandle(HttpServletRequest request, HttpServletResponse response, Object handler)
    throws Exception {
  Object usrLogin = request.getSession().getAttribute(Constants.SESS_USER_KEY);
  String sevPath = request.getServletPath();
  logger.info("ServletPath: " + sevPath);
  String rootPath = "";
  if (!StringUtil.isBlankOrNull(sevPath)
      && !sevPath.equals("/" + showLogin)
      && !sevPath.equals("/" + login)) {
    // build a relative prefix ("../" per extra path segment) back to the application root
    int len = sevPath.split("/").length;
    for (int i = 2; i < len; i++) {
      rootPath += "../";
    }
    if (usrLogin == null) {
      // not logged in: emit a script that sends the top window to the login page,
      // then stop the handler chain instead of letting the request proceed
      StringBuilder toLoginScript = new StringBuilder();
      toLoginScript.append("<script type=\"text/javascript\">");
      toLoginScript.append("top.window.location=\"" + rootPath + showLogin + "\"");
      toLoginScript.append("</script>");
      logger.info(toLoginScript.toString());
      PrintWriter writer = response.getWriter();
      writer.write(toLoginScript.toString());
      writer.flush();
      return false;
    }
  }
  return true;
}
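// Illustrative sketch (not from the original source) of how an interceptor like the one above can
// be registered, assuming a Java-config Spring MVC setup; the class names and path patterns here
// are hypothetical.
@Configuration
public class WebConfig implements WebMvcConfigurer {
  @Override
  public void addInterceptors(InterceptorRegistry registry) {
    // run the login check for every request except static resources (example patterns only)
    registry.addInterceptor(new LoginInterceptor())
        .addPathPatterns("/**")
        .excludePathPatterns("/static/**");
  }
}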
void processPage(String pageUri) {
  try {
    log.info("Calling to Google: " + pageUri);
    String inputString = UrlUtils.getURL(pageUri);
    log.info(inputString);
    // pull each result URL out of the <cite> elements in the result page
    Pattern cite = Pattern.compile("<cite>(.*?)</cite>");
    Matcher matcher = cite.matcher(inputString);
    while (matcher.find()) {
      String newURI =
          "http://"
              + matcher
                  .group(1)
                  .replaceAll("\"", "")
                  .replaceAll("<b>|</b>", "")
                  .replaceAll("[ \\t\\n\\r]+", "")
                  .trim();
      log.info(newURI);
      profiles.addDeviceIfNotAlreadyKnown(newURI);
    }
  } catch (Exception e) {
    log.error(e.toString(), e);
    // abort the whole crawl on any failure; use a non-zero status so the failure is visible to callers
    System.exit(1);
  }
}
// TODO check if minimum required fields are set?
@Override
public int sendMail() {
  int sendResult = -999;
  if (mail != null) {
    LOGGER.trace(
        "Sending Mail: \nfrom = '"
            + mail.getFrom()
            + "'\n"
            + "to='"
            + mail.getTo()
            + "'\n"
            + "\n"
            + mail);
    MailSenderPluginApi mailPlugin =
        (MailSenderPluginApi) getContext().getWiki().getPluginApi("mailsender", getContext());
    sendResult = mailPlugin.sendMail(mail, getMailConfiguration());
    LOGGER.info(
        "Sent Mail from '"
            + mail.getFrom()
            + "' to '"
            + mail.getTo()
            + "'. Result was '"
            + sendResult
            + "'. Time: "
            + Calendar.getInstance().getTimeInMillis());
  } else {
    LOGGER.info(
        "Mail Object is null. Send result was '"
            + sendResult
            + "'. Time: "
            + Calendar.getInstance().getTimeInMillis());
  }
  return sendResult;
}
/** Generates JMS messages */
public void generateAndSendMessages(final CacheFlushMessageJms cacheFlushMessageJms) {
  try {
    final String valueJMSMessage = xmlMapper.getXmlMapper().writeValueAsString(cacheFlushMessageJms);
    if (StringUtils.isNotEmpty(valueJMSMessage)) {
      jmsTemplate.send(
          new MessageCreator() {
            public Message createMessage(Session session) throws JMSException {
              TextMessage message = session.createTextMessage(valueJMSMessage);
              if (logger.isDebugEnabled()) {
                logger.debug("Sending JMS message: " + valueJMSMessage);
              }
              return message;
            }
          });
    } else {
      logger.warn("Cache Flush Message Jms Message is empty");
    }
  } catch (JmsException e) {
    logger.error("Exception during create/send message process", e);
  } catch (JsonProcessingException e) {
    logger.error("Exception during build message process", e);
  }
}
/** Searches for the node or nodes that match the path element for the given parent node */ private List<NodeRef> getDirectDescendents(NodeRef pathRootNodeRef, String pathElement) { if (logger.isDebugEnabled()) { logger.debug( "Getting direct descendents: \n" + " Path Root: " + pathRootNodeRef + "\n" + " Path Element: " + pathElement); } List<NodeRef> results = null; // if this contains no wildcards, then we can fasttrack it if (!WildCard.containsWildcards(pathElement)) { // a specific name is required NodeRef foundNodeRef = fileFolderService.searchSimple(pathRootNodeRef, pathElement); if (foundNodeRef == null) { results = Collections.emptyList(); } else { results = Collections.singletonList(foundNodeRef); } } else { // escape for the Lucene syntax search String escapedPathElement = SearchLanguageConversion.convertCifsToLucene(pathElement); // do the lookup List<org.alfresco.service.cmr.model.FileInfo> childInfos = fileFolderService.search(pathRootNodeRef, escapedPathElement, false); // convert to noderefs results = new ArrayList<NodeRef>(childInfos.size()); for (org.alfresco.service.cmr.model.FileInfo info : childInfos) { results.add(info.getNodeRef()); } } // done return results; }
@Override
public Cacheable getBlock(
    BlockCacheKey key, boolean caching, boolean repeat, boolean updateCacheMetrics) {
  CacheablePair contentBlock = backingMap.get(key);
  if (contentBlock == null) {
    if (!repeat && updateCacheMetrics) stats.miss(caching);
    return null;
  }
  if (updateCacheMetrics) stats.hit(caching);

  try {
    contentBlock.recentlyAccessed.set(System.nanoTime());
    synchronized (contentBlock) {
      // a null serializedData inside the lock means the entry was evicted concurrently
      if (contentBlock.serializedData == null) {
        LOG.warn("Concurrent eviction of " + key);
        return null;
      }
      return contentBlock.deserializer.deserialize(
          contentBlock.serializedData.asReadOnlyBuffer());
    }
  } catch (Throwable t) {
    LOG.error("Deserializer threw an exception. This may indicate a bug.", t);
    return null;
  }
}
private void postNext() throws HibernateException, SQLException { this.hasNext = rs.next(); if (!hasNext) { log.debug("exhausted results"); close(); } else { log.debug("retrieving next results"); if (single) { nextResult = types[0].nullSafeGet(rs, names[0], sess, null); } else { Object[] nextResults = new Object[types.length]; for (int i = 0; i < types.length; i++) { nextResults[i] = types[i].nullSafeGet(rs, names[i], sess, null); } nextResult = nextResults; } if (holderConstructor != null) { try { if (nextResult == null || !nextResult.getClass().isArray()) { nextResult = holderConstructor.newInstance(new Object[] {nextResult}); } else { nextResult = holderConstructor.newInstance((Object[]) nextResult); } } catch (Exception e) { throw new QueryException( "Could not instantiate: " + holderConstructor.getDeclaringClass(), e); } } } }
/**
 * Given the map taskAttemptID, returns the TaskAttemptInfo. Deconstructs the map's taskAttemptID
 * and looks up the jobStory with the parts taskType, id of task, id of task attempt.
 *
 * @param taskTracker tasktracker
 * @param taskAttemptID task-attempt
 * @return TaskAttemptInfo for the map task-attempt
 */
@SuppressWarnings("deprecation")
private synchronized TaskAttemptInfo getMapTaskAttemptInfo(
    TaskTracker taskTracker, TaskAttemptID taskAttemptID) {
  assert (taskAttemptID.isMap());
  JobID jobid = (JobID) taskAttemptID.getJobID();
  // compare by value, not by reference
  assert (jobid.equals(getJobID()));

  // Get splits for the TaskAttempt
  RawSplit split = splits[taskAttemptID.getTaskID().getId()];
  int locality = getClosestLocality(taskTracker, split);

  TaskID taskId = taskAttemptID.getTaskID();
  if (!taskId.isMap()) {
    assert false : "Task " + taskId + " is not a MAP task";
  }
  TaskAttemptInfo taskAttemptInfo =
      jobStory.getMapTaskAttemptInfoAdjusted(taskId.getId(), taskAttemptID.getId(), locality);

  if (LOG.isDebugEnabled()) {
    LOG.debug(
        "get an attempt: "
            + taskAttemptID.toString()
            + ", state="
            + taskAttemptInfo.getRunState()
            + ", runtime="
            + ((taskId.isMap())
                ? taskAttemptInfo.getRuntime()
                : ((ReduceTaskAttemptInfo) taskAttemptInfo).getReduceRuntime()));
  }
  return taskAttemptInfo;
}
private void setScriptsNames(ToolBoxDTO toolBoxDTO, String barDir)
    throws BAMToolboxDeploymentException {
  String analyticsDir = barDir + File.separator + BAMToolBoxDeployerConstants.SCRIPTS_DIR;
  if (new File(analyticsDir).exists()) {
    ArrayList<String> scriptNames = getFilesInDirectory(analyticsDir);
    // drop the analyzers properties file from the script list; use an iterator so the
    // removal is safe while iterating
    for (Iterator<String> it = scriptNames.iterator(); it.hasNext(); ) {
      if (it.next().equalsIgnoreCase(BAMToolBoxDeployerConstants.ANALYZERS_PROPERTIES_FILE)) {
        it.remove();
        break;
      }
    }
    if (scriptNames.isEmpty()) {
      toolBoxDTO.setScriptsParentDirectory(null);
      log.warn("No scripts available in the specified directory");
    } else {
      toolBoxDTO.setScriptsParentDirectory(analyticsDir);
      toolBoxDTO.setScriptNames(scriptNames);
      setCronForAnalyticScripts(toolBoxDTO, analyticsDir);
    }
  } else {
    log.warn("No Analytics found for toolbox: " + toolBoxDTO.getName());
  }
}
static ClientDatanodeProtocolPB createClientDatanodeProtocolProxy( DatanodeID datanodeid, Configuration conf, int socketTimeout, boolean connectToDnViaHostname, LocatedBlock locatedBlock) throws IOException { final String dnAddr = datanodeid.getIpcAddr(connectToDnViaHostname); InetSocketAddress addr = NetUtils.createSocketAddr(dnAddr); if (LOG.isDebugEnabled()) { LOG.debug("Connecting to datanode " + dnAddr + " addr=" + addr); } // Since we're creating a new UserGroupInformation here, we know that no // future RPC proxies will be able to re-use the same connection. And // usages of this proxy tend to be one-off calls. // // This is a temporary fix: callers should really achieve this by using // RPC.stopProxy() on the resulting object, but this is currently not // working in trunk. See the discussion on HDFS-1965. Configuration confWithNoIpcIdle = new Configuration(conf); confWithNoIpcIdle.setInt( CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY, 0); UserGroupInformation ticket = UserGroupInformation.createRemoteUser(locatedBlock.getBlock().getLocalBlock().toString()); ticket.addToken(locatedBlock.getBlockToken()); return createClientDatanodeProtocolProxy( addr, ticket, confWithNoIpcIdle, NetUtils.getDefaultSocketFactory(conf), socketTimeout); }
protected Set<Class> processJarUrl(URL url, String basepath, Class clazz) throws IOException {
  Set<Class> set = new HashSet<Class>();
  // strip the leading "file:" prefix and the trailing "!..." jar-entry part of the URL
  String path = url.getFile().substring(5, url.getFile().indexOf("!"));
  JarFile jar = new JarFile(path);
  try {
    for (Enumeration<JarEntry> entries = jar.entries(); entries.hasMoreElements(); ) {
      JarEntry entry = entries.nextElement();
      if (entry.getName().startsWith(basepath) && entry.getName().endsWith(".class")) {
        try {
          String name = entry.getName();
          // Ignore anonymous
          // TODO RM what about the other anonymous classes like $2, $3 ?
          if (name.contains("$1")) {
            continue;
          }
          URL classURL = classLoader.getResource(name);
          ClassReader reader = new ClassReader(classURL.openStream());
          ClassScanner visitor = getScanner(clazz);
          reader.accept(visitor, 0);
          if (visitor.isMatch()) {
            Class c = loadClass(visitor.getClassName());
            if (c != null) {
              set.add(c);
            }
          }
        } catch (Exception e) {
          if (logger.isDebugEnabled()) {
            Throwable t = ExceptionHelper.getRootException(e);
            logger.debug(String.format("%s: caused by: %s", e.toString(), t.toString()));
          }
        }
      }
    }
  } finally {
    // the JarFile holds an open file handle; always release it
    jar.close();
  }
  return set;
}
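// Minimal JDK-only sketch of the same scan without the ASM class visitor: list the .class entries
// under a package prefix inside a jar. Uses only java.util.jar; the method name and parameters are
// illustrative.
static List<String> listClassEntries(String jarPath, String packagePrefix) throws IOException {
  List<String> names = new ArrayList<String>();
  JarFile jar = new JarFile(jarPath);
  try {
    for (Enumeration<JarEntry> entries = jar.entries(); entries.hasMoreElements(); ) {
      JarEntry entry = entries.nextElement();
      if (entry.getName().startsWith(packagePrefix) && entry.getName().endsWith(".class")) {
        names.add(entry.getName());
      }
    }
  } finally {
    jar.close(); // release the open file handle
  }
  return names;
}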
@Override
public void deleteResource() throws Exception {
  log.info("delete resource: " + path + " ...");
  Operation op = new Remove(address);
  ComplexResult res = connection.executeComplex(op);
  if (!res.isSuccess())
    throw new IllegalArgumentException(
        "Delete for [" + path + "] failed: " + res.getFailureDescription());
  if (path.contains("server-group")) {
    // This was a server group level deployment - TODO do we also need to remove the entry in
    // /deployments ?
    /*
    for (PROPERTY_VALUE val : address) {
      if (val.getKey().equals("deployment")) {
        ComplexResult res2 = connection.executeComplex(new Operation("remove", val.getKey(), val.getValue()));
        if (!res2.isSuccess())
          throw new IllegalArgumentException("Removal of [" + path + "] failed: " + res2.getFailureDescription());
      }
    }
    */
  }
  log.info(" ... done");
}
public synchronized NetworkStatsItemBean[] getSearchItems() {
  if (null == queryString) {
    return null;
  }
  if (!isFreshSearch) {
    return searchItemBeans;
  }
  try {
    ItemType searchItems[] = getSearchResults(queryString);
    if (null == searchItems) {
      return null;
    }
    searchItemBeans = new NetworkStatsItemBean[searchItems.length];
    for (int i = 0; i < searchItems.length; i++) {
      searchItemBeans[i] = new NetworkStatsItemBean(searchItems[i]);
    }
    isFreshSearch = false;
    // fetch the item details asynchronously
    Thread t = new Thread(new NetworkStatsItemDetailer(this, searchItemBeans));
    t.start();
    return searchItemBeans;
  } catch (Exception e) {
    if (log.isErrorEnabled()) {
      log.error("Failed to read the available search items", e);
    }
  }
  return null;
}
private Map<String, String> getPermissions(ContentEntity n) throws SDataException {
  Map<String, String> map = new HashMap<String, String>();
  if (n instanceof ContentCollection) {
    map.put("read", String.valueOf(contentHostingService.allowGetCollection(n.getId())));
    map.put("remove", String.valueOf(contentHostingService.allowRemoveCollection(n.getId())));
    map.put("write", String.valueOf(contentHostingService.allowUpdateCollection(n.getId())));
    Reference reference = entityManager.newReference(n.getReference());
    if (log.isDebugEnabled()) {
      log.debug("Got Reference " + reference + " for " + n.getReference());
    }
    Collection<?> groups = reference.getAuthzGroups();
    String user = sessionManager.getCurrentSessionUserId();
    map.put(
        "admin",
        String.valueOf(
            authZGroupService.isAllowed(
                user, AuthzGroupService.SECURE_UPDATE_AUTHZ_GROUP, groups)));
  } else {
    map.put("read", String.valueOf(contentHostingService.allowGetResource(n.getId())));
    map.put("remove", String.valueOf(contentHostingService.allowRemoveResource(n.getId())));
    map.put("write", String.valueOf(contentHostingService.allowUpdateResource(n.getId())));
  }
  return map;
}
/** Initialization procedure. Reads all faces-config.xml files and registers the beans found. */ public boolean init(ServletContext servletContext, ClassLoader classLoader) { // list of beans found List<FacesConfigEntry> facesConfigEntries = new ArrayList<FacesConfigEntry>(); // get the list of faces configuration files to process Set<URL> facesConfigs = getFacesConfigFiles(servletContext, classLoader); // process all configuration files for (URL url : facesConfigs) { processFacesConfig(url, facesConfigEntries); } // Create bean name lookup map from all entries found for (FacesConfigEntry entry : facesConfigEntries) { beanNameMap.put(entry.getBeanClass(), entry.getName()); } // debug statement containing number of classes found if (log.isDebugEnabled()) { log.debug("Found " + beanNameMap.size() + " bean names in faces configuration."); } // we will always enable this resolver return true; }
@Override public ResourceSchema getSchema(String location, Job job) throws IOException { if (!partitionKeysSet) { Set<String> keys = getPartitionColumns(location, job); if (!(keys == null || keys.size() == 0)) { // re-edit the pigSchema to contain the new partition keys. ResourceFieldSchema[] fields = pigSchema.getFields(); LOG.debug("Schema: " + Arrays.toString(fields)); ResourceFieldSchema[] newFields = Arrays.copyOf(fields, fields.length + keys.size()); int index = fields.length; for (String key : keys) { newFields[index++] = new ResourceFieldSchema(new FieldSchema(key, DataType.CHARARRAY)); } pigSchema.setFields(newFields); LOG.debug("Added partition fields: " + keys + " to loader schema"); LOG.debug("Schema is: " + Arrays.toString(newFields)); } partitionKeysSet = true; } return pigSchema; }
@Test public void testInheritedMethodsImplemented() throws Exception { int errors = 0; for (Method m : FileSystem.class.getDeclaredMethods()) { if (Modifier.isStatic(m.getModifiers()) || Modifier.isPrivate(m.getModifiers()) || Modifier.isFinal(m.getModifiers())) { continue; } try { MustNotImplement.class.getMethod(m.getName(), m.getParameterTypes()); try { HarFileSystem.class.getDeclaredMethod(m.getName(), m.getParameterTypes()); LOG.error("HarFileSystem MUST not implement " + m); errors++; } catch (NoSuchMethodException ex) { // Expected } } catch (NoSuchMethodException exc) { try { HarFileSystem.class.getDeclaredMethod(m.getName(), m.getParameterTypes()); } catch (NoSuchMethodException exc2) { LOG.error("HarFileSystem MUST implement " + m); errors++; } } } assertTrue((errors + " methods were not overridden correctly - see log"), errors <= 0); }
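// JDK-only sketch of the reflection trick the test above relies on: getDeclaredMethod only reports
// methods declared directly on the class, so a NoSuchMethodException means the subclass merely
// inherits the method rather than overriding it. The helper name is illustrative.
static boolean overridesMethod(Class<?> subclass, java.lang.reflect.Method m) {
  try {
    subclass.getDeclaredMethod(m.getName(), m.getParameterTypes());
    return true; // declared (i.e. overridden) directly on the subclass
  } catch (NoSuchMethodException e) {
    return false; // only inherited from the parent class
  }
}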
private void setJasperResourceNames(ToolBoxDTO toolBoxDTO, String barDir)
    throws BAMToolboxDeploymentException {
  String dashboardDir = barDir + File.separator + BAMToolBoxDeployerConstants.DASHBOARD_DIR;
  String jasperDirectory = dashboardDir + File.separator + BAMToolBoxDeployerConstants.JASPER_DIR;
  if (new File(jasperDirectory).exists()) {
    toolBoxDTO.setJasperParentDirectory(jasperDirectory);
    Properties properties = new Properties();
    FileInputStream propertiesStream = null;
    try {
      propertiesStream =
          new FileInputStream(
              dashboardDir + File.separator + BAMToolBoxDeployerConstants.JASPER_META_FILE);
      properties.load(propertiesStream);
      setJasperTabAndJrxmlNames(toolBoxDTO, properties);
      toolBoxDTO.setDataSource(properties.getProperty(BAMToolBoxDeployerConstants.DATASOURCE));
      toolBoxDTO.setDataSourceConfiguration(
          dashboardDir
              + File.separator
              + properties.getProperty(BAMToolBoxDeployerConstants.DATASOURCE_CONFIGURATION));
    } catch (FileNotFoundException e) {
      String message =
          "No " + BAMToolBoxDeployerConstants.JASPER_META_FILE + " found in dir: " + dashboardDir;
      log.error(message, e);
      throw new BAMToolboxDeploymentException(message, e);
    } catch (IOException e) {
      log.error(e.getMessage(), e);
      throw new BAMToolboxDeploymentException(e.getMessage(), e);
    } finally {
      // always release the file handle for the properties file
      if (propertiesStream != null) {
        try {
          propertiesStream.close();
        } catch (IOException ignored) {
          // nothing useful to do if close fails
        }
      }
    }
  } else {
    toolBoxDTO.setJasperParentDirectory(null);
    toolBoxDTO.setJasperTabs(new ArrayList<JasperTabDTO>());
  }
}
/** * Given the reduce taskAttemptID, returns the TaskAttemptInfo. Deconstructs the reduce * taskAttemptID and looks up the jobStory with the parts taskType, id of task, id of task * attempt. * * @param taskTracker tasktracker * @param taskAttemptID task-attempt * @return TaskAttemptInfo for the reduce task-attempt */ private TaskAttemptInfo getReduceTaskAttemptInfo( TaskTracker taskTracker, TaskAttemptID taskAttemptID) { assert (!taskAttemptID.isMap()); TaskID taskId = taskAttemptID.getTaskID(); TaskType taskType; if (taskAttemptID.isMap()) { taskType = TaskType.MAP; } else { taskType = TaskType.REDUCE; } TaskAttemptInfo taskAttemptInfo = jobStory.getTaskAttemptInfo(taskType, taskId.getId(), taskAttemptID.getId()); if (LOG.isDebugEnabled()) { LOG.debug( "get an attempt: " + taskAttemptID.toString() + ", state=" + taskAttemptInfo.getRunState() + ", runtime=" + ((taskAttemptID.isMap()) ? taskAttemptInfo.getRuntime() : ((ReduceTaskAttemptInfo) taskAttemptInfo).getReduceRuntime())); } return taskAttemptInfo; }
/* * Recover file. * Try and open file in append mode. * Doing this, we get a hold of the file that crashed writer * was writing to. Once we have it, close it. This will * allow subsequent reader to see up to last sync. * NOTE: This is the same algorithm that HBase uses for file recovery * @param fs * @throws Exception */ private void recoverFile(final FileSystem fs) throws Exception { LOG.info("Recovering File Lease"); // set the soft limit to be 1 second so that the // namenode triggers lease recovery upon append request cluster.setLeasePeriod(1000, FSConstants.LEASE_HARDLIMIT_PERIOD); // Trying recovery int tries = 60; boolean recovered = false; FSDataOutputStream out = null; while (!recovered && tries-- > 0) { try { out = fs.append(file1); LOG.info("Successfully opened for appends"); recovered = true; } catch (IOException e) { LOG.info("Failed open for append, waiting on lease recovery"); try { Thread.sleep(1000); } catch (InterruptedException ex) { // ignore it and try again } } } if (out != null) { out.close(); } if (!recovered) { fail("Recovery should take < 1 min"); } LOG.info("Past out lease recovery"); }
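// Generic sketch of the bounded retry loop used above, factored into a reusable helper. The helper
// name, parameters, and the use of Callable<Boolean> are illustrative, not part of the original test.
static boolean retryUntilTrue(int maxTries, long sleepMillis, java.util.concurrent.Callable<Boolean> attempt) {
  for (int tries = maxTries; tries > 0; tries--) {
    try {
      if (Boolean.TRUE.equals(attempt.call())) {
        return true; // the attempt succeeded
      }
    } catch (Exception e) {
      // treat any failure as "not ready yet" and fall through to the sleep
    }
    try {
      Thread.sleep(sleepMillis);
    } catch (InterruptedException ie) {
      Thread.currentThread().interrupt(); // preserve the interrupt and stop retrying
      return false;
    }
  }
  return false;
}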
@Override
public void cacheBlock(BlockCacheKey blockName, Cacheable toBeCached) {
  ByteBuffer storedBlock;

  try {
    storedBlock = backingStore.alloc(toBeCached.getSerializedLength());
  } catch (InterruptedException e) {
    LOG.warn("SlabAllocator was interrupted while waiting for block to become available", e);
    return;
  }

  CacheablePair newEntry = new CacheablePair(toBeCached.getDeserializer(), storedBlock);
  toBeCached.serialize(storedBlock);

  synchronized (this) {
    CacheablePair alreadyCached = backingMap.putIfAbsent(blockName, newEntry);

    if (alreadyCached != null) {
      // another thread won the race: release the buffer we just allocated
      backingStore.free(storedBlock);
      throw new RuntimeException("already cached " + blockName);
    }
    if (actionWatcher != null) {
      actionWatcher.onInsertion(blockName, this);
    }
  }
  newEntry.recentlyAccessed.set(System.nanoTime());
  this.size.addAndGet(newEntry.heapSize());
}
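// Stand-alone sketch of the insert-or-release idiom used above: if another thread cached the key
// first, release the resource that was just allocated instead of leaking it. The helper name and
// the Consumer-based release callback are illustrative, not taken from the original cache.
static <K, V> V cacheOrRelease(
    java.util.concurrent.ConcurrentMap<K, V> cache, K key, V fresh, java.util.function.Consumer<V> release) {
  V existing = cache.putIfAbsent(key, fresh);
  if (existing != null) {
    release.accept(fresh); // lost the race: free the buffer we allocated for 'fresh'
    return existing;
  }
  return fresh;
}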
protected static void assertEquals(Object theObject, Object theProperty) {
  if (!theObject.equals(theProperty)) {
    logger.error(String.valueOf(theObject) + " does not equal: " + String.valueOf(theProperty));
  } else {
    logger.debug(String.valueOf(theObject) + " equals " + String.valueOf(theProperty));
  }
}
/** * Based on configured options, will either return a pre-existing proxy, generate a new proxy, or * perform an actual load. * * @return The result of the proxy/load operation. * @throws HibernateException */ protected Object proxyOrLoad( final LoadEvent event, final EntityPersister persister, final EntityKey keyToLoad, final LoadEventListener.LoadType options) throws HibernateException { if (log.isTraceEnabled()) { log.trace( "loading entity: " + MessageHelper.infoString( persister, event.getEntityId(), event.getSession().getFactory())); } if (!persister.hasProxy()) { // this class has no proxies (so do a shortcut) return load(event, persister, keyToLoad, options); } else { final PersistenceContext persistenceContext = event.getSession().getPersistenceContext(); // look for a proxy Object proxy = persistenceContext.getProxy(keyToLoad); if (proxy != null) { return returnNarrowedProxy(event, persister, keyToLoad, options, persistenceContext, proxy); } else { if (options.isAllowProxyCreation()) { return createProxyIfNecessary(event, persister, keyToLoad, options, persistenceContext); } else { // return a newly loaded object return load(event, persister, keyToLoad, options); } } } }
/** * Stop the proxy. Proxy must either implement {@link Closeable} or must have associated {@link * RpcInvocationHandler}. * * @param proxy the RPC proxy object to be stopped * @throws HadoopIllegalArgumentException if the proxy does not implement {@link Closeable} * interface or does not have closeable {@link InvocationHandler} */ public static void stopProxy(Object proxy) { if (proxy == null) { throw new HadoopIllegalArgumentException("Cannot close proxy since it is null"); } try { if (proxy instanceof Closeable) { ((Closeable) proxy).close(); return; } else { InvocationHandler handler = Proxy.getInvocationHandler(proxy); if (handler instanceof Closeable) { ((Closeable) handler).close(); return; } } } catch (IOException e) { LOG.error("Closing proxy or invocation handler caused exception", e); } catch (IllegalArgumentException e) { LOG.error("RPC.stopProxy called on non proxy: class=" + proxy.getClass().getName(), e); } // If you see this error on a mock object in a unit test you're // developing, make sure to use MockitoUtil.mockProtocol() to // create your mock. throw new HadoopIllegalArgumentException( "Cannot close proxy - is not Closeable or " + "does not provide closeable invocation handler " + proxy.getClass()); }
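// JDK-only sketch (not from the original source) of why stopProxy also checks the InvocationHandler:
// a dynamic proxy object itself rarely implements Closeable, but its handler can, so closing the
// handler is what actually releases the underlying connection. The method name is illustrative.
static void closeIfPossible(Object proxy) throws java.io.IOException {
  if (proxy instanceof java.io.Closeable) {
    ((java.io.Closeable) proxy).close();
  } else if (java.lang.reflect.Proxy.isProxyClass(proxy.getClass())) {
    java.lang.reflect.InvocationHandler handler = java.lang.reflect.Proxy.getInvocationHandler(proxy);
    if (handler instanceof java.io.Closeable) {
      ((java.io.Closeable) handler).close();
    }
  }
}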
/** * Figure out the best string describing type to use. * * @param titleItemType the item's title * @param itemIntrospectItemType best guess based on structure * @param qmdItemType the type declared in metadata, if any * @return the string describing the item. */ private static String obtainTypeString( String titleItemType, String itemIntrospectItemType, String qmdItemType) { log.debug("qmdItemType: " + qmdItemType); log.debug("titleItemType: " + titleItemType); log.debug("itemIntrospectItemType: " + itemIntrospectItemType); // if we can't find any other approach String itemType = itemIntrospectItemType; // start with item title if (titleItemType != null) { if (isExactType(titleItemType)) { return titleItemType; } titleItemType = guessType(titleItemType); if (titleItemType != null) itemType = titleItemType; } // next try to figure out from qmd_itemtype metadata if (qmdItemType != null) { if (isExactType(qmdItemType)) { return qmdItemType; } qmdItemType = guessType(qmdItemType); if (qmdItemType != null) itemType = qmdItemType; } log.debug("returning itemType: " + itemType); return itemType; }
// Register protocol and its impl for rpc calls
void registerProtocolAndImpl(RpcKind rpcKind, Class<?> protocolClass, Object protocolImpl) {
  String protocolName = RPC.getProtocolName(protocolClass);
  long version;
  try {
    version = RPC.getProtocolVersion(protocolClass);
  } catch (Exception ex) {
    LOG.warn("Protocol " + protocolClass + " NOT registered as protocol version could not be obtained");
    return;
  }
  getProtocolImplMap(rpcKind)
      .put(
          new ProtoNameVer(protocolName, version),
          new ProtoClassProtoImpl(protocolClass, protocolImpl));
  if (LOG.isDebugEnabled()) {
    LOG.debug(
        "RpcKind = "
            + rpcKind
            + " Protocol Name = "
            + protocolName
            + " version="
            + version
            + " ProtocolImpl="
            + protocolImpl.getClass().getName()
            + " protocolClass="
            + protocolClass.getName());
  }
}
/** * List the statuses of the files/directories in the given path if the path is a directory. * * @param f given path * @return the statuses of the files/directories in the given path * @throws IOException */ @Override public FileStatus[] listStatus(Path f) throws IOException { if (LOG.isDebugEnabled()) { LOG.debug("SwiftFileSystem.listStatus for: " + f); } return store.listSubPaths(f); }
public void addKnownInput( String hostName, int port, InputAttemptIdentifier srcAttemptIdentifier, int srcPhysicalIndex) { String identifier = InputHost.createIdentifier(hostName, port); InputHost host = knownSrcHosts.get(identifier); if (host == null) { host = new InputHost(hostName, port, inputContext.getApplicationId(), srcPhysicalIndex); assert identifier.equals(host.getIdentifier()); InputHost old = knownSrcHosts.putIfAbsent(identifier, host); if (old != null) { host = old; } } if (LOG.isDebugEnabled()) { LOG.debug("Adding input: " + srcAttemptIdentifier + ", to host: " + host); } host.addKnownInput(srcAttemptIdentifier); lock.lock(); try { boolean added = pendingHosts.offer(host); if (!added) { String errorMessage = "Unable to add host: " + host.getIdentifier() + " to pending queue"; LOG.error(errorMessage); throw new TezUncheckedException(errorMessage); } wakeLoop.signal(); } finally { lock.unlock(); } }
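// Generic, JDK-only sketch of the lock-and-signal pattern above: a producer appends to a pending
// queue under a ReentrantLock and signals a Condition so a blocked consumer wakes up. The class
// and field names are illustrative, not taken from the original sources.
class PendingQueue<T> {
  private final java.util.Queue<T> pending = new java.util.ArrayDeque<T>();
  private final java.util.concurrent.locks.ReentrantLock lock =
      new java.util.concurrent.locks.ReentrantLock();
  private final java.util.concurrent.locks.Condition notEmpty = lock.newCondition();

  void add(T item) {
    lock.lock();
    try {
      pending.add(item);
      notEmpty.signal(); // wake one waiting consumer
    } finally {
      lock.unlock();
    }
  }

  T take() throws InterruptedException {
    lock.lock();
    try {
      while (pending.isEmpty()) {
        notEmpty.await(); // re-checked in a loop to guard against spurious wakeups
      }
      return pending.remove();
    } finally {
      lock.unlock();
    }
  }
}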
/*
 * check to see if hit the limit for max # completed apps kept
 */
protected synchronized void checkAppNumCompletedLimit() {
  // check apps kept in state store.
  while (completedAppsInStateStore > this.maxCompletedAppsInStateStore) {
    ApplicationId removeId = completedApps.get(completedApps.size() - completedAppsInStateStore);
    RMApp removeApp = rmContext.getRMApps().get(removeId);
    LOG.info(
        "Max number of completed apps kept in state store met:"
            + " maxCompletedAppsInStateStore = "
            + maxCompletedAppsInStateStore
            + ", removing app "
            + removeApp.getApplicationId()
            + " from state store.");
    rmContext.getStateStore().removeApplication(removeApp);
    completedAppsInStateStore--;
  }

  // check apps kept in memory.
  while (completedApps.size() > this.maxCompletedAppsInMemory) {
    ApplicationId removeId = completedApps.remove();
    LOG.info(
        "Application should be expired, max number of completed apps"
            + " kept in memory met: maxCompletedAppsInMemory = "
            + this.maxCompletedAppsInMemory
            + ", removing app "
            + removeId
            + " from memory.");
    rmContext.getRMApps().remove(removeId);
    this.applicationACLsManager.removeApplication(removeId);
  }
}
@Override
public void fetchFailed(
    String host, InputAttemptIdentifier srcAttemptIdentifier, boolean connectFailed) {
  // TODO NEWTEZ. Implement logic to report fetch failures after a threshold.
  // For now, reporting immediately.
  LOG.info(
      "Fetch failed for src: " + srcAttemptIdentifier + ", connectFailed: " + connectFailed);
  failedShufflesCounter.increment(1);
  if (srcAttemptIdentifier == null) {
    String message = "Received fetchFailure for an unknown src (null)";
    LOG.fatal(message);
    inputContext.fatalError(null, message);
  } else {
    InputReadErrorEvent readError =
        new InputReadErrorEvent(
            "Fetch failure while fetching from "
                + TezRuntimeUtils.getTaskAttemptIdentifier(
                    inputContext.getSourceVertexName(),
                    srcAttemptIdentifier.getInputIdentifier().getInputIndex(),
                    srcAttemptIdentifier.getAttemptNumber()),
            srcAttemptIdentifier.getInputIdentifier().getInputIndex(),
            srcAttemptIdentifier.getAttemptNumber());

    List<Event> failedEvents = Lists.newArrayListWithCapacity(1);
    failedEvents.add(readError);
    inputContext.sendEvents(failedEvents);
  }
}