@Test
@SuppressWarnings("deprecation")
public void testLog4j1Logging() {
  final org.apache.log4j.Logger logger = org.apache.log4j.Logger.getLogger(this.getClass());

  logger.trace("Foobar TRACE");
  AppenderForTests.hasNoLastEvent("at Trace level");
  assertFalse(logger.isTraceEnabled());

  logger.debug("Foobar DEBUG");
  AppenderForTests.hasNoLastEvent("at Debug level");
  assertFalse(logger.isDebugEnabled());

  logger.info("Foobar INFO");
  AppenderForTests.hasNoLastEvent("at Info level");
  assertFalse(logger.isInfoEnabled());

  logger.warn("Foobar WARN");
  AppenderForTests.hasLastEvent("at Warn level");
  assertTrue(logger.isEnabledFor(Priority.WARN));

  logger.error("Foobar ERROR");
  AppenderForTests.hasLastEvent("at Error level");
  assertTrue(logger.isEnabledFor(Priority.ERROR));

  logger.fatal("Foobar FATAL");
  AppenderForTests.hasLastEvent("at Fatal level");
  assertTrue(logger.isEnabledFor(Priority.FATAL));
}
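// The assertions above only hold when log4j 1.x is configured so that the test logger's
// effective level is WARN and AppenderForTests is attached. A minimal fixture sketch under
// that assumption (the @Before method and the appender registration are hypothetical, not
// part of the original test):
@Before
public void setUpLogging() {
  org.apache.log4j.Logger root = org.apache.log4j.Logger.getRootLogger();
  // Everything below WARN is filtered out, so TRACE/DEBUG/INFO produce no events.
  root.setLevel(org.apache.log4j.Level.WARN);
  // Assumed: AppenderForTests extends AppenderSkeleton and records the last logged event.
  root.addAppender(new AppenderForTests());
}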
public static SNode setAttribute(SNode node, IAttributeDescriptor descriptor, SNode value) {
  Iterable<SNode> oldlist = getAttributes(node, descriptor);
  if (Sequence.fromIterable(oldlist).isEmpty() && (value != null)) {
    addAttribute(node, descriptor, value);
  } else if ((value == null)) {
    deleteAttribute(node, descriptor, value);
  } else {
    if (Sequence.fromIterable(oldlist).count() > 1) {
      if (LOG.isEnabledFor(Level.ERROR)) {
        LOG.error(
            Sequence.fromIterable(oldlist).count()
                + " nodes match single value attribute during attribute replacing. Only the first found node is replaced.");
      }
      if (LOG.isEnabledFor(Level.ERROR)) {
        LOG.error(
            " node="
                + node.getReference()
                + "; attribute="
                + ((String)
                    BHReflection.invoke(
                        SNodeOperations.asSConcept(
                            SNodeOperations.getConcept(Sequence.fromIterable(oldlist).first())),
                        SMethodTrimmedId.create("getRole", null, "1653mnvAgoG")))
                + " ("
                + Sequence.fromIterable(oldlist).first().getNodeId()
                + ")");
      }
    }
    SNodeOperations.replaceWithAnother(Sequence.fromIterable(oldlist).first(), value);
    descriptor.update(value);
  }
  return value;
}
public static SNode getAttribute(SNode node, IAttributeDescriptor descriptor) {
  Iterable<SNode> list = getAttributes(node, descriptor);
  if (Sequence.fromIterable(list).isEmpty()) {
    return null;
  }
  if (Sequence.fromIterable(list).count() > 1) {
    if (LOG.isEnabledFor(Level.ERROR)) {
      LOG.error(
          Sequence.fromIterable(list).count()
              + " nodes match single value attribute. The first found node returned as the value.");
    }
    if (LOG.isEnabledFor(Level.ERROR)) {
      LOG.error(
          " node="
              + node.getReference()
              + "; attribute="
              + ((String)
                  BHReflection.invoke(
                      SNodeOperations.asSConcept(
                          SNodeOperations.getConcept(Sequence.fromIterable(list).first())),
                      SMethodTrimmedId.create("getRole", null, "1653mnvAgoG")))
              + " ("
              + Sequence.fromIterable(list).first().getNodeId()
              + ")");
    }
  }
  return Sequence.fromIterable(list).first();
}
/**
 * Remove an SRDI cache entry.
 *
 * @param peer peer id we send the request to, null for sending to all
 * @param id peer id of the SRDI route that we want to remove from the cache
 */
protected void removeSrdi(String peer, PeerID id) {
  SrdiMessage srdiMsg;

  try {
    srdiMsg =
        new SrdiMessageImpl(
            group.getPeerID(),
            1, // only one hop
            "route",
            id.toString(),
            null,
            new Long(0).longValue()); // 0 means remove

    if (LOG.isEnabledFor(Level.DEBUG)) {
      LOG.debug("sending a router SRDI message delete route " + id);
    }

    if (peer == null) {
      PeerID destPeer = srdi.getReplicaPeer(id.toString());

      // don't push anywhere if we do not have a replica
      // or we are trying to push to ourselves
      if (destPeer != null && (!destPeer.equals(localPeerId))) {
        srdi.pushSrdi(destPeer, srdiMsg);
      }
    }
  } catch (Exception e) {
    if (LOG.isEnabledFor(Level.DEBUG)) {
      LOG.debug("Removing srdi entry failed", e);
    }
  }
}
/**
 * Send a message to the peer which is represented by the current PeerViewElement.
 *
 * @param msg the message to send
 * @param serviceName the service name on the destination peer to which the message will be
 *     demultiplexed
 * @param serviceParam the service param on the destination peer to which the message will be
 *     demultiplexed
 * @return true if the message was successfully handed off to the endpoint for delivery, false
 *     otherwise
 */
public boolean sendMessage(Message msg, String serviceName, String serviceParam) {
  if (throttling) {
    if (LOG.isEnabledFor(Level.WARN)) {
      LOG.warn("Declining to send -- throttling on " + this);
    }
    return false;
  }

  Messenger sendVia = getCachedMessenger();

  if (null == sendVia) {
    // There is nothing really we can do.
    if (LOG.isEnabledFor(Level.WARN)) {
      LOG.warn("Could not get messenger for " + getDestAddress());
    }

    OutgoingMessageEvent event =
        new OutgoingMessageEvent(
            msg, new IOException("Couldn't get messenger for " + getDestAddress()));
    messageSendFailed(event);
    return false;
  }

  sendVia.sendMessage(msg, serviceName, serviceParam, this);
  return true;
}
/**
 * Execute method of the rule. This holds the rule conditions and actions. The required objects
 * are initialized through the init() method.
 *
 * @return execution result of the rule, either SUCCESS or FAILURE
 */
public static RulesResult execute(RulesContext ctx, String ruleId) throws RIFPOJORulesException {
  // Construct rules result object
  RulesResult rulesResult = new RulesResult(ruleId);
  String recoveryReason = null;
  boolean result = false;
  Double recoveryAmount = 0d;

  if (ctx != null) {
    List<Object> inputObjs = ctx.getContextObject();
    recoveryReason = (String) inputObjs.get(0);
    if (recoveryReason == null) {
      if (LOG.isEnabledFor(Level.ERROR)) {
        LOG.error(
            "Input Object recoveryReason is null during initialization for the rule " + ruleId);
      }
      throw new RIFPOJORulesException(
          "Unable to initialize the required rules objects for rule " + ruleId);
    }
    result = (Boolean) inputObjs.get(1);
    recoveryAmount = (Double) inputObjs.get(2);
    if (recoveryAmount == null) {
      if (LOG.isEnabledFor(Level.ERROR)) {
        LOG.error(
            "Input Object recoveryAmount is null during initialization for the rule " + ruleId);
      }
      throw new RIFPOJORulesException(
          "Unable to initialize the required rules objects for rule " + ruleId);
    }
  } else {
    if (LOG.isEnabledFor(Level.ERROR)) {
      LOG.error("Input Context Object is null during initialization for the rule " + ruleId);
    }
    throw new RIFPOJORulesException(
        "Unable to initialize the required rules objects for rule " + ruleId);
  }

  if (LOG.isInfoEnabled()) {
    LOG.info("Executing the actions for the rule " + ruleId);
  }

  // EXTRACT_START TPL0717.0001.01
  if ('D' == recoveryReason.charAt(0)
      || 'S' == recoveryReason.charAt(0)
      || "R99".equalsIgnoreCase(recoveryReason)) {
    if (recoveryAmount != null && !(recoveryAmount.doubleValue() == 0)) {
      // Defect ESPRD00684744
      result = true;
    }
  }
  // EXTRACT_END

  rulesResult.setReturnBooleanValue(result);
  rulesResult.setRuleStatus(true);
  return rulesResult;
}
Log4JLogger(String className) {
  logger = LogManager.getLogger(className);

  isFatalEnabled = logger.isEnabledFor(Level.FATAL);
  isErrorEnabled = logger.isEnabledFor(Level.ERROR);
  isWarnEnabled = logger.isEnabledFor(Level.WARN);
  isInfoEnabled = logger.isEnabledFor(Level.INFO);
  isDebugEnabled = logger.isEnabledFor(Level.DEBUG);
  if (hasTrace) isTraceEnabled = logger.isEnabledFor(Level.TRACE);
}
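// The constructor above snapshots the enabled levels once. A wrapper built this way would
// typically consult the cached flag before delegating; a minimal sketch under that assumption
// (the debug(Object) method shown here is illustrative, not taken from the original class):
public void debug(Object message) {
  // Cached result of logger.isEnabledFor(Level.DEBUG); note that level changes made after
  // construction are not picked up because the flag is computed only once.
  if (isDebugEnabled) {
    logger.debug(message);
  }
}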
/*
 * Push all srdi entries to the rendezvous SRDI cache (new connection).
 * @param all if true push all entries, otherwise just deltas
 */
protected void pushSrdi(String peer, boolean all) {
  SrdiMessage srdiMsg;
  Vector routeIx = new Vector();

  // 10182002tra: Route info doesn't expire unless the peer disappears.
  // This approach is used to limit the SRDI traffic. The key
  // point here is that SRDI is used to tell a peer that another
  // has a route to the destination it is looking for. The information
  // that the SRDI cache holds is not so much the specific route info but rather
  // the fact that a peer has knowledge of a route to another peer.
  // We don't want to update the SRDI cache on every route update.
  // The SRDI cache will be flushed when the peer disconnects from
  // the rendezvous.

  // We cannot support concurrent modification of the map while we
  // do that: we must synchronize...
  for (Iterator each = router.getAllRoutedRouteAddresses(); each.hasNext(); ) {
    PeerID pid = router.addr2pid((EndpointAddress) each.next());
    SrdiMessage.Entry entry = new SrdiMessage.Entry(pid.toString(), "", Long.MAX_VALUE);
    routeIx.addElement(entry);
  }

  try {
    // check if we have anything to send
    if (routeIx.size() == 0) {
      return;
    }

    srdiMsg =
        new SrdiMessageImpl(
            group.getPeerID(),
            1, // one hop
            "route",
            routeIx);

    if (LOG.isEnabledFor(Level.DEBUG)) {
      LOG.debug("Sending a SRDI message of [All=" + all + "] routes");
    }

    // this will replicate entry to the SRDI replica peers
    srdi.replicateEntries(srdiMsg);
  } catch (Exception e) {
    if (LOG.isEnabledFor(Level.WARN)) {
      LOG.warn("SRDI Push failed", e);
    }
  }
}
/**
 * Register a class with the factory from its class name. We override the standard implementation
 * to get the mime type from the class and use that as the key to register the class with the
 * factory.
 *
 * @param className The class name which will be registered.
 * @return boolean true if the class was registered otherwise false.
 */
protected boolean registerAssoc(String className) {
  boolean registeredSomething = false;

  // LOG.debug( "Registering : " + className );

  try {
    Class docClass = Class.forName(className);
    Instantiator instantiator = (Instantiator) (docClass.getField("INSTANTIATOR").get(null));
    MimeMediaType[] mimeTypes = instantiator.getSupportedMimeTypes();

    for (int eachType = 0; eachType < mimeTypes.length; eachType++) {
      // LOG.debug( " Registering Type : " + mimeTypes[eachType].getMimeMediaType() );
      registeredSomething |= registerInstantiator(mimeTypes[eachType], instantiator);
    }
  } catch (Exception all) {
    if (LOG.isEnabledFor(Level.WARN)) {
      LOG.warn("Failed to register '" + className + "'", all);
    }
  }

  return registeredSomething;
}
/** @return a possibly empty list of converted deltas. */
public List<IResourceDescription.Delta> convert(IJavaElementDelta delta) {
  /*
   * a file was opened or closed or there is no relevant structural change in the delta
   * - we do not expect an IResourceDescription.Delta
   *
   * Deltas without any affected children or without content changes
   * are considered to be irrelevant
   */
  if (delta.getFlags() == IJavaElementDelta.F_AST_AFFECTED
      || delta.getFlags()
          == (IJavaElementDelta.F_AST_AFFECTED
              | IJavaElementDelta.F_CONTENT
              | IJavaElementDelta.F_FINE_GRAINED)) {
    return Collections.emptyList();
  }
  try {
    List<IResourceDescription.Delta> result = Lists.newArrayListWithExpectedSize(2);
    convertCompilationUnits(delta, result);
    return result;
  } catch (Throwable throwable) {
    if (throwable instanceof VirtualMachineError) {
      throw (VirtualMachineError) throwable;
    }
    if (LOGGER.isEnabledFor(Level.ERROR)) {
      LOGGER.error(throwable.getMessage(), throwable);
    }
    return Collections.emptyList();
  }
}
/**
 * Cleanup any edge peers when trying to forward an SRDI query, so we are guaranteed, to the best
 * of our knowledge, that the peer is a rendezvous. This is not perfect, as it may take time for
 * the peerview to converge, but at least we can remove any peers that are not a rendezvous.
 */
protected Vector cleanupAnyEdges(String src, Vector results) {
  Vector clean = new Vector(results.size());
  PeerID pid = null;
  // put the peerview as a vector of PIDs
  Vector rpvId = srdi.getGlobalPeerView();

  // remove any peers not in the current peerview
  // these peers may be gone or have become edges
  for (int i = 0; i < results.size(); i++) {
    pid = (PeerID) results.elementAt(i);

    // eliminate the src of the query so we don't resend
    // the query to whoever sent it to us
    if (src.equals(pid.toString())) {
      continue;
    }

    // remove the local peer also, so we don't send to ourselves
    if (localPeerId.equals(pid)) {
      continue;
    }

    if (rpvId.contains(pid)) {
      // ok, that's a good rendezvous for the SRDI forward, to the best of our knowledge
      if (LOG.isEnabledFor(Level.DEBUG)) {
        LOG.debug("valid rdv for SRDI forward " + pid);
      }
      clean.add(pid);
    } else {
      // cleanup our SRDI cache for that peer
      srdiIndex.remove(pid);
    }
  }

  return clean;
}
/** {@inheritDoc} */
public void saveKeyStore(KeyStore store, char[] password) throws KeyStoreException, IOException {
  if (LOG.isEnabledFor(Level.DEBUG)) {
    LOG.debug("Writing " + store + " to " + keystore_location);
  }

  try {
    OutputStream os = null;

    if ("file".equalsIgnoreCase(keystore_location.getScheme())) {
      os = new FileOutputStream(new File(keystore_location));
    } else {
      os = keystore_location.toURL().openConnection().getOutputStream();
    }

    store.store(os, password);
  } catch (NoSuchAlgorithmException failed) {
    KeyStoreException failure =
        new KeyStoreException("NoSuchAlgorithmException during keystore processing");
    failure.initCause(failed);
    throw failure;
  } catch (CertificateException failed) {
    KeyStoreException failure =
        new KeyStoreException("CertificateException during keystore processing");
    failure.initCause(failed);
    throw failure;
  }
}
/**
 * This method handles submitting the request to the server. It uses the ClientRequest API to
 * actually write the request and then read back the response. The outcome is reported
 * asynchronously through the supplied callback rather than returned to the caller.
 *
 * @param <T> Return type
 * @param clientRequest ClientRequest implementation used to write the request and read the
 *     response
 * @param operationName Simple string representing the type of request
 */
private <T> void requestAsync(
    ClientRequest<T> delegate,
    NonblockingStoreCallback callback,
    long timeoutMs,
    String operationName) {
  ClientRequestExecutor clientRequestExecutor = null;

  try {
    clientRequestExecutor = pool.checkout(destination);
  } catch (Exception e) {
    // If we can't check out a socket from the pool, we'll usually get
    // either an IOException (subclass) or an UnreachableStoreException
    // error. However, in the case of asynchronous calls, we want the
    // error to be reported via our callback, not returned to the caller
    // directly.
    if (!(e instanceof UnreachableStoreException))
      e = new UnreachableStoreException("Failure in " + operationName + ": " + e.getMessage(), e);

    try {
      callback.requestComplete(e, 0);
    } catch (Exception ex) {
      if (logger.isEnabledFor(Level.WARN)) logger.warn(ex, ex);
    }

    return;
  }

  NonblockingStoreCallbackClientRequest<T> clientRequest =
      new NonblockingStoreCallbackClientRequest<T>(delegate, clientRequestExecutor, callback);
  clientRequestExecutor.addClientRequest(clientRequest, timeoutMs);
}
/**
 * Constructs an instance of {@link StructuredDocument} matching the mime-type specified by the
 * <CODE>mimetype</CODE> parameter. The <CODE>doctype</CODE> parameter identifies the base type of
 * the {@link StructuredDocument}.
 *
 * @param mimetype Specifies the mime media type to be associated with the {@link
 *     StructuredDocument} to be created.
 * @param reader A Reader from which the document will be constructed.
 * @return StructuredDocument The instance of {@link StructuredDocument} or null if it could not
 *     be created.
 * @throws IOException If there is a problem reading from the stream.
 * @throws NoSuchElementException if the mime-type has not been registered.
 * @throws UnsupportedOperationException if the mime-type provided is not a text oriented
 *     mimetype.
 */
public static StructuredDocument newStructuredDocument(MimeMediaType mimetype, Reader reader)
    throws IOException {
  if (!factory.loadedProperty) {
    factory.loadedProperty = factory.doLoadProperty();
  }

  Instantiator instantiator = (Instantiator) factory.getInstantiator(mimetype.getMimeMediaType());

  if (!(instantiator instanceof TextInstantiator)) {
    // XXX 20020502 [email protected] we could probably do something
    // really inefficient that would allow it to work, but better not to.
    // if ReaderInputStream existed, it would be easy to do.
    if (LOG.isEnabledFor(Level.WARN))
      LOG.warn(
          "Document Class '"
              + instantiator.getClass().getName()
              + "' associated with '"
              + mimetype
              + "' is not a text oriented document");

    throw new UnsupportedOperationException(
        "Document Class '"
            + instantiator.getClass().getName()
            + "' associated with '"
            + mimetype
            + "' is not a text oriented document");
  }

  return ((TextInstantiator) instantiator).newInstance(mimetype, reader);
}
/**
 * Execute method of the rule. This holds the rule conditions and actions. The required objects
 * are initialized through the init() method.
 *
 * @return execution result of the rule, either SUCCESS or FAILURE
 */
public static RulesResult execute(RulesContext ctx, String ruleId) throws RIFPOJORulesException {
  // Construct rules result object
  RulesResult rulesResult = new RulesResult(ruleId);
  String value = null;
  List systemList = null;
  boolean checkRange = false;

  if (ctx != null) {
    List<Object> inputObjs = ctx.getContextObject();
    value = (String) inputObjs.get(0);
    if (value == null) {
      if (LOG.isEnabledFor(Level.ERROR)) {
        LOG.error("Input Object value is null during initialization for the rule " + ruleId);
      }
      throw new RIFPOJORulesException(
          "Unable to initialize the required rules objects for rule " + ruleId);
    }
    systemList = (List) inputObjs.get(1);
    if (systemList == null) {
      if (LOG.isEnabledFor(Level.ERROR)) {
        LOG.error("Input Object systemList is null during initialization for the rule " + ruleId);
      }
      throw new RIFPOJORulesException(
          "Unable to initialize the required rules objects for rule " + ruleId);
    }
  } else {
    if (LOG.isEnabledFor(Level.ERROR)) {
      LOG.error("Input Context Object is null during initialization for the rule " + ruleId);
    }
    throw new RIFPOJORulesException(
        "Unable to initialize the required rules objects for rule " + ruleId);
  }

  if (LOG.isInfoEnabled()) {
    LOG.info("Executing the actions for the rule " + ruleId);
  }

  // EXTRACT_START PGM4004.9005.NH01
  checkRange = systemList.contains(value);
  // EXTRACT_END

  rulesResult.setReturnBooleanValue(checkRange);
  rulesResult.setRuleStatus(true);
  return rulesResult;
}
/**
 * Makes this transport up and running.
 *
 * <p>When this method is called, all the services are already registered with the peergroup. So
 * we do not need to delay binding any further. All the public methods, which could be called
 * between init and startApp, are defensive regarding the services possibly not being there.
 */
public int startApp(String[] arg) {
  resolver = group.getResolverService();
  membership = group.getMembershipService();

  if (null == resolver) {
    if (LOG.isEnabledFor(Level.DEBUG)) {
      LOG.debug("Endpoint Router start stalled until resolver service available");
    }
    return Module.START_AGAIN_STALLED;
  }

  if (null == membership) {
    if (LOG.isEnabledFor(Level.DEBUG)) {
      LOG.debug("Endpoint Router start stalled until membership service available");
    }
    return Module.START_AGAIN_STALLED;
  }

  resolver.registerHandler(routerSName, this);

  // create and register the srdi service
  srdiIndex = new SrdiIndex(group, srdiIndexerFileName);
  // Srdi is a thread but we are not going to start it,
  // since the service is reactive.
  srdi = new Srdi(group, routerSName, this, srdiIndex, 0, 0);
  resolver.registerSrdiHandler(routerSName, this);

  // obtain credential for resolver messages
  try {
    // FIXME 20041008 bondolo this needs to be converted to dynamically managing credentials.
    // get the only credential "nobody"
    credential = (Credential) membership.getDefaultCredential();
    if (null != credential) {
      credentialDoc = credential.getDocument(MimeMediaType.XMLUTF8);
    }
  } catch (Exception e) {
    if (LOG.isEnabledFor(Level.ERROR)) {
      LOG.error("failed to get credential", e);
    }
  }

  // get the RouteCM cache service
  routeCM = router.getRouteCM();

  return 0;
}
/**
 * Registers the pre-defined set of StructuredDocument sub-classes so that this factory can
 * construct them.
 *
 * @return boolean true if at least one of the StructuredDocument sub-classes could be registered
 *     otherwise false.
 */
private boolean doLoadProperty() {
  try {
    return registerFromResources("net.jxta.impl.config", "StructuredDocumentInstanceTypes");
  } catch (MissingResourceException notFound) {
    if (LOG.isEnabledFor(Level.WARN)) {
      LOG.warn("Could not find net.jxta.impl.config properties file!");
    }
    return false;
  }
}
public synchronized LogEvent log(LogEvent ev) {
  org.apache.log4j.Logger logger = org.apache.log4j.Logger.getLogger(ev.getRealm());
  if (logger.isEnabledFor(_level)) {
    ByteArrayOutputStream w = new ByteArrayOutputStream();
    PrintStream p = new PrintStream(w);
    ev.dump(p, "");
    logger.log(_level, w.toString());
  }
  return ev;
}
public void executeQuery(CompiledQuery query, CompiledQueryMix queryMix) {
  double timeInSeconds;

  String queryString = query.getQueryString();
  String parametrizedQueryString = query.getParametrizedQueryString();
  String encodedParamString = query.getEncodedParamString();
  byte queryType = query.getQueryType();
  int queryNr = query.getNr();

  NetQuery qe;
  if (query.source.isParametrized)
    qe =
        new NetQuery(
            serverURL,
            parametrizedQueryString,
            encodedParamString,
            queryType,
            defaultGraph,
            timeout);
  else qe = new NetQuery(serverURL, queryString, "", queryType, defaultGraph, timeout);

  int queryMixRun = queryMix.getRun() + 1;

  InputStream is = qe.exec();

  if (is == null) { // then Timeout!
    double t = this.timeout / 1000.0;
    System.out.println("Query " + queryNr + ": " + t + " seconds timeout!");
    queryMix.reportTimeOut(); // inc. timeout counter
    queryMix.setCurrent(0, t);
    qe.close();
    return;
  }

  int resultCount = 0;

  try {
    // Write XML result into result
    if (queryType == Query.SELECT_TYPE) resultCount = countResults(is);
    else resultCount = countBytes(is);

    timeInSeconds = qe.getExecutionTimeInSeconds();
  } catch (SocketTimeoutException e) {
    double t = this.timeout / 1000.0;
    System.out.println("Query " + queryNr + ": " + t + " seconds timeout!");
    queryMix.reportTimeOut(); // inc. timeout counter
    queryMix.setCurrent(0, t);
    qe.close();
    return;
  }

  if (logger.isEnabledFor(Level.ALL) && queryMixRun > 0)
    logResultInfo(queryNr, queryMixRun, timeInSeconds, queryString, queryType, resultCount);

  queryMix.setCurrent(resultCount, timeInSeconds);
  qe.close();
}
/*
 * Push an srdi entry to the SRDI rendezvous cache.
 * @param peer the peer to push the entry to; if null, the entry is pushed to the replica peer
 * @param id peer id of the SRDI route to push
 */
protected void pushSrdi(ID peer, PeerID id) {
  SrdiMessage srdiMsg;

  try {
    srdiMsg =
        new SrdiMessageImpl(
            group.getPeerID(),
            1, // only one hop
            "route",
            id.toString(),
            null,
            new Long(Long.MAX_VALUE).longValue()); // maximum expiration

    // 10182002tra: Route info doesn't expire unless the peer disappears.
    // This approach is used to limit the SRDI traffic. The key
    // point here is that SRDI is used to tell a peer that another
    // has a route to the destination it is looking for. The information
    // that the SRDI cache holds is not so much the specific route info but rather
    // the fact that a peer has knowledge of a route to another peer.
    // We don't want to update the SRDI cache on every route update.
    // The SRDI cache will be flushed when the peer disconnects from
    // the rendezvous.
    if (LOG.isEnabledFor(Level.DEBUG)) {
      LOG.debug("sending a router SRDI message add route " + id);
    }

    if (peer == null) {
      PeerID destPeer = srdi.getReplicaPeer(id.toString());
      peer = destPeer;
    }

    // don't push anywhere if we do not have a replica
    // or we are trying to send the query to ourselves
    if (!localPeerId.equals(peer)) {
      srdi.pushSrdi(peer, srdiMsg);
    }
  } catch (Exception e) {
    if (LOG.isEnabledFor(Level.WARN)) {
      LOG.warn("SRDI push failed", e);
    }
  }
}
private void outputModules(
    StructuredTextDocument doc, Hashtable modulesTable, String mainTag, MimeMediaType encodeAs) {
  Enumeration allClasses = modulesTable.keys();

  while (allClasses.hasMoreElements()) {
    ModuleClassID mcid = (ModuleClassID) allClasses.nextElement();
    Object val = modulesTable.get(mcid);

    // For applications, we ignore the role ID. It is not meaningful,
    // and a new one is assigned on the fly when loading this adv.
    if (val instanceof Advertisement) {
      TextElement m = doc.createElement(mainTag);
      doc.appendChild(m);

      if (!(modulesTable == appsTable || mcid.equals(mcid.getBaseClass()))) {
        // It is not an app and there is a role ID. Output it.
        TextElement i = doc.createElement(mcidTag, mcid.toString());
        m.appendChild(i);
      }

      StructuredTextDocument advdoc =
          (StructuredTextDocument) ((Advertisement) val).getDocument(encodeAs);

      StructuredDocumentUtils.copyElements(doc, m, advdoc);
    } else if (val instanceof ModuleSpecID) {
      TextElement m;

      if (modulesTable == appsTable || mcid.equals(mcid.getBaseClass())) {
        // Either it is an app or there is no role ID.
        // So the specId is good enough.
        m = doc.createElement(mainTag, ((ModuleSpecID) val).toString());
        doc.appendChild(m);
      } else {
        // The role ID matters, so the classId must be separate.
        m = doc.createElement(mainTag);
        doc.appendChild(m);

        TextElement i;
        i = doc.createElement(mcidTag, mcid.toString());
        m.appendChild(i);

        i = doc.createElement(msidTag, ((ModuleSpecID) val).toString());
        m.appendChild(i);
      }
    } else {
      if (LOG.isEnabledFor(Level.WARN)) LOG.warn("unsupported class in modules table");
    }
  }
}
@Override
public SearchResults find(SearchQuery query, ProgressMonitor monitor) {
  myResults = new SearchResults<SNode>();
  SearchScope queryScope = query.getScope();
  monitor.start("Searching applicable nodes", myScripts.size());
  try {
    for (RefactoringScript scriptInstance : myScripts) {
      if (monitor.isCanceled()) {
        break;
      }
      Collection<AbstractMigrationRefactoring> refactorings = scriptInstance.getRefactorings();
      for (AbstractMigrationRefactoring ref : refactorings) {
        if (monitor.isCanceled()) {
          break;
        }
        monitor.step(scriptInstance.getName() + " [" + ref.getAdditionalInfo() + "]");
        Set<SNode> instances =
            FindUsagesFacade.getInstance()
                .findInstances(
                    queryScope,
                    Collections.singleton(ref.getApplicableConcept()),
                    false,
                    new EmptyProgressMonitor());
        for (SNode instance : instances) {
          try {
            if (ref.isApplicableInstanceNode(instance)) {
              String category =
                  StringUtil.escapeXml(scriptInstance.getName())
                      + " </b>["
                      + StringUtil.escapeXml(ref.getAdditionalInfo())
                      + "]<b>";
              SearchResult<SNode> result = new SearchResult<SNode>(instance, category);
              myMigrationBySearchResult.put(result, ref);
              myResults.getSearchResults().add(result);
            }
          } catch (Throwable th) {
            if (LOG.isEnabledFor(Level.ERROR)) {
              LOG.error("Failed to evaluate script applicability", th);
            }
          }
        }
      }
      monitor.advance(1);
    }
    fireResultsChanged();
    return myResults;
  } finally {
    monitor.done();
  }
}
/**
 * @param level string representation of Logging-levels as used in log4j
 * @return true if specified logging-level is enabled
 */
public static boolean isEnabledFor(String level) {
  // go from level-string to log4j-level-object
  org.apache.log4j.Level log4jLevel = org.apache.log4j.Level.toLevel(level);

  if (forceJuli || log4j == null) {
    // must translate from log4j-level to jul-level
    java.util.logging.Level julLevel = toJuliLevel(log4jLevel.toString());
    // check level against jul
    return juli.isLoggable(julLevel);
  } else {
    // check level against log4j
    return log4j.isEnabledFor(log4jLevel);
  }
}
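// The toJuliLevel helper used above is not shown here. A plausible sketch of such a
// log4j-to-java.util.logging mapping, offered purely as an assumption about its behavior
// (the real implementation may map names differently):
private static java.util.logging.Level toJuliLevel(String log4jLevel) {
  if ("FATAL".equals(log4jLevel) || "ERROR".equals(log4jLevel)) {
    return java.util.logging.Level.SEVERE;
  } else if ("WARN".equals(log4jLevel)) {
    return java.util.logging.Level.WARNING;
  } else if ("INFO".equals(log4jLevel)) {
    return java.util.logging.Level.INFO;
  } else if ("DEBUG".equals(log4jLevel)) {
    return java.util.logging.Level.FINE;
  } else if ("TRACE".equals(log4jLevel)) {
    return java.util.logging.Level.FINEST;
  } else if ("ALL".equals(log4jLevel)) {
    return java.util.logging.Level.ALL;
  } else if ("OFF".equals(log4jLevel)) {
    return java.util.logging.Level.OFF;
  }
  return java.util.logging.Level.INFO; // fallback for unrecognized names
}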
/** Constructs a keystore manager for the specified keystore type, provider and location. */
public URIKeyStoreManager(String type, String provider, URI location)
    throws NoSuchProviderException, KeyStoreException {
  if (null == type) {
    type = DEFAULT_KEYSTORE_TYPE;
    provider = null;
  }

  // special case for forcing bc provider for jdk < 1.5 since jdk 1.4.x
  // jsse pkcs12 is readonly.
  if ("pkcs12".equalsIgnoreCase(type)) {
    if ("BC".equals(provider)) {
      provider = null;
    }

    boolean hasJDK15 =
        System.getProperty("java.specification.version", "0.0").compareTo("1.5") >= 0;

    provider = hasJDK15 ? null : "BC";
  }

  if (!location.isAbsolute()) {
    throw new IllegalArgumentException("location must be an absolute URI");
  }

  if ("file".equalsIgnoreCase(location.getScheme())) {
    File asFile = new File(location);

    if (asFile.exists() && !asFile.isFile()) {
      throw new IllegalArgumentException("location must refer to a file");
    }
  }

  if (LOG.isEnabledFor(Level.INFO)) {
    LOG.info("pse location = " + location);
  }

  keystore_type = type;
  keystore_provider = provider;
  keystore_location = location;

  // check if we can get an instance.
  if (null == keystore_provider) {
    KeyStore.getInstance(keystore_type);
  } else {
    KeyStore.getInstance(keystore_type, keystore_provider);
  }
}
public void close() {
  if (!isClosed.compareAndSet(false, true)) {
    if (logger.isEnabledFor(Level.INFO)) {
      logger.info("already closed!");
    }
    return;
  }
  try {
    this.clientChannel.keyFor(this.selector).cancel();
  } catch (Exception e) {
    if (logger.isEnabledFor(Level.WARN)) {
      logger.warn(e.getMessage(), e);
    }
  }
  try {
    this.clientChannel.close();
  } catch (IOException e) {
    if (logger.isEnabledFor(Level.WARN)) {
      logger.warn(e.getMessage(), e);
    }
  }
}
/**
 * Private constructor. Use instantiator.
 *
 * @param root Description of the Parameter
 */
private RouteAdv(Element root) {
  if (!XMLElement.class.isInstance(root)) {
    throw new IllegalArgumentException(getClass().getName() + " only supports XMLElement");
  }

  XMLElement doc = (XMLElement) root;

  String doctype = doc.getName();

  String typedoctype = "";
  Attribute itsType = doc.getAttribute("type");

  if (null != itsType) {
    typedoctype = itsType.getValue();
  }

  if (!doctype.equals(getAdvertisementType()) && !getAdvertisementType().equals(typedoctype)) {
    throw new IllegalArgumentException(
        "Could not construct : "
            + getClass().getName()
            + " from doc containing a "
            + doc.getName());
  }

  Enumeration elements = doc.getChildren();

  while (elements.hasMoreElements()) {
    XMLElement elem = (XMLElement) elements.nextElement();

    if (!handleElement(elem)) {
      if (LOG.isEnabledFor(Level.DEBUG)) {
        LOG.debug("Unhandled Element: " + elem.toString());
      }
    }
  }

  // HACK Compatibility
  setDestPeerID(getDestPeerID());

  // Sanity Check!!!
  if (hasALoop()) {
    throw new IllegalArgumentException("Route contains a loop!");
  }
}
/**
 * Set the encapsulated Peer Advertisement.
 *
 * @param adv is the RdvAdvertisement to be set.
 * @return RdvAdvertisement the old Advertisement of the Peer represented by this object
 */
RdvAdvertisement setRdvAdvertisement(RdvAdvertisement adv) {
  if (!radv.getPeerID().equals(adv.getPeerID())) {
    if (LOG.isEnabledFor(Level.ERROR)) {
      LOG.error("adv refers to a different peer");
    }
    throw new IllegalArgumentException("adv refers to a different peer");
  }

  RdvAdvertisement old = radv;
  this.radv = adv;

  setLastUpdateTime(TimeUtils.timeNow());

  return old;
}
/** {@inheritDoc} */
public KeyStore loadKeyStore(char[] password) throws KeyStoreException, IOException {
  if (LOG.isEnabledFor(Level.DEBUG)) {
    LOG.debug(
        "Loading ("
            + keystore_type
            + ","
            + keystore_provider
            + ") store from "
            + keystore_location);
  }

  try {
    KeyStore store;

    if (null == keystore_provider) {
      store = KeyStore.getInstance(keystore_type);
    } else {
      store = KeyStore.getInstance(keystore_type, keystore_provider);
    }

    store.load(keystore_location.toURL().openStream(), password);

    return store;
  } catch (NoSuchAlgorithmException failed) {
    KeyStoreException failure =
        new KeyStoreException("NoSuchAlgorithmException during keystore processing");
    failure.initCause(failed);
    throw failure;
  } catch (CertificateException failed) {
    KeyStoreException failure =
        new KeyStoreException("CertificateException during keystore processing");
    failure.initCause(failed);
    throw failure;
  } catch (NoSuchProviderException failed) {
    KeyStoreException failure =
        new KeyStoreException("NoSuchProviderException during keystore processing");
    failure.initCause(failed);
    throw failure;
  }
}
/**
 * Return a messenger suitable for sending to this peer.
 *
 * @return a messenger to this PVE peer or <code>null</code> if the peer is unreachable.
 */
private Messenger getCachedMessenger() {
  boolean updateAlive = false;

  synchronized (this) {
    if ((null == cachedMessenger) || cachedMessenger.isClosed()) {
      cachedMessenger = null;

      if (LOG.isEnabledFor(Level.DEBUG)) {
        LOG.debug("Getting cached Messenger for " + radv.getName());
      }

      updateAlive = true;
      cachedMessenger = endpoint.getMessenger(getDestAddress(), radv.getRouteAdv());
    }
  }

  if (updateAlive) {
    setAlive(null != cachedMessenger);
  }

  return cachedMessenger;
}
@Override
public boolean isWarnEnabled() {
  return wrappedLogger.isEnabledFor(Level.WARN);
}
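// Such a wrapper typically pairs the check with a delegating log method. A minimal sketch,
// assuming the wrapper forwards to the same wrappedLogger field (the warn signature below is
// illustrative, not taken from the original interface):
public void warn(String message, Throwable t) {
  // Guarding with isWarnEnabled() skips message construction when WARN is disabled.
  if (isWarnEnabled()) {
    wrappedLogger.warn(message, t);
  }
}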