/**
 * Polls every registered resource monitor and, if any resource reports a
 * modification, triggers a single {@code reload(sc)} at the end of the sweep.
 * Monitors whose resource can no longer be accessed are dropped.
 */
public void run() {
  assert (monitors != null);
  boolean reloadRequired = false;
  for (Iterator<Monitor> iter = monitors.iterator(); iter.hasNext(); ) {
    Monitor monitor = iter.next();
    try {
      // Poll every monitor on each sweep (each call refreshes that monitor's
      // timestamp), even once a change has already been detected.
      if (monitor.hasBeenModified() && !reloadRequired) {
        reloadRequired = true;
      }
    } catch (IOException ioe) {
      // The resource is unreachable: log and stop monitoring it.
      if (LOGGER.isLoggable(Level.SEVERE)) {
        LOGGER.severe(
            "Unable to access url "
                + monitor.uri.toString()
                + ". Monitoring for this resource will no longer occur.");
      }
      if (LOGGER.isLoggable(Level.FINE)) {
        LOGGER.log(Level.FINE, ioe.toString(), ioe);
      }
      iter.remove();
    }
  }
  if (reloadRequired) {
    reload(sc);
  }
}
public <T> List<ExtensionComponent<T>> findComponents(Class<T> type, Hudson hudson) { List<ExtensionFinder> finders; if (type==ExtensionFinder.class) { // Avoid infinite recursion of using ExtensionFinders to find ExtensionFinders finders = Collections.<ExtensionFinder>singletonList(new ExtensionFinder.Sezpoz()); } else { finders = hudson.getExtensionList(ExtensionFinder.class); } /** * See {@link ExtensionFinder#scout(Class, Hudson)} for the dead lock issue and what this does. */ if (LOGGER.isLoggable(Level.FINER)) LOGGER.log(Level.FINER,"Scout-loading ExtensionList: "+type, new Throwable()); for (ExtensionFinder finder : finders) { finder.scout(type, hudson); } List<ExtensionComponent<T>> r = new ArrayList<ExtensionComponent<T>>(); for (ExtensionFinder finder : finders) { try { r.addAll(finder._find(type, hudson)); } catch (AbstractMethodError e) { // backward compatibility for (T t : finder.findExtensions(type, hudson)) r.add(new ExtensionComponent<T>(t)); } } return r; }
/** * is invoked in an implementation-specific fashion to determine if an instance is still valid * to be returned by the pool. It will only be invoked on an "activated" instance. * * @param an instance of {@link Session} maintained by this pool. * @return <code>true</code> if the connection is still alive and operative (checked by asking * its user name), <code>false</code> otherwise. */ @Override public boolean validateObject(Object obj) { ISession session = (ISession) obj; boolean valid = !session.isClosed(); // MAKE PROPER VALIDITY CHECK HERE as for GEOT-1273 if (valid) { try { if (LOGGER.isLoggable(Level.FINEST)) { LOGGER.finest(" Validating SDE Connection " + session); } /* * Validate the connection's health with testServer instead of getUser. The * former is lighter weight, getUser() forced a server round trip and under * heavy concurrency ate about 30% the time */ session.testServer(); } catch (IOException e) { LOGGER.info( "Can't validate SeConnection, discarding it: " + session + ". Reason: " + e.getMessage()); valid = false; } } return valid; }
public void doIndex(StaplerRequest req, StaplerResponse rsp) throws IOException, ServletException { List<Ancestor> l = req.getAncestors(); for (int i = l.size() - 1; i >= 0; i--) { Ancestor a = l.get(i); if (a.getObject() instanceof SearchableModelObject) { SearchableModelObject smo = (SearchableModelObject) a.getObject(); if (LOGGER.isLoggable(Level.FINE)) { LOGGER.fine( String.format( "smo.displayName=%s, searchName=%s", smo.getDisplayName(), smo.getSearchName())); } SearchIndex index = smo.getSearchIndex(); String query = req.getParameter("q"); if (query != null) { SuggestedItem target = find(index, query, smo); if (target != null) { // found rsp.sendRedirect2(req.getContextPath() + target.getUrl()); return; } } } } // no exact match. show the suggestions rsp.setStatus(SC_NOT_FOUND); req.getView(this, "search-failed.jelly").forward(req, rsp); }
@Override public CoordinateReferenceSystem createCRS(int srid, Connection cx) throws SQLException { // if the official EPSG database has an answer, use that one CoordinateReferenceSystem crs = super.createCRS(srid, cx); if (crs != null) return crs; // otherwise try to use the Ingres spatial_ref_sys WKT String sql = "SELECT srtext FROM spatial_ref_sys WHERE srid = " + srid; Statement st = null; ResultSet rs = null; try { st = cx.createStatement(); rs = st.executeQuery(sql.toString()); if (rs.next()) { String wkt = rs.getString(1); if (wkt != null) { try { return CRS.parseWKT(wkt); } catch (Exception e) { if (LOGGER.isLoggable(Level.FINE)) LOGGER.log(Level.FINE, "Could not parse WKT " + wkt, e); return null; } } } } finally { dataStore.closeSafe(rs); dataStore.closeSafe(st); } return null; }
/**
 * Creates a monitor for the given resource, recording its current last-modified
 * time as the baseline that {@code hasBeenModified()} later compares against.
 *
 * @param uri the resource to watch for modifications
 * @throws IOException if the resource's last-modified time cannot be read
 */
Monitor(URI uri) throws IOException {
  this.uri = uri;
  // Baseline timestamp; future checks compare against this value.
  this.timestamp = getLastModified();
  if (LOGGER.isLoggable(Level.INFO)) {
    LOGGER.log(Level.INFO, "Monitoring {0} for modifications", uri.toURL().toExternalForm());
  }
}
/** * Try to resolve email address using resolvers. * * @return User address or null if resolution failed */ public static String resolve(User u) { if (LOGGER.isLoggable(Level.FINE)) { LOGGER.fine("Resolving e-mail address for \"" + u + "\" ID=" + u.getId()); } for (MailAddressResolver r : all()) { String email = r.findMailAddressFor(u); if (email != null) { if (LOGGER.isLoggable(Level.FINE)) { LOGGER.fine(r + " resolved " + u.getId() + " to " + email); } return email; } } // fall back logic return resolveFast(u); }
/** Registers each item's display name in the search index under its search URL. */
void addDisplayNamesToSearchIndex(SearchIndexBuilder sib, Collection<TopLevelItem> items) {
  for (TopLevelItem item : items) {
    if (LOGGER.isLoggable(Level.FINE)) {
      LOGGER.fine(
          String.format(
              "Adding url=%s,displayName=%s", item.getSearchUrl(), item.getDisplayName()));
    }
    sib.add(item.getSearchUrl(), item.getDisplayName());
  }
}
/**
 * Checks whether the monitored resource has changed since the last check, updating
 * the recorded baseline timestamp when it has.
 *
 * @return {@code true} if the resource's last-modified time advanced past the baseline
 * @throws IOException if the resource's last-modified time cannot be read
 */
boolean hasBeenModified() throws IOException {
  final long current = getLastModified();
  if (current <= timestamp) {
    // NOTE(review): a resource replaced with an OLDER copy (timestamp moving
    // backwards) is not reported as modified — confirm this is intentional.
    return false;
  }
  timestamp = current;
  if (LOGGER.isLoggable(Level.INFO)) {
    LOGGER.log(Level.INFO, "{0} changed!", uri.toURL().toExternalForm());
  }
  return true;
}
/**
 * Puts the URI-specified file under version control on the server.
 *
 * <p>Fix: the HTTP connection is now released in a {@code finally} block, so it is
 * no longer leaked when {@code executeMethod} or {@code processResponse} throws.
 *
 * @param uri the resource to place under version control
 * @throws Exception if executing the request or processing the response fails
 */
public void versionControl(final String uri) throws Exception {
  if (LOGGER.isLoggable(Level.INFO)) {
    LOGGER.info("versionControl '" + uri + "'");
  }
  final VersionControlMethod httpMethod = new VersionControlMethod(uri);
  try {
    client.executeMethod(httpMethod);
    processResponse(httpMethod, true);
  } finally {
    // Always return the connection to the pool, even when the request fails.
    httpMethod.releaseConnection();
  }
}
public boolean handleIndexRequest( RequestImpl req, ResponseImpl rsp, Object node, MetaClass nodeMetaClass) throws IOException, ServletException { Stapler stapler = req.stapler; // TODO: find the list of welcome pages for this class by reading web.xml RequestDispatcher indexJsp = createRequestDispatcher(req, node.getClass(), node, "index.jsp"); if (indexJsp != null) { if (LOGGER.isLoggable(Level.FINE)) LOGGER.fine("Invoking index.jsp on " + node); stapler.forward(indexJsp, req, rsp); return true; } return false; }
/**
 * Creates a monitor over the given configuration resources. URIs whose monitor cannot
 * be set up (I/O failure) are logged and skipped.
 *
 * <p>Fix: {@code monitors} is now initialized before the loop. Previously it was
 * lazily created inside the loop, so an empty {@code uris} collection left it
 * {@code null} and tripped the {@code assert (monitors != null)} in {@code run()}.
 *
 * @param sc the servlet context passed to {@code reload} when changes are detected
 * @param uris the resources to monitor; must not be {@code null}
 */
public WebConfigResourceMonitor(ServletContext sc, Collection<URI> uris) {
  assert (uris != null);
  this.sc = sc;
  if (monitors == null) {
    monitors = new ArrayList<Monitor>(uris.size());
  }
  for (URI uri : uris) {
    try {
      Monitor m = new Monitor(uri);
      monitors.add(m);
    } catch (IOException ioe) {
      if (LOGGER.isLoggable(Level.SEVERE)) {
        LOGGER.severe(
            "Unable to setup resource monitor for "
                + uri.toString()
                + ". Resource will not be monitored for changes.");
      }
      if (LOGGER.isLoggable(Level.FINE)) {
        LOGGER.log(Level.FINE, ioe.toString(), ioe);
      }
    }
  }
}
/** * When there are mutiple suggested items, this method can narrow down the resultset to the * SuggestedItem that has a url that contains the query. This is useful is one job has a display * name that matches another job's project name. * * @param r A list of Suggested items. It is assumed that there is at least one SuggestedItem in * r. * @param query A query string * @return Returns the SuggestedItem which has a search url that contains the query. If no * SuggestedItems have a search url which contains the query, then the first SuggestedItem in * the List is returned. */ static SuggestedItem findClosestSuggestedItem(List<SuggestedItem> r, String query) { for (SuggestedItem curItem : r) { if (LOGGER.isLoggable(Level.FINE)) { LOGGER.fine( String.format("item's searchUrl:%s;query=%s", curItem.item.getSearchUrl(), query)); } if (curItem.item.getSearchUrl().contains(Util.rawEncode(query))) { return curItem; } } // couldn't find an item with the query in the url so just // return the first one return r.get(0); }
@Override protected String handleObjectPost(Object object) throws Exception { String workspace = getAttribute("workspace"); DataStoreInfo ds = (DataStoreInfo) object; if (ds.getWorkspace() != null) { // ensure the specifried workspace matches the one dictated by the uri WorkspaceInfo ws = (WorkspaceInfo) ds.getWorkspace(); if (!workspace.equals(ws.getName())) { throw new RestletException( "Expected workspace " + workspace + " but client specified " + ws.getName(), Status.CLIENT_ERROR_FORBIDDEN); } } else { ds.setWorkspace(catalog.getWorkspaceByName(workspace)); } ds.setEnabled(true); // if no namespace parameter set, set it // TODO: we should really move this sort of thing to be something central if (!ds.getConnectionParameters().containsKey("namespace")) { WorkspaceInfo ws = ds.getWorkspace(); NamespaceInfo ns = catalog.getNamespaceByPrefix(ws.getName()); if (ns == null) { ns = catalog.getDefaultNamespace(); } if (ns != null) { ds.getConnectionParameters().put("namespace", ns.getURI()); } } // attempt to set the datastore type try { DataAccessFactory factory = DataStoreUtils.aquireFactory(ds.getConnectionParameters()); ds.setType(factory.getDisplayName()); } catch (Exception e) { LOGGER.warning("Unable to determine datastore type from connection parameters"); if (LOGGER.isLoggable(Level.FINE)) { LOGGER.log(Level.FINE, "", e); } } catalog.validate((DataStoreInfo) object, false).throwIfInvalid(); catalog.add((DataStoreInfo) object); LOGGER.info("POST data store " + ds.getName()); return ds.getName(); }
/**
 * Deletes the object identified by {@code oid}, records the elapsed time in the
 * delete statistics, and writes the multicell configuration to the response body.
 */
protected void handleDelete(
    final HttpRequest request, final HttpResponse response, final NewObjectIdentifier oid)
    throws ArchiveException, IOException {
  final long start = System.currentTimeMillis();
  Coordinator.getInstance().delete(oid, true, false);
  final long elapsed = System.currentTimeMillis() - start;

  deleteStats.add(elapsed);
  if (LOGGER.isLoggable(Level.INFO)) {
    LOGGER.info("MEAS delete __ time " + elapsed);
  }

  HttpOutputStream out = (HttpOutputStream) response.getOutputStream();
  writeMulticellConfig(out);
}
protected void handle( final String pathInContext, final String pathParams, final HttpRequest request, final HttpResponse response, final HttpFields trailer) throws IOException { NewObjectIdentifier identifier; try { identifier = getRequestIdentifier(request, response, trailer); } catch (IllegalArgumentException e) { // already sent error reply in superclass return; } if (LOGGER.isLoggable(Level.FINE)) { LOGGER.fine("deleting object with id " + identifier); } try { handleDelete(request, response, identifier); } catch (NoSuchObjectException e) { if (LOGGER.isLoggable(Level.WARNING)) { LOGGER.log(Level.WARNING, "delete failed on NoSuchObjectException.", e); } sendError(response, trailer, HttpResponse.__404_Not_Found, e.getMessage()); } catch (ObjectLostException e) { if (LOGGER.isLoggable(Level.WARNING)) { LOGGER.log(Level.WARNING, "delete failed on ObjectLostException.", e); } sendError(response, trailer, HttpResponse.__410_Gone, e.getMessage()); } catch (ArchiveException e) { if (LOGGER.isLoggable(Level.WARNING)) { LOGGER.log(Level.WARNING, "delete failed on ArchiveException.", e); } sendError(response, trailer, HttpResponse.__400_Bad_Request, e.getMessage()); } catch (InternalException e) { if (LOGGER.isLoggable(Level.WARNING)) { LOGGER.log(Level.WARNING, "delete failed on InternalException.", e); } sendError(response, trailer, HttpResponse.__500_Internal_Server_Error, e.getMessage()); } catch (IllegalArgumentException e) { if (LOGGER.isLoggable(Level.WARNING)) { LOGGER.log(Level.WARNING, "delete failed on IllegalArgumentException.", e); } sendError(response, trailer, HttpResponse.__400_Bad_Request, e.getMessage()); } }
/**
 * Handles an initial dataset-partition read request: decodes the job id and partition
 * from the buffer, opens a network output channel on the connection, and hands both to
 * the partition manager. On failure the channel is aborted.
 */
@Override
public void accept(ByteBuffer buffer) {
  final JobId jobId = new JobId(buffer.getLong());
  final int partition = buffer.getInt();
  if (LOGGER.isLoggable(Level.FINE)) {
    LOGGER.fine(
        "Received initial dataset partition read request for JobId: "
            + jobId
            + " partition: "
            + partition
            + " on channel: "
            + ccb);
  }
  noc = new NetworkOutputChannel(ccb, 1);
  try {
    partitionManager.initializeDatasetPartitionReader(jobId, partition, noc);
  } catch (HyracksException e) {
    // NOTE(review): the exception is not rethrown or logged here; the channel is
    // simply aborted — confirm the failure surfaces to the requester elsewhere.
    noc.abort();
  }
}
/**
 * SAX callback: tracks entry into the default-package element and, for each bean
 * element, registers the referenced class as externally constrained.
 *
 * <p>Fix: a {@link ClassNotFoundException} is now logged with the throwable attached
 * (previously only {@code e.getMessage()} was recorded, losing the stack trace).
 */
public void startElement(String uri, String localName, String qName, Attributes attributes)
    throws SAXException {
  if (DEFAULT_PACKAGE_QNAME.equalsIgnoreCase(qName)) {
    defaultPackageElement = true;
  } else if (BEAN_QNAME.equalsIgnoreCase(qName)) {
    // Fully-qualified name of the constrained class declared by this element.
    String className = defaultPackage + PACKAGE_SEPARATOR + attributes.getValue(CLASS_QNAME);
    if (LOGGER.isLoggable(Level.INFO)) {
      String msg = "Detected external constraints on class " + className;
      LOGGER.info(msg);
    }
    try {
      Class<?> clazz = ReflectionUtils.forName(className);
      BEAN_VALIDATION_HELPER.putConstrainedClass(clazz);
    } catch (ClassNotFoundException e) {
      // Keep the full exception so the failure is diagnosable, not just its message.
      String errMsg = "Loading found class failed. Exception: " + e.getMessage();
      LOGGER.log(Level.WARNING, errMsg, e);
    }
  }
}
private static AmazonECSClient getAmazonECSClient(String credentialsId, String regionName) { final AmazonECSClient client; AmazonWebServicesCredentials credentials = getCredentials(credentialsId); if (credentials == null) { // no credentials provided, rely on com.amazonaws.auth.DefaultAWSCredentialsProviderChain // to use IAM Role define at the EC2 instance level ... client = new AmazonECSClient(); } else { if (LOGGER.isLoggable(Level.FINE)) { String awsAccessKeyId = credentials.getCredentials().getAWSAccessKeyId(); String obfuscatedAccessKeyId = StringUtils.left(awsAccessKeyId, 4) + StringUtils.repeat("*", awsAccessKeyId.length() - (2 * 4)) + StringUtils.right(awsAccessKeyId, 4); LOGGER.log( Level.FINE, "Connect to Amazon ECS with IAM Access Key {1}", new Object[] {obfuscatedAccessKeyId}); } client = new AmazonECSClient(credentials); } client.setRegion(getRegion(regionName)); return client; }
/**
 * Resolves the "type" attribute into a {@link Class}: evaluates the value expression to a
 * class name, loads it reflectively, and falls back to the {@code java.lang} package for
 * unqualified names (e.g. "String"). Other attributes are returned unchanged.
 *
 * @throws TagAttributeException if neither the literal nor the java.lang-prefixed class
 *     name can be loaded; the original lookup failure is attached as the cause.
 */
@Override
public Object getValue(String attributeName) {
  Object result = super.getValue(attributeName);
  if (!"type".equals(attributeName)) {
    return result;
  }
  if (null == result || result instanceof Class) {
    return result;
  }
  FacesContext context = FacesContext.getCurrentInstance();
  ELContext elContext = context.getELContext();
  String classStr = (String) ((ValueExpression) result).getValue(elContext);
  if (null == classStr) {
    return result;
  }
  try {
    result = ReflectionUtil.forName(classStr);
    this.setValue(attributeName, result);
  } catch (ClassNotFoundException ex) {
    // Retry assuming an unqualified java.lang type.
    classStr = "java.lang." + classStr;
    try {
      result = ReflectionUtil.forName(classStr);
      this.setValue(attributeName, result);
    } catch (ClassNotFoundException ignored) {
      // Report the ORIGINAL failure (ex); the java.lang retry was only a fallback.
      String message = "Unable to obtain class for " + classStr;
      if (LOGGER.isLoggable(Level.INFO)) {
        LOGGER.log(Level.INFO, message, ex);
      }
      throw new TagAttributeException(tag, name, message, ex);
    }
  }
  return result;
}
/**
 * Creates the spill file for this feed (named from the feed id, runtime type,
 * partition, and current date) and opens a buffered output stream over it.
 *
 * @throws IOException if the file cannot be created
 */
private void createFile() throws IOException {
  // NOTE(review): Date.toString() contains ':' characters, which are invalid in
  // Windows file names — confirm this code only targets POSIX file systems.
  String dateSuffix = new Date().toString().replace(' ', '_');
  String fileName =
      feedId.toString()
          + "_"
          + frameDistributor.getFeedRuntimeType()
          + "_"
          + frameDistributor.getPartition()
          + "_"
          + dateSuffix;

  file = new File(fileName);
  if (!file.exists() && !file.createNewFile()) {
    throw new IOException("Unable to create spill file for feed " + feedId);
  }
  bos = new BufferedOutputStream(new FileOutputStream(file));
  if (LOGGER.isLoggable(Level.INFO)) {
    LOGGER.info("Created Spill File for feed " + feedId);
  }
}
/**
 * Acquires the value associated with {@code keyBytes}, optionally creating a new entry
 * when the key is absent.
 *
 * <p>When {@code create == true}, this method is equivalent to:
 *
 * <pre>
 * Object value = map.get("Key");
 *
 * if ( value == null ) {
 *     value = new Object();
 *     map.put("Key", value);
 * }
 *
 * return value;
 * </pre>
 *
 * @param keyBytes the key of the entry
 * @param value an object to be reused, null creates a new object.
 * @param hash2 a hash code relating to {@code keyBytes} (not the natural hash of
 *     {@code keyBytes})
 * @param create false - if the key can not be found null will be returned, true - if the
 *     key can not be found a value will be acquired
 * @return an entry.value whose entry.key equals {@code keyBytes}
 */
V acquire(DirectBytes keyBytes, V value, int hash2, boolean create) {
  lock();
  try {
    // Begin probing the open-addressed hash lookup at the chain for hash2.
    hash2 = hashLookup.startSearch(hash2);
    while (true) {
      int pos = hashLookup.nextPos();
      if (pos < 0) {
        // End of the probe chain: key absent. Create a fresh entry or report a miss.
        return create ? acquireEntry(keyBytes, value, hash2) : null;
      } else {
        // Map the slot index to its byte offset within the entries region.
        // NOTE(review): pos * entrySize is an int multiplication before widening to
        // long — confirm segments can never be large enough for this to overflow.
        final long offset = entriesOffset + pos * entrySize;
        tmpBytes.storePositionAndSize(bytes, offset, entrySize);
        final boolean miss;
        if (LOGGER.isLoggable(Level.FINE)) {
          // Timed variant of the key comparison, purely for diagnostics.
          final long start0 = System.nanoTime();
          miss = !keyEquals(keyBytes, tmpBytes);
          final long time0 = System.nanoTime() - start0;
          if (time0 > 1e6) LOGGER.fine("startsWith took " + time0 / 100000 / 10.0 + " ms.");
        } else {
          miss = !keyEquals(keyBytes, tmpBytes);
        }
        if (miss) continue;
        // Key matched: advance past the stored key to the stop-bit-encoded value length.
        long valueLengthOffset = keyBytes.remaining() + tmpBytes.position();
        tmpBytes.position(valueLengthOffset);
        // skip the value length
        // todo use the value length to limit reading below
        long valueLength = tmpBytes.readStopBit();
        final long valueOffset = align(tmpBytes.position()); // includes the stop bit length.
        tmpBytes.position(valueOffset);
        return readObjectUsing(value, offset + valueOffset);
      }
    }
  } finally {
    unlock();
  }
}
/** * Queue maintenance. * * <p>Move projects between {@link #waitingList}, {@link #blockedProjects}, and {@link * #buildables} appropriately. */ public synchronized void maintain() { if (LOGGER.isLoggable(Level.FINE)) LOGGER.fine("Queue maintenance started " + this); Iterator<BlockedItem> itr = blockedProjects.values().iterator(); while (itr.hasNext()) { BlockedItem p = itr.next(); if (!isBuildBlocked(p.task)) { // ready to be executed LOGGER.fine(p.task.getFullDisplayName() + " no longer blocked"); itr.remove(); makeBuildable(new BuildableItem(p)); } } while (!waitingList.isEmpty()) { WaitingItem top = peek(); if (!top.timestamp.before(new GregorianCalendar())) return; // finished moving all ready items from queue waitingList.remove(top); Task p = top.task; if (!isBuildBlocked(p)) { // ready to be executed immediately LOGGER.fine(p.getFullDisplayName() + " ready to build"); makeBuildable(new BuildableItem(top)); } else { // this can't be built now because another build is in progress // set this project aside. LOGGER.fine(p.getFullDisplayName() + " is blocked"); blockedProjects.put(p, new BlockedItem(top)); } } if (sortingHandler != null) sortingHandler.sortBuildableItems(buildables); }
/**
 * Reads EOL-terminated messages from the client socket and forwards each to the
 * message receiver, closing the client connection when the stream ends or fails.
 *
 * <p>Fix: the previous code cast {@code in.read()} to {@code char} BEFORE testing for
 * end-of-stream. {@code (char) -1} is {@code '\uffff'} (65535), so the {@code == -1}
 * check could never match; on a closed stream the loop spun forever appending
 * {@code '\uffff'} until the buffer overflowed. The result is now read into an
 * {@code int} and tested for -1 before being cast.
 */
@Override
public void run() {
  try {
    InputStream in = client.getInputStream();
    CharBuffer buffer = CharBuffer.allocate(5000);
    int c;
    while ((c = in.read()) != -1) {
      char ch = (char) c;
      // Accumulate characters until the end-of-line marker or end-of-stream.
      while (ch != EOL) {
        buffer.put(ch);
        c = in.read();
        if (c == -1) {
          break;
        }
        ch = (char) c;
      }
      if (c == -1) {
        // Stream ended mid-line; drop the partial message and stop.
        break;
      }
      buffer.flip();
      String s = new String(buffer.array(), 0, buffer.limit());
      messageReceiver.sendMessage(s + "\n");
      // Reset the buffer for the next message.
      buffer.position(0);
      buffer.limit(5000);
    }
  } catch (Exception e) {
    e.printStackTrace();
    if (LOGGER.isLoggable(Level.WARNING)) {
      LOGGER.warning("Unable to process mesages from client" + client);
    }
  } finally {
    if (client != null) {
      try {
        client.close();
      } catch (Exception e) {
        e.printStackTrace();
      }
    }
  }
}
/**
 * Redirects insecure (HTTP) requests to the equivalent HTTPS URL on the configured
 * SSL port, preserving path info and query parameters; secure requests pass through.
 *
 * <p>Fixes: the query string is now split on the FIRST {@code '='} only
 * ({@code split("=", 2)}), so parameter values that themselves contain {@code '='}
 * (e.g. base64 tokens) are preserved instead of dropped with a warning. The old
 * {@code kvpArray == null} check was dead code — {@code String.split} never returns
 * null — and has been removed.
 */
@Override
public void doFilter(ServletRequest request, ServletResponse response, FilterChain chain)
    throws IOException, ServletException {
  if (request.isSecure()) {
    // already HTTPS; nothing to do
    chain.doFilter(request, response);
    return;
  }

  HttpServletRequest httpRequest = (HttpServletRequest) request;
  StringBuilder buff = new StringBuilder("https://");
  buff.append(httpRequest.getServerName())
      .append(":")
      .append(sslPort)
      .append(httpRequest.getContextPath())
      .append(httpRequest.getServletPath());

  Map<String, String> kvp = new HashMap<String, String>();
  if (httpRequest.getQueryString() != null) {
    for (String kvpString : httpRequest.getQueryString().split("&")) {
      // Limit of 2: split only at the first '=' so values containing '=' survive.
      String[] kvpArray = kvpString.split("=", 2);
      if (kvpArray.length != 2) {
        LOGGER.warning("Unknown query parameter: " + kvpString);
        continue;
      }
      kvp.put(kvpArray[0], kvpArray[1]);
    }
  }

  String redirectURL =
      ResponseUtils.buildURL(buff.toString(), httpRequest.getPathInfo(), kvp, null);
  if (LOGGER.isLoggable(Level.INFO)) {
    LOGGER.info("Redirecting " + httpRequest.getRequestURL() + " to " + redirectURL);
  }
  ((HttpServletResponse) response).sendRedirect(redirectURL);
}
/**
 * Builds one {@link ZGroupLayerPainter} per layer that has applicable styles and at
 * least one feature. If setup fails part-way, every painter created so far is closed
 * (best-effort) before the exception propagates, so no feature iterators leak.
 */
private List<ZGroupLayerPainter> buildLayerPainters(
    Graphics2D graphics,
    StreamingRenderer renderer,
    String layerId,
    ProgressListener cancellationListener)
    throws IOException, FactoryException, NoninvertibleTransformException, SchemaException,
        TransformException {
  List<ZGroupLayerPainter> painters = new ArrayList<>();
  // Flag stays true until the whole loop completes; the finally block uses it to
  // detect a mid-loop failure and release the painters accumulated so far.
  boolean closePainters = true;
  try {
    for (Layer layer : layers) {
      // get the LiteFeatureTypeStyle for this layer
      final FeatureSource featureSource = layer.getFeatureSource();
      if (featureSource == null) {
        throw new IllegalArgumentException("The layer does not contain a feature source");
      }
      final FeatureType schema = featureSource.getSchema();

      final ArrayList<LiteFeatureTypeStyle> lfts =
          renderer.createLiteFeatureTypeStyles(layer, graphics, false);
      if (lfts.isEmpty()) {
        continue;
      } else {
        if (LOGGER.isLoggable(Level.FINE)) {
          LOGGER.fine("Processing " + lfts.size() + " stylers for " + schema.getName());
        }
      }

      // get the feature iterator we need
      FeatureCollection features = renderer.getFeatures(layer, schema, lfts);
      // While we could use a non mark feature iterator for single fts layers,
      // that would cause multiple connections to be open at the same time,
      // which in turn could cause deadlocks against connection pools, so we
      // are going to build a MarkFeatureIterator regardless
      // TODO: we could optimize down to simple streaming painting if we end up
      // with a single painter with a single fts (due to scale dependencies)
      // but we'd have to delay opening the MarkFeatureIterator to recognize the
      // situation
      int maxFeatures = SortedFeatureReader.getMaxFeaturesInMemory(layer.getQuery());
      MarkFeatureIterator fi =
          MarkFeatureIterator.create(features, maxFeatures, cancellationListener);
      if (fi.hasNext()) {
        ZGroupLayerPainter painter = new ZGroupLayerPainter(fi, lfts, renderer, layerId);
        painters.add(painter);
      } else {
        // no features to paint for this layer; release the iterator right away
        fi.close();
      }
    }

    // got to the end cleanly, no need to close the painters accumulated so far
    closePainters = false;
  } finally {
    if (closePainters) {
      // setup failed part-way: close every painter built so far, best-effort
      for (ZGroupLayerPainter painter : painters) {
        try {
          painter.close();
        } catch (Exception e) {
          LOGGER.log(Level.FINE, "Failed to close cleanly layer painter " + painter, e);
        }
      }
    }
  }

  validateSortBy(painters);

  return painters;
}
/**
 * Resolves all the groups that the user is in.
 *
 * <p>We now use <a
 * href="http://msdn.microsoft.com/en-us/library/windows/desktop/ms680275(v=vs.85).aspx">tokenGroups</a>
 * attribute, which is a computed attribute that lists all the SIDs of the groups that the user is
 * directly/indirectly in. We then use that to retrieve all the groups in one query and resolve
 * their canonical names.
 *
 * @param domainDN Distinguished name of the domain the group search is scoped to.
 * @param userDN User's distinguished name.
 * @param context Used for making queries.
 * @return the set of authorities granted through direct and indirect group membership.
 * @throws NamingException if any of the LDAP lookups fail.
 */
private Set<GrantedAuthority> resolveGroups(String domainDN, String userDN, DirContext context)
    throws NamingException {
  LOGGER.finer("Looking up group of " + userDN);
  Attributes id = context.getAttributes(userDN, new String[] {"tokenGroups", "memberOf", "CN"});
  Attribute tga = id.get("tokenGroups");
  if (tga == null) {
    // see JENKINS-11644. still trying to figure out when this happens
    LOGGER.warning("Failed to retrieve tokenGroups for " + userDN);
    HashSet<GrantedAuthority> r = new HashSet<GrantedAuthority>();
    r.add(new GrantedAuthorityImpl("unable-to-retrieve-tokenGroups"));
    return r;
  }
  // build up the query to retrieve all the groups: an LDAP OR filter with one
  // objectSid clause per SID found in tokenGroups
  StringBuilder query = new StringBuilder("(|");
  List<byte[]> sids = new ArrayList<byte[]>();
  NamingEnumeration<?> tokenGroups = tga.getAll();
  while (tokenGroups.hasMore()) {
    byte[] gsid = (byte[]) tokenGroups.next();
    query.append("(objectSid={" + sids.size() + "})");
    sids.add(gsid);
  }
  tokenGroups.close();
  query.append(")");
  // stage 1: resolve all tokenGroups SIDs to canonical names in a single search
  Set<GrantedAuthority> groups = new HashSet<GrantedAuthority>();
  NamingEnumeration<SearchResult> renum =
      new LDAPSearchBuilder(context, domainDN)
          .subTreeScope()
          .returns("cn")
          .search(query.toString(), sids.toArray());
  while (renum.hasMore()) {
    Attributes a = renum.next().getAttributes();
    Attribute cn = a.get("cn");
    if (LOGGER.isLoggable(Level.FINE)) LOGGER.fine(userDN + " is a member of " + cn);
    groups.add(new GrantedAuthorityImpl(cn.get().toString()));
  }
  renum.close();
  {
    /* stage 2: use memberOf to find groups that aren't picked up by
    tokenGroups. This includes distribution groups */
    LOGGER.fine("Stage 2: looking up via memberOf");
    // Transitive walk of the memberOf chains using an explicit stack; the
    // groups.add(...) return value stops revisiting already-known groups.
    Stack<Attributes> q = new Stack<Attributes>();
    q.push(id);
    while (!q.isEmpty()) {
      Attributes identity = q.pop();
      LOGGER.finer("Looking up group of " + identity);
      Attribute memberOf = identity.get("memberOf");
      if (memberOf == null) continue;
      for (int i = 0; i < memberOf.size(); i++) {
        try {
          Attributes group =
              context.getAttributes(
                  new LdapName(memberOf.get(i).toString()), new String[] {"CN", "memberOf"});
          Attribute cn = group.get("CN");
          if (cn == null) {
            LOGGER.fine("Failed to obtain CN of " + memberOf.get(i));
            continue;
          }
          if (LOGGER.isLoggable(Level.FINE))
            LOGGER.fine(cn.get() + " is a member of " + memberOf.get(i));
          if (groups.add(new GrantedAuthorityImpl(cn.get().toString()))) {
            q.add(group); // recursively look for groups that this group is a member of.
          }
        } catch (NameNotFoundException e) {
          LOGGER.fine("Failed to obtain CN of " + memberOf.get(i));
        }
      }
    }
  }
  return groups;
}
/**
 * Execute an HTTP request.
 *
 * @return HTTP response object of specified {@code type}.
 * @throws RetrofitError Thrown if any error occurs during the HTTP request.
 */
private Object invokeRequest(RestMethodInfo methodDetails, Object[] args) {
  methodDetails.init(); // Ensure all relevant method information has been loaded.

  String url = server.getUrl();
  try {
    Request request =
        new RequestBuilder(converter) //
            .setApiUrl(server.getUrl())
            .setArgs(args)
            .setHeaders(headers.get())
            .setMethodInfo(methodDetails)
            .build();
    url = request.getUrl();

    if (!methodDetails.isSynchronous) {
      // If we are executing asynchronously then update the current thread with a useful name.
      Thread.currentThread().setName(THREAD_PREFIX + url);
    }

    if (LOGGER.isLoggable(Level.FINE)) {
      logRequest(request);
    }

    // Give the profiler (if configured) before/after hooks around the actual call.
    Object profilerObject = null;
    if (profiler != null) {
      profilerObject = profiler.beforeCall();
    }

    long start = System.nanoTime();
    Response response = clientProvider.get().execute(request);
    long elapsedTime = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - start);

    int statusCode = response.getStatus();
    if (profiler != null) {
      RequestInformation requestInfo = getRequestInfo(server, methodDetails, request);
      profiler.afterCall(requestInfo, elapsedTime, statusCode, profilerObject);
    }

    TypedInput body = response.getBody();
    if (LOGGER.isLoggable(Level.FINE)) {
      // Replace the response since the logger needs to consume the entire input stream.
      body = logResponse(url, response.getStatus(), body, elapsedTime);
    }

    // Reject any response explicitly declaring a non-UTF-8 content-type charset.
    List<Header> headers = response.getHeaders();
    for (Header header : headers) {
      if (HTTP.CONTENT_TYPE.equalsIgnoreCase(header.getName()) //
          && !UTF_8.equalsIgnoreCase(Utils.parseCharset(header.getValue()))) {
        throw new IOException("Only UTF-8 charset supported.");
      }
    }

    Type type = methodDetails.type;
    if (statusCode >= 200 && statusCode < 300) { // 2XX == successful request
      if (type.equals(Response.class)) {
        return response;
      }
      if (body == null) {
        return null;
      }
      try {
        return converter.fromBody(body, type);
      } catch (ConversionException e) {
        throw RetrofitError.conversionError(url, response, converter, type, e);
      }
    }
    throw RetrofitError.httpError(url, response, converter, type);
  } catch (RetrofitError e) {
    throw e; // Pass through our own errors.
  } catch (IOException e) {
    throw RetrofitError.networkError(url, e);
  } catch (Throwable t) {
    throw RetrofitError.unexpectedError(url, t);
  }
}
/*
 * The endpoint's executor executes this method to convert the raw bytes
 * into a message, look for an associated exchange and forward it to
 * the stack of layers. Dispatches on the CoAP message class: request,
 * response, or empty message (ACK/RST/ping).
 */
private void receiveMessage(RawData raw) {
  DataParser parser = new DataParser(raw.getBytes());

  if (parser.isRequest()) {
    // This is a request
    Request request;
    try {
      request = parser.parseRequest();
    } catch (IllegalStateException e) {
      // Malformed request: optionally answer with a RST built manually from the
      // raw header (only when the message is not itself a reply), then give up.
      StringBuffer log =
          new StringBuffer("message format error caused by ").append(raw.getInetSocketAddress());
      if (!parser.isReply()) {
        // manually build RST from raw information
        EmptyMessage rst = new EmptyMessage(Type.RST);
        rst.setDestination(raw.getAddress());
        rst.setDestinationPort(raw.getPort());
        rst.setMID(parser.getMID());
        for (MessageInterceptor interceptor : interceptors) interceptor.sendEmptyMessage(rst);
        connector.send(serializer.serialize(rst));
        log.append(" and reset");
      }
      if (LOGGER.isLoggable(Level.INFO)) {
        LOGGER.info(log.toString());
      }
      return;
    }
    request.setSource(raw.getAddress());
    request.setSourcePort(raw.getPort());
    request.setSenderIdentity(raw.getSenderIdentity());

    /*
     * Logging here causes significant performance loss.
     * If necessary, add an interceptor that logs the messages,
     * e.g., the MessageTracer.
     */
    for (MessageInterceptor interceptor : interceptors) interceptor.receiveRequest(request);

    // MessageInterceptor might have canceled
    if (!request.isCanceled()) {
      Exchange exchange = matcher.receiveRequest(request);
      if (exchange != null) {
        exchange.setEndpoint(CoAPEndpoint.this);
        coapstack.receiveRequest(exchange, request);
      }
    }

  } else if (parser.isResponse()) {
    // This is a response
    Response response = parser.parseResponse();
    response.setSource(raw.getAddress());
    response.setSourcePort(raw.getPort());

    /*
     * Logging here causes significant performance loss.
     * If necessary, add an interceptor that logs the messages,
     * e.g., the MessageTracer.
     */
    for (MessageInterceptor interceptor : interceptors) interceptor.receiveResponse(response);

    // MessageInterceptor might have canceled
    if (!response.isCanceled()) {
      Exchange exchange = matcher.receiveResponse(response);
      if (exchange != null) {
        exchange.setEndpoint(CoAPEndpoint.this);
        // Record the round-trip time against the originating exchange.
        response.setRTT(System.currentTimeMillis() - exchange.getTimestamp());
        coapstack.receiveResponse(exchange, response);
      } else if (response.getType() != Type.ACK) {
        // No matching exchange and not a bare ACK: reject the stray response.
        LOGGER.fine("Rejecting unmatchable response from " + raw.getInetSocketAddress());
        reject(response);
      }
    }

  } else if (parser.isEmpty()) {
    // This is an empty message
    EmptyMessage message = parser.parseEmptyMessage();
    message.setSource(raw.getAddress());
    message.setSourcePort(raw.getPort());

    /*
     * Logging here causes significant performance loss.
     * If necessary, add an interceptor that logs the messages,
     * e.g., the MessageTracer.
     */
    for (MessageInterceptor interceptor : interceptors) interceptor.receiveEmptyMessage(message);

    // MessageInterceptor might have canceled
    if (!message.isCanceled()) {
      // CoAP Ping: an empty CON/NON is answered with a RST
      if (message.getType() == Type.CON || message.getType() == Type.NON) {
        LOGGER.info("Responding to ping by " + raw.getInetSocketAddress());
        reject(message);
      } else {
        Exchange exchange = matcher.receiveEmptyMessage(message);
        if (exchange != null) {
          exchange.setEndpoint(CoAPEndpoint.this);
          coapstack.receiveEmptyMessage(exchange, message);
        }
      }
    }
  } else {
    LOGGER.finest("Silently ignoring non-CoAP message from " + raw.getInetSocketAddress());
  }
}
/** Loads all the extensions. */
protected List<ExtensionComponent<T>> load() {
  if (LOGGER.isLoggable(Level.FINE)) {
    LOGGER.log(Level.FINE, "Loading ExtensionList: " + extensionType);
  }
  // Discovery is delegated to the plugin strategy so every extension finder participates.
  return hudson.getPluginManager().getPluginStrategy().findComponents(extensionType, hudson);
}