/**
 * Try to establish a connection to server with id sid.
 *
 * @param sid server id
 */
synchronized void connectOne(long sid) {
  if (senderWorkerMap.get(sid) == null) {
    InetSocketAddress electionAddr;
    if (self.quorumPeers.containsKey(sid)) {
      electionAddr = self.quorumPeers.get(sid).electionAddr;
    } else {
      LOG.warn("Invalid server id: " + sid);
      return;
    }
    try {
      if (LOG.isDebugEnabled()) {
        LOG.debug("Opening channel to server " + sid);
      }
      Socket sock = new Socket();
      setSockOpts(sock);
      sock.connect(self.getView().get(sid).electionAddr, cnxTO);
      if (LOG.isDebugEnabled()) {
        LOG.debug("Connected to server " + sid);
      }
      initiateConnection(sock, sid);
    } catch (UnresolvedAddressException e) {
      // Sun doesn't include the address that causes this
      // exception to be thrown, also UAE cannot be wrapped cleanly
      // so we log the exception in order to capture this critical
      // detail.
      LOG.warn("Cannot open channel to " + sid + " at election address " + electionAddr, e);
      throw e;
    } catch (IOException e) {
      LOG.warn("Cannot open channel to " + sid + " at election address " + electionAddr, e);
    }
  } else {
    LOG.debug("There is a connection already for server " + sid);
  }
}
/**
 * Return an ImageRecord containing the image's pixel dimensions.
 *
 * @param file absolute file path to image
 * @return ImageRecord containing the image's pixel dimensions
 */
public static ImageRecord getImageDimensions(final String file) {
  if (LOGGER.isDebugEnabled()) {
    LOGGER.debug("Getting image dimensions from: {}", file);
  }
  final ImageRecord dim = new ImageRecord(file);
  final Opener o = new Opener();
  final ImagePlus imp = o.openImage(file);
  if (imp == null) {
    return null;
  }
  ImageProcessor ip = imp.getProcessor();
  final int width = ip.getWidth();
  final int height = ip.getHeight();
  if (LOGGER.isDebugEnabled()) {
    LOGGER.debug(
        "{} (width: {} | height: {})",
        file,
        Integer.toString(width),
        Integer.toString(height));
  }
  dim.setWidth(width);
  dim.setHeight(height);
  ip = null;
  return dim;
}
@Override
public Connection<CL> createConnection() {
  try {
    Connection<CL> connection =
        connFactory.createConnection((HostConnectionPool<CL>) pool, null);
    connection.open();
    availableConnections.add(connection);
    monitor.incConnectionCreated(host);
    numActiveConnections.incrementAndGet();
    return connection;
  } catch (DynoConnectException e) {
    if (Logger.isDebugEnabled()) {
      if (monitor.getConnectionCreateFailedCount() % 10000 == 0) {
        Logger.error("Failed to create connection", e);
      }
    }
    monitor.incConnectionCreateFailed(host, e);
    throw e;
  } catch (RuntimeException e) {
    if (Logger.isDebugEnabled()) {
      if (monitor.getConnectionCreateFailedCount() % 10000 == 0) {
        Logger.error("Failed to create connection", e);
      }
    }
    monitor.incConnectionCreateFailed(host, e);
    throw new DynoConnectException(e);
  }
}
private void processReads(Context cx) {
  if (!readStarted) {
    return;
  }
  int read;
  do {
    try {
      read = clientChannel.read(readBuffer);
    } catch (IOException ioe) {
      if (log.isDebugEnabled()) {
        log.debug("Error reading from channel: {}", ioe, ioe);
      }
      read = -1;
    }
    if (log.isDebugEnabled()) {
      log.debug("Read {} bytes from {} into {}", read, clientChannel, readBuffer);
    }
    if (read > 0) {
      readBuffer.flip();
      Buffer.BufferImpl buf = Buffer.BufferImpl.newBuffer(cx, this, readBuffer, true);
      readBuffer.clear();
      if (onRead != null) {
        onRead.call(cx, onRead, this, new Object[] {buf, 0, read});
      }
    } else if (read < 0) {
      setErrno(Constants.EOF);
      removeInterest(SelectionKey.OP_READ);
      if (onRead != null) {
        onRead.call(cx, onRead, this, new Object[] {null, 0, 0});
      }
    }
  } while (readStarted && (read > 0));
}
/**
 * Performs a number of checks to ensure response saneness according to the rules of RFC2616:
 *
 * <ol>
 *   <li>If the response code is {@link javax.servlet.http.HttpServletResponse#SC_NO_CONTENT} then
 *       it is illegal for the body to contain anything. See
 *       http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html#sec10.2.5
 *   <li>If the response code is {@link javax.servlet.http.HttpServletResponse#SC_NOT_MODIFIED}
 *       then it is illegal for the body to contain anything. See
 *       http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html#sec10.3.5
 * </ol>
 *
 * @param request the client HTTP request
 * @param responseStatus the responseStatus
 * @return true if the response body should be zero length, even if it isn't
 */
public static boolean shouldBodyBeZero(HttpServletRequest request, int responseStatus) {
  // Check for NO_CONTENT
  if (responseStatus == HttpServletResponse.SC_NO_CONTENT) {
    if (LOG.isDebugEnabled()) {
      LOG.debug(
          "{} resulted in a {} response. Removing message body in accordance with RFC2616.",
          request.getRequestURL(),
          HttpServletResponse.SC_NO_CONTENT);
    }
    return true;
  }
  // Check for NOT_MODIFIED
  if (responseStatus == HttpServletResponse.SC_NOT_MODIFIED) {
    if (LOG.isDebugEnabled()) {
      LOG.debug(
          "{} resulted in a {} response. Removing message body in accordance with RFC2616.",
          request.getRequestURL(),
          HttpServletResponse.SC_NOT_MODIFIED);
    }
    return true;
  }
  return false;
}
/** {@inheritDoc} */
@Override
public ImmutableSetMultimap<String, String> getPermissions(
    List<Authorizable> authorizables, List<String> groups) {
  Roles roles = rolesReference.get();
  String database = null;
  Boolean isURI = false;
  for (Authorizable authorizable : authorizables) {
    if (authorizable instanceof Database) {
      database = authorizable.getName();
    }
    if (authorizable instanceof AccessURI) {
      isURI = true;
    }
  }
  if (LOGGER.isDebugEnabled()) {
    LOGGER.debug("Getting permissions for {} via {}", groups, database);
  }
  ImmutableSetMultimap.Builder<String, String> resultBuilder = ImmutableSetMultimap.builder();
  for (String group : groups) {
    resultBuilder.putAll(group, roles.getRoles(database, group, isURI));
  }
  ImmutableSetMultimap<String, String> result = resultBuilder.build();
  if (LOGGER.isDebugEnabled()) {
    LOGGER.debug("result = " + result);
  }
  return result;
}
private void doClose(Context cx, Function callback) {
  super.close();
  try {
    ScriptRunner runner = getRunner();
    if (clientChannel != null) {
      if (log.isDebugEnabled()) {
        log.debug("Closing client channel {}", clientChannel);
      }
      clientChannel.close();
      runner.unregisterCloseable(clientChannel);
    }
    if (svrChannel != null) {
      if (log.isDebugEnabled()) {
        log.debug("Closing server channel {}", svrChannel);
      }
      svrChannel.close();
      runner.unregisterCloseable(svrChannel);
    }
    if (callback != null) {
      runner.enqueueCallback(callback, this, null, runner.getDomain(), new Object[] {});
    }
  } catch (IOException ioe) {
    log.debug("Uncaught exception in channel close: {}", ioe);
    setErrno(Constants.EIO);
  }
}
/**
 * Creates a private message.
 *
 * @param pms the private message content
 * @param savetosentbox whether to keep the message in the sender's outbox (0 = do not keep, 1 =
 *     keep)
 */
public void createPrivateMessage(Pms pms, int savetosentbox) {
  if (pms.getFolder() != 0) {
    pms.setMsgfrom(pms.getMsgto());
  } else {
    pmsDAO
        .createQuery(
            "update Users set newpmcount=abs(newpmcount*1)+1,newpm=1 where uid=?",
            pms.getUsersByMsgtoid().getUid())
        .executeUpdate();
  }
  pmsDAO.save(pms);
  if (logger.isDebugEnabled()) {
    logger.debug("Private message {} created successfully", pms.getPmid());
  }
  if (savetosentbox == 1 && pms.getFolder() == 0) {
    // Keep a copy in the outbox
    Pms pm = new Pms();
    pm.setFolder(1);
    pm.setMessage(pms.getMessage());
    pm.setMsgfrom(pms.getMsgfrom());
    pm.setMsgto(pms.getMsgto());
    pm.setNew_(pms.getNew_());
    pm.setPostdatetime(pms.getPostdatetime());
    pm.setSubject(pms.getSubject());
    pm.setUsersByMsgfromid(pms.getUsersByMsgfromid());
    pm.setUsersByMsgtoid(pms.getUsersByMsgtoid());
    pmsDAO.save(pm);
    if (logger.isDebugEnabled()) {
      logger.debug("Private message {} saved to the outbox", pm.getPmid());
    }
  }
}
/**
 * @see uk.ac.cam.caret.sakai.rwiki.service.api.api.dao.RWikiHistoryObjectDao#getRWikiHistoryObject(uk.ac.cam.caret.sakai.rwiki.service.api.api.model.RWikiObject,
 *     int)
 */
public RWikiHistoryObject getRWikiHistoryObject(final RWikiObject rwo, final int revision) {
  long start = System.currentTimeMillis();
  try {
    HibernateCallback callback =
        new HibernateCallback() {
          public Object doInHibernate(Session session) throws HibernateException {
            return session
                .createCriteria(RWikiHistoryObject.class)
                .add(Expression.eq("rwikiobjectid", rwo.getRwikiobjectid()))
                .add(Expression.eq("revision", Integer.valueOf(revision)))
                .list();
          }
        };
    List found = (List) getHibernateTemplate().execute(callback);
    if (found.size() == 0) {
      if (log.isDebugEnabled()) {
        log.debug("Found " + found.size() + " objects with id " + rwo.getRwikiobjectid());
      }
      return null;
    }
    if (log.isDebugEnabled()) {
      log.debug(
          "Found "
              + found.size()
              + " objects with id "
              + rwo.getRwikiobjectid()
              + " returning most recent one.");
    }
    return (RWikiHistoryObject) proxyObject(found.get(0));
  } finally {
    long finish = System.currentTimeMillis();
    TimeLogger.printTimer(
        "RWikiHistoryObjectDaoImpl.getRWikiHistoryObject: " + rwo.getName(), start, finish);
  }
}
private net.sf.ehcache.CacheManager ensureCacheManager() {
  try {
    if (this.manager == null) {
      if (log.isDebugEnabled()) {
        log.debug("cacheManager property not set. Constructing CacheManager instance... ");
      }
      // Using the CacheManager constructor, the resulting instance is _not_ a VM singleton
      // (as would be the case by calling CacheManager.getInstance()). We do not use getInstance
      // here because we need to know if we need to destroy the CacheManager instance - using the
      // static call, we don't know which component is responsible for shutting it down. By using
      // a single EhCacheManager, it will always know to shut down the instance if it was
      // responsible for creating it.
      if (shared) {
        this.manager = net.sf.ehcache.CacheManager.create(getCacheManagerConfigFileInputStream());
      } else {
        this.manager = new net.sf.ehcache.CacheManager(getCacheManagerConfigFileInputStream());
      }
      if (log.isTraceEnabled()) {
        log.trace("instantiated Ehcache CacheManager instance.");
      }
      cacheManagerImplicitlyCreated = true;
      if (log.isDebugEnabled()) {
        log.debug("implicit cacheManager created successfully.");
      }
    }
    return this.manager;
  } catch (Exception e) {
    throw new CacheException(e);
  }
}
public static String buildTurnitinURL(
    String apiURL, Map<String, Object> parameters, String secretKey) {
  if (!parameters.containsKey("fid")) {
    throw new IllegalArgumentException("You must include a fid in the parameters");
  }
  StringBuilder apiDebugSB = new StringBuilder();
  if (apiTraceLog.isDebugEnabled()) {
    apiDebugSB.append("Starting URL TII Construction:\n");
  }
  parameters.put("gmtime", getGMTime());
  List<String> sortedkeys = new ArrayList<String>();
  sortedkeys.addAll(parameters.keySet());
  String md5 = buildTurnitinMD5(parameters, secretKey, sortedkeys);
  StringBuilder sb = new StringBuilder();
  sb.append(apiURL);
  if (apiTraceLog.isDebugEnabled()) {
    apiDebugSB.append("The TII Base URL is:\n");
    apiDebugSB.append(apiURL);
  }
  sb.append(sortedkeys.get(0));
  sb.append("=");
  sb.append(parameters.get(sortedkeys.get(0)));
  if (apiTraceLog.isDebugEnabled()) {
    apiDebugSB.append(sortedkeys.get(0));
    apiDebugSB.append("=");
    apiDebugSB.append(parameters.get(sortedkeys.get(0)));
    apiDebugSB.append("\n");
  }
  for (int i = 1; i < sortedkeys.size(); i++) {
    sb.append("&");
    sb.append(sortedkeys.get(i));
    sb.append("=");
    sb.append(parameters.get(sortedkeys.get(i)));
    if (apiTraceLog.isDebugEnabled()) {
      apiDebugSB.append(sortedkeys.get(i));
      apiDebugSB.append(" = ");
      apiDebugSB.append(parameters.get(sortedkeys.get(i)));
      apiDebugSB.append("\n");
    }
  }
  sb.append("&");
  sb.append("md5=");
  sb.append(md5);
  if (apiTraceLog.isDebugEnabled()) {
    apiDebugSB.append("md5 = ");
    apiDebugSB.append(md5);
    apiDebugSB.append("\n");
    apiTraceLog.debug(apiDebugSB.toString());
  }
  return sb.toString();
}
private void createTables() throws SQLException, BlockStoreException {
  Statement s = conn.get().createStatement();
  if (log.isDebugEnabled()) log.debug("PostgresFullPrunedBlockStore : CREATE headers table");
  s.executeUpdate(CREATE_HEADERS_TABLE);
  if (log.isDebugEnabled()) log.debug("PostgresFullPrunedBlockStore : CREATE settings table");
  s.executeUpdate(CREATE_SETTINGS_TABLE);
  if (log.isDebugEnabled())
    log.debug("PostgresFullPrunedBlockStore : CREATE undoable block table");
  s.executeUpdate(CREATE_UNDOABLE_TABLE);
  if (log.isDebugEnabled())
    log.debug("PostgresFullPrunedBlockStore : CREATE undoable block index");
  s.executeUpdate(CREATE_UNDOABLE_TABLE_INDEX);
  if (log.isDebugEnabled()) log.debug("PostgresFullPrunedBlockStore : CREATE open output table");
  s.executeUpdate(CREATE_OPEN_OUTPUT_TABLE);

  // Create indexes..
  s.executeUpdate(CREATE_HEADERS_HASH_INDEX);
  s.executeUpdate(CREATE_OUTPUT_ADDRESS_TYPE_INDEX);
  s.executeUpdate(CREATE_OUTPUTS_ADDRESS_INDEX);
  s.executeUpdate(CREATE_OUTPUTS_HASH_INDEX);
  s.executeUpdate(CREATE_OUTPUTS_HASH_INDEX_INDEX);
  s.executeUpdate(CREATE_UNDOABLE_HASH_INDEX);

  s.executeUpdate("INSERT INTO settings(name, value) VALUES('" + CHAIN_HEAD_SETTING + "', NULL)");
  s.executeUpdate(
      "INSERT INTO settings(name, value) VALUES('" + VERIFIED_CHAIN_HEAD_SETTING + "', NULL)");
  s.executeUpdate("INSERT INTO settings(name, value) VALUES('" + VERSION_SETTING + "', '03')");
  s.close();
  createNewStore(params);
}
/**
 * Writes the given body to Netty channel. Will <b>not</b> wait until the body has been written.
 *
 * @param log logger to use
 * @param channel the Netty channel
 * @param remoteAddress the remote address when using UDP
 * @param body the body to write (send)
 * @param exchange the exchange
 * @param listener listener with work to be executed when the operation is complete
 */
public static void writeBodyAsync(
    Logger log,
    Channel channel,
    SocketAddress remoteAddress,
    Object body,
    Exchange exchange,
    ChannelFutureListener listener) {
  ChannelFuture future;
  if (remoteAddress != null) {
    if (log.isDebugEnabled()) {
      log.debug(
          "Channel: {} remote address: {} writing body: {}",
          new Object[] {channel, remoteAddress, body});
    }
    future = channel.write(body, remoteAddress);
  } else {
    if (log.isDebugEnabled()) {
      log.debug("Channel: {} writing body: {}", new Object[] {channel, body});
    }
    future = channel.write(body);
  }
  if (listener != null) {
    future.addListener(listener);
  }
}
private Method getMethodObject(Class c) throws Throwable {
  int found = 0;
  Method result = null;
  Method[] methods = c.getDeclaredMethods();
  for (int i = 0; i < methods.length; i++) {
    Method method = methods[i];
    if (LOG.isDebugEnabled()) {
      LOG.debug(
          "Checking reflected method name '"
              + method.getName()
              + "' vs. supplied method name '"
              + getMethod()
              + "'");
    }
    if (method.getName().equals(getMethod())
        && isEquivalent(toClassArray(args), method.getParameterTypes())) {
      if (result != null) {
        found++;
      }
      result = method;
      if (LOG.isDebugEnabled()) {
        LOG.debug("Matched method '" + method + "'");
      }
    }
  }
  if (found > 1) {
    LOG.warn("" + found + " method signatures matched specified method!");
  }
  return result;
}
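// A minimal, standalone sketch of the reflection-based lookup idea used above: match a declared
// method by name and exact parameter types. The class and method names below are illustrative
// only and are not taken from the original source; the original additionally uses its own
// isEquivalent/toClassArray helpers, which are not reproduced here.
import java.lang.reflect.Method;
import java.util.Arrays;

public class MethodLookupSketch {

  // Returns the first declared method on target whose name and parameter types match exactly.
  static Method findMethod(Class<?> target, String name, Class<?>... paramTypes) {
    for (Method m : target.getDeclaredMethods()) {
      if (m.getName().equals(name) && Arrays.equals(m.getParameterTypes(), paramTypes)) {
        return m;
      }
    }
    return null;
  }

  public static void main(String[] args) {
    // Looks up String.substring(int, int) purely as a demonstration.
    Method m = findMethod(String.class, "substring", int.class, int.class);
    System.out.println("Resolved: " + m);
  }
}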
private void processKey(SelectionKey key) {
  try {
    if (key.isReadable()) {
      final DatagramChannel socketChannel = (DatagramChannel) key.channel();
      reader.readAll(socketChannel);
    }
    if (key.isWritable()) {
      final DatagramChannel socketChannel = (DatagramChannel) key.channel();
      try {
        int bytesJustWritten = writer.writeAll(socketChannel);
        contemplateThrottleWrites(bytesJustWritten);
      } catch (NotYetConnectedException e) {
        if (LOG.isDebugEnabled()) LOG.debug("", e);
        serverConnector.connectLater();
      } catch (IOException e) {
        if (LOG.isDebugEnabled()) LOG.debug("", e);
        serverConnector.connectLater();
      }
    }
  } catch (Exception e) {
    LOG.error("", e);
    if (!isClosed) closeEarlyAndQuietly(key.channel());
  }
}
@Around("loggingPointcut()") public Object logAround(ProceedingJoinPoint joinPoint) throws Throwable { if (log.isDebugEnabled()) { log.debug( "Enter: {}.{}() with argument[s] = {}", joinPoint.getSignature().getDeclaringTypeName(), joinPoint.getSignature().getName(), Arrays.toString(joinPoint.getArgs())); } try { Object result = joinPoint.proceed(); if (log.isDebugEnabled()) { log.debug( "Exit: {}.{}() with result = {}", joinPoint.getSignature().getDeclaringTypeName(), joinPoint.getSignature().getName(), result); } return result; } catch (IllegalArgumentException e) { log.error( "Illegal argument: {} in {}.{}()", Arrays.toString(joinPoint.getArgs()), joinPoint.getSignature().getDeclaringTypeName(), joinPoint.getSignature().getName()); throw e; } }
private DatagramChannel connectClient() throws IOException {
  final DatagramChannel client =
      address.isMulticastAddress()
          ? DatagramChannel.open(address.getAddress().length == 4 ? INET : INET6)
          : DatagramChannel.open();
  final InetSocketAddress hostAddress = new InetSocketAddress(port);
  client.configureBlocking(false);
  if (address.isMulticastAddress()) {
    client.setOption(SO_REUSEADDR, true);
    client.bind(hostAddress);
    if (networkInterface != null) {
      // This is probably not needed, because the client socket doesn't send datagrams,
      // but since EVERYBODY on the internet configures this for any channels, and
      // I don't see any harm this config could do, I leave it here.
      client.setOption(IP_MULTICAST_IF, networkInterface);
      client.join(address, networkInterface);
    } else {
      client.join(address, NetworkInterface.getByInetAddress(hostAddress.getAddress()));
    }
    if (LOG.isDebugEnabled()) LOG.debug("Connecting via multicast, group=" + address);
  } else {
    client.bind(hostAddress);
  }
  if (LOG.isDebugEnabled()) LOG.debug("Listening on port " + port);
  closeables.add(client);
  return client;
}
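// A compact, standalone sketch of the JDK-only part of the setup above: opening a non-blocking
// DatagramChannel and joining a multicast group. The group address, port, and interface name are
// placeholders (assumptions), and the interface name must refer to an existing local interface.
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.net.NetworkInterface;
import java.net.StandardProtocolFamily;
import java.net.StandardSocketOptions;
import java.nio.channels.DatagramChannel;

public class MulticastJoinSketch {

  public static DatagramChannel join(String group, int port, String ifName) throws Exception {
    InetAddress groupAddr = InetAddress.getByName(group);
    NetworkInterface nif = NetworkInterface.getByName(ifName); // must not be null for join()
    DatagramChannel ch =
        DatagramChannel.open(StandardProtocolFamily.INET)
            .setOption(StandardSocketOptions.SO_REUSEADDR, true)
            .bind(new InetSocketAddress(port));
    ch.configureBlocking(false);
    ch.join(groupAddr, nif); // the returned MembershipKey is ignored in this sketch
    return ch;
  }

  public static void main(String[] args) throws Exception {
    join("239.1.2.3", 9876, "eth0"); // placeholder values
  }
}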
private void collect(IClassInfo ci, Set<Class> collector) {
  if (ci.hasAnnotation(IgnoreBean.class)) {
    if (LOG.isDebugEnabled()) {
      LOG.debug(
          "Skipping bean candidate '{}' because it is annotated with '{}'.",
          ci.name(),
          IgnoreBean.class.getSimpleName());
    }
    return;
  }
  if (!ci.isInstanciable()) {
    if (LOG.isDebugEnabled()) {
      LOG.debug("Skipping bean candidate '{}' because it is not instantiable.", ci.name());
    }
    return;
  }
  if (!ci.hasNoArgsConstructor()) {
    if (LOG.isDebugEnabled()) {
      LOG.debug("Skipping bean candidate '{}' because it has no empty constructor.", ci.name());
    }
    return;
  }
  try {
    collector.add(ci.resolveClass());
  } catch (Exception ex) {
    LOG.warn("Could not resolve class [{}]", ci.name(), ex);
  }
}
public void concurrentModificationCheck(
    AuditConfiguration auditConfiguration,
    Session session,
    SortedSet<AuditLogicalGroup> auditLogicalGroups,
    AuditTransaction auditTransaction,
    Long loadAuditTransactionId) {
  if (loadAuditTransactionId != null) {
    for (AuditLogicalGroup storedAuditLogicalGroup : auditLogicalGroups) {
      if (log.isDebugEnabled()) {
        log.debug("lock AuditLogicalGroup with id:" + storedAuditLogicalGroup.getId());
      }
      // session.lock(storedAuditLogicalGroup, LockMode.UPGRADE);
      // note: the audit logical group is not immutable
      session.refresh(storedAuditLogicalGroup, LockMode.UPGRADE);
    }
    try {
      concurrentModificationCheck(
          auditConfiguration, session, auditTransaction, loadAuditTransactionId);
    } catch (ConcurrentModificationException ce) {
      if (log.isDebugEnabled()) {
        log.debug("Detected ConcurrentModificationException, will rethrow.", ce);
      }
      throw ce;
    }
  }
}
@Override
public Future<List<Service>> searchServices(Service filter, IIdentity node)
    throws ServiceDiscoveryException {
  if (logger.isDebugEnabled())
    logger.debug("Searching repository for a given service, on node: " + node.getJid());
  List<Service> result = new ArrayList<Service>();
  try {
    String myLocalJid = getCommMngr().getIdManager().getThisNetworkNode().getJid();
    if (myLocalJid.equals(node.getJid())) {
      if (logger.isDebugEnabled()) logger.debug("It's the local node, so we do a local call");
      return searchServices(filter);
    }
    if (logger.isDebugEnabled()) logger.debug("Trying to query the remote node...");
    ServiceDiscoveryRemoteClient callback = new ServiceDiscoveryRemoteClient();
    getServiceDiscoveryRemote().searchService(filter, node, callback);
    result = callback.getResultList();
  } catch (Exception ex) {
    ex.printStackTrace();
    logger.error("Exception while searching for services!");
    throw new ServiceDiscoveryException("Exception while searching for services!", ex);
  }
  return new AsyncResult<List<Service>>(result);
}
private void processConnect(Context cx) {
  try {
    removeInterest(SelectionKey.OP_CONNECT);
    addInterest(SelectionKey.OP_WRITE);
    clientChannel.finishConnect();
    if (log.isDebugEnabled()) {
      log.debug("Client {} connected", clientChannel);
    }
    sendOnConnectComplete(cx, 0, true, true);
  } catch (ConnectException ce) {
    if (log.isDebugEnabled()) {
      log.debug("Error completing connect: {}", ce);
    }
    setErrno(Constants.ECONNREFUSED);
    sendOnConnectComplete(cx, Constants.ECONNREFUSED, false, false);
  } catch (IOException ioe) {
    if (log.isDebugEnabled()) {
      log.debug("Error completing connect: {}", ioe);
    }
    setErrno(Constants.EIO);
    sendOnConnectComplete(cx, Constants.EIO, false, false);
  }
}
@Override
public boolean loadLibrary(String libname, boolean ignoreError, ClassLoader cl) {
  try {
    for (Entry<String, String> nativeEntry : platformNativeIndex.entrySet()) {
      if (nativeEntry.getKey().contains(libname)) {
        if (log.isDebugEnabled()) {
          log.debug(
              "Loading mapped entry: [{}] [{}] [{}]",
              libname,
              nativeEntry.getKey(),
              nativeEntry.getValue());
        }
        File nativeLibCopy =
            extractJarEntry(
                nativeEntry.getValue(),
                nativeEntry.getKey(),
                System.getProperty(JAVA_TMP_DIR),
                String.format("%s.jni", libname));
        System.load(nativeLibCopy.getAbsolutePath());
        return true;
      }
    }
  } catch (Exception e) {
    log.error("Unable to load native library [{}] - {}", libname, e);
  }
  if (log.isDebugEnabled()) {
    log.debug("No mapped library match for [{}]", libname);
  }
  return false;
}
private void exportEntitlements(File baseDir, Consumer consumer)
    throws IOException, ExportCreationException {
  File entCertDir = new File(baseDir.getCanonicalPath(), "entitlements");
  entCertDir.mkdir();
  for (Entitlement ent : entitlementCurator.listByConsumer(consumer)) {
    if (ent.isDirty()) {
      log.error("Entitlement " + ent.getId() + " is marked as dirty.");
      throw new ExportCreationException("Attempted to export dirty entitlements");
    }
    if (!this.exportRules.canExport(ent)) {
      if (log.isDebugEnabled()) {
        log.debug("Skipping export of entitlement with product: {}", ent.getPool().getProductId());
      }
      continue;
    }
    if (log.isDebugEnabled()) {
      log.debug("Exporting entitlement for product " + ent.getPool().getProductId());
    }
    FileWriter writer = null;
    try {
      File file = new File(entCertDir.getCanonicalPath(), ent.getId() + ".json");
      writer = new FileWriter(file);
      entExporter.export(mapper, writer, ent);
    } finally {
      if (writer != null) {
        writer.close();
      }
    }
  }
}
@SuppressWarnings("unchecked") private void ifMatchConditionalProcessing(HttpServletRequest request, XcapResourceImpl resource) throws XcapException { Enumeration ifMatchEnum = request.getHeaders(Constants.IF_MATCH); String currentEtag = getEtag(resource); resource.setPreviousEtag(currentEtag); if (ifMatchEnum != null && ifMatchEnum.hasMoreElements()) { while (ifMatchEnum.hasMoreElements()) { String element = (String) ifMatchEnum.nextElement(); String[] matchEtags = element.split(","); for (int i = 0; i < matchEtags.length; i++) { if (Constants.WILCARD.equals(matchEtags[i].trim())) { if (resource.isAllDocument() && resource.isCreation()) { throw new XcapException( "Conditional processing failed: " + "If-match: * and new document creation", HttpServletResponse.SC_PRECONDITION_FAILED); } else if (_log.isDebugEnabled()) _log.debug("wilcard entity tags has matched"); } else if (currentEtag.equals(matchEtags[i].trim())) { if (_log.isDebugEnabled()) _log.debug("entity tag has matched"); return; } } } throw new XcapException( "Conditional processing failed: " + "If-match: present and none match", HttpServletResponse.SC_PRECONDITION_FAILED); } }
@Override
public HttpFuture send(boolean lastChunk) {
  calculateKeepAlive(lastChunk);
  if (log.isDebugEnabled()) {
    log.debug("send: sending HTTP response {}", response);
  }
  ChannelFuture future = channel.write(response);
  if (data != null) {
    if (log.isDebugEnabled()) {
      log.debug("send: Sending HTTP chunk with data {}", data);
    }
    DefaultHttpContent chunk = new DefaultHttpContent(NettyServer.copyBuffer(data));
    future = channel.write(chunk);
  }
  if (lastChunk) {
    future = sendLastChunk();
  }
  channel.flush();
  if (lastChunk && !keepAlive) {
    shutDown();
  }
  return new NettyHttpFuture(future);
}
/**
 * Deletes all resources created by tests, after all tests have been run.
 *
 * <p>This cleanup method will always be run, even if one or more tests fail. For this reason, it
 * attempts to remove all resources created at any point during testing, even if some of those
 * resources may be expected to be deleted by certain tests.
 */
@AfterClass(alwaysRun = true)
public void cleanUp() {
  String noTest = System.getProperty("noTestCleanup");
  if (Boolean.TRUE.toString().equalsIgnoreCase(noTest)) {
    if (logger.isDebugEnabled()) {
      logger.debug("Skipping Cleanup phase ...");
    }
    return;
  }
  if (logger.isDebugEnabled()) {
    logger.debug("Cleaning up temporary resources created for testing ...");
  }
  IntakeClient intakeClient = new IntakeClient();
  // Note: Any non-success responses are ignored and not reported.
  for (String resourceId : intakeIdsCreated) {
    ClientResponse<Response> res = intakeClient.delete(resourceId);
    res.releaseConnection();
  }
  // Delete organization items before the OrgAuthority resource.
  OrgAuthorityClient orgAuthClient = new OrgAuthorityClient();
  for (String resourceId : orgIdsCreated) {
    ClientResponse<Response> res = orgAuthClient.deleteItem(orgAuthCSID, resourceId);
    res.releaseConnection();
  }
  if (orgAuthCSID != null) {
    orgAuthClient.delete(orgAuthCSID).releaseConnection();
  }
}
public void testPoolShrink() throws Exception {
  if (log.isDebugEnabled()) {
    log.debug("*** Starting testPoolShrink");
  }
  Field poolField = pds.getClass().getDeclaredField("pool");
  poolField.setAccessible(true);
  XAPool pool = (XAPool) poolField.get(pds);

  assertEquals(1, pool.inPoolSize());
  assertEquals(1, pool.totalPoolSize());

  Connection c1 = pds.getConnection();
  assertEquals(0, pool.inPoolSize());
  assertEquals(1, pool.totalPoolSize());

  Connection c2 = pds.getConnection();
  assertEquals(0, pool.inPoolSize());
  assertEquals(2, pool.totalPoolSize());

  c1.close();
  c2.close();

  Thread.sleep(1100); // leave enough time for the idle connections to expire
  TransactionManagerServices.getTaskScheduler().interrupt(); // wake up the task scheduler
  Thread.sleep(1200); // leave enough time for the scheduled shrinking task to do its work

  if (log.isDebugEnabled()) {
    log.debug("*** checking pool sizes");
  }
  assertEquals(1, pool.inPoolSize());
  assertEquals(1, pool.totalPoolSize());
}
/**
 * {@inheritDoc}
 *
 * @see org.apache.http.client.ResponseHandler#handleResponse(org.apache.http.HttpResponse)
 */
@Override
public final ApiResponse<V> handleResponse(final HttpResponse response)
    throws ClientProtocolException, IOException {
  if (LOGGER.isDebugEnabled()) {
    LOGGER.debug("handleResponse.start");
  }
  InputStream inputStream = response.getEntity().getContent();
  final ApiResponse<V> apiResponse = handleJsonResponse(inputStream);
  inputStream.close();
  try {
    KeoUtils.checkResponse(apiResponse);
  } catch (final JsonParseException e) {
    throw new IOException("Unable to parse the json response received from Keolis:\n" + e);
  }
  if (LOGGER.isDebugEnabled()) {
    LOGGER.debug("handleResponse.end");
  }
  return apiResponse;
}
/**
 * Deletes all resources created by tests, after all tests have been run.
 *
 * <p>This cleanup method will always be run, even if one or more tests fail. For this reason, it
 * attempts to remove all resources created at any point during testing, even if some of those
 * resources may be expected to be deleted by certain tests.
 *
 * @throws Exception
 */
@AfterClass(alwaysRun = true)
public void cleanUp() throws Exception {
  String noTest = System.getProperty("noTestCleanup");
  if (Boolean.TRUE.toString().equalsIgnoreCase(noTest)) {
    if (logger.isDebugEnabled()) {
      logger.debug("Skipping Cleanup phase ...");
    }
    return;
  }
  if (logger.isDebugEnabled()) {
    logger.debug("Cleaning up temporary resources created for testing ...");
  }
  PersonAuthorityClient personAuthClient = new PersonAuthorityClient();
  // Delete Person resource(s) (before PersonAuthority resources).
  for (String resourceId : personIdsCreated) {
    // Note: Any non-success responses are ignored and not reported.
    personAuthClient.deleteItem(personAuthCSID, resourceId).close();
  }
  // Delete PersonAuthority resource(s).
  // Note: Any non-success response is ignored and not reported.
  if (personAuthCSID != null) {
    personAuthClient.delete(personAuthCSID);
    // Delete Condition Checks resource(s).
    ConditioncheckClient conditioncheckClient = new ConditioncheckClient();
    for (String resourceId : conditioncheckIdsCreated) {
      // Note: Any non-success responses are ignored and not reported.
      conditioncheckClient.delete(resourceId).close();
    }
  }
}
/**
 * The ANSWER_DATA_REQUEST state is entered when a BACnet Data Expecting Reply, a Test_Request, or
 * a proprietary frame that expects a reply is received.
 */
private void answerDataRequest() {
  synchronized (this) {
    if (replyFrame != null) {
      // Reply
      // debug("answerDataRequest:Reply with " + replyFrame);
      if (LOG.isDebugEnabled()) LOG.debug(thisStation + " answerDataRequest:Reply");
      sendFrame(replyFrame);
      replyFrame = null;
      state = MasterNodeState.idle;
      activity = true;
    } else {
      long now = timeSource.currentTimeMillis();
      if (replyDeadline < now) {
        // DeferredReply
        // debug("answerDataRequest:DeferredReply to " + frameToReply.getSourceAddress());
        if (LOG.isDebugEnabled()) LOG.debug(thisStation + " answerDataRequest:DeferredReply");
        sendFrame(FrameType.replyPostponed, frame.getSourceAddress());
        state = MasterNodeState.idle;
        activity = true;
      } else {
        // If the current time of the host was moved back, the above condition could cause an
        // indefinite wait. So, we check if the reply deadline is too long, and correct if so.
        long timeDiff = replyDeadline - now;
        if (timeDiff > Constants.REPLY_DELAY) {
          LOG.warn("Correcting replyDeadline time because of timeDiff of " + timeDiff);
          replyDeadline = now + Constants.REPLY_DELAY;
        }
      }
    }
  }
}