@Test
@Ignore
public void testSlopPushers() throws Exception {
    Set<Integer> failedNodes = getFailedNodes();
    Multimap<Integer, ByteArray> failedKeys = populateStore(failedNodes);
    reviveNodes(failedNodes);

    for (int i = 0; i < 5; i++) {
        for (StreamingSlopPusherJob job : slopPusherJobs) {
            if (logger.isTraceEnabled())
                logger.trace("Started slop pusher job " + job);
            job.run();
            if (logger.isTraceEnabled())
                logger.trace("Finished slop pusher job " + job);
        }
    }

    for (Map.Entry<Integer, ByteArray> entry : failedKeys.entries()) {
        List<Versioned<byte[]>> values = store.get(entry.getValue(), null);
        assertTrue("slop entry should be pushed for " + entry.getValue()
                        + ", preflist " + keysToNodes.get(entry.getValue()),
                values.size() > 0);
        assertEquals("slop entry should be correct for " + entry.getValue(),
                keyValues.get(entry.getValue()),
                new ByteArray(values.get(0).getValue()));
    }
}
/**
 * Gets the representation files and writes them to the local disk.
 *
 * @param representation the representation to download
 * @return a {@link LocalRepresentationObject} backed by the downloaded files
 * @throws IOException
 * @throws DownloaderException
 */
protected LocalRepresentationObject downloadRepresentationToLocalDisk(
        RepresentationObject representation) throws IOException, DownloaderException {

    File tempDirectory = TempDir.createUniqueDirectory("rodaSourceRep");
    logger.debug("Saving representation to " + tempDirectory);

    LocalRepresentationObject localRepresentation =
            new LocalRepresentationObject(tempDirectory, representation);

    RepresentationFile rootRepFile = representation.getRootFile();
    File rootFile = this.rodaDownloader.saveTo(representation.getPid(),
            rootRepFile.getId(), tempDirectory);
    localRepresentation.getRootFile().setAccessURL(rootFile.toURI().toURL().toString());

    logger.trace("File " + rootRepFile.getId() + " saved to " + rootFile);

    for (RepresentationFile partRepFile : localRepresentation.getPartFiles()) {
        File partFile = this.rodaDownloader.saveTo(localRepresentation.getPid(),
                partRepFile.getId(), tempDirectory);
        partRepFile.setAccessURL(partFile.toURI().toURL().toString());
        logger.trace("File " + partRepFile.getId() + " saved to " + partFile);
    }

    return localRepresentation;
}
// TODO: This is a guess!! ...untested!!
public byte[] read() {
    try {
        serialConnection.readStaleData();
        long start = currentTimeMillis();
        while (currentTimeMillis() - start <= sendTimeout) {
            if (serialConnection.available() > 10) {
                byte[] bytes = serialConnection.readAvailable();
                LOGGER.trace("AEM UEGO input: " + asHex(bytes));
                int startIndex = findStart(bytes);
                LOGGER.trace("AEM UEGO start index: " + startIndex);
                if (startIndex < 0 || startIndex >= bytes.length) continue;
                List<Byte> buffer = new ArrayList<Byte>();
                for (int i = startIndex; i < bytes.length; i++) {
                    byte b = bytes[i];
                    if (b == (byte) 0x0D) {
                        // 0x0D (carriage return) terminates a frame
                        byte[] response = toArray(buffer);
                        LOGGER.trace("AEM UEGO Response: " + asHex(response));
                        return response;
                    } else {
                        buffer.add(b);
                    }
                }
            }
            sleep(1);
        }
        LOGGER.warn("AEM UEGO Response [read timeout]");
        return new byte[0];
    } catch (Exception e) {
        close();
        throw new SerialCommunicationException(e);
    }
}
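// findStart is referenced above but not shown. A minimal sketch, assuming AEM
// UEGO frames are 0x0D-terminated so a fresh frame begins right after a
// carriage return (the exact heuristic is a guess, like the method itself):
private int findStart(byte[] bytes) {
    for (int i = 0; i < bytes.length - 1; i++) {
        if (bytes[i] == (byte) 0x0D) {
            return i + 1; // first byte after a frame boundary
        }
    }
    return -1; // no frame boundary seen yet; caller keeps polling
}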
@AroundInvoke
public Object checkArguments(InvocationContext ctx) throws Exception {
    try {
        log = Logger.getLogger(LogoutInterceptor.class);
        Object[] args = ctx.getParameters();
        String className = ctx.getTarget().getClass().getSimpleName();
        log.trace("Class name: " + className);
        String methodName = ctx.getMethod().getName();
        log.trace("Method: " + methodName);
        String sessionId = (String) args[0];
        if ((sessionId == null) || (sessionId.length() == 0)) {
            throw new Exception("sessionId should not be null");
        }
        cf = (QueueConnectionFactory) new InitialContext().lookup(QueueNames.CONNECTION_FACTORY);
        queue = (Queue) new InitialContext().lookup(QueueNames.LOGOUT_QUEUE);
        log.trace("Queue logout: " + queue.getQueueName());
        QueueConnection connection = cf.createQueueConnection();
        QueueSession session = connection.createQueueSession(false, Session.AUTO_ACKNOWLEDGE);
        QueueSender sender = session.createSender(queue);
        Message logoutMessage = session.createTextMessage(sessionId);
        Timestamp time = new Timestamp(new Date().getTime());
        // Messages will not accept a Timestamp property - must convert it to a String
        logoutMessage.setStringProperty(PropertyNames.TIME, time.toString());
        sender.send(logoutMessage);
        session.close();
        connection.close(); // close the connection as well, not just the session
    } catch (Exception e) {
        log.fatal("Error in LogoutInterceptor", e);
    }
    return ctx.proceed();
}
@Override
public boolean remove(Object obj) {
    AbstractTransaction txn = (AbstractTransaction) obj;
    boolean retval;
    if (trace.val)
        LOG.trace(String.format("Partition %d :: Attempting to acquire lock", this.partitionId));
    this.lock.lock();
    try {
        // We have to check whether we are the first txn in the queue, because we
        // will need to reset the blockTimestamp after deleting ourselves so that
        // the next txn can get executed.
        // This is not thread-safe...
        boolean reset = txn.equals(super.peek());
        retval = super.remove(txn);
        if (debug.val) {
            LOG.debug(String.format("Partition %d :: remove(%s) -> %s",
                    this.partitionId, txn, retval));
            // Sanity check
            assert (super.contains(txn) == false) :
                "Failed to remove " + txn + "???\n" + this.debug();
        }
        if (retval) this.checkQueueState(reset);
    } finally {
        if (trace.val)
            LOG.trace(String.format("Partition %d :: Releasing lock", this.partitionId));
        this.lock.unlock();
    }
    return (retval);
}
private static void produceForFile(String topic, String filename) {
    try {
        LOGGER.debug("Setting up streams");
        PipedInputStream send = new PipedInputStream(BUFFER_LEN);
        PipedOutputStream input = new PipedOutputStream(send);

        LOGGER.debug("Setting up connections");
        LOGGER.debug("Setting up file reader");
        BufferedFileReader reader = new BufferedFileReader(filename, input);
        LOGGER.debug("Setting up kafka producer");
        KafkaObjectArrayProducer kafkaProducer = new KafkaObjectArrayProducer(topic, send);

        LOGGER.debug("Spinning up threads");
        Thread source = new Thread(reader);
        Thread kafka = new Thread(kafkaProducer);
        source.start();
        kafka.start();

        LOGGER.debug("Joining");
        // Only the consumer end is joined; the reader thread terminates once it
        // has written the whole file and closed its end of the pipe.
        kafka.join();
    } catch (IOException ex) {
        LOGGER.fatal("IO Error while piping", ex);
        LOGGER.trace(null, ex);
    } catch (InterruptedException ex) {
        LOGGER.warn("interrupted", ex);
        LOGGER.trace(null, ex);
    }
}
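// The method above wires a file reader to a Kafka producer through an in-memory
// pipe. A self-contained sketch of the same piped-stream pattern using only the
// standard library (all names here are illustrative, not from the source):
import java.io.IOException;
import java.io.PipedInputStream;
import java.io.PipedOutputStream;

public class PipeDemo {
    public static void main(String[] args) throws Exception {
        PipedInputStream source = new PipedInputStream(8192);
        PipedOutputStream sink = new PipedOutputStream(source);

        Thread writer = new Thread(() -> {
            try {
                sink.write("hello through the pipe".getBytes());
                sink.close(); // closing the writer end signals EOF to the reader
            } catch (IOException ignored) {
            }
        });
        writer.start();

        int b;
        while ((b = source.read()) != -1) { // -1 once the writer end is closed
            System.out.print((char) b);
        }
        writer.join();
    }
}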
/**
 * Override to handle interception.
 *
 * @param invocation the current action invocation
 * @return the navigation result
 * @throws Exception
 */
public String intercept(ActionInvocation invocation) throws Exception {
    String _logger_method = "intercept";
    if (log.isTraceEnabled()) {
        log.trace("> " + _logger_method);
    }
    String result;
    try {
        String nextPage = checkAccess(invocation);
        if (nextPage == null) {
            Logger invocationLog = null;
            if (log.isDebugEnabled()) {
                invocationLog = Logger.getLogger(invocation.getAction().getClass());
                invocationLog.debug("> " + invocation.getProxy().getMethod());
            }
            result = invocation.invoke();
            // Guard against invocationLog being null if the debug level changed mid-call
            if (log.isDebugEnabled() && invocationLog != null) {
                invocationLog.debug("< " + invocation.getProxy().getMethod());
            }
        } else {
            result = NavConsts.POPUP_TIME_OUT;
        }
    } catch (Exception e) {
        String excID = Long.toString(System.currentTimeMillis());
        BaseAction baseAction = (BaseAction) invocation.getAction();
        baseAction.addFieldError("errorID", "Error ID: " + excID);
        publishException(invocation, new ExceptionHolder(e));
        return NavConsts.POPUP_GLOBAL_ERROR;
    }
    if (log.isTraceEnabled()) {
        log.trace("< " + _logger_method);
    }
    return result;
}
@Override
public boolean sendPacket(TransferDescription description, byte[] payload) {
    JID jid = description.getRecipient();
    LOG.trace("intercepting outgoing packet to: " + jid);

    discardOutgoingSessionPackets.putIfAbsent(jid, false);
    boolean discard = discardOutgoingSessionPackets.get(jid);
    if (discard) {
        LOG.trace("discarding outgoing packet: " + description);
        return false;
    }

    blockOutgoingSessionPackets.putIfAbsent(jid, false);
    boolean blockOutgoingPackets = blockOutgoingSessionPackets.get(jid);

    if (blockOutgoingPackets || blockAllOutgoingSessionPackets) {
        blockedOutgoingSessionPackets.putIfAbsent(
                jid, new ConcurrentLinkedQueue<OutgoingPacketHolder>());

        OutgoingPacketHolder holder = new OutgoingPacketHolder();
        holder.description = description;
        holder.payload = payload;

        LOG.trace("queuing outgoing packet: " + description);
        blockedOutgoingSessionPackets.get(jid).add(holder);
        return false;
    }

    return true;
}
@Override
public boolean receivedPacket(BinaryXMPPExtension object) {
    JID jid = object.getTransferDescription().getSender();
    LOG.trace("intercepting incoming packet from: " + jid);

    discardIncomingSessionPackets.putIfAbsent(jid, false);
    boolean discard = discardIncomingSessionPackets.get(jid);
    if (discard) {
        LOG.trace("discarding incoming packet: " + object);
        return false;
    }

    blockIncomingSessionPackets.putIfAbsent(jid, false);
    boolean blockIncomingPackets = blockIncomingSessionPackets.get(jid);

    if (blockIncomingPackets || blockAllIncomingSessionPackets) {
        blockedIncomingSessionPackets.putIfAbsent(
                jid, new ConcurrentLinkedQueue<BinaryXMPPExtension>());

        LOG.trace("queuing incoming packet: " + object);
        blockedIncomingSessionPackets.get(jid).add(object);
        return false;
    }

    return true;
}
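// The two interceptors above queue packets while a JID is blocked. A minimal
// sketch of the flush counterpart (a hypothetical helper, not from the source),
// assuming queued packets are re-sent once the block for a JID is lifted:
private void unblockOutgoingSessionPackets(JID jid) {
    blockOutgoingSessionPackets.put(jid, false);
    Queue<OutgoingPacketHolder> queued = blockedOutgoingSessionPackets.remove(jid);
    if (queued == null) return;
    OutgoingPacketHolder holder;
    while ((holder = queued.poll()) != null) {
        LOG.trace("flushing previously blocked packet: " + holder.description);
        sendPacket(holder.description, holder.payload); // re-enters the interceptor
    }
}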
@Audit
@Transactional(readOnly = false, propagation = Propagation.REQUIRED)
@Override
public void reportSpammersContent(User spammer, User reporter, String comment) {
    if (log.isInfoEnabled()) {
        log.info("Reporting SPAM Abuse on all content of this spammer: "
                + spammer.getUsername() + ". Reporter is: " + reporter.getUsername());
    }
    final Date reportDate = new Date();

    Iterable<Document> docs = documentManager.getUserDocuments(spammer, documentStates);
    for (Document document : docs) {
        if (log.isTraceEnabled()) {
            log.trace("Report spam of document: " + document.getDocumentID());
        }
        reportSpam(document, reporter, comment, reportDate);
    }

    Iterable<ForumMessage> messages = forumManager.getUserMessages(spammer);
    for (ForumMessage message : messages) {
        if (log.isTraceEnabled()) {
            log.trace("Report spam of message: " + message.getID()
                    + ", threadId: " + message.getForumThreadID());
        }
        // TODO: Check how root messages (threads) are handled
        reportSpam(message, reporter, comment, reportDate);
    }

    List<Blog> blogs = blogManager.getExplicitlyEntitledBlogs(spammer);
    for (Blog blog : blogs) {
        if (blog.isUserBlog()) {
            Iterator<BlogPost> blogPosts = blogManager.getBlogPosts(blog);
            while (blogPosts.hasNext()) {
                BlogPost blogPost = blogPosts.next();
                if (log.isTraceEnabled()) {
                    log.trace("Report spam for Blog post, id: " + blogPost.getID());
                }
                reportSpam(blogPost, reporter, comment, reportDate);
            }
        }
    }

    Iterator<Favorite> favorites =
            favoriteManager.getUserFavorites(spammer, Sets.newHashSet(externalUrlObjectType));
    while (favorites.hasNext()) {
        Favorite favorite = favorites.next();
        JiveObject favoritedObject = favorite.getObjectFavorite().getFavoritedObject();
        if (log.isTraceEnabled()) {
            log.trace("Report spam Favorite (Bookmark) to external URL: " + favorite.getID());
            log.trace("Favorited object: " + favoritedObject);
        }
        reportSpam(favoritedObject, reporter, comment, reportDate);
    }
}
/**
 * Attempts to load a remote resource (jars, properties files, etc).
 *
 * @param url the location of the resource to load
 * @throws IOException
 */
private void loadRemoteResource(URL url) throws IOException {
    if (logger.isTraceEnabled())
        logger.trace("Attempting to load a remote resource.");

    if (url.toString().toLowerCase().endsWith(".jar")) {
        loadJar(url);
        return;
    }

    InputStream stream = url.openStream();
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    try {
        int byt;
        while (((byt = stream.read()) != -1)) {
            out.write(byt);
        }
        byte[] content = out.toByteArray();

        if (jarEntryContents.containsKey(url.toString())) {
            if (!Configuration.supressCollisionException())
                throw new JclException("Resource " + url.toString() + " already loaded");
            else {
                if (logger.isTraceEnabled())
                    logger.trace("Resource " + url.toString() + " already loaded; ignoring entry...");
                return;
            }
        }

        if (logger.isTraceEnabled())
            logger.trace("Loading remote resource.");
        jarEntryContents.put(url.toString(), content);
    } finally {
        // Close the streams on all exit paths, including the collision cases above
        out.close();
        stream.close();
    }
}
/** Test the Date the feature was designed for (http://en.wikipedia.org/wiki/Year_2038_problem) */
@Test
public void testParseCaLatestValidDateTime() {
    LOG.trace(">testParseCaLatestValidDateTime");
    final String bug2038Hex = "80000000";
    LOG.info("bug2038Hex: " + bug2038Hex);
    final String bug2038Iso =
            FastDateFormat.getInstance("yyyy-MM-dd HH:mm:ssZZ", TimeZone.getTimeZone("UTC"))
                    .format(Long.parseLong(bug2038Hex, 16) * 1000);
    LOG.info("bug2038Iso: " + bug2038Iso);
    final Date bug2038HexDate = ValidityDate.parseCaLatestValidDateTime(bug2038Hex);
    LOG.info("bug2038HexDate: " + bug2038HexDate);
    final Date bug2038IsoDate = ValidityDate.parseCaLatestValidDateTime(bug2038Iso);
    LOG.info("bug2038IsoDate: " + bug2038IsoDate);
    Assert.assertEquals("The two date formats should yield the same Date!",
            bug2038HexDate, bug2038IsoDate);
    // Test now also
    final Date now = new Date();
    LOG.info("now: " + now);
    final String nowIso =
            FastDateFormat.getInstance(ValidityDate.ISO8601_DATE_FORMAT, TimeZone.getTimeZone("UTC"))
                    .format(now);
    LOG.info("nowIso: " + nowIso);
    final Date nowIsoDate = ValidityDate.parseCaLatestValidDateTime(nowIso);
    LOG.info("nowIsoDate: " + nowIsoDate);
    // Compare as strings since we will lose milliseconds in the conversion to ISO8601 format
    Assert.assertEquals("Unable to parse current time correctly!",
            now.toString(), nowIsoDate.toString());
    // Test unhappy path (return of default value)
    final Date defaultIsoDate = ValidityDate.parseCaLatestValidDateTime("COFFEE");
    Assert.assertEquals("Default value not returned when invalid date-time specified!",
            new Date(Long.MAX_VALUE).toString(), defaultIsoDate.toString());
    LOG.trace("<testParseCaLatestValidDateTime");
}
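// Worked arithmetic behind the constant above: 0x80000000 = 2^31 = 2147483648
// seconds after the Unix epoch, the first instant a signed 32-bit time_t
// overflows. A self-contained check (Date.toString prints the rollover moment,
// 2038-01-19 03:14:08 UTC, in the local timezone):
import java.util.Date;

public class Y2038Demo {
    public static void main(String[] args) {
        long seconds = Long.parseLong("80000000", 16); // 2147483648
        System.out.println(new Date(seconds * 1000L));
    }
}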
/**
 * Reads all available data from the input stream of <code>conn</code> and returns it as a byte
 * array. If no input data is available the method returns <code>null</code>.
 *
 * @param conn the connection to read from
 * @return the body data, or <code>null</code> if the response body is empty
 * @throws IOException
 */
protected static byte[] loadBodyDataInBuffer(HttpURLConnection conn) throws IOException {
    InputStream input = conn.getInputStream();
    byte[] data = null;
    try {
        if (Thread.currentThread() instanceof MapSourceListener) {
            // We only throttle atlas downloads, not downloads for the preview map
            long bandwidthLimit = Settings.getInstance().getBandwidthLimit();
            if (bandwidthLimit > 0) {
                input = new ThrottledInputStream(input);
            }
        }
        data = Utilities.getInputBytes(input);
    } catch (IOException e) {
        InputStream errorIn = conn.getErrorStream();
        try {
            byte[] errData = Utilities.getInputBytes(errorIn);
            log.trace("Retrieved " + errData.length + " error bytes for a HTTP "
                    + conn.getResponseCode());
        } catch (Exception ee) {
            // Log the secondary failure (ee), not the original exception
            log.debug("Error retrieving error stream content: " + ee);
        } finally {
            Utilities.closeStream(errorIn);
        }
        throw e;
    } finally {
        Utilities.closeStream(input);
    }
    log.trace("Retrieved " + data.length + " bytes for a HTTP " + conn.getResponseCode());
    if (data.length == 0) return null;
    return data;
}
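// ThrottledInputStream above caps bandwidth for atlas downloads. A minimal
// sketch of such a decorator with a fixed bytes-per-second budget (the real
// class's constructor and policy may differ; this is an assumption):
import java.io.FilterInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.InterruptedIOException;

class SimpleThrottledInputStream extends FilterInputStream {
    private final long bytesPerSecond;
    private long windowStart = System.currentTimeMillis();
    private long windowBytes = 0;

    SimpleThrottledInputStream(InputStream in, long bytesPerSecond) {
        super(in);
        this.bytesPerSecond = bytesPerSecond;
    }

    @Override
    public int read() throws IOException {
        throttle(1);
        return super.read();
    }

    @Override
    public int read(byte[] b, int off, int len) throws IOException {
        int n = super.read(b, off, len);
        if (n > 0) throttle(n);
        return n;
    }

    private void throttle(int n) throws IOException {
        windowBytes += n;
        long elapsed = System.currentTimeMillis() - windowStart;
        if (elapsed >= 1000) { // a new one-second window starts
            windowStart += elapsed;
            windowBytes = n;
            return;
        }
        if (windowBytes > bytesPerSecond) {
            try {
                Thread.sleep(1000 - elapsed); // wait out the rest of the window
            } catch (InterruptedException ex) {
                Thread.currentThread().interrupt();
                throw new InterruptedIOException("throttled read interrupted");
            }
            windowStart = System.currentTimeMillis();
            windowBytes = 0;
        }
    }
}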
private Boundary ngramMatcher(String[] tokenizedRole, List<String[]> sentence, int n) {
    logger.debug(Arrays.toString(tokenizedRole) + "\t" + n);
    if (n > sentence.size()) {
        return null;
    }
    for (int i = 0; i < sentence.size() - n + 1; i++) {
        boolean b = true;
        for (int j = 0; j < n; j++) {
            // Log the token actually being compared (i + j, not i)
            logger.trace(i + "," + j + "\t" + tokenizedRole[j] + "\t" + sentence.get(i + j)[2]);
            // b &= tokenizedRole[j].equalsIgnoreCase(sentence.get(i + j)[2]);
            // replaced with a kernel function
            b &= equals(tokenizedRole[j].toLowerCase(), sentence.get(i + j)[2].toLowerCase());
            logger.trace(i + "," + j + "\t" + tokenizedRole[j] + "\t" + sentence.get(i + j)[2]
                    + "\t" + tokenizedRole[j].equals(sentence.get(i + j)[2]));
        }
        if (b) {
            logger.trace("boundary(" + n + "): " + i + ", " + (i + n - 1));
            return new Boundary(i, i + n - 1);
        }
    }
    return null;
}
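// The matcher above slides an n-token window over the sentence and compares
// each window against the role tokens. A stripped-down, self-contained sketch
// of the same window scan (exact String comparison in place of the kernel):
static int[] findNgram(String[] needle, String[] haystack) {
    int n = needle.length;
    for (int i = 0; i + n <= haystack.length; i++) {
        boolean match = true;
        for (int j = 0; j < n; j++) {
            if (!needle[j].equalsIgnoreCase(haystack[i + j])) {
                match = false;
                break;
            }
        }
        if (match) return new int[] { i, i + n - 1 }; // inclusive boundary
    }
    return null; // no window matched
}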
/**
 * Stores an uploaded file "as it is" and adds a database entry.
 *
 * @return the persisted attachment, or null on failure.
 */
@Override
public Attachment uploadFile(String fileName, String contentType, User user,
        byte[] contents, String tags) {
    if (log.isTraceEnabled()) {
        log.trace(">> uploadFile()");
    }
    try {
        if (contents.length > MAX_ZIP_SIZE) {
            log.trace("File too large!");
            throw new IOException("File too large.");
        }
        if (!checkUploadRights(user)) {
            return null;
        }
        Attachment a = prepareAttachment(fileName, contentType, user, contents, tags);
        em.persist(a);
        Set<User> uset = new HashSet<User>();
        uset.add(user);
        a.setUser(uset);
        em.merge(a);
        if (log.isTraceEnabled()) {
            log.trace("<< uploadFile(): " + a);
        }
        return a;
    } catch (Exception ex) {
        log.error("uploadFile(): Failed to upload file.", ex);
        return null;
    }
}
@Override
public String createCsvData(String rrdFilename, long startTime, long endTime)
        throws IOException, MetricsGraphException {
    LOGGER.trace("ENTERING: createCsvData");

    MetricData metricData = getMetricData(rrdFilename, startTime, endTime);
    StringBuilder csv = new StringBuilder();
    csv.append("Timestamp,Value\n");

    List<Long> timestamps = metricData.getTimestamps();
    List<Double> values = metricData.getValues();

    for (int i = 0; i < timestamps.size(); i++) {
        String timestamp = getCalendarTime(timestamps.get(i));
        csv.append(timestamp + "," + values.get(i).longValue() + "\n");
    }

    LOGGER.debug("csv = " + csv.toString());
    LOGGER.trace("EXITING: createCsvData");

    return csv.toString();
}
public PutMetricDataResponseType putMetricData(PutMetricDataType request)
        throws CloudWatchException {
    PutMetricDataResponseType reply = request.getReply();
    final Context ctx = Contexts.lookup();
    try {
        LOG.trace("put metric data called");
        // IAM action check
        checkActionPermission(PolicySpec.CLOUDWATCH_PUTMETRICDATA, ctx);
        final OwnerFullName ownerFullName = ctx.getUserFullName();
        final List<MetricDatum> metricData = validateMetricData(request.getMetricData());
        final String namespace = validateNamespace(request.getNamespace(), true);
        final Boolean isUserAccountAdmin = Principals.isSameUser(
                Principals.systemUser(),
                Wrappers.unwrap(Context.class, Contexts.lookup()).getUser());
        LOG.trace("Namespace=" + namespace);
        LOG.trace("metricData=" + metricData);
        MetricType metricType = getMetricTypeFromNamespace(namespace);
        if (metricType == MetricType.System && !isUserAccountAdmin) {
            throw new InvalidParameterValueException(
                    "The value AWS/ for parameter Namespace is invalid.");
        }
        MetricDataQueue.getInstance().insertMetricData(
                ownerFullName.getAccountNumber(), namespace, metricData, metricType);
    } catch (Exception ex) {
        handleException(ex);
    }
    return reply;
}
@Override
public OutputStream createPptReport(List<String> metricNames, String metricsDir,
        long startTime, long endTime) throws IOException, MetricsGraphException {
    LOGGER.trace("ENTERING: createPptReport");

    SlideShow ppt = new SlideShow();
    Collections.sort(metricNames);

    for (String metricName : metricNames) {
        String rrdFilename = metricsDir + metricName + ".rrd";
        byte[] graph = createGraph(metricName, rrdFilename, startTime, endTime);
        MetricData metricData = getMetricData(rrdFilename, startTime, endTime);
        createSlide(ppt, metricName, graph, metricData);
    }

    ByteArrayOutputStream bos = new ByteArrayOutputStream();
    ppt.write(bos);
    bos.close();

    LOGGER.trace("EXITING: createPptReport");

    return bos;
}
/** @generated */
public void connectPort(final org.omg.CORBA.Object connection, final String connectionId)
        throws CF.PortPackage.InvalidPort, CF.PortPackage.OccupiedPort {
    if (logger != null) {
        logger.trace("bulkio.OutPort connectPort ENTER (port=" + name + ")");
    }

    synchronized (this.updatingPortsLock) {
        final dataOctetOperations port;
        try {
            port = BULKIO.jni.dataOctetHelper.narrow(connection);
        } catch (final Exception ex) {
            if (logger != null) {
                logger.error("bulkio.OutPort CONNECT PORT: " + name + " PORT NARROW FAILED");
            }
            throw new CF.PortPackage.InvalidPort(
                    (short) 1, "Invalid port for connection '" + connectionId + "'");
        }
        this.outConnections.put(connectionId, port);
        this.active = true;
        this.stats.put(connectionId, new linkStatistics(this.name, new UInt8Size()));

        if (logger != null) {
            logger.debug("bulkio.OutPort CONNECT PORT: " + name
                    + " CONNECTION '" + connectionId + "'");
        }
    }

    if (logger != null) {
        logger.trace("bulkio.OutPort connectPort EXIT (port=" + name + ")");
    }

    if (callback != null) {
        callback.connect(connectionId);
    }
}
@Test
public void callerData() {
    assertEquals(0, listAppender.list.size());

    PatternLayout pl = new PatternLayout();
    pl.setPattern("%-5level [%class] %logger - %msg");
    pl.setContext(lc);
    pl.start();
    listAppender.layout = pl;

    Logger logger = Logger.getLogger("basic-test");
    logger.trace("none");
    assertEquals(0, listAppender.list.size());

    rootLogger.setLevel(Level.TRACE);
    logger.trace(HELLO);
    assertEquals(1, listAppender.list.size());

    ILoggingEvent event = (ILoggingEvent) listAppender.list.get(0);
    assertEquals(HELLO, event.getMessage());

    assertEquals(1, listAppender.stringList.size());
    assertEquals("TRACE [" + Log4jInvocation.class.getName() + "] basic-test - Hello",
            listAppender.stringList.get(0));
}
@Override
public void run() {
    try {
        selector = Selector.open();
        while (true) {
            processSelectionQueue();
            int nKeys = selector.select(selectWaitTime); // blocking
            if (nKeys == 0) {
                continue;
            } else {
                logger.trace(String.format("Selector %d, keys num: %d", selectorNum, nKeys));
            }
            Set<SelectionKey> keys = selector.selectedKeys();
            Iterator<SelectionKey> iter = keys.iterator();
            while (iter.hasNext()) {
                SelectionKey key = iter.next();
                iter.remove();
                logger.trace("Key operations: " + key.readyOps());
                if (key.isWritable()) {
                    doWrite(key);
                }
            }
        }
    } catch (IOException e) {
        throw new RuntimeException(e);
    }
}
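// processSelectionQueue() is referenced above but not shown. In the usual NIO
// pattern, other threads cannot safely register channels while select() blocks,
// so registrations are queued and drained on the selector thread. A minimal
// sketch under that assumption (hypothetical names; assumes java.nio.channels.*
// and java.util.concurrent.ConcurrentLinkedQueue are imported):
private final Queue<SocketChannel> pendingRegistrations =
        new ConcurrentLinkedQueue<SocketChannel>();

public void register(SocketChannel channel) {
    pendingRegistrations.add(channel);
    selector.wakeup(); // unblock select() so the new channel is picked up promptly
}

private void processSelectionQueue() throws IOException {
    SocketChannel channel;
    while ((channel = pendingRegistrations.poll()) != null) {
        channel.configureBlocking(false);
        channel.register(selector, SelectionKey.OP_WRITE);
    }
}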
private CompleteDNS getCompleteDNS(@Nonnull String providerDnsZoneId, boolean withSubdomains)
        throws CloudException, InternalException {
    Logger std = NovaOpenStack.getLogger(RackspaceCloudDNS.class, "std");

    if (std.isTraceEnabled()) {
        std.trace("ENTER: " + RackspaceCloudDNS.class.getName() + ".getCompleteDNS()");
    }
    try {
        ProviderContext ctx = provider.getContext();
        if (ctx == null) {
            std.error("No context exists for this request");
            throw new InternalException("No context exists for this request");
        }
        String query = providerDnsZoneId + "?showRecords=true";
        if (withSubdomains) {
            query = query + "&showSubdomains=true";
        }
        NovaMethod method = new NovaMethod(provider);
        JSONObject response = method.getResource(SERVICE, RESOURCE, query, false);

        if (response == null) {
            return null;
        }
        try {
            DNSZone zone = toZone(ctx, response);
            if (zone != null) {
                CompleteDNS dns = new CompleteDNS();
                dns.domain = zone;
                dns.subdomains = new ArrayList<DNSZone>();

                JSONObject subdomains =
                        (response.has("subdomains") ? response.getJSONObject("subdomains") : null);
                if (subdomains != null) {
                    JSONArray domains =
                            (subdomains.has("domains") ? subdomains.getJSONArray("domains") : null);
                    if (domains != null) {
                        listSubdomains(ctx, dns.subdomains, zone, domains);
                    }
                }
                return dns;
            }
        } catch (JSONException e) {
            std.error("getCompleteDNS(): JSON error parsing response: " + e.getMessage());
            e.printStackTrace();
            throw new CloudException(CloudErrorType.COMMUNICATION, 200, "invalidResponse",
                    "JSON error parsing " + response);
        }
        return null;
    } finally {
        if (std.isTraceEnabled()) {
            std.trace("EXIT: " + RackspaceCloudDNS.class.getName() + ".getCompleteDNS()");
        }
    }
}
/**
 * Only return transaction state objects that are ready to run. This is non-blocking. If the txn
 * is not ready, then this will return null. <B>Note:</B> This should only be allowed to be called
 * by one thread.
 */
@Override
public AbstractTransaction poll() {
    AbstractTransaction retval = null;
    if (trace.val)
        LOG.trace(String.format("Partition %d :: Attempting to acquire lock", this.partitionId));
    if (this.state == QueueState.UNBLOCKED) {
        this.lock.lock();
        try {
            // if (this.state != QueueState.UNBLOCKED) {
            //     this.checkQueueState(false);
            // }
            if (this.state == QueueState.UNBLOCKED) {
                // 2012-12-21
                // The result is allowed to be null because there is a race condition
                // where another thread may remove the txn from the queue first.
                retval = super.poll();
                if (retval != null) {
                    if (debug.val)
                        LOG.debug(String.format("Partition %d :: poll() -> %s",
                                this.partitionId, retval));
                    this.lastTxnPopped = retval.getTransactionId();
                    this.txnsPopped++;
                }
                // Call this again to prime the next txn
                this.checkQueueState(true);
            }
        } finally {
            if (trace.val)
                LOG.trace(String.format("Partition %d :: Releasing lock", this.partitionId));
            this.lock.unlock();
        }
    }
    return (retval);
}
/**
 * This method is used by non-blocking code to determine if the given buffer represents a
 * complete request. Because the non-blocking code by definition cannot just block waiting for
 * more data, it's possible to get partial reads, and this identifies that case.
 *
 * @param buffer Buffer to check; the buffer is reset to position 0 before calling this method and
 *     the caller must reset it after the call returns
 * @return True if the buffer holds a complete request, false otherwise
 */
public boolean isCompleteRequest(ByteBuffer buffer) {
    DataInputStream inputStream = new DataInputStream(new ByteBufferBackedInputStream(buffer));
    try {
        int dataSize = inputStream.readInt();
        if (logger.isTraceEnabled())
            logger.trace("In isCompleteRequest, dataSize: " + dataSize
                    + ", buffer position: " + buffer.position());
        if (dataSize == -1) return true;
        // Here we skip over the data (without reading it in) and
        // move our position to just past it.
        buffer.position(buffer.position() + dataSize);
        return true;
    } catch (Exception e) {
        // This could also occur if the various methods we call into
        // re-throw a corrupted value error as some other type of exception.
        // For example, updating the position on a buffer past its limit
        // throws an IllegalArgumentException.
        if (logger.isTraceEnabled())
            logger.trace("In isCompleteRequest, probable partial read occurred: " + e);
        return false;
    }
}
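// A caller-side sketch of the contract in the javadoc above: rewind before the
// check, and rewind again afterwards before consuming the request (hypothetical
// usage; handleRequest is not from the source):
void onReadable(ByteBuffer buffer) {
    buffer.rewind(); // position 0, as isCompleteRequest expects
    if (isCompleteRequest(buffer)) {
        buffer.rewind();        // the check moved the position; reset it
        handleRequest(buffer);  // hypothetical downstream handler
    }
    // else: partial read - keep accumulating bytes and retry on the next event
}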
public BufferedImage getTileImage(int x, int y) throws IOException {
    SRCachedTile cachedTile = cache.get(new CacheKey(x, y));
    BufferedImage image = null;
    if (cachedTile != null) {
        CachedTile tile = cachedTile.get();
        if (tile != null) {
            if (tile.loaded)
                log.trace(String.format("Cache hit: x=%d y=%d", x, y));
            image = tile.getImage();
            if (!tile.nextLoadJobCreated) {
                // log.debug(String.format("Preload job added : x=%d y=%d l=%d", x + 1, y, layer));
                preloadTile(new CachedTile(new CacheKey(x + 1, y)));
                tile.nextLoadJobCreated = true;
            }
        }
    }
    if (image == null) {
        log.trace(String.format("Cache miss: x=%d y=%d", x, y));
        // log.debug(String.format("Preload job added : x=%d y=%d l=%d", x + 1, y, layer));
        preloadTile(new CachedTile(new CacheKey(x + 1, y)));
        image = internalGetTileImage(x, y);
    }
    return image;
}
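// SRCachedTile.get() above can return null, which suggests tiles are held
// behind SoftReferences so the JVM may reclaim them under memory pressure.
// A minimal sketch of that wrapper, assuming this interpretation (the real
// class may differ):
class SRCachedTile {
    private final SoftReference<CachedTile> ref;

    SRCachedTile(CachedTile tile) {
        ref = new SoftReference<CachedTile>(tile);
    }

    CachedTile get() {
        return ref.get(); // null once the GC has reclaimed the tile
    }
}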
@Override
@javax.ejb.TransactionAttribute(javax.ejb.TransactionAttributeType.SUPPORTS)
public Attachment getUploadedFile(Long userId, long id) {
    if (log.isTraceEnabled()) {
        log.trace(">> getUploadedFile(): userId=" + userId + ", id=" + id);
    }
    try {
        Attachment att = em.find(Attachment.class, id);
        // if (!checkDownloadRights(userId, att.getId())) {
        //     return null;
        // }
        if (log.isTraceEnabled()) {
            log.trace("<< getUploadedFile(): " + att);
        }
        return att;
    } catch (Exception ex) {
        // Log the failure instead of swallowing it silently
        log.error("<< getUploadedFile(): lookup failed", ex);
    }
    return null;
}
@Override
public boolean postStateTransitionEvent(State oldState, Event event, State newState,
        VirtualMachine vm, boolean status, Long oldHostId) {
    if (!status) {
        return false;
    }

    if (VirtualMachine.State.isVmStarted(oldState, event, newState)) {
        if (s_logger.isTraceEnabled()) {
            s_logger.trace("Security Group Mgr: handling start of vm id " + vm.getId());
        }
        handleVmStarted((VMInstanceVO) vm);
    } else if (VirtualMachine.State.isVmStopped(oldState, event, newState)) {
        if (s_logger.isTraceEnabled()) {
            s_logger.trace("Security Group Mgr: handling stop of vm id " + vm.getId());
        }
        handleVmStopped((VMInstanceVO) vm);
    } else if (VirtualMachine.State.isVmMigrated(oldState, event, newState)) {
        if (s_logger.isTraceEnabled()) {
            s_logger.trace("Security Group Mgr: handling migration of vm id " + vm.getId());
        }
        handleVmMigrated((VMInstanceVO) vm);
    }

    return true;
}
/**
 * Adds a single file into the ZipOutputStream with the specified entry name.
 *
 * @throws IOException
 */
private void addFileToZip(ZipOutputStream zipOut, UploadedFile file, String name)
        throws IOException {
    if (log.isTraceEnabled()) {
        log.trace(">> addFileToZip(): " + file);
    }
    ZipEntry entry = new ZipEntry(name);
    zipOut.putNextEntry(entry);

    InputStream in = null;
    try {
        in = file.getInputstream();
        // Copy from the stream we opened; the original opened a second stream here
        FileUtils.copyStream(in, zipOut);
        zipOut.closeEntry();
    } finally {
        FileUtils.close(in);
    }
    // try (InputStream in = file.getInputstream()) {
    //     FileUtils.copyStream(in, zipOut);
    //     zipOut.closeEntry();
    // }
    if (log.isTraceEnabled()) {
        log.trace("<< addFileToZip()");
    }
}
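// A minimal caller sketch for the helper above (hypothetical collection and
// output file name; the ZipOutputStream handling is standard java.util.zip usage):
void zipUploads(List<UploadedFile> uploadedFiles) throws IOException {
    try (ZipOutputStream zipOut = new ZipOutputStream(new FileOutputStream("bundle.zip"))) {
        for (UploadedFile f : uploadedFiles) {
            addFileToZip(zipOut, f, f.getFileName()); // one entry per upload
        }
    } // closing the stream finalizes the zip central directory
}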
@Override
protected void runInContext() {
    HttpContext context = new BasicHttpContext(null);
    try {
        while (!Thread.interrupted() && _conn.isOpen()) {
            _httpService.handleRequest(_conn, context);
            _conn.close();
        }
    } catch (ConnectionClosedException ex) {
        if (s_logger.isTraceEnabled()) {
            s_logger.trace("ApiServer: Client closed connection");
        }
    } catch (IOException ex) {
        if (s_logger.isTraceEnabled()) {
            s_logger.trace("ApiServer: IOException - " + ex);
        }
    } catch (HttpException ex) {
        s_logger.warn("ApiServer: Unrecoverable HTTP protocol violation - " + ex);
    } finally {
        try {
            _conn.shutdown();
        } catch (IOException ignore) {
        }
    }
}
@Override
public void replaceContent(InputStream in, Long contentLength, GetableResource wrapped) {
    Template template = (Template) wrapped;
    if (log.isTraceEnabled()) {
        log.trace("replaceContent: parent template: " + template.getTemplateName());
    }
    if ("root".equals(template.getTemplateName())) {
        log.trace("has a root template, so use entire content as the body parameter");
        // Parse the doc, and use the entire root element (inclusive) as the
        // content of the body param
        XmlUtils2 xmlUtils2 = new XmlUtils2();
        Document doc;
        try {
            doc = xmlUtils2.getJDomDocument(in);
        } catch (JDOMException ex) {
            throw new RuntimeException(ex);
        }
        String content = JDomUtils.getXml(doc.getRootElement());
        CodeUtils.saveValue(template, "body", content);
        template.save();
    } else {
        log.trace("parent is not root, so parse content for body and title");
        pageContentTypeHandler.replaceContent(in, contentLength, wrapped);
    }
}