@Override
public boolean write(Object object) throws DataException {
  if (closed) {
    throw new IllegalStateException("Writer closed.");
  }
  while (writers.size() > 0) {
    DataWriter currentWriter = writers.get(0);
    boolean keep = currentWriter.write(object);
    if (keep) {
      if (logger.isTraceEnabled()) {
        logger.trace("Current writer [" + currentWriter + "] requires more data.");
      }
      return true;
    } else {
      if (logger.isTraceEnabled()) {
        logger.trace("Current writer [" + currentWriter + "] complete.");
      }
      currentWriter.close();
      writers.remove(0);
    }
  }
  return false;
}
public static CallContext unregister() {
  CallContext context = s_currentContext.get();
  if (context == null) {
    return null;
  }
  s_currentContext.remove();
  if (s_logger.isTraceEnabled()) {
    s_logger.trace("Unregistered: " + context);
  }
  String contextId = context.getContextId();
  String sessionIdOnStack = null;
  String sessionIdPushedToNDC = "ctx-" + UuidUtils.first(contextId);
  while ((sessionIdOnStack = NDC.pop()) != null) {
    if (sessionIdPushedToNDC.equals(sessionIdOnStack)) {
      break;
    }
    if (s_logger.isTraceEnabled()) {
      s_logger.trace("Popping from NDC: " + contextId);
    }
  }
  Stack<CallContext> stack = s_currentContextStack.get();
  stack.pop();
  if (!stack.isEmpty()) {
    s_currentContext.set(stack.peek());
  }
  return context;
}
public synchronized void addSession(long id, int sessionTimeout) {
  sessionsWithTimeout.put(id, sessionTimeout);
  if (sessionsById.get(id) == null) {
    SessionImpl s = new SessionImpl(id, sessionTimeout, 0);
    sessionsById.put(id, s);
    if (LOG.isTraceEnabled()) {
      ZooTrace.logTraceMessage(
          LOG,
          ZooTrace.SESSION_TRACE_MASK,
          "SessionTrackerImpl --- Adding session 0x" + Long.toHexString(id) + " " + sessionTimeout);
    }
  } else {
    if (LOG.isTraceEnabled()) {
      ZooTrace.logTraceMessage(
          LOG,
          ZooTrace.SESSION_TRACE_MASK,
          "SessionTrackerImpl --- Existing session 0x" + Long.toHexString(id) + " " + sessionTimeout);
    }
  }
  touchSession(id, sessionTimeout);
}
public JAudioTagReader(String filename) {
  try {
    MP3File.logger.setLevel(Level.OFF);
    this.filename = filename;
    if (log.isTraceEnabled()) log.trace("JAudioTagReader(): filename=" + filename);
    MP3File mp3file;
    try {
      // mp3file = new MP3File(filename);
      mp3file = new MP3File(new File(filename), MP3File.LOAD_ALL, true);
    } catch (ReadOnlyFileException roe) {
      mp3file = new MP3File(new File(filename), MP3File.LOAD_ALL, true);
    }
    if (log.isTraceEnabled())
      log.trace("JAudioTagReader(): mp3file=" + mp3file.displayStructureAsPlainText());
    id3v1 = mp3file.getID3v1Tag();
    id3v2 = mp3file.getID3v2TagAsv24();
    if (id3v2 != null) {
      String identifier = id3v2.getIdentifier();
      if (log.isTraceEnabled()) log.trace("JAudioTagReader(): id3v2 identifier=" + identifier);
    }
    if (id3v1 instanceof ID3v11Tag) {
      id3v11 = (ID3v11Tag) id3v1;
    }
    // if (log.isDebugEnabled())
    //   log.debug("JAudioTagReader(): GEOB frames=" + getFrames(FRAME_GENERAL));
  } catch (java.lang.OutOfMemoryError e) {
    log.error("JAudioTagReader(): error out of memory, filename=" + filename, e);
  } catch (Exception e) {
    log.error("JAudioTagReader(): error Exception, filename=" + filename, e);
  }
}
public SecondaryStorageVmVO assignSecStorageVmFromRunningPool(
    long dataCenterId, SecondaryStorageVm.Role role) {
  if (s_logger.isTraceEnabled()) {
    s_logger.trace(
        "Assign secondary storage vm from running pool for request from data center : "
            + dataCenterId);
  }
  SecondaryStorageVmAllocator allocator = getCurrentAllocator();
  assert (allocator != null);
  List<SecondaryStorageVmVO> runningList =
      _secStorageVmDao.getSecStorageVmListInStates(role, dataCenterId, State.Running);
  if (runningList != null && runningList.size() > 0) {
    if (s_logger.isTraceEnabled()) {
      s_logger.trace("Running secondary storage vm pool size : " + runningList.size());
      for (SecondaryStorageVmVO secStorageVm : runningList) {
        s_logger.trace("Running secStorageVm instance : " + secStorageVm.getHostName());
      }
    }
    Map<Long, Integer> loadInfo = new HashMap<Long, Integer>();
    return allocator.allocSecondaryStorageVm(runningList, loadInfo, dataCenterId);
  } else {
    if (s_logger.isTraceEnabled()) {
      s_logger.trace("Empty running secStorageVm pool for now in data center : " + dataCenterId);
    }
  }
  return null;
}
/**
 * Adds a single file into the ZipOutputStream with the specified entry name.
 *
 * @throws IOException
 */
private void addFileToZip(ZipOutputStream zipOut, UploadedFile file, String name)
    throws IOException {
  if (log.isTraceEnabled()) {
    log.trace(">> addFileToZip(): " + file);
  }
  ZipEntry entry = new ZipEntry(name);
  zipOut.putNextEntry(entry);
  InputStream in = null;
  try {
    in = file.getInputstream();
    // copy from the stream opened above instead of opening a second stream that is never closed
    FileUtils.copyStream(in, zipOut);
    zipOut.closeEntry();
  } finally {
    FileUtils.close(in);
  }
  // try (InputStream in = file.getInputstream()) {
  //   FileUtils.copyStream(file.getInputstream(), zipOut);
  //   zipOut.closeEntry();
  // }
  if (log.isTraceEnabled()) {
    log.trace("<< addFileToZip()");
  }
}
/** * Stores uploaded file "as it is" and adds database entry. * * @return ID of attachment in the database. */ @Override public Attachment uploadFile( String fileName, String contentType, User user, byte[] contents, String tags) { if (log.isTraceEnabled()) { log.trace(">> uploadFile()"); } try { if (contents.length > MAX_ZIP_SIZE) { log.trace("File too large!"); throw new IOException("File too large."); } if (!checkUploadRights(user)) { return null; } Attachment a = prepareAttachment(fileName, contentType, user, contents, tags); em.persist(a); Set<User> uset = new HashSet(); uset.add(user); a.setUser(uset); em.merge(a); if (log.isTraceEnabled()) { log.trace("<< uploadFile(): " + a); } return a; } catch (Exception ex) { log.error("uploadFile(): Failed to upload file.", ex); return null; } }
/**
 * This method is used by non-blocking code to determine if the given buffer represents a complete
 * request. Because the non-blocking code can, by definition, not just block waiting for more data,
 * it's possible to get partial reads, and this identifies that case.
 *
 * @param buffer Buffer to check; the buffer is reset to position 0 before calling this method and
 *     the caller must reset it after the call returns
 * @return True if the buffer holds a complete request, false otherwise
 */
public boolean isCompleteRequest(ByteBuffer buffer) {
  DataInputStream inputStream = new DataInputStream(new ByteBufferBackedInputStream(buffer));
  try {
    int dataSize = inputStream.readInt();
    if (logger.isTraceEnabled())
      logger.trace(
          "In isCompleteRequest, dataSize: " + dataSize + ", buffer position: " + buffer.position());
    if (dataSize == -1) return true;
    // Here we skip over the data (without reading it in) and
    // move our position to just past it.
    buffer.position(buffer.position() + dataSize);
    return true;
  } catch (Exception e) {
    // This could also occur if the various methods we call into
    // re-throw a corrupted value error as some other type of exception.
    // For example, updating the position on a buffer past its limit
    // throws an InvalidArgumentException.
    if (logger.isTraceEnabled())
      logger.trace("In isCompleteRequest, probable partial read occurred: " + e);
    return false;
  }
}
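// Hedged usage sketch, not taken from the original source: the Javadoc above says the caller is
// responsible for rewinding the buffer before the check and resetting it afterwards. A selector
// loop might apply that contract roughly as below; "requestBuffer", "onReadable" and
// "handleCompleteRequest" are hypothetical names introduced only for illustration.
private void onReadable(ByteBuffer requestBuffer) {
  requestBuffer.flip(); // switch from accumulating bytes to reading them; position is now 0
  if (isCompleteRequest(requestBuffer)) {
    requestBuffer.rewind(); // reset after the check, as the contract requires
    handleCompleteRequest(requestBuffer); // hypothetical downstream handler
  } else {
    // partial read: restore write mode and keep accumulating bytes from the channel
    requestBuffer.position(requestBuffer.limit());
    requestBuffer.limit(requestBuffer.capacity());
  }
}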
@Override
public boolean postStateTransitionEvent(
    State oldState,
    Event event,
    State newState,
    VirtualMachine vm,
    boolean status,
    Long oldHostId) {
  if (!status) {
    return false;
  }
  if (VirtualMachine.State.isVmStarted(oldState, event, newState)) {
    if (s_logger.isTraceEnabled()) {
      s_logger.trace("Security Group Mgr: handling start of vm id " + vm.getId());
    }
    handleVmStarted((VMInstanceVO) vm);
  } else if (VirtualMachine.State.isVmStopped(oldState, event, newState)) {
    if (s_logger.isTraceEnabled()) {
      s_logger.trace("Security Group Mgr: handling stop of vm id " + vm.getId());
    }
    handleVmStopped((VMInstanceVO) vm);
  } else if (VirtualMachine.State.isVmMigrated(oldState, event, newState)) {
    if (s_logger.isTraceEnabled()) {
      s_logger.trace("Security Group Mgr: handling migration of vm id " + vm.getId());
    }
    handleVmMigrated((VMInstanceVO) vm);
  }
  return true;
}
/**
 * Attempts to load a remote resource (jars, properties files, etc)
 *
 * @param url
 * @throws IOException
 */
private void loadRemoteResource(URL url) throws IOException {
  if (logger.isTraceEnabled()) logger.trace("Attempting to load a remote resource.");
  if (url.toString().toLowerCase().endsWith(".jar")) {
    loadJar(url);
    return;
  }
  InputStream stream = url.openStream();
  ByteArrayOutputStream out = new ByteArrayOutputStream();
  int byt;
  while (((byt = stream.read()) != -1)) {
    out.write(byt);
  }
  byte[] content = out.toByteArray();
  if (jarEntryContents.containsKey(url.toString())) {
    if (!Configuration.supressCollisionException())
      throw new JclException("Resource " + url.toString() + " already loaded");
    else {
      if (logger.isTraceEnabled())
        logger.trace("Resource " + url.toString() + " already loaded; ignoring entry...");
      return;
    }
  }
  if (logger.isTraceEnabled()) logger.trace("Loading remote resource.");
  jarEntryContents.put(url.toString(), content);
  out.close();
  stream.close();
}
private CompleteDNS getCompleteDNS(@Nonnull String providerDnsZoneId, boolean withSubdomains)
    throws CloudException, InternalException {
  Logger std = NovaOpenStack.getLogger(RackspaceCloudDNS.class, "std");
  if (std.isTraceEnabled()) {
    std.trace("ENTER: " + RackspaceCloudDNS.class.getName() + ".getCompleteDNS()");
  }
  try {
    ProviderContext ctx = provider.getContext();
    if (ctx == null) {
      std.error("No context exists for this request");
      throw new InternalException("No context exists for this request");
    }
    String query = providerDnsZoneId + "?showRecords=true";
    if (withSubdomains) {
      query = query + "&showSubdomains=true";
    }
    NovaMethod method = new NovaMethod(provider);
    JSONObject response = method.getResource(SERVICE, RESOURCE, query, false);
    if (response == null) {
      return null;
    }
    try {
      DNSZone zone = toZone(ctx, response);
      if (zone != null) {
        CompleteDNS dns = new CompleteDNS();
        dns.domain = zone;
        dns.subdomains = new ArrayList<DNSZone>();
        JSONObject subdomains =
            (response.has("subdomains") ? response.getJSONObject("subdomains") : null);
        if (subdomains != null) {
          JSONArray domains =
              (subdomains.has("domains") ? subdomains.getJSONArray("domains") : null);
          if (domains != null) {
            listSubdomains(ctx, dns.subdomains, zone, domains);
          }
        }
        return dns;
      }
    } catch (JSONException e) {
      std.error("getCompleteDNS(): JSON error parsing response: " + e.getMessage());
      e.printStackTrace();
      throw new CloudException(
          CloudErrorType.COMMUNICATION, 200, "invalidResponse", "JSON error parsing " + response);
    }
    return null;
  } finally {
    if (std.isTraceEnabled()) {
      std.trace("exit - " + RackspaceCloudDNS.class.getName() + ".getCompleteDNS()");
    }
  }
}
@Audit
@Transactional(readOnly = false, propagation = Propagation.REQUIRED)
@Override
public void reportSpammersContent(User spammer, User reporter, String comment) {
  if (log.isInfoEnabled()) {
    log.info(
        "Reporting SPAM Abuse on all content of this spammer: "
            + spammer.getUsername()
            + ". Reporter is: "
            + reporter.getUsername());
  }
  final Date reportDate = new Date();
  Iterable<Document> docs = documentManager.getUserDocuments(spammer, documentStates);
  for (Document document : docs) {
    if (log.isTraceEnabled()) {
      log.trace("Report spam of document: " + document.getDocumentID());
    }
    reportSpam(document, reporter, comment, reportDate);
  }
  Iterable<ForumMessage> messages = forumManager.getUserMessages(spammer);
  for (ForumMessage message : messages) {
    if (log.isTraceEnabled()) {
      log.trace(
          "Report spam of message: "
              + message.getID()
              + ", threadId: "
              + message.getForumThreadID());
    }
    // TODO: Check how root messages (threads) are handled
    reportSpam(message, reporter, comment, reportDate);
  }
  List<Blog> blogs = blogManager.getExplicitlyEntitledBlogs(spammer);
  for (Blog blog : blogs) {
    if (blog.isUserBlog()) {
      Iterator<BlogPost> blogPosts = blogManager.getBlogPosts(blog);
      while (blogPosts.hasNext()) {
        BlogPost blogPost = blogPosts.next();
        if (log.isTraceEnabled()) {
          log.trace("Report spam for Blog post, id: " + blogPost.getID());
        }
        reportSpam(blogPost, reporter, comment, reportDate);
      }
    }
  }
  Iterator<Favorite> favorites =
      favoriteManager.getUserFavorites(spammer, Sets.newHashSet(externalUrlObjectType));
  while (favorites.hasNext()) {
    Favorite favorite = favorites.next();
    JiveObject favoritedObject = favorite.getObjectFavorite().getFavoritedObject();
    if (log.isTraceEnabled()) {
      log.trace("Report spam Favorite (Bookmark) to external URL: " + favorite.getID());
      log.trace("Favorited object: " + favoritedObject);
    }
    reportSpam(favoritedObject, reporter, comment, reportDate);
  }
}
/**
 * Override to handle interception
 *
 * @param invocation
 * @return
 * @throws Exception
 */
public String intercept(ActionInvocation invocation) throws Exception {
  String _logger_method = "intercept";
  if (log.isTraceEnabled()) {
    log.trace("> " + _logger_method);
  }
  String result;
  try {
    String nextPage = checkAccess(invocation);
    if (nextPage == null) {
      Logger invocationLog = null;
      if (log.isDebugEnabled()) {
        invocationLog = Logger.getLogger(invocation.getAction().getClass());
        invocationLog.debug("> " + invocation.getProxy().getMethod());
      }
      result = invocation.invoke();
      if (log.isDebugEnabled()) {
        invocationLog.debug("< " + invocation.getProxy().getMethod());
      }
    } else {
      result = NavConsts.POPUP_TIME_OUT;
    }
  } catch (Exception e) {
    String excID = Long.toString(System.currentTimeMillis());
    BaseAction baseAction = (BaseAction) invocation.getAction();
    baseAction.addFieldError("errorID", "Error ID: " + excID);
    publishException(invocation, new ExceptionHolder(e));
    return NavConsts.POPUP_GLOBAL_ERROR;
  }
  if (log.isTraceEnabled()) {
    log.trace("< " + _logger_method);
  }
  return result;
}
@Test
@Ignore
public void testSlopPushers() throws Exception {
  Set<Integer> failedNodes = getFailedNodes();
  Multimap<Integer, ByteArray> failedKeys = populateStore(failedNodes);
  reviveNodes(failedNodes);
  for (int i = 0; i < 5; i++) {
    for (StreamingSlopPusherJob job : slopPusherJobs) {
      if (logger.isTraceEnabled()) logger.trace("Started slop pusher job " + job);
      job.run();
      if (logger.isTraceEnabled()) logger.trace("Finished slop pusher job " + job);
    }
  }
  for (Map.Entry<Integer, ByteArray> entry : failedKeys.entries()) {
    List<Versioned<byte[]>> values = store.get(entry.getValue(), null);
    assertTrue(
        "slop entry should be pushed for "
            + entry.getValue()
            + ", preflist "
            + keysToNodes.get(entry.getValue()),
        values.size() > 0);
    assertEquals(
        "slop entry should be correct for " + entry.getValue(),
        keyValues.get(entry.getValue()),
        new ByteArray(values.get(0).getValue()));
  }
}
@Override
protected void runInContext() {
  HttpContext context = new BasicHttpContext(null);
  try {
    while (!Thread.interrupted() && _conn.isOpen()) {
      _httpService.handleRequest(_conn, context);
      _conn.close();
    }
  } catch (ConnectionClosedException ex) {
    if (s_logger.isTraceEnabled()) {
      s_logger.trace("ApiServer: Client closed connection");
    }
  } catch (IOException ex) {
    if (s_logger.isTraceEnabled()) {
      s_logger.trace("ApiServer: IOException - " + ex);
    }
  } catch (HttpException ex) {
    s_logger.warn("ApiServer: Unrecoverable HTTP protocol violation: " + ex);
  } finally {
    try {
      _conn.shutdown();
    } catch (IOException ignore) {
    }
  }
}
@Override
@javax.ejb.TransactionAttribute(javax.ejb.TransactionAttributeType.SUPPORTS)
public Attachment getUploadedFile(Long userId, long id) {
  if (log.isTraceEnabled()) {
    log.trace(">> getUploadedFile(): id=" + id);
  }
  try {
    System.out.println("getUploadedFile: userId = " + userId + " ; id = " + id);
    Attachment att = em.find(Attachment.class, id);
    // if (!checkDownloadRights(userId, att.getId())) {
    //   return null;
    // }
    if (log.isTraceEnabled()) {
      log.trace("<< getUploadedFile(): " + att);
    }
    System.out.println("att = " + att);
    return att;
  } catch (Exception ex) {
    if (log.isTraceEnabled()) {
      log.trace("<< getUploadedFile()");
    }
  }
  return null;
}
/**
 * Note that this synchronization block only matters for the loader
 *
 * @param min_item_count
 * @param clientId - Will use null if less than zero
 * @param exclude
 * @return
 */
private synchronized UserId getRandomUserId(int min_item_count, int clientId, UserId... exclude) {
  // We use the UserIdGenerator to ensure that we always select the next UserId for
  // a given client from the same set of UserIds
  if (this.randomItemCount == null) {
    this.randomItemCount = new FlatHistogram<Long>(this.rng, this.users_per_itemCount);
  }
  if (this.userIdGenerator == null) this.initializeUserIdGenerator(clientId);
  UserId user_id = null;
  int tries = 1000;
  final long num_users = this.userIdGenerator.getTotalUsers() - 1;
  while (user_id == null && tries-- > 0) {
    // We first need to figure out how many items our seller needs to have
    long itemCount = -1;
    // assert(min_item_count < this.users_per_item_count.getMaxValue());
    while (itemCount < min_item_count) {
      itemCount = this.randomItemCount.nextValue();
    } // WHILE

    // Set the current item count and then choose a random position
    // between where the generator is currently at and where it ends
    this.userIdGenerator.setCurrentItemCount((int) itemCount);
    long cur_position = this.userIdGenerator.getCurrentPosition();
    long new_position = rng.number(cur_position, num_users);
    user_id = this.userIdGenerator.seekToPosition((int) new_position);
    if (user_id == null) continue;

    // Make sure that we didn't select the same UserId as the one we were
    // told to exclude.
    if (exclude != null && exclude.length > 0) {
      for (UserId ex : exclude) {
        if (ex != null && ex.equals(user_id)) {
          if (LOG.isTraceEnabled()) LOG.trace("Excluding " + user_id);
          user_id = null;
          break;
        }
      } // FOR
      if (user_id == null) continue;
    }

    // If we don't care about skew, then we're done right here
    if (LOG.isTraceEnabled()) LOG.trace("Selected " + user_id);
    break;
  } // WHILE
  if (user_id == null && LOG.isDebugEnabled()) {
    LOG.warn(
        String.format(
            "Failed to select a random UserId "
                + "[minItemCount=%d, clientId=%d, exclude=%s, totalPossible=%d, currentPosition=%d]",
            min_item_count,
            clientId,
            Arrays.toString(exclude),
            this.userIdGenerator.getTotalUsers(),
            this.userIdGenerator.getCurrentPosition()));
  }
  return (user_id);
}
/**
 * Handles HTTP GET
 *
 * @param request servlet request
 * @param response servlet response
 * @throws IOException input/output error
 * @throws ServletException on error
 */
public void doGet(HttpServletRequest request, HttpServletResponse response)
    throws IOException, ServletException {
  if (log.isTraceEnabled()) {
    log.trace(">doGet()");
  }
  check(request, response);
  if (log.isTraceEnabled()) {
    log.trace("<doGet()");
  }
}
/**
 * Reads registry entries assigned to this server. If <code>lastUpdate</code> is different from 0,
 * only the changed entries with last_update greater than the given one and with the given status
 * are returned.
 *
 * @param lastUpdate the lastUpdate. 0 if all entries must be returned
 * @param status the wanted status
 * @throws com.funambol.pushlistener.service.registry.dao.DataAccessException if an error occurs
 * @return the read entries
 */
public List<RegistryEntry> getEntries(long lastUpdate, String status) throws DataAccessException {
  Connection con = null;
  PreparedStatement ps = null;
  ResultSet rs = null;
  List<RegistryEntry> entries = new ArrayList<RegistryEntry>();
  try {
    con = getConnection();
    con.setReadOnly(true);
    if (lastUpdate != 0) {
      //
      // Read only changed registry entries
      //
      if (log.isTraceEnabled()) {
        log.trace("Executing '" + queryDesc.getReadChangedEntriesQuery() + "'");
      }
      ps = con.prepareStatement(queryDesc.getReadChangedEntriesQuery());
      ps.setLong(1, lastUpdate);
      ps.setString(2, status);
    } else {
      //
      // Read all registry entries
      //
      if (log.isTraceEnabled()) {
        log.trace("Executing '" + queryDesc.getReadActiveEntriesQuery() + "'");
      }
      ps = con.prepareStatement(queryDesc.getReadActiveEntriesQuery());
      ps.setString(1, "Y");
    }
    rs = ps.executeQuery();
    while (rs.next()) {
      entries.add(resultSetToRegistryEntry(rs));
    }
  } catch (Exception e) {
    throw new DataAccessException(e);
  } finally {
    DBTools.close(con, ps, rs);
  }
  return entries;
}
/**
 * Stores uploaded files in the database. If there are several files, they are compressed into a
 * zip archive (with filename = user_login.zip).
 *
 * @param user Owner of the attachment
 * @param files List of uploaded files to be saved into database
 * @return the attachment stored in the database, or null if the operation failed.
 */
@Override
public Attachment uploadFiles(User user, List<UploadedFile> files, String tags) {
  System.out.println("att man: uploadfiles");
  if (log.isTraceEnabled()) {
    log.trace(">> uploadFiles(): " + files);
  }
  if (!checkUploadRights(user)) {
    return null;
  }
  System.out.println("after checking");
  if (files.isEmpty()) {
    if (log.isDebugEnabled()) {
      log.debug("List of files is empty! Nothing to compress.");
    }
    return null;
  }
  System.out.println("files are not empty");
  try {
    System.out.println("trying to prepare attachment");
    Attachment att = prepareAttachment(user, files, tags);
    if (att.getSize() > MAX_ZIP_SIZE) {
      if (log.isTraceEnabled()) {
        log.trace("File too large!");
      }
      throw new IOException("File too large.");
    }
    em.persist(att);
    Set<User> uset = new HashSet<>();
    uset.add(user);
    att.setUser(uset);
    em.merge(att);
    if (log.isTraceEnabled()) {
      log.trace("<< uploadFiles(): " + att);
    }
    return att;
  } catch (Exception ex) {
    log.error("uploadFiles(): Failed to upload files. ", ex);
    return null;
  }
}
@Test
@Ignore
public void testDeleteHandoff() throws Exception {
  populateStore(Sets.<Integer>newHashSet());
  Map<ByteArray, Version> versions = Maps.newHashMap();
  for (ByteArray key : keyValues.keySet())
    versions.put(key, store.get(key, null).get(0).getVersion());
  Set<Integer> failedNodes = getFailedNodes();
  Multimap<Integer, ByteArray> failedKeys = ArrayListMultimap.create();
  for (ByteArray key : keysToNodes.keySet()) {
    Iterable<Integer> nodes = keysToNodes.get(key);
    for (int i = 0; i < REPLICATION_FACTOR; i++) {
      int node = Iterables.get(nodes, i);
      if (failedNodes.contains(node)) {
        failedKeys.put(node, key);
        break;
      }
    }
  }
  for (Map.Entry<Integer, ByteArray> failedKey : failedKeys.entries()) {
    try {
      store.delete(failedKey.getValue(), versions.get(failedKey.getValue()));
    } catch (Exception e) {
      if (logger.isTraceEnabled()) logger.trace(e, e);
    }
  }
  Set<ByteArray> slopKeys = makeSlopKeys(failedKeys, Slop.Operation.DELETE);
  Set<ByteArray> keysInSlops = Sets.newHashSet();
  for (Store<ByteArray, Slop, byte[]> slopStore : slopStores.values()) {
    Map<ByteArray, List<Versioned<Slop>>> res = slopStore.getAll(slopKeys, null);
    for (Map.Entry<ByteArray, List<Versioned<Slop>>> entry : res.entrySet()) {
      Slop slop = entry.getValue().get(0).getValue();
      keysInSlops.add(slop.getKey());
      if (logger.isTraceEnabled()) logger.trace(slop);
    }
  }
  for (Map.Entry<Integer, ByteArray> failedKey : failedKeys.entries())
    assertTrue(
        "delete operation for " + failedKey.getValue() + " should be handed off",
        keysInSlops.contains(failedKey.getValue()));
}
private MyDouble getValue(final Id key, final double time, final Map<Id, WeightsPerTimeBin> map) {
  synchronized (map) {
    if (log.isTraceEnabled()) log.trace("getting value at " + key);
    WeightsPerTimeBin val = map.get(key);
    if (val == null) {
      if (log.isTraceEnabled()) log.trace("creating value at " + key);
      val = new WeightsPerTimeBin();
      map.put(key, val);
    }
    return val.getValue(time);
  }
}
@javax.ejb.TransactionAttribute(javax.ejb.TransactionAttributeType.SUPPORTS)
private boolean isOwner(User u, Attachment a) {
  for (User usr : a.getUser()) {
    if (usr.getId().equals(u.getId())) {
      if (log.isTraceEnabled()) {
        log.trace("<< isOwner(): true // owner of the file");
      }
      return true;
    }
  }
  if (log.isTraceEnabled()) {
    log.trace("<< isOwner(): false");
  }
  return false;
}
@Override
public Attachment shareFile(Long attachmentId, long who, Long with) {
  if (log.isTraceEnabled()) {
    log.trace(">> shareFile(): attachmentId=" + attachmentId + ", who=" + who + ", with=" + with);
  }
  Attachment att = em.find(Attachment.class, attachmentId);
  if (att == null) {
    if (log.isTraceEnabled()) {
      log.trace("<< shareFile(): null - no such attachment");
    }
    return null;
  }
  boolean canShare = false;
  try {
    // canShare |= um.isAdmin(who);
    User actor = em.find(User.class, who);
    if (actor != null && actor.getUserGroup() == 1) {
      canShare = true;
    }
  } catch (Exception ex) {
  }
  if (!canShare) {
    for (User u : att.getUser()) {
      if (u.getId() == who) {
        canShare = true;
        break;
      }
    }
  }
  if (!canShare) {
    if (log.isTraceEnabled()) {
      log.trace("<< shareFile(): null - operation is not permitted");
    }
    return null;
  }
  User w = em.find(User.class, with);
  if (w == null) {
    if (log.isTraceEnabled()) {
      log.trace("<< shareFile(): cannot share with nobody, and owners list was not modified");
    }
    return att;
  }
  att.getUser().add(w);
  em.persist(att);
  return att;
}
public synchronized boolean touchSession(long sessionId, int timeout) {
  if (LOG.isTraceEnabled()) {
    ZooTrace.logTraceMessage(
        LOG,
        ZooTrace.CLIENT_PING_TRACE_MASK,
        "SessionTrackerImpl --- Touch session: 0x"
            + Long.toHexString(sessionId)
            + " with timeout "
            + timeout);
  }
  SessionImpl s = sessionsById.get(sessionId);
  if (s == null) {
    return false;
  }
  long expireTime = roundToInterval(System.currentTimeMillis() + timeout);
  if (s.tickTime >= expireTime) {
    // Nothing needs to be done
    return true;
  }
  SessionSet set = sessionSets.get(s.tickTime);
  if (set != null) {
    set.sessions.remove(s);
  }
  s.tickTime = expireTime;
  set = sessionSets.get(s.tickTime);
  if (set == null) {
    set = new SessionSet();
    sessionSets.put(expireTime, set);
  }
  set.sessions.add(s);
  return true;
}
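// Hedged sketch, not part of the snippet above: touchSession relies on a roundToInterval helper
// so that every session expiring within the same tick lands in a single SessionSet bucket keyed
// by that tick. In ZooKeeper's SessionTrackerImpl this helper is assumed to round up to the next
// expirationInterval boundary, roughly like this:
private long roundToInterval(long time) {
  // rounding up (rather than down) gives each session up to one extra interval of grace
  return (time / expirationInterval + 1) * expirationInterval;
}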
/**
 * Reads active registry entries assigned to this server.
 *
 * @return the list of the active registry entries associated to this server
 * @throws com.funambol.pushlistener.service.registry.dao.DataAccessException if an error occurs
 */
public List<RegistryEntry> getActiveEntries() throws DataAccessException {
  Connection con = null;
  PreparedStatement ps = null;
  ResultSet rs = null;
  List<RegistryEntry> entries = new ArrayList<RegistryEntry>();
  try {
    if (log.isTraceEnabled()) {
      log.trace("Executing '" + queryDesc.getReadActiveEntriesQuery() + "'");
    }
    con = getConnection();
    con.setReadOnly(true);
    ps = con.prepareStatement(queryDesc.getReadActiveEntriesQuery());
    ps.setString(1, "Y");
    rs = ps.executeQuery();
    while (rs.next()) {
      entries.add(resultSetToRegistryEntry(rs));
    }
  } catch (Exception e) {
    throw new DataAccessException(e);
  } finally {
    DBTools.close(con, ps, rs);
  }
  return entries;
}
@Override
public void execute() throws IOException {
  Attributes remoteAttributes = remoteIndex.get(path);
  Path rootDirectory = Registry.getInstance().getRootDirectory();
  try {
    // TODO get rid of getRootDirectory, stupid (non)dependency
    new NetworkClient(rootDirectory).request(remoteAttributes.getAddress(), path);
  } catch (URISyntaxException e) {
    logger.error(e);
  }
  // the trace call belongs inside the guard; previously the guard block was empty and the
  // message was logged unconditionally
  if (logger.isTraceEnabled()) {
    logger.trace(
        String.format("Setting time on %1s to %2s", path, remoteAttributes.lastModifiedTime()));
  }
  rootDirectory
      .resolve(path)
      .setAttribute(
          "basic:lastModifiedTime",
          FileTime.fromMillis(remoteAttributes.lastModifiedTime()),
          LinkOption.NOFOLLOW_LINKS);
  localIndex.addFromRemote(path, remoteAttributes);
}
@Override
public void onUserStatusChange(
    User user,
    String nick,
    String statusId,
    String status,
    Map<String, String> extra,
    Object userStatusHandle,
    boolean realTimeEvent) {
  String id = user.getId();
  if (logger.isTraceEnabled()) {
    // just use trace as it contains the movements
    logger.trace(id + " has new info");
  }
  if (extra == null) {
    extra = new HashMap<String, String>();
  }
  if (nick != null) {
    extra.put("nick", nick);
  }
  if (status != null) {
    extra.put("status", status);
    extra.put("statusId", statusId);
  }
  this.listener.smartUpdate(userStatusHandle, extra, !realTimeEvent);
}
public void sendData(
    final String connectionID, final TransferDescription description, final byte[] data)
    throws IOException {
  final JID connectionJID = currentLocalJID;
  if (connectionJID == null) throw new IOException("not connected to an XMPP server");
  final IByteStreamConnection connection =
      getCurrentConnection(connectionID, description.getRecipient());
  if (connection == null)
    throw new IOException(
        "not connected to "
            + description.getRecipient()
            + " [connection identifier="
            + connectionID
            + "]");
  description.setSender(connectionJID);
  if (LOG.isTraceEnabled())
    LOG.trace(
        "send "
            + description
            + ", data len="
            + data.length
            + " byte(s), connection="
            + connection);
  sendInternal(connection, description, data);
}
/**
 * Creates a test signature and verifies it.
 *
 * @param privateKey Private key to sign with
 * @param publicKey Public key to verify with
 * @param signatureProvider Name of provider to sign with
 * @throws NoSuchAlgorithmException In case the key or signature algorithm is unknown
 * @throws NoSuchProviderException In case the supplied provider name is unknown or BC is not
 *     installed
 * @throws InvalidKeyException If signature verification failed or the key was invalid
 * @throws SignatureException If the signature could not be made or verified correctly
 */
public static void testSignAndVerify(
    PrivateKey privateKey, PublicKey publicKey, String signatureProvider)
    throws NoSuchAlgorithmException, NoSuchProviderException, InvalidKeyException,
        SignatureException {
  final byte[] input = "Lillan gick pa vagen ut, motte dar en katt...".getBytes();
  final String sigAlg = suggestSigAlg(publicKey);
  if (sigAlg == null) {
    throw new NoSuchAlgorithmException("Unknown key algorithm: " + publicKey.getAlgorithm());
  }
  if (LOG.isDebugEnabled()) {
    LOG.debug("Testing keys with algorithm: " + publicKey.getAlgorithm());
    LOG.debug("testSigAlg: " + sigAlg);
    LOG.debug("provider: " + signatureProvider);
    LOG.trace("privateKey: " + privateKey);
    LOG.trace("privateKey class: " + privateKey.getClass().getName());
    LOG.trace("publicKey: " + publicKey);
    LOG.trace("publicKey class: " + publicKey.getClass().getName());
  }
  final Signature signSignature = Signature.getInstance(sigAlg, signatureProvider);
  signSignature.initSign(privateKey);
  signSignature.update(input);
  byte[] signBA = signSignature.sign();
  if (LOG.isTraceEnabled()) {
    LOG.trace("Created signature of size: " + signBA.length);
    LOG.trace("Created signature: " + new String(Hex.encode(signBA)));
  }
  final Signature verifySignature = Signature.getInstance(sigAlg, "BC");
  verifySignature.initVerify(publicKey);
  verifySignature.update(input);
  if (!verifySignature.verify(signBA)) {
    throw new InvalidKeyException("Test signature inconsistent");
  }
}
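// Hedged usage sketch, not part of the original class: since testSignAndVerify is static, a caller
// can exercise it with a freshly generated key pair. This assumes the Bouncy Castle provider is on
// the classpath and that suggestSigAlg knows the chosen key algorithm (RSA here); the 2048-bit key
// size is an arbitrary choice for the example.
public static void main(String[] args) throws Exception {
  java.security.Security.addProvider(new org.bouncycastle.jce.provider.BouncyCastleProvider());
  java.security.KeyPairGenerator kpg = java.security.KeyPairGenerator.getInstance("RSA", "BC");
  kpg.initialize(2048);
  java.security.KeyPair keyPair = kpg.generateKeyPair();
  // throws InvalidKeyException/SignatureException on failure, returns silently on success
  testSignAndVerify(keyPair.getPrivate(), keyPair.getPublic(), "BC");
}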