/**
 * @param args optional; args[0] overrides the RabbitMQ host (default "localhost")
 */
public static void main(String[] args) {
  System.out.println(Constants.HEADER);
  String rabbitmqHost = "localhost";
  if (args.length > 0) rabbitmqHost = args[0];
  ConnectionFactory factory = new ConnectionFactory();
  factory.setHost(rabbitmqHost);
  try {
    Connection connection = factory.newConnection();
    System.out.println("Connected: " + rabbitmqHost);
    Channel channel = connection.createChannel();
    channel.exchangeDeclare(Constants.exchange, "direct", false);
    Stats stats = new Stats();
    JSONWriter rabbitmqJson = new JSONWriter();
    int msgCount = 0;
    for (;;) {
      stats.Update();
      String statMsg = rabbitmqJson.write(stats);
      System.out.println(stats.toString());
      channel.basicPublish(Constants.exchange, Constants.routingKey, null, statMsg.getBytes());
      ++msgCount;
      if (System.in.available() > 0) break; // stop when the user presses Enter
      Thread.sleep(1000);
    }
    channel.close();
    System.out.println("Done: " + msgCount + " messages sent");
    connection.close();
  } catch (Exception e) {
    e.printStackTrace();
  }
}
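// A minimal consumer-side sketch for the publisher above, assuming the same
// Constants.exchange and Constants.routingKey; the queue is server-generated
// (exclusive, auto-delete). This is illustrative, not part of the original program.
public static void consume(String host) throws Exception {
  ConnectionFactory factory = new ConnectionFactory();
  factory.setHost(host);
  Connection connection = factory.newConnection();
  Channel channel = connection.createChannel();
  channel.exchangeDeclare(Constants.exchange, "direct", false);
  String queue = channel.queueDeclare().getQueue();
  channel.queueBind(queue, Constants.exchange, Constants.routingKey);
  channel.basicConsume(queue, true, new DefaultConsumer(channel) {
    @Override
    public void handleDelivery(String consumerTag, Envelope envelope,
                               AMQP.BasicProperties properties, byte[] body) {
      System.out.println("Received: " + new String(body));
    }
  });
}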
@Override
public Void visitSendingExchange(Exchange exchange, Wrapper wrapper) throws RuntimeException {
  Stats stats = wrapper.getStats();
  stats.addCost(exchange.getAggregateSendCost());
  stats.addMaxWidth(exchange.getMaxSendWidth());
  return super.visitSendingExchange(exchange, wrapper);
}
private Stats getLastCallTimeCollection(
    String requestName, Map<String, Map<String, Number>> fallbackValues, long fallbackStartTime)
    throws IOException {
  File dataFile = new File(context.getResourceDataDirectory(), requestName);
  if (!dataFile.exists()) {
    return Stats.fromMap(fallbackValues, requestName, System.currentTimeMillis(), fallbackStartTime);
  } else {
    ObjectInputStream in = null;
    try {
      in = new ObjectInputStream(new FileInputStream(dataFile));
      Stats stats = (Stats) in.readObject();
      if (stats.serverStartTime == 0) {
        // serverStartTime == 0 means the data file comes from an old version of the plugin.
        // In that case fall back to the old behavior that assumed no server restarts; once the
        // stats are saved again with the start time remembered, the new, correct behavior
        // kicks in from the next collection.
        stats.serverStartTime = fallbackStartTime;
      }
      return stats;
    } catch (IOException e) {
      throw new IOException("Couldn't read the stored calltime data from file " + dataFile + ".", e);
    } catch (ClassNotFoundException e) {
      throw new IllegalStateException("Couldn't find plugin API classes. This is serious!", e);
    } finally {
      StreamUtil.safeClose(in);
    }
  }
}
private void eatMemory(int callIndex, File foFile, int replicatorRepeats) throws Exception {
  Source src = new StreamSource(foFile);
  Transformer transformer = replicatorTemplates.newTransformer();
  transformer.setParameter("repeats", Integer.valueOf(replicatorRepeats));
  OutputStream out = new NullOutputStream(); // discard the output (write to /dev/null)
  try {
    FOUserAgent userAgent = fopFactory.newFOUserAgent();
    userAgent.setBaseURL(foFile.getParentFile().toURI().toURL().toExternalForm());
    Fop fop = fopFactory.newFop(MimeConstants.MIME_PDF, userAgent, out);
    Result res = new SAXResult(fop.getDefaultHandler());
    transformer.transform(src, res);
    stats.notifyPagesProduced(fop.getResults().getPageCount());
    if (callIndex == 0) {
      System.out.println(foFile.getName() + " generates " + fop.getResults().getPageCount() + " pages.");
    }
    stats.checkStats();
  } finally {
    IOUtils.closeQuietly(out);
  }
}
@Test
public void shouldFormatTime() {
  assertThat(Stats.formatTime(1 * 60 * 60 * 1000 + 2 * 60 * 1000 + 3 * 1000 + 400))
      .isEqualTo("1:02:03.400s");
  assertThat(Stats.formatTime(2 * 60 * 1000 + 3 * 1000 + 400)).isEqualTo("2:03.400s");
  assertThat(Stats.formatTime(3 * 1000 + 400)).isEqualTo("3.400s");
  assertThat(Stats.formatTime(400)).isEqualTo("0.400s");
}
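// Stats.formatTime itself is not included in this collection; a sketch consistent
// with the expectations above (hours and minutes are omitted when zero, trailing
// fields are two-digit padded, millis always three digits):
static String formatTime(long millis) {
  long h = millis / 3600000;
  long m = (millis / 60000) % 60;
  long s = (millis / 1000) % 60;
  long ms = millis % 1000;
  if (h > 0) return String.format("%d:%02d:%02d.%03ds", h, m, s, ms);
  if (m > 0) return String.format("%d:%02d.%03ds", m, s, ms);
  return String.format("%d.%03ds", s, ms);
}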
@Override
public MarshalledEntry load(Object key) {
  if (!isValidKeyType(key)) {
    return null;
  }
  EntityManager em = emf.createEntityManager();
  try {
    EntityTransaction txn = em.getTransaction();
    long txnBegin = timeService.time();
    txn.begin();
    try {
      long entityFindBegin = timeService.time();
      Object entity = em.find(configuration.entityClass(), key);
      stats.addEntityFind(timeService.time() - entityFindBegin);
      try {
        if (entity == null) return null;
        InternalMetadata m = null;
        if (configuration.storeMetadata()) {
          byte[] keyBytes;
          try {
            keyBytes = marshaller.objectToByteBuffer(key);
          } catch (Exception e) {
            throw new JpaStoreException("Failed to marshall key", e);
          }
          long metadataFindBegin = timeService.time();
          MetadataEntity metadata = em.find(MetadataEntity.class, keyBytes);
          stats.addMetadataFind(timeService.time() - metadataFindBegin);
          if (metadata != null && metadata.getMetadata() != null) {
            try {
              m = (InternalMetadata) marshaller.objectFromByteBuffer(metadata.getMetadata());
            } catch (Exception e) {
              throw new JpaStoreException("Failed to unmarshall metadata", e);
            }
            if (m.isExpired(timeService.wallClockTime())) {
              return null;
            }
          }
        }
        if (trace) log.trace("Loaded " + entity + " (" + m + ")");
        return marshallerEntryFactory.newMarshalledEntry(key, entity, m);
      } finally {
        try {
          txn.commit();
          stats.addReadTxCommitted(timeService.time() - txnBegin);
        } catch (Exception e) {
          stats.addReadTxFailed(timeService.time() - txnBegin);
          throw new JpaStoreException("Failed to load entry", e);
        }
      }
    } finally {
      if (txn != null && txn.isActive()) txn.rollback();
    }
  } finally {
    em.close();
  }
}
private void eatMemory(File foFile, int runRepeats, int replicatorRepeats) throws Exception {
  stats = new Stats();
  for (int i = 0; i < runRepeats; i++) {
    eatMemory(i, foFile, replicatorRepeats);
    stats.progress(i, runRepeats);
  }
  stats.dumpFinalStats();
  System.out.println(stats.getGoogleChartURL());
}
@Override
public void write(MarshalledEntry entry) {
  EntityManager em = emf.createEntityManager();
  Object entity = entry.getValue();
  MetadataEntity metadata =
      configuration.storeMetadata()
          ? new MetadataEntity(
              entry.getKeyBytes(),
              entry.getMetadataBytes(),
              entry.getMetadata() == null ? Long.MAX_VALUE : entry.getMetadata().expiryTime())
          : null;
  try {
    if (!configuration.entityClass().isAssignableFrom(entity.getClass())) {
      throw new JpaStoreException(
          String.format(
              "This cache is configured with JPA CacheStore to only store values of type %s - cannot write %s = %s",
              configuration.entityClass().getName(), entity, entity.getClass().getName()));
    } else {
      EntityTransaction txn = em.getTransaction();
      Object id = emf.getPersistenceUnitUtil().getIdentifier(entity);
      if (!entry.getKey().equals(id)) {
        throw new JpaStoreException(
            "Entity id value must equal the key of the cache entry: "
                + "key = [" + entry.getKey() + "], id = [" + id + "]");
      }
      long txnBegin = timeService.time();
      try {
        if (trace) log.trace("Writing " + entity + "(" + toString(metadata) + ")");
        txn.begin();
        long entityMergeBegin = timeService.time();
        em.merge(entity);
        stats.addEntityMerge(timeService.time() - entityMergeBegin);
        if (metadata != null && metadata.hasBytes()) {
          long metadataMergeBegin = timeService.time();
          em.merge(metadata);
          stats.addMetadataMerge(timeService.time() - metadataMergeBegin);
        }
        txn.commit();
        stats.addWriteTxCommited(timeService.time() - txnBegin);
      } catch (Exception e) {
        stats.addWriteTxFailed(timeService.time() - txnBegin);
        throw new JpaStoreException("Exception caught in write()", e);
      } finally {
        if (txn != null && txn.isActive()) txn.rollback();
      }
    }
  } finally {
    em.close();
  }
}
public String stats() {
  StringBuilder builder = new StringBuilder();
  builder.append("===============thread pool stats=========================================\r\n");
  builder.append(
      String.format(
          "%14s%10s%8s%8s%11s%10s%12s\r\n",
          "name", "threads", "queue", "active", "rejected", "largest", "completed"));
  for (Stats stat : stats) {
    builder.append(stat.stats());
  }
  return builder.toString();
}
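// The per-pool row format is not part of this collection; a plausible Stats.stats()
// matching the header's column widths might look like this (all field names here
// are assumptions for illustration):
public String stats() {
  return String.format(
      "%14s%10d%8d%8d%11d%10d%12d\r\n",
      name, threads, queueSize, active, rejected, largestPoolSize, completed);
}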
@Override
public void readFrom(StreamInput in) throws IOException {
  totalStats = Stats.readStats(in);
  openContexts = in.readVLong();
  if (in.readBoolean()) {
    int size = in.readVInt();
    groupStats = new HashMap<String, Stats>(size);
    for (int i = 0; i < size; i++) {
      groupStats.put(in.readString(), Stats.readStats(in));
    }
  }
}
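// The serializer implied by readFrom above; a hedged sketch assuming the usual
// symmetric Streamable contract (a Stats.writeTo mirroring Stats.readStats):
@Override
public void writeTo(StreamOutput out) throws IOException {
  totalStats.writeTo(out);
  out.writeVLong(openContexts);
  if (groupStats == null) {
    out.writeBoolean(false);
  } else {
    out.writeBoolean(true);
    out.writeVInt(groupStats.size());
    for (Map.Entry<String, Stats> entry : groupStats.entrySet()) {
      out.writeString(entry.getKey());
      entry.getValue().writeTo(out);
    }
  }
}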
@Override
public Void visitOp(PhysicalOperator op, Wrapper wrapper) {
  if (op instanceof HasAffinity) {
    wrapper.addEndpointAffinity(((HasAffinity) op).getOperatorAffinity());
  }
  Stats stats = wrapper.getStats();
  stats.addCost(op.getCost());
  for (PhysicalOperator child : op) {
    child.accept(this, wrapper);
  }
  return null;
}
private Object record(InvocationContext invocationContext, Method callback) throws Exception {
  invocations.incrementAndGet();
  Stats stats = enabled ? stats(invocationContext, callback) : null;
  long start = System.nanoTime();
  try {
    return invocationContext.proceed();
  } finally {
    long time = System.nanoTime() - start;
    // compute the nanosecond delta first, then convert; converting the raw
    // timestamps before subtracting would make the measurement less accurate
    time = millis(time);
    if (stats != null) stats.record(time);
    invocationTime.addAndGet(time);
  }
}
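// The millis(...) helper used above is not shown; a minimal sketch assuming it
// simply converts the nanosecond delta to milliseconds:
private static long millis(long nanos) {
  return java.util.concurrent.TimeUnit.NANOSECONDS.toMillis(nanos);
}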
@Override
public void run() {
  // Modify the card's content here once it's initialized
  if (card != null) {
    Calendar mCalendar = Calendar.getInstance();
    mCalendar.setTimeInMillis(System.currentTimeMillis());
    // Rewind the time to the beginning of today
    mCalendar.set(Calendar.HOUR_OF_DAY, 0);
    mCalendar.set(Calendar.MINUTE, 0);
    mCalendar.set(Calendar.SECOND, 0);
    mCalendar.set(Calendar.MILLISECOND, 0);
    // Get stats for today
    still.setText(
        Converters.readable_elapsed(
            Stats.getTimeStill(
                sContext.getContentResolver(), mCalendar.getTimeInMillis(), System.currentTimeMillis())));
    walking.setText(
        Converters.readable_elapsed(
            Stats.getTimeWalking(
                sContext.getContentResolver(), mCalendar.getTimeInMillis(), System.currentTimeMillis())));
    running.setText(
        Converters.readable_elapsed(
            Stats.getTimeRunning(
                sContext.getContentResolver(), mCalendar.getTimeInMillis(), System.currentTimeMillis())));
    biking.setText(
        Converters.readable_elapsed(
            Stats.getTimeBiking(
                sContext.getContentResolver(), mCalendar.getTimeInMillis(), System.currentTimeMillis())));
    driving.setText(
        Converters.readable_elapsed(
            Stats.getTimeVehicle(
                sContext.getContentResolver(), mCalendar.getTimeInMillis(), System.currentTimeMillis())));
  }
  // Reset the timer and schedule the next card refresh
  uiRefresher.postDelayed(uiChanger, refresh_interval);
}
@Test(timeout = 120000)
public void testFadviseSkippedForSmallReads() throws Exception {
  // start a cluster
  LOG.info("testFadviseSkippedForSmallReads");
  tracker.clear();
  Configuration conf = new HdfsConfiguration();
  conf.setBoolean(DFSConfigKeys.DFS_DATANODE_DROP_CACHE_BEHIND_READS_KEY, true);
  conf.setBoolean(DFSConfigKeys.DFS_DATANODE_DROP_CACHE_BEHIND_WRITES_KEY, true);
  MiniDFSCluster cluster = null;
  String TEST_PATH = "/test";
  int TEST_PATH_LEN = MAX_TEST_FILE_LEN;
  FSDataInputStream fis = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    cluster.waitActive();
    FileSystem fs = cluster.getFileSystem();
    // create new file
    createHdfsFile(fs, new Path(TEST_PATH), TEST_PATH_LEN, null);
    // Since the DataNode was configured with drop-behind, and we didn't
    // specify any policy, we should have done drop-behind.
    ExtendedBlock block =
        cluster
            .getNameNode()
            .getRpcServer()
            .getBlockLocations(TEST_PATH, 0, Long.MAX_VALUE)
            .get(0)
            .getBlock();
    String fadvisedFileName = cluster.getBlockFile(0, block).getName();
    Stats stats = tracker.getStats(fadvisedFileName);
    stats.assertDroppedInRange(0, TEST_PATH_LEN - WRITE_PACKET_SIZE);
    stats.clear();
    stats.assertNotDroppedInRange(0, TEST_PATH_LEN);
    // read file
    fis = fs.open(new Path(TEST_PATH));
    byte[] buf = new byte[17];
    fis.readFully(4096, buf, 0, buf.length);
    // we should not have dropped anything because of the small read.
    stats = tracker.getStats(fadvisedFileName);
    stats.assertNotDroppedInRange(0, TEST_PATH_LEN - WRITE_PACKET_SIZE);
  } finally {
    IOUtils.cleanup(null, fis);
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
/**
 * Produces a properly-formatted SVG document containing all the signature strokes
 * as simple lines.
 *
 * @param data signature stroke coordinates
 * @return the SVG document as a string
 */
public static String toSVG(int[][][] data) throws Exception {
  Stats stats = new jSignature.Tools.Stats(data);
  // The original was auto-converted from C# and left these 'var' declarations
  // unresolved; int[] is assumed here based on how the values are used below.
  int[] contentsize = stats.getContentSize();
  int[] limits = stats.getContentLimits();
  String outersvgtemplate =
      ("<?xml version='1.0' encoding='UTF-8' standalone='no'?>\r\n"
          + "<!DOCTYPE svg PUBLIC '-//W3C//DTD SVG 1.1//EN' 'http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd'>\r\n"
          + "<svg xmlns='http://www.w3.org/2000/svg' version='1.1' width='%s' height='%s'>%s\r\n"
          + "</svg>").replace('\'', '"');
  return String.format(
      outersvgtemplate,
      contentsize[0],
      contentsize[1],
      GetPathsSVGFragment(data, limits[0] * -1 + 1, limits[1] * -1 + 1));
}
public static void main(String[] args) throws IOException {
  PersistenceObjectModel.register();
  FileStore store = new FileStore(JdbmTest.FILE);
  @SuppressWarnings("unchecked")
  final TMap<String, TSet<PersistenceClass>> map = (TMap) store.getRoot();
  Transaction.setDefaultTrunk(map.getTrunk());
  Assert.assertEquals(COUNT + 1, map.size());
  Transaction.run(
      new Runnable() {
        public void run() {
          for (int i = 0; i < COUNT + 1; i++) {
            TSet<PersistenceClass> set = map.get("set" + i);
            Assert.assertEquals(1, set.size());
            for (PersistenceClass current : set) Assert.assertTrue(current.getInt() == i);
          }
        }
      });
  if (Stats.ENABLED) {
    // 2 for log file initialization
    Assert.assertEquals(2, Stats.getInstance().FileTotalWritten.get());
  }
  TSet<PersistenceClass> set = new TSet<PersistenceClass>();
  PersistenceClass object = new PersistenceClass();
  object.setInt(COUNT + 1);
  set.add(object);
  map.put("set" + (COUNT + 1), set);
  store.flush();
  Log.write("Inserted 1 more set.");
  if (Stats.ENABLED) {
    Assert.assertTrue(
        Stats.getInstance().FileTotalWritten.get() < Stats.getInstance().FileTotalRead.get() / 10);
    Stats.getInstance().writeAndReset();
  }
  store.close();
  PlatformAdapter.shutdown();
}
@Override
public GRCandidateProgram[] crossover(final CandidateProgram p1, final CandidateProgram p2) {
  final GRCandidateProgram child1 = (GRCandidateProgram) p1;
  final GRCandidateProgram child2 = (GRCandidateProgram) p2;
  final NonTerminalSymbol parseTree1 = child1.getParseTree();
  final NonTerminalSymbol parseTree2 = child2.getParseTree();
  final List<NonTerminalSymbol> nonTerminals1 = parseTree1.getNonTerminalSymbols();
  final List<NonTerminalSymbol> nonTerminals2 = parseTree2.getNonTerminalSymbols();
  final int point1 = rng.nextInt(nonTerminals1.size());
  final NonTerminalSymbol subtree1 = nonTerminals1.get(point1);
  // Generate a list of matching non-terminals from the second program.
  final List<NonTerminalSymbol> matchingNonTerminals = new ArrayList<NonTerminalSymbol>();
  for (final NonTerminalSymbol nt : nonTerminals2) {
    if (nt.getGrammarRule().equals(subtree1.getGrammarRule())) {
      matchingNonTerminals.add(nt);
    }
  }
  if (matchingNonTerminals.isEmpty()) {
    // No valid points in the second program; cancel the crossover.
    return null;
  } else {
    // Randomly choose a second point from the matching non-terminals.
    final int point2 = rng.nextInt(matchingNonTerminals.size());
    final NonTerminalSymbol subtree2 = matchingNonTerminals.get(point2);
    // Add crossover points to the stats manager.
    Stats.get().addData(XO_POINT1, point1);
    Stats.get().addData(XO_POINT2, point2);
    // Swap the non-terminals' children.
    final List<Symbol> temp = subtree1.getChildren();
    subtree1.setChildren(subtree2.getChildren());
    subtree2.setChildren(temp);
    // Add subtrees to the stats manager.
    Stats.get().addData(XO_SUBTREE1, subtree1);
    Stats.get().addData(XO_SUBTREE2, subtree2);
  }
  return new GRCandidateProgram[] {child1, child2};
}
@Test
public void testMeanVector() {
  Vector expected = Vector.of(2.0, 2.0, 2.0);
  mean = Stats.meanVector(set);
  assertThat(mean, equalTo(expected));
}
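// Stats.meanVector is not shown in this collection; a sketch of the component-wise
// mean it is expected to compute. The Vector method names (plus, divide) are
// assumptions for illustration.
public static Vector meanVector(Collection<Vector> set) {
  Vector sum = null;
  for (Vector v : set) {
    sum = (sum == null) ? v : sum.plus(v);
  }
  // e.g. {1,2,3}, {3,2,1}, {2,2,2} -> {2,2,2}
  return sum.divide(set.size());
}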
/**
 * Awards XP and gold for killed characters.
 *
 * @param charac collision characters
 */
public void KillXP(GameCharacter[] charac) {
  // When a character dies
  for (int i = 0; i < charac.length; i++) {
    if (charac[i].ISDEAD && charac[i].COLL_LISTENER) {
      charac[i].COLL_LISTENER = false;
      audDEATH.playAudio("death.wav");
      // Remove collision listeners
      for (int j = 0; j < FIREBALL.length; j++) {
        FIREBALL[j].removeCollChar(charac[i]);
      }
      for (int j = 0; j < SHOCK.length; j++) {
        SHOCK[j].removeCollChar(charac[i]);
      }
      for (int j = 0; j < DARKNESS.length; j++) {
        DARKNESS[j].removeCollChar(charac[i]);
      }
      for (int j = 0; j < LIFE_DRAIN.length; j++) {
        LIFE_DRAIN[j].removeCollChar(charac[i]);
      }
      // Add XP
      STATS.IncreaseXP(10 * charac[i].STATS.LEVEL);
      // Add gold
      this.GOLD += charac[i].GOLD;
    }
  }
}
/**
 * Writes the node to the specified output stream.
 *
 * @param out output stream
 * @throws IOException I/O exception
 */
void write(final DataOutput out) throws IOException {
  out.writeNum(name);
  out.write1(kind);
  out.writeNum(0);
  out.writeNum(children.length);
  out.writeDouble(1);
  // update leaf flag
  boolean leaf = stats.isLeaf();
  for (final PathNode child : children) {
    leaf &= child.kind == Data.TEXT || child.kind == Data.ATTR;
  }
  stats.setLeaf(leaf);
  stats.write(out);
  for (final PathNode child : children) child.write(out);
}
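// A reader sketch mirroring the layout written above: name, kind, a skipped legacy
// number, the child count, a skipped legacy double, stats, then the children.
// The constructor signature and Stats(DataInput) are assumptions for illustration.
PathNode(final DataInput in, final PathNode parent) throws IOException {
  name = (short) in.readNum();
  kind = (byte) in.read1();
  in.readNum();                   // skipped legacy field (written as 0)
  final int count = in.readNum(); // number of children
  in.readDouble();                // skipped legacy field (written as 1)
  this.parent = parent;
  stats = new Stats(in);
  children = new PathNode[count];
  for (int c = 0; c < count; c++) children[c] = new PathNode(in, this);
}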
public static void Win(Player p) {
  Game.gs = GameState.WIN;
  sendToAllPlus("§b✦§a-----------------------§b✦");
  sendToAllPlus("");
  sendToAllPlus(" §a§lWinner:");
  if (p == null) {
    sendToAllPlus(" §eWell... no-one won.");
  } else {
    sendToAllPlus(" §l" + p.getName());
  }
  sendToAllPlus("");
  sendToAllPlus("§b✦§a-----------------------§b✦");
  if (p != null) {
    int needed;
    if (LetterType.getPlayerLetter(p) == LetterType.A) {
      needed = 2000;
    } else {
      needed = LetterType.getPlayerLetter(p).getNeeded().getMoney();
    }
    // divide as float before rounding; the original integer division truncated
    // first, which made Math.round a no-op
    int amount = Math.round(needed / 30f);
    p.sendMessage(ChatColor.GREEN + "+" + amount + "$");
    MoneyAPI.addMoney(p, amount);
    Stats.getStats(p).addWins(1).addGamesPlayed(1);
  }
}
@NotNull
public String getAnalysisSummary() {
  StringBuilder sb = new StringBuilder();
  sb.append("\n" + _.banner("analysis summary"));
  String duration = _.formatTime(System.currentTimeMillis() - stats.getInt("startTime"));
  sb.append("\n- total time: " + duration);
  sb.append("\n- modules loaded: " + loadedFiles.size());
  sb.append("\n- semantic problems: " + semanticErrors.size());
  sb.append("\n- failed to parse: " + failedToParse.size());
  // count definitions and cross references
  int nDef = 0, nXRef = 0;
  for (Binding b : getAllBindings()) {
    nDef += 1;
    nXRef += b.refs.size();
  }
  sb.append("\n- number of definitions: " + nDef);
  sb.append("\n- number of cross references: " + nXRef);
  sb.append("\n- number of references: " + getReferences().size());
  long nResolved = this.resolved.size();
  long nUnresolved = this.unresolved.size();
  sb.append("\n- resolved names: " + nResolved);
  sb.append("\n- unresolved names: " + nUnresolved);
  sb.append("\n- name resolve rate: " + _.percent(nResolved, nResolved + nUnresolved));
  sb.append("\n" + _.getGCStats());
  return sb.toString();
}
/** Used because actors usually need to finish the full constructor before things are added to them. */
public GameRoot init() {
  // GameRoot is 16 units wide; the height follows from the screen's aspect ratio.
  this.setSize(16, (screenH / screenW) * 16);
  cam.setToOrtho(false, getWidth(), getHeight());
  cam.update();
  this.addListener(
      new EventListener() {
        @Override
        public boolean handle(Event event) {
          if (event instanceof MobRemoveEvent) {
            ((MobRemoveEvent) event).removeMob();
            if (event instanceof MobTouchedEvent) {
              stats.pointUp();
              screen.pointsChanged(stats.getPoints());
            } else if (event instanceof MobExplodeEvent) {
              screen.mobExploded(stats.mobExploded());
              if (stats.getStrikes() >= 5 && !gameOver) gameOver();
            }
          }
          return false;
        }
      });
  stats.reset();
  gameOver = false;
  return this;
}
@Override
public boolean contains(Object key) {
  if (!isValidKeyType(key)) {
    return false;
  }
  EntityManager em = emf.createEntityManager();
  try {
    EntityTransaction txn = em.getTransaction();
    long txnBegin = timeService.time();
    txn.begin();
    try {
      long entityFindBegin = timeService.time();
      Object entity = em.find(configuration.entityClass(), key);
      stats.addEntityFind(timeService.time() - entityFindBegin);
      if (trace) log.trace("Entity " + key + " -> " + entity);
      try {
        if (entity == null) return false;
        if (configuration.storeMetadata()) {
          byte[] keyBytes;
          try {
            keyBytes = marshaller.objectToByteBuffer(key);
          } catch (Exception e) {
            throw new JpaStoreException("Cannot marshall key", e);
          }
          long metadataFindBegin = timeService.time();
          MetadataEntity metadata = em.find(MetadataEntity.class, keyBytes);
          stats.addMetadataFind(timeService.time() - metadataFindBegin);
          if (trace) log.trace("Metadata " + key + " -> " + toString(metadata));
          return metadata == null || metadata.expiration > timeService.wallClockTime();
        } else {
          return true;
        }
      } finally {
        txn.commit();
        stats.addReadTxCommitted(timeService.time() - txnBegin);
      }
    } catch (RuntimeException e) {
      stats.addReadTxFailed(timeService.time() - txnBegin);
      throw e;
    } finally {
      if (txn != null && txn.isActive()) txn.rollback();
    }
  } finally {
    em.close();
  }
}
public void CompleteQuest(int index) {
  Quest qstComp = QUEST_LIST.elementAt(index);
  qstComp.STATUS = 3;
  qstComp.QUEST_GIVER.QUEST_LIST.elementAt(0).STATUS = 3;
  qstComp.QUEST_GIVER.QUEST_LIST.removeElementAt(0);
  STATS.IncreaseXP(qstComp.XP);
  GOLD += qstComp.GOLD;
}
public void go() {
  source.start();
  source.waitFor();
  if (sink != null) {
    sink.close();
  }
  stats.print();
}
/**
 * Default constructor.
 *
 * @param name node name
 * @param kind node kind
 * @param parent parent node
 * @param count counter
 */
private PathNode(final int name, final byte kind, final PathNode parent, final int count) {
  this.children = new PathNode[0];
  this.name = (short) name;
  this.kind = kind;
  this.parent = parent;
  stats = new Stats();
  stats.count = count;
}
@Test
public void printReturnsRelevantInformation() {
  ByteArrayOutputStream output = new ByteArrayOutputStream();
  Stats s = new Stats(1, 2, 3, 4, 5, 6, 7, 8, 9);
  s.print(new PrintStream(output));
  assertEquals(
      "Latency measured with 1 probe(s): \n"
          + " 2,00 us for 50 percentile\n"
          + " 3,00 us for 90 percentile\n"
          + " 4,00 us for 99 percentile\n"
          + " 5,00 us for 99.9 percentile\n"
          + " 6,00 us for 99.99 percentile\n"
          + " 7,00 us best run\n"
          + " 8,00 us avg run\n"
          + " 9,00 us worst run\n",
      output.toString());
}
@Override
public Datastore.Stats getStats(boolean useCache) {
  if (useCache) {
    try {
      Stats cachedStats = (Stats) STATS_CACHE.get(STATS_CACHE_KEY);
      if (cachedStats != null) {
        return cachedStats;
      }
      logger.info("Stats not in cache, re-computing");
    } catch (InvalidValueException err) {
      logger.log(Level.WARNING, "Could not load data from memcache", err);
    }
  }
  Stats ret = new Stats();
  DatastoreService datastore = DatastoreServiceFactory.getDatastoreService();
  PreparedQuery pq =
      datastore.prepare(new com.google.appengine.api.datastore.Query("__Stat_Kind__"));
  for (Entity kindStat : pq.asIterable()) {
    String kind = (String) kindStat.getProperty("kind_name");
    if ("Channel".equals(kind)) {
      ret.numChannels = ((Long) kindStat.getProperty("count")).intValue();
      ret.timestamp = (Date) kindStat.getProperty("timestamp");
    }
  }
  ret.numUsers = countUsersActiveInLastNDays(datastore, -1);
  ret.oneDayActiveUsers = countUsersActiveInLastNDays(datastore, 1);
  ret.sevenDayActiveUsers = countUsersActiveInLastNDays(datastore, 7);
  ret.thirtyDayActiveUsers = countUsersActiveInLastNDays(datastore, 30);
  STATS_CACHE.put(STATS_CACHE_KEY, ret);
  return ret;
}
/**
 * Test the scenario where the DataNode defaults to not dropping the cache, but our client
 * defaults are set.
 */
@Test(timeout = 120000)
public void testClientDefaults() throws Exception {
  // start a cluster
  LOG.info("testClientDefaults");
  tracker.clear();
  Configuration conf = new HdfsConfiguration();
  conf.setBoolean(DFSConfigKeys.DFS_DATANODE_DROP_CACHE_BEHIND_READS_KEY, false);
  conf.setBoolean(DFSConfigKeys.DFS_DATANODE_DROP_CACHE_BEHIND_WRITES_KEY, false);
  conf.setBoolean(DFSConfigKeys.DFS_CLIENT_CACHE_DROP_BEHIND_READS, true);
  conf.setBoolean(DFSConfigKeys.DFS_CLIENT_CACHE_DROP_BEHIND_WRITES, true);
  MiniDFSCluster cluster = null;
  String TEST_PATH = "/test";
  int TEST_PATH_LEN = MAX_TEST_FILE_LEN;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    cluster.waitActive();
    FileSystem fs = cluster.getFileSystem();
    // create new file
    createHdfsFile(fs, new Path(TEST_PATH), TEST_PATH_LEN, null);
    // verify that we dropped everything from the cache during file creation.
    ExtendedBlock block =
        cluster
            .getNameNode()
            .getRpcServer()
            .getBlockLocations(TEST_PATH, 0, Long.MAX_VALUE)
            .get(0)
            .getBlock();
    String fadvisedFileName = cluster.getBlockFile(0, block).getName();
    Stats stats = tracker.getStats(fadvisedFileName);
    stats.assertDroppedInRange(0, TEST_PATH_LEN - WRITE_PACKET_SIZE);
    stats.clear();
    // read file
    readHdfsFile(fs, new Path(TEST_PATH), Long.MAX_VALUE, null);
    // verify that we dropped everything from the cache.
    Assert.assertNotNull(stats);
    stats.assertDroppedInRange(0, TEST_PATH_LEN - WRITE_PACKET_SIZE);
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}