/**
 * Waits up to DEFAULT_SLEEP ms for at least {@code expected} tablet servers to have
 * registered with the master, polling every 200 ms.
 *
 * @param expected minimum number of tablet servers to wait for
 * @return true if at least {@code expected} tablet servers are registered, false on timeout
 * @throws Exception if listing the tablet servers fails or the deferred join times out
 */
static boolean waitForTabletServers(int expected) throws Exception {
  Stopwatch timer = new Stopwatch().start();
  int seen = 0;
  while (seen < expected && timer.elapsedMillis() < DEFAULT_SLEEP) {
    Thread.sleep(200);
    Deferred<ListTabletServersResponse> listing = client.listTabletServers();
    listing.addErrback(defaultErrorCB);
    seen = listing.join(DEFAULT_SLEEP).getTabletServersCount();
  }
  return seen >= expected;
}
/**
 * Finds the RPC port of the leader master, so it can be looked up in the
 * port-to-process map. Polls for up to DEFAULT_SLEEP ms.
 *
 * @return the port of the leader master
 * @throws Exception if we are unable to find the leader master in time
 */
protected static int findLeaderMasterPort() throws Exception {
  Stopwatch timer = new Stopwatch().start();
  int port = -1;
  while (port == -1 && timer.elapsedMillis() < DEFAULT_SLEEP) {
    Deferred<Master.GetTableLocationsResponsePB> pending = client.getMasterTableLocationsPB();
    Master.GetTableLocationsResponsePB locations = pending.join(DEFAULT_SLEEP);
    // The first replica of the first tablet location is the leader master.
    port = locations.getTabletLocations(0)
        .getReplicas(0)
        .getTsInfo()
        .getRpcAddresses(0)
        .getPort();
  }
  if (port == -1) {
    fail("No leader master found after " + DEFAULT_SLEEP + " ms.");
  }
  return port;
}
/**
 * List input directories. Subclasses may override to, e.g., select only files matching a regular
 * expression.
 *
 * <p>Listing is done either single-threaded or via a {@link LocatedFileStatusFetcher} thread
 * pool, depending on {@code LIST_STATUS_NUM_THREADS}.
 *
 * @param job the job to list input paths for
 * @return array of FileStatus objects
 * @throws IOException if zero items, or if listing fails / is interrupted.
 */
protected List<FileStatus> listStatus(JobContext job) throws IOException {
  Path[] dirs = getInputPaths(job);
  if (dirs.length == 0) {
    throw new IOException("No input paths specified in job");
  }

  // get tokens for all the required FileSystems..
  TokenCache.obtainTokensForNamenodes(job.getCredentials(), dirs, job.getConfiguration());

  // Whether we need to recursive look into the directory structure
  boolean recursive = getInputDirRecursive(job);

  // creates a MultiPathFilter with the hiddenFileFilter and the
  // user provided one (if any).
  List<PathFilter> filters = new ArrayList<PathFilter>();
  filters.add(hiddenFileFilter);
  PathFilter jobFilter = getInputPathFilter(job);
  if (jobFilter != null) {
    filters.add(jobFilter);
  }
  PathFilter inputFilter = new MultiPathFilter(filters);

  List<FileStatus> result = null;
  int numThreads =
      job.getConfiguration().getInt(LIST_STATUS_NUM_THREADS, DEFAULT_LIST_STATUS_NUM_THREADS);
  Stopwatch sw = new Stopwatch().start();
  if (numThreads == 1) {
    result = singleThreadedListStatus(job, dirs, inputFilter, recursive);
  } else {
    Iterable<FileStatus> locatedFiles = null;
    try {
      LocatedFileStatusFetcher locatedFileStatusFetcher =
          new LocatedFileStatusFetcher(job.getConfiguration(), dirs, recursive, inputFilter, true);
      locatedFiles = locatedFileStatusFetcher.getFileStatuses();
    } catch (InterruptedException e) {
      // FIX: restore the interrupt flag and preserve the cause; the original
      // swallowed both, hiding the real failure and the interrupted status.
      Thread.currentThread().interrupt();
      throw new IOException("Interrupted while getting file statuses", e);
    }
    result = Lists.newArrayList(locatedFiles);
  }
  sw.stop();
  if (LogGlobal.isDebugEnabled()) {
    /* LOG.debug("Time taken to get FileStatuses: "+sw.elapsedMillis()) */
    LOG.time_taken_get_filestatuses(String.valueOf(sw.elapsedMillis())).tag("methodCall").debug();
  }
  /* LOG.info("Total input paths to process : "+result.size()) */
  LOG.total_input_paths_process(String.valueOf(result.size())).tag("methodCall").info();
  return result;
}
/**
 * Code for each 'client' to run: writes (or reads, if "hbase.test.do.gets" is set)
 * {@code namespaceSpan} rows against BIG_USER_TABLE, logging throughput every
 * {@code printInterval} operations.
 *
 * @param id client id; also seeds this client's Random so each client touches a distinct sequence
 * @param c configuration ("hbase.test.namespace.span" rows, "hbase.test.do.gets" mode)
 * @param sharedConnection connection shared across clients; the table (not the connection) is
 *     closed when the cycle finishes
 * @throws IOException if a get/put fails
 */
static void cycle(int id, final Configuration c, final HConnection sharedConnection)
    throws IOException {
  HTableInterface table = sharedConnection.getTable(BIG_USER_TABLE);
  table.setAutoFlushTo(false);
  long namespaceSpan = c.getLong("hbase.test.namespace.span", 1000000);
  long startTime = System.currentTimeMillis();
  final int printInterval = 100000;
  Random rd = new Random(id);
  boolean get = c.getBoolean("hbase.test.do.gets", false);
  try {
    Stopwatch stopWatch = new Stopwatch();
    stopWatch.start();
    // FIX: the counter must be long — namespaceSpan is a long, and an int
    // counter overflows (looping forever) for spans > Integer.MAX_VALUE.
    for (long i = 0; i < namespaceSpan; i++) {
      byte[] b = format(rd.nextLong());
      if (get) {
        Get g = new Get(b);
        table.get(g);
      } else {
        Put p = new Put(b);
        p.add(HConstants.CATALOG_FAMILY, b, b);
        table.put(p);
      }
      if (i % printInterval == 0) {
        LOG.info("Put " + printInterval + "/" + stopWatch.elapsedMillis());
        stopWatch.reset();
        stopWatch.start();
      }
    }
    LOG.info(
        "Finished a cycle putting "
            + namespaceSpan
            + " in "
            + (System.currentTimeMillis() - startTime)
            + "ms");
  } finally {
    table.close();
  }
}
/**
 * Generate the list of files and make them into FileSplits.
 *
 * <p>Each non-empty file is either chopped into block-aligned splits of
 * {@code computeSplitSize(blockSize, minSize, maxSize)} bytes (when {@code isSplitable}) or
 * emitted as a single split. Zero-length files produce one empty-host split so they still
 * appear in the job.
 *
 * @param job the job context
 * @return one InputSplit per chunk of input data
 * @throws IOException if listing the input files fails
 */
public List<InputSplit> getSplits(JobContext job) throws IOException {
  Stopwatch sw = new Stopwatch().start();
  // minSize can never go below the format's hard floor, whatever the job configures.
  long minSize = Math.max(getFormatMinSplitSize(), getMinSplitSize(job));
  long maxSize = getMaxSplitSize(job);

  // generate splits
  List<InputSplit> splits = new ArrayList<InputSplit>();
  List<FileStatus> files = listStatus(job);
  for (FileStatus file : files) {
    Path path = file.getPath();
    long length = file.getLen();
    if (length != 0) {
      BlockLocation[] blkLocations;
      // LocatedFileStatus already carries its block locations; otherwise ask the FS.
      if (file instanceof LocatedFileStatus) {
        blkLocations = ((LocatedFileStatus) file).getBlockLocations();
      } else {
        FileSystem fs = path.getFileSystem(job.getConfiguration());
        blkLocations = fs.getFileBlockLocations(file, 0, length);
      }
      if (isSplitable(job, path)) {
        long blockSize = file.getBlockSize();
        long splitSize = computeSplitSize(blockSize, minSize, maxSize);

        long bytesRemaining = length;
        // Keep emitting full-size splits while the remainder is more than
        // SPLIT_SLOP times the split size; the final sub-slop tail is folded
        // into one last split below rather than becoming a tiny split.
        while (((double) bytesRemaining) / splitSize > SPLIT_SLOP) {
          int blkIndex = getBlockIndex(blkLocations, length - bytesRemaining);
          splits.add(
              makeSplit(
                  path, length - bytesRemaining, splitSize, blkLocations[blkIndex].getHosts()));
          bytesRemaining -= splitSize;
        }

        if (bytesRemaining != 0) {
          int blkIndex = getBlockIndex(blkLocations, length - bytesRemaining);
          splits.add(
              makeSplit(
                  path,
                  length - bytesRemaining,
                  bytesRemaining,
                  blkLocations[blkIndex].getHosts()));
        }
      } else { // not splitable
        // Unsplittable (e.g. compressed) file: one split hosted where its first block lives.
        splits.add(makeSplit(path, 0, length, blkLocations[0].getHosts()));
      }
    } else {
      // Create empty hosts array for zero length files
      splits.add(makeSplit(path, 0, length, new String[0]));
    }
  }
  // Save the number of input files for metrics/loadgen
  job.getConfiguration().setLong(NUM_INPUT_FILES, files.size());
  sw.stop();
  if (LogGlobal.isDebugEnabled()) {
    /* LOG.debug("Total # of splits generated by getSplits: "+splits.size()+", TimeTaken: "+sw.elapsedMillis()) */
    LOG.total_splits_generated_getsplits_timetak(
            String.valueOf(splits.size()), String.valueOf(sw.elapsedMillis()))
        .tag("methodCall")
        .debug();
  }
  return splits;
}
/** Returns the stopwatch's elapsed time in milliseconds, formatted as "<millis> ms". */
public String getMillisTimeStamp() {
  return time.elapsedMillis() + " ms";
}
/**
 * Exercises the RPC channel three ways: a blocking call, a second blocking call on the
 * already-open socket, and a non-blocking call whose completion is awaited via a
 * flag-guarded monitor wait.
 *
 * <p>Depends on the server started by the "example.ipc.server" test group.
 */
@Test(dependsOnGroups = "example.ipc.server")
public void clientTest() throws IOException, ServiceException, InterruptedException {
  PeerInfo client = new PeerInfo(socketAddress.getHostName(), 1234);
  ThreadPoolCallExecutor executor = new ThreadPoolCallExecutor(3, 10);
  DuplexTcpClientBootstrap bootstrap =
      new DuplexTcpClientBootstrap(
          client,
          new NioClientSocketChannelFactory(
              Executors.newCachedThreadPool(), Executors.newCachedThreadPool()),
          executor);
  bootstrap.setOption("connectTimeoutMillis", 10000);
  bootstrap.setOption("connectResponseTimeoutMillis", 10000);
  bootstrap.setOption("receiveBufferSize", 1048576);
  bootstrap.setOption("tcpNoDelay", false);
  RpcClientChannel channel = bootstrap.peerWith(socketAddress);

  // blocking call
  DataService.BlockingInterface dataService = DataService.newBlockingStub(channel);
  RpcController controller = channel.newRpcController();

  // make request
  GetRequest request = GetRequest.newBuilder().setRow(ByteString.copyFromUtf8("row1")).build();
  final Stopwatch stopwatch = new Stopwatch().start();
  GetResponse response = dataService.getData(controller, request);
  stopwatch.stop();
  System.out.println(response.getDataList());
  System.out.printf("Request took %s milliseconds\n", stopwatch.elapsedMillis());

  // do it again since the socket is open
  stopwatch.reset().start();
  response = dataService.getData(controller, request);
  stopwatch.stop();
  System.out.println(response.getDataList());
  System.out.printf("Request took %s milliseconds\n", stopwatch.elapsedMillis());

  // non-blocking
  DataService.Stub stub = DataService.newStub(channel);
  final Object lock = new Object();
  // FIX: guard the wait with a completion flag. A bare lock.wait() hangs forever
  // if the callback fires (and notifies) before this thread reaches wait(), and
  // returns prematurely on a spurious wakeup.
  final boolean[] callbackDone = {false};
  stopwatch.reset().start();
  stub.getData(
      controller,
      request,
      new RpcCallback<GetResponse>() {
        public void run(final GetResponse parameter) {
          System.out.println("Non-Blocking Callback");
          System.out.println(parameter.getDataList());
          stopwatch.stop();
          System.out.printf("Request took %s milliseconds\n", stopwatch.elapsedMillis());
          synchronized (lock) {
            callbackDone[0] = true;
            lock.notify();
          }
        }
      });
  synchronized (lock) {
    while (!callbackDone[0]) {
      lock.wait();
    }
  }
}