Example #1
 @Override
 public boolean waitMsecs(int msecs) {
   if (msecs < 0) {
      throw new IllegalArgumentException("waitMsecs: msecs cannot be negative!");
   }
   long maxMsecs = time.getMilliseconds() + msecs;
   int curMsecTimeout = 0;
   lock.lock();
   try {
     while (!eventOccurred) {
       curMsecTimeout = Math.min(msecs, msecPeriod);
       if (LOG.isDebugEnabled()) {
         LOG.debug("waitMsecs: Wait for " + curMsecTimeout);
       }
       try {
         boolean signaled = cond.await(curMsecTimeout, TimeUnit.MILLISECONDS);
         if (LOG.isDebugEnabled()) {
           LOG.debug("waitMsecs: Got timed signaled of " + signaled);
         }
       } catch (InterruptedException e) {
          throw new IllegalStateException(
              "waitMsecs: Caught InterruptedException on cond.await() with timeout " + curMsecTimeout, e);
       }
       if (time.getMilliseconds() > maxMsecs) {
         return false;
       }
       msecs = Math.max(0, msecs - curMsecTimeout);
       progressable.progress(); // go around again
     }
   } finally {
     lock.unlock();
   }
   return true;
 }
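
Example #1 uses several fields it never declares. Below is a minimal sketch of the surrounding state, with types inferred from the calls above; the host class name, the Time interface, and the initial values are assumptions based on the snippet, not a confirmed API.

 import java.util.concurrent.locks.Condition;
 import java.util.concurrent.locks.ReentrantLock;
 import org.apache.hadoop.util.Progressable;

 // Hypothetical host class for waitMsecs(); field types inferred from usage.
 public abstract class ProgressableWaiter {
   /** Hypothetical clock abstraction; the snippet only needs getMilliseconds(). */
   interface Time {
     long getMilliseconds();
   }

   protected final ReentrantLock lock = new ReentrantLock();
   protected final Condition cond = lock.newCondition();
   protected boolean eventOccurred = false; // flipped under the lock when the event fires
   protected final int msecPeriod = 10000;  // assumed: longest single await() between progress reports
   protected final Time time = System::currentTimeMillis;
   protected final Progressable progressable;

   protected ProgressableWaiter(Progressable progressable) {
     this.progressable = progressable;
   }

   /** Wakes any thread blocked in waitMsecs(). */
   public void signal() {
     lock.lock();
     try {
       eventOccurred = true;
       cond.signalAll();
     } finally {
       lock.unlock();
     }
   }
 }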
Example #2
  /**
   * If the key is to be associated with a valid value, a mutation is created for it with the given
   * table and columns. In the event the value in the column is missing (i.e., null), then it is
   * marked for {@link Deletion}. Similarly, if the entire value for a key is missing (i.e., null),
   * then the entire key is marked for {@link Deletion}.
   *
   * @param keyColumns the key to write.
   * @param values the values to write.
   * @throws IOException if an I/O error occurs while writing the mutation.
   */
  @Override
  public void write(Map<String, ByteBuffer> keyColumns, List<ByteBuffer> values)
      throws IOException {
    TokenRange range = ringCache.getRange(getPartitionKey(keyColumns));

    // get the client for the given range, or create a new one
    final InetAddress address = ringCache.getEndpoints(range).get(0);
    RangeClient client = clients.get(address);
    if (client == null) {
      // haven't seen keys for this range: create new client
      client = new RangeClient(ringCache.getEndpoints(range));
      client.start();
      clients.put(address, client);
    }

    // add primary key columns to the bind variables
    List<ByteBuffer> allValues = new ArrayList<ByteBuffer>(values);
     for (ColumnMetadata column : partitionKeyColumns) {
       allValues.add(keyColumns.get(column.getName()));
     }
     for (ColumnMetadata column : clusterColumns) {
       allValues.add(keyColumns.get(column.getName()));
     }

    client.put(allValues);

     if (progressable != null) {
       progressable.progress();
     }
     if (context != null) {
       HadoopCompat.progress(context);
     }
  }
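
A hypothetical call site for Example #2's write(), showing the null-value convention described in the Javadoc. The column name and payloads are made up for illustration, and the fragment assumes java.nio.ByteBuffer, java.nio.charset.StandardCharsets, and java.util imports.

 // Hypothetical caller; "id" and the payloads are illustrative only.
 Map<String, ByteBuffer> keyColumns = new HashMap<String, ByteBuffer>();
 keyColumns.put("id", ByteBuffer.wrap("row-1".getBytes(StandardCharsets.UTF_8)));

 List<ByteBuffer> values = new ArrayList<ByteBuffer>();
 values.add(ByteBuffer.wrap("payload".getBytes(StandardCharsets.UTF_8)));
 values.add(null); // a null column value marks that column for Deletion, per the Javadoc

 recordWriter.write(keyColumns, values); // progress is reported inside write()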
Example #3
 @Override
 public synchronized void write(BytesWritable key, BytesWritable value) throws IOException {
   try {
     mWriter.put(key.getBytes(), value.getBytes());
      // Report progress to the job manager so it knows the task is still running.
     mProgress.progress();
   } catch (TachyonException e) {
     throw new IOException(e);
   }
 }
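
The m-prefixed fields in Example #3 are not declared in the snippet. Their likely shapes, inferred from the mWriter.put(byte[], byte[]) call and the TachyonException catch (the writer type is an assumption):

 // Hypothetical host fields for Example #3, inferred from usage.
 private final KeyValueStoreWriter mWriter; // assumed Tachyon key-value writer type
 private final Progressable mProgress;      // Hadoop progress reporter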
Example #4
  public static <K extends Object, V extends Object> void writeFile(
      RawKeyValueIterator records, Writer<K, V> writer, Progressable progressable)
      throws IOException {
    long recordCtr = 0;
    while (records.next()) {
      writer.append(records.getKey(), records.getValue());

      if ((++recordCtr % PROGRESS_BAR) == 0) {
        progressable.progress();
      }
    }
  }
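
PROGRESS_BAR is not declared in Example #4. Example #6 below reads the same threshold from configuration, defaulting to 10000, so a matching constant would plausibly be:

  // Hypothetical declaration; Example #6 reads the same value from
  // "mapred.merge.recordsBeforeProgress", defaulting to 10000.
  private static final long PROGRESS_BAR = 10000;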
Example #5
 public static long copyFileToZipStream(
     File file, ZipOutputStream zipOutputStream, Progressable progressable) throws IOException {
   createNewZipEntry(zipOutputStream, file);
   long numRead = 0;
   try (FileInputStream inputStream = new FileInputStream(file)) {
      byte[] buf = new byte[0x10000]; // 64 KiB copy buffer
     for (int bytesRead = inputStream.read(buf);
         bytesRead >= 0;
         bytesRead = inputStream.read(buf)) {
       progressable.progress();
       if (bytesRead == 0) {
         continue;
       }
       zipOutputStream.write(buf, 0, bytesRead);
       progressable.progress();
       numRead += bytesRead;
     }
   }
   zipOutputStream.closeEntry();
   progressable.progress();
   return numRead;
 }
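
A hypothetical caller for copyFileToZipStream(). Progressable has a single abstract method, so a Java 8 lambda serves as a no-op reporter (Hadoop's Reporter.NULL would also do); file names are illustrative, and java.io plus java.util.zip imports are assumed.

 // Hypothetical usage; reports progress via a no-op lambda.
 try (ZipOutputStream zos = new ZipOutputStream(new FileOutputStream("out.zip"))) {
   long copied = copyFileToZipStream(new File("data.bin"), zos, () -> { /* no-op */ });
   System.out.println("Copied " + copied + " bytes into the entry");
 }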
Example #6
  public static <K extends Object, V extends Object> void writeFile(
      RawKeyValueIterator records,
      Writer<K, V> writer,
      Progressable progressable,
      Configuration conf)
      throws IOException {
    long progressBar = conf.getLong("mapred.merge.recordsBeforeProgress", 10000);
    long recordCtr = 0;
    while (records.next()) {
      writer.append(records.getKey(), records.getValue());

      if (((recordCtr++) % progressBar) == 0) {
        progressable.progress();
      }
    }
  }
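
Example #6 is the configurable variant of Example #4. A hypothetical tuning that reports progress more often, e.g. for jobs whose records are individually expensive to write:

   // Hypothetical: report progress every 1000 records instead of the 10000 default.
   Configuration conf = new Configuration();
   conf.setLong("mapred.merge.recordsBeforeProgress", 1000L);
   writeFile(records, writer, reporter, conf);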
Example #7
  private void testLuceneIndexRecordReader(
      ArrayList<String> queryStrings,
      ArrayList<Path> indexPaths,
      ArrayList<ArrayList<ArrayList<Integer>>> indexesQueriesDocIds)
      throws Exception {

    LuceneIndexInputSplit split = createStrictMock(LuceneIndexInputSplit.class);
    expect(split.getIndexDirs()).andReturn(indexPaths);
    replay(split);

    Configuration conf = new Configuration();
    TaskAttemptContext context = createStrictMock(TaskAttemptContext.class);
    expect(HadoopCompat.getConfiguration(context)).andStubReturn(conf);
    ((Progressable) context).progress(); // casting to avoid Hadoop 2 incompatibility
    expectLastCall().atLeastOnce();
    replay(context);

    LuceneIndexInputFormat.setQueries(queryStrings, conf);

    LuceneIndexRecordReader<IntWritable> rr =
        createMockBuilder(MockRecordReader.class)
            .addMockedMethod("openIndex")
            .addMockedMethod("createSearcher")
            .createMock();

    Query[] queries = new Query[queryStrings.size()];
    for (int i = 0; i < queries.length; i++) {
      Query query = createStrictMock(Query.class);
      replay(query);
      queries[i] = query;
      expect(rr.deserializeQuery(queryStrings.get(i))).andReturn(query);
    }

    for (int index = 0; index < indexPaths.size(); index++) {
      IndexReader reader = createStrictMock(IndexReader.class);
      expect(reader.maxDoc()).andStubReturn(4);
      replay(reader);
      expect(rr.openIndex(indexPaths.get(index), conf)).andReturn(reader);

      IndexSearcher searcher = createStrictMock(IndexSearcher.class);
      expect(rr.createSearcher(reader)).andReturn(searcher);

      for (int query = 0; query < queries.length; query++) {
        final ArrayList<Integer> ids = indexesQueriesDocIds.get(index).get(query);
        final Capture<Collector> collectorCapture = new Capture<Collector>();
        expect(searcher.getIndexReader()).andReturn(reader);
        searcher.search(eq(queries[query]), capture(collectorCapture));

        expectLastCall()
            .andAnswer(
                new IAnswer<Void>() {
                  @Override
                  public Void answer() throws Throwable {
                    for (int id : ids) {
                      collectorCapture.getValue().collect(id);
                    }
                    return null;
                  }
                });

        for (int docId : ids) {
          expect(searcher.doc(docId)).andReturn(docs[docId]);
        }
      }
      replay(searcher);
    }

    replay(rr);

    rr.initialize(split, context);

    float prevProgress = -1;
    for (int index = 0; index < indexesQueriesDocIds.size(); index++) {
      for (int query = 0; query < indexesQueriesDocIds.get(index).size(); query++) {
        for (int docId : indexesQueriesDocIds.get(index).get(query)) {
          assertTrue(rr.nextKeyValue());
          assertEquals(query, rr.getCurrentKey().get());
          assertEquals(docsAndValues.get(docs[docId]), (Integer) rr.getCurrentValue().get());
          float newProgress = rr.getProgress();
          assertTrue(newProgress > prevProgress);
          assertTrue(newProgress <= 1.0);
        }
      }
    }

    assertFalse(rr.nextKeyValue());
    assertFalse(rr.nextKeyValue());

    verifyAll();
  }
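
MockRecordReader, handed to createMockBuilder in Example #7, is not shown. Below is a sketch of the shape the test implies, with signatures inferred from the rr.openIndex(...) and rr.createSearcher(...) expectations; treat the names, visibility, and exact signatures as assumptions.

  // Hypothetical test helper; EasyMock replaces both methods, so the bodies never run.
  static class MockRecordReader extends LuceneIndexRecordReader<IntWritable> {
    @Override
    IndexReader openIndex(Path path, Configuration conf) throws IOException {
      throw new UnsupportedOperationException("mocked in the test");
    }

    @Override
    IndexSearcher createSearcher(IndexReader reader) {
      throw new UnsupportedOperationException("mocked in the test");
    }
  }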
Example #8
 @Override
 public void waitForever() {
   // waitMsecs() returns false on timeout and true once the event has occurred,
   // so keep waiting in msecPeriod chunks, reporting progress after each timeout
   // so the framework does not kill the task as hung.
   while (!waitMsecs(msecPeriod)) {
     progressable.progress();
   }
 }
 /** Reports progress if a Progressable was supplied. */
 private void progress() {
   if (progressable != null) {
     progressable.progress();
   }
 }