Example #1
 /** Asserts that hdfsMongoJob has written the test data from a file on HDFS to MongoDB. */
 @Test
 public void testHdfsMongoJob() {
   String data = UUID.randomUUID().toString();
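    // XD's hdfs sink appends a rollover counter to the file name, so the job is pointed at
    // the first rolled file, <DEFAULT_FILE_NAME>-0.txt.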
   job.fileName(HdfsMongoDbJob.DEFAULT_FILE_NAME + "-0.txt");
    // Create a stream that writes to an HDFS file. This file will be used by the job.
   stream(
       "dataSender",
       sources.http()
           + XD_DELIMITER
           + sinks
               .hdfs()
               .directoryName(HdfsMongoDbJob.DEFAULT_DIRECTORY)
               .fileName(HdfsMongoDbJob.DEFAULT_FILE_NAME)
               .toDSL());
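    // Post the random payload through the stream's http source.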
   sources.http(getContainerHostForSource("dataSender")).postData(data);
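    // Create the batch job from its DSL definition.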
   job(job.toDSL());
   waitForXD();
   // Undeploy the dataSender stream to force XD to close the file.
   this.undeployStream("dataSender");
   waitForXD();
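    // Launch the batch job to copy the HDFS file into MongoDB.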
   jobLaunch();
   waitForXD();
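    // Read back the document the job wrote and verify its payload.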
   Map<String, String> result = job.getSingleObject(HdfsMongoDbJob.DEFAULT_COLLECTION_NAME);
    String dataResult = result.get(HdfsMongoDbJob.DEFAULT_ID_FIELD);
    assertNotNull(
        "The attribute " + HdfsMongoDbJob.DEFAULT_ID_FIELD + " is not present in the result",
        dataResult);
   assertEquals(data, dataResult);
 }
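
The test reads the result back through job.getSingleObject, which presumably fetches the lone document from the collection. A minimal sketch of an equivalent lookup with the MongoDB Java driver is shown below; the connection URI, database name, and method body are assumptions for illustration, not the test framework's actual code.

 import static org.junit.Assert.assertNotNull;

 import com.mongodb.client.MongoClient;
 import com.mongodb.client.MongoClients;
 import com.mongodb.client.MongoDatabase;
 import java.util.HashMap;
 import java.util.Map;
 import org.bson.Document;

 /** Hypothetical stand-in for HdfsMongoDbJob.getSingleObject. */
 private Map<String, String> getSingleObject(String collectionName) {
   // Assumed URI and database name; the real test infrastructure supplies its own connection.
   try (MongoClient client = MongoClients.create("mongodb://localhost:27017")) {
     MongoDatabase db = client.getDatabase("xd");
     Document doc = db.getCollection(collectionName).find().first();
     assertNotNull("Expected one document in " + collectionName, doc);
     Map<String, String> result = new HashMap<>();
     // Flatten the document to String values, matching the Map<String, String> the test expects.
     doc.forEach((key, value) -> result.put(key, String.valueOf(value)));
     return result;
   }
 }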
Example #2
 /** Being a good steward, remove the result collection from MongoDB and the source file from HDFS. */
 @After
 public void cleanup() {
   if (job != null) {
     job.dropCollection(HdfsMongoDbJob.DEFAULT_COLLECTION_NAME);
   }
   if (hadoopUtil.fileExists(HdfsMongoDbJob.DEFAULT_DIRECTORY)) {
     hadoopUtil.fileRemove(HdfsMongoDbJob.DEFAULT_DIRECTORY);
   }
 }
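
For reference, the hadoopUtil helpers map naturally onto Hadoop's FileSystem API. A minimal sketch under that assumption follows; the method names mirror the calls above, and the Configuration is assumed to pick up fs.defaultFS for the test cluster from the classpath.

 import java.io.IOException;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;

 /** Hypothetical equivalents of hadoopUtil.fileExists and fileRemove. */
 public boolean fileExists(String pathName) throws IOException {
   FileSystem fs = FileSystem.get(new Configuration());
   return fs.exists(new Path(pathName));
 }

 public void fileRemove(String pathName) throws IOException {
   FileSystem fs = FileSystem.get(new Configuration());
   // Recursive delete, since DEFAULT_DIRECTORY is a directory rather than a single file.
   fs.delete(new Path(pathName), true);
 }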