  @Test(enabled = false)
  public void testEvictionWithEmptyDirs() throws Exception {
    try {
      Configuration conf = cluster.getConf();
      FileSystem fs = FileSystem.get(conf);
      fs.delete(new Path("/"), true);
      stream.clear();

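      // Seed 10 daily instances under /data; the trailing 'false' flag presumably
      // suppresses data-file creation so the instance directories stay empty.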
      Pair<List<String>, List<String>> pair =
          generateInstances(
              fs, "feed1", "yyyy/MM/dd/'more'/yyyy", 10, TimeUnit.DAYS, "/data", false);
      final String storageUrl = cluster.getConf().get(HadoopClientFactory.FS_DEFAULT_NAME_KEY);
      String dataPath =
          LocationType.DATA.name()
              + "="
              + storageUrl
              + "/data/YYYY/feed1/mmHH/dd/MM/?{YEAR}/?{MONTH}/?{DAY}/more/?{YEAR}";
      String logFile = hdfsUrl + "/falcon/staging/feed/instancePaths-2012-01-01-01-00.csv";
      long beforeDelCount =
          fs.getContentSummary(new Path("/data/YYYY/feed1/mmHH/dd/MM/")).getDirectoryCount();

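      // Evict instances older than 10 days; their now-empty parent directories
      // should be pruned as well, which is what the assertions below verify.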
      FeedEvictor.main(
          new String[] {
            "-feedBasePath", dataPath,
            "-retentionType", "instance",
            "-retentionLimit", "days(10)",
            "-timeZone", "UTC",
            "-frequency", "daily",
            "-logFile", logFile,
            "-falconFeedStorageType", Storage.TYPE.FILESYSTEM.name(),
          });

      compare(map.get("feed1"), stream.getBuffer());

      String expectedInstancePaths = getExpectedInstancePaths(dataPath.replace(storageUrl, ""));
      Assert.assertEquals(readLogFile(new Path(logFile)), expectedInstancePaths);

      String deletedPath = expectedInstancePaths.split(",")[0].split("=")[1];
      Assert.assertFalse(fs.exists(new Path(deletedPath)));
      // empty parents
      Assert.assertFalse(fs.exists(new Path(deletedPath).getParent()));
      Assert.assertFalse(fs.exists(new Path(deletedPath).getParent().getParent()));
      // base path not deleted
      Assert.assertTrue(fs.exists(new Path("/data/YYYY/feed1/mmHH/dd/MM/")));
      // non-eligible empty dirs
      long afterDelCount =
          fs.getContentSummary(new Path("/data/YYYY/feed1/mmHH/dd/MM/")).getDirectoryCount();
      Assert.assertEquals((beforeDelCount - afterDelCount), 19);
      for (String path : pair.second) {
        Assert.assertTrue(fs.exists(new Path(path)));
      }

    } catch (Exception e) {
      Assert.fail("Unknown exception", e);
    }
  }

  @Test
  public void testEviction4() throws Exception {
    try {
      Configuration conf = cluster.getConf();
      FileSystem fs = FileSystem.get(conf);
      fs.delete(new Path("/"), true);
      stream.clear();

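      // Only junk paths are seeded: '02/more/hello' cannot resolve ?{HOUR} to a
      // date, so the evictor should report "instances=NULL" and delete nothing.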
      Pair<List<String>, List<String>> pair = createTestData("/data");
      FeedEvictor.main(
          new String[] {
            "-feedBasePath",
                LocationType.DATA.name()
                    + "="
                    + cluster.getConf().get(HadoopClientFactory.FS_DEFAULT_NAME_KEY)
                    + "/data/YYYY/feed3/dd/MM/?{MONTH}/more/?{HOUR}",
            "-retentionType", "instance",
            "-retentionLimit", "months(5)",
            "-timeZone", "UTC",
            "-frequency", "hourly",
            "-logFile",
                conf.get(HadoopClientFactory.FS_DEFAULT_NAME_KEY)
                    + "/falcon/staging/feed/2012-01-01-04-00",
            "-falconFeedStorageType", Storage.TYPE.FILESYSTEM.name(),
          });
      Assert.assertEquals("instances=NULL", stream.getBuffer());

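      // Second run: a fully literal feed path contains no date variables, so no
      // instances can ever be resolved and eviction is again a no-op.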
      stream.clear();
      String dataPath = "/data/YYYY/feed4/dd/MM/02/more/hello";
      String logFile = hdfsUrl + "/falcon/staging/feed/instancePaths-2012-01-01-02-00.csv";
      FeedEvictor.main(
          new String[] {
            "-feedBasePath",
                LocationType.DATA.name()
                    + "="
                    + cluster.getConf().get(HadoopClientFactory.FS_DEFAULT_NAME_KEY)
                    + dataPath,
            "-retentionType", "instance",
            "-retentionLimit", "hours(5)",
            "-timeZone", "UTC",
            "-frequency", "hourly",
            "-logFile", logFile,
            "-falconFeedStorageType", Storage.TYPE.FILESYSTEM.name(),
          });
      Assert.assertEquals("instances=NULL", stream.getBuffer());

      Assert.assertEquals(readLogFile(new Path(logFile)), getExpectedInstancePaths(dataPath));

      assertFailures(fs, pair);
    } catch (Exception e) {
      Assert.fail("Unknown exception", e);
    }
  }

  @Test
  public void testEviction2() throws Exception {
    try {
      Configuration conf = cluster.getConf();
      FileSystem fs = FileSystem.get(conf);
      fs.delete(new Path("/"), true);
      stream.clear();

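      // Seed 10 days of dated instances plus junk paths that must not be evicted.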
      Pair<List<String>, List<String>> pair =
          createTestData("feed1", "yyyy-MM-dd/'more'/yyyy", 10, TimeUnit.DAYS, "/data");
      final String storageUrl = cluster.getConf().get(HadoopClientFactory.FS_DEFAULT_NAME_KEY);
      String dataPath =
          LocationType.DATA.name()
              + "="
              + storageUrl
              + "/data/YYYY/feed1/mmHH/dd/MM/?{YEAR}-?{MONTH}-?{DAY}/more/?{YEAR}";
      String logFile = hdfsUrl + "/falcon/staging/feed/instancePaths-2012-01-01-01-00.csv";

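      // Instances older than the 10-day retention window should be deleted and
      // their now-empty parents pruned (asserted below).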
      FeedEvictor.main(
          new String[] {
            "-feedBasePath", dataPath,
            "-retentionType", "instance",
            "-retentionLimit", "days(10)",
            "-timeZone", "UTC",
            "-frequency", "daily",
            "-logFile", logFile,
            "-falconFeedStorageType", Storage.TYPE.FILESYSTEM.name(),
          });

      assertFailures(fs, pair);
      compare(map.get("feed1"), stream.getBuffer());

      String expectedInstancePaths = getExpectedInstancePaths(dataPath);
      Assert.assertEquals(readLogFile(new Path(logFile)), expectedInstancePaths);

      String deletedPath = expectedInstancePaths.split(",")[0].split("=")[1];
      Assert.assertFalse(fs.exists(new Path(deletedPath)));
      // empty parents
      Assert.assertFalse(fs.exists(new Path(deletedPath).getParent()));
      Assert.assertFalse(fs.exists(new Path(deletedPath).getParent().getParent()));
      // base path not deleted
      Assert.assertTrue(fs.exists(new Path("/data/YYYY/feed1/mmHH/dd/MM/")));

    } catch (Exception e) {
      Assert.fail("Unknown exception", e);
    }
  }

  @BeforeClass
  public void setup() throws Exception {
    StartupProperties.get()
        .setProperty("falcon.state.store.impl", "org.apache.falcon.state.store.InMemoryStateStore");
    super.setup();
    this.dfsCluster = EmbeddedCluster.newCluster("testCluster");
    this.conf = dfsCluster.getConf();
  }
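
  // Reads the evictor's instance-paths log file back from HDFS as a String.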
  private String readLogFile(Path logFile) throws IOException {
    Configuration conf = cluster.getConf();
    FileSystem fs = FileSystem.get(conf);
    ByteArrayOutputStream writer = new ByteArrayOutputStream();
    InputStream data = fs.open(logFile);
    IOUtils.copyBytes(data, writer, 4096, true);
    return writer.toString();
  }

  @Test
  public void testEviction6() throws Exception {
    try {
      Configuration conf = cluster.getConf();
      FileSystem fs = FileSystem.get(conf);
      fs.delete(new Path("/"), true);
      stream.clear();

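      // Seed identical dated instances under all three location types; the
      // /data pair is reused for the failure assertions.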
      Pair<List<String>, List<String>> pair =
          createTestData("feed1", "yyyy-MM-dd/'more'/yyyy", 10, TimeUnit.DAYS, "/data");
      createTestData("feed1", "yyyy-MM-dd/'more'/yyyy", 10, TimeUnit.DAYS, "/stats");
      createTestData("feed1", "yyyy-MM-dd/'more'/yyyy", 10, TimeUnit.DAYS, "/meta");

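      // Multi-location feed path: DATA, STATS and META locations joined by '#'.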
      final String storageUrl = cluster.getConf().get(HadoopClientFactory.FS_DEFAULT_NAME_KEY);
      String dataPath =
          "DATA="
              + storageUrl
              + "/data/YYYY/feed1/mmHH/dd/MM/?{YEAR}-?{MONTH}-?{DAY}/more/?{YEAR}"
              + "#STATS="
              + storageUrl
              + "/stats/YYYY/feed1/mmHH/dd/MM/?{YEAR}-?{MONTH}-?{DAY}/more/?{YEAR}"
              + "#META="
              + storageUrl
              + "/meta/YYYY/feed1/mmHH/dd/MM/?{YEAR}-?{MONTH}-?{DAY}/more/?{YEAR}";
      String logFile = hdfsUrl + "/falcon/staging/feed/instancePaths-2012-01-01-01-00.csv";

      FeedEvictor.main(
          new String[] {
            "-feedBasePath", dataPath,
            "-retentionType", "instance",
            "-retentionLimit", "days(10)",
            "-timeZone", "UTC",
            "-frequency", "daily",
            "-logFile", logFile,
            "-falconFeedStorageType", Storage.TYPE.FILESYSTEM.name(),
          });

      assertFailures(fs, pair);

      Assert.assertEquals(readLogFile(new Path(logFile)), getExpectedInstancePaths(dataPath));

    } catch (Exception e) {
      Assert.fail("Unknown exception", e);
    }
  }

// Example #7
  @BeforeClass
  public void setup() throws Exception {
    super.setup();
    createDB(DB_SQL_FILE);
    falconJPAService.init();
    this.dfsCluster = EmbeddedCluster.newCluster("testCluster");
    this.conf = dfsCluster.getConf();
    registerServices();
  }

// Example #8
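  /**
   * Unmarshals the entity definition from 'resource', optionally renaming it,
   * and publishes it to the ConfigurationStore. For CLUSTER entities with a
   * writeEndpoint, also creates placeholder retention/replication ext jars
   * under the cluster's working directory.
   */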
  protected Entity storeEntity(EntityType type, String name, String resource, String writeEndpoint)
      throws Exception {
    Unmarshaller unmarshaller = type.getUnmarshaller();
    ConfigurationStore store = ConfigurationStore.get();
    switch (type) {
      case CLUSTER:
        Cluster cluster = (Cluster) unmarshaller.unmarshal(this.getClass().getResource(resource));
        if (name != null) {
          store.remove(type, name);
          cluster.setName(name);
        }
        store.publish(type, cluster);

        if (writeEndpoint != null) {
          ClusterHelper.getInterface(cluster, Interfacetype.WRITE).setEndpoint(writeEndpoint);
          FileSystem fs = new Path(writeEndpoint).getFileSystem(EmbeddedCluster.newConfiguration());
          fs.create(
                  new Path(
                      ClusterHelper.getLocation(cluster, ClusterLocationType.WORKING).getPath(),
                      "libext/FEED/retention/ext.jar"))
              .close();
          fs.create(
                  new Path(
                      ClusterHelper.getLocation(cluster, ClusterLocationType.WORKING).getPath(),
                      "libext/FEED/replication/ext.jar"))
              .close();
        }

        return cluster;

      case FEED:
        Feed feed = (Feed) unmarshaller.unmarshal(this.getClass().getResource(resource));
        if (name != null) {
          store.remove(type, name);
          feed.setName(name);
        }
        store.publish(type, feed);
        return feed;

      case PROCESS:
        Process process = (Process) unmarshaller.unmarshal(this.getClass().getResource(resource));
        if (name != null) {
          store.remove(type, name);
          process.setName(name);
        }
        store.publish(type, process);
        return process;

      default:
    }

    throw new IllegalArgumentException("Unhandled type: " + type);
  }
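
  // Combines the fixed junk paths with generated dated instances for one
  // location type; returns Pair.of(inRange, outOfRange).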
  private Pair<List<String>, List<String>> createTestData(
      String feed, String mask, int period, TimeUnit timeUnit, String locationType)
      throws Exception {
    Configuration conf = cluster.getConf();
    FileSystem fs = FileSystem.get(conf);

    List<String> outOfRange = new ArrayList<String>();
    List<String> inRange = new ArrayList<String>();

    Pair<List<String>, List<String>> pair = createTestData(locationType);
    outOfRange.addAll(pair.second);
    inRange.addAll(pair.first);

    pair = generateInstances(fs, feed, mask, period, timeUnit, locationType, true);
    outOfRange.addAll(pair.second);
    inRange.addAll(pair.first);
    return Pair.of(inRange, outOfRange);
  }
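
  // Paths that do not parse as instance dates; they are expected to survive
  // eviction untouched, hence they go on the outOfRange list.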
  private Pair<List<String>, List<String>> createTestData(String locationType) throws Exception {
    Configuration conf = cluster.getConf();
    FileSystem fs = FileSystem.get(conf);

    List<String> outOfRange = new ArrayList<String>();
    List<String> inRange = new ArrayList<String>();

    touch(fs, locationType + "/YYYY/feed3/dd/MM/02/more/hello", true);
    touch(fs, locationType + "/YYYY/feed4/dd/MM/02/more/hello", true);
    touch(fs, locationType + "/YYYY/feed1/mmHH/dd/MM/bad-va-lue/more/hello", true);
    touch(fs, locationType + "/somedir/feed1/mmHH/dd/MM/bad-va-lue/more/hello", true);
    outOfRange.add(locationType + "/YYYY/feed3/dd/MM/02/more/hello");
    outOfRange.add(locationType + "/YYYY/feed4/dd/MM/02/more/hello");
    outOfRange.add(locationType + "/YYYY/feed1/mmHH/dd/MM/bad-va-lue/more/hello");
    outOfRange.add(locationType + "/somedir/feed1/mmHH/dd/MM/bad-va-lue/more/hello");

    return Pair.of(inRange, outOfRange);
  }

  @AfterClass
  public void close() throws Exception {
    cluster.shutdown();
  }

  @BeforeClass
  public void start() throws Exception {
    cluster = EmbeddedCluster.newCluster("test");
    hdfsUrl = cluster.getConf().get(HadoopClientFactory.FS_DEFAULT_NAME_KEY);
    FeedEvictor.OUT.set(stream);
  }