Code example #1
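The `@BeforeClass` setup: it clears the entity store, registers the source (primary) and target (bcp) clusters from XML templates, copies test data to HDFS on the source, and prepares the source and target Hive tables through their metastores.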
  @BeforeClass
  public void setUp() throws Exception {
    TestContext.cleanupStore();

    Map<String, String> overlay = sourceContext.getUniqueOverlay();
    String sourceFilePath =
        TestContext.overlayParametersOverTemplate("/table/primary-cluster.xml", overlay);
    sourceContext.setCluster(sourceFilePath);

    final Cluster sourceCluster = sourceContext.getCluster().getCluster();
    String sourceStorageUrl = ClusterHelper.getStorageUrl(sourceCluster);

    // copy the test data to HDFS on the source cluster
    final String sourcePath = sourceStorageUrl + "/falcon/test/input/" + PARTITION_VALUE;
    FSUtils.copyResourceToHDFS("/apps/data/data.txt", "data.txt", sourcePath);

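    // create the source database and table in the Hive metastore, then load the partitioned test data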
    sourceMetastoreUrl =
        ClusterHelper.getInterface(sourceCluster, Interfacetype.REGISTRY).getEndpoint();
    setupHiveMetastore(sourceMetastoreUrl, SOURCE_DATABASE_NAME, SOURCE_TABLE_NAME);
    HiveTestUtils.loadData(
        sourceMetastoreUrl, SOURCE_DATABASE_NAME, SOURCE_TABLE_NAME, sourcePath, PARTITION_VALUE);

    String targetFilePath =
        TestContext.overlayParametersOverTemplate("/table/bcp-cluster.xml", overlay);
    targetContext.setCluster(targetFilePath);

    final Cluster targetCluster = targetContext.getCluster().getCluster();
    targetMetastoreUrl =
        ClusterHelper.getInterface(targetCluster, Interfacetype.REGISTRY).getEndpoint();
    setupHiveMetastore(targetMetastoreUrl, TARGET_DATABASE_NAME, TARGET_TABLE_NAME);

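    // stage the libraries required by the replication workflow on the target cluster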
    copyLibsToHDFS(targetCluster);
  }
Code example #2
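The matching `@AfterClass` teardown: it drops the Hive tables created in setup, removes the staging directories on both clusters, and deletes the submitted entities from the store.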
  @AfterClass
  public void tearDown() throws Exception {
    cleanupHiveMetastore(sourceMetastoreUrl, SOURCE_DATABASE_NAME, SOURCE_TABLE_NAME);
    cleanupHiveMetastore(targetMetastoreUrl, TARGET_DATABASE_NAME, TARGET_TABLE_NAME);

    cleanupStagingDirs(sourceContext.getCluster().getCluster(), SOURCE_DATABASE_NAME);
    cleanupStagingDirs(targetContext.getCluster().getCluster(), TARGET_DATABASE_NAME);
    TestContext.deleteEntitiesFromStore();
  }
Code example #3
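The test itself (currently disabled via `enabled = false`): it submits both cluster entities, pre-creates the partition on the target, submits and schedules the replicating feed, waits for the replication workflow to succeed, verifies the target partition and the feed instance status through the REST API, and finally deletes the entities.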
  @Test(enabled = false)
  public void testTableReplicationWithExistingTargetPartition() throws Exception {
    final String feedName = "customer-table-replicating-feed";
    final Map<String, String> overlay = sourceContext.getUniqueOverlay();
    String filePath =
        TestContext.overlayParametersOverTemplate("/table/primary-cluster.xml", overlay);
    Assert.assertEquals(
        TestContext.executeWithURL("entity -submit -type cluster -file " + filePath), 0);

    filePath = TestContext.overlayParametersOverTemplate("/table/bcp-cluster.xml", overlay);
    Assert.assertEquals(
        TestContext.executeWithURL("entity -submit -type cluster -file " + filePath), 0);

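    // the partition loaded during setup must exist on the source before replication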
    HCatPartition sourcePartition =
        HiveTestUtils.getPartition(
            sourceMetastoreUrl, SOURCE_DATABASE_NAME, SOURCE_TABLE_NAME, "ds", PARTITION_VALUE);
    Assert.assertNotNull(sourcePartition);

    addPartitionToTarget();
    // verify that the partition already exists on the target before replication starts,
    // so the import has to drop the existing partition before importing the new one
    HCatPartition targetPartition =
        HiveTestUtils.getPartition(
            targetMetastoreUrl, TARGET_DATABASE_NAME, TARGET_TABLE_NAME, "ds", PARTITION_VALUE);
    Assert.assertNotNull(targetPartition);

    filePath =
        TestContext.overlayParametersOverTemplate(
            "/table/customer-table-replicating-feed.xml", overlay);
    Assert.assertEquals(
        TestContext.executeWithURL("entity -submitAndSchedule -type feed -file " + filePath), 0);

    // wait until the workflow job completes
    WorkflowJob jobInfo =
        OozieTestUtils.getWorkflowJob(
            targetContext.getCluster().getCluster(),
            OozieClient.FILTER_NAME + "=FALCON_FEED_REPLICATION_" + feedName);
    Assert.assertEquals(jobInfo.getStatus(), WorkflowJob.Status.SUCCEEDED);

    // verify that the partition exists on the target after replication
    targetPartition =
        HiveTestUtils.getPartition(
            targetMetastoreUrl, TARGET_DATABASE_NAME, TARGET_TABLE_NAME, "ds", PARTITION_VALUE);
    Assert.assertNotNull(targetPartition);

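    // query the running instances of the feed through the Falcon REST API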
    InstancesResult response =
        targetContext
            .getService()
            .path("api/instance/running/feed/" + feedName)
            .header("Cookie", targetContext.getAuthenticationToken())
            .accept(MediaType.APPLICATION_JSON)
            .get(InstancesResult.class);
    Assert.assertEquals(response.getStatus(), APIResult.Status.SUCCEEDED);

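    // clean up the entities submitted by this test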
    TestContext.executeWithURL("entity -delete -type feed -name customer-table-replicating-feed");
    TestContext.executeWithURL("entity -delete -type cluster -name primary-cluster");
    TestContext.executeWithURL("entity -delete -type cluster -name bcp-cluster");
  }
Code example #4
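Helper used by the test to pre-populate the target: it copies the same test data to the target cluster's HDFS and loads it into the target table as an existing partition, which the replication import must later drop and replace.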
  private void addPartitionToTarget() throws Exception {
    final Cluster targetCluster = targetContext.getCluster().getCluster();
    String targetStorageUrl = ClusterHelper.getStorageUrl(targetCluster);

    // copy the test data to HDFS on the target cluster
    final String targetPath = targetStorageUrl + "/falcon/test/input/" + PARTITION_VALUE;
    FSUtils.copyResourceToHDFS("/apps/data/data.txt", "data.txt", targetPath);

    HiveTestUtils.loadData(
        targetMetastoreUrl, TARGET_DATABASE_NAME, TARGET_TABLE_NAME, targetPath, PARTITION_VALUE);
  }