// For notifications coming from workflow post-processing, the start and end times need
// to be populated.
 private void updateContextWithTime(WorkflowExecutionContext context) {
   try {
     InstancesResult result =
         WorkflowEngineFactory.getWorkflowEngine()
             .getJobDetails(context.getClusterName(), context.getWorkflowId());
     Date startTime = result.getInstances()[0].startTime;
     Date endTime = result.getInstances()[0].endTime;
     Date now = new Date();
     if (startTime == null) {
       startTime = now;
     }
     if (endTime == null) {
       endTime = now;
     }
     context.setValue(WorkflowExecutionArgs.WF_START_TIME, Long.toString(startTime.getTime()));
     context.setValue(WorkflowExecutionArgs.WF_END_TIME, Long.toString(endTime.getTime()));
   } catch (FalconException e) {
     LOG.error(
         "Unable to retrieve job details for "
             + context.getWorkflowId()
             + " on cluster "
             + context.getClusterName(),
         e);
   }
 }
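  // A minimal sketch of where updateContextWithTime(...) might be invoked; the handler
  // name and the notification hook below are assumptions for illustration, not Falcon's
  // actual API.
  private void onPostProcessingNotification(WorkflowExecutionContext context) {
    updateContextWithTime(context); // backfill WF_START_TIME / WF_END_TIME if absent
    // ... hand the enriched context to downstream listeners ...
  }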
  /**
   * List feed instances with the -offset and -numResults params, expecting a list of feed
   * instances that starts at the right offset and contains the expected number of instances.
   */
  @Test
  public void testFeedOffsetNumResults()
      throws URISyntaxException, IOException, AuthenticationException, InterruptedException {
    // check the default value of the numResults param. Expecting 10 instances.
    InstancesResult r = prism.getFeedHelper().listInstances(feedName, null, null);
    InstanceUtil.validateResponse(r, 10, 1, 1, 4, 4);

    // change numResults to 6; 6 instances are expected
    r = prism.getFeedHelper().listInstances(feedName, "numResults=6", null);
    InstanceUtil.validateResponse(r, 6, 1, 1, 4, 0);

    // use a start option without a numResults parameter. 10 instances are expected
    r = prism.getFeedHelper().listInstances(feedName, "start=" + startTime, null);
    InstanceUtil.validateResponse(r, 10, 1, 1, 4, 4);

    // use a start option with a numResults value smaller than the default.
    r = prism.getFeedHelper().listInstances(feedName, "start=" + startTime + "&numResults=8", null);
    InstanceUtil.validateResponse(r, 8, 1, 1, 4, 2);

    // use a start option with a numResults value greater than the default.
    r =
        prism
            .getFeedHelper()
            .listInstances(feedName, "start=" + startTime + "&numResults=12", null);
    InstanceUtil.validateResponse(r, 12, 1, 1, 4, 6);

    // get all instances
    InstancesResult.Instance[] allInstances = r.getInstances();

    // add an offset param to the request; (total - offset) instances are expected.
    int offset = 3;
    r =
        prism
            .getFeedHelper()
            .listInstances(
                feedName, "start=" + startTime + "&offset=" + offset + "&numResults=12", null);
    InstanceUtil.validateResponse(r, 9, 0, 0, 3, 6);

    // check that expected instances were retrieved
    InstancesResult.Instance[] instances = r.getInstances();
    for (int i = 0; i < 9; i++) {
      LOGGER.info("Comparing instances: " + instances[i] + " and " + allInstances[i + offset]);
      Assert.assertEquals(instances[i].getInstance(), allInstances[i + offset].getInstance());
    }
    // use different offset and numResults params in the request
    offset = 6;
    r =
        prism
            .getFeedHelper()
            .listInstances(
                feedName, "start=" + startTime + "&offset=" + offset + "&numResults=6", null);
    InstanceUtil.validateResponse(r, 6, 0, 0, 0, 6);

    // check that expected instances are present in response
    instances = r.getInstances();
    for (int i = 0; i < 6; i++) {
      LOGGER.info("Comparing instances: " + instances[i] + " and " + allInstances[i + offset]);
      Assert.assertEquals(instances[i].getInstance(), allInstances[i + offset].getInstance());
    }
  }
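  // The numeric arguments to InstanceUtil.validateResponse above appear to follow the
  // pattern (result, total, running, suspended, waiting, killed); that reading is
  // inferred from the surrounding assertions, not a documented contract. A hypothetical
  // wrapper that names the counts would make the assertions self-describing:
  private static void assertInstanceCounts(
      InstancesResult r, int total, int running, int suspended, int waiting, int killed) {
    InstanceUtil.validateResponse(r, total, running, suspended, waiting, killed);
  }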
  @Test(enabled = false)
  public void testTableReplicationWithExistingTargetPartition() throws Exception {
    final String feedName = "customer-table-replicating-feed";
    final Map<String, String> overlay = sourceContext.getUniqueOverlay();
    String filePath =
        TestContext.overlayParametersOverTemplate("/table/primary-cluster.xml", overlay);
    Assert.assertEquals(
        TestContext.executeWithURL("entity -submit -type cluster -file " + filePath), 0);

    filePath = TestContext.overlayParametersOverTemplate("/table/bcp-cluster.xml", overlay);
    Assert.assertEquals(
        TestContext.executeWithURL("entity -submit -type cluster -file " + filePath), 0);

    HCatPartition sourcePartition =
        HiveTestUtils.getPartition(
            sourceMetastoreUrl, SOURCE_DATABASE_NAME, SOURCE_TABLE_NAME, "ds", PARTITION_VALUE);
    Assert.assertNotNull(sourcePartition);

    addPartitionToTarget();
    // verify that the partition exists on the target before replication starts,
    // to confirm that the import drops the existing partition before importing the new one
    HCatPartition targetPartition =
        HiveTestUtils.getPartition(
            targetMetastoreUrl, TARGET_DATABASE_NAME, TARGET_TABLE_NAME, "ds", PARTITION_VALUE);
    Assert.assertNotNull(targetPartition);

    filePath =
        TestContext.overlayParametersOverTemplate(
            "/table/customer-table-replicating-feed.xml", overlay);
    Assert.assertEquals(
        TestContext.executeWithURL("entity -submitAndSchedule -type feed -file " + filePath), 0);

    // wait until the workflow job completes
    WorkflowJob jobInfo =
        OozieTestUtils.getWorkflowJob(
            targetContext.getCluster().getCluster(),
            OozieClient.FILTER_NAME + "=FALCON_FEED_REPLICATION_" + feedName);
    Assert.assertEquals(jobInfo.getStatus(), WorkflowJob.Status.SUCCEEDED);

    // verify that the partition exists on the target after replication
    targetPartition =
        HiveTestUtils.getPartition(
            targetMetastoreUrl, TARGET_DATABASE_NAME, TARGET_TABLE_NAME, "ds", PARTITION_VALUE);
    Assert.assertNotNull(targetPartition);

    InstancesResult response =
        targetContext
            .getService()
            .path("api/instance/running/feed/" + feedName)
            .header("Cookie", targetContext.getAuthenticationToken())
            .accept(MediaType.APPLICATION_JSON)
            .get(InstancesResult.class);
    Assert.assertEquals(response.getStatus(), APIResult.Status.SUCCEEDED);

    TestContext.executeWithURL("entity -delete -type feed -name customer-table-replicating-feed");
    TestContext.executeWithURL("entity -delete -type cluster -name primary-cluster");
    TestContext.executeWithURL("entity -delete -type cluster -name bcp-cluster");
  }
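  // addPartitionToTarget() is called above but not shown in this listing. A plausible
  // sketch using the standard HCatClient API; this is an illustration, not Falcon's
  // actual helper, and it assumes the targetMetastoreUrl and TARGET_* fields of this
  // test class:
  private void addPartitionToTarget() throws Exception {
    HiveConf conf = new HiveConf();
    conf.set("hive.metastore.uris", targetMetastoreUrl);
    HCatClient client = HCatClient.create(conf);
    Map<String, String> partitionSpec = new HashMap<>();
    partitionSpec.put("ds", PARTITION_VALUE);
    client.addPartition(
        HCatAddPartitionDesc.create(
                TARGET_DATABASE_NAME, TARGET_TABLE_NAME, null /* default location */, partitionSpec)
            .build());
    client.close();
  }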
 /** Test the list feed instances API with the orderBy parameter and verify the resulting order. */
 @Test
 public void testFeedOrderBy()
     throws URISyntaxException, OozieClientException, JAXBException, AuthenticationException,
         IOException, InterruptedException {
   SoftAssert softAssert = new SoftAssert();
   // orderBy start time
   InstancesResult r =
       prism.getFeedHelper().listInstances(feedName, "orderBy=startTime&sortOrder=desc", null);
   InstancesResult.Instance[] instances = r.getInstances();
   Date previousDate = new Date();
   for (InstancesResult.Instance instance : instances) {
     Date current = instance.getStartTime();
      if (current != null) { // e.g. a WAITING instance has no start time yet
       softAssert.assertTrue(
           current.before(previousDate) || current.equals(previousDate),
           "Wrong order. Current startTime :" + current + " Previous: " + previousDate);
       previousDate = (Date) current.clone();
     }
   }
   // orderBy status
   r =
       prism
           .getFeedHelper()
           .listInstances(
               feedName,
               "start=" + startTime + "&numResults=12&orderBy=status&sortOrder=desc",
               null);
   InstanceUtil.validateResponse(r, 12, 1, 1, 4, 6);
   instances = r.getInstances();
   InstancesResult.WorkflowStatus previousStatus = InstancesResult.WorkflowStatus.WAITING;
   for (InstancesResult.Instance instance : instances) {
     InstancesResult.WorkflowStatus current = instance.getStatus();
     softAssert.assertTrue(
         current.toString().compareTo(previousStatus.toString()) <= 0,
         "Wrong order. Compared " + current + " and " + previousStatus + " statuses.");
     previousStatus = current;
   }
    // orderBy endTime
   r =
       prism
           .getFeedHelper()
           .listInstances(
               feedName,
               "start=" + startTime + "&numResults=12&orderBy=endTime&sortOrder=desc",
               null);
   instances = r.getInstances();
   previousDate = new Date();
   for (InstancesResult.Instance instance : instances) {
     Date current = instance.getEndTime();
      if (current != null) { // e.g. a WAITING instance has no end time yet
       softAssert.assertTrue(
           current.before(previousDate) || current.equals(previousDate),
           "Wrong order. Current startTime :" + current + " Previous: " + previousDate);
       previousDate = (Date) current.clone();
     }
   }
   softAssert.assertAll();
 }
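  // The three descending-order loops above share one pattern. A hypothetical helper
  // (requires java.util.function.Function) would remove the duplication:
  private static void assertDescendingDates(
      SoftAssert softAssert,
      InstancesResult.Instance[] instances,
      Function<InstancesResult.Instance, Date> timeExtractor,
      String label) {
    Date previous = new Date();
    for (InstancesResult.Instance instance : instances) {
      Date current = timeExtractor.apply(instance);
      if (current != null) { // e.g. WAITING instances have no start/end time yet
        softAssert.assertTrue(
            !current.after(previous),
            "Wrong order. Current " + label + ": " + current + " Previous: " + previous);
        previous = current;
      }
    }
  }
  // usage: assertDescendingDates(softAssert, instances, InstancesResult.Instance::getStartTime, "startTime");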
 @Override
 public InstancesResult getJobDetails(String cluster, String jobId) throws FalconException {
   InstancesResult.Instance[] instances = new InstancesResult.Instance[1];
   InstancesResult result =
       new InstancesResult(APIResult.Status.SUCCEEDED, "Instance for workflow id:" + jobId);
   instances[0] = DAGEngineFactory.getDAGEngine(cluster).info(jobId);
   result.setInstances(instances);
   return result;
 }
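  // Callers such as updateContextWithTime(...) above rely on this contract: the returned
  // result always carries exactly one instance. A hedged usage sketch (cluster and job
  // ids are illustrative):
  //   InstancesResult details = getJobDetails("primary-cluster", "job-0001");
  //   InstancesResult.Instance only = details.getInstances()[0]; // array has length 1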
  @Override
  public InstancesResult getRunningInstances(Entity entity, List<LifeCycle> lifeCycles)
      throws FalconException {
    Set<String> clusters = EntityUtil.getClustersDefinedInColos(entity);
    List<InstancesResult.Instance> runInstances = new ArrayList<>();

    for (String cluster : clusters) {
      Collection<InstanceState> instances =
          STATE_STORE.getExecutionInstances(entity, cluster, InstanceState.getRunningStates());
      for (InstanceState state : instances) {
        String instanceTimeStr = state.getInstance().getInstanceTime().toString();
        InstancesResult.Instance instance =
            new InstancesResult.Instance(
                cluster, instanceTimeStr, InstancesResult.WorkflowStatus.RUNNING);
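        // a RUNNING instance has no end time yet, so only the actual start is populated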
        instance.startTime = state.getInstance().getActualStart().toDate();
        runInstances.add(instance);
      }
    }
    InstancesResult result = new InstancesResult(APIResult.Status.SUCCEEDED, "Running Instances");
    result.setInstances(runInstances.toArray(new InstancesResult.Instance[runInstances.size()]));
    return result;
  }
  private InstancesResult doJobAction(
      JobAction action,
      Entity entity,
      Date start,
      Date end,
      Properties props,
      List<LifeCycle> lifeCycles,
      boolean isForced)
      throws FalconException {
    Set<String> clusters = EntityUtil.getClustersDefinedInColos(entity);
    List<String> clusterList = getIncludedClusters(props, FALCON_INSTANCE_ACTION_CLUSTERS);
    APIResult.Status overallStatus = APIResult.Status.SUCCEEDED;
    int instanceCount = 0;

    Collection<InstanceState.STATE> states;
    switch (action) {
      case KILL:
      case SUSPEND:
        states = InstanceState.getActiveStates();
        break;
      case RESUME:
        states = new ArrayList<>();
        states.add(InstanceState.STATE.SUSPENDED);
        break;
      case PARAMS:
        // Applicable only for running and finished jobs.
        states = InstanceState.getRunningStates();
        states.addAll(InstanceState.getTerminalStates());
        states.add(InstanceState.STATE.SUSPENDED);
        break;
      case STATUS:
        states = InstanceState.getActiveStates();
        states.addAll(InstanceState.getTerminalStates());
        states.add(InstanceState.STATE.SUSPENDED);
        break;
      case RERUN:
        // Applicable only for terminal states.
        states = InstanceState.getTerminalStates();
        break;
      default:
        throw new IllegalArgumentException("Unhandled action " + action);
    }

    List<ExecutionInstance> instancesToActOn = new ArrayList<>();
    for (String cluster : clusters) {
      if (!clusterList.isEmpty() && !clusterList.contains(cluster)) {
        continue;
      }
      LOG.debug("Retrieving instances for cluster : {} for action {}", cluster, action);
      Collection<InstanceState> instances =
          STATE_STORE.getExecutionInstances(
              entity, cluster, states, new DateTime(start), new DateTime(end));
      for (InstanceState state : instances) {
        instancesToActOn.add(state.getInstance());
      }
    }

    // Sort to stay compatible with OozieWorkflowEngine, and because users expect to see
    // the most recent instances first (see the sketch after this method).
    sortInstancesDescBySequence(instancesToActOn);

    List<InstancesResult.Instance> instances = new ArrayList<>();
    for (ExecutionInstance ins : instancesToActOn) {
      instanceCount++;
      String instanceTimeStr = SchemaHelper.formatDateUTC(ins.getInstanceTime().toDate());

      InstancesResult.Instance instance = null;
      try {
        instance = performAction(ins.getCluster(), entity, action, ins, props, isForced);
        instance.instance = instanceTimeStr;
      } catch (FalconException e) {
        LOG.warn("Unable to perform action {} on cluster", action, e);
        instance = new InstancesResult.Instance(ins.getCluster(), instanceTimeStr, null);
        instance.status = InstancesResult.WorkflowStatus.ERROR;
        instance.details = e.getMessage();
        overallStatus = APIResult.Status.PARTIAL;
      }
      instances.add(instance);
    }
    if (instanceCount < 2 && overallStatus == APIResult.Status.PARTIAL) {
      overallStatus = APIResult.Status.FAILED;
    }
    InstancesResult instancesResult = new InstancesResult(overallStatus, action.name());
    instancesResult.setInstances(instances.toArray(new InstancesResult.Instance[instances.size()]));
    return instancesResult;
  }
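  // sortInstancesDescBySequence(...) is referenced above but not shown in this listing.
  // A plausible implementation, assuming ExecutionInstance exposes getInstanceSequence()
  // (the actual accessor may differ):
  private void sortInstancesDescBySequence(List<ExecutionInstance> instances) {
    // highest sequence first, matching OozieWorkflowEngine's most-recent-first ordering
    Collections.sort(instances, new Comparator<ExecutionInstance>() {
      @Override
      public int compare(ExecutionInstance left, ExecutionInstance right) {
        return Integer.compare(right.getInstanceSequence(), left.getInstanceSequence());
      }
    });
  }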
  /**
   * Test listing feed instances with custom filters, expecting only the feed instances that
   * satisfy those filters.
   */
  @Test
  public void testFeedCustomFilter()
      throws URISyntaxException, IOException, AuthenticationException, InterruptedException {
    String params = "start=" + startTime + "&filterBy=status:RUNNING";
    InstancesResult r = prism.getFeedHelper().listInstances(feedName, params, null);
    InstanceUtil.validateResponse(r, 1, 1, 0, 0, 0);

    params = "start=" + startTime + "&end=" + endTime + "&filterBy=status:RUNNING&offset=2";
    r = prism.getFeedHelper().listInstances(feedName, params, null);
    InstanceUtil.validateSuccessWOInstances(r);

    params = "start=" + startTime + "&end=" + endTime + "&filterBy=status:WAITING";
    r = prism.getFeedHelper().listInstances(feedName, params, null);
    InstanceUtil.validateResponse(r, 4, 0, 0, 4, 0);

    params =
        "start="
            + startTime
            + "&end="
            + TimeUtil.addMinsToTime(startTime, 41)
            + "&filterBy=status:WAITING";
    r = prism.getFeedHelper().listInstances(feedName, params, null);
    InstanceUtil.validateResponse(r, 3, 0, 0, 3, 0);

    params = "start=" + startTime + "&offset=1&numResults=1&filterBy=status:WAITING";
    r = prism.getFeedHelper().listInstances(feedName, params, null);
    InstanceUtil.validateResponse(r, 1, 0, 0, 1, 0);

    params = "start=" + TimeUtil.addMinsToTime(startTime, 16) + "&offset=2&numResults=12";
    r = prism.getFeedHelper().listInstances(feedName, params, null);
    InstanceUtil.validateResponse(r, 6, 0, 1, 3, 2);

    String sourceCluster = bundles[0].getClusterNames().get(0);
    String clusterName = bundles[1].getClusterNames().get(0);
    params =
        "start="
            + startTime
            + "&filterBy=STATUS:KILLED,CLUSTER:"
            + clusterName
            + "&numResults=5&orderBy=startTime&sortOrder=desc";
    r = prism.getFeedHelper().listInstances(feedName, params, null);
    InstanceUtil.validateResponse(r, 5, 0, 0, 0, 5);

    // results should be ordered by start time, descending
    SoftAssert softAssert = new SoftAssert();
    InstancesResult.Instance[] instances = r.getInstances();
    Date previousDate = new Date();
    for (InstancesResult.Instance instance : instances) {
      Date current = instance.getStartTime();
      softAssert.assertNotNull(current, "Start time shouldn't be null for KILLED instance.");
      softAssert.assertTrue(
          current.before(previousDate) || current.equals(previousDate),
          "Wrong order. Current startTime :" + current + " Previous: " + previousDate);
      previousDate = (Date) current.clone();
    }
    softAssert.assertAll();

    // the 1st, 11th, and 12th instances are missing; all other instances should be retrieved.
    params = "start=" + TimeUtil.addMinsToTime(startTime, 2) + "&offset=2";
    r = prism.getFeedHelper().listInstances(feedName, params, null);
    InstanceUtil.validateResponse(r, 9, 0, 1, 3, 5);

    // with the 1st, 11th, and 12th instances missing, all instances that have progressed should
    // be present: 5 killed + 1 suspended; but numResults=5, so 1 suspended and 4 killed
    // instances are expected.
    params =
        "start="
            + TimeUtil.addMinsToTime(startTime, 2)
            + "&filterBy=SOURCECLUSTER:"
            + sourceCluster
            + "&offset=1&numResults=5";
    r = prism.getFeedHelper().listInstances(feedName, params, null);
    InstanceUtil.validateResponse(r, 5, 0, 1, 0, 4);
  }
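  // The filterBy clauses above use comma-separated KEY:VALUE pairs. A tiny hypothetical
  // builder would keep that syntax in one place:
  private static String filterBy(String... keyValues) {
    return "filterBy=" + String.join(",", keyValues);
  }
  // usage: "start=" + startTime + "&" + filterBy("STATUS:KILLED", "CLUSTER:" + clusterName)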