Code Example #1
 static Map<String, Map<String, String>> convertBestPossibleState(
     Map<PartitionId, Map<String, String>> bestPossibleState) {
   // Re-key by the partition's string form; the per-partition state maps are reused as-is
   Map<String, Map<String, String>> result = new HashMap<String, Map<String, String>>();
   for (Map.Entry<PartitionId, Map<String, String>> entry : bestPossibleState.entrySet()) {
     result.put(entry.getKey().stringify(), entry.getValue());
   }
   return result;
 }
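A minimal usage sketch of the helper above, assuming the Helix PartitionId API shown in examples #3 to #5; the instance name and state value are hypothetical:

 Map<PartitionId, Map<String, String>> bestPossible =
     new HashMap<PartitionId, Map<String, String>>();
 Map<String, String> replicaStates = new HashMap<String, String>();
 replicaStates.put("localhost_12918", "MASTER"); // hypothetical instance and state
 bestPossible.put(PartitionId.from("TestDB0_0"), replicaStates);

 // Keys are stringified; the value maps are passed through unchanged
 Map<String, Map<String, String>> converted = convertBestPossibleState(bestPossible);
 // converted: {TestDB0_0={localhost_12918=MASTER}}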
Code Example #2
 @Override
 public void doTransition(Message message, NotificationContext context)
     throws InterruptedException {
   String instance = message.getTgtName();
   PartitionId partition = message.getPartitionId();
   // compare like with like: a PartitionId never equals a raw String
   if (instance.equals("localhost_12918")
       && partition.equals(PartitionId.from("TestDB0_0"))
       && !_done.getAndSet(true)) {
     _startCountdown.countDown();
     // this await will be interrupted since we cancel the task during handleNewSession
     _endCountdown.await();
   }
 }
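The handler combines an AtomicBoolean with two CountDownLatch instances so that exactly one transition blocks and the test can both observe and release it. A self-contained sketch of that gate pattern, with illustrative names rather than Helix API:

 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.atomic.AtomicBoolean;

 class OneShotGate {
   private final AtomicBoolean _done = new AtomicBoolean(false);
   private final CountDownLatch _startCountdown = new CountDownLatch(1);
   private final CountDownLatch _endCountdown = new CountDownLatch(1);

   void maybeBlock() throws InterruptedException {
     if (!_done.getAndSet(true)) { // only the first caller enters
       _startCountdown.countDown(); // signal the test that we are blocked
       _endCountdown.await(); // park until released (or interrupted)
     }
   }
 }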
Code Example #3
File: Message.java Project: kenwudi/helix-1
 /**
  * Get a list of partitions associated with this message
  *
  * @return list of partition ids
  */
 public List<PartitionId> getPartitionIds() {
   List<String> partitionNames = _record.getListField(Attributes.PARTITION_NAME.toString());
   if (partitionNames == null) {
     return Collections.emptyList();
   }
   List<PartitionId> partitionIds = Lists.newArrayList();
   for (String partitionName : partitionNames) {
     partitionIds.add(PartitionId.from(partitionName));
   }
   return partitionIds;
 }
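The loop could equally be written with Guava's Lists.transform (com.google.common.base.Function), the same idiom example #7 uses for ParticipantId; note that transform returns a lazy view over partitionNames rather than a copy:

 List<PartitionId> partitionIds =
     Lists.transform(partitionNames, new Function<String, PartitionId>() {
       @Override
       public PartitionId apply(String partitionName) {
         return PartitionId.from(partitionName);
       }
     });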
Code Example #4
File: Message.java Project: kenwudi/helix-1
 /**
  * Get the resource partition associated with this message
  *
  * @return partition id
  */
 public PartitionId getPartitionId() {
   return PartitionId.from(getPartitionName());
 }
Code Example #5
File: Message.java Project: kenwudi/helix-1
 /**
  * Set the id of the partition this message concerns
  *
  * @param partitionId the partition id to set; a null id is ignored
  */
 public void setPartitionId(PartitionId partitionId) {
   if (partitionId != null) {
     setPartitionName(partitionId.stringify());
   }
 }
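A small round-trip sketch exercising examples #4 and #5 together; relabelPartition is a hypothetical helper, and the partition name is illustrative:

 static void relabelPartition(Message message) {
   message.setPartitionId(PartitionId.from("TestDB0_3")); // stored via setPartitionName
   PartitionId partition = message.getPartitionId(); // re-wrapped from the stored string
   assert "TestDB0_3".equals(partition.stringify());
 }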
Code Example #6
  public static void main(String[] args) throws Exception {
    Map<String, String> env = System.getenv();
    LOG.info("Starting app master with the following environment variables");
    for (Map.Entry<String, String> entry : env.entrySet()) {
      LOG.info(entry.getKey() + "\t\t=" + entry.getValue());
    }

    Options opts = new Options();
    opts.addOption("num_containers", true, "Number of containers");

    // START ZOOKEEPER
    String dataDir = "dataDir";
    String logDir = "logDir";
    IDefaultNameSpace defaultNameSpace =
        new IDefaultNameSpace() {
          @Override
          public void createDefaultNameSpace(ZkClient zkClient) {}
        };
    try {
      FileUtils.deleteDirectory(new File(dataDir));
      FileUtils.deleteDirectory(new File(logDir));
    } catch (IOException e) {
      LOG.error(e);
    }

    final ZkServer server = new ZkServer(dataDir, logDir, defaultNameSpace);
    server.start();

    // start Generic AppMaster that interacts with Yarn RM
    AppMasterConfig appMasterConfig = new AppMasterConfig();
    String containerIdStr = appMasterConfig.getContainerId();
    ContainerId containerId = ConverterUtils.toContainerId(containerIdStr);
    ApplicationAttemptId appAttemptID = containerId.getApplicationAttemptId();

    String configFile = AppMasterConfig.AppEnvironment.APP_SPEC_FILE.toString();
    String className = appMasterConfig.getApplicationSpecFactory();

    GenericApplicationMaster genericApplicationMaster = new GenericApplicationMaster(appAttemptID);
    try {
      genericApplicationMaster.start();
    } catch (Exception e) {
      LOG.error("Unable to start application master: ", e);
    }
    ApplicationSpecFactory factory = HelixYarnUtil.createInstance(className);

    // TODO: Avoid setting static variable.
    YarnProvisioner.applicationMaster = genericApplicationMaster;
    YarnProvisioner.applicationMasterConfig = appMasterConfig;
    ApplicationSpec applicationSpec = factory.fromYaml(new FileInputStream(configFile));
    YarnProvisioner.applicationSpec = applicationSpec;
    String zkAddress = appMasterConfig.getZKAddress();
    String clusterName = appMasterConfig.getAppName();

    // CREATE CLUSTER and setup the resources
    // connect
    ZkHelixConnection connection = new ZkHelixConnection(zkAddress);
    connection.connect();

    // create the cluster
    ClusterId clusterId = ClusterId.from(clusterName);
    ClusterAccessor clusterAccessor = connection.createClusterAccessor(clusterId);
    StateModelDefinition statelessService =
        new StateModelDefinition(StateModelConfigGenerator.generateConfigForStatelessService());
    StateModelDefinition taskStateModel =
        new StateModelDefinition(StateModelConfigGenerator.generateConfigForTaskStateModel());
    clusterAccessor.createCluster(
        new ClusterConfig.Builder(clusterId)
            .addStateModelDefinition(statelessService)
            .addStateModelDefinition(taskStateModel)
            .build());
    for (String service : applicationSpec.getServices()) {
      String resourceName = service;
      // add the resource with the local provisioner
      ResourceId resourceId = ResourceId.from(resourceName);

      ServiceConfig serviceConfig = applicationSpec.getServiceConfig(resourceName);
      serviceConfig.setSimpleField("service_name", service);
      int numContainers = serviceConfig.getIntField("num_containers", 1);

      YarnProvisionerConfig provisionerConfig = new YarnProvisionerConfig(resourceId);
      provisionerConfig.setNumContainers(numContainers);

      AutoRebalanceModeISBuilder idealStateBuilder = new AutoRebalanceModeISBuilder(resourceId);
      idealStateBuilder.setStateModelDefId(statelessService.getStateModelDefId());
      idealStateBuilder.add(PartitionId.from(resourceId, "0"));
      idealStateBuilder.setNumReplica(1);
      ResourceConfig.Builder resourceConfigBuilder =
          new ResourceConfig.Builder(ResourceId.from(resourceName));
      ResourceConfig resourceConfig =
          resourceConfigBuilder
              .provisionerConfig(provisionerConfig)
              .idealState(idealStateBuilder.build()) //
              .build();
      clusterAccessor.addResource(resourceConfig);
    }
    // start controller
    ControllerId controllerId = ControllerId.from("controller1");
    HelixController controller = connection.createController(clusterId, controllerId);
    controller.start();

    // Start any pre-specified jobs
    List<TaskConfig> taskConfigs = applicationSpec.getTaskConfigs();
    if (taskConfigs != null) {
      YarnConfiguration conf = new YarnConfiguration();
      FileSystem fs = FileSystem.get(conf);
      for (TaskConfig taskConfig : taskConfigs) {
        URI yamlUri = taskConfig.getYamlURI();
        if (yamlUri != null && taskConfig.name != null) {
          InputStream is =
              readFromHDFS(
                  fs, taskConfig.name, yamlUri, applicationSpec, appAttemptID.getApplicationId());
          Workflow workflow = Workflow.parse(is);
          TaskDriver taskDriver = new TaskDriver(new ZKHelixManager(controller));
          taskDriver.start(workflow);
        }
      }
    }

    Thread shutdownhook =
        new Thread(
            new Runnable() {
              @Override
              public void run() {
                server.shutdown();
              }
            });
    Runtime.getRuntime().addShutdownHook(shutdownhook);
    Thread.sleep(10000);
  }
Code Example #7
File: FullAutoRebalancer.java Project: kanakb/helix
  @Override
  public ResourceAssignment computeResourceMapping(
      RebalancerConfiguration rebalancerConfig,
      ResourceAssignment prevAssignment,
      Cluster cluster,
      ResourceCurrentState currentState) {
    FullAutoRebalancerConfig config =
        BasicRebalancerConfig.convert(rebalancerConfig, FullAutoRebalancerConfig.class);
    StateModelDefinition stateModelDef =
        cluster.getStateModelMap().get(config.getStateModelDefId());
    // Compute a preference list based on the current ideal state
    List<PartitionId> partitions = new ArrayList<PartitionId>(config.getPartitionSet());
    Map<ParticipantId, Participant> liveParticipants = cluster.getLiveParticipantMap();
    Map<ParticipantId, Participant> allParticipants = cluster.getParticipantMap();
    int replicas = -1;
    if (partitions.size() > 0 && config.getAnyLiveParticipant(partitions.get(0))) {
      replicas = liveParticipants.size();
    } else {
      replicas = config.getReplicaCount();
    }

    // count how many replicas should be in each state
    Map<State, String> upperBounds =
        ConstraintBasedAssignment.stateConstraints(
            stateModelDef, config.getResourceId(), cluster.getConfig());
    LinkedHashMap<State, Integer> stateCountMap =
        ConstraintBasedAssignment.stateCount(
            upperBounds, stateModelDef, liveParticipants.size(), replicas);

    // get the participant lists
    List<ParticipantId> liveParticipantList =
        new ArrayList<ParticipantId>(liveParticipants.keySet());
    List<ParticipantId> allParticipantList =
        new ArrayList<ParticipantId>(cluster.getParticipantMap().keySet());

    // compute the current mapping from the current state
    Map<PartitionId, Map<ParticipantId, State>> currentMapping =
        currentMapping(config, currentState, stateCountMap);

    // If there are nodes tagged for this resource (participant group tag), use only those nodes
    Set<ParticipantId> taggedNodes = new HashSet<ParticipantId>();
    Set<ParticipantId> taggedLiveNodes = new HashSet<ParticipantId>();
    if (config.getParticipantGroupTag() != null) {
      for (ParticipantId participantId : allParticipantList) {
        if (cluster
            .getParticipantMap()
            .get(participantId)
            .hasTag(config.getParticipantGroupTag())) {
          taggedNodes.add(participantId);
          if (liveParticipants.containsKey(participantId)) {
            taggedLiveNodes.add(participantId);
          }
        }
      }
      if (!taggedLiveNodes.isEmpty()) {
        // live nodes exist that have this tag
        if (LOG.isDebugEnabled()) {
          LOG.debug(
              "found the following participants with tag "
                  + config.getParticipantGroupTag()
                  + " for "
                  + config.getResourceId()
                  + ": "
                  + taggedLiveNodes);
        }
      } else if (taggedNodes.isEmpty()) {
        // no live nodes and no configured nodes have this tag
        LOG.warn(
            "Resource "
                + config.getResourceId()
                + " has tag "
                + config.getParticipantGroupTag()
                + " but no configured participants have this tag");
      } else {
        // configured nodes have this tag, but no live nodes have this tag
        LOG.warn(
            "Resource "
                + config.getResourceId()
                + " has tag "
                + config.getParticipantGroupTag()
                + " but no live participants have this tag");
      }
      allParticipantList = new ArrayList<ParticipantId>(taggedNodes);
      liveParticipantList = new ArrayList<ParticipantId>(taggedLiveNodes);
    }

    // determine which nodes the replicas should live on
    int maxPartition = config.getMaxPartitionsPerParticipant();
    if (LOG.isDebugEnabled()) {
      LOG.debug("currentMapping: " + currentMapping);
      LOG.debug("stateCountMap: " + stateCountMap);
      LOG.debug("liveNodes: " + liveParticipantList);
      LOG.debug("allNodes: " + allParticipantList);
      LOG.debug("maxPartition: " + maxPartition);
    }
    ReplicaPlacementScheme placementScheme = new DefaultPlacementScheme();
    _algorithm =
        new AutoRebalanceStrategy(
            config.getResourceId(), partitions, stateCountMap, maxPartition, placementScheme);
    ZNRecord newMapping =
        _algorithm.typedComputePartitionAssignment(
            liveParticipantList, currentMapping, allParticipantList);

    if (LOG.isInfoEnabled()) {
      LOG.info("newMapping: " + newMapping);
    }

    // compute a full partition mapping for the resource
    if (LOG.isDebugEnabled()) {
      LOG.debug("Processing resource:" + config.getResourceId());
    }
    ResourceAssignment partitionMapping = new ResourceAssignment(config.getResourceId());
    for (PartitionId partition : partitions) {
      Set<ParticipantId> disabledParticipantsForPartition =
          ConstraintBasedAssignment.getDisabledParticipants(allParticipants, partition);
      List<String> rawPreferenceList = newMapping.getListField(partition.toString());
      if (rawPreferenceList == null) {
        rawPreferenceList = Collections.emptyList();
      }
      List<ParticipantId> preferenceList =
          Lists.transform(
              rawPreferenceList,
              new Function<String, ParticipantId>() {
                @Override
                public ParticipantId apply(String participantName) {
                  return ParticipantId.from(participantName);
                }
              });
      preferenceList =
          ConstraintBasedAssignment.getPreferenceList(cluster, partition, preferenceList);
      Map<ParticipantId, State> bestStateForPartition =
          ConstraintBasedAssignment.computeAutoBestStateForPartition(
              upperBounds,
              liveParticipants.keySet(),
              stateModelDef,
              preferenceList,
              currentState.getCurrentStateMap(config.getResourceId(), partition),
              disabledParticipantsForPartition);
      partitionMapping.addReplicaMap(partition, bestStateForPartition);
    }
    return partitionMapping;
  }
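To make the "count how many replicas should be in each state" step concrete, here is a simplified, hypothetical re-implementation of the idea behind ConstraintBasedAssignment.stateCount: states are walked in priority order and each receives up to its upper bound, where the Helix state-model conventions "N" (one per live participant) and "R" (bounded by the replica count) are resolved first. This is an illustrative sketch, not the actual Helix implementation:

  // Illustrative only; uses java.util.LinkedHashMap to preserve state priority order.
  static LinkedHashMap<String, Integer> stateCountSketch(
      LinkedHashMap<String, String> upperBounds, int liveParticipants, int replicas) {
    LinkedHashMap<String, Integer> counts = new LinkedHashMap<String, Integer>();
    int remaining = replicas;
    for (Map.Entry<String, String> entry : upperBounds.entrySet()) {
      String bound = entry.getValue();
      int cap;
      if ("N".equals(bound)) {
        cap = liveParticipants; // at most one replica per live participant
      } else if ("R".equals(bound)) {
        cap = replicas; // bounded only by the total replica count
      } else {
        cap = Integer.parseInt(bound); // explicit numeric bound, e.g. MASTER: 1
      }
      int assigned = Math.min(cap, remaining);
      counts.put(entry.getKey(), assigned);
      remaining -= assigned;
    }
    return counts;
  }

For a MASTER/SLAVE model with upperBounds {MASTER=1, SLAVE=R}, three live participants, and replicas = 3, this yields {MASTER=1, SLAVE=2}.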