Example #1
  static void deserializeComparators(Configuration conf, TupleMRConfig mrConfig)
      throws TupleMRException {
    String[] comparatorRefs = conf.getStrings(CONF_COMPARATOR_REFERENCES);
    String[] comparatorInstanceFiles = conf.getStrings(CONF_COMPARATOR_INSTANCES);

    if (comparatorRefs == null) {
      return;
    }
    try {
      for (int i = 0; i < comparatorRefs.length; i++) {
        String[] ref = comparatorRefs[i].split("\\|");
        String instanceFile = comparatorInstanceFiles[i];

        // Here we pass "false" as the last parameter because otherwise it could
        // cause an infinite loop. We will call setConf() later.
        RawComparator<?> comparator =
            InstancesDistributor.loadInstance(conf, RawComparator.class, instanceFile, false);

        if (ref[0].equals(COMMON)) {
          setComparator(mrConfig.getCommonCriteria(), ref[1], comparator);
        } else {
          setComparator(
              mrConfig.getSpecificOrderBys().get(Integer.valueOf(ref[0])), ref[1], comparator);
        }
      }
    } catch (IOException e) {
      throw new TupleMRException(e);
    }
  }
Example #2
 /**
  * If security is turned off, what is the default web user?
  *
  * @param conf the configuration to look in
  * @return the remote user that was configured
  */
 public static UserGroupInformation getDefaultWebUser(Configuration conf) throws IOException {
   String[] strings = conf.getStrings(JspHelper.WEB_UGI_PROPERTY_NAME);
   if (strings == null || strings.length == 0) {
     throw new IOException("Cannot determine UGI from request or conf");
   }
   return UserGroupInformation.createRemoteUser(strings[0]);
 }
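A minimal usage sketch (not from the original source; the property values shown are assumptions) illustrating how the method above resolves the web user from the configuration:

   // Hypothetical: configure the web UGI property, then resolve the default web user.
   Configuration conf = new Configuration();
   conf.setStrings(JspHelper.WEB_UGI_PROPERTY_NAME, "webuser", "webgroup");
   UserGroupInformation webUGI = getDefaultWebUser(conf); // createRemoteUser("webuser")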
    @Override
    public void setup(Context context) throws IOException {
      Configuration conf = context.getConfiguration();

      String[] sourceList = conf.getStrings(SOURCES);
      sources = new ArrayList<Integer>(sourceList.length);
      for (int i = 0; i < sourceList.length; i++) {
        sources.add(i, Integer.parseInt(sourceList[i]));
      }

      String[] missingValues = conf.getStrings("MissingMass");
      missingMass = new float[missingValues.length];
      for (int i = 0; i < missingValues.length; i++) {
        missingMass[i] = Float.parseFloat(missingValues[i]);
      }
    }
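For context, a hedged driver-side sketch (SOURCES is the same key assumed above; the concrete values are made up) showing how the properties read by this setup() could be populated so both arrays line up:

      // Hypothetical driver-side configuration matching the setup() above.
      Configuration conf = new Configuration();
      conf.setStrings(SOURCES, "0", "17", "42");           // parsed with Integer.parseInt
      conf.setStrings("MissingMass", "0.1", "0.2", "0.3"); // parsed with Float.parseFloat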
  /**
   * Returns a new {@code PermissionCache} initialized with permission assignments from the {@code
   * hbase.superuser} configuration key.
   */
  private PermissionCache<Permission> initGlobal(Configuration conf) throws IOException {
    UserProvider userProvider = UserProvider.instantiate(conf);
    User user = userProvider.getCurrent();
    if (user == null) {
      throw new IOException(
          "Unable to obtain the current user, "
              + "authorization checks for internal operations will not work correctly!");
    }
    PermissionCache<Permission> newCache = new PermissionCache<Permission>();
    String currentUser = user.getShortName();

    // the system user is always included
    List<String> superusers =
        Lists.asList(
            currentUser, conf.getStrings(AccessControlLists.SUPERUSER_CONF_KEY, new String[0]));
    if (superusers != null) {
      for (String name : superusers) {
        if (AccessControlLists.isGroupPrincipal(name)) {
          newCache.putGroup(
              AccessControlLists.getGroupName(name), new Permission(Permission.Action.values()));
        } else {
          newCache.putUser(name, new Permission(Permission.Action.values()));
        }
      }
    }
    return newCache;
  }
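A hedged configuration sketch for the cache above (the user and group names are assumptions): entries are read from the hbase.superuser key, and a name prefixed with '@' is treated as a group principal by AccessControlLists.isGroupPrincipal.

    // Hypothetical: grant global permissions to one user and one group.
    Configuration conf = HBaseConfiguration.create();
    conf.setStrings(AccessControlLists.SUPERUSER_CONF_KEY, "admin", "@hbase_admins");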
  /** This test attempts to finalize the NameNode and DataNode. */
  public void testFinalize() throws Exception {
    UpgradeUtilities.initialize();

    for (int numDirs = 1; numDirs <= 2; numDirs++) {
      /* This test requires that the "current" directory not change after
       * the upgrade. Strictly speaking, it is ok for its contents to change,
       * but for now block verification is disabled so that the contents are
       * not changed.
       */
      conf = new Configuration();
      conf.setInt("dfs.datanode.scan.period.hours", -1);
      conf = UpgradeUtilities.initializeStorageStateConf(numDirs, conf);
      String[] nameNodeDirs = conf.getStrings("dfs.name.dir");
      String[] dataNodeDirs = conf.getStrings("dfs.data.dir");

      log("Finalize with existing previous dir", numDirs);
      UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "current");
      UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "previous");
      UpgradeUtilities.createStorageDirs(DATA_NODE, dataNodeDirs, "current");
      UpgradeUtilities.createStorageDirs(DATA_NODE, dataNodeDirs, "previous");
      cluster = new MiniDFSCluster(conf, 1, StartupOption.REGULAR);
      cluster.finalizeCluster(conf);
      checkResult(nameNodeDirs, dataNodeDirs);

      log("Finalize without existing previous dir", numDirs);
      cluster.finalizeCluster(conf);
      checkResult(nameNodeDirs, dataNodeDirs);

      cluster.shutdown();
      UpgradeUtilities.createEmptyDirs(nameNodeDirs);
      UpgradeUtilities.createEmptyDirs(dataNodeDirs);
    } // end numDir loop
  }
Example #6
  /** Load system coprocessors. Read the class names from configuration. Called by constructor. */
  protected void loadSystemCoprocessors(Configuration conf, String confKey) {
    Class<?> implClass = null;

    // load default coprocessors from the configuration
    String[] defaultCPClasses = conf.getStrings(confKey);
    if (defaultCPClasses == null || defaultCPClasses.length == 0) return;

    int priority = Coprocessor.PRIORITY_SYSTEM;
    List<E> configured = new ArrayList<E>();
    for (String className : defaultCPClasses) {
      className = className.trim();
      if (findCoprocessor(className) != null) {
        continue;
      }
      ClassLoader cl = this.getClass().getClassLoader();
      Thread.currentThread().setContextClassLoader(cl);
      try {
        implClass = cl.loadClass(className);
        configured.add(loadInstance(implClass, Coprocessor.PRIORITY_SYSTEM, conf));
        LOG.info(
            "System coprocessor "
                + className
                + " was loaded "
                + "successfully with priority ("
                + priority++
                + ").");
      } catch (Throwable t) {
        // We always abort if system coprocessors cannot be loaded
        abortServer(className, t);
      }
    }

    // add entire set to the collection for COW efficiency
    coprocessors.addAll(configured);
  }
Example #7
 /*
  * This test attempts to upgrade the datanode from federation
  * version -35 to a later version.
  * This test is for a non-federation cluster with a single namenode.
  */
 public void testNonFederationClusterUpgradeAfterFederationVersion() throws Exception {
   File[] baseDirs;
   UpgradeUtilities.initialize();
   for (int numDirs = 1; numDirs <= 2; numDirs++) {
     conf = new Configuration();
     conf.setInt("dfs.datanode.scan.period.hours", -1);
     conf = UpgradeUtilities.initializeStorageStateConf(numDirs, conf);
     String[] nameNodeDirs = conf.getStrings("dfs.name.dir");
     String[] dataNodeDirs = conf.getStrings("dfs.data.dir");
     log("DataNode upgrade with federation layout version in current", numDirs);
     UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "current");
     try {
       cluster = new MiniDFSCluster(conf, 0, StartupOption.UPGRADE);
       baseDirs = UpgradeUtilities.createStorageDirs(DATA_NODE, dataNodeDirs, "current");
       UpgradeUtilities.createVersionFile(
           DATA_NODE,
           baseDirs,
           new StorageInfo(
               FSConstants.FEDERATION_VERSION,
               UpgradeUtilities.getCurrentNamespaceID(cluster),
               UpgradeUtilities.getCurrentFsscTime(cluster)),
           cluster.getNameNode().getNamespaceID());
       cluster.startDataNodes(conf, 1, false, StartupOption.REGULAR, null);
       checkResult(DATA_NODE, dataNodeDirs, 0, false);
     } finally {
       if (cluster != null) cluster.shutdown();
       UpgradeUtilities.createEmptyDirs(nameNodeDirs);
       UpgradeUtilities.createEmptyDirs(dataNodeDirs);
     }
   }
 }
Example #8
 /**
  * Build up the classpath for execution; it behaves very differently on a mini test cluster vs a
  * production one.
  *
  * @param hoyaConfDir relative path to the dir containing hoya config options to put on the
  *     classpath, or null
  * @param libdir directory containing the JAR files
  * @param config the configuration
  * @param usingMiniMRCluster flag to indicate the MiniMR cluster is in use (and hence the current
  *     classpath should be used, not anything built up)
  * @return a classpath
  */
 public static String buildClasspath(
     String hoyaConfDir, String libdir, Configuration config, boolean usingMiniMRCluster) {
   // Add AppMaster.jar location to classpath
   // At some point we should not be required to add
   // the hadoop specific classpaths to the env.
   // It should be provided out of the box.
   // For now setting all required classpaths including
   // the classpath to "." for the application jar
   StringBuilder classPathEnv = new StringBuilder();
   // add the runtime classpath needed for tests to work
   if (usingMiniMRCluster) {
     // for mini cluster we pass down the java CP properties
     // and nothing else
     classPathEnv.append(System.getProperty("java.class.path"));
   } else {
     char col = File.pathSeparatorChar;
     classPathEnv.append(ApplicationConstants.Environment.CLASSPATH.$());
     String[] strs =
         config.getStrings(
             YarnConfiguration.YARN_APPLICATION_CLASSPATH,
             YarnConfiguration.DEFAULT_YARN_APPLICATION_CLASSPATH);
     if (strs != null) {
       for (String c : strs) {
         classPathEnv.append(col);
         classPathEnv.append(c.trim());
       }
     }
     classPathEnv.append(col).append("./").append(libdir).append("/*");
     if (hoyaConfDir != null) {
       classPathEnv.append(col).append(hoyaConfDir);
     }
   }
   return classPathEnv.toString();
 }
  /**
   * Set the configuration values for UGI.
   *
   * @param conf the configuration to use
   */
  private static synchronized void initialize(Configuration conf) {
    String value = conf.get(HADOOP_SECURITY_AUTHENTICATION);
    if (value == null || "simple".equals(value)) {
      useKerberos = false;
      useConfiguredFileAuth = false;
    } else if ("kerberos".equals(value)) {
      useKerberos = true;
      useConfiguredFileAuth = false;
    } else if ("configfile".equals(value)) {
      useKerberos = false;
      useConfiguredFileAuth = true;
    } else {
      throw new IllegalArgumentException(
          "Invalid attribute value for " + HADOOP_SECURITY_AUTHENTICATION + " of " + value);
    }

    // The getUserToGroupsMappingService will change the conf value, so record the UGI
    // information first
    if (configUGIInformation == null) {
      configUGIInformation = conf.getStrings("hadoop.client.ugi");
    }

    // If we haven't set up testing groups, use the configuration to find it
    if (!(groups instanceof TestingGroups)) {
      groups = Groups.getUserToGroupsMappingService(conf);
    }
    // Set the configuration for JAAS to be the Hadoop configuration.
    // This is done here rather than a static initializer to avoid a
    // circular dependence.
    javax.security.auth.login.Configuration existingConfig = null;
    try {
      existingConfig = javax.security.auth.login.Configuration.getConfiguration();
    } catch (SecurityException se) {
      // If no security configuration is on the classpath, then
      // we catch this exception, and we don't need to delegate
      // to anyone
    }

    if (existingConfig instanceof HadoopConfiguration) {
      LOG.info("JAAS Configuration already set up for Hadoop, not re-installing.");
    } else {
      javax.security.auth.login.Configuration.setConfiguration(
          new HadoopConfiguration(existingConfig));
    }

    // We're done initializing at this point. Important not to classload
    // KerberosName before this point, or else its static initializer
    // may call back into this same method!
    isInitialized = true;
    UserGroupInformation.conf = conf;

    // give the configuration on how to translate Kerberos names
    try {
      KerberosName.setConfiguration(conf);
    } catch (IOException ioe) {
      throw new RuntimeException(
          "Problem with Kerberos auth_to_local name " + "configuration", ioe);
    }
  }
 void setupWorkerEnv(Map<String, String> workerEnv) {
   for (String c :
       configuration.getStrings(
           YarnConfiguration.YARN_APPLICATION_CLASSPATH,
           YarnConfiguration.DEFAULT_YARN_APPLICATION_CLASSPATH)) {
     addToEnv(workerEnv, Environment.CLASSPATH.name(), c.trim());
   }
   addToEnv(workerEnv, Environment.CLASSPATH.name(), Environment.PWD.$() + File.separator + "*");
 }
 /*
  * Initialize the chain of log cleaners from the configuration. The default
  * three LogCleanerDelegates in this chain are: TimeToLiveLogCleaner,
  * ReplicationLogCleaner and SnapshotLogCleaner.
  */
 private void initLogCleanersChain() {
   String[] logCleaners = conf.getStrings("hbase.master.logcleaner.plugins");
   if (logCleaners != null) {
     for (String className : logCleaners) {
       LogCleanerDelegate logCleaner = newLogCleaner(className, conf);
       addLogCleaner(logCleaner);
     }
   }
 }
 private static ArrayList<String> restoreStrings(Configuration conf, String key) {
   assert conf != null;
   assert key != null;
   ArrayList<String> results = new ArrayList<String>();
   String[] old = conf.getStrings(key);
   if (old != null && old.length >= 1) {
     Collections.addAll(results, old);
   }
   return results;
 }
 private void setupAppMasterEnv(Map<String, String> appMasterEnv) {
   for (String c :
       conf.getStrings(
           YarnConfiguration.YARN_APPLICATION_CLASSPATH,
           YarnConfiguration.DEFAULT_YARN_APPLICATION_CLASSPATH)) {
     Apps.addToEnvironment(appMasterEnv, Environment.CLASSPATH.name(), c.trim());
   }
   Apps.addToEnvironment(
       appMasterEnv, Environment.CLASSPATH.name(), Environment.PWD.$() + File.separator + "*");
 }
    @Override
    public void setup(Context context) throws IOException {
      Configuration conf = context.getConfiguration();

      String[] sourceList = conf.getStrings(SOURCES);
      sources = new ArrayList<Integer>(sourceList.length);
      for (int i = 0; i < sourceList.length; i++) {
        sources.add(i, Integer.parseInt(sourceList[i]));
      }
    }
  public void startServletContainer(Configuration conf) throws Exception {
    if (server != null) {
      LOG.error("ServletContainer already running");
      return;
    }

    // Inject the conf for the test by being the first to create the singleton
    RESTServlet.getInstance(conf, UserProvider.instantiate(conf));

    // set up the Jersey servlet container for Jetty
    ResourceConfig app =
        new ResourceConfig()
            .packages("org.apache.hadoop.hbase.rest")
            .register(Jackson1Feature.class);
    ServletHolder sh = new ServletHolder(new ServletContainer(app));

    // set up Jetty and run the embedded server
    server = new Server(0);
    LOG.info("configured " + ServletContainer.class.getName());

    HttpConfiguration httpConfig = new HttpConfiguration();
    httpConfig.setSendDateHeader(false);
    httpConfig.setSendServerVersion(false);
    ServerConnector serverConnector =
        new ServerConnector(server, new HttpConnectionFactory(httpConfig));
    serverConnector.setPort(testServletPort);

    server.addConnector(serverConnector);

    // set up context
    ServletContextHandler ctxHandler =
        new ServletContextHandler(server, "/", ServletContextHandler.SESSIONS);
    ctxHandler.addServlet(sh, "/*");
    // Load filters specified from configuration.
    String[] filterClasses =
        conf.getStrings(Constants.FILTER_CLASSES, ArrayUtils.EMPTY_STRING_ARRAY);
    for (String filter : filterClasses) {
      filter = filter.trim();
      ctxHandler.addFilter(filter, "/*", EnumSet.of(DispatcherType.REQUEST));
    }
    LOG.info("Loaded filter classes :" + filterClasses);

    conf.set(RESTServer.REST_CSRF_BROWSER_USERAGENTS_REGEX_KEY, ".*");
    RESTServer.addCSRFFilter(ctxHandler, conf);

    HttpServerUtil.constrainHttpMethods(ctxHandler);

    // start the server
    server.start();
    // get the port
    testServletPort = ((ServerConnector) server.getConnectors()[0]).getLocalPort();

    LOG.info("started " + server.getClass().getName() + " on port " + testServletPort);
  }
Example #16
 // TODO - Move this to MR!
 private static long[] getFileSizes(Configuration conf, String key) {
   String[] strs = conf.getStrings(key);
   if (strs == null) {
     return null;
   }
   long[] result = new long[strs.length];
   for (int i = 0; i < strs.length; ++i) {
     result[i] = Long.parseLong(strs[i]);
   }
   return result;
 }
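A small hypothetical illustration (the key name and sizes are made up) of the comma-separated encoding this helper decodes:

   // Hypothetical: sizes stored under a made-up key, read back as a long[].
   Configuration conf = new Configuration();
   conf.setStrings("example.file.sizes", "1024", "2048");
   long[] sizes = getFileSizes(conf, "example.file.sizes"); // {1024L, 2048L}; null if the key is unset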
 /**
  * Gets the input paths for this input format
  *
  * @param conf the job conf
  */
 public static Path[] getInputPaths(Configuration conf) {
   String[] pathStrs =
       Preconditions.checkNotNull(
           conf.getStrings(INPUT_PATHS_KEY),
           "You must call LuceneIndexInputFormat.setInputPaths()");
   Path[] paths = new Path[pathStrs.length];
   for (int i = 0; i < pathStrs.length; i++) {
     paths[i] = new Path(pathStrs[i]);
   }
   return paths;
 }
Example #18
  /**
   * Builds and returns a set containing parameters that should be excluded from the parameter
   * space. The parameters are found as a comma-separated string in the Hadoop configuration
   * "starfish.job.optimizer.exclude.parameters"
   *
   * @param conf the configuration
   * @return the exclusion set
   */
  private static Set<String> buildParamExclusionSet(Configuration conf) {
    Set<String> excludeSet = new HashSet<String>();

    String[] excludeArray = conf.getStrings(JobOptimizer.JOB_OPT_EXCLUDE_PARAMS);
    if (excludeArray != null) {
      for (String param : excludeArray) {
        excludeSet.add(param);
      }
    }

    return excludeSet;
  }
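A brief hedged sketch of the comma-separated encoding the method above expects (the excluded parameter names are illustrative only):

    // Hypothetical: exclude two parameters from the optimizer's search space.
    Configuration conf = new Configuration();
    conf.set(JobOptimizer.JOB_OPT_EXCLUDE_PARAMS, "mapred.reduce.tasks,io.sort.mb");
    Set<String> excluded = buildParamExclusionSet(conf); // contains both names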
Example #19
 public static void setupYarnClassPath(Configuration conf, Map<String, String> appMasterEnv) {
   addToEnvironment(
       appMasterEnv,
       Environment.CLASSPATH.name(),
       appMasterEnv.get(YarnConfigKeys.ENV_FLINK_CLASSPATH));
   String[] applicationClassPathEntries =
       conf.getStrings(
           YarnConfiguration.YARN_APPLICATION_CLASSPATH,
           YarnConfiguration.DEFAULT_YARN_APPLICATION_CLASSPATH);
   for (String c : applicationClassPathEntries) {
     addToEnvironment(appMasterEnv, Environment.CLASSPATH.name(), c.trim());
   }
 }
Example #20
 @Override
 public void init(Configuration conf) {
   dependencyTypes = new HashMap<String, DependencyType>();
   supportedSchemes = new HashSet<String>();
   String[] schemes =
       conf.getStrings(
           URIHandlerService.URI_HANDLER_SUPPORTED_SCHEMES_PREFIX
               + this.getClass().getSimpleName()
               + URIHandlerService.URI_HANDLER_SUPPORTED_SCHEMES_SUFFIX,
           "hcat");
   supportedSchemes.addAll(Arrays.asList(schemes));
   classesToShip = new HCatLauncherURIHandler().getClassesForLauncher();
 }
Example #21
 // get secret keys and tokens and store them into TokenCache
 @SuppressWarnings("unchecked")
 private void populateTokenCache(Configuration conf, Credentials credentials) throws IOException {
   readTokensFromFiles(conf, credentials);
   // add the delegation tokens from configuration
   String[] nameNodes = conf.getStrings(MRJobConfig.JOB_NAMENODES);
   LOG.debug("adding the following namenodes' delegation tokens:" + Arrays.toString(nameNodes));
   if (nameNodes != null) {
     Path[] ps = new Path[nameNodes.length];
     for (int i = 0; i < nameNodes.length; i++) {
       ps[i] = new Path(nameNodes[i]);
     }
     TokenCache.obtainTokensForNamenodes(credentials, ps, conf);
   }
 }
Example #22
    protected void setup(Context context) throws IOException, InterruptedException {
      Configuration conf = context.getConfiguration();
      this.mapSleepCount = conf.getInt(MAP_SLEEP_COUNT, mapSleepCount);
      this.mapSleepDuration =
          mapSleepCount == 0 ? 0 : conf.getLong(MAP_SLEEP_TIME, 100) / mapSleepCount;
      vertexName = conf.get(org.apache.tez.mapreduce.hadoop.MRJobConfig.VERTEX_NAME);

      TaskAttemptID taId = context.getTaskAttemptID();

      ObjectRegistry objectRegistry = ObjectRegistryFactory.getObjectRegistry();
      String fooBarVal = (String) objectRegistry.get("FooBar");
      if (null == fooBarVal) {
        LOG.info("Adding FooBar key to Object cache");
        objectRegistry.add(
            ObjectLifeCycle.DAG, "FooBar", "BarFooFromTask" + taId.getTaskID().toString());
      } else {
        LOG.info(
            "Got FooBar val from Object cache"
                + ", currentTaskId="
                + taId.getTaskID().toString()
                + ", val="
                + fooBarVal);
      }

      String[] taskIds = conf.getStrings(MAP_ERROR_TASK_IDS);
      if (taId.getId() + 1 >= context.getMaxMapAttempts()) {
        finalAttempt = true;
      }
      boolean found = false;
      if (taskIds != null) {
        if (taskIds.length == 1 && taskIds[0].equals("*")) {
          found = true;
        }
        if (!found) {
          for (String taskId : taskIds) {
            if (Integer.valueOf(taskId).intValue() == taId.getTaskID().getId()) {
              found = true;
              break;
            }
          }
        }
      }
      if (found) {
        if (!finalAttempt) {
          throwError = conf.getBoolean(MAP_THROW_ERROR, false);
        }
        throwFatal = conf.getBoolean(MAP_FATAL_ERROR, false);
      }
    }
Example #23
  /**
   * Make a Properties object holding ZooKeeper config equivalent to zoo.cfg. If there is a zoo.cfg
   * in the classpath, simply read it in. Otherwise parse the corresponding config options from the
   * HBase XML configs and generate the appropriate ZooKeeper properties.
   *
   * @param conf Configuration to read from.
   * @return Properties holding mappings representing ZooKeeper zoo.cfg file.
   */
  public static Properties makeZKProps(Configuration conf) {
    // First check if there is a zoo.cfg in the CLASSPATH. If so, simply read
    // it and grab its configuration properties.
    ClassLoader cl = HQuorumPeer.class.getClassLoader();
    final InputStream inputStream = cl.getResourceAsStream(HConstants.ZOOKEEPER_CONFIG_NAME);
    if (inputStream != null) {
      try {
        return parseZooCfg(conf, inputStream);
      } catch (IOException e) {
        LOG.warn("Cannot read " + HConstants.ZOOKEEPER_CONFIG_NAME + ", loading from XML files", e);
      }
    }

    // Otherwise, use the configuration options from HBase's XML files.
    Properties zkProperties = new Properties();

    // Directly map all of the hbase.zookeeper.property.KEY properties.
    for (Entry<String, String> entry : conf) {
      String key = entry.getKey();
      if (key.startsWith(ZK_CFG_PROPERTY)) {
        String zkKey = key.substring(ZK_CFG_PROPERTY_SIZE);
        String value = entry.getValue();
        // If the value has variable substitutions, we need to do a get.
        if (value.contains(VARIABLE_START)) {
          value = conf.get(key);
        }
        zkProperties.put(zkKey, value);
      }
    }

    // If clientPort is not set, assign the default
    if (zkProperties.getProperty(ZK_CLIENT_PORT_KEY) == null) {
      zkProperties.put(ZK_CLIENT_PORT_KEY, HConstants.DEFAULT_ZOOKEPER_CLIENT_PORT);
    }

    // Create the server.X properties.
    int peerPort = conf.getInt("hbase.zookeeper.peerport", 2888);
    int leaderPort = conf.getInt("hbase.zookeeper.leaderport", 3888);

    final String[] serverHosts = conf.getStrings(HConstants.ZOOKEEPER_QUORUM, "localhost");
    for (int i = 0; i < serverHosts.length; ++i) {
      String serverHost = serverHosts[i];
      String address = serverHost + ":" + peerPort + ":" + leaderPort;
      String key = "server." + i;
      zkProperties.put(key, address);
    }

    return zkProperties;
  }
    @Override
    public void setup(Context context) throws IOException {
      Configuration conf = context.getConfiguration();

      String[] sourceList = conf.getStrings(SOURCES);
      sources = new ArrayList<Integer>(sourceList.length);
      for (int i = 0; i < sourceList.length; i++) {
        sources.add(i, Integer.parseInt(sourceList[i]));
      }

      totalMass = new float[sources.size()];
      for (int i = 0; i < sourceList.length; i++) {
        totalMass[i] = Float.NEGATIVE_INFINITY;
      }
    }
Example #25
 private void init(Configuration conf) {
   ArrayList<String> args = new ArrayList<String>();
   for (String arg : conf.getStrings(TCP_SERVER_START_ARGS, DEFAULT_TCP_SERVER_START_ARGS)) {
     int pos = arg.indexOf('=');
     if (pos == -1) {
       args.add(arg.trim());
     } else {
       args.add(arg.substring(0, pos).trim());
       args.add(arg.substring(pos + 1).trim());
     }
   }
   args.add("-tcpPort");
   args.add("" + tcpPort);
   super.init(args.toArray(new String[0]));
 }
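A hedged sketch of what the parsing above produces (the argument values are assumptions): an entry without '=' is passed through unchanged, while a "name=value" entry is split into two consecutive arguments before "-tcpPort" is appended.

   // Hypothetical start arguments for the embedded server.
   conf.setStrings(TCP_SERVER_START_ARGS, "-tcpAllowOthers", "-baseDir=/tmp/h2");
   // args becomes: ["-tcpAllowOthers", "-baseDir", "/tmp/h2", "-tcpPort", <tcpPort>]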
Example #26
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    String[] otherArgs = new GenericOptionsParser(conf, args).getRemainingArgs();
    if (otherArgs.length < 2) {
      System.out.println("Caching and Versions  not specified");
      System.exit(-1);
    }
    int caching = -1;
    int versions = -1;
    try {
      caching = Integer.parseInt(otherArgs[0]);
    } catch (NumberFormatException nfe) {
      caching = DEFAULT_CACHING;
    }
    try {
      versions = Integer.parseInt(otherArgs[1]);
    } catch (NumberFormatException nfe) {
      versions = DEFAULT_VERSIONS;
    }

    String[] tableName = conf.getStrings(TABLE_NAME_TO_INDEX);
    if (tableName == null) {
      System.out.println(
          "Wrong usage.  Usage is pass the table -Dindex.tablename='table1' "
              + "-Dtable.columns.index='IDX1=>cf1:[q1->datatype& length],[q2],"
              + "[q3];cf2:[q1->datatype&length],[q2->datatype&length],[q3->datatype& lenght]#IDX2=>cf1:q5,q5'");
      System.out.println("The format used here is: ");
      System.out.println("IDX1 - Index name");
      System.out.println("cf1 - Columnfamilyname");
      System.out.println("q1 - qualifier");
      System.out.println("datatype - datatype (Int, String, Double, Float)");
      System.out.println("length - length of the value");
      System.out.println("The columnfamily should be seperated by ';'");
      System.out.println(
          "The qualifier and the datatype and its length should be enclosed in '[]'."
              + "  The qualifier details are specified using '->' following qualifer name and the details are seperated by '&'");
      System.out.println("If the qualifier details are not specified default values are used.");
      System.out.println("# is used to seperate between two index details");
      System.out.println("Pass the scanner caching and maxversions as arguments.");
      System.exit(-1);
    }
    String tableNameToIndex = tableName[0];
    IndexUtils.createIndexTable(tableNameToIndex, conf, cfs);
    createMapReduceJob(tableNameToIndex, conf, caching, versions);
  }
Example #27
 private Schema loadSchema(Configuration conf, String[] baseSchemas, String extSchema)
     throws SAXException, IOException {
   List<StreamSource> sources = new ArrayList<StreamSource>();
   for (String baseSchema : baseSchemas) {
     sources.add(new StreamSource(IOUtils.getResourceAsStream(baseSchema, -1)));
   }
   String[] schemas = conf.getStrings(extSchema);
   if (schemas != null) {
     for (String schema : schemas) {
       schema = schema.trim();
       if (!schema.isEmpty()) {
         sources.add(new StreamSource(IOUtils.getResourceAsStream(schema, -1)));
       }
     }
   }
   SchemaFactory factory = SchemaFactory.newInstance(XMLConstants.W3C_XML_SCHEMA_NS_URI);
   return factory.newSchema(sources.toArray(new StreamSource[sources.size()]));
 }
    /**
     * This method gets called every time before any read/write to make sure that any change to
     * localDirs is reflected immediately.
     */
    private synchronized void confChanged(Configuration conf) throws IOException {
      String newLocalDirs = conf.get(contextCfgItemName);
      if (!newLocalDirs.equals(savedLocalDirs)) {
        String[] localDirs = conf.getStrings(contextCfgItemName);
        localFS = FileSystem.getLocal(conf);
        int numDirs = localDirs.length;
        ArrayList<String> dirs = new ArrayList<String>(numDirs);
        ArrayList<DF> dfList = new ArrayList<DF>(numDirs);
        for (int i = 0; i < numDirs; i++) {
          try {
            // filter problematic directories
            Path tmpDir = new Path(localDirs[i]);
            if (localFS.mkdirs(tmpDir) || localFS.exists(tmpDir)) {
              try {
                DiskChecker.checkDir(new File(localDirs[i]));
                dirs.add(localDirs[i]);
                dfList.add(new DF(new File(localDirs[i]), 30000));
              } catch (DiskErrorException de) {
                LOG.warn(localDirs[i] + " is not writable\n" + StringUtils.stringifyException(de));
              }
            } else {
              LOG.warn("Failed to create " + localDirs[i]);
            }
          } catch (IOException ie) {
            LOG.warn(
                "Failed to create "
                    + localDirs[i]
                    + ": "
                    + ie.getMessage()
                    + "\n"
                    + StringUtils.stringifyException(ie));
          } // ignore
        }
        localDirsPath = new Path[dirs.size()];
        for (int i = 0; i < localDirsPath.length; i++) {
          localDirsPath[i] = new Path(dirs.get(i));
        }
        dirDF = dfList.toArray(new DF[dirs.size()]);
        savedLocalDirs = newLocalDirs;

        // randomize the first disk picked in the round-robin selection
        dirNumLastAccessed = dirIndexRandomizer.nextInt(dirs.size());
      }
    }
Example #29
  /*
   * This test attempts to upgrade the datanode from federation version -35 to
   * a later version. This test is for a federation cluster with 2 namenodes. It
   * changes the layout version and ctime.
   */
  public void testFederationClusterUpgradeAfterFederationVersionWithCTimeChange() throws Exception {
    File[] baseDirs;
    Configuration baseConf = new Configuration();
    UpgradeUtilities.initialize(2, baseConf, true);
    for (int numDirs = 1; numDirs <= 2; numDirs++) {
      conf = new Configuration();
      conf.setInt("dfs.datanode.scan.period.hours", -1);
      conf = UpgradeUtilities.initializeStorageStateConf(numDirs, conf);
      String[] nameNodeDirs = conf.getStrings("dfs.name.dir");
      String[] dataNodeDirs = conf.getStrings("dfs.data.dir");
      log("DataNode upgrade with federation layout version in current and ctime change", numDirs);
      UpgradeUtilities.createFederatedNameNodeStorageDirs(nameNodeDirs);
      conf.set(
          FSConstants.DFS_FEDERATION_NAMESERVICES,
          baseConf.get(FSConstants.DFS_FEDERATION_NAMESERVICES));
      try {
        cluster = new MiniDFSCluster(conf, 0, StartupOption.UPGRADE, false, 2);
        baseDirs = UpgradeUtilities.createStorageDirs(DATA_NODE, dataNodeDirs, "current");
        for (int i = 0; i < 2; i++) {
          UpgradeUtilities.createVersionFile(
              DATA_NODE,
              baseDirs,
              new StorageInfo(
                  FSConstants.FEDERATION_VERSION,
                  cluster.getNameNode(i).getNamespaceID(),
                  cluster.getNameNode(i).versionRequest().getCTime() - 1),
              cluster.getNameNode(i).getNamespaceID());
        }
        cluster.startDataNodes(conf, 1, false, StartupOption.REGULAR, null);

        for (int i = 0; i < 2; i++) {
          checkResult(DATA_NODE, dataNodeDirs, i, false);
        }
      } finally {
        if (cluster != null) cluster.shutdown();
        UpgradeUtilities.createEmptyDirs(nameNodeDirs);
        UpgradeUtilities.createEmptyDirs(dataNodeDirs);
      }
    }
  }
  public void initializeClusters() throws IOException {
    LOG.info("Initializing Clusters");
    String[] clusterNames = conf.getStrings(BALANCER_CLUSTERS_CONF);
    for (String clusterName : clusterNames) {
      String httpAddr = conf.get(BALANCER_CLUSTER_PREFIX + clusterName);

      int minLoad = conf.getInt(BALANCER_CLUSTER_PREFIX + clusterName + CLUSTER_MIN_LOAD, 100);
      int maxLoad = conf.getInt(BALANCER_CLUSTER_PREFIX + clusterName + CLUSTER_MAX_LOAD, 100);
      boolean waitForMaps =
          conf.getBoolean(BALANCER_CLUSTER_PREFIX + clusterName + CLUSTER_WAIT_FOR_MAPS, false);
      int minNodes = conf.getInt(BALANCER_CLUSTER_PREFIX + clusterName + CLUSTER_MIN_NODES, 1);
      String exclusiveTrackers =
          conf.get(BALANCER_CLUSTER_PREFIX + clusterName + CLUSTER_NOT_TO_MOVE_FILE);

      Cluster cluster =
          new Cluster(
              httpAddr, minLoad, maxLoad, waitForMaps, minNodes, ttLauncher, exclusiveTrackers);
      LOG.info("Created a cluster " + httpAddr);
      cluster.load();
      clusters.put(clusterName, cluster);
    }
  }