Code example #1
  /**
   * Constructor.
   *
   * @param conf Configuration to use. After construction it holds the master's address.
   * @param noMasters Count of masters to start.
   * @param noRegionServers Count of regionservers to start.
   * @param masterClass Master implementation to instantiate; overridden by the class named under
   *     HConstants.MASTER_IMPL in the configuration, if present.
   * @param regionServerClass Regionserver implementation to instantiate; overridden by the class
   *     named under HConstants.REGION_SERVER_IMPL in the configuration, if present.
   * @throws IOException if a master or regionserver cannot be created.
   */
  @SuppressWarnings("unchecked")
  public LocalHBaseCluster(
      final Configuration conf,
      final int noMasters,
      final int noRegionServers,
      final Class<? extends HMaster> masterClass,
      final Class<? extends HRegionServer> regionServerClass)
      throws IOException {
    this.conf = conf;
    // Always have masters and regionservers come up on port '0' so we don't
    // clash over default ports.
    conf.set(HConstants.MASTER_PORT, "0");
    conf.set(HConstants.REGIONSERVER_PORT, "0");
    conf.set(HConstants.REGIONSERVER_INFO_PORT, "0");

    this.masterClass =
        (Class<? extends HMaster>) conf.getClass(HConstants.MASTER_IMPL, masterClass);
    // Start the HMasters.
    for (int i = 0; i < noMasters; i++) {
      addMaster(new Configuration(conf), i);
    }
    // Start the HRegionServers.
    this.regionServerClass =
        (Class<? extends HRegionServer>)
            conf.getClass(HConstants.REGION_SERVER_IMPL, regionServerClass);

    for (int i = 0; i < noRegionServers; i++) {
      addRegionServer(new Configuration(conf), i);
    }
  }
Code example #2
 @Override
 public void start() {
   try {
     Configuration conf = getConf();
     eagerInitListener.start();
     taskTrackerManager.addJobInProgressListener(eagerInitListener);
     taskTrackerManager.addJobInProgressListener(jobListener);
     poolMgr = new PoolManager(conf);
     loadMgr =
         (LoadManager)
             ReflectionUtils.newInstance(
                 conf.getClass(
                     "mapred.fairscheduler.loadmanager",
                     CapBasedLoadManager.class,
                     LoadManager.class),
                 conf);
     loadMgr.setTaskTrackerManager(taskTrackerManager);
     loadMgr.start();
     taskSelector =
         (TaskSelector)
             ReflectionUtils.newInstance(
                 conf.getClass(
                     "mapred.fairscheduler.taskselector",
                     DefaultTaskSelector.class,
                     TaskSelector.class),
                 conf);
     taskSelector.setTaskTrackerManager(taskTrackerManager);
     taskSelector.start();
     Class<?> weightAdjClass = conf.getClass("mapred.fairscheduler.weightadjuster", null);
     if (weightAdjClass != null) {
       weightAdjuster = (WeightAdjuster) ReflectionUtils.newInstance(weightAdjClass, conf);
     }
     assignMultiple = conf.getBoolean("mapred.fairscheduler.assignmultiple", false);
     sizeBasedWeight = conf.getBoolean("mapred.fairscheduler.sizebasedweight", false);
     initialized = true;
     running = true;
     lastUpdateTime = clock.getTime();
     // Start a thread to update deficits every UPDATE_INTERVAL
     if (runBackgroundUpdates) new UpdateThread().start();
     // Register servlet with JobTracker's Jetty server
     if (taskTrackerManager instanceof JobTracker) {
       JobTracker jobTracker = (JobTracker) taskTrackerManager;
       StatusHttpServer infoServer = jobTracker.infoServer;
       infoServer.setAttribute("scheduler", this);
       infoServer.addServlet("scheduler", "/scheduler", FairSchedulerServlet.class);
     }
   } catch (Exception e) {
     // Can't load one of the managers - crash the JobTracker now while it is
     // starting up so that the user notices.
     throw new RuntimeException("Failed to start FairScheduler", e);
   }
   LOG.info("Successfully configured FairScheduler");
 }
Code example #3
File: Gridmix.java (Project: AkihiroSuda/PCheck)
 private int runJob(Configuration conf, String[] argv) throws IOException, InterruptedException {
   if (argv.length < 2) {
     printUsage(System.err);
     return 1;
   }
   long genbytes = -1L;
   String traceIn = null;
   Path ioPath = null;
   URI userRsrc = null;
   userResolver =
       ReflectionUtils.newInstance(
           conf.getClass(GRIDMIX_USR_RSV, SubmitterUserResolver.class, UserResolver.class), conf);
   try {
     for (int i = 0; i < argv.length - 2; ++i) {
       if ("-generate".equals(argv[i])) {
         genbytes = StringUtils.TraditionalBinaryPrefix.string2long(argv[++i]);
       } else if ("-users".equals(argv[i])) {
         userRsrc = new URI(argv[++i]);
       } else {
         printUsage(System.err);
         return 1;
       }
     }
     if (!userResolver.setTargetUsers(userRsrc, conf)) {
       LOG.warn("Resource " + userRsrc + " ignored");
     }
     ioPath = new Path(argv[argv.length - 2]);
     traceIn = argv[argv.length - 1];
   } catch (Exception e) {
     e.printStackTrace();
     printUsage(System.err);
     return 1;
   }
   return start(conf, traceIn, ioPath, genbytes, userResolver);
 }
Code example #4
    private org.apache.hadoop.mapreduce.OutputCommitter createOutputCommitter(
        boolean newApiCommitter, JobID jobId, Configuration conf) throws Exception {
      org.apache.hadoop.mapreduce.OutputCommitter committer = null;

      LOG.info("OutputCommitter set in config " + conf.get("mapred.output.committer.class"));

      if (newApiCommitter) {
        org.apache.hadoop.mapreduce.TaskID taskId =
            new org.apache.hadoop.mapreduce.TaskID(jobId, true, 0);
        org.apache.hadoop.mapreduce.TaskAttemptID taskAttemptID =
            new org.apache.hadoop.mapreduce.TaskAttemptID(taskId, 0);
        org.apache.hadoop.mapreduce.TaskAttemptContext taskContext =
            new TaskAttemptContextImpl(conf, taskAttemptID);
        OutputFormat outputFormat =
            ReflectionUtils.newInstance(taskContext.getOutputFormatClass(), conf);
        committer = outputFormat.getOutputCommitter(taskContext);
      } else {
        committer =
            ReflectionUtils.newInstance(
                conf.getClass(
                    "mapred.output.committer.class",
                    FileOutputCommitter.class,
                    org.apache.hadoop.mapred.OutputCommitter.class),
                conf);
      }
      LOG.info("OutputCommitter is " + committer.getClass().getName());
      return committer;
    }
Code example #5
File: URIHandlerService.java (Project: Chyler/oozie-1)
  private void init(Configuration conf) throws ClassNotFoundException {
    cache = new HashMap<String, URIHandler>();

    String[] classes = ConfigurationService.getStrings(conf, URI_HANDLERS);
    for (String classname : classes) {
      Class<?> clazz = Class.forName(classname.trim());
      URIHandler uriHandler = (URIHandler) ReflectionUtils.newInstance(clazz, null);
      uriHandler.init(conf);
      for (String scheme : uriHandler.getSupportedSchemes()) {
        cache.put(scheme, uriHandler);
      }
    }

    Class<?> defaultClass = conf.getClass(URI_HANDLER_DEFAULT, null);
    defaultHandler =
        (defaultClass == null)
            ? new FSURIHandler()
            : (URIHandler) ReflectionUtils.newInstance(defaultClass, null);
    defaultHandler.init(conf);
    for (String scheme : defaultHandler.getSupportedSchemes()) {
      cache.put(scheme, defaultHandler);
    }

    initLauncherClassesToShip();
    initLauncherURIHandlerConf();

    LOG.info("Loaded urihandlers {0}", Arrays.toString(classes));
    LOG.info("Loaded default urihandler {0}", defaultHandler.getClass().getName());
  }
Code example #6
 @SuppressWarnings("unchecked")
 private void initInputFormat(Configuration conf) {
   if (realInputFormat == null) {
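      // Note: CLASS_CONF_KEY is looked up with a null default, so this assumes the key is set;
      // if it is missing, getClass(...) returns null and newInstance(...) will fail.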
     realInputFormat =
         ReflectionUtils.newInstance(conf.getClass(CLASS_CONF_KEY, null, InputFormat.class), conf);
   }
 }
Code example #7
 /**
  * Instantiate a strategy from a config property. Requires conf to have already been set (as well
  * as anything the provider might need to read).
  */
 RegionGroupingStrategy getStrategy(
     final Configuration conf, final String key, final String defaultValue) throws IOException {
   Class<? extends RegionGroupingStrategy> clazz;
   try {
     clazz = Strategies.valueOf(conf.get(key, defaultValue)).clazz;
   } catch (IllegalArgumentException exception) {
     // Fall back to them specifying a class name
     // Note that the passed default class shouldn't actually be used, since the above only fails
     // when there is a config value present.
     clazz = conf.getClass(key, IdentityGroupingStrategy.class, RegionGroupingStrategy.class);
   }
   LOG.info("Instantiating RegionGroupingStrategy of type " + clazz);
   try {
     final RegionGroupingStrategy result = clazz.newInstance();
     result.init(conf);
     return result;
   } catch (InstantiationException exception) {
     LOG.error(
         "couldn't set up region grouping strategy, check config key " + REGION_GROUPING_STRATEGY);
     LOG.debug("Exception details for failure to load region grouping strategy.", exception);
     throw new IOException("couldn't set up region grouping strategy", exception);
   } catch (IllegalAccessException exception) {
     LOG.error(
         "couldn't set up region grouping strategy, check config key " + REGION_GROUPING_STRATEGY);
     LOG.debug("Exception details for failure to load region grouping strategy.", exception);
     throw new IOException("couldn't set up region grouping strategy", exception);
   }
 }
Code example #8
 /** Gets the configured Failover proxy provider's class */
 @VisibleForTesting
 public static <T> Class<FailoverProxyProvider<T>> getFailoverProxyProviderClass(
     Configuration conf, URI nameNodeUri) throws IOException {
   if (nameNodeUri == null) {
     return null;
   }
   String host = nameNodeUri.getHost();
   String configKey = HdfsClientConfigKeys.Failover.PROXY_PROVIDER_KEY_PREFIX + "." + host;
   try {
     @SuppressWarnings("unchecked")
     Class<FailoverProxyProvider<T>> ret =
         (Class<FailoverProxyProvider<T>>)
             conf.getClass(configKey, null, FailoverProxyProvider.class);
     return ret;
   } catch (RuntimeException e) {
     if (e.getCause() instanceof ClassNotFoundException) {
       throw new IOException(
           "Could not load failover proxy provider class "
               + conf.get(configKey)
               + " which is configured for authority "
               + nameNodeUri,
           e);
     } else {
       throw e;
     }
   }
 }
Code example #9
 /**
  * Get a PathFilter instance of the filter set for the input paths.
  *
   * @return the PathFilter instance set for the job, or null if none has been set.
  */
 public static PathFilter getInputPathFilter(JobContext context) {
   Configuration conf = context.getConfiguration();
   Class<?> filterClass = conf.getClass(PATHFILTER_CLASS, null, PathFilter.class);
   return (filterClass != null)
       ? (PathFilter) ReflectionUtils.newInstance(filterClass, conf)
       : null;
 }
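For context, a minimal sketch of the setter side that pairs with this getter, assuming the standard org.apache.hadoop.mapreduce.lib.input.FileInputFormat API; the UnderscoreFilter class and the commented job setup are hypothetical, not part of the project above.

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;

// Hypothetical PathFilter that skips underscore- and dot-prefixed paths (e.g. _SUCCESS).
public class UnderscoreFilter implements PathFilter {
  @Override
  public boolean accept(Path path) {
    String name = path.getName();
    return !name.startsWith("_") && !name.startsWith(".");
  }
}

// When configuring the job, register the filter class; getInputPathFilter(context)
// would then return an UnderscoreFilter instance created through ReflectionUtils:
//   Job job = Job.getInstance(conf);
//   FileInputFormat.setInputPathFilter(job, UnderscoreFilter.class);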
Code example #10
File: HiveCatalogStore.java (Project: javaoracle/tajo)
 public HiveCatalogStore(final Configuration conf) throws InternalException {
   if (!(conf instanceof TajoConf)) {
      throw new TajoInternalError("Invalid Configuration Type: " + conf.getClass().getSimpleName());
   }
   this.conf = conf;
   this.defaultTableSpaceUri = TajoConf.getWarehouseDir((TajoConf) conf).toString();
   this.clientPool = new HiveCatalogStoreClientPool(CLIENT_POOL_SIZE, conf);
 }
Code example #11
 public String toString() {
   final org.apache.hadoop.conf.Configuration hadoopConfiguration =
       ConfUtil.makeHadoopConfiguration(this.configuration);
   final String fromString =
       this.configuration.containsKey(Constants.GREMLIN_HADOOP_GRAPH_INPUT_FORMAT)
           ? hadoopConfiguration
               .getClass(Constants.GREMLIN_HADOOP_GRAPH_INPUT_FORMAT, InputFormat.class)
               .getSimpleName()
           : "no-input";
   final String toString =
       this.configuration.containsKey(Constants.GREMLIN_HADOOP_GRAPH_OUTPUT_FORMAT)
           ? hadoopConfiguration
               .getClass(Constants.GREMLIN_HADOOP_GRAPH_OUTPUT_FORMAT, OutputFormat.class)
               .getSimpleName()
           : "no-output";
   return StringFactory.graphString(
       this, fromString.toLowerCase() + "->" + toString.toLowerCase());
 }
Code example #12
File: RPC.java (Project: jonathangizmo/HadoopDistJ)
 // return the RpcEngine configured to handle a protocol
 static synchronized RpcEngine getProtocolEngine(Class<?> protocol, Configuration conf) {
   RpcEngine engine = PROTOCOL_ENGINES.get(protocol);
   if (engine == null) {
     Class<?> impl =
         conf.getClass(ENGINE_PROP + "." + protocol.getName(), WritableRpcEngine.class);
     engine = (RpcEngine) ReflectionUtils.newInstance(impl, conf);
     PROTOCOL_ENGINES.put(protocol, engine);
   }
   return engine;
 }
Code example #13
File: Gridmix.java (Project: Jude7/bc-hadoop2.0)
  private int runJob(Configuration conf, String[] argv) throws IOException, InterruptedException {
    if (argv.length < 2) {
      printUsage(System.err);
      return 1;
    }

    // Should gridmix generate distributed cache data?
    boolean generate = false;
    long genbytes = -1L;
    String traceIn = null;
    Path ioPath = null;
    URI userRsrc = null;
    userResolver =
        ReflectionUtils.newInstance(
            conf.getClass(GRIDMIX_USR_RSV, SubmitterUserResolver.class, UserResolver.class), conf);
    try {
      for (int i = 0; i < argv.length - 2; ++i) {
        if ("-generate".equals(argv[i])) {
          genbytes = StringUtils.TraditionalBinaryPrefix.string2long(argv[++i]);
          generate = true;
        } else if ("-users".equals(argv[i])) {
          userRsrc = new URI(argv[++i]);
        } else {
          printUsage(System.err);
          return 1;
        }
      }

      if (userResolver.needsTargetUsersList()) {
        if (userRsrc != null) {
          if (!userResolver.setTargetUsers(userRsrc, conf)) {
            LOG.warn("Ignoring the user resource '" + userRsrc + "'.");
          }
        } else {
          System.err.println(
              "\n\n"
                  + userResolver.getClass()
                  + " needs target user list. Use -users option."
                  + "\n\n");
          printUsage(System.err);
          return 1;
        }
      } else if (userRsrc != null) {
        LOG.warn("Ignoring the user resource '" + userRsrc + "'.");
      }

      ioPath = new Path(argv[argv.length - 2]);
      traceIn = argv[argv.length - 1];
    } catch (Exception e) {
      e.printStackTrace();
      printUsage(System.err);
      return 1;
    }
    return start(conf, traceIn, ioPath, genbytes, userResolver, generate);
  }
Code example #14
File: MergeTextMapper.java (Project: Kangmo/sqoop)
  @Override
  protected void setup(Context c) throws IOException, InterruptedException {
    Configuration conf = c.getConfiguration();

    Class<? extends SqoopRecord> recordClass =
        (Class<? extends SqoopRecord>)
            conf.getClass(MergeJob.MERGE_SQOOP_RECORD_KEY, SqoopRecord.class);
    this.record = ReflectionUtils.newInstance(recordClass, conf);

    super.setup(c);
  }
Code example #15
File: HFileSystem.java (Project: Reidddddd/hbase)
 /**
  * Returns an instance of FileSystem wrapped in the class specified by the hbase.fs.wrapper
  * property, if one is set in the configuration; otherwise returns the unmodified FS instance
  * passed in as an argument.
  *
  * @param base Filesystem instance to wrap
  * @param conf Configuration
  * @return wrapped instance of FS, or the same instance if no wrapping configured.
  */
 private FileSystem maybeWrapFileSystem(FileSystem base, Configuration conf) {
   try {
     Class<?> clazz = conf.getClass("hbase.fs.wrapper", null);
     if (clazz != null) {
       return (FileSystem)
           clazz.getConstructor(FileSystem.class, Configuration.class).newInstance(base, conf);
     }
   } catch (Exception e) {
     LOG.error("Failed to wrap filesystem: " + e);
   }
   return base;
 }
Code example #16
 public JVMClusterUtil.MasterThread addMaster(Configuration c, final int index)
     throws IOException {
   // Create each master with its own Configuration instance so each has
   // its HConnection instance rather than share (see HBASE_INSTANCES down in
   // the guts of HConnectionManager.
   JVMClusterUtil.MasterThread mt =
       JVMClusterUtil.createMasterThread(
           c,
           (Class<? extends HMaster>) conf.getClass(HConstants.MASTER_IMPL, this.masterClass),
           index);
   this.masterThreads.add(mt);
   return mt;
 }
Code example #17
 public static Rasterizer getRasterizer(Configuration job) {
   try {
     Class<? extends Rasterizer> rasterizerClass =
         job.getClass(RasterizerClass, null, Rasterizer.class);
     if (rasterizerClass == null) throw new RuntimeException("Rasterizer class not set in job");
     Rasterizer rasterizer = rasterizerClass.newInstance();
     rasterizer.configure(job);
     return rasterizer;
   } catch (InstantiationException e) {
     throw new RuntimeException("Error creating rasterizer", e);
   } catch (IllegalAccessException e) {
     throw new RuntimeException("Error constructing rasterizer", e);
   }
 }
Code example #18
File: Codec.java (Project: imace/hops)
 public ErasureCode createErasureCode(Configuration conf) {
   // Create the erasure code implementation
   Class<?> erasureCode = null;
   try {
     erasureCode =
         conf.getClass(
             ERASURE_CODE_KEY_PREFIX + this.id, conf.getClassByName(this.erasureCodeClass));
   } catch (ClassNotFoundException e) {
     throw new RuntimeException(e);
   }
   ErasureCode code = (ErasureCode) ReflectionUtils.newInstance(erasureCode, conf);
   code.init(this);
   return code;
 }
Code example #19
File: Groups.java (Project: shahidminhas/abc)
  public Groups(Configuration conf) {
    impl =
        ReflectionUtils.newInstance(
            conf.getClass(
                CommonConfigurationKeys.HADOOP_SECURITY_GROUP_MAPPING,
                ShellBasedUnixGroupsMapping.class,
                GroupMappingServiceProvider.class),
            conf);

    cacheTimeout =
        conf.getLong(CommonConfigurationKeys.HADOOP_SECURITY_GROUPS_CACHE_SECS, 5 * 60) * 1000;

    if (LOG.isDebugEnabled())
      LOG.debug(
          "Group mapping impl=" + impl.getClass().getName() + "; cacheTimeout=" + cacheTimeout);
  }
Code example #20
  @Override
  protected void setUp() throws IOException, InterruptedException {

    // Prepare the tests' root dir
    File TEST_ROOT = new File(TEST_ROOT_DIR);
    if (!TEST_ROOT.exists()) {
      TEST_ROOT.mkdirs();
    }

    // Prepare the tests' mapred-local-dir
    ROOT_MAPRED_LOCAL_DIR = new File(TEST_ROOT_DIR, "mapred/local");
    ROOT_MAPRED_LOCAL_DIR.mkdirs();

    String[] localDirs = new String[numLocalDirs];
    for (int i = 0; i < numLocalDirs; i++) {
      File localDir = new File(ROOT_MAPRED_LOCAL_DIR, "0_" + i);
      localDirs[i] = localDir.getPath();
      localDir.mkdir();
    }

    conf = new Configuration();
    conf.setStrings("mapred.local.dir", localDirs);
    conf.set(FileSystem.FS_DEFAULT_NAME_KEY, "file:///");
    fs = FileSystem.get(conf);
    Class<? extends TaskController> taskControllerClass =
        conf.getClass(
            "mapred.task.tracker.task-controller",
            DefaultTaskController.class,
            TaskController.class);
    taskController = (TaskController) ReflectionUtils.newInstance(taskControllerClass, conf);

    // setup permissions for mapred local dir
    taskController.setup(localDirAllocator);

    // Create the temporary cache files to be used in the tests.
    firstCacheFile = new Path(TEST_ROOT_DIR, "firstcachefile");
    secondCacheFile = new Path(TEST_ROOT_DIR, "secondcachefile");
    firstCacheFilePublic = new Path(TEST_ROOT_DIR, "firstcachefileOne");
    secondCacheFilePublic = new Path(TEST_ROOT_DIR, "secondcachefileOne");
    createPublicTempFile(firstCacheFilePublic);
    createPublicTempFile(secondCacheFilePublic);
    createPrivateTempFile(firstCacheFile);
    createPrivateTempFile(secondCacheFile);
  }
Code example #21
File: ChildExecutor.java (Project: Brassrat/dragon)
  protected void execute(final Configuration conf, final ChildServiceDelegate delegate)
      throws IOException, InterruptedException, ClassNotFoundException {
    Class<?> inputKey = null;
    Class<?> inputValue = null;
    Class<?> outputKey = null;
    Class<?> outputValue = null;
    if (taskType.equals(TaskType.MAP)) {
      inputKey = conf.getClass(DragonJobConfig.JOB_MAP_INPUT_KEY_CLASS, Object.class);
      inputValue = conf.getClass(DragonJobConfig.JOB_MAP_INPUT_VALUE_CLASS, Object.class);
      outputKey = conf.getClass(DragonJobConfig.JOB_MAP_OUTPUT_KEY_CLASS, Object.class);
      outputValue = conf.getClass(DragonJobConfig.JOB_MAP_OUTPUT_VALUE_CLASS, Object.class);

    } else if (taskType.equals(TaskType.REDUCE)) {
      inputKey = conf.getClass(DragonJobConfig.JOB_REDUCE_INPUT_KEY_CLASS, Object.class);
      inputValue = conf.getClass(DragonJobConfig.JOB_REDUCE_INPUT_VALUE_CLASS, Object.class);
      outputKey = conf.getClass(DragonJobConfig.JOB_REDUCE_OUTPUT_KEY_CLASS, Object.class);
      outputValue = conf.getClass(DragonJobConfig.JOB_REDUCE_OUTPUT_VALUE_CLASS, Object.class);
    }
    TaskReporter reporter = startReporter(delegate);
    execute(conf, inputKey, inputValue, outputKey, outputValue, reporter);
  }
Code example #22
 public void setConf(Configuration conf) {
   if (!(conf instanceof JobConf)) {
     mLog.warn("Expected jobconf in setConf, got " + conf.getClass().getName());
     return;
   }
   JobConf jconf = (JobConf) conf;
   try {
     mAsc = (boolean[]) ObjectSerializer.deserialize(jconf.get("pig.sortOrder"));
   } catch (IOException ioe) {
     mLog.error("Unable to deserialize pig.sortOrder " + ioe.getMessage());
     throw new RuntimeException(ioe);
   }
   if (mAsc == null) {
     mAsc = new boolean[1];
     mAsc[0] = true;
   }
   // If there's only one entry in mAsc, it means it's for the whole
   // tuple. So we can't be looking for each column.
   mWholeTuple = (mAsc.length == 1);
 }
Code example #23
  @Override
  public Class<?> getHadoopWrapperClassNameForFileSystem(String scheme) {
    Configuration hadoopConf = getHadoopConfiguration();
    Class<? extends org.apache.hadoop.fs.FileSystem> clazz;
    // We can activate this block once we drop Hadoop1 support (only hd2 has the
    // getFileSystemClass method)
    //		try {
    //			clazz = org.apache.hadoop.fs.FileSystem.getFileSystemClass(scheme, hadoopConf);
    //		} catch (IOException e) {
    //			LOG.info("Flink could not load the Hadoop File system implementation for scheme "+scheme);
    //			return null;
    //		}
    clazz =
        hadoopConf.getClass("fs." + scheme + ".impl", null, org.apache.hadoop.fs.FileSystem.class);

    if (clazz != null && LOG.isDebugEnabled()) {
      LOG.debug("Flink supports " + scheme + " with the Hadoop file system wrapper, impl " + clazz);
    }
    return clazz;
  }
Code example #24
File: HFileSystem.java (Project: Reidddddd/hbase)
  /**
   * Returns a brand new instance of the FileSystem. It does not use the FileSystem.Cache. In newer
   * versions of HDFS, we can directly invoke FileSystem.newInstance(Configuration).
   *
   * @param conf Configuration
   * @return A new instance of the filesystem
   */
  private static FileSystem newInstanceFileSystem(Configuration conf) throws IOException {
    URI uri = FileSystem.getDefaultUri(conf);
    FileSystem fs = null;
    Class<?> clazz = conf.getClass("fs." + uri.getScheme() + ".impl", null);
    if (clazz != null) {
      // This will be true for Hadoop 1.0, or 0.20.
      fs = (FileSystem) org.apache.hadoop.util.ReflectionUtils.newInstance(clazz, conf);
      fs.initialize(uri, conf);
    } else {
      // For Hadoop 2.0, we have to go through FileSystem for the filesystem
      // implementation to be loaded by the service loader in case it has not
      // been loaded yet.
      Configuration clone = new Configuration(conf);
      clone.setBoolean("fs." + uri.getScheme() + ".impl.disable.cache", true);
      fs = FileSystem.get(uri, clone);
    }
    if (fs == null) {
      throw new IOException("No FileSystem for scheme: " + uri.getScheme());
    }

    return fs;
  }
Code example #25
  @Override
  public void setConf(Configuration conf) {
    this.conf = conf;
    // set scheduler
    Class<? extends ResourceScheduler> klass =
        conf.getClass(SLSConfiguration.RM_SCHEDULER, null, ResourceScheduler.class);

    scheduler = ReflectionUtils.newInstance(klass, conf);
    // start metrics
    metricsON = conf.getBoolean(SLSConfiguration.METRICS_SWITCH, true);
    if (metricsON) {
      try {
        initMetrics();
      } catch (Exception e) {
        e.printStackTrace();
      }
    }

    ShutdownHookManager.get()
        .addShutdownHook(
            new Runnable() {
              @Override
              public void run() {
                try {
                  if (metricsLogBW != null) {
                    metricsLogBW.write("]");
                    metricsLogBW.close();
                  }
                  if (web != null) {
                    web.stop();
                  }
                  tearDown();
                } catch (Exception e) {
                  e.printStackTrace();
                }
              }
            },
            SHUTDOWN_HOOK_PRIORITY);
  }
Code example #26
 public static Class<?> getClass(Configuration conf, String name) {
   return conf.getClass(name, Object.class);
 }
Code example #27
  /**
   * Creates a new DistributedFileSystem object to access HDFS
   *
   * @throws IOException throw if the required HDFS classes cannot be instantiated
   */
  public DistributedFileSystem() throws IOException {

    // Create new Hadoop configuration object
    this.conf = getHadoopConfiguration();

    Class<? extends org.apache.hadoop.fs.FileSystem> fsClass = null;

    // try to get the FileSystem implementation class Hadoop 2.0.0 style
    {
      LOG.debug("Trying to load HDFS class Hadoop 2.x style.");

      Object fsHandle = null;
      try {
        Method newApi =
            org.apache.hadoop.fs.FileSystem.class.getMethod(
                "getFileSystemClass", String.class, org.apache.hadoop.conf.Configuration.class);
        fsHandle = newApi.invoke(null, "hdfs", conf);
      } catch (Exception e) {
        // if we can't find the FileSystem class using the new API,
        // fsHandle will still be null and we assume we're running on an older Hadoop version
      }

      if (fsHandle != null) {
        if (fsHandle instanceof Class
            && org.apache.hadoop.fs.FileSystem.class.isAssignableFrom((Class<?>) fsHandle)) {
          fsClass = ((Class<?>) fsHandle).asSubclass(org.apache.hadoop.fs.FileSystem.class);

          if (LOG.isDebugEnabled()) {
            LOG.debug("Loaded '" + fsClass.getName() + "' as HDFS class.");
          }
        } else {
          LOG.debug(
              "Unexpected return type from 'org.apache.hadoop.fs.FileSystem.getFileSystemClass(String, Configuration)'.");
          throw new RuntimeException(
              "The value returned from org.apache.hadoop.fs.FileSystem.getFileSystemClass(String, Configuration) is not a valid subclass of org.apache.hadoop.fs.FileSystem.");
        }
      }
    }

    // fall back to an older Hadoop version
    if (fsClass == null) {
      // first of all, check for a user-defined hdfs class
      if (LOG.isDebugEnabled()) {
        LOG.debug(
            "Falling back to loading HDFS class old Hadoop style. Looking for HDFS class configuration entry '"
                + HDFS_IMPLEMENTATION_KEY
                + "'.");
      }

      Class<?> classFromConfig = conf.getClass(HDFS_IMPLEMENTATION_KEY, null);

      if (classFromConfig != null) {
        if (org.apache.hadoop.fs.FileSystem.class.isAssignableFrom(classFromConfig)) {
          fsClass = classFromConfig.asSubclass(org.apache.hadoop.fs.FileSystem.class);

          if (LOG.isDebugEnabled()) {
            LOG.debug(
                "Loaded HDFS class '" + fsClass.getName() + "' as specified in configuration.");
          }
        } else {
          if (LOG.isDebugEnabled()) {
            LOG.debug("HDFS class specified by " + HDFS_IMPLEMENTATION_KEY + " is of wrong type.");
          }

          throw new IOException(
              "HDFS class specified by "
                  + HDFS_IMPLEMENTATION_KEY
                  + " cannot be cast to a FileSystem type.");
        }
      } else {
        // load the default HDFS class
        if (LOG.isDebugEnabled()) {
          LOG.debug("Trying to load default HDFS implementation " + DEFAULT_HDFS_CLASS);
        }

        try {
          Class<?> reflectedClass = Class.forName(DEFAULT_HDFS_CLASS);
          if (org.apache.hadoop.fs.FileSystem.class.isAssignableFrom(reflectedClass)) {
            fsClass = reflectedClass.asSubclass(org.apache.hadoop.fs.FileSystem.class);
          } else {
            if (LOG.isDebugEnabled()) {
              LOG.debug("Default HDFS class is of wrong type.");
            }

            throw new IOException(
                "The default HDFS class '"
                    + DEFAULT_HDFS_CLASS
                    + "' cannot be cast to a FileSystem type.");
          }
        } catch (ClassNotFoundException e) {
          if (LOG.isDebugEnabled()) {
            LOG.debug("Default HDFS class cannot be loaded.");
          }

          throw new IOException(
              "No HDFS class has been configured and the default class '"
                  + DEFAULT_HDFS_CLASS
                  + "' cannot be loaded.");
        }
      }
    }

    this.fs = instantiateFileSystem(fsClass);
  }
Code example #28
 @SuppressWarnings("unchecked")
 private static Class<? extends HMaster> getMasterImplementation(final Configuration conf) {
   return (Class<? extends HMaster>) conf.getClass(HConstants.MASTER_IMPL, HMaster.class);
 }
Code example #29
 @SuppressWarnings("unchecked")
 private static Class<? extends HRegionServer> getRegionServerImplementation(
     final Configuration conf) {
   return (Class<? extends HRegionServer>)
       conf.getClass(HConstants.REGION_SERVER_IMPL, HRegionServer.class);
 }
Code example #30
 public static Class<? extends MongoSplitter> getSplitterClass(final Configuration conf) {
   return conf.getClass(MONGO_SPLITTER_CLASS, null, MongoSplitter.class);
 }