Code example #1
 @Test
 public void testFsUriSetProperly() throws Exception {
   HMaster master = UTIL.getMiniHBaseCluster().getMaster();
   MasterFileSystem fs = master.getMasterFileSystem();
   Path masterRoot = FSUtils.getRootDir(fs.conf);
   Path rootDir = FSUtils.getRootDir(fs.getFileSystem().getConf());
   // make sure the fs and the found root dir have the same scheme
   LOG.debug("from fs uri:" + FileSystem.getDefaultUri(fs.getFileSystem().getConf()));
   LOG.debug("from configuration uri:" + FileSystem.getDefaultUri(fs.conf));
   // make sure the set uri matches by forcing it.
   assertEquals(masterRoot, rootDir);
 }
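The assertion holds only if both Configuration objects resolve the same default filesystem URI. As a minimal sketch of how FileSystem.getDefaultUri() resolves that URI from fs.defaultFS (the host below is hypothetical, not part of the test; imports: org.apache.hadoop.conf.Configuration, org.apache.hadoop.fs.FileSystem, java.net.URI):

  Configuration conf = new Configuration();
  conf.set("fs.defaultFS", "hdfs://namenode.example.com:8020"); // hypothetical authority
  URI uri = FileSystem.getDefaultUri(conf);
  // uri.getScheme()    -> "hdfs"
  // uri.getAuthority() -> "namenode.example.com:8020"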
Code example #2
File: TestCopyFiles.java  Project: Jude7/bc-hadoop2.0
 /** copy files from dfs file system to local file system */
 public void testCopyFromDfsToLocal() throws Exception {
   MiniDFSCluster cluster = null;
   try {
     Configuration conf = new Configuration();
     final FileSystem localfs = FileSystem.get(LOCAL_FS, conf);
     cluster = new MiniDFSCluster(conf, 1, true, null);
     final FileSystem hdfs = cluster.getFileSystem();
     final String namenode = FileSystem.getDefaultUri(conf).toString();
     if (namenode.startsWith("hdfs://")) {
       MyFile[] files = createFiles(URI.create(namenode), "/srcdat");
       ToolRunner.run(
           new DistCpV1(conf),
           new String[] {
             "-log", "/logs", namenode + "/srcdat", "file:///" + TEST_ROOT_DIR + "/destdat"
           });
       assertTrue(
           "Source and destination directories do not match.",
           checkFiles(localfs, TEST_ROOT_DIR + "/destdat", files));
       assertTrue("Log directory does not exist.", hdfs.exists(new Path("/logs")));
       deldir(localfs, TEST_ROOT_DIR + "/destdat");
       deldir(hdfs, "/logs");
       deldir(hdfs, "/srcdat");
     }
   } finally {
     if (cluster != null) {
       cluster.shutdown();
     }
   }
 }
Code example #3
File: TestCopyFiles.java  Project: Jude7/bc-hadoop2.0
  /** copy empty directory on dfs file system */
  public void testEmptyDir() throws Exception {
    String namenode = null;
    MiniDFSCluster cluster = null;
    try {
      Configuration conf = new Configuration();
      cluster = new MiniDFSCluster(conf, 2, true, null);
      final FileSystem hdfs = cluster.getFileSystem();
      namenode = FileSystem.getDefaultUri(conf).toString();
      if (namenode.startsWith("hdfs://")) {

        FileSystem fs = FileSystem.get(URI.create(namenode), new Configuration());
        fs.mkdirs(new Path("/empty"));

        ToolRunner.run(
            new DistCpV1(conf),
            new String[] {"-log", namenode + "/logs", namenode + "/empty", namenode + "/dest"});
        fs = FileSystem.get(URI.create(namenode + "/destdat"), conf);
        assertTrue(
            "Destination directory does not exist.", fs.exists(new Path(namenode + "/dest")));
        deldir(hdfs, "/dest");
        deldir(hdfs, "/empty");
        deldir(hdfs, "/logs");
      }
    } finally {
      if (cluster != null) {
        cluster.shutdown();
      }
    }
  }
Code example #4
  /** Verify data-node port usage. */
  public void testDataNodePorts() throws Exception {
    NameNode nn = null;
    try {
      nn = startNameNode();

      // start data-node on the same port as name-node
      Configuration conf2 = new Configuration(config);
      conf2.set("dfs.data.dir", new File(hdfsDir, "data").getPath());
      conf2.set("dfs.datanode.address", FileSystem.getDefaultUri(config).getAuthority());
      conf2.set("dfs.datanode.http.address", NAME_NODE_HTTP_HOST + "0");
      boolean started = canStartDataNode(conf2);
      assertFalse(started); // should fail

      // bind http server to the same port as name-node
      conf2.set("dfs.datanode.address", NAME_NODE_HOST + "0");
      conf2.set("dfs.datanode.http.address", config.get("dfs.http.address"));
      started = canStartDataNode(conf2);
      assertFalse(started); // should fail

      // both ports are different from the name-node ones
      conf2.set("dfs.datanode.address", NAME_NODE_HOST + "0");
      conf2.set("dfs.datanode.http.address", NAME_NODE_HTTP_HOST + "0");
      conf2.set("dfs.datanode.ipc.address", NAME_NODE_HOST + "0");
      started = canStartDataNode(conf2);
      assertTrue(started); // should start now
    } finally {
      stopNameNode(nn);
    }
  }
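The first failure is engineered by reusing the name-node's authority (host:port) from the default URI as the data-node's bind address. The same step in isolation (authority hypothetical):

  Configuration conf = new Configuration();
  conf.set("fs.defaultFS", "hdfs://nn.example.com:8020"); // hypothetical
  String authority = FileSystem.getDefaultUri(conf).getAuthority();
  // authority -> "nn.example.com:8020"; binding dfs.datanode.address here
  // collides with the name-node's RPC port, so the data-node must not start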
Code example #5
File: HdfsTable.java  Project: nongli/Impala
  static {
    try {
      // call newInstance() instead of using a shared instance from a cache
      // to avoid accidentally having it closed by someone else
      FileSystem fs = FileSystem.newInstance(FileSystem.getDefaultUri(CONF), CONF);
      if (!(fs instanceof DistributedFileSystem)) {
        String error =
            "Cannot connect to HDFS. "
                + CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY
                + "("
                + CONF.get(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY)
                + ")"
                + " might be set incorrectly";
        throw new RuntimeException(error);
      }
      DFS = (DistributedFileSystem) fs;
    } catch (IOException e) {
      throw new RuntimeException("couldn't retrieve FileSystem:\n" + e.getMessage(), e);
    }

    SUPPORTS_VOLUME_ID =
        CONF.getBoolean(
            DFSConfigKeys.DFS_HDFS_BLOCKS_METADATA_ENABLED,
            DFSConfigKeys.DFS_HDFS_BLOCKS_METADATA_ENABLED_DEFAULT);
  }
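As the comment in this block notes, FileSystem.get() hands back a shared instance from a process-wide cache, so a close() by any holder invalidates it for everyone; newInstance() sidesteps that. A minimal sketch of the difference:

  Configuration conf = new Configuration();
  FileSystem shared = FileSystem.get(conf);           // cached, shared handle
  FileSystem isolated = FileSystem.newInstance(conf); // fresh, private handle
  isolated.close(); // safe: does not invalidate 'shared' for other callers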
Code example #6
File: TestCopyFiles.java  Project: Jude7/bc-hadoop2.0
 /** tests basedir option copying files from dfs file system to dfs file system */
 public void testBasedir() throws Exception {
   String namenode = null;
   MiniDFSCluster cluster = null;
   try {
     Configuration conf = new Configuration();
     cluster = new MiniDFSCluster(conf, 2, true, null);
     final FileSystem hdfs = cluster.getFileSystem();
     namenode = FileSystem.getDefaultUri(conf).toString();
     if (namenode.startsWith("hdfs://")) {
       MyFile[] files = createFiles(URI.create(namenode), "/basedir/middle/srcdat");
       ToolRunner.run(
           new DistCpV1(conf),
           new String[] {
             "-basedir", "/basedir", namenode + "/basedir/middle/srcdat", namenode + "/destdat"
           });
       assertTrue(
           "Source and destination directories do not match.",
           checkFiles(hdfs, "/destdat/middle/srcdat", files));
       deldir(hdfs, "/destdat");
       deldir(hdfs, "/basedir");
       deldir(hdfs, "/logs");
     }
   } finally {
     if (cluster != null) {
       cluster.shutdown();
     }
   }
 }
Code example #7
  @Override
  protected synchronized void startInternal() throws Exception {
    // create filesystem only now, as part of service-start. By this time, RM is
    // authenticated with kerberos so we are good to create a file-system
    // handle.
    fsConf = new Configuration(getConfig());
    fsConf.setBoolean("dfs.client.retry.policy.enabled", true);
    String retryPolicy =
        fsConf.get(
            YarnConfiguration.FS_RM_STATE_STORE_RETRY_POLICY_SPEC,
            YarnConfiguration.DEFAULT_FS_RM_STATE_STORE_RETRY_POLICY_SPEC);
    fsConf.set("dfs.client.retry.policy.spec", retryPolicy);

    String scheme = fsWorkingPath.toUri().getScheme();
    if (scheme == null) {
      scheme = FileSystem.getDefaultUri(fsConf).getScheme();
    }
    if (scheme != null) {
      String disableCacheName = String.format("fs.%s.impl.disable.cache", scheme);
      fsConf.setBoolean(disableCacheName, true);
    }

    fs = fsWorkingPath.getFileSystem(fsConf);
    mkdirsWithRetries(rmDTSecretManagerRoot);
    mkdirsWithRetries(rmAppRoot);
    mkdirsWithRetries(amrmTokenSecretManagerRoot);
    mkdirsWithRetries(reservationRoot);
  }
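The cache toggle is keyed per scheme as fs.<scheme>.impl.disable.cache. The same pattern in isolation:

  Configuration conf = new Configuration();
  String scheme = FileSystem.getDefaultUri(conf).getScheme(); // e.g. "hdfs"
  conf.setBoolean(String.format("fs.%s.impl.disable.cache", scheme), true);
  // path.getFileSystem(conf) now creates a fresh instance per call
  // instead of handing back the shared cached one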
Code example #8
 /** Returns the Jetty server that the Namenode is listening on. */
 private String getInfoServer() throws IOException {
   URI fsName = FileSystem.getDefaultUri(conf);
   if (!"hdfs".equals(fsName.getScheme())) {
     throw new IOException("This is not a DFS");
   }
   return NetUtils.getServerAddress(
       conf, "dfs.info.bindAddress", "dfs.info.port", "dfs.http.address");
 }
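Code example #10 below shows the same getInfoServer() rewritten without the deprecated NetUtils.getServerAddress() lookup: it reads dfs.http.address directly and, when the configured bind address is a wildcard, substitutes the host taken from the default filesystem URI.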
Code example #9
  @BeforeClass
  public static void setUpClass() throws Exception {
    conf = new Configuration();

    // create a fake FileSystem (MyFS) and associate it
    // with the "hdfs" scheme.
    URI uri = new URI(DelegationTokenRenewer.SCHEME + "://localhost:0");
    System.out.println("scheme is : " + uri.getScheme());
    conf.setClass("fs." + uri.getScheme() + ".impl", MyFS.class, DistributedFileSystem.class);
    FileSystem.setDefaultUri(conf, uri);
    LOG.info("filesystem uri = " + FileSystem.getDefaultUri(conf).toString());
  }
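With fs.hdfs.impl pointing at MyFS and the default URI set, the ordinary lookup in the tests resolves to the fake class; presumably that is the point of the setup:

  FileSystem fs = FileSystem.get(conf);
  // same as FileSystem.get(FileSystem.getDefaultUri(conf), conf)
  // -> an instance of MyFS, since the "hdfs" scheme now maps to it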
Code example #10
File: SecondaryNameNode.java  Project: hmilxin/hadoop
 /** Returns the Jetty server that the Namenode is listening on. */
 private String getInfoServer() throws IOException {
   URI fsName = FileSystem.getDefaultUri(conf);
   if (!FSConstants.HDFS_URI_SCHEME.equalsIgnoreCase(fsName.getScheme())) {
     throw new IOException("This is not a DFS");
   }
   String configuredAddress = conf.get("dfs.http.address", "0.0.0.0:50070");
   InetSocketAddress sockAddr = NetUtils.createSocketAddr(configuredAddress);
   if (sockAddr.getAddress().isAnyLocalAddress()) {
     return fsName.getHost() + ":" + sockAddr.getPort();
   } else {
     return configuredAddress;
   }
 }
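The branch exists because a wildcard bind address (0.0.0.0) is valid for listening but useless to clients, so the advertised host falls back to the default-URI host. The check in isolation:

  InetSocketAddress sockAddr = NetUtils.createSocketAddr("0.0.0.0:50070");
  sockAddr.getAddress().isAnyLocalAddress(); // true -> report <nn-host>:50070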
Code example #11
File: Namespaces.java  Project: RedShifted/oryx
 private Namespaces() {
   if (ConfigUtils.getDefaultConfig().getBoolean("model.local")) {
     prefix = "file:";
   } else {
     URI defaultURI = FileSystem.getDefaultUri(new OryxConfiguration());
     String host = defaultURI.getHost();
     int port = defaultURI.getPort();
     if (port > 0) {
       prefix = "hdfs://" + host + ':' + port;
     } else {
       prefix = "hdfs://" + host;
     }
   }
 }
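The port > 0 guard is needed because java.net.URI.getPort() returns -1 when the URI carries no explicit port:

  URI.create("hdfs://nn.example.com:8020").getPort(); // 8020
  URI.create("hdfs://nn.example.com").getPort();      // -1 -> leave the port out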
Code example #12
File: HFileSystem.java  Project: Reidddddd/hbase
  /**
   * Returns a brand new instance of the FileSystem. It does not use the FileSystem.Cache. In newer
   * versions of HDFS, we can directly invoke FileSystem.newInstance(Configuration).
   *
   * @param conf Configuration
   * @return A new instance of the filesystem
   */
  private static FileSystem newInstanceFileSystem(Configuration conf) throws IOException {
    URI uri = FileSystem.getDefaultUri(conf);
    FileSystem fs = null;
    Class<?> clazz = conf.getClass("fs." + uri.getScheme() + ".impl", null);
    if (clazz != null) {
      // This will be true for Hadoop 1.0, or 0.20.
      fs = (FileSystem) org.apache.hadoop.util.ReflectionUtils.newInstance(clazz, conf);
      fs.initialize(uri, conf);
    } else {
      // For Hadoop 2.0, we have to go through FileSystem for the filesystem
      // implementation to be loaded by the service loader in case it has not
      // been loaded yet.
      Configuration clone = new Configuration(conf);
      clone.setBoolean("fs." + uri.getScheme() + ".impl.disable.cache", true);
      fs = FileSystem.get(uri, clone);
    }
    if (fs == null) {
      throw new IOException("No FileSystem for scheme: " + uri.getScheme());
    }

    return fs;
  }
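A hedged usage sketch for the helper above, assuming a Configuration that names an HDFS default filesystem:

  Configuration conf = HBaseConfiguration.create();
  FileSystem uncached = newInstanceFileSystem(conf);
  try {
    // work with a private handle; closing it cannot break
    // other users of the cached FileSystem
  } finally {
    uncached.close();
  }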
Code example #13
File: TestCopyFiles.java  Project: Jude7/bc-hadoop2.0
  public void testLimits() throws Exception {
    Configuration conf = new Configuration();
    MiniDFSCluster cluster = null;
    try {
      cluster = new MiniDFSCluster(conf, 2, true, null);
      final String nnUri = FileSystem.getDefaultUri(conf).toString();
      final FileSystem fs = FileSystem.get(URI.create(nnUri), conf);
      final DistCpV1 distcp = new DistCpV1(conf);
      final FsShell shell = new FsShell(conf);

      final String srcrootdir = "/src_root";
      final Path srcrootpath = new Path(srcrootdir);
      final String dstrootdir = "/dst_root";
      final Path dstrootpath = new Path(dstrootdir);

      { // test -filelimit
        MyFile[] files = createFiles(URI.create(nnUri), srcrootdir);
        int filelimit = files.length / 2;
        System.out.println("filelimit=" + filelimit);

        ToolRunner.run(
            distcp,
            new String[] {"-filelimit", "" + filelimit, nnUri + srcrootdir, nnUri + dstrootdir});
        String results = execCmd(shell, "-lsr", dstrootdir);
        results = removePrefix(results, dstrootdir);
        System.out.println("results=" + results);

        FileStatus[] dststat = getFileStatus(fs, dstrootdir, files, true);
        assertEquals(filelimit, dststat.length);
        deldir(fs, dstrootdir);
        deldir(fs, srcrootdir);
      }

      { // test -sizelimit
        createFiles(URI.create(nnUri), srcrootdir);
        long sizelimit = fs.getContentSummary(srcrootpath).getLength() / 2;
        System.out.println("sizelimit=" + sizelimit);

        ToolRunner.run(
            distcp,
            new String[] {"-sizelimit", "" + sizelimit, nnUri + srcrootdir, nnUri + dstrootdir});

        ContentSummary summary = fs.getContentSummary(dstrootpath);
        System.out.println("summary=" + summary);
        assertTrue(summary.getLength() <= sizelimit);
        deldir(fs, dstrootdir);
        deldir(fs, srcrootdir);
      }

      { // test update
        final MyFile[] srcs = createFiles(URI.create(nnUri), srcrootdir);
        final long totalsize = fs.getContentSummary(srcrootpath).getLength();
        System.out.println("src.length=" + srcs.length);
        System.out.println("totalsize =" + totalsize);
        fs.mkdirs(dstrootpath);
        final int parts = RAN.nextInt(NFILES / 3 - 1) + 2;
        final int filelimit = srcs.length / parts;
        final long sizelimit = totalsize / parts;
        System.out.println("filelimit=" + filelimit);
        System.out.println("sizelimit=" + sizelimit);
        System.out.println("parts    =" + parts);
        final String[] args = {
          "-filelimit",
          "" + filelimit,
          "-sizelimit",
          "" + sizelimit,
          "-update",
          nnUri + srcrootdir,
          nnUri + dstrootdir
        };

        int dstfilecount = 0;
        long dstsize = 0;
        for (int i = 0; i <= parts; i++) {
          ToolRunner.run(distcp, args);

          FileStatus[] dststat = getFileStatus(fs, dstrootdir, srcs, true);
          System.out.println(i + ") dststat.length=" + dststat.length);
          assertTrue(dststat.length - dstfilecount <= filelimit);
          ContentSummary summary = fs.getContentSummary(dstrootpath);
          System.out.println(i + ") summary.getLength()=" + summary.getLength());
          assertTrue(summary.getLength() - dstsize <= sizelimit);
          assertTrue(checkFiles(fs, dstrootdir, srcs, true));
          dstfilecount = dststat.length;
          dstsize = summary.getLength();
        }

        deldir(fs, dstrootdir);
        deldir(fs, srcrootdir);
      }
    } finally {
      if (cluster != null) {
        cluster.shutdown();
      }
    }
  }
Code example #14
File: TestCopyFiles.java  Project: Jude7/bc-hadoop2.0
  /** test -delete */
  public void testDelete() throws Exception {
    final Configuration conf = new Configuration();
    conf.setInt("fs.trash.interval", 60);
    MiniDFSCluster cluster = null;
    try {
      cluster = new MiniDFSCluster(conf, 2, true, null);
      final URI nnURI = FileSystem.getDefaultUri(conf);
      final String nnUri = nnURI.toString();
      final FileSystem fs = FileSystem.get(URI.create(nnUri), conf);

      final DistCpV1 distcp = new DistCpV1(conf);
      final FsShell shell = new FsShell(conf);

      final String srcrootdir = "/src_root";
      final String dstrootdir = "/dst_root";

      {
        // create source files
        createFiles(nnURI, srcrootdir);
        String srcresults = execCmd(shell, "-lsr", srcrootdir);
        srcresults = removePrefix(srcresults, srcrootdir);
        System.out.println("srcresults=" + srcresults);

        // create some files in dst
        createFiles(nnURI, dstrootdir);
        System.out.println("dstrootdir=" + dstrootdir);
        shell.run(new String[] {"-lsr", dstrootdir});

        // run distcp
        ToolRunner.run(
            distcp,
            new String[] {
              "-delete", "-update", "-log", "/log", nnUri + srcrootdir, nnUri + dstrootdir
            });

        // make sure src and dst contains the same files
        String dstresults = execCmd(shell, "-lsr", dstrootdir);
        dstresults = removePrefix(dstresults, dstrootdir);
        System.out.println("first dstresults=" + dstresults);
        assertEquals(srcresults, dstresults);

        // create additional file in dst
        create(fs, new Path(dstrootdir, "foo"));
        create(fs, new Path(dstrootdir, "foobar"));

        // run distcp again
        ToolRunner.run(
            distcp,
            new String[] {
              "-delete", "-update", "-log", "/log2", nnUri + srcrootdir, nnUri + dstrootdir
            });

        // make sure src and dst contains the same files
        dstresults = execCmd(shell, "-lsr", dstrootdir);
        dstresults = removePrefix(dstresults, dstrootdir);
        System.out.println("second dstresults=" + dstresults);
        assertEquals(srcresults, dstresults);
        // verify that files removed in -delete were moved to the trash
        // regrettably, this test will break if Trash changes incompatibly
        assertTrue(
            fs.exists(new Path(fs.getHomeDirectory(), ".Trash/Current" + dstrootdir + "/foo")));
        assertTrue(
            fs.exists(new Path(fs.getHomeDirectory(), ".Trash/Current" + dstrootdir + "/foobar")));

        // cleanup
        deldir(fs, dstrootdir);
        deldir(fs, srcrootdir);
      }
    } finally {
      if (cluster != null) {
        cluster.shutdown();
      }
    }
  }
Code example #15
  @Override
  public void loadFileSystem(
      String host,
      String port,
      String username,
      boolean isHA,
      boolean isMapR,
      List<HdfsPair> parameters,
      String connectionName,
      String chorusUsername) {
    loadHadoopClassLoader();
    Configuration config = new Configuration();

    config.set("fs.defaultFS", buildHdfsPath(host, port, isHA));
    config.set("fs.hdfs.impl", "org.apache.hadoop.hdfs.DistributedFileSystem");

    if (config.get("hadoop.security.authentication", "simple").equalsIgnoreCase("simple")) {
      config.set("ipc.client.fallback-to-simple-auth-allowed", "true");
    }

    if (parameters != null && parameters.size() > 0) {
      for (HdfsPair pair : parameters) {
        config.set(pair.getKey(), pair.getValue());
      }
    }

    UserGroupInformation.setConfiguration(config);

    try {
      if (isKerberos(config)) {
        SecurityInfo securityInfo = new AnnotatedSecurityInfo();
        SecurityUtil.setSecurityInfoProviders(securityInfo);
        UserGroupInformation ugi =
            HdfsSecurityUtil.getCachedUserGroupInfo(
                connectionName, host, config.get(HdfsSecurityUtil.ALPINE_PRINCIPAL));
        if (ugi == null) {
          ugi =
              HdfsSecurityUtil.kerberosInitForHDFS(
                  config, host, port, connectionName, isHA, isMapR);
        }
        UserGroupInformation proxyUGI =
            HdfsSecurityUtil.createProxyUser(
                (chorusUsername == null || chorusUsername.isEmpty() ? username : chorusUsername),
                ugi);
        fileSystem =
            HdfsSecurityUtil.getHadoopFileSystem(
                config, proxyUGI, host, port, connectionName, isHA, isMapR);
        if (!loadedSuccessfully()) {
          if (checkForExpiredTicket()) {
            ugi =
                HdfsSecurityUtil.kerberosInitForHDFS(
                    config, host, port, connectionName, isHA, isMapR);
            proxyUGI =
                HdfsSecurityUtil.createProxyUser(
                    (chorusUsername == null || chorusUsername.isEmpty()
                        ? username
                        : chorusUsername),
                    ugi);
            fileSystem =
                HdfsSecurityUtil.getHadoopFileSystem(
                    config, proxyUGI, host, port, connectionName, isHA, isMapR);
          }
        }
      } else {
        fileSystem = FileSystem.get(FileSystem.getDefaultUri(config), config, username);
      }

    } catch (Exception e) {
      System.err.println("V3 plugin failed FileSystem.get");
      System.err.println(e.getMessage());
      e.printStackTrace(System.err);
    } finally {
      restoreOriginalClassLoader();
    }
  }
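The isKerberos(config) helper is not shown; presumably it inspects hadoop.security.authentication, along these lines:

  private static boolean isKerberos(Configuration config) {
    return "kerberos".equalsIgnoreCase(
        config.get("hadoop.security.authentication", "simple"));
  }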
Code example #16
  public static HiveServer create(
      Map<String, String> properties,
      File baseDir,
      File confDir,
      File logDir,
      FileSystem fileSystem)
      throws Exception {

    if (!properties.containsKey(WAREHOUSE_DIR)) {
      LOGGER.info("fileSystem " + fileSystem.getClass().getSimpleName());
      if (fileSystem instanceof DistributedFileSystem) {
        String dfsUri = FileSystem.getDefaultUri(fileSystem.getConf()).toString();
        LOGGER.info("dfsUri " + dfsUri);
        properties.put(WAREHOUSE_DIR, dfsUri + "/data");
        fileSystem.mkdirs(new Path("/data/"), new FsPermission((short) 0777));
      } else {
        properties.put(WAREHOUSE_DIR, new File(baseDir, "warehouse").getPath());
        fileSystem.mkdirs(new Path("/", "warehouse"), new FsPermission((short) 0777));
      }
    }
    LOGGER.info("Setting an readable path to hive.exec.scratchdir");
    properties.put("hive.exec.scratchdir", new File(baseDir, "scratchdir").getPath());

    if (!properties.containsKey(METASTORE_CONNECTION_URL)) {
      properties.put(
          METASTORE_CONNECTION_URL,
          String.format(
              "jdbc:derby:;databaseName=%s;create=true", new File(baseDir, "metastore").getPath()));
    }
    if (!properties.containsKey(HS2_PORT)) {
      properties.put(HS2_PORT, String.valueOf(findPort()));
    }
    if (!properties.containsKey(SUPPORT_CONCURRENCY)) {
      properties.put(SUPPORT_CONCURRENCY, "false");
    }
    if (!properties.containsKey(HADOOPBIN)) {
      properties.put(HADOOPBIN, "./target/test-classes/hadoop");
    }

    // Modify the test resource to have executable permission
    java.nio.file.Path hadoopPath =
        FileSystems.getDefault().getPath("target/test-classes", "hadoop");
    if (hadoopPath != null) {
      hadoopPath.toFile().setExecutable(true);
    }

    properties.put(METASTORE_SETUGI, "true");
    properties.put(METASTORE_CLIENT_TIMEOUT, "100");
    properties.put(ConfVars.HIVE_WAREHOUSE_SUBDIR_INHERIT_PERMS.varname, "true");

    properties.put(ConfVars.HIVESTATSAUTOGATHER.varname, "false");
    properties.put(ConfVars.HIVE_STATS_COLLECT_SCANCOLS.varname, "true");
    String hadoopBinPath = properties.get(HADOOPBIN);
    Assert.assertNotNull(hadoopBinPath, "Hadoop Bin");
    File hadoopBin = new File(hadoopBinPath);
    if (!hadoopBin.isFile()) {
      Assert.fail(
          "Path to hadoop bin "
              + hadoopBin.getPath()
              + " is invalid. "
              + "Perhaps you missed the download-hadoop profile.");
    }

    /*
     * This hack, setting the hiveSiteURL field removes a previous hack involving
     * setting of system properties for each property. Although both are hacks,
     * I prefer this hack because once the system properties are set they can
     * affect later tests unless those tests clear them. This hack allows for
     * a clean switch to a new set of defaults when a new HiveConf object is created.
     */
    Reflection.staticField("hiveSiteURL").ofType(URL.class).in(HiveConf.class).set(null);
    HiveConf hiveConf = new HiveConf();
    for (Map.Entry<String, String> entry : properties.entrySet()) {
      LOGGER.info(entry.getKey() + " => " + entry.getValue());
      hiveConf.set(entry.getKey(), entry.getValue());
    }
    File hiveSite = new File(confDir, "hive-site.xml");

    hiveConf.set(HIVESERVER2_IMPERSONATION, "false");
    OutputStream out = new FileOutputStream(hiveSite);
    hiveConf.writeXml(out);
    out.close();

    Reflection.staticField("hiveSiteURL")
        .ofType(URL.class)
        .in(HiveConf.class)
        .set(hiveSite.toURI().toURL());

    LOGGER.info("Creating InternalHiveServer");
    return new InternalHiveServer(hiveConf);
  }
Code example #17
  @BeforeClass
  public static void setupTestStaticConfiguration() throws Exception {
    LOGGER.info("AbstractTestWithStaticConfiguration setupTestStaticConfiguration");
    properties = Maps.newHashMap();
    if (!policyOnHdfs) {
      policyOnHdfs = new Boolean(System.getProperty("sentry.e2etest.policyonhdfs", "false"));
    }
    if (testServerType != null) {
      properties.put("sentry.e2etest.hiveServer2Type", testServerType);
    }
    baseDir = Files.createTempDir();
    LOGGER.info("BaseDir = " + baseDir);
    logDir = assertCreateDir(new File(baseDir, "log"));
    confDir = assertCreateDir(new File(baseDir, "etc"));
    dataDir = assertCreateDir(new File(baseDir, "data"));
    policyFileLocation = new File(confDir, HiveServerFactory.AUTHZ_PROVIDER_FILENAME);

    String dfsType = System.getProperty(DFSFactory.FS_TYPE);
    dfs = DFSFactory.create(dfsType, baseDir, testServerType);
    fileSystem = dfs.getFileSystem();

    PolicyFile policyFile =
        PolicyFile.setAdminOnServer1(ADMIN1)
            .setUserGroupMapping(StaticUserGroup.getStaticMapping());
    policyFile.write(policyFileLocation);

    String policyURI;
    if (policyOnHdfs) {
      String dfsUri = FileSystem.getDefaultUri(fileSystem.getConf()).toString();
      LOGGER.error("dfsUri " + dfsUri);
      policyURI =
          dfsUri + System.getProperty("sentry.e2etest.hive.policy.location", "/user/hive/sentry");
      policyURI += "/" + HiveServerFactory.AUTHZ_PROVIDER_FILENAME;
    } else {
      policyURI = policyFileLocation.getPath();
    }

    boolean startSentry = new Boolean(System.getProperty(EXTERNAL_SENTRY_SERVICE, "false"));
    if ("true".equalsIgnoreCase(System.getProperty(ENABLE_SENTRY_HA, "false"))) {
      enableSentryHA = true;
    }
    if (useSentryService && (!startSentry)) {
      setupSentryService();
    }

    if (enableHiveConcurrency) {
      properties.put(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "true");
      properties.put(
          HiveConf.ConfVars.HIVE_TXN_MANAGER.varname,
          "org.apache.hadoop.hive.ql.lockmgr.DummyTxnManager");
      properties.put(
          HiveConf.ConfVars.HIVE_LOCK_MANAGER.varname,
          "org.apache.hadoop.hive.ql.lockmgr.EmbeddedLockManager");
    }

    hiveServer = create(properties, baseDir, confDir, logDir, policyURI, fileSystem);
    hiveServer.start();
    createContext();

    // Create tmp as scratch dir if it doesn't exist
    Path tmpPath = new Path("/tmp");
    if (!fileSystem.exists(tmpPath)) {
      fileSystem.mkdirs(tmpPath, new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL));
    }
  }
Code example #18
  public static void main(String[] args) {
    int numDataNodes = 0;
    int numRacks = 0;
    boolean inject = false;
    long startingBlockId = 1;
    int numBlocksPerDNtoInject = 0;
    int replication = 1;

    Configuration conf = new HdfsConfiguration();

    for (int i = 0; i < args.length; i++) { // parse command line
      if (args[i].equals("-n")) {
        if (++i >= args.length || args[i].startsWith("-")) {
          printUsageExit("missing number of nodes");
        }
        numDataNodes = Integer.parseInt(args[i]);
      } else if (args[i].equals("-racks")) {
        if (++i >= args.length || args[i].startsWith("-")) {
          printUsageExit("Missing number of racks");
        }
        numRacks = Integer.parseInt(args[i]);
      } else if (args[i].equals("-r")) {
        if (++i >= args.length || args[i].startsWith("-")) {
          printUsageExit("Missing replicaiton factor");
        }
        replication = Integer.parseInt(args[i]);
      } else if (args[i].equals("-d")) {
        if (++i >= args.length || args[i].startsWith("-")) {
          printUsageExit("Missing datanode dirs parameter");
        }
        dataNodeDirs = args[i];
      } else if (args[i].equals("-simulated")) {
        conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
      } else if (args[i].equals("-inject")) {
        if (!conf.getBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, false)) {
          System.out.print("-inject is valid only for simulated");
          printUsageExit();
        }
        inject = true;
        if (++i >= args.length || args[i].startsWith("-")) {
          printUsageExit("Missing starting block and number of blocks per DN to inject");
        }
        startingBlockId = Integer.parseInt(args[i]);
        if (++i >= args.length || args[i].startsWith("-")) {
          printUsageExit("Missing number of blocks to inject");
        }
        numBlocksPerDNtoInject = Integer.parseInt(args[i]);
      } else {
        printUsageExit();
      }
    }
    if (numDataNodes <= 0 || replication <= 0) {
      printUsageExit("numDataNodes and replication have to be greater than zero");
    }
    if (replication > numDataNodes) {
      printUsageExit("Replication must be less than or equal to numDataNodes");
    }
    String nameNodeAdr = FileSystem.getDefaultUri(conf).getAuthority();
    if (nameNodeAdr == null) {
      System.out.println("No name node address and port in config");
      System.exit(-1);
    }
    boolean simulated = conf.getBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, false);
    System.out.println(
        "Starting "
            + numDataNodes
            + (simulated ? " Simulated " : " ")
            + " Data Nodes that will connect to Name Node at "
            + nameNodeAdr);

    System.setProperty("test.build.data", dataNodeDirs);

    MiniDFSCluster mc = new MiniDFSCluster();
    try {
      mc.formatDataNodeDirs();
    } catch (IOException e) {
      System.out.println("Error formating data node dirs:" + e);
    }

    String[] rack4DataNode = null;
    if (numRacks > 0) {
      System.out.println("Using " + numRacks + " racks: ");
      String rackPrefix = getUniqueRackPrefix();

      rack4DataNode = new String[numDataNodes];
      for (int i = 0; i < numDataNodes; ++i) {
        // rack4DataNode[i] = racks[i%numRacks];
        rack4DataNode[i] = rackPrefix + "-" + i % numRacks;
        System.out.println("Data Node " + i + " using " + rack4DataNode[i]);
      }
    }
    try {
      mc.startDataNodes(conf, numDataNodes, true, StartupOption.REGULAR, rack4DataNode);
      if (inject) {
        long blockSize = 10;
        System.out.println(
            "Injecting "
                + numBlocksPerDNtoInject
                + " blocks in each DN starting at blockId "
                + startingBlockId
                + " with blocksize of "
                + blockSize);
        Block[] blocks = new Block[numBlocksPerDNtoInject];
        long blkid = startingBlockId;
        for (int i_dn = 0; i_dn < numDataNodes; ++i_dn) {
          for (int i = 0; i < blocks.length; ++i) {
            blocks[i] = new Block(blkid++, blockSize, CreateEditsLog.BLOCK_GENERATION_STAMP);
          }
          for (int i = 1; i <= replication; ++i) {
            // inject blocks for dn_i into dn_i and replica in dn_i's neighbors
            mc.injectBlocks((i_dn + i - 1) % numDataNodes, Arrays.asList(blocks));
            System.out.println(
                "Injecting blocks of dn " + i_dn + " into dn" + ((i_dn + i - 1) % numDataNodes));
          }
        }
        System.out.println("Created blocks from Bids " + startingBlockId + " to " + (blkid - 1));
      }

    } catch (IOException e) {
      System.out.println("Error creating data node:" + e);
    }
  }
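The null check on the authority catches an unconfigured cluster: with stock defaults fs.defaultFS is file:///, whose URI has no authority component, so getAuthority() returns null. Illustration:

  Configuration conf = new Configuration(); // fs.defaultFS left at file:///
  FileSystem.getDefaultUri(conf).getAuthority(); // null -> exits with the error above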
Code example #19
File: Context.java  Project: Ymick/sentry
 @SuppressWarnings("static-access")
 public URI getDFSUri() throws IOException {
   return fileSystem.getDefaultUri(fileSystem.getConf());
 }
Code example #20
File: TestCopyFiles.java  Project: Jude7/bc-hadoop2.0
  public void testPreserveOption() throws Exception {
    Configuration conf = new Configuration();
    MiniDFSCluster cluster = null;
    try {
      cluster = new MiniDFSCluster(conf, 2, true, null);
      String nnUri = FileSystem.getDefaultUri(conf).toString();
      FileSystem fs = FileSystem.get(URI.create(nnUri), conf);

      { // test preserving user
        MyFile[] files = createFiles(URI.create(nnUri), "/srcdat");
        FileStatus[] srcstat = getFileStatus(fs, "/srcdat", files);
        for (int i = 0; i < srcstat.length; i++) {
          fs.setOwner(srcstat[i].getPath(), "u" + i, null);
        }
        ToolRunner.run(
            new DistCpV1(conf), new String[] {"-pu", nnUri + "/srcdat", nnUri + "/destdat"});
        assertTrue(
            "Source and destination directories do not match.", checkFiles(fs, "/destdat", files));

        FileStatus[] dststat = getFileStatus(fs, "/destdat", files);
        for (int i = 0; i < dststat.length; i++) {
          assertEquals("i=" + i, "u" + i, dststat[i].getOwner());
        }
        deldir(fs, "/destdat");
        deldir(fs, "/srcdat");
      }

      { // test preserving group
        MyFile[] files = createFiles(URI.create(nnUri), "/srcdat");
        FileStatus[] srcstat = getFileStatus(fs, "/srcdat", files);
        for (int i = 0; i < srcstat.length; i++) {
          fs.setOwner(srcstat[i].getPath(), null, "g" + i);
        }
        ToolRunner.run(
            new DistCpV1(conf), new String[] {"-pg", nnUri + "/srcdat", nnUri + "/destdat"});
        assertTrue(
            "Source and destination directories do not match.", checkFiles(fs, "/destdat", files));

        FileStatus[] dststat = getFileStatus(fs, "/destdat", files);
        for (int i = 0; i < dststat.length; i++) {
          assertEquals("i=" + i, "g" + i, dststat[i].getGroup());
        }
        deldir(fs, "/destdat");
        deldir(fs, "/srcdat");
      }

      { // test preserving mode
        MyFile[] files = createFiles(URI.create(nnUri), "/srcdat");
        FileStatus[] srcstat = getFileStatus(fs, "/srcdat", files);
        FsPermission[] permissions = new FsPermission[srcstat.length];
        for (int i = 0; i < srcstat.length; i++) {
          permissions[i] = new FsPermission((short) (i & 0666));
          fs.setPermission(srcstat[i].getPath(), permissions[i]);
        }

        ToolRunner.run(
            new DistCpV1(conf), new String[] {"-pp", nnUri + "/srcdat", nnUri + "/destdat"});
        assertTrue(
            "Source and destination directories do not match.", checkFiles(fs, "/destdat", files));

        FileStatus[] dststat = getFileStatus(fs, "/destdat", files);
        for (int i = 0; i < dststat.length; i++) {
          assertEquals("i=" + i, permissions[i], dststat[i].getPermission());
        }
        deldir(fs, "/destdat");
        deldir(fs, "/srcdat");
      }

      { // test preserving times
        MyFile[] files = createFiles(URI.create(nnUri), "/srcdat");
        fs.mkdirs(new Path("/srcdat/tmpf1"));
        fs.mkdirs(new Path("/srcdat/tmpf2"));
        FileStatus[] srcstat = getFileStatus(fs, "/srcdat", files);
        FsPermission[] permissions = new FsPermission[srcstat.length];
        for (int i = 0; i < srcstat.length; i++) {
          fs.setTimes(srcstat[i].getPath(), 40, 50);
        }

        ToolRunner.run(
            new DistCpV1(conf), new String[] {"-pt", nnUri + "/srcdat", nnUri + "/destdat"});

        FileStatus[] dststat = getFileStatus(fs, "/destdat", files);
        for (int i = 0; i < dststat.length; i++) {
          assertEquals("Modif. Time i=" + i, 40, dststat[i].getModificationTime());
          assertEquals(
              "Access Time i=" + i + srcstat[i].getPath() + "-" + dststat[i].getPath(),
              50,
              dststat[i].getAccessTime());
        }

        assertTrue(
            "Source and destination directories do not match.", checkFiles(fs, "/destdat", files));

        deldir(fs, "/destdat");
        deldir(fs, "/srcdat");
      }
    } finally {
      if (cluster != null) {
        cluster.shutdown();
      }
    }
  }
Code example #21
File: TestCopyFiles.java  Project: Jude7/bc-hadoop2.0
  public void testHftpAccessControl() throws Exception {
    MiniDFSCluster cluster = null;
    try {
      final UserGroupInformation DFS_UGI = createUGI("dfs", true);
      final UserGroupInformation USER_UGI = createUGI("user", false);

      // start cluster by DFS_UGI
      final Configuration dfsConf = new Configuration();
      cluster = new MiniDFSCluster(dfsConf, 2, true, null);
      cluster.waitActive();

      final String httpAdd = dfsConf.get("dfs.http.address");
      final URI nnURI = FileSystem.getDefaultUri(dfsConf);
      final String nnUri = nnURI.toString();
      FileSystem fs1 =
          DFS_UGI.doAs(
              new PrivilegedExceptionAction<FileSystem>() {
                public FileSystem run() throws IOException {
                  return FileSystem.get(nnURI, dfsConf);
                }
              });
      final Path home = createHomeDirectory(fs1, USER_UGI);

      // now, login as USER_UGI
      final Configuration userConf = new Configuration();
      final FileSystem fs =
          USER_UGI.doAs(
              new PrivilegedExceptionAction<FileSystem>() {
                public FileSystem run() throws IOException {
                  return FileSystem.get(nnURI, userConf);
                }
              });

      final Path srcrootpath = new Path(home, "src_root");
      final String srcrootdir = srcrootpath.toString();
      final Path dstrootpath = new Path(home, "dst_root");
      final String dstrootdir = dstrootpath.toString();
      final DistCpV1 distcp =
          USER_UGI.doAs(
              new PrivilegedExceptionAction<DistCpV1>() {
                public DistCpV1 run() {
                  return new DistCpV1(userConf);
                }
              });

      FileSystem.mkdirs(fs, srcrootpath, new FsPermission((short) 0700));
      final String[] args = {"hftp://" + httpAdd + srcrootdir, nnUri + dstrootdir};

      { // copy with permission 000, should fail
        fs.setPermission(srcrootpath, new FsPermission((short) 0));
        USER_UGI.doAs(
            new PrivilegedExceptionAction<Void>() {
              public Void run() throws Exception {
                assertEquals(-3, ToolRunner.run(distcp, args));
                return null;
              }
            });
      }
    } finally {
      if (cluster != null) {
        cluster.shutdown();
      }
    }
  }
Code example #22
File: NameNode.java  Project: imace/hops
 public static InetSocketAddress getAddress(Configuration conf) {
   URI filesystemURI = FileSystem.getDefaultUri(conf);
   return getAddress(filesystemURI);
 }