Code example #1
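Obtains a VolumeManager and a Connector from the parsed command-line options, then prints per-table disk usage via TableDiskUsage.printDiskUsage.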
 public static void main(String[] args) throws Exception {
   VolumeManager fs = VolumeManagerImpl.get();
   Opts opts = new Opts();
   opts.parseArgs(TableDiskUsage.class.getName(), args);
   Connector conn = opts.getConnector();
   org.apache.accumulo.server.util.TableDiskUsage.printDiskUsage(
       DefaultConfiguration.getInstance(), opts.tables, fs, conn, false);
 }
Code example #2
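Dumps write-ahead log files to stdout, reading either a flat HDFS log file through DfsLogger or a sorted map file through MultiReader, with optional filtering by row, key extent, or regular expression.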
  /**
   * Dump a Log File (Map or Sequence) to stdout. Will read from HDFS or local file system.
   *
   * @param args - first argument is the file to print
   */
  public static void main(String[] args) throws IOException {
    Opts opts = new Opts();
    opts.parseArgs(LogReader.class.getName(), args);
    VolumeManager fs = VolumeManagerImpl.get();

    Matcher rowMatcher = null;
    KeyExtent ke = null;
    Text row = null;
    if (opts.files.isEmpty()) {
      new JCommander(opts).usage();
      return;
    }
    if (opts.row != null) row = new Text(opts.row);
    if (opts.extent != null) {
      // extent is given as tableId;endRow;prevEndRow
      String[] sa = opts.extent.split(";");
      ke = new KeyExtent(new Text(sa[0]), new Text(sa[1]), new Text(sa[2]));
    }
    if (opts.regexp != null) {
      Pattern pattern = Pattern.compile(opts.regexp);
      rowMatcher = pattern.matcher("");
    }

    Set<Integer> tabletIds = new HashSet<Integer>();

    for (String file : opts.files) {

      Path path = new Path(file);
      LogFileKey key = new LogFileKey();
      LogFileValue value = new LogFileValue();

      if (fs.isFile(path)) {
        // read log entries from a simple hdfs file
        DFSLoggerInputStreams streams =
            DfsLogger.readHeaderAndReturnStream(fs, path, SiteConfiguration.getInstance());
        DataInputStream input = streams.getDecryptingInputStream();

        try {
          while (true) {
            try {
              key.readFields(input);
              value.readFields(input);
            } catch (EOFException ex) {
              break;
            }
            printLogEvent(key, value, row, rowMatcher, ke, tabletIds, opts.maxMutations);
          }
        } finally {
          input.close();
        }
      } else {
        // read the log entries sorted in a map file
        MultiReader input = new MultiReader(fs, path);
        while (input.next(key, value)) {
          printLogEvent(key, value, row, rowMatcher, ke, tabletIds, opts.maxMutations);
        }
      }
    }
  }
Code example #3
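Normalizes a possibly relative file path, resolves it to a full path on the table volume, and returns a mutation that inserts a delete flag into the metadata DeletesSection.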
  public static Mutation createDeleteMutation(String tableId, String pathToRemove)
      throws IOException {
    if (!pathToRemove.contains(":")) {
      // relative path: "../x" becomes "/x", anything else is prefixed with "/<tableId>"
      if (pathToRemove.startsWith("../")) pathToRemove = pathToRemove.substring(2);
      else pathToRemove = "/" + tableId + pathToRemove;
    }

    Path path = VolumeManagerImpl.get().getFullPath(FileType.TABLE, pathToRemove);
    Mutation delFlag =
        new Mutation(new Text(MetadataSchema.DeletesSection.getRowPrefix() + path.toString()));
    delFlag.put(EMPTY_TEXT, EMPTY_TEXT, new Value(new byte[] {}));
    return delFlag;
  }
Code example #4
File: TraceServer.java Project: harayz/accumulo
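Standard server startup: performs the secure login, parses the server options, initializes Accumulo with the VolumeManager and configuration, then runs the trace server.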
 public static void main(String[] args) throws Exception {
   SecurityUtil.serverLogin();
   ServerOpts opts = new ServerOpts();
   opts.parseArgs("tracer", args);
   Instance instance = HdfsZooInstance.getInstance();
   ServerConfiguration conf = new ServerConfiguration(instance);
   VolumeManager fs = VolumeManagerImpl.get();
   Accumulo.init(fs, conf, "tracer");
   String hostname = opts.getAddress();
   TraceServer server = new TraceServer(conf, hostname);
   Accumulo.enableTracing(hostname, "tserver");
   server.run();
   log.info("tracer stopping");
 }
Code example #5
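Collects a tablet's write-ahead log entries and data file sizes. The root tablet is listed directly from the file system (skipping temporary files); any other tablet is read from the root or metadata table, validating that every entry belongs to the expected row.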
  public static Pair<List<LogEntry>, SortedMap<FileRef, DataFileValue>> getFileAndLogEntries(
      Credentials credentials, KeyExtent extent)
      throws KeeperException, InterruptedException, IOException {
    ArrayList<LogEntry> result = new ArrayList<LogEntry>();
    TreeMap<FileRef, DataFileValue> sizes = new TreeMap<FileRef, DataFileValue>();

    VolumeManager fs = VolumeManagerImpl.get();
    if (extent.isRootTablet()) {
      getRootLogEntries(result);
      Path rootDir = new Path(getRootTabletDir());
      FileStatus[] files = fs.listStatus(rootDir);
      for (FileStatus fileStatus : files) {
        if (fileStatus.getPath().toString().endsWith("_tmp")) {
          continue;
        }
        DataFileValue dfv = new DataFileValue(0, 0);
        sizes.put(new FileRef(fileStatus.getPath().toString(), fileStatus.getPath()), dfv);
      }

    } else {
      String systemTableToCheck = extent.isMeta() ? RootTable.ID : MetadataTable.ID;
      Scanner scanner =
          new ScannerImpl(
              HdfsZooInstance.getInstance(), credentials, systemTableToCheck, Authorizations.EMPTY);
      scanner.fetchColumnFamily(LogColumnFamily.NAME);
      scanner.fetchColumnFamily(DataFileColumnFamily.NAME);
      scanner.setRange(extent.toMetadataRange());

      for (Entry<Key, Value> entry : scanner) {
        if (!entry.getKey().getRow().equals(extent.getMetadataEntry())) {
          throw new RuntimeException(
              "Unexpected row "
                  + entry.getKey().getRow()
                  + " expected "
                  + extent.getMetadataEntry());
        }

        if (entry.getKey().getColumnFamily().equals(LogColumnFamily.NAME)) {
          result.add(LogEntry.fromKeyValue(entry.getKey(), entry.getValue()));
        } else if (entry.getKey().getColumnFamily().equals(DataFileColumnFamily.NAME)) {
          DataFileValue dfv = new DataFileValue(entry.getValue().get());
          sizes.put(new FileRef(fs, entry.getKey()), dfv);
        } else {
          throw new RuntimeException("Unexpected col fam " + entry.getKey().getColumnFamily());
        }
      }
    }

    return new Pair<List<LogEntry>, SortedMap<FileRef, DataFileValue>>(result, sizes);
  }
Code example #6
File: Monitor.java Project: joshelser/accumulo
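The monitor follows the same startup pattern as the trace server: login, parse options, initialize Accumulo with the VolumeManager, then run.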
  public static void main(String[] args) throws Exception {
    SecurityUtil.serverLogin(ServerConfiguration.getSiteConfiguration());

    VolumeManager fs = VolumeManagerImpl.get();
    ServerOpts opts = new ServerOpts();
    opts.parseArgs("monitor", args);
    String hostname = opts.getAddress();

    instance = HdfsZooInstance.getInstance();
    config = new ServerConfiguration(instance);
    Accumulo.init(fs, config, "monitor");
    Monitor monitor = new Monitor();
    Accumulo.enableTracing(hostname, "monitor");
    monitor.run(hostname);
  }
Code example #7
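Scans the bulk-file column family for a tablet's metadata row and returns each loaded file mapped to the transaction id stored in its value.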
  public static Map<FileRef, Long> getBulkFilesLoaded(Credentials credentials, KeyExtent extent)
      throws IOException {
    Text metadataRow = extent.getMetadataEntry();
    Map<FileRef, Long> ret = new HashMap<FileRef, Long>();

    VolumeManager fs = VolumeManagerImpl.get();
    Scanner scanner =
        new ScannerImpl(
            HdfsZooInstance.getInstance(),
            credentials,
            extent.isMeta() ? RootTable.ID : MetadataTable.ID,
            Authorizations.EMPTY);
    scanner.setRange(new Range(metadataRow));
    scanner.fetchColumnFamily(TabletsSection.BulkFileColumnFamily.NAME);
    for (Entry<Key, Value> entry : scanner) {
      Long tid = Long.parseLong(entry.getValue().toString());
      ret.put(new FileRef(fs, entry.getKey()), tid);
    }
    return ret;
  }
Code example #8
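A variant of the previous example that uses an IsolatedScanner and keeps only the bulk-loaded files whose stored transaction id matches the given tid.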
 public static List<FileRef> getBulkFilesLoaded(Connector conn, KeyExtent extent, long tid)
     throws IOException {
   List<FileRef> result = new ArrayList<FileRef>();
   try {
     VolumeManager fs = VolumeManagerImpl.get();
     Scanner mscanner =
         new IsolatedScanner(
             conn.createScanner(
                 extent.isMeta() ? RootTable.NAME : MetadataTable.NAME, Authorizations.EMPTY));
     mscanner.setRange(extent.toMetadataRange());
     mscanner.fetchColumnFamily(TabletsSection.BulkFileColumnFamily.NAME);
     for (Entry<Key, Value> entry : mscanner) {
       if (Long.parseLong(entry.getValue().toString()) == tid) {
         result.add(new FileRef(fs, entry.getKey()));
       }
     }
     return result;
   } catch (TableNotFoundException ex) {
     // should never happen: the metadata table always exists
     throw new RuntimeException("The metadata table has vanished!", ex);
   }
 }
Code example #9
File: LocalityCheck.java Project: ekohlwey/accumulo
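Walks the tablet location and data-file entries in the metadata table and reports, per tablet server, the percentage of HDFS file blocks that are stored locally on that server.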
  public int run(String[] args) throws Exception {
    ClientOpts opts = new ClientOpts();
    opts.parseArgs(LocalityCheck.class.getName(), args);

    VolumeManager fs = VolumeManagerImpl.get();
    Connector connector = opts.getConnector();
    Scanner scanner = connector.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
    scanner.fetchColumnFamily(TabletsSection.CurrentLocationColumnFamily.NAME);
    scanner.fetchColumnFamily(DataFileColumnFamily.NAME);
    scanner.setRange(MetadataSchema.TabletsSection.getRange());

    Map<String, Long> totalBlocks = new HashMap<String, Long>();
    Map<String, Long> localBlocks = new HashMap<String, Long>();
    ArrayList<String> files = new ArrayList<String>();

    for (Entry<Key, Value> entry : scanner) {
      Key key = entry.getKey();
      if (key.compareColumnFamily(TabletsSection.CurrentLocationColumnFamily.NAME) == 0) {
        String location = entry.getValue().toString();
        String[] parts = location.split(":");
        String host = parts[0];
        addBlocks(fs, host, files, totalBlocks, localBlocks);
        files.clear();
      } else if (key.compareColumnFamily(DataFileColumnFamily.NAME) == 0) {

        files.add(fs.getFullPath(key).toString());
      }
    }
    System.out.println(" Server         %local  total blocks");
    for (String host : totalBlocks.keySet()) {
      System.out.println(
          String.format(
              "%15s %5.1f %8d",
              host, (localBlocks.get(host) * 100.) / totalBlocks.get(host), totalBlocks.get(host)));
    }
    return 0;
  }
Code example #10
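Scans a single tablet's data-file column family and returns a sorted map from FileRef to DataFileValue, stopping as soon as the scan leaves the tablet's metadata row.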
  public static SortedMap<FileRef, DataFileValue> getDataFileSizes(
      KeyExtent extent, Credentials credentials) throws IOException {
    TreeMap<FileRef, DataFileValue> sizes = new TreeMap<FileRef, DataFileValue>();

    Scanner mdScanner =
        new ScannerImpl(
            HdfsZooInstance.getInstance(), credentials, MetadataTable.ID, Authorizations.EMPTY);
    mdScanner.fetchColumnFamily(DataFileColumnFamily.NAME);
    Text row = extent.getMetadataEntry();
    VolumeManager fs = VolumeManagerImpl.get();

    Key endKey = new Key(row, DataFileColumnFamily.NAME, new Text(""));
    endKey = endKey.followingKey(PartialKey.ROW_COLFAM);

    mdScanner.setRange(new Range(new Key(row), endKey));
    for (Entry<Key, Value> entry : mdScanner) {

      if (!entry.getKey().getRow().equals(row)) break;
      DataFileValue dfv = new DataFileValue(entry.getValue().get());
      sizes.put(new FileRef(fs, entry.getKey()), dfv);
    }

    return sizes;
  }
Code example #11
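Deletes all metadata for a table: optionally writes delete flags for the table's files and directories first (which makes the operation fault tolerant), then removes every metadata row for the table with a BatchWriter.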
  public static void deleteTable(
      String tableId, boolean insertDeletes, Credentials credentials, ZooLock lock)
      throws AccumuloException, IOException {
    Scanner ms =
        new ScannerImpl(
            HdfsZooInstance.getInstance(), credentials, MetadataTable.ID, Authorizations.EMPTY);
    Text tableIdText = new Text(tableId);
    BatchWriter bw =
        new BatchWriterImpl(
            HdfsZooInstance.getInstance(),
            credentials,
            MetadataTable.ID,
            new BatchWriterConfig()
                .setMaxMemory(1000000)
                .setMaxLatency(120000L, TimeUnit.MILLISECONDS)
                .setMaxWriteThreads(2));

    // scan metadata for our table and delete everything we find
    Mutation m = null;
    ms.setRange(new KeyExtent(tableIdText, null, null).toMetadataRange());

    // insert deletes before deleting data from metadata... this makes the code fault tolerant
    if (insertDeletes) {

      ms.fetchColumnFamily(DataFileColumnFamily.NAME);
      TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.fetch(ms);

      for (Entry<Key, Value> cell : ms) {
        Key key = cell.getKey();

        if (key.getColumnFamily().equals(DataFileColumnFamily.NAME)) {
          FileRef ref = new FileRef(VolumeManagerImpl.get(), key);
          bw.addMutation(createDeleteMutation(tableId, ref.meta().toString()));
        }

        if (TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.hasColumns(key)) {
          bw.addMutation(createDeleteMutation(tableId, cell.getValue().toString()));
        }
      }

      bw.flush();

      ms.clearColumns();
    }

    for (Entry<Key, Value> cell : ms) {
      Key key = cell.getKey();

      if (m == null) {
        m = new Mutation(key.getRow());
        if (lock != null) putLockID(lock, m);
      }

      if (key.getRow().compareTo(m.getRow(), 0, m.getRow().length) != 0) {
        bw.addMutation(m);
        m = new Mutation(key.getRow());
        if (lock != null) putLockID(lock, m);
      }
      m.putDelete(key.getColumnFamily(), key.getColumnQualifier());
    }

    if (m != null) bw.addMutation(m);

    bw.close();
  }
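Taken together, the examples share one idiom: obtain the process-wide VolumeManager via VolumeManagerImpl.get(), then use it where a plain Hadoop FileSystem would otherwise appear. Below is a minimal sketch of that idiom, assuming the 1.6-era server classpath; it uses only calls that appear in the examples above (isFile, listStatus), and the class name VolumeManagerSketch is ours.

 import java.io.IOException;

 import org.apache.accumulo.server.fs.VolumeManager;
 import org.apache.accumulo.server.fs.VolumeManagerImpl;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.Path;

 public class VolumeManagerSketch {
   public static void main(String[] args) throws IOException {
     // Resolves the configured volumes and returns a VolumeManager that
     // routes each Path to the file system (HDFS or local) that owns it.
     VolumeManager fs = VolumeManagerImpl.get();

     Path path = new Path(args[0]);
     if (fs.isFile(path)) {
       System.out.println(path + " is a file");
     } else {
       // Directory: list its children, as the root-tablet example above does.
       for (FileStatus status : fs.listStatus(path)) {
         System.out.println(status.getPath());
       }
     }
   }
 }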