Example #1
  /**
   * Dump a Log File (Map or Sequence) to stdout. Will read from HDFS or local file system.
   *
   * @param args - first argument is the file to print
   */
  public static void main(String[] args) throws IOException {
    Opts opts = new Opts();
    opts.parseArgs(LogReader.class.getName(), args);
    VolumeManager fs = VolumeManagerImpl.get();

    Matcher rowMatcher = null;
    KeyExtent ke = null;
    Text row = null;
    if (opts.files.isEmpty()) {
      new JCommander(opts).usage();
      return;
    }
    if (opts.row != null) row = new Text(opts.row);
    if (opts.extent != null) {
      String[] sa = opts.extent.split(";");
      ke = new KeyExtent(new Text(sa[0]), new Text(sa[1]), new Text(sa[2]));
    }
    if (opts.regexp != null) {
      Pattern pattern = Pattern.compile(opts.regexp);
      rowMatcher = pattern.matcher("");
    }

    Set<Integer> tabletIds = new HashSet<Integer>();

    for (String file : opts.files) {

      Path path = new Path(file);
      LogFileKey key = new LogFileKey();
      LogFileValue value = new LogFileValue();

      if (fs.isFile(path)) {
        // read log entries from a plain HDFS file
        DFSLoggerInputStreams streams =
            DfsLogger.readHeaderAndReturnStream(fs, path, SiteConfiguration.getInstance());
        DataInputStream input = streams.getDecryptingInputStream();

        try {
          while (true) {
            try {
              key.readFields(input);
              value.readFields(input);
            } catch (EOFException ex) {
              break;
            }
            printLogEvent(key, value, row, rowMatcher, ke, tabletIds, opts.maxMutations);
          }
        } finally {
          input.close();
        }
      } else {
        // read sorted log entries from a map file
        MultiReader input = new MultiReader(fs, path);
        while (input.next(key, value)) {
          printLogEvent(key, value, row, rowMatcher, ke, tabletIds, opts.maxMutations);
        }
      }
    }
  }
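The inner read loop is the standard pattern for draining a stream of Hadoop Writables: call readFields on reusable key/value objects until the stream signals the end with an EOFException. A minimal sketch of that pattern in isolation, with an illustrative handler parameter standing in for printLogEvent (not Accumulo API):

  /** Reads Writable key/value pairs until EOF; a sketch, names are illustrative. */
  public static <K extends Writable, V extends Writable> void readAll(
      DataInputStream input, K key, V value, BiConsumer<K, V> handler) throws IOException {
    while (true) {
      try {
        key.readFields(input);   // deserialize the next key into the reused object
        value.readFields(input); // deserialize the matching value
      } catch (EOFException ex) {
        break; // EOFException is the normal end-of-stream signal for this format
      }
      handler.accept(key, value);
    }
  }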
Example #2
  public static Pair<List<LogEntry>, SortedMap<FileRef, DataFileValue>> getFileAndLogEntries(
      Credentials credentials, KeyExtent extent)
      throws KeeperException, InterruptedException, IOException {
    ArrayList<LogEntry> result = new ArrayList<LogEntry>();
    TreeMap<FileRef, DataFileValue> sizes = new TreeMap<FileRef, DataFileValue>();

    VolumeManager fs = VolumeManagerImpl.get();
    if (extent.isRootTablet()) {
      // the root tablet's log entries are kept in ZooKeeper, not in a metadata table
      getRootLogEntries(result);
      Path rootDir = new Path(getRootTabletDir());
      FileStatus[] files = fs.listStatus(rootDir);
      for (FileStatus fileStatus : files) {
        if (fileStatus.getPath().toString().endsWith("_tmp")) {
          continue;
        }
        DataFileValue dfv = new DataFileValue(0, 0);
        sizes.put(new FileRef(fileStatus.getPath().toString(), fileStatus.getPath()), dfv);
      }

    } else {
      // a metadata tablet's entries live in the root table; other tablets' in the metadata table
      String systemTableToCheck = extent.isMeta() ? RootTable.ID : MetadataTable.ID;
      Scanner scanner =
          new ScannerImpl(
              HdfsZooInstance.getInstance(), credentials, systemTableToCheck, Authorizations.EMPTY);
      scanner.fetchColumnFamily(LogColumnFamily.NAME);
      scanner.fetchColumnFamily(DataFileColumnFamily.NAME);
      scanner.setRange(extent.toMetadataRange());

      for (Entry<Key, Value> entry : scanner) {
        if (!entry.getKey().getRow().equals(extent.getMetadataEntry())) {
          throw new RuntimeException(
              "Unexpected row "
                  + entry.getKey().getRow()
                  + " expected "
                  + extent.getMetadataEntry());
        }

        if (entry.getKey().getColumnFamily().equals(LogColumnFamily.NAME)) {
          result.add(LogEntry.fromKeyValue(entry.getKey(), entry.getValue()));
        } else if (entry.getKey().getColumnFamily().equals(DataFileColumnFamily.NAME)) {
          DataFileValue dfv = new DataFileValue(entry.getValue().get());
          sizes.put(new FileRef(fs, entry.getKey()), dfv);
        } else {
          throw new RuntimeException("Unexpected col fam " + entry.getKey().getColumnFamily());
        }
      }
    }

    return new Pair<List<LogEntry>, SortedMap<FileRef, DataFileValue>>(result, sizes);
  }
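The non-root branch shows a recurring metadata-access pattern: restrict the scanner to one tablet's metadata row and fetch only the column families of interest. The same pattern against the public client API, as a sketch assuming an existing Connector conn:

  static void scanTabletMetadata(Connector conn, KeyExtent extent) throws TableNotFoundException {
    Scanner scanner = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
    scanner.fetchColumnFamily(LogColumnFamily.NAME);      // write-ahead log entries
    scanner.fetchColumnFamily(DataFileColumnFamily.NAME); // data file entries
    scanner.setRange(extent.toMetadataRange());           // only this tablet's metadata row
    for (Entry<Key, Value> entry : scanner) {
      // dispatch on entry.getKey().getColumnFamily(), as getFileAndLogEntries does above
    }
  }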
Example #3
 private void addBlocks(
     VolumeManager fs,
     String host,
     ArrayList<String> files,
     Map<String, Long> totalBlocks,
     Map<String, Long> localBlocks)
     throws Exception {
   long allBlocks = 0;
   long matchingBlocks = 0;
   if (!totalBlocks.containsKey(host)) {
     totalBlocks.put(host, 0L);
     localBlocks.put(host, 0L);
   }
   for (String file : files) {
     Path filePath = new Path(file);
     FileSystem ns = fs.getFileSystemByPath(filePath);
     FileStatus fileStatus = ns.getFileStatus(filePath);
     BlockLocation[] fileBlockLocations =
         ns.getFileBlockLocations(fileStatus, 0, fileStatus.getLen());
     for (BlockLocation blockLocation : fileBlockLocations) {
       allBlocks++;
       for (String location : blockLocation.getHosts()) {
         HostAndPort hap = HostAndPort.fromParts(location, 0);
         if (hap.getHostText().equals(host)) {
           matchingBlocks++;
           break;
         }
       }
     }
   }
   totalBlocks.put(host, allBlocks + totalBlocks.get(host));
   localBlocks.put(host, matchingBlocks + localBlocks.get(host));
 }
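addBlocks counts how many of a file's HDFS blocks have a replica on the given host, so the caller can report per-host locality. The Hadoop calls it depends on, shown in isolation as a sketch that simply prints each block's replica hosts:

  static void printBlockHosts(FileSystem ns, Path path) throws IOException {
    FileStatus status = ns.getFileStatus(path);
    // one BlockLocation per block, covering the whole file
    for (BlockLocation loc : ns.getFileBlockLocations(status, 0, status.getLen())) {
      System.out.printf("offset=%d length=%d hosts=%s%n",
          loc.getOffset(), loc.getLength(), Arrays.toString(loc.getHosts()));
    }
  }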
Example #4
 @Test
 public void testMoveToTrash_UsingTrash_VolMgrFailure() throws Exception {
   Path path = createMock(Path.class);
   expect(volMgr.moveToTrash(path)).andThrow(new FileNotFoundException());
   replay(volMgr);
   assertFalse(gc.archiveOrMoveToTrash(path));
   verify(volMgr);
 }
Example #5
 @Test
 public void testMoveToTrash_UsingTrash() throws Exception {
   Path path = createMock(Path.class);
   expect(volMgr.moveToTrash(path)).andReturn(true);
   replay(volMgr);
   assertTrue(gc.archiveOrMoveToTrash(path));
   verify(volMgr);
 }
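Both tests follow EasyMock's record/replay/verify lifecycle: script the expected call on the mock, switch the mock into replay mode, exercise the code under test, then verify the expectation held. The same lifecycle in a minimal, hypothetical form:

  @Test
  public void recordReplayVerifyPattern() {
    Runnable task = createMock(Runnable.class); // hypothetical collaborator, mocked like volMgr
    task.run();   // record: exactly one call to run() is expected
    replay(task); // stop recording, start enforcing
    task.run();   // exercise (normally indirect, through the class under test)
    verify(task); // fail if run() was not called exactly once
  }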
Example #6
  public int run(String[] args) throws Exception {
    ClientOpts opts = new ClientOpts();
    opts.parseArgs(LocalityCheck.class.getName(), args);

    VolumeManager fs = VolumeManagerImpl.get();
    Connector connector = opts.getConnector();
    Scanner scanner = connector.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
    scanner.fetchColumnFamily(TabletsSection.CurrentLocationColumnFamily.NAME);
    scanner.fetchColumnFamily(DataFileColumnFamily.NAME);
    scanner.setRange(MetadataSchema.TabletsSection.getRange());

    Map<String, Long> totalBlocks = new HashMap<String, Long>();
    Map<String, Long> localBlocks = new HashMap<String, Long>();
    ArrayList<String> files = new ArrayList<String>();

    for (Entry<Key, Value> entry : scanner) {
      Key key = entry.getKey();
      if (key.compareColumnFamily(TabletsSection.CurrentLocationColumnFamily.NAME) == 0) {
        // within a tablet's row, "file" entries sort before the "loc" entry, so files
        // already holds all of this tablet's data files when its location is read
        String location = entry.getValue().toString();
        String[] parts = location.split(":");
        String host = parts[0];
        addBlocks(fs, host, files, totalBlocks, localBlocks);
        files.clear();
      } else if (key.compareColumnFamily(DataFileColumnFamily.NAME) == 0) {
        files.add(fs.getFullPath(key).toString());
      }
    }
    System.out.println(" Server         %local  total blocks");
    for (String host : totalBlocks.keySet()) {
      System.out.println(
          String.format(
              "%15s %5.1f %8d",
              host, (localBlocks.get(host) * 100.) / totalBlocks.get(host), totalBlocks.get(host)));
    }
    return 0;
  }
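Since run(String[]) parses CLI arguments and returns an exit code, LocalityCheck has the shape of a command-line tool; a hypothetical launcher (not shown in the source) could be as simple as:

  public static void main(String[] args) throws Exception {
    // hypothetical entry point; the real class may wire this up differently
    System.exit(new LocalityCheck().run(args));
  }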
Example #7
  public static Map<TreeSet<String>, Long> getDiskUsage(
      AccumuloConfiguration acuConf, Set<String> tableIds, VolumeManager fs, Connector conn)
      throws IOException {
    TableDiskUsage tdu = new TableDiskUsage();

    // Add each tableID
    for (String tableId : tableIds) tdu.addTable(tableId);

    HashSet<String> tablesReferenced = new HashSet<String>(tableIds);
    HashSet<String> emptyTableIds = new HashSet<String>();
    HashSet<String> nameSpacesReferenced = new HashSet<String>();

    // For each table ID
    for (String tableId : tableIds) {
      Scanner mdScanner = null;
      try {
        mdScanner = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
      } catch (TableNotFoundException e) {
        throw new RuntimeException(e);
      }
      mdScanner.fetchColumnFamily(DataFileColumnFamily.NAME);
      mdScanner.setRange(new KeyExtent(tableId, null, null).toMetadataRange());

      if (!mdScanner.iterator().hasNext()) {
        emptyTableIds.add(tableId);
      }

      // Read each file referenced by that table
      for (Entry<Key, Value> entry : mdScanner) {
        String file = entry.getKey().getColumnQualifier().toString();
        String[] parts = file.split("/");
        // the filename
        String uniqueName = parts[parts.length - 1];
        if (file.contains(":") || file.startsWith("../")) {
          String ref = parts[parts.length - 3];
          // Track any tables which are referenced externally by the current table
          if (!ref.equals(tableId)) {
            tablesReferenced.add(ref);
          }
          if (file.contains(":") && parts.length > 3) {
            List<String> base = Arrays.asList(Arrays.copyOf(parts, parts.length - 3));
            nameSpacesReferenced.add(Joiner.on("/").join(base));
          }
        }

        // add this file to this table
        tdu.linkFileAndTable(tableId, uniqueName);
      }
    }

    // Each table seen (provided by the user, or referenced by a table the user provided)
    for (String tableId : tablesReferenced) {
      for (String tableDir : nameSpacesReferenced) {
        // Find each file and add its size
        FileStatus[] files = fs.globStatus(new Path(tableDir + "/" + tableId + "/*/*"));
        if (files != null) {
          for (FileStatus fileStatus : files) {
            // Assumes that all filenames are unique
            String name = fileStatus.getPath().getName();
            tdu.addFileSize(name, fileStatus.getLen());
          }
        }
      }
    }

    // Invert the tableName -> tableId map so table IDs can be translated back to names
    HashMap<String, String> reverseTableIdMap = new HashMap<String, String>();
    for (Entry<String, String> entry : conn.tableOperations().tableIdMap().entrySet())
      reverseTableIdMap.put(entry.getValue(), entry.getKey());

    TreeMap<TreeSet<String>, Long> usage =
        new TreeMap<TreeSet<String>, Long>(
            new Comparator<TreeSet<String>>() {

              @Override
              public int compare(TreeSet<String> o1, TreeSet<String> o2) {
                int len1 = o1.size();
                int len2 = o2.size();

                int min = Math.min(len1, len2);

                Iterator<String> iter1 = o1.iterator();
                Iterator<String> iter2 = o2.iterator();

                int count = 0;

                while (count < min) {
                  String s1 = iter1.next();
                  String s2 = iter2.next();

                  int cmp = s1.compareTo(s2);

                  if (cmp != 0) return cmp;

                  count++;
                }

                return len1 - len2;
              }
            });

    for (Entry<List<String>, Long> entry : tdu.calculateUsage().entrySet()) {
      TreeSet<String> tableNames = new TreeSet<String>();
      // Convert size shared by each table id into size shared by each table name
      for (String tableId : entry.getKey()) tableNames.add(reverseTableIdMap.get(tableId));

      // Map table names to shared file size
      usage.put(tableNames, entry.getValue());
    }

    if (!emptyTableIds.isEmpty()) {
      TreeSet<String> emptyTables = new TreeSet<String>();
      for (String tableId : emptyTableIds) {
        emptyTables.add(reverseTableIdMap.get(tableId));
      }
      usage.put(emptyTables, 0L);
    }

    return usage;
  }
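The anonymous Comparator orders two sorted sets lexicographically by their elements, with ties broken by size, giving the TreeMap keys (sets of table names) a stable order. The same comparison as a standalone Java 8 lambda, offered as an equivalent sketch rather than the code the class uses:

  static final Comparator<TreeSet<String>> BY_ELEMENTS =
      (o1, o2) -> {
        Iterator<String> iter1 = o1.iterator();
        Iterator<String> iter2 = o2.iterator();
        while (iter1.hasNext() && iter2.hasNext()) {
          int cmp = iter1.next().compareTo(iter2.next());
          if (cmp != 0) return cmp; // the first differing element decides
        }
        return Integer.compare(o1.size(), o2.size()); // shared prefix: smaller set first
      };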
Example #8
  public static void cloneTable(
      Instance instance, String srcTableId, String tableId, VolumeManager volumeManager)
      throws Exception {

    Connector conn =
        instance.getConnector(
            SystemCredentials.get().getPrincipal(), SystemCredentials.get().getToken());
    BatchWriter bw = conn.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());

    while (true) {

      try {
        initializeClone(srcTableId, tableId, conn, bw);

        // the following loop looks for changes to the files that occurred during the
        // copy; if files were dereferenced, they could have been GCed

        while (true) {
          int rewrites = checkClone(srcTableId, tableId, conn, bw);

          if (rewrites == 0) break;
        }

        bw.flush();
        break;

      } catch (TabletIterator.TabletDeletedException tde) {
        // tablets were merged in the src table
        bw.flush();

        // delete what we have cloned and try again
        deleteTable(tableId, false, SystemCredentials.get(), null);

        log.debug(
            "Tablets merged in table " + srcTableId + " while attempting to clone, trying again");

        UtilWaitThread.sleep(100);
      }
    }

    // delete the clone markers and create directory entries
    Scanner mscanner = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
    mscanner.setRange(new KeyExtent(new Text(tableId), null, null).toMetadataRange());
    mscanner.fetchColumnFamily(ClonedColumnFamily.NAME);

    int dirCount = 0;

    for (Entry<Key, Value> entry : mscanner) {
      Key k = entry.getKey();
      Mutation m = new Mutation(k.getRow());
      m.putDelete(k.getColumnFamily(), k.getColumnQualifier());
      String dir =
          volumeManager.choose(ServerConstants.getTablesDirs())
              + "/"
              + tableId
              + new String(FastFormat.toZeroPaddedString(dirCount++, 8, 16, "/c-".getBytes()));
      TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.put(m, new Value(dir.getBytes()));
      bw.addMutation(m);
    }

    bw.close();
  }
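The outer while (true) implements a copy-then-reconcile retry: initialize the clone, re-check until a pass makes no corrections, and if the source table's tablets were merged mid-copy, discard the partial clone and start over. The control flow in isolation, with hypothetical initialize/reconcile/cleanup helpers standing in for initializeClone, checkClone, and deleteTable, and a generic exception standing in for TabletDeletedException:

  void cloneWithRetry() throws Exception {
    while (true) {
      try {
        initialize();              // hypothetical: copy the source's metadata entries
        while (reconcile() > 0) {
          // hypothetical: reconcile() returns the number of entries it had to
          // rewrite; keep re-checking until a pass is clean
        }
        break;                     // the clone is now consistent with the source
      } catch (ConcurrentModificationException e) {
        cleanup();                 // hypothetical: delete the partial clone
        Thread.sleep(100);         // brief back-off before retrying from scratch
      }
    }
  }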