Example #1
  public static void removeScanFiles(
      KeyExtent extent, Set<FileRef> scanFiles, Credentials credentials, ZooLock zooLock) {
    Mutation m = new Mutation(extent.getMetadataEntry());

    // delete every scan-file marker for this tablet in one mutation
    for (FileRef pathToRemove : scanFiles) {
      m.putDelete(ScanFileColumnFamily.NAME, pathToRemove.meta());
    }

    update(credentials, zooLock, m, extent);
  }
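
This builds a single mutation deleting every scan-file marker for the tablet, then applies it under the tablet server's ZooKeeper lock. A minimal caller sketch follows; it is hedged, not the authoritative call site: the extent, credentials, and zooLock are assumed to come from the surrounding tablet-server context, the HDFS path is illustrative, and FileRef's single-string constructor is assumed.

  // Hedged sketch: drop the scan-file markers once no scan session needs them.
  Set<FileRef> finishedScanFiles = new HashSet<>();
  finishedScanFiles.add(new FileRef("hdfs://nn:8020/accumulo/tables/2/t-0001/F0000a.rf")); // assumed ctor
  removeScanFiles(extent, finishedScanFiles, credentials, zooLock);
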
Example #2
  public static void addDeleteEntries(
      KeyExtent extent, Set<FileRef> datafilesToDelete, Credentials credentials)
      throws IOException {

    String tableId = extent.getTableId().toString();

    // TODO could use a batch writer, but would need to handle failure and retry like update
    // does - ACCUMULO-1294
    for (FileRef pathToRemove : datafilesToDelete) {
      update(credentials, createDeleteMutation(tableId, pathToRemove.path().toString()), extent);
    }
  }
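
Each candidate file gets its own delete-marker mutation from createDeleteMutation, written one at a time through update() (which handles retry), rather than through a batch writer per the TODO above. A hedged caller sketch, with the extent and credentials assumed from context and the paths illustrative:

  // Hedged sketch: queue obsolete datafiles for the garbage collector.
  Set<FileRef> obsolete = new HashSet<>();
  obsolete.add(new FileRef("hdfs://nn:8020/accumulo/tables/2/t-0001/C0000b.rf")); // assumed ctor
  addDeleteEntries(extent, obsolete, credentials);
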
Example #3
  public static void finishSplit(
      Text metadataEntry,
      Map<FileRef, DataFileValue> datafileSizes,
      List<FileRef> highDatafilesToRemove,
      Credentials credentials,
      ZooLock zooLock) {
    Mutation m = new Mutation(metadataEntry);
    // clear the markers that indicate a split is in progress
    TabletsSection.TabletColumnFamily.SPLIT_RATIO_COLUMN.putDelete(m);
    TabletsSection.TabletColumnFamily.OLD_PREV_ROW_COLUMN.putDelete(m);
    ChoppedColumnFamily.CHOPPED_COLUMN.putDelete(m);

    // record the size of each datafile this tablet keeps
    for (Entry<FileRef, DataFileValue> entry : datafileSizes.entrySet()) {
      m.put(DataFileColumnFamily.NAME, entry.getKey().meta(), new Value(entry.getValue().encode()));
    }

    // drop the datafile entries being removed from this tablet
    for (FileRef pathToRemove : highDatafilesToRemove) {
      m.putDelete(DataFileColumnFamily.NAME, pathToRemove.meta());
    }

    update(credentials, zooLock, m, new KeyExtent(metadataEntry, (Text) null));
  }
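
finishSplit commits the end of a split in a single mutation: the split-in-progress markers are deleted, the datafile entries the tablet keeps are rewritten with their sizes, and entries for files it no longer owns are dropped. A hedged caller sketch, assuming the split logic has already computed the file maps:

  // Hedged sketch: finish the split for one resulting tablet. The file maps
  // ("keptDatafileSizes", "highDatafilesToRemove") are assumed precomputed.
  finishSplit(extent.getMetadataEntry(), keptDatafileSizes, highDatafilesToRemove,
      credentials, zooLock);
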
Example #4
  public static void deleteTable(
      String tableId, boolean insertDeletes, Credentials credentials, ZooLock lock)
      throws AccumuloException, IOException {
    Scanner ms =
        new ScannerImpl(
            HdfsZooInstance.getInstance(), credentials, MetadataTable.ID, Authorizations.EMPTY);
    Text tableIdText = new Text(tableId);
    BatchWriter bw =
        new BatchWriterImpl(
            HdfsZooInstance.getInstance(),
            credentials,
            MetadataTable.ID,
            new BatchWriterConfig()
                .setMaxMemory(1000000)
                .setMaxLatency(120000L, TimeUnit.MILLISECONDS)
                .setMaxWriteThreads(2));

    // scan metadata for our table and delete everything we find
    Mutation m = null;
    ms.setRange(new KeyExtent(tableIdText, null, null).toMetadataRange());

    // insert deletes before deleting data from metadata... this makes the code fault tolerant
    if (insertDeletes) {

      ms.fetchColumnFamily(DataFileColumnFamily.NAME);
      TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.fetch(ms);

      for (Entry<Key, Value> cell : ms) {
        Key key = cell.getKey();

        if (key.getColumnFamily().equals(DataFileColumnFamily.NAME)) {
          FileRef ref = new FileRef(VolumeManagerImpl.get(), key);
          bw.addMutation(createDeleteMutation(tableId, ref.meta().toString()));
        }

        if (TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.hasColumns(key)) {
          bw.addMutation(createDeleteMutation(tableId, cell.getValue().toString()));
        }
      }

      bw.flush();

      ms.clearColumns();
    }

    // second pass: delete the metadata rows themselves, batching per row
    for (Entry<Key, Value> cell : ms) {
      Key key = cell.getKey();

      if (m == null) {
        m = new Mutation(key.getRow());
        if (lock != null) putLockID(lock, m);
      }

      // row changed: send the previous row's deletes and start a new mutation
      if (key.getRow().compareTo(m.getRow(), 0, m.getRow().length) != 0) {
        bw.addMutation(m);
        m = new Mutation(key.getRow());
        if (lock != null) putLockID(lock, m);
      }
      m.putDelete(key.getColumnFamily(), key.getColumnQualifier());
    }

    if (m != null) bw.addMutation(m);

    bw.close();
  }
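
Writing the delete markers before clearing the metadata rows is what makes this fault tolerant: if the process dies between the two passes, the worst case is extra garbage-collection candidates, never orphaned files. A hedged caller sketch, with credentials and lock assumed from the surrounding table-delete operation and the table id illustrative:

  // Hedged sketch: remove all metadata for table "2", inserting GC delete
  // markers for its files and directories first.
  deleteTable("2", true, credentials, lock);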