Example No. 1
  public boolean alterElementVisibility(
      Mutation m, AccumuloElement element, Visibility newVisibility) {
    ColumnVisibility currentColumnVisibility =
        visibilityToAccumuloVisibility(element.getVisibility());
    ColumnVisibility newColumnVisibility = visibilityToAccumuloVisibility(newVisibility);
    if (currentColumnVisibility.equals(newColumnVisibility)) {
      return false;
    }

    if (element instanceof AccumuloEdge) {
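      // the visibility is part of the Accumulo key, so the signal and vertex-reference cells are
      // deleted under the old visibility and re-written under the new one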
      AccumuloEdge edge = (AccumuloEdge) element;
      m.putDelete(
          AccumuloEdge.CF_SIGNAL,
          new Text(edge.getLabel()),
          currentColumnVisibility,
          currentTimeMillis());
      m.put(
          AccumuloEdge.CF_SIGNAL,
          new Text(edge.getLabel()),
          newColumnVisibility,
          currentTimeMillis(),
          ElementMutationBuilder.EMPTY_VALUE);

      m.putDelete(
          AccumuloEdge.CF_OUT_VERTEX,
          new Text(edge.getVertexId(Direction.OUT)),
          currentColumnVisibility,
          currentTimeMillis());
      m.put(
          AccumuloEdge.CF_OUT_VERTEX,
          new Text(edge.getVertexId(Direction.OUT)),
          newColumnVisibility,
          currentTimeMillis(),
          ElementMutationBuilder.EMPTY_VALUE);

      m.putDelete(
          AccumuloEdge.CF_IN_VERTEX,
          new Text(edge.getVertexId(Direction.IN)),
          currentColumnVisibility,
          currentTimeMillis());
      m.put(
          AccumuloEdge.CF_IN_VERTEX,
          new Text(edge.getVertexId(Direction.IN)),
          newColumnVisibility,
          currentTimeMillis(),
          ElementMutationBuilder.EMPTY_VALUE);
    } else if (element instanceof AccumuloVertex) {
      m.putDelete(
          AccumuloVertex.CF_SIGNAL, EMPTY_TEXT, currentColumnVisibility, currentTimeMillis());
      m.put(
          AccumuloVertex.CF_SIGNAL,
          EMPTY_TEXT,
          newColumnVisibility,
          currentTimeMillis(),
          ElementMutationBuilder.EMPTY_VALUE);
    } else {
      throw new IllegalArgumentException("Invalid element type: " + element);
    }
    return true;
  }
Example No. 2
 public static void moveMetaDeleteMarkers(Instance instance, Credentials creds) {
   // move old delete markers to new location, to standardize table schema between all metadata
   // tables
   byte[] EMPTY_BYTES = new byte[0];
   Scanner scanner = new ScannerImpl(instance, creds, RootTable.ID, Authorizations.EMPTY);
   String oldDeletesPrefix = "!!~del";
   Range oldDeletesRange = new Range(oldDeletesPrefix, true, "!!~dem", false);
   scanner.setRange(oldDeletesRange);
   for (Entry<Key, Value> entry : scanner) {
     String row = entry.getKey().getRow().toString();
     if (row.startsWith(oldDeletesPrefix)) {
       String filename = row.substring(oldDeletesPrefix.length());
       // add the new entry first
       log.info("Moving " + filename + " marker in " + RootTable.NAME);
       Mutation m = new Mutation(MetadataSchema.DeletesSection.getRowPrefix() + filename);
       m.put(EMPTY_BYTES, EMPTY_BYTES, EMPTY_BYTES);
       update(creds, m, RootTable.EXTENT);
       // remove the old entry
       m = new Mutation(entry.getKey().getRow());
       m.putDelete(EMPTY_BYTES, EMPTY_BYTES);
       update(creds, m, RootTable.OLD_EXTENT);
     } else {
       break;
     }
   }
 }
Example No. 3
 public static void removeUnusedWALEntries(
     KeyExtent extent, List<LogEntry> logEntries, ZooLock zooLock) {
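   // the root tablet's WAL entries are tracked in ZooKeeper; every other tablet records them in
   // the metadata table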
   if (extent.isRootTablet()) {
     for (LogEntry entry : logEntries) {
       String root = getZookeeperLogLocation();
       while (true) {
         try {
           IZooReaderWriter zoo = ZooReaderWriter.getInstance();
           if (zoo.isLockHeld(zooLock.getLockID()))
             zoo.recursiveDelete(root + "/" + entry.filename, NodeMissingPolicy.SKIP);
           break;
         } catch (Exception e) {
           log.error(e, e);
         }
         UtilWaitThread.sleep(1000);
       }
     }
   } else {
     Mutation m = new Mutation(extent.getMetadataEntry());
     for (LogEntry entry : logEntries) {
       m.putDelete(LogColumnFamily.NAME, new Text(entry.toString()));
     }
     update(SystemCredentials.get(), zooLock, m, extent);
   }
 }
Example No. 4
 private void addPropertyMetadataItemDeleteToMutation(
     Mutation m, Text columnQualifier, ColumnVisibility metadataVisibility) {
   m.putDelete(
       AccumuloElement.CF_PROPERTY_METADATA,
       columnQualifier,
       metadataVisibility,
       currentTimeMillis());
 }
Example No. 5
  public static void removeScanFiles(
      KeyExtent extent, Set<FileRef> scanFiles, Credentials credentials, ZooLock zooLock) {
    Mutation m = new Mutation(extent.getMetadataEntry());

    for (FileRef pathToRemove : scanFiles)
      m.putDelete(ScanFileColumnFamily.NAME, pathToRemove.meta());

    update(credentials, zooLock, m, extent);
  }
Example No. 6
 public void addPropertyDeleteToMutation(Mutation m, PropertyDeleteMutation propertyDelete) {
   Text columnQualifier =
       KeyHelper.getColumnQualifierFromPropertyColumnQualifier(
           propertyDelete.getKey(), propertyDelete.getName(), getNameSubstitutionStrategy());
   ColumnVisibility columnVisibility =
       visibilityToAccumuloVisibility(propertyDelete.getVisibility());
   m.putDelete(
       AccumuloElement.CF_PROPERTY, columnQualifier, columnVisibility, currentTimeMillis());
   addPropertyDeleteMetadataToMutation(m, propertyDelete);
 }
Example No. 7
  public static void removeBulkLoadInProgressFlag(String path) {

    Mutation m = new Mutation(MetadataSchema.BlipSection.getRowPrefix() + path);
    m.putDelete(EMPTY_TEXT, EMPTY_TEXT);

    // new KeyExtent is only added to force update to write to the metadata table, not the root
    // table, because bulk loads aren't supported to the metadata table
    update(SystemCredentials.get(), m, new KeyExtent(new Text("anythingNotMetadata"), null, null));
  }
Example No. 8
 private Mutation createAlterEdgeLabelMutation(
     AccumuloEdge edge, String newEdgeLabel, ColumnVisibility edgeColumnVisibility) {
   String edgeRowKey = edge.getId();
   Mutation m = new Mutation(edgeRowKey);
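   // the edge label is stored in the signal cell's column qualifier, so renaming the edge means
   // deleting the old cell and writing a new one under the same visibility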
   m.putDelete(
       AccumuloEdge.CF_SIGNAL,
       new Text(edge.getLabel()),
       edgeColumnVisibility,
       currentTimeMillis());
   m.put(
       AccumuloEdge.CF_SIGNAL,
       new Text(newEdgeLabel),
       edgeColumnVisibility,
       currentTimeMillis(),
       ElementMutationBuilder.EMPTY_VALUE);
   return m;
 }
Example No. 9
 public boolean alterEdgeVertexInVertex(
     Mutation vertexInMutation, Edge edge, Visibility newVisibility) {
   ColumnVisibility currentColumnVisibility = visibilityToAccumuloVisibility(edge.getVisibility());
   ColumnVisibility newColumnVisibility = visibilityToAccumuloVisibility(newVisibility);
   if (currentColumnVisibility.equals(newColumnVisibility)) {
     return false;
   }
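   // re-write the in-edge reference cell under the new visibility, keeping the label and
   // out-vertex id in the cell value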
   EdgeInfo edgeInfo =
       new EdgeInfo(
           getNameSubstitutionStrategy().deflate(edge.getLabel()),
           edge.getVertexId(Direction.OUT));
   vertexInMutation.putDelete(
       AccumuloVertex.CF_IN_EDGE, new Text(edge.getId()), currentColumnVisibility);
   vertexInMutation.put(
       AccumuloVertex.CF_IN_EDGE, new Text(edge.getId()), newColumnVisibility, edgeInfo.toValue());
   return true;
 }
Example No. 10
 public static void removeBulkLoadEntries(Connector conn, String tableId, long tid)
     throws Exception {
   Scanner mscanner =
       new IsolatedScanner(conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY));
   mscanner.setRange(new KeyExtent(new Text(tableId), null, null).toMetadataRange());
   mscanner.fetchColumnFamily(TabletsSection.BulkFileColumnFamily.NAME);
   BatchWriter bw = conn.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
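   // delete only the bulk-file markers whose value matches the given transaction id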
   for (Entry<Key, Value> entry : mscanner) {
     log.debug("Looking at entry " + entry + " with tid " + tid);
     if (Long.parseLong(entry.getValue().toString()) == tid) {
       log.debug("deleting entry " + entry);
       Mutation m = new Mutation(entry.getKey().getRow());
       m.putDelete(entry.getKey().getColumnFamily(), entry.getKey().getColumnQualifier());
       bw.addMutation(m);
     }
   }
   bw.close();
 }
Example No. 11
 public void addPropertyDeleteToMutation(Mutation m, Property property) {
   Preconditions.checkNotNull(m, "mutation cannot be null");
   Preconditions.checkNotNull(property, "property cannot be null");
   Text columnQualifier =
       KeyHelper.getColumnQualifierFromPropertyColumnQualifier(
           property, getNameSubstitutionStrategy());
   ColumnVisibility columnVisibility = visibilityToAccumuloVisibility(property.getVisibility());
   m.putDelete(
       AccumuloElement.CF_PROPERTY, columnQualifier, columnVisibility, currentTimeMillis());
   for (Metadata.Entry metadataEntry : property.getMetadata().entrySet()) {
     Text metadataEntryColumnQualifier =
         getPropertyMetadataColumnQualifierText(property, metadataEntry);
     ColumnVisibility metadataEntryVisibility =
         visibilityToAccumuloVisibility(metadataEntry.getVisibility());
     addPropertyMetadataItemDeleteToMutation(
         m, metadataEntryColumnQualifier, metadataEntryVisibility);
   }
 }
Example No. 12
 private Mutation createMutationForEdge(AccumuloEdge edge, ColumnVisibility edgeColumnVisibility) {
   String edgeRowKey = edge.getId();
   Mutation m = new Mutation(edgeRowKey);
   String edgeLabel = edge.getLabel();
   if (edge.getNewEdgeLabel() != null) {
     edgeLabel = edge.getNewEdgeLabel();
     m.putDelete(
         AccumuloEdge.CF_SIGNAL,
         new Text(edge.getLabel()),
         edgeColumnVisibility,
         currentTimeMillis());
   }
   m.put(
       AccumuloEdge.CF_SIGNAL,
       new Text(edgeLabel),
       edgeColumnVisibility,
       edge.getTimestamp(),
       ElementMutationBuilder.EMPTY_VALUE);
   m.put(
       AccumuloEdge.CF_OUT_VERTEX,
       new Text(edge.getVertexId(Direction.OUT)),
       edgeColumnVisibility,
       edge.getTimestamp(),
       ElementMutationBuilder.EMPTY_VALUE);
   m.put(
       AccumuloEdge.CF_IN_VERTEX,
       new Text(edge.getVertexId(Direction.IN)),
       edgeColumnVisibility,
       edge.getTimestamp(),
       ElementMutationBuilder.EMPTY_VALUE);
   for (PropertyDeleteMutation propertyDeleteMutation : edge.getPropertyDeleteMutations()) {
     addPropertyDeleteToMutation(m, propertyDeleteMutation);
   }
   for (PropertySoftDeleteMutation propertySoftDeleteMutation :
       edge.getPropertySoftDeleteMutations()) {
     addPropertySoftDeleteToMutation(m, propertySoftDeleteMutation);
   }
   for (Property property : edge.getProperties()) {
     addPropertyToMutation(edge.getGraph(), m, edgeRowKey, property);
   }
   return m;
 }
Example No. 13
  public static void finishSplit(
      Text metadataEntry,
      Map<FileRef, DataFileValue> datafileSizes,
      List<FileRef> highDatafilesToRemove,
      Credentials credentials,
      ZooLock zooLock) {
    Mutation m = new Mutation(metadataEntry);
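    // clear the split-ratio, old-prev-row and chopped markers, write the updated data file
    // entries, and delete the entries listed in highDatafilesToRemove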
    TabletsSection.TabletColumnFamily.SPLIT_RATIO_COLUMN.putDelete(m);
    TabletsSection.TabletColumnFamily.OLD_PREV_ROW_COLUMN.putDelete(m);
    ChoppedColumnFamily.CHOPPED_COLUMN.putDelete(m);

    for (Entry<FileRef, DataFileValue> entry : datafileSizes.entrySet()) {
      m.put(DataFileColumnFamily.NAME, entry.getKey().meta(), new Value(entry.getValue().encode()));
    }

    for (FileRef pathToRemove : highDatafilesToRemove) {
      m.putDelete(DataFileColumnFamily.NAME, pathToRemove.meta());
    }

    update(credentials, zooLock, m, new KeyExtent(metadataEntry, (Text) null));
  }
Example No. 14
  private static long scrambleDeleteHalfAndCheck(
      ClientOnDefaultTable opts,
      ScannerOpts scanOpts,
      BatchWriterOpts bwOpts,
      String tableName,
      Set<RowColumn> rows)
      throws Exception {
    int result = 0;
    ArrayList<RowColumn> entries = new ArrayList<RowColumn>(rows);
    java.util.Collections.shuffle(entries);

    Connector connector = opts.getConnector();
    BatchWriter mutations = connector.createBatchWriter(tableName, bwOpts.getBatchWriterConfig());

    for (int i = 0; i < (entries.size() + 1) / 2; i++) {
      RowColumn rc = entries.get(i);
      Mutation m = new Mutation(rc.row);
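      // delete at timestamp + 1 so the delete marker supersedes the original entry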
      m.putDelete(
          new Text(rc.column.columnFamily),
          new Text(rc.column.columnQualifier),
          new ColumnVisibility(rc.column.getColumnVisibility()),
          rc.timestamp + 1);
      mutations.addMutation(m);
      rows.remove(rc);
      result++;
    }

    mutations.close();

    Set<RowColumn> current = scanAll(opts, scanOpts, tableName);
    current.removeAll(rows);
    if (current.size() > 0) {
      throw new RuntimeException(current.size() + " records not deleted");
    }
    return result;
  }
Example No. 15
  @Test
  public void waitsUntilEntriesAreReplicated() throws Exception {
    Connector conn = inst.getConnector("root", new PasswordToken(""));
    conn.tableOperations().create("foo");
    Text tableId = new Text(conn.tableOperations().tableIdMap().get("foo"));

    String file1 = "/accumulo/wals/tserver+port/" + UUID.randomUUID(),
        file2 = "/accumulo/wals/tserver+port/" + UUID.randomUUID();
    Status stat =
        Status.newBuilder()
            .setBegin(0)
            .setEnd(10000)
            .setInfiniteEnd(false)
            .setClosed(false)
            .build();

    BatchWriter bw = ReplicationTable.getBatchWriter(conn);

    Mutation m = new Mutation(file1);
    StatusSection.add(m, tableId, ProtobufUtil.toValue(stat));
    bw.addMutation(m);

    m = new Mutation(file2);
    StatusSection.add(m, tableId, ProtobufUtil.toValue(stat));
    bw.addMutation(m);

    bw.close();

    bw = conn.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
    m = new Mutation(ReplicationSection.getRowPrefix() + file1);
    m.put(ReplicationSection.COLF, tableId, ProtobufUtil.toValue(stat));

    bw.addMutation(m);

    m = new Mutation(ReplicationSection.getRowPrefix() + file2);
    m.put(ReplicationSection.COLF, tableId, ProtobufUtil.toValue(stat));
    bw.addMutation(m);

    bw.close();

    final AtomicBoolean done = new AtomicBoolean(false);
    final AtomicBoolean exception = new AtomicBoolean(false);
    ClientContext context =
        new ClientContext(
            inst, new Credentials("root", new PasswordToken("")), new ClientConfiguration());
    final ReplicationOperationsImpl roi = new ReplicationOperationsImpl(context);
    Thread t =
        new Thread(
            new Runnable() {
              @Override
              public void run() {
                try {
                  roi.drain("foo");
                } catch (Exception e) {
                  log.error("Got error", e);
                  exception.set(true);
                }
                done.set(true);
              }
            });

    t.start();

    // With the records, we shouldn't be drained
    Assert.assertFalse(done.get());

    bw = conn.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
    m = new Mutation(ReplicationSection.getRowPrefix() + file1);
    m.putDelete(ReplicationSection.COLF, tableId);
    bw.addMutation(m);
    bw.flush();

    Assert.assertFalse(done.get());

    m = new Mutation(ReplicationSection.getRowPrefix() + file2);
    m.putDelete(ReplicationSection.COLF, tableId);
    bw.addMutation(m);
    bw.flush();
    bw.close();

    // Removing metadata entries doesn't change anything
    Assert.assertFalse(done.get());

    // Remove the replication entries too
    bw = ReplicationTable.getBatchWriter(conn);
    m = new Mutation(file1);
    m.putDelete(StatusSection.NAME, tableId);
    bw.addMutation(m);
    bw.flush();

    Assert.assertFalse(done.get());

    m = new Mutation(file2);
    m.putDelete(StatusSection.NAME, tableId);
    bw.addMutation(m);
    bw.flush();

    try {
      t.join(5000);
    } catch (InterruptedException e) {
      Assert.fail("ReplicationOperations.drain did not complete");
    }

    // After both metadata and replication
    Assert.assertTrue(done.get());
    Assert.assertFalse(exception.get());
  }
Example No. 16
  @Test
  public void laterCreatedLogsDontBlockExecution() throws Exception {
    Connector conn = inst.getConnector("root", new PasswordToken(""));
    conn.tableOperations().create("foo");

    Text tableId1 = new Text(conn.tableOperations().tableIdMap().get("foo"));

    String file1 = "/accumulo/wals/tserver+port/" + UUID.randomUUID();
    Status stat =
        Status.newBuilder()
            .setBegin(0)
            .setEnd(10000)
            .setInfiniteEnd(false)
            .setClosed(false)
            .build();

    BatchWriter bw = ReplicationTable.getBatchWriter(conn);
    Mutation m = new Mutation(file1);
    StatusSection.add(m, tableId1, ProtobufUtil.toValue(stat));
    bw.addMutation(m);
    bw.close();

    bw = conn.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
    m = new Mutation(ReplicationSection.getRowPrefix() + file1);
    m.put(ReplicationSection.COLF, tableId1, ProtobufUtil.toValue(stat));
    bw.addMutation(m);

    bw.close();

    System.out.println("Reading metadata first time");
    for (Entry<Key, Value> e : conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY)) {
      System.out.println(e.getKey());
    }

    final AtomicBoolean done = new AtomicBoolean(false);
    final AtomicBoolean exception = new AtomicBoolean(false);
    ClientContext context =
        new ClientContext(
            inst, new Credentials("root", new PasswordToken("")), new ClientConfiguration());
    final ReplicationOperationsImpl roi = new ReplicationOperationsImpl(context);
    Thread t =
        new Thread(
            new Runnable() {
              @Override
              public void run() {
                try {
                  roi.drain("foo");
                } catch (Exception e) {
                  log.error("Got error", e);
                  exception.set(true);
                }
                done.set(true);
              }
            });

    t.start();

    // We need to wait long enough for the table to read once
    Thread.sleep(2000);

    // Write another file, but also delete the old files
    bw = conn.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
    m =
        new Mutation(
            ReplicationSection.getRowPrefix() + "/accumulo/wals/tserver+port/" + UUID.randomUUID());
    m.put(ReplicationSection.COLF, tableId1, ProtobufUtil.toValue(stat));
    bw.addMutation(m);
    m = new Mutation(ReplicationSection.getRowPrefix() + file1);
    m.putDelete(ReplicationSection.COLF, tableId1);
    bw.addMutation(m);
    bw.close();

    System.out.println("Reading metadata second time");
    for (Entry<Key, Value> e : conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY)) {
      System.out.println(e.getKey());
    }

    bw = ReplicationTable.getBatchWriter(conn);
    m = new Mutation(file1);
    m.putDelete(StatusSection.NAME, tableId1);
    bw.addMutation(m);
    bw.close();

    try {
      t.join(5000);
    } catch (InterruptedException e) {
      Assert.fail("ReplicationOperations.drain did not complete");
    }

    // We should pass immediately because we aren't waiting on both files to be deleted (just the
    // one that we did)
    Assert.assertTrue(done.get());
  }
Example No. 17
  public static void deleteTable(
      String tableId, boolean insertDeletes, Credentials credentials, ZooLock lock)
      throws AccumuloException, IOException {
    Scanner ms =
        new ScannerImpl(
            HdfsZooInstance.getInstance(), credentials, MetadataTable.ID, Authorizations.EMPTY);
    Text tableIdText = new Text(tableId);
    BatchWriter bw =
        new BatchWriterImpl(
            HdfsZooInstance.getInstance(),
            credentials,
            MetadataTable.ID,
            new BatchWriterConfig()
                .setMaxMemory(1000000)
                .setMaxLatency(120000l, TimeUnit.MILLISECONDS)
                .setMaxWriteThreads(2));

    // scan metadata for our table and delete everything we find
    Mutation m = null;
    ms.setRange(new KeyExtent(tableIdText, null, null).toMetadataRange());

    // insert deletes before deleting data from metadata... this makes the code fault tolerant
    if (insertDeletes) {

      ms.fetchColumnFamily(DataFileColumnFamily.NAME);
      TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.fetch(ms);

      for (Entry<Key, Value> cell : ms) {
        Key key = cell.getKey();

        if (key.getColumnFamily().equals(DataFileColumnFamily.NAME)) {
          FileRef ref = new FileRef(VolumeManagerImpl.get(), key);
          bw.addMutation(createDeleteMutation(tableId, ref.meta().toString()));
        }

        if (TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.hasColumns(key)) {
          bw.addMutation(createDeleteMutation(tableId, cell.getValue().toString()));
        }
      }

      bw.flush();

      ms.clearColumns();
    }

    for (Entry<Key, Value> cell : ms) {
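      // batch all column deletes for a row into one mutation, starting a new mutation whenever
      // the row changes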
      Key key = cell.getKey();

      if (m == null) {
        m = new Mutation(key.getRow());
        if (lock != null) putLockID(lock, m);
      }

      if (key.getRow().compareTo(m.getRow(), 0, m.getRow().length) != 0) {
        bw.addMutation(m);
        m = new Mutation(key.getRow());
        if (lock != null) putLockID(lock, m);
      }
      m.putDelete(key.getColumnFamily(), key.getColumnQualifier());
    }

    if (m != null) bw.addMutation(m);

    bw.close();
  }
Example No. 18
  static int checkClone(String srcTableId, String tableId, Connector conn, BatchWriter bw)
      throws TableNotFoundException, MutationsRejectedException {
    TabletIterator srcIter =
        new TabletIterator(
            createCloneScanner(srcTableId, conn),
            new KeyExtent(new Text(srcTableId), null, null).toMetadataRange(),
            true,
            true);
    TabletIterator cloneIter =
        new TabletIterator(
            createCloneScanner(tableId, conn),
            new KeyExtent(new Text(tableId), null, null).toMetadataRange(),
            true,
            true);

    if (!cloneIter.hasNext() || !srcIter.hasNext())
      throw new RuntimeException(
          " table deleted during clone?  srcTableId = " + srcTableId + " tableId=" + tableId);

    int rewrites = 0;

    while (cloneIter.hasNext()) {
      Map<Key, Value> cloneTablet = cloneIter.next();
      Text cloneEndRow =
          new KeyExtent(cloneTablet.keySet().iterator().next().getRow(), (Text) null).getEndRow();
      HashSet<String> cloneFiles = new HashSet<String>();

      boolean cloneSuccessful = false;
      for (Entry<Key, Value> entry : cloneTablet.entrySet()) {
        if (entry.getKey().getColumnFamily().equals(ClonedColumnFamily.NAME)) {
          cloneSuccessful = true;
          break;
        }
      }

      if (!cloneSuccessful) getFiles(cloneFiles, cloneTablet, null);

      List<Map<Key, Value>> srcTablets = new ArrayList<Map<Key, Value>>();
      Map<Key, Value> srcTablet = srcIter.next();
      srcTablets.add(srcTablet);

      Text srcEndRow =
          new KeyExtent(srcTablet.keySet().iterator().next().getRow(), (Text) null).getEndRow();

      int cmp = compareEndRows(cloneEndRow, srcEndRow);
      if (cmp < 0)
        throw new TabletIterator.TabletDeletedException(
            "Tablets deleted from src during clone : " + cloneEndRow + " " + srcEndRow);

      HashSet<String> srcFiles = new HashSet<String>();
      if (!cloneSuccessful) getFiles(srcFiles, srcTablet, srcTableId);

      while (cmp > 0) {
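        // advance through src tablets until the src end row catches up with the clone end row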
        srcTablet = srcIter.next();
        srcTablets.add(srcTablet);
        srcEndRow =
            new KeyExtent(srcTablet.keySet().iterator().next().getRow(), (Text) null).getEndRow();
        cmp = compareEndRows(cloneEndRow, srcEndRow);
        if (cmp < 0)
          throw new TabletIterator.TabletDeletedException(
              "Tablets deleted from src during clone : " + cloneEndRow + " " + srcEndRow);

        if (!cloneSuccessful) getFiles(srcFiles, srcTablet, srcTableId);
      }

      if (cloneSuccessful) continue;

      if (!srcFiles.containsAll(cloneFiles)) {
        // delete existing cloned tablet entry
        Mutation m = new Mutation(cloneTablet.keySet().iterator().next().getRow());

        for (Entry<Key, Value> entry : cloneTablet.entrySet()) {
          Key k = entry.getKey();
          m.putDelete(k.getColumnFamily(), k.getColumnQualifier(), k.getTimestamp());
        }

        bw.addMutation(m);

        for (Map<Key, Value> st : srcTablets)
          bw.addMutation(createCloneMutation(srcTableId, tableId, st));

        rewrites++;
      } else {
        // write out marker that this tablet was successfully cloned
        Mutation m = new Mutation(cloneTablet.keySet().iterator().next().getRow());
        m.put(ClonedColumnFamily.NAME, new Text(""), new Value("OK".getBytes()));
        bw.addMutation(m);
      }
    }

    bw.flush();
    return rewrites;
  }
Example No. 19
  public static void cloneTable(
      Instance instance, String srcTableId, String tableId, VolumeManager volumeManager)
      throws Exception {

    Connector conn =
        instance.getConnector(
            SystemCredentials.get().getPrincipal(), SystemCredentials.get().getToken());
    BatchWriter bw = conn.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());

    while (true) {

      try {
        initializeClone(srcTableId, tableId, conn, bw);

        // the following loop looks for changes in the files that occurred during the copy... if
        // files were dereferenced then they could have been GCed

        while (true) {
          int rewrites = checkClone(srcTableId, tableId, conn, bw);

          if (rewrites == 0) break;
        }

        bw.flush();
        break;

      } catch (TabletIterator.TabletDeletedException tde) {
        // tablets were merged in the src table
        bw.flush();

        // delete what we have cloned and try again
        deleteTable(tableId, false, SystemCredentials.get(), null);

        log.debug(
            "Tablets merged in table " + srcTableId + " while attempting to clone, trying again");

        UtilWaitThread.sleep(100);
      }
    }

    // delete the clone markers and create directory entries
    Scanner mscanner = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
    mscanner.setRange(new KeyExtent(new Text(tableId), null, null).toMetadataRange());
    mscanner.fetchColumnFamily(ClonedColumnFamily.NAME);

    int dirCount = 0;

    for (Entry<Key, Value> entry : mscanner) {
      Key k = entry.getKey();
      Mutation m = new Mutation(k.getRow());
      m.putDelete(k.getColumnFamily(), k.getColumnQualifier());
      String dir =
          volumeManager.choose(ServerConstants.getTablesDirs())
              + "/"
              + tableId
              + new String(FastFormat.toZeroPaddedString(dirCount++, 8, 16, "/c-".getBytes()));
      TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.put(m, new Value(dir.getBytes()));
      bw.addMutation(m);
    }

    bw.close();
  }
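All of the examples above follow the same basic pattern: build a Mutation for the row in question, call putDelete once per cell to remove (optionally passing a ColumnVisibility and timestamp), and hand the mutation to a BatchWriter or a helper such as update(...). The sketch below is a minimal illustration of that pattern only, not taken from any of the projects above; the Connector, table name, row and column values are placeholders.

import org.apache.accumulo.core.client.BatchWriter;
import org.apache.accumulo.core.client.BatchWriterConfig;
import org.apache.accumulo.core.client.Connector;
import org.apache.accumulo.core.data.Mutation;
import org.apache.accumulo.core.security.ColumnVisibility;
import org.apache.hadoop.io.Text;

public class PutDeleteSketch {
  // conn and tableName are placeholders for illustration; a real caller would obtain the
  // Connector from an Instance and pass the name of an existing table
  static void deleteCell(Connector conn, String tableName) throws Exception {
    BatchWriter bw = conn.createBatchWriter(tableName, new BatchWriterConfig());
    try {
      Mutation m = new Mutation("someRow");
      // queues a delete marker for the cell; the ColumnVisibility and timestamp arguments
      // are optional overloads, as the examples above show
      m.putDelete(new Text("someCF"), new Text("someCQ"),
          new ColumnVisibility("someVis"), System.currentTimeMillis());
      bw.addMutation(m);
    } finally {
      bw.close();
    }
  }
}

Note that putDelete only queues a delete marker; the underlying data is suppressed at read time and physically removed later during compaction.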