Example #1
 /**
  * Set the table's replication switch if it is not already in the requested state.
  *
  * @param tableName name of the table
  * @param isRepEnabled whether the replication switch should be enabled or disabled
  * @throws IOException if a remote or network exception occurs
  */
 private void setTableRep(final TableName tableName, boolean isRepEnabled) throws IOException {
   Admin admin = null;
   try {
     admin = this.connection.getAdmin();
     HTableDescriptor htd = admin.getTableDescriptor(tableName);
     if (isTableRepEnabled(htd) ^ isRepEnabled) {
       boolean isOnlineSchemaUpdateEnabled =
           this.connection
               .getConfiguration()
               .getBoolean("hbase.online.schema.update.enable", true);
       if (!isOnlineSchemaUpdateEnabled) {
         admin.disableTable(tableName);
       }
       for (HColumnDescriptor hcd : htd.getFamilies()) {
         hcd.setScope(
             isRepEnabled
                 ? HConstants.REPLICATION_SCOPE_GLOBAL
                 : HConstants.REPLICATION_SCOPE_LOCAL);
       }
       admin.modifyTable(tableName, htd);
       if (!isOnlineSchemaUpdateEnabled) {
         admin.enableTable(tableName);
       }
     }
   } finally {
     if (admin != null) {
       try {
         admin.close();
       } catch (IOException e) {
         LOG.warn("Failed to close admin connection.");
         LOG.debug("Details on failure to close admin connection.", e);
       }
     }
   }
 }
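
For context, this private helper looks like the logic sitting behind ReplicationAdmin's public replication switches. A minimal usage sketch, assuming an HBase 1.1+ client where ReplicationAdmin exposes enableTableRep/disableTableRep (verify the exact API against your client version; the table name is illustrative):

  // Usage sketch only; assumes ReplicationAdmin.enableTableRep/disableTableRep exist
  // in the client version on the classpath (HBase 1.1+).
  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.client.replication.ReplicationAdmin;

  public class ReplicationSwitchSketch {
    public static void main(String[] args) throws Exception {
      Configuration conf = HBaseConfiguration.create();
      // ReplicationAdmin is Closeable; its public switches flip the per-family
      // replication scope much like setTableRep above.
      try (ReplicationAdmin replicationAdmin = new ReplicationAdmin(conf)) {
        replicationAdmin.enableTableRep(TableName.valueOf("my_table"));  // scope -> GLOBAL
        replicationAdmin.disableTableRep(TableName.valueOf("my_table")); // scope -> LOCAL
      }
    }
  }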
Example #2
 /**
  * Serialize the column family to data block encoding map into the configuration. Invoked while
  * configuring the MR job for incremental load.
  *
  * @param tableDescriptor to read the properties from
  * @param conf to persist serialized values into
  * @throws IOException on failure to read column family descriptors
  */
 @VisibleForTesting
 static void configureDataBlockEncoding(HTableDescriptor tableDescriptor, Configuration conf)
     throws UnsupportedEncodingException {
   if (tableDescriptor == null) {
     // could happen with mock table instance
     return;
   }
   StringBuilder dataBlockEncodingConfigValue = new StringBuilder();
   Collection<HColumnDescriptor> families = tableDescriptor.getFamilies();
   int i = 0;
   for (HColumnDescriptor familyDescriptor : families) {
     if (i++ > 0) {
       dataBlockEncodingConfigValue.append('&');
     }
     dataBlockEncodingConfigValue.append(
         URLEncoder.encode(familyDescriptor.getNameAsString(), "UTF-8"));
     dataBlockEncodingConfigValue.append('=');
     DataBlockEncoding encoding = familyDescriptor.getDataBlockEncoding();
     if (encoding == null) {
       encoding = DataBlockEncoding.NONE;
     }
     dataBlockEncodingConfigValue.append(URLEncoder.encode(encoding.toString(), "UTF-8"));
   }
   conf.set(DATABLOCK_ENCODING_FAMILIES_CONF_KEY, dataBlockEncodingConfigValue.toString());
 }
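
The string written here is a set of URL-encoded "family=value" pairs joined by '&'. For reference, a minimal sketch of the decoding side under that assumption (HFileOutputFormat2 has its own createFamily*Map readers; the helper below is hypothetical):

  // Hypothetical decoder for the "family=ENCODING&family=ENCODING" string produced above.
  import java.io.UnsupportedEncodingException;
  import java.net.URLDecoder;
  import java.util.Map;
  import java.util.TreeMap;

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
  import org.apache.hadoop.hbase.util.Bytes;

  final class DataBlockEncodingConfigSketch {
    static Map<byte[], DataBlockEncoding> decode(Configuration conf, String confKey)
        throws UnsupportedEncodingException {
      // byte[] keys need an explicit comparator; a plain HashMap would compare by identity.
      Map<byte[], DataBlockEncoding> result = new TreeMap<>(Bytes.BYTES_COMPARATOR);
      String serialized = conf.get(confKey, "");
      if (serialized.isEmpty()) {
        return result;
      }
      for (String pair : serialized.split("&")) {
        String[] parts = pair.split("=", 2);
        byte[] family = Bytes.toBytes(URLDecoder.decode(parts[0], "UTF-8"));
        result.put(family, DataBlockEncoding.valueOf(URLDecoder.decode(parts[1], "UTF-8")));
      }
      return result;
    }
  }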
Example #3
 /**
  * @param htd table descriptor details for the table to check
  * @return true if table's replication switch is enabled
  */
 private boolean isTableRepEnabled(HTableDescriptor htd) {
   for (HColumnDescriptor hcd : htd.getFamilies()) {
     if (hcd.getScope() != HConstants.REPLICATION_SCOPE_GLOBAL) {
       return false;
     }
   }
   return true;
 }
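
Note how this predicate pairs with the XOR in Example #1: isTableRepEnabled(htd) ^ isRepEnabled is true exactly when the current per-family scope differs from the requested state, so the descriptor is only rewritten when a change is actually needed. A tiny self-contained illustration:

  // Illustration only: the XOR in Example #1 fires exactly when the two states differ.
  boolean currentlyEnabled = true;  // e.g. all families already at REPLICATION_SCOPE_GLOBAL
  boolean requestEnable = true;     // caller asked to enable replication
  if (currentlyEnabled ^ requestEnable) {
    // would modify the table; not reached here because both flags agree
  }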
Example #4
 /**
  * Check whether the region has any reference files.
  *
  * @param htd table descriptor of the region
  * @return true if the region has a reference file
  * @throws IOException
  */
 public boolean hasReferences(final HTableDescriptor htd) throws IOException {
   for (HColumnDescriptor family : htd.getFamilies()) {
     if (hasReferences(family.getNameAsString())) {
       return true;
     }
   }
   return false;
 }
Example #5
  private void populateMappingComboAndFamilyStuff() {
    String tableName = "";
    if (!Const.isEmpty(m_existingTableNamesCombo.getText().trim())) {
      tableName = m_existingTableNamesCombo.getText().trim();

      if (tableName.indexOf('@') > 0) {
        tableName = tableName.substring(0, tableName.indexOf('@'));
      }
    }

    // defaults if we fail to connect, the table doesn't exist, etc.
    m_familyCI.setComboValues(new String[] {""});
    m_existingMappingNamesCombo.removeAll();

    if (m_admin != null && !Const.isEmpty(tableName)) {
      try {

        // first get the existing mapping names (if any)
        List<String> mappingNames = m_admin.getMappingNames(tableName);
        for (String m : mappingNames) {
          m_existingMappingNamesCombo.add(m);
        }

        // now get family information for this table
        Configuration conf = m_admin.getConnection();
        HBaseAdmin admin = new HBaseAdmin(conf);

        if (admin.tableExists(tableName)) {
          HTableDescriptor descriptor = admin.getTableDescriptor(Bytes.toBytes(tableName));

          Collection<HColumnDescriptor> families = descriptor.getFamilies();
          String[] familyNames = new String[families.size()];
          int i = 0;
          for (HColumnDescriptor d : families) {
            familyNames[i++] = d.getNameAsString();
          }

          m_familyCI.setComboValues(familyNames);
        } else {
          m_familyCI.setComboValues(new String[] {""});
        }

        m_familiesInvalidated = false;
        return;

      } catch (Exception e) {
        // TODO popup error dialog
        e.printStackTrace();
      }
    }
  }
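
One issue worth flagging in the snippet above: the HBaseAdmin it creates is never closed, so each invocation can leak an underlying connection. A sketch of the same family lookup with the admin released afterwards, assuming a client version where HBaseAdmin exposes close() (a method-level fragment, like the snippet above):

   // Sketch only: the family-name lookup from the dialog, with the admin closed when done.
   static String[] lookupFamilyNames(Configuration conf, String tableName) throws IOException {
     HBaseAdmin admin = new HBaseAdmin(conf);
     try {
       if (!admin.tableExists(tableName)) {
         return new String[] {""};
       }
       HTableDescriptor descriptor = admin.getTableDescriptor(Bytes.toBytes(tableName));
       Collection<HColumnDescriptor> families = descriptor.getFamilies();
       String[] familyNames = new String[families.size()];
       int i = 0;
       for (HColumnDescriptor d : families) {
         familyNames[i++] = d.getNameAsString();
       }
       return familyNames;
     } finally {
       admin.close();
     }
   }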
Example #6
 /**
  * Serialize the column family to block size map into the configuration. Invoked while configuring
  * the MR job for incremental load.
  *
  * @param tableDescriptor to read the properties from
  * @param conf to persist serialized values into
  * @throws IOException on failure to read column family descriptors
  */
 @VisibleForTesting
 static void configureBlockSize(HTableDescriptor tableDescriptor, Configuration conf)
     throws UnsupportedEncodingException {
   StringBuilder blockSizeConfigValue = new StringBuilder();
   if (tableDescriptor == null) {
     // could happen with mock table instance
     return;
   }
   Collection<HColumnDescriptor> families = tableDescriptor.getFamilies();
   int i = 0;
   for (HColumnDescriptor familyDescriptor : families) {
     if (i++ > 0) {
       blockSizeConfigValue.append('&');
     }
     blockSizeConfigValue.append(URLEncoder.encode(familyDescriptor.getNameAsString(), "UTF-8"));
     blockSizeConfigValue.append('=');
     blockSizeConfigValue.append(
         URLEncoder.encode(String.valueOf(familyDescriptor.getBlocksize()), "UTF-8"));
   }
    // Write the serialized family map into the job configuration (no trailing ampersand is produced)
   conf.set(BLOCK_SIZE_FAMILIES_CONF_KEY, blockSizeConfigValue.toString());
 }
Example #7
 /**
  * Serialize the column family to bloom type map into the configuration. Invoked while configuring
  * the MR job for incremental load.
  *
  * @param tableDescriptor to read the properties from
  * @param conf to persist serialized values into
  * @throws IOException on failure to read column family descriptors
  */
 @VisibleForTesting
 static void configureBloomType(HTableDescriptor tableDescriptor, Configuration conf)
     throws UnsupportedEncodingException {
   if (tableDescriptor == null) {
     // could happen with mock table instance
     return;
   }
   StringBuilder bloomTypeConfigValue = new StringBuilder();
   Collection<HColumnDescriptor> families = tableDescriptor.getFamilies();
   int i = 0;
   for (HColumnDescriptor familyDescriptor : families) {
     if (i++ > 0) {
       bloomTypeConfigValue.append('&');
     }
     bloomTypeConfigValue.append(URLEncoder.encode(familyDescriptor.getNameAsString(), "UTF-8"));
     bloomTypeConfigValue.append('=');
     String bloomType = familyDescriptor.getBloomFilterType().toString();
     if (bloomType == null) {
       bloomType = HColumnDescriptor.DEFAULT_BLOOMFILTER;
     }
     bloomTypeConfigValue.append(URLEncoder.encode(bloomType, "UTF-8"));
   }
   conf.set(BLOOM_TYPE_FAMILIES_CONF_KEY, bloomTypeConfigValue.toString());
 }
Example #8
 /**
  * Serialize the column family to compression algorithm map into the configuration. Invoked while
  * configuring the MR job for incremental load.
  *
  * @param tableDescriptor to read the properties from
  * @param conf to persist serialized values into
  * @throws IOException on failure to read column family descriptors
  */
 @edu.umd.cs.findbugs.annotations.SuppressWarnings(
     value = "RCN_REDUNDANT_NULLCHECK_OF_NONNULL_VALUE")
 @VisibleForTesting
 static void configureCompression(Configuration conf, HTableDescriptor tableDescriptor)
     throws UnsupportedEncodingException {
   StringBuilder compressionConfigValue = new StringBuilder();
   if (tableDescriptor == null) {
     // could happen with mock table instance
     return;
   }
   Collection<HColumnDescriptor> families = tableDescriptor.getFamilies();
   int i = 0;
   for (HColumnDescriptor familyDescriptor : families) {
     if (i++ > 0) {
       compressionConfigValue.append('&');
     }
     compressionConfigValue.append(URLEncoder.encode(familyDescriptor.getNameAsString(), "UTF-8"));
     compressionConfigValue.append('=');
     compressionConfigValue.append(
         URLEncoder.encode(familyDescriptor.getCompressionType().getName(), "UTF-8"));
   }
    // Write the serialized family map into the job configuration (no trailing ampersand is produced)
   conf.set(COMPRESSION_FAMILIES_CONF_KEY, compressionConfigValue.toString());
 }
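
The configure* methods above (data block encoding, block size, bloom type, compression) differ only in how the per-family value is derived. A hypothetical shared helper, not part of HFileOutputFormat2 and assuming Java 8+, that captures the common "family=value&family=value" serialization:

  // Hypothetical refactor sketch: one serializer parameterized by a value extractor.
  import java.io.UnsupportedEncodingException;
  import java.net.URLEncoder;
  import java.util.function.Function;

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HColumnDescriptor;
  import org.apache.hadoop.hbase.HTableDescriptor;

  final class FamilyConfSerializerSketch {
    static void serialize(
        Configuration conf,
        HTableDescriptor tableDescriptor,
        String confKey,
        Function<HColumnDescriptor, String> valueOf)
        throws UnsupportedEncodingException {
      if (tableDescriptor == null) {
        return; // could happen with a mock table instance
      }
      StringBuilder sb = new StringBuilder();
      int i = 0;
      for (HColumnDescriptor family : tableDescriptor.getFamilies()) {
        if (i++ > 0) {
          sb.append('&');
        }
        sb.append(URLEncoder.encode(family.getNameAsString(), "UTF-8"));
        sb.append('=');
        sb.append(URLEncoder.encode(valueOf.apply(family), "UTF-8"));
      }
      conf.set(confKey, sb.toString());
    }
  }

With such a helper, configureBlockSize would reduce to serialize(conf, tableDescriptor, BLOCK_SIZE_FAMILIES_CONF_KEY, f -> String.valueOf(f.getBlocksize())), and the other variants would follow the same shape.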
Example #9
  /**
   * Test that {@link HFileOutputFormat2} RecordWriter uses compression and bloom filter settings
   * from the column family descriptor
   */
  @Ignore("Goes zombie too frequently; needs work. See HBASE-14563")
  @Test
  public void testColumnFamilySettings() throws Exception {
    Configuration conf = new Configuration(this.util.getConfiguration());
    RecordWriter<ImmutableBytesWritable, Cell> writer = null;
    TaskAttemptContext context = null;
    Path dir = util.getDataTestDir("testColumnFamilySettings");

    // Setup table descriptor
    Table table = Mockito.mock(Table.class);
    RegionLocator regionLocator = Mockito.mock(RegionLocator.class);
    HTableDescriptor htd = new HTableDescriptor(TABLE_NAME);
    Mockito.doReturn(htd).when(table).getTableDescriptor();
    for (HColumnDescriptor hcd : HBaseTestingUtility.generateColumnDescriptors()) {
      htd.addFamily(hcd);
    }

    // set up the table to return some mock keys
    setupMockStartKeys(regionLocator);

    try {
      // partial map red setup to get an operational writer for testing
      // We turn off the sequence file compression, because DefaultCodec
      // pollutes the GZip codec pool with an incompatible compressor.
      conf.set("io.seqfile.compression.type", "NONE");
      conf.set("hbase.fs.tmp.dir", dir.toString());
      // turn locality off to eliminate getRegionLocation fail-and-retry time when writing kvs
      conf.setBoolean(HFileOutputFormat2.LOCALITY_SENSITIVE_CONF_KEY, false);

      Job job = new Job(conf, "testLocalMRIncrementalLoad");
      job.setWorkingDirectory(util.getDataTestDirOnTestFS("testColumnFamilySettings"));
      setupRandomGeneratorMapper(job);
      HFileOutputFormat2.configureIncrementalLoad(job, table.getTableDescriptor(), regionLocator);
      FileOutputFormat.setOutputPath(job, dir);
      context = createTestTaskAttemptContext(job);
      HFileOutputFormat2 hof = new HFileOutputFormat2();
      writer = hof.getRecordWriter(context);

      // write out random rows
      writeRandomKeyValues(writer, context, htd.getFamiliesKeys(), ROWSPERSPLIT);
      writer.close(context);

      // Make sure that a directory was created for every CF
      FileSystem fs = dir.getFileSystem(conf);

      // commit so that the filesystem has one directory per column family
      hof.getOutputCommitter(context).commitTask(context);
      hof.getOutputCommitter(context).commitJob(context);
      FileStatus[] families = FSUtils.listStatus(fs, dir, new FSUtils.FamilyDirFilter(fs));
      assertEquals(htd.getFamilies().size(), families.length);
      for (FileStatus f : families) {
        String familyStr = f.getPath().getName();
        HColumnDescriptor hcd = htd.getFamily(Bytes.toBytes(familyStr));
        // verify that the compression on this file matches the configured
        // compression
        Path dataFilePath = fs.listStatus(f.getPath())[0].getPath();
        Reader reader = HFile.createReader(fs, dataFilePath, new CacheConfig(conf), conf);
        Map<byte[], byte[]> fileInfo = reader.loadFileInfo();

        byte[] bloomFilter = fileInfo.get(StoreFile.BLOOM_FILTER_TYPE_KEY);
        if (bloomFilter == null) bloomFilter = Bytes.toBytes("NONE");
        assertEquals(
            "Incorrect bloom filter used for column family "
                + familyStr
                + "(reader: "
                + reader
                + ")",
            hcd.getBloomFilterType(),
            BloomType.valueOf(Bytes.toString(bloomFilter)));
        assertEquals(
            "Incorrect compression used for column family "
                + familyStr
                + "(reader: "
                + reader
                + ")",
            hcd.getCompressionType(),
            reader.getFileContext().getCompression());
      }
    } finally {
      dir.getFileSystem(conf).delete(dir, true);
    }
  }
Example #10
  /**
    * Verify schema modification takes effect.
   *
   * @throws IOException
   * @throws InterruptedException
   */
  @Test(timeout = 300000)
  public void testOnlineChangeTableSchema() throws IOException, InterruptedException {
    final TableName tableName = TableName.valueOf("changeTableSchemaOnline");
    TEST_UTIL
        .getMiniHBaseCluster()
        .getMaster()
        .getConfiguration()
        .setBoolean("hbase.online.schema.update.enable", true);
    HTableDescriptor[] tables = admin.listTables();
    int numTables = tables.length;
    TEST_UTIL.createTable(tableName, HConstants.CATALOG_FAMILY).close();
    tables = this.admin.listTables();
    assertEquals(numTables + 1, tables.length);

    // FIRST, do htabledescriptor changes.
    HTableDescriptor htd = this.admin.getTableDescriptor(tableName);
    // Make a copy and assert copy is good.
    HTableDescriptor copy = new HTableDescriptor(htd);
    assertTrue(htd.equals(copy));
    // Now amend the copy. Introduce differences.
    long newFlushSize = htd.getMemStoreFlushSize() / 2;
    if (newFlushSize <= 0) {
      newFlushSize = HTableDescriptor.DEFAULT_MEMSTORE_FLUSH_SIZE / 2;
    }
    copy.setMemStoreFlushSize(newFlushSize);
    final String key = "anyoldkey";
    assertTrue(htd.getValue(key) == null);
    copy.setValue(key, key);
    boolean expectedException = false;
    try {
      admin.modifyTable(tableName, copy);
    } catch (TableNotDisabledException re) {
      expectedException = true;
    }
    assertFalse(expectedException);
    HTableDescriptor modifiedHtd = this.admin.getTableDescriptor(tableName);
    assertFalse(htd.equals(modifiedHtd));
    assertTrue(copy.equals(modifiedHtd));
    assertEquals(newFlushSize, modifiedHtd.getMemStoreFlushSize());
    assertEquals(key, modifiedHtd.getValue(key));

    // Now work on column family changes.
    int countOfFamilies = modifiedHtd.getFamilies().size();
    assertTrue(countOfFamilies > 0);
    HColumnDescriptor hcd = modifiedHtd.getFamilies().iterator().next();
    int maxversions = hcd.getMaxVersions();
    final int newMaxVersions = maxversions + 1;
    hcd.setMaxVersions(newMaxVersions);
    final byte[] hcdName = hcd.getName();
    expectedException = false;
    try {
      this.admin.modifyColumn(tableName, hcd);
    } catch (TableNotDisabledException re) {
      expectedException = true;
    }
    assertFalse(expectedException);
    modifiedHtd = this.admin.getTableDescriptor(tableName);
    HColumnDescriptor modifiedHcd = modifiedHtd.getFamily(hcdName);
    assertEquals(newMaxVersions, modifiedHcd.getMaxVersions());

    // Try adding a column
    assertFalse(this.admin.isTableDisabled(tableName));
    final String xtracolName = "xtracol";
    HColumnDescriptor xtracol = new HColumnDescriptor(xtracolName);
    xtracol.setValue(xtracolName, xtracolName);
    expectedException = false;
    try {
      this.admin.addColumn(tableName, xtracol);
    } catch (TableNotDisabledException re) {
      expectedException = true;
    }
    // Add column should work even if the table is enabled
    assertFalse(expectedException);
    modifiedHtd = this.admin.getTableDescriptor(tableName);
    hcd = modifiedHtd.getFamily(xtracol.getName());
    assertTrue(hcd != null);
    assertTrue(hcd.getValue(xtracolName).equals(xtracolName));

    // Delete the just-added column.
    this.admin.deleteColumn(tableName, xtracol.getName());
    modifiedHtd = this.admin.getTableDescriptor(tableName);
    hcd = modifiedHtd.getFamily(xtracol.getName());
    assertTrue(hcd == null);

    // Delete the table
    this.admin.disableTable(tableName);
    this.admin.deleteTable(tableName);
    this.admin.listTables();
    assertFalse(this.admin.tableExists(tableName));
  }
Example #11
  @Test
  public void testProperties() throws Exception {
    Connection conn = DriverManager.getConnection(getUrl());
    conn.createStatement()
        .execute(
            "CREATE TABLE NON_TX_TABLE1(k INTEGER PRIMARY KEY, a.v VARCHAR, b.v VARCHAR, c.v VARCHAR) TTL=1000");
    conn.createStatement().execute("CREATE INDEX idx1 ON NON_TX_TABLE1(a.v, b.v) TTL=1000");
    conn.createStatement()
        .execute("CREATE INDEX idx2 ON NON_TX_TABLE1(c.v) INCLUDE (a.v, b.v) TTL=1000");

    conn.createStatement().execute("ALTER TABLE NON_TX_TABLE1 SET TRANSACTIONAL=true");

    HTableDescriptor desc =
        conn.unwrap(PhoenixConnection.class)
            .getQueryServices()
            .getTableDescriptor(Bytes.toBytes("NON_TX_TABLE1"));
    for (HColumnDescriptor colDesc : desc.getFamilies()) {
      assertEquals(
          QueryServicesOptions.DEFAULT_MAX_VERSIONS_TRANSACTIONAL, colDesc.getMaxVersions());
      assertEquals(1000, colDesc.getTimeToLive());
      assertEquals(1000, Integer.parseInt(colDesc.getValue(TxConstants.PROPERTY_TTL)));
    }

    desc =
        conn.unwrap(PhoenixConnection.class)
            .getQueryServices()
            .getTableDescriptor(Bytes.toBytes("IDX1"));
    for (HColumnDescriptor colDesc : desc.getFamilies()) {
      assertEquals(
          QueryServicesOptions.DEFAULT_MAX_VERSIONS_TRANSACTIONAL, colDesc.getMaxVersions());
      assertEquals(1000, colDesc.getTimeToLive());
      assertEquals(1000, Integer.parseInt(colDesc.getValue(TxConstants.PROPERTY_TTL)));
    }

    desc =
        conn.unwrap(PhoenixConnection.class)
            .getQueryServices()
            .getTableDescriptor(Bytes.toBytes("IDX2"));
    for (HColumnDescriptor colDesc : desc.getFamilies()) {
      assertEquals(
          QueryServicesOptions.DEFAULT_MAX_VERSIONS_TRANSACTIONAL, colDesc.getMaxVersions());
      assertEquals(1000, colDesc.getTimeToLive());
      assertEquals(1000, Integer.parseInt(colDesc.getValue(TxConstants.PROPERTY_TTL)));
    }

    conn.createStatement()
        .execute(
            "CREATE TABLE NON_TX_TABLE2(k INTEGER PRIMARY KEY, a.v VARCHAR, b.v VARCHAR, c.v VARCHAR)");
    conn.createStatement().execute("ALTER TABLE NON_TX_TABLE2 SET TRANSACTIONAL=true, VERSIONS=10");
    desc =
        conn.unwrap(PhoenixConnection.class)
            .getQueryServices()
            .getTableDescriptor(Bytes.toBytes("NON_TX_TABLE2"));
    for (HColumnDescriptor colDesc : desc.getFamilies()) {
      assertEquals(10, colDesc.getMaxVersions());
      assertEquals(HColumnDescriptor.DEFAULT_TTL, colDesc.getTimeToLive());
      assertEquals(null, colDesc.getValue(TxConstants.PROPERTY_TTL));
    }
    conn.createStatement().execute("ALTER TABLE NON_TX_TABLE2 SET TTL=1000");
    desc =
        conn.unwrap(PhoenixConnection.class)
            .getQueryServices()
            .getTableDescriptor(Bytes.toBytes("NON_TX_TABLE2"));
    for (HColumnDescriptor colDesc : desc.getFamilies()) {
      assertEquals(10, colDesc.getMaxVersions());
      assertEquals(1000, colDesc.getTimeToLive());
      assertEquals(1000, Integer.parseInt(colDesc.getValue(TxConstants.PROPERTY_TTL)));
    }

    conn.createStatement()
        .execute(
            "CREATE TABLE NON_TX_TABLE3(k INTEGER PRIMARY KEY, a.v VARCHAR, b.v VARCHAR, c.v VARCHAR)");
    conn.createStatement()
        .execute("ALTER TABLE NON_TX_TABLE3 SET TRANSACTIONAL=true, b.VERSIONS=10, c.VERSIONS=20");
    desc =
        conn.unwrap(PhoenixConnection.class)
            .getQueryServices()
            .getTableDescriptor(Bytes.toBytes("NON_TX_TABLE3"));
    assertEquals(
        QueryServicesOptions.DEFAULT_MAX_VERSIONS_TRANSACTIONAL,
        desc.getFamily(Bytes.toBytes("A")).getMaxVersions());
    assertEquals(10, desc.getFamily(Bytes.toBytes("B")).getMaxVersions());
    assertEquals(20, desc.getFamily(Bytes.toBytes("C")).getMaxVersions());

    conn.createStatement()
        .execute(
            "CREATE TABLE NON_TX_TABLE4(k INTEGER PRIMARY KEY, a.v VARCHAR, b.v VARCHAR, c.v VARCHAR)");
    try {
      conn.createStatement()
          .execute("ALTER TABLE NON_TX_TABLE4 SET TRANSACTIONAL=true, VERSIONS=1");
      fail();
    } catch (SQLException e) {
      assertEquals(
          SQLExceptionCode.TX_MAX_VERSIONS_MUST_BE_GREATER_THAN_ONE.getErrorCode(),
          e.getErrorCode());
    }

    try {
      conn.createStatement()
          .execute("ALTER TABLE NON_TX_TABLE4 SET TRANSACTIONAL=true, b.VERSIONS=1");
      fail();
    } catch (SQLException e) {
      assertEquals(
          SQLExceptionCode.TX_MAX_VERSIONS_MUST_BE_GREATER_THAN_ONE.getErrorCode(),
          e.getErrorCode());
    }

    conn.createStatement()
        .execute(
            "CREATE TABLE TX_TABLE1(k INTEGER PRIMARY KEY, v VARCHAR) TTL=1000, TRANSACTIONAL=true");
    desc =
        conn.unwrap(PhoenixConnection.class)
            .getQueryServices()
            .getTableDescriptor(Bytes.toBytes("TX_TABLE1"));
    for (HColumnDescriptor colDesc : desc.getFamilies()) {
      assertEquals(
          QueryServicesOptions.DEFAULT_MAX_VERSIONS_TRANSACTIONAL, colDesc.getMaxVersions());
      assertEquals(HColumnDescriptor.DEFAULT_TTL, colDesc.getTimeToLive());
      assertEquals(1000, Integer.parseInt(colDesc.getValue(TxConstants.PROPERTY_TTL)));
    }
  }
Example #12
  /** {@inheritDoc} */
  @Override
  public KijiTableLayout modifyTableLayout(
      TableLayoutDesc update, boolean dryRun, PrintStream printStream) throws IOException {
    final State state = mState.get();
    Preconditions.checkState(
        state == State.OPEN,
        "Cannot modify table layout in Kiji instance %s in state %s.",
        this,
        state);
    Preconditions.checkNotNull(update);

    ensureValidationCompatibility(update);

    if (dryRun && (null == printStream)) {
      printStream = System.out;
    }

    final KijiMetaTable metaTable = getMetaTable();

    final String tableName = update.getName();
    // Throws a KijiTableNotFoundException if there is no table.
    metaTable.getTableLayout(tableName);

    final KijiURI tableURI = KijiURI.newBuilder(mURI).withTableName(tableName).build();
    LOG.debug("Applying layout update {} on table {}", update, tableURI);

    KijiTableLayout newLayout = null;

    if (dryRun) {
      // Process column ids and perform validation, but don't actually update the meta table.
      final List<KijiTableLayout> layouts = metaTable.getTableLayoutVersions(tableName, 1);
      final KijiTableLayout currentLayout = layouts.isEmpty() ? null : layouts.get(0);
      newLayout = KijiTableLayout.createUpdatedLayout(update, currentLayout);
    } else {
      // Actually set it.
      if (mSystemVersion.compareTo(Versions.SYSTEM_2_0) >= 0) {
        try {
          // Use ZooKeeper to inform all watchers that a new table layout is available.
          final HBaseTableLayoutUpdater updater =
              new HBaseTableLayoutUpdater(this, tableURI, update);
          try {
            updater.update();
            newLayout = updater.getNewLayout();
          } finally {
            updater.close();
          }
        } catch (KeeperException ke) {
          throw new IOException(ke);
        }
      } else {
        // System versions before system-2.0 do not enforce table layout update consistency or
        // validation.
        newLayout = metaTable.updateTableLayout(tableName, update);
      }
    }
    Preconditions.checkState(newLayout != null);

    if (dryRun) {
      printStream.println("This table layout is valid.");
    }

    LOG.debug("Computing new HBase schema");
    final HTableSchemaTranslator translator = new HTableSchemaTranslator();
    final HTableDescriptor newTableDescriptor =
        translator.toHTableDescriptor(mURI.getInstance(), newLayout);

    LOG.debug("Reading existing HBase schema");
    final KijiManagedHBaseTableName hbaseTableName =
        KijiManagedHBaseTableName.getKijiTableName(mURI.getInstance(), tableName);
    HTableDescriptor currentTableDescriptor = null;
    byte[] tableNameAsBytes = hbaseTableName.toBytes();
    try {
      currentTableDescriptor = getHBaseAdmin().getTableDescriptor(tableNameAsBytes);
    } catch (TableNotFoundException tnfe) {
      if (!dryRun) {
        throw tnfe; // Not in dry-run mode; table needs to exist. Rethrow exception.
      }
    }
    if (currentTableDescriptor == null) {
      if (dryRun) {
        printStream.println("Would create new table: " + tableName);
        currentTableDescriptor =
            HTableDescriptorComparator.makeEmptyTableDescriptor(hbaseTableName);
      } else {
        throw new RuntimeException(
            "Table " + hbaseTableName.getKijiTableName() + " does not exist");
      }
    }
    LOG.debug("Existing table descriptor: {}", currentTableDescriptor);
    LOG.debug("New table descriptor: {}", newTableDescriptor);

    LOG.debug("Checking for differences between the new HBase schema and the existing one");
    final HTableDescriptorComparator comparator = new HTableDescriptorComparator();
    if (0 == comparator.compare(currentTableDescriptor, newTableDescriptor)) {
      LOG.debug("HBase schemas are the same.  No need to change HBase schema");
      if (dryRun) {
        printStream.println("This layout does not require any physical table schema changes.");
      }
    } else {
      LOG.debug("HBase schema must be changed, but no columns will be deleted");

      if (dryRun) {
        printStream.println("Changes caused by this table layout:");
      } else {
        LOG.debug("Disabling HBase table");
        getHBaseAdmin().disableTable(hbaseTableName.toString());
      }

      for (HColumnDescriptor newColumnDescriptor : newTableDescriptor.getFamilies()) {
        final String columnName = Bytes.toString(newColumnDescriptor.getName());
        final ColumnId columnId = ColumnId.fromString(columnName);
        final String lgName = newLayout.getLocalityGroupIdNameMap().get(columnId);
        final HColumnDescriptor currentColumnDescriptor =
            currentTableDescriptor.getFamily(newColumnDescriptor.getName());
        if (null == currentColumnDescriptor) {
          if (dryRun) {
            printStream.println("  Creating new locality group: " + lgName);
          } else {
            LOG.debug("Creating new column " + columnName);
            getHBaseAdmin().addColumn(hbaseTableName.toString(), newColumnDescriptor);
          }
        } else if (!newColumnDescriptor.equals(currentColumnDescriptor)) {
          if (dryRun) {
            printStream.println("  Modifying locality group: " + lgName);
          } else {
            LOG.debug("Modifying column " + columnName);
            getHBaseAdmin().modifyColumn(hbaseTableName.toString(), newColumnDescriptor);
          }
        } else {
          LOG.debug("No changes needed for column " + columnName);
        }
      }

      if (dryRun) {
        if (newTableDescriptor.getMaxFileSize() != currentTableDescriptor.getMaxFileSize()) {
          printStream.printf(
              "  Changing max_filesize from %d to %d: %n",
              currentTableDescriptor.getMaxFileSize(), newTableDescriptor.getMaxFileSize());
        }
        if (newTableDescriptor.getMemStoreFlushSize()
            != currentTableDescriptor.getMemStoreFlushSize()) {
          printStream.printf(
              "  Changing memstore_flushsize from %d to %d: %n",
              currentTableDescriptor.getMemStoreFlushSize(),
              newTableDescriptor.getMemStoreFlushSize());
        }
      } else {
        LOG.debug("Modifying table descriptor");
        getHBaseAdmin().modifyTable(tableNameAsBytes, newTableDescriptor);
      }

      if (!dryRun) {
        LOG.debug("Re-enabling HBase table");
        getHBaseAdmin().enableTable(hbaseTableName.toString());
      }
    }

    return newLayout;
  }
Example #13
  /**
    * Verify schema modification takes effect.
   *
   * @throws IOException
   */
  @Test
  public void testChangeTableSchema() throws IOException {
    final byte[] tableName = Bytes.toBytes("changeTableSchema");
    HTableDescriptor[] tables = admin.listTables();
    int numTables = tables.length;
    TEST_UTIL.createTable(tableName, HConstants.CATALOG_FAMILY);
    tables = this.admin.listTables();
    assertEquals(numTables + 1, tables.length);

    // FIRST, do htabledescriptor changes.
    HTableDescriptor htd = this.admin.getTableDescriptor(tableName);
    // Make a copy and assert copy is good.
    HTableDescriptor copy = new HTableDescriptor(htd);
    assertTrue(htd.equals(copy));
    // Now amend the copy. Introduce differences.
    long newFlushSize = htd.getMemStoreFlushSize() / 2;
    copy.setMemStoreFlushSize(newFlushSize);
    final String key = "anyoldkey";
    assertTrue(htd.getValue(key) == null);
    copy.setValue(key, key);
    boolean expectedException = false;
    try {
      this.admin.modifyTable(tableName, copy);
    } catch (TableNotDisabledException re) {
      expectedException = true;
    }
    assertTrue(expectedException);
    this.admin.disableTable(tableName);
    assertTrue(this.admin.isTableDisabled(tableName));
    modifyTable(tableName, copy);
    HTableDescriptor modifiedHtd = this.admin.getTableDescriptor(tableName);
    // Assert the returned modifiedHtd is the same as the copy.
    assertFalse(htd.equals(modifiedHtd));
    assertTrue(copy.equals(modifiedHtd));
    assertEquals(newFlushSize, modifiedHtd.getMemStoreFlushSize());
    assertEquals(key, modifiedHtd.getValue(key));

    // Reenable table to test it fails if not disabled.
    this.admin.enableTable(tableName);
    assertFalse(this.admin.isTableDisabled(tableName));

    // Now work on column family changes.
    int countOfFamilies = modifiedHtd.getFamilies().size();
    assertTrue(countOfFamilies > 0);
    HColumnDescriptor hcd = modifiedHtd.getFamilies().iterator().next();
    int maxversions = hcd.getMaxVersions();
    final int newMaxVersions = maxversions + 1;
    hcd.setMaxVersions(newMaxVersions);
    final byte[] hcdName = hcd.getName();
    expectedException = false;
    try {
      this.admin.modifyColumn(tableName, hcd);
    } catch (TableNotDisabledException re) {
      expectedException = true;
    }
    assertTrue(expectedException);
    this.admin.disableTable(tableName);
    assertTrue(this.admin.isTableDisabled(tableName));
    // Modify Column is synchronous
    this.admin.modifyColumn(tableName, hcd);
    modifiedHtd = this.admin.getTableDescriptor(tableName);
    HColumnDescriptor modifiedHcd = modifiedHtd.getFamily(hcdName);
    assertEquals(newMaxVersions, modifiedHcd.getMaxVersions());

    // Try adding a column
    // Reenable table to test it fails if not disabled.
    this.admin.enableTable(tableName);
    assertFalse(this.admin.isTableDisabled(tableName));
    final String xtracolName = "xtracol";
    HColumnDescriptor xtracol = new HColumnDescriptor(xtracolName);
    xtracol.setValue(xtracolName, xtracolName);
    expectedException = false;
    try {
      this.admin.addColumn(tableName, xtracol);
    } catch (TableNotDisabledException re) {
      expectedException = true;
    }
    assertTrue(expectedException);
    this.admin.disableTable(tableName);
    assertTrue(this.admin.isTableDisabled(tableName));
    this.admin.addColumn(tableName, xtracol);
    modifiedHtd = this.admin.getTableDescriptor(tableName);
    hcd = modifiedHtd.getFamily(xtracol.getName());
    assertTrue(hcd != null);
    assertTrue(hcd.getValue(xtracolName).equals(xtracolName));

    // Delete the just-added column.
    this.admin.deleteColumn(tableName, xtracol.getName());
    modifiedHtd = this.admin.getTableDescriptor(tableName);
    hcd = modifiedHtd.getFamily(xtracol.getName());
    assertTrue(hcd == null);

    // Delete the table
    this.admin.deleteTable(tableName);
    this.admin.listTables();
    assertFalse(this.admin.tableExists(tableName));
  }