Code example #1
  /**
   * Inspect the log directory and split any log files left behind by region
   * servers that are no longer active.
   */
  void splitLogAfterStartup() {
    boolean retrySplitting = !conf.getBoolean("hbase.hlog.split.skip.errors",
        HLog.SPLIT_SKIP_ERRORS_DEFAULT);
    Path logsDirPath = new Path(this.rootdir, HConstants.HREGION_LOGDIR_NAME);
    do {
      if (master.isStopped()) {
        LOG.warn("Master stopped while splitting logs");
        break;
      }
      List<ServerName> serverNames = new ArrayList<ServerName>();
      try {
        if (!this.fs.exists(logsDirPath)) return;
        FileStatus[] logFolders = FSUtils.listStatus(this.fs, logsDirPath, null);
        // Get online servers after getting log folders to avoid deleting the log folders
        // of newly checked-in region servers. See HBASE-5916.
        Set<ServerName> onlineServers = ((HMaster) master).getServerManager().getOnlineServers()
            .keySet();

        if (logFolders == null || logFolders.length == 0) {
          LOG.debug("No log files to split, proceeding...");
          return;
        }
        for (FileStatus status : logFolders) {
          String sn = status.getPath().getName();
          // truncate splitting suffix if present (for ServerName parsing)
          if (sn.endsWith(HLog.SPLITTING_EXT)) {
            sn = sn.substring(0, sn.length() - HLog.SPLITTING_EXT.length());
          }
          ServerName serverName = ServerName.parseServerName(sn);
          if (!onlineServers.contains(serverName)) {
            LOG.info("Log folder " + status.getPath() + " doesn't belong "
                + "to a known region server, splitting");
            serverNames.add(serverName);
          } else {
            LOG.info("Log folder " + status.getPath()
                + " belongs to an existing region server");
          }
        }
        splitLog(serverNames);
        retrySplitting = false;
      } catch (IOException ioe) {
        LOG.warn("Failed splitting of " + serverNames, ioe);
        if (!checkFileSystem()) {
          LOG.warn("Bad Filesystem, exiting");
          Runtime.getRuntime().halt(1);
        }
        try {
          if (retrySplitting) {
            Thread.sleep(conf.getInt(
              "hbase.hlog.split.failure.retry.interval", 30 * 1000));
          }
        } catch (InterruptedException e) {
          LOG.warn("Interrupted, aborting since cannot return w/o splitting");
          Thread.currentThread().interrupt();
          retrySplitting = false;
          Runtime.getRuntime().halt(1);
        }
      }
    } while (retrySplitting);
  }
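The step worth noting above is stripping the in-progress split suffix before the directory name is handed to ServerName.parseServerName. Below is a minimal, self-contained sketch of that normalization; the class name, sample directory name, and the literal "-splitting" suffix (standing in for HLog.SPLITTING_EXT) are illustrative assumptions, not taken from the source.

import org.apache.hadoop.hbase.ServerName;

public class ParseLogDirName {
  public static void main(String[] args) {
    // Hypothetical WAL directory name left behind by a dead region server,
    // with the in-progress suffix appended by an earlier split attempt.
    String dirName = "rs1.example.com,16020,1468962153055-splitting";
    final String SPLITTING_SUFFIX = "-splitting"; // stands in for HLog.SPLITTING_EXT
    if (dirName.endsWith(SPLITTING_SUFFIX)) {
      dirName = dirName.substring(0, dirName.length() - SPLITTING_SUFFIX.length());
    }
    // parseServerName accepts the "host,port,startcode" form (and plain host:port).
    ServerName sn = ServerName.parseServerName(dirName);
    System.out.println(sn.getHostname() + ":" + sn.getPort()
        + " startcode=" + sn.getStartcode());
  }
}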
Code example #2
 /**
  * Get the list of all the region servers from the specified peer cluster.
  *
  * @param zkw ZooKeeper connection to the peer cluster
  * @return list of region server names, or an empty list if the slave cluster is unavailable
  */
 private static List<ServerName> fetchSlavesAddresses(ZooKeeperWatcher zkw)
     throws KeeperException {
   List<String> children = ZKUtil.listChildrenAndWatchForNewChildren(zkw, zkw.rsZNode);
   if (children == null) {
     return Collections.emptyList();
   }
   List<ServerName> addresses = new ArrayList<ServerName>(children.size());
   for (String child : children) {
     addresses.add(ServerName.parseServerName(child));
   }
   return addresses;
 }
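Each child of zkw.rsZNode is the ServerName.toString() form of a live region server, which is why parseServerName can reconstruct it losslessly. A minimal round-trip sketch, assuming the standard host,port,startcode encoding (the sample child name is made up):

import org.apache.hadoop.hbase.ServerName;

public class ParseZnodeChild {
  public static void main(String[] args) {
    // Hypothetical znode child name as registered by a region server.
    String child = "rs1.example.com,16020,1468962153055";
    ServerName sn = ServerName.parseServerName(child);
    // The round trip is lossless: toString() yields the znode name back.
    System.out.println(sn.toString().equals(child)); // true
    System.out.println(sn.getHostAndPort());         // rs1.example.com:16020
  }
}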
Code example #3
/**
 * Implementation of ConnectionQueryServices used in testing where no connection to an HBase
 * cluster is necessary.
 *
 * @since 0.1
 */
public class ConnectionlessQueryServicesImpl extends DelegateQueryServices
    implements ConnectionQueryServices {
  private static final ServerName SERVER_NAME =
      ServerName.parseServerName(
          HConstants.LOCALHOST
              + Addressing.HOSTNAME_PORT_SEPARATOR
              + HConstants.DEFAULT_ZOOKEPER_CLIENT_PORT);

  private PMetaData metaData;
  private final Map<SequenceKey, SequenceInfo> sequenceMap = Maps.newHashMap();
  private final String userName;
  private final TransactionSystemClient txSystemClient;
  private KeyValueBuilder kvBuilder;
  private volatile boolean initialized;
  private volatile SQLException initializationException;
  private final Map<String, List<HRegionLocation>> tableSplits = Maps.newHashMap();

  public ConnectionlessQueryServicesImpl(
      QueryServices services, ConnectionInfo connInfo, Properties info) {
    super(services);
    userName = connInfo.getPrincipal();
    metaData = newEmptyMetaData();

    // Use KeyValueBuilder that builds real KeyValues, as our test utils require this
    this.kvBuilder = GenericKeyValueBuilder.INSTANCE;
    Configuration config = HBaseFactoryProvider.getConfigurationFactory().getConfiguration();
    for (Entry<String, String> entry : services.getProps()) {
      config.set(entry.getKey(), entry.getValue());
    }
    if (info != null) {
      for (Object key : info.keySet()) {
        config.set((String) key, info.getProperty((String) key));
      }
    }
    for (Entry<String, String> entry : connInfo.asProps()) {
      config.set(entry.getKey(), entry.getValue());
    }

    // Without making a copy of the configuration we cons up, we lose some of our properties
    // on the server side during testing.
    config = HBaseFactoryProvider.getConfigurationFactory().getConfiguration(config);
    TransactionManager txnManager = new TransactionManager(config);
    this.txSystemClient = new InMemoryTxSystemClient(txnManager);
  }

  private PMetaData newEmptyMetaData() {
    long maxSizeBytes =
        getProps()
            .getLong(
                QueryServices.MAX_CLIENT_METADATA_CACHE_SIZE_ATTRIB,
                QueryServicesOptions.DEFAULT_MAX_CLIENT_METADATA_CACHE_SIZE);
    return new PMetaDataImpl(INITIAL_META_DATA_TABLE_CAPACITY, maxSizeBytes);
  }

  @Override
  public ConnectionQueryServices getChildQueryServices(ImmutableBytesWritable childId) {
    return this; // Just reuse the same query services
  }

  @Override
  public HTableInterface getTable(byte[] tableName) throws SQLException {
    throw new UnsupportedOperationException();
  }

  @Override
  public List<HRegionLocation> getAllTableRegions(byte[] tableName) throws SQLException {
    List<HRegionLocation> regions = tableSplits.get(Bytes.toString(tableName));
    if (regions != null) {
      return regions;
    }
    return Collections.singletonList(
        new HRegionLocation(
            new HRegionInfo(
                TableName.valueOf(tableName), HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW),
            SERVER_NAME,
            -1));
  }

  @Override
  public PMetaData addTable(PTable table, long resolvedTime) throws SQLException {
    return metaData = metaData.addTable(table, resolvedTime);
  }

  @Override
  public PMetaData updateResolvedTimestamp(PTable table, long resolvedTimestamp)
      throws SQLException {
    return metaData = metaData.updateResolvedTimestamp(table, resolvedTimestamp);
  }

  @Override
  public PMetaData addColumn(
      PName tenantId,
      String tableName,
      List<PColumn> columns,
      long tableTimeStamp,
      long tableSeqNum,
      boolean isImmutableRows,
      boolean isWalDisabled,
      boolean isMultitenant,
      boolean storeNulls,
      boolean isTransactional,
      long updateCacheFrequency,
      boolean isNamespaceMapped,
      long resolvedTime)
      throws SQLException {
    return metaData =
        metaData.addColumn(
            tenantId,
            tableName,
            columns,
            tableTimeStamp,
            tableSeqNum,
            isImmutableRows,
            isWalDisabled,
            isMultitenant,
            storeNulls,
            isTransactional,
            updateCacheFrequency,
            isNamespaceMapped,
            resolvedTime);
  }

  @Override
  public PMetaData removeTable(
      PName tenantId, String tableName, String parentTableName, long tableTimeStamp)
      throws SQLException {
    return metaData = metaData.removeTable(tenantId, tableName, parentTableName, tableTimeStamp);
  }

  @Override
  public PMetaData removeColumn(
      PName tenantId,
      String tableName,
      List<PColumn> columnsToRemove,
      long tableTimeStamp,
      long tableSeqNum,
      long resolvedTime)
      throws SQLException {
    return metaData =
        metaData.removeColumn(
            tenantId, tableName, columnsToRemove, tableTimeStamp, tableSeqNum, resolvedTime);
  }

  @Override
  public PhoenixConnection connect(String url, Properties info) throws SQLException {
    return new PhoenixConnection(this, url, info, metaData);
  }

  @Override
  public MetaDataMutationResult getTable(
      PName tenantId,
      byte[] schemaBytes,
      byte[] tableBytes,
      long tableTimestamp,
      long clientTimestamp)
      throws SQLException {
    // Return a result that will cause the client to use its own metadata instead of
    // needing to get anything from the server (since we don't have a connection).
    try {
      String fullTableName = SchemaUtil.getTableName(schemaBytes, tableBytes);
      PTable table = metaData.getTableRef(new PTableKey(tenantId, fullTableName)).getTable();
      return new MetaDataMutationResult(MutationCode.TABLE_ALREADY_EXISTS, 0, table, true);
    } catch (TableNotFoundException e) {
      return new MetaDataMutationResult(MutationCode.TABLE_NOT_FOUND, 0, null);
    }
  }

  private static byte[] getTableName(List<Mutation> tableMetaData, byte[] physicalTableName) {
    if (physicalTableName != null) {
      return physicalTableName;
    }
    byte[][] rowKeyMetadata = new byte[3][];
    Mutation m = MetaDataUtil.getTableHeaderRow(tableMetaData);
    byte[] key = m.getRow();
    SchemaUtil.getVarChars(key, rowKeyMetadata);
    byte[] schemaBytes = rowKeyMetadata[PhoenixDatabaseMetaData.SCHEMA_NAME_INDEX];
    byte[] tableBytes = rowKeyMetadata[PhoenixDatabaseMetaData.TABLE_NAME_INDEX];
    return SchemaUtil.getTableNameAsBytes(schemaBytes, tableBytes);
  }

  private static List<HRegionLocation> generateRegionLocations(
      byte[] physicalName, byte[][] splits) {
    byte[] startKey = HConstants.EMPTY_START_ROW;
    List<HRegionLocation> regions = Lists.newArrayListWithExpectedSize(splits.length);
    for (byte[] split : splits) {
      regions.add(
          new HRegionLocation(
              new HRegionInfo(TableName.valueOf(physicalName), startKey, split), SERVER_NAME, -1));
      startKey = split;
    }
    regions.add(
        new HRegionLocation(
            new HRegionInfo(TableName.valueOf(physicalName), startKey, HConstants.EMPTY_END_ROW),
            SERVER_NAME,
            -1));
    return regions;
  }

  @Override
  public MetaDataMutationResult createTable(
      List<Mutation> tableMetaData,
      byte[] physicalName,
      PTableType tableType,
      Map<String, Object> tableProps,
      List<Pair<byte[], Map<String, Object>>> families,
      byte[][] splits,
      boolean isNamespaceMapped)
      throws SQLException {
    if (splits != null) {
      byte[] tableName = getTableName(tableMetaData, physicalName);
      tableSplits.put(Bytes.toString(tableName), generateRegionLocations(tableName, splits));
    }
    return new MetaDataMutationResult(MutationCode.TABLE_NOT_FOUND, 0, null);
  }

  @Override
  public MetaDataMutationResult dropTable(
      List<Mutation> tableMetadata, PTableType tableType, boolean cascade) throws SQLException {
    byte[] tableName = getTableName(tableMetadata, null);
    tableSplits.remove(Bytes.toString(tableName));
    return new MetaDataMutationResult(MutationCode.TABLE_ALREADY_EXISTS, 0, null);
  }

  @Override
  public MetaDataMutationResult addColumn(
      List<Mutation> tableMetaData,
      PTable table,
      Map<String, List<Pair<String, Object>>> properties,
      Set<String> colFamiliesForPColumnsToBeAdded)
      throws SQLException {
    return new MetaDataMutationResult(MutationCode.TABLE_ALREADY_EXISTS, 0, null);
  }

  @Override
  public MetaDataMutationResult dropColumn(List<Mutation> tableMetadata, PTableType tableType)
      throws SQLException {
    return new MetaDataMutationResult(MutationCode.TABLE_ALREADY_EXISTS, 0, null);
  }

  @Override
  public void clearTableFromCache(
      byte[] tenantId, byte[] schemaName, byte[] tableName, long clientTS) throws SQLException {}

  // TODO: share this with ConnectionQueryServicesImpl
  @Override
  public void init(String url, Properties props) throws SQLException {
    if (initialized) {
      if (initializationException != null) {
        throw initializationException;
      }
      return;
    }
    synchronized (this) {
      if (initialized) {
        if (initializationException != null) {
          throw initializationException;
        }
        return;
      }
      SQLException sqlE = null;
      PhoenixConnection metaConnection = null;
      try {
        Properties scnProps = PropertiesUtil.deepCopy(props);
        scnProps.setProperty(
            PhoenixRuntime.CURRENT_SCN_ATTRIB,
            Long.toString(MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP));
        scnProps.remove(PhoenixRuntime.TENANT_ID_ATTRIB);
        String globalUrl = JDBCUtil.removeProperty(url, PhoenixRuntime.TENANT_ID_ATTRIB);
        metaConnection = new PhoenixConnection(this, globalUrl, scnProps, newEmptyMetaData());
        try {
          metaConnection.createStatement().executeUpdate(QueryConstants.CREATE_TABLE_METADATA);
        } catch (TableAlreadyExistsException ignore) {
          // Ignore, as this will happen if the SYSTEM table already exists at this fixed
          // timestamp. Since the table only exists *after* this fixed timestamp, what is
          // actually thrown here is a NewerTableAlreadyExistsException (a subclass).
        }
        try {
          int nSaltBuckets = getSequenceSaltBuckets();
          String createTableStatement = Sequence.getCreateTableStatement(nSaltBuckets);
          metaConnection.createStatement().executeUpdate(createTableStatement);
        } catch (NewerTableAlreadyExistsException ignore) {
          // Ignore, as this will happen if SYSTEM.SEQUENCE already exists at this fixed
          // timestamp. A plain TableAlreadyExistsException is not thrown, since the table
          // only exists *after* this fixed timestamp.
        }
        try {
          metaConnection
              .createStatement()
              .executeUpdate(QueryConstants.CREATE_STATS_TABLE_METADATA);
        } catch (NewerTableAlreadyExistsException ignore) {
          // Ignore, as this will happen if the stats table (SYSTEM.STATS) already exists at
          // this fixed timestamp. A plain TableAlreadyExistsException is not thrown, since
          // the table only exists *after* this fixed timestamp.
        }

        try {
          metaConnection.createStatement().executeUpdate(QueryConstants.CREATE_FUNCTION_METADATA);
        } catch (NewerTableAlreadyExistsException ignore) {
          // Ignore, as above: the function metadata table may already exist at this
          // fixed timestamp.
        }
      } catch (SQLException e) {
        sqlE = e;
      } finally {
        try {
          if (metaConnection != null) metaConnection.close();
        } catch (SQLException e) {
          if (sqlE != null) {
            sqlE.setNextException(e);
          } else {
            sqlE = e;
          }
        } finally {
          try {
            if (sqlE != null) {
              initializationException = sqlE;
              throw sqlE;
            }
          } finally {
            initialized = true;
          }
        }
      }
    }
  }

  @Override
  public MutationState updateData(MutationPlan plan) throws SQLException {
    return new MutationState(0, plan.getContext().getConnection());
  }

  @Override
  public int getLowestClusterHBaseVersion() {
    return Integer.MAX_VALUE; // Allow everything for connectionless
  }

  @Override
  public HBaseAdmin getAdmin() throws SQLException {
    throw new UnsupportedOperationException();
  }

  @Override
  public MetaDataMutationResult updateIndexState(
      List<Mutation> tableMetadata, String parentTableName) throws SQLException {
    byte[][] rowKeyMetadata = new byte[3][];
    SchemaUtil.getVarChars(tableMetadata.get(0).getRow(), rowKeyMetadata);
    Mutation m = MetaDataUtil.getTableHeaderRow(tableMetadata);
    ImmutableBytesWritable ptr = new ImmutableBytesWritable();
    if (!MetaDataUtil.getMutationValue(m, INDEX_STATE_BYTES, kvBuilder, ptr)) {
      throw new IllegalStateException();
    }
    PIndexState newState = PIndexState.fromSerializedValue(ptr.get()[ptr.getOffset()]);
    byte[] tenantIdBytes = rowKeyMetadata[PhoenixDatabaseMetaData.TENANT_ID_INDEX];
    String schemaName = Bytes.toString(rowKeyMetadata[PhoenixDatabaseMetaData.SCHEMA_NAME_INDEX]);
    String indexName = Bytes.toString(rowKeyMetadata[PhoenixDatabaseMetaData.TABLE_NAME_INDEX]);
    String indexTableName = SchemaUtil.getTableName(schemaName, indexName);
    PName tenantId = tenantIdBytes.length == 0 ? null : PNameFactory.newName(tenantIdBytes);
    PTable index = metaData.getTableRef(new PTableKey(tenantId, indexTableName)).getTable();
    index =
        PTableImpl.makePTable(
            index,
            newState == PIndexState.USABLE
                ? PIndexState.ACTIVE
                : newState == PIndexState.UNUSABLE ? PIndexState.INACTIVE : newState);
    return new MetaDataMutationResult(MutationCode.TABLE_ALREADY_EXISTS, 0, index);
  }

  @Override
  public HTableDescriptor getTableDescriptor(byte[] tableName) throws SQLException {
    return null;
  }

  @Override
  public void clearTableRegionCache(byte[] tableName) throws SQLException {}

  @Override
  public boolean hasIndexWALCodec() {
    return true;
  }

  @Override
  public long createSequence(
      String tenantId,
      String schemaName,
      String sequenceName,
      long startWith,
      long incrementBy,
      long cacheSize,
      long minValue,
      long maxValue,
      boolean cycle,
      long timestamp)
      throws SQLException {
    SequenceKey key = new SequenceKey(tenantId, schemaName, sequenceName, getSequenceSaltBuckets());
    if (sequenceMap.get(key) != null) {
      throw new SequenceAlreadyExistsException(schemaName, sequenceName);
    }
    sequenceMap.put(key, new SequenceInfo(startWith, incrementBy, minValue, maxValue, 1L, cycle));
    return timestamp;
  }

  @Override
  public long dropSequence(String tenantId, String schemaName, String sequenceName, long timestamp)
      throws SQLException {
    SequenceKey key = new SequenceKey(tenantId, schemaName, sequenceName, getSequenceSaltBuckets());
    if (sequenceMap.remove(key) == null) {
      throw new SequenceNotFoundException(schemaName, sequenceName);
    }
    return timestamp;
  }

  @Override
  public void validateSequences(
      List<SequenceAllocation> sequenceAllocations,
      long timestamp,
      long[] values,
      SQLException[] exceptions,
      Sequence.ValueOp action)
      throws SQLException {
    int i = 0;
    for (SequenceAllocation sequenceAllocation : sequenceAllocations) {
      SequenceInfo info = sequenceMap.get(sequenceAllocation.getSequenceKey());
      if (info == null) {
        exceptions[i] =
            new SequenceNotFoundException(
                sequenceAllocation.getSequenceKey().getSchemaName(),
                sequenceAllocation.getSequenceKey().getSequenceName());
      } else {
        values[i] = info.sequenceValue;
      }
      i++;
    }
  }

  @Override
  public void incrementSequences(
      List<SequenceAllocation> sequenceAllocations,
      long timestamp,
      long[] values,
      SQLException[] exceptions)
      throws SQLException {
    int i = 0;
    for (SequenceAllocation sequenceAllocation : sequenceAllocations) {
      SequenceKey key = sequenceAllocation.getSequenceKey();
      SequenceInfo info = sequenceMap.get(key);
      if (info == null) {
        exceptions[i] = new SequenceNotFoundException(key.getSchemaName(), key.getSequenceName());
      } else {
        boolean increaseSeq = info.incrementBy > 0;
        if (info.limitReached) {
          SQLExceptionCode code =
              increaseSeq
                  ? SQLExceptionCode.SEQUENCE_VAL_REACHED_MAX_VALUE
                  : SQLExceptionCode.SEQUENCE_VAL_REACHED_MIN_VALUE;
          exceptions[i] = new SQLExceptionInfo.Builder(code).build().buildException();
        } else {
          values[i] = info.sequenceValue;
          info.sequenceValue += info.incrementBy * info.cacheSize;
          info.limitReached = SequenceUtil.checkIfLimitReached(info);
          if (info.limitReached && info.cycle) {
            info.sequenceValue = increaseSeq ? info.minValue : info.maxValue;
            info.limitReached = false;
          }
        }
      }
      i++;
    }
    i = 0;
    for (SQLException e : exceptions) {
      if (e != null) {
        sequenceMap.remove(sequenceAllocations.get(i).getSequenceKey());
      }
      i++;
    }
  }

  @Override
  public long currentSequenceValue(SequenceKey sequenceKey, long timestamp) throws SQLException {
    SequenceInfo info = sequenceMap.get(sequenceKey);
    if (info == null) {
      throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_CALL_CURRENT_BEFORE_NEXT_VALUE)
          .setSchemaName(sequenceKey.getSchemaName())
          .setTableName(sequenceKey.getSequenceName())
          .build()
          .buildException();
    }
    return info.sequenceValue;
  }

  @Override
  public void returnSequences(
      List<SequenceKey> sequenceKeys, long timestamp, SQLException[] exceptions)
      throws SQLException {}

  @Override
  public void addConnection(PhoenixConnection connection) throws SQLException {}

  @Override
  public void removeConnection(PhoenixConnection connection) throws SQLException {}

  @Override
  public KeyValueBuilder getKeyValueBuilder() {
    return this.kvBuilder;
  }

  @Override
  public boolean supportsFeature(Feature feature) {
    return true;
  }

  @Override
  public String getUserName() {
    return userName;
  }

  @Override
  public PTableStats getTableStats(byte[] physicalName, long clientTimeStamp) {
    return PTableStats.EMPTY_STATS;
  }

  @Override
  public long clearCache() throws SQLException {
    return 0;
  }

  @Override
  public int getSequenceSaltBuckets() {
    return getProps()
        .getInt(
            QueryServices.SEQUENCE_SALT_BUCKETS_ATTRIB,
            QueryServicesOptions.DEFAULT_SEQUENCE_TABLE_SALT_BUCKETS);
  }

  @Override
  public TransactionSystemClient getTransactionSystemClient() {
    return txSystemClient;
  }

  @Override
  public MetaDataMutationResult createFunction(
      List<Mutation> functionData, PFunction function, boolean temporary) throws SQLException {
    return new MetaDataMutationResult(MutationCode.FUNCTION_NOT_FOUND, 0L, null);
  }

  @Override
  public PMetaData addFunction(PFunction function) throws SQLException {
    return metaData = this.metaData.addFunction(function);
  }

  @Override
  public PMetaData removeFunction(PName tenantId, String function, long functionTimeStamp)
      throws SQLException {
    return metaData = this.metaData.removeFunction(tenantId, function, functionTimeStamp);
  }

  @Override
  public MetaDataMutationResult getFunctions(
      PName tenantId, List<Pair<byte[], Long>> functionNameAndTimeStampPairs, long clientTimestamp)
      throws SQLException {
    List<PFunction> functions = new ArrayList<PFunction>(functionNameAndTimeStampPairs.size());
    for (Pair<byte[], Long> functionInfo : functionNameAndTimeStampPairs) {
      try {
        PFunction function2 =
            metaData.getFunction(new PTableKey(tenantId, Bytes.toString(functionInfo.getFirst())));
        functions.add(function2);
      } catch (FunctionNotFoundException e) {
        return new MetaDataMutationResult(MutationCode.FUNCTION_NOT_FOUND, 0, null);
      }
    }
    if (functions.isEmpty()) {
      return null;
    }
    return new MetaDataMutationResult(MutationCode.FUNCTION_ALREADY_EXISTS, 0, functions, true);
  }

  @Override
  public MetaDataMutationResult dropFunction(List<Mutation> tableMetadata, boolean ifExists)
      throws SQLException {
    return new MetaDataMutationResult(MutationCode.FUNCTION_ALREADY_EXISTS, 0, null);
  }

  @Override
  public long getRenewLeaseThresholdMilliSeconds() {
    return 0;
  }

  @Override
  public boolean isRenewingLeasesEnabled() {
    return false;
  }

  public HRegionLocation getTableRegionLocation(byte[] tableName, byte[] row) throws SQLException {
    List<HRegionLocation> regions = tableSplits.get(Bytes.toString(tableName));
    if (regions != null) {
      for (HRegionLocation region : regions) {
        if (Bytes.compareTo(region.getRegionInfo().getStartKey(), row) <= 0
            && Bytes.compareTo(region.getRegionInfo().getEndKey(), row) > 0) {
          return region;
        }
      }
    }
    return new HRegionLocation(
        new HRegionInfo(
            TableName.valueOf(tableName), HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW),
        SERVER_NAME,
        -1);
  }

  @Override
  public MetaDataMutationResult createSchema(List<Mutation> schemaMutations, String schemaName) {
    return new MetaDataMutationResult(MutationCode.SCHEMA_NOT_FOUND, 0L, null);
  }

  @Override
  public PMetaData addSchema(PSchema schema) throws SQLException {
    return metaData = this.metaData.addSchema(schema);
  }

  @Override
  public MetaDataMutationResult getSchema(String schemaName, long clientTimestamp)
      throws SQLException {
    try {
      PSchema schema = metaData.getSchema(new PTableKey(null, schemaName));
      return new MetaDataMutationResult(MutationCode.SCHEMA_ALREADY_EXISTS, schema, 0);
    } catch (SchemaNotFoundException e) {
      // Fall through to the SCHEMA_NOT_FOUND result below.
    }
    return new MetaDataMutationResult(MutationCode.SCHEMA_NOT_FOUND, 0, null);
  }

  @Override
  public PMetaData removeSchema(PSchema schema, long schemaTimeStamp) {
    return metaData = metaData.removeSchema(schema, schemaTimeStamp);
  }

  @Override
  public MetaDataMutationResult dropSchema(List<Mutation> schemaMetaData, String schemaName) {
    return new MetaDataMutationResult(MutationCode.SCHEMA_ALREADY_EXISTS, 0, null);
  }
}
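For context, this class is the backend that a connectionless Phoenix JDBC URL resolves to. A minimal usage sketch, assuming the Phoenix client driver is on the classpath; the "none" host token (PhoenixRuntime.CONNECTIONLESS) selects the connectionless code path:

import java.sql.Connection;
import java.sql.DriverManager;

public class ConnectionlessDemo {
  public static void main(String[] args) throws Exception {
    // "none" in place of a ZooKeeper quorum selects the connectionless services.
    try (Connection conn = DriverManager.getConnection("jdbc:phoenix:none")) {
      // DDL is applied to the in-memory PMetaData only; no HBase cluster is contacted.
      conn.createStatement().execute(
          "CREATE TABLE IF NOT EXISTS T (K VARCHAR PRIMARY KEY, V VARCHAR)");
    }
  }
}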