private void verifyViewCreation() {
    // replace works for new view
    doCreateView(temporaryCreateView, true);

    // replace works for existing view
    doCreateView(temporaryCreateView, true);

    // create fails for existing view
    try {
        doCreateView(temporaryCreateView, false);
        fail("create existing should fail");
    } catch (ViewAlreadyExistsException e) {
        assertEquals(e.getViewName(), temporaryCreateView);
    }

    // drop works when view exists
    metadata.dropView(SESSION, temporaryCreateView);
    assertEquals(metadata.getViews(SESSION, temporaryCreateView.toSchemaTablePrefix()).size(), 0);
    assertFalse(metadata.listViews(SESSION, temporaryCreateView.getSchemaName()).contains(temporaryCreateView));

    // drop fails when view does not exist
    try {
        metadata.dropView(SESSION, temporaryCreateView);
        fail("drop non-existing should fail");
    } catch (ViewNotFoundException e) {
        assertEquals(e.getViewName(), temporaryCreateView);
    }

    // create works for new view
    doCreateView(temporaryCreateView, false);
}

private void doCreateView(SchemaTableName viewName, boolean replace) {
    String viewData = "test data";
    metadata.createView(SESSION, viewName, viewData, replace);

    Map<SchemaTableName, String> views = metadata.getViews(SESSION, viewName.toSchemaTablePrefix());
    assertEquals(views.size(), 1);
    assertEquals(views.get(viewName), viewData);

    assertTrue(metadata.listViews(SESSION, viewName.getSchemaName()).contains(viewName));
}
@Test
public void testListUnknownSchema() {
    assertNull(metadata.getTableHandle(SESSION, new SchemaTableName("totally_invalid_database_name", "dual")));
    assertEquals(metadata.listTables(SESSION, "totally_invalid_database_name"), ImmutableList.of());
    assertEquals(metadata.listTableColumns(SESSION, new SchemaTablePrefix("totally_invalid_database_name", "dual")), ImmutableMap.of());
}

@Test
public void testListUnknownSchema() {
    assertNull(metadata.getTableHandle(SESSION, new SchemaTableName(INVALID_DATABASE, INVALID_TABLE)));
    assertEquals(metadata.listTables(SESSION, INVALID_DATABASE), ImmutableList.of());
    assertEquals(metadata.listTableColumns(SESSION, new SchemaTablePrefix(INVALID_DATABASE, INVALID_TABLE)), ImmutableMap.of());
    assertEquals(metadata.listViews(SESSION, INVALID_DATABASE), ImmutableList.of());
    assertEquals(metadata.getViews(SESSION, new SchemaTablePrefix(INVALID_DATABASE, INVALID_TABLE)), ImmutableMap.of());
}
@Test
public void testGetPartitionSplitsTableOfflinePartition() throws Exception {
    ConnectorTableHandle tableHandle = getTableHandle(tableOfflinePartition);
    assertNotNull(tableHandle);

    ConnectorColumnHandle dsColumn = metadata.getColumnHandle(tableHandle, "ds");
    assertNotNull(dsColumn);

    Domain domain = Domain.singleValue(utf8Slice("2012-12-30"));
    TupleDomain<ConnectorColumnHandle> tupleDomain = TupleDomain.withColumnDomains(ImmutableMap.of(dsColumn, domain));
    ConnectorPartitionResult partitionResult = splitManager.getPartitions(tableHandle, tupleDomain);
    for (ConnectorPartition partition : partitionResult.getPartitions()) {
        if (domain.equals(partition.getTupleDomain().getDomains().get(dsColumn))) {
            try {
                getSplitCount(splitManager.getPartitionSplits(tableHandle, ImmutableList.of(partition)));
                fail("Expected PartitionOfflineException");
            } catch (PartitionOfflineException e) {
                assertEquals(e.getTableName(), tableOfflinePartition);
                assertEquals(e.getPartition(), "ds=2012-12-30");
            }
        } else {
            getSplitCount(splitManager.getPartitionSplits(tableHandle, ImmutableList.of(partition)));
        }
    }
}
@Test
public void testHiveViewsHaveNoColumns() throws Exception {
    assertEquals(metadata.listTableColumns(SESSION, new SchemaTablePrefix(view.getSchemaName(), view.getTableName())), ImmutableMap.of());
}
@Test
public void testBucketedTableDoubleFloat() throws Exception {
    ConnectorTableHandle tableHandle = getTableHandle(tableBucketedDoubleFloat);
    List<ConnectorColumnHandle> columnHandles = ImmutableList.copyOf(metadata.getColumnHandles(tableHandle).values());
    Map<String, Integer> columnIndex = indexColumns(columnHandles);

    assertTableIsBucketed(tableHandle);

    ImmutableMap<ConnectorColumnHandle, Comparable<?>> bindings = ImmutableMap.<ConnectorColumnHandle, Comparable<?>>builder()
            .put(columnHandles.get(columnIndex.get("t_float")), 406.1000061035156)
            .put(columnHandles.get(columnIndex.get("t_double")), 407.2)
            .build();

    // floats and doubles are not supported, so we should see all splits
    ConnectorPartitionResult partitionResult = splitManager.getPartitions(tableHandle, TupleDomain.withFixedValues(bindings));
    List<ConnectorSplit> splits = getAllSplits(splitManager.getPartitionSplits(tableHandle, partitionResult.getPartitions()));
    assertEquals(splits.size(), 32);

    int count = 0;
    for (ConnectorSplit split : splits) {
        try (RecordCursor cursor = recordSetProvider.getRecordSet(split, columnHandles).cursor()) {
            while (cursor.advanceNextPosition()) {
                count++;
            }
        }
    }
    assertEquals(count, 300);
}
@SuppressWarnings({"ValueOfIncrementOrDecrementUsed", "UnusedAssignment"})
@Test
public void testGetTableSchema() throws Exception {
    ConnectorTableMetadata tableMetadata = metadata.getTableMetadata(getTableHandle(table));
    Map<String, ColumnMetadata> map = uniqueIndex(tableMetadata.getColumns(), columnNameGetter());

    int i = 0;
    assertPrimitiveField(map, i++, "t_string", VARCHAR, false);
    assertPrimitiveField(map, i++, "t_tinyint", BIGINT, false);
    assertPrimitiveField(map, i++, "t_smallint", BIGINT, false);
    assertPrimitiveField(map, i++, "t_int", BIGINT, false);
    assertPrimitiveField(map, i++, "t_bigint", BIGINT, false);
    assertPrimitiveField(map, i++, "t_float", DOUBLE, false);
    assertPrimitiveField(map, i++, "t_double", DOUBLE, false);
    assertPrimitiveField(map, i++, "t_map", VARCHAR, false); // Currently mapped as a string
    assertPrimitiveField(map, i++, "t_boolean", BOOLEAN, false);
    assertPrimitiveField(map, i++, "t_timestamp", TIMESTAMP, false);
    assertPrimitiveField(map, i++, "t_binary", VARBINARY, false);
    assertPrimitiveField(map, i++, "t_array_string", VARCHAR, false); // Currently mapped as a string
    assertPrimitiveField(map, i++, "t_complex", VARCHAR, false); // Currently mapped as a string
    assertPrimitiveField(map, i++, "ds", VARCHAR, true);
    assertPrimitiveField(map, i++, "file_format", VARCHAR, true);
    assertPrimitiveField(map, i++, "dummy", BIGINT, true);
}
@Test
public void testGetTableSchemaOfflinePartition() throws Exception {
    ConnectorTableHandle tableHandle = getTableHandle(tableOfflinePartition);
    ConnectorTableMetadata tableMetadata = metadata.getTableMetadata(tableHandle);
    Map<String, ColumnMetadata> map = uniqueIndex(tableMetadata.getColumns(), columnNameGetter());

    assertPrimitiveField(map, 0, "t_string", VARCHAR, false);
}
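// assertPrimitiveField(...) used by the schema tests above is not part of this excerpt.
// A minimal sketch of what it presumably verifies, assuming ColumnMetadata exposes its ordinal
// position, type, and partition-key flag (the accessor names below are assumptions, not
// necessarily the real implementation):
private static void assertPrimitiveField(Map<String, ColumnMetadata> map, int position, String name, Type type, boolean partitionKey) {
    assertTrue(map.containsKey(name));
    ColumnMetadata column = map.get(name);
    assertEquals(column.getOrdinalPosition(), position);
    assertEquals(column.getType(), type);
    assertEquals(column.isPartitionKey(), partitionKey);
}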
@Test
public void testViewCreation() {
    try {
        verifyViewCreation();
    } finally {
        try {
            metadata.dropView(SESSION, temporaryCreateView);
        } catch (RuntimeException e) {
            Logger.get(getClass()).warn(e, "Failed to drop view: %s", temporaryCreateView);
        }
    }
}
@Test
public void testBucketedTableBigintBoolean() throws Exception {
    ConnectorTableHandle tableHandle = getTableHandle(tableBucketedBigintBoolean);
    List<ConnectorColumnHandle> columnHandles = ImmutableList.copyOf(metadata.getColumnHandles(tableHandle).values());
    Map<String, Integer> columnIndex = indexColumns(columnHandles);

    assertTableIsBucketed(tableHandle);

    String testString = "textfile test";
    // This needs to match one of the rows where t_string is not empty or null, and where t_bigint is not null
    // (i.e. (testBigint - 604) % 19 > 1 and (testBigint - 604) % 13 != 0).
    // For testBigint = 608: 608 - 604 = 4, and 4 % 19 = 4 > 1, 4 % 13 = 4 != 0, so the row qualifies.
    Long testBigint = 608L;
    Boolean testBoolean = true;

    ImmutableMap<ConnectorColumnHandle, Comparable<?>> bindings = ImmutableMap.<ConnectorColumnHandle, Comparable<?>>builder()
            .put(columnHandles.get(columnIndex.get("t_string")), utf8Slice(testString))
            .put(columnHandles.get(columnIndex.get("t_bigint")), testBigint)
            .put(columnHandles.get(columnIndex.get("t_boolean")), testBoolean)
            .build();

    ConnectorPartitionResult partitionResult = splitManager.getPartitions(tableHandle, TupleDomain.withFixedValues(bindings));
    List<ConnectorSplit> splits = getAllSplits(splitManager.getPartitionSplits(tableHandle, partitionResult.getPartitions()));
    assertEquals(splits.size(), 1);

    boolean rowFound = false;
    try (RecordCursor cursor = recordSetProvider.getRecordSet(splits.get(0), columnHandles).cursor()) {
        while (cursor.advanceNextPosition()) {
            if (testString.equals(cursor.getSlice(columnIndex.get("t_string")).toStringUtf8())
                    && testBigint == cursor.getLong(columnIndex.get("t_bigint"))
                    && testBoolean == cursor.getBoolean(columnIndex.get("t_boolean"))) {
                rowFound = true;
                break;
            }
        }
        assertTrue(rowFound);
    }
}
@Test
public void testGetRecordsUnpartitioned() throws Exception {
    ConnectorTableHandle tableHandle = getTableHandle(tableUnpartitioned);
    List<ConnectorColumnHandle> columnHandles = ImmutableList.copyOf(metadata.getColumnHandles(tableHandle).values());
    Map<String, Integer> columnIndex = indexColumns(columnHandles);

    ConnectorPartitionResult partitionResult = splitManager.getPartitions(tableHandle, TupleDomain.<ConnectorColumnHandle>all());
    List<ConnectorSplit> splits = getAllSplits(splitManager.getPartitionSplits(tableHandle, partitionResult.getPartitions()));
    assertEquals(splits.size(), 1);

    for (ConnectorSplit split : splits) {
        HiveSplit hiveSplit = (HiveSplit) split;

        assertEquals(hiveSplit.getPartitionKeys(), ImmutableList.of());

        long rowNumber = 0;
        try (RecordCursor cursor = recordSetProvider.getRecordSet(split, columnHandles).cursor()) {
            assertRecordCursorType(cursor, "textfile");
            assertEquals(cursor.getTotalBytes(), hiveSplit.getLength());

            while (cursor.advanceNextPosition()) {
                rowNumber++;

                if (rowNumber % 19 == 0) {
                    assertTrue(cursor.isNull(columnIndex.get("t_string")));
                } else if (rowNumber % 19 == 1) {
                    assertEquals(cursor.getSlice(columnIndex.get("t_string")).toStringUtf8(), "");
                } else {
                    assertEquals(cursor.getSlice(columnIndex.get("t_string")).toStringUtf8(), "unpartitioned");
                }

                assertEquals(cursor.getLong(columnIndex.get("t_tinyint")), 1 + rowNumber);
            }
        }
        assertEquals(rowNumber, 100);
    }
}
@Test
public void testBucketedTableStringInt() throws Exception {
    ConnectorTableHandle tableHandle = getTableHandle(tableBucketedStringInt);
    List<ConnectorColumnHandle> columnHandles = ImmutableList.copyOf(metadata.getColumnHandles(tableHandle).values());
    Map<String, Integer> columnIndex = indexColumns(columnHandles);

    assertTableIsBucketed(tableHandle);

    String testString = "sequencefile test";
    Long testInt = 413L;
    Long testSmallint = 412L;

    // Reverse the order of bindings as compared to bucketing order
    ImmutableMap<ConnectorColumnHandle, Comparable<?>> bindings = ImmutableMap.<ConnectorColumnHandle, Comparable<?>>builder()
            .put(columnHandles.get(columnIndex.get("t_int")), testInt)
            .put(columnHandles.get(columnIndex.get("t_string")), utf8Slice(testString))
            .put(columnHandles.get(columnIndex.get("t_smallint")), testSmallint)
            .build();

    ConnectorPartitionResult partitionResult = splitManager.getPartitions(tableHandle, TupleDomain.withFixedValues(bindings));
    List<ConnectorSplit> splits = getAllSplits(splitManager.getPartitionSplits(tableHandle, partitionResult.getPartitions()));
    assertEquals(splits.size(), 1);

    boolean rowFound = false;
    try (RecordCursor cursor = recordSetProvider.getRecordSet(splits.get(0), columnHandles).cursor()) {
        while (cursor.advanceNextPosition()) {
            if (testString.equals(cursor.getSlice(columnIndex.get("t_string")).toStringUtf8())
                    && testInt == cursor.getLong(columnIndex.get("t_int"))
                    && testSmallint == cursor.getLong(columnIndex.get("t_smallint"))) {
                rowFound = true;
            }
        }
        assertTrue(rowFound);
    }
}
@Test
public void testGetPartialRecords() throws Exception {
    ConnectorTableHandle tableHandle = getTableHandle(table);
    List<ConnectorColumnHandle> columnHandles = ImmutableList.copyOf(metadata.getColumnHandles(tableHandle).values());
    Map<String, Integer> columnIndex = indexColumns(columnHandles);

    ConnectorPartitionResult partitionResult = splitManager.getPartitions(tableHandle, TupleDomain.<ConnectorColumnHandle>all());
    List<ConnectorSplit> splits = getAllSplits(splitManager.getPartitionSplits(tableHandle, partitionResult.getPartitions()));
    assertEquals(splits.size(), this.partitions.size());

    for (ConnectorSplit split : splits) {
        HiveSplit hiveSplit = (HiveSplit) split;

        List<HivePartitionKey> partitionKeys = hiveSplit.getPartitionKeys();
        String ds = partitionKeys.get(0).getValue();
        String fileType = partitionKeys.get(1).getValue();
        long dummy = Long.parseLong(partitionKeys.get(2).getValue());

        long baseValue = getBaseValueForFileType(fileType);

        long rowNumber = 0;
        try (RecordCursor cursor = recordSetProvider.getRecordSet(hiveSplit, columnHandles).cursor()) {
            assertRecordCursorType(cursor, fileType);
            while (cursor.advanceNextPosition()) {
                rowNumber++;

                assertEquals(cursor.getDouble(columnIndex.get("t_double")), baseValue + 6.2 + rowNumber);
                assertEquals(cursor.getSlice(columnIndex.get("ds")).toStringUtf8(), ds);
                assertEquals(cursor.getSlice(columnIndex.get("file_format")).toStringUtf8(), fileType);
                assertEquals(cursor.getLong(columnIndex.get("dummy")), dummy);
            }
        }
        assertEquals(rowNumber, 100);
    }
}
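// The read tests above resolve cursor field positions by column name through indexColumns(...),
// which is not part of this excerpt. A minimal sketch of what it presumably does, assuming the
// handles are HiveColumnHandles that expose their column name via getName():
private static ImmutableMap<String, Integer> indexColumns(List<ConnectorColumnHandle> columnHandles) {
    ImmutableMap.Builder<String, Integer> index = ImmutableMap.builder();
    int position = 0;
    for (ConnectorColumnHandle columnHandle : columnHandles) {
        // assumption: each handle is a HiveColumnHandle carrying its Hive column name
        index.put(((HiveColumnHandle) columnHandle).getName(), position);
        position++;
    }
    return index.build();
}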
@Test
public void testGetTableNames() throws Exception {
    List<SchemaTableName> tables = metadata.listTables(SESSION, database);
    assertTrue(tables.contains(table));
}

@Test
public void testGetDatabaseNames() throws Exception {
    List<String> databases = metadata.listSchemaNames(SESSION);
    assertTrue(databases.contains(database.toLowerCase(ENGLISH)));
}
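// The @Override methods below (and the similar ones further down) are class-loader-safe wrappers:
// each call is forwarded to the underlying metadata implementation inside a try-with-resources
// block that temporarily installs the connector's class loader as the thread context class loader,
// presumably restoring the previous loader when the block closes.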
@Override
public Map<SchemaTableName, List<ColumnMetadata>> listTableColumns(SchemaTablePrefix prefix) {
    try (ThreadContextClassLoader ignored = new ThreadContextClassLoader(classLoader)) {
        return delegate.listTableColumns(prefix);
    }
}

@Override
public ColumnMetadata getColumnMetadata(TableHandle tableHandle, ColumnHandle columnHandle) {
    try (ThreadContextClassLoader ignored = new ThreadContextClassLoader(classLoader)) {
        return delegate.getColumnMetadata(tableHandle, columnHandle);
    }
}

@Override
public Map<String, ColumnHandle> getColumnHandles(TableHandle tableHandle) {
    try (ThreadContextClassLoader ignored = new ThreadContextClassLoader(classLoader)) {
        return delegate.getColumnHandles(tableHandle);
    }
}

@Override
public ColumnHandle getColumnHandle(TableHandle tableHandle, String columnName) {
    try (ThreadContextClassLoader ignored = new ThreadContextClassLoader(classLoader)) {
        return delegate.getColumnHandle(tableHandle, columnName);
    }
}

@Override
public List<SchemaTableName> listTables(String schemaNameOrNull) {
    try (ThreadContextClassLoader ignored = new ThreadContextClassLoader(classLoader)) {
        return delegate.listTables(schemaNameOrNull);
    }
}

@Override
public ConnectorTableMetadata getTableMetadata(TableHandle table) {
    try (ThreadContextClassLoader ignored = new ThreadContextClassLoader(classLoader)) {
        return delegate.getTableMetadata(table);
    }
}

@Override
public TableHandle getTableHandle(SchemaTableName tableName) {
    try (ThreadContextClassLoader ignored = new ThreadContextClassLoader(classLoader)) {
        return delegate.getTableHandle(tableName);
    }
}
// disabled until metadata manager is updated to handle invalid catalogs and schemas
@Test(enabled = false, expectedExceptions = SchemaNotFoundException.class)
public void testGetTableNamesException() throws Exception {
    metadata.listTables(SESSION, INVALID_DATABASE);
}
@Test
public void testGetRecords() throws Exception {
    ConnectorTableHandle tableHandle = getTableHandle(table);
    ConnectorTableMetadata tableMetadata = metadata.getTableMetadata(SESSION, tableHandle);
    List<ColumnHandle> columnHandles = ImmutableList.copyOf(metadata.getColumnHandles(SESSION, tableHandle).values());
    Map<String, Integer> columnIndex = indexColumns(columnHandles);

    ConnectorPartitionResult partitionResult = splitManager.getPartitions(SESSION, tableHandle, TupleDomain.<ColumnHandle>all());
    List<ConnectorSplit> splits = getAllSplits(splitManager.getPartitionSplits(SESSION, tableHandle, partitionResult.getPartitions()));

    long rowNumber = 0;
    for (ConnectorSplit split : splits) {
        CassandraSplit cassandraSplit = (CassandraSplit) split;

        long completedBytes = 0;
        try (RecordCursor cursor = recordSetProvider.getRecordSet(SESSION, cassandraSplit, columnHandles).cursor()) {
            while (cursor.advanceNextPosition()) {
                try {
                    assertReadFields(cursor, tableMetadata.getColumns());
                } catch (RuntimeException e) {
                    throw new RuntimeException("row " + rowNumber, e);
                }

                rowNumber++;

                String keyValue = cursor.getSlice(columnIndex.get("key")).toStringUtf8();
                assertTrue(keyValue.startsWith("key "));
                int rowId = Integer.parseInt(keyValue.substring(4));

                assertEquals(keyValue, String.format("key %d", rowId));
                assertEquals(Bytes.toHexString(cursor.getSlice(columnIndex.get("typebytes")).getBytes()), String.format("0x%08X", rowId));

                // VARINT is returned as a string
                assertEquals(cursor.getSlice(columnIndex.get("typeinteger")).toStringUtf8(), String.valueOf(rowId));

                assertEquals(cursor.getLong(columnIndex.get("typelong")), 1000 + rowId);

                assertEquals(cursor.getSlice(columnIndex.get("typeuuid")).toStringUtf8(), String.format("00000000-0000-0000-0000-%012d", rowId));

                assertEquals(cursor.getSlice(columnIndex.get("typetimestamp")).toStringUtf8(), Long.valueOf(DATE.getTime()).toString());

                long newCompletedBytes = cursor.getCompletedBytes();
                assertTrue(newCompletedBytes >= completedBytes);
                completedBytes = newCompletedBytes;
            }
        }
    }
    assertEquals(rowNumber, 9);
}
@Override
public void commitCreateTable(OutputTableHandle tableHandle, Collection<String> fragments) {
    try (ThreadContextClassLoader ignored = new ThreadContextClassLoader(classLoader)) {
        delegate.commitCreateTable(tableHandle, fragments);
    }
}

@Override
public String toString() {
    try (ThreadContextClassLoader ignored = new ThreadContextClassLoader(classLoader)) {
        return delegate.toString();
    }
}

@Override
public List<String> listSchemaNames() {
    try (ThreadContextClassLoader ignored = new ThreadContextClassLoader(classLoader)) {
        return delegate.listSchemaNames();
    }
}
private ConnectorTableHandle getTableHandle(SchemaTableName tableName) {
    ConnectorTableHandle handle = metadata.getTableHandle(SESSION, tableName);
    checkArgument(handle != null, "table not found: %s", tableName);
    return handle;
}
@Override
public boolean canHandle(TableHandle tableHandle) {
    try (ThreadContextClassLoader ignored = new ThreadContextClassLoader(classLoader)) {
        return delegate.canHandle(tableHandle);
    }
}