@Test
public void testGetPartitionNames()
        throws Exception
{
    ConnectorTableHandle tableHandle = getTableHandle(table);
    ConnectorPartitionResult partitionResult = splitManager.getPartitions(tableHandle, TupleDomain.<ConnectorColumnHandle>all());
    assertExpectedPartitions(partitionResult.getPartitions());
}
@Test
public void testGetPartitionNamesUnpartitioned()
        throws Exception
{
    ConnectorTableHandle tableHandle = getTableHandle(tableUnpartitioned);
    ConnectorPartitionResult partitionResult = splitManager.getPartitions(tableHandle, TupleDomain.<ConnectorColumnHandle>all());
    assertEquals(partitionResult.getPartitions().size(), 1);
    assertEquals(partitionResult.getPartitions(), unpartitionedPartitions);
}
@Test
public void testGetPartitionTableOffline()
        throws Exception
{
    ConnectorTableHandle tableHandle = getTableHandle(tableOffline);
    try {
        splitManager.getPartitions(tableHandle, TupleDomain.<ConnectorColumnHandle>all());
        fail("expected TableOfflineException");
    }
    catch (TableOfflineException e) {
        assertEquals(e.getTableName(), tableOffline);
    }
}
@Test
public void testGetPartitionSplitsBatchUnpartitioned()
        throws Exception
{
    ConnectorTableHandle tableHandle = getTableHandle(tableUnpartitioned);
    ConnectorPartitionResult partitionResult = splitManager.getPartitions(tableHandle, TupleDomain.<ConnectorColumnHandle>all());
    ConnectorSplitSource splitSource = splitManager.getPartitionSplits(tableHandle, partitionResult.getPartitions());
    assertEquals(getSplitCount(splitSource), 1);
}
private void assertTableIsBucketed(ConnectorTableHandle tableHandle)
        throws Exception
{
    // the bucketed test tables should have exactly 32 splits
    ConnectorPartitionResult partitionResult = splitManager.getPartitions(tableHandle, TupleDomain.<ConnectorColumnHandle>all());
    List<ConnectorSplit> splits = getAllSplits(splitManager.getPartitionSplits(tableHandle, partitionResult.getPartitions()));
    assertEquals(splits.size(), 32);

    // verify all paths are unique
    Set<String> paths = new HashSet<>();
    for (ConnectorSplit split : splits) {
        assertTrue(paths.add(((HiveSplit) split).getPath()));
    }
}
@Test(expectedExceptions = RuntimeException.class, expectedExceptionsMessageRegExp = ".*" + INVALID_COLUMN + ".*")
public void testGetRecordsInvalidColumn()
        throws Exception
{
    ConnectorTableHandle table = getTableHandle(tableUnpartitioned);
    ConnectorPartitionResult partitionResult = splitManager.getPartitions(table, TupleDomain.<ConnectorColumnHandle>all());
    ConnectorSplit split = Iterables.getFirst(getAllSplits(splitManager.getPartitionSplits(table, partitionResult.getPartitions())), null);
    RecordSet recordSet = recordSetProvider.getRecordSet(split, ImmutableList.of(invalidColumnHandle));
    recordSet.cursor();
}
@Override
protected RelationPlan visitTable(Table node, Void context)
{
    // named queries (e.g., WITH clauses) are planned from their underlying query
    Query namedQuery = analysis.getNamedQuery(node);
    if (namedQuery != null) {
        RelationPlan subPlan = process(namedQuery, null);
        return new RelationPlan(subPlan.getRoot(), analysis.getOutputDescriptor(node), subPlan.getOutputSymbols(), subPlan.getSampleWeight());
    }

    TupleDescriptor descriptor = analysis.getOutputDescriptor(node);
    TableHandle handle = analysis.getTableHandle(node);

    // allocate a symbol for each field of the table and map it to its column handle
    ImmutableList.Builder<Symbol> outputSymbolsBuilder = ImmutableList.builder();
    ImmutableMap.Builder<Symbol, ColumnHandle> columns = ImmutableMap.builder();
    for (Field field : descriptor.getAllFields()) {
        Symbol symbol = symbolAllocator.newSymbol(field.getName().get(), field.getType());
        outputSymbolsBuilder.add(symbol);
        columns.put(symbol, analysis.getColumn(field));
    }
    List<Symbol> planOutputSymbols = outputSymbolsBuilder.build();

    // if the table has a sample weight column, add a hidden symbol for it to the scan output
    Optional<ColumnHandle> sampleWeightColumn = metadata.getSampleWeightColumnHandle(session, handle);
    Symbol sampleWeightSymbol = null;
    if (sampleWeightColumn.isPresent()) {
        sampleWeightSymbol = symbolAllocator.newSymbol("$sampleWeight", BIGINT);
        outputSymbolsBuilder.add(sampleWeightSymbol);
        columns.put(sampleWeightSymbol, sampleWeightColumn.get());
    }

    List<Symbol> nodeOutputSymbols = outputSymbolsBuilder.build();
    PlanNode root = new TableScanNode(idAllocator.getNextId(), handle, nodeOutputSymbols, columns.build(), Optional.empty(), TupleDomain.all(), null);
    return new RelationPlan(root, descriptor, planOutputSymbols, Optional.ofNullable(sampleWeightSymbol));
}
@Test
public void testGetRecordsUnpartitioned()
        throws Exception
{
    ConnectorTableHandle tableHandle = getTableHandle(tableUnpartitioned);
    List<ConnectorColumnHandle> columnHandles = ImmutableList.copyOf(metadata.getColumnHandles(tableHandle).values());
    Map<String, Integer> columnIndex = indexColumns(columnHandles);

    ConnectorPartitionResult partitionResult = splitManager.getPartitions(tableHandle, TupleDomain.<ConnectorColumnHandle>all());
    List<ConnectorSplit> splits = getAllSplits(splitManager.getPartitionSplits(tableHandle, partitionResult.getPartitions()));
    assertEquals(splits.size(), 1);

    for (ConnectorSplit split : splits) {
        HiveSplit hiveSplit = (HiveSplit) split;

        assertEquals(hiveSplit.getPartitionKeys(), ImmutableList.of());

        long rowNumber = 0;
        try (RecordCursor cursor = recordSetProvider.getRecordSet(split, columnHandles).cursor()) {
            assertRecordCursorType(cursor, "textfile");
            assertEquals(cursor.getTotalBytes(), hiveSplit.getLength());

            while (cursor.advanceNextPosition()) {
                rowNumber++;

                if (rowNumber % 19 == 0) {
                    assertTrue(cursor.isNull(columnIndex.get("t_string")));
                }
                else if (rowNumber % 19 == 1) {
                    assertEquals(cursor.getSlice(columnIndex.get("t_string")).toStringUtf8(), "");
                }
                else {
                    assertEquals(cursor.getSlice(columnIndex.get("t_string")).toStringUtf8(), "unpartitioned");
                }

                assertEquals(cursor.getLong(columnIndex.get("t_tinyint")), 1 + rowNumber);
            }
        }
        assertEquals(rowNumber, 100);
    }
}
@Test
public void testGetPartialRecords()
        throws Exception
{
    ConnectorTableHandle tableHandle = getTableHandle(table);
    List<ConnectorColumnHandle> columnHandles = ImmutableList.copyOf(metadata.getColumnHandles(tableHandle).values());
    Map<String, Integer> columnIndex = indexColumns(columnHandles);

    ConnectorPartitionResult partitionResult = splitManager.getPartitions(tableHandle, TupleDomain.<ConnectorColumnHandle>all());
    List<ConnectorSplit> splits = getAllSplits(splitManager.getPartitionSplits(tableHandle, partitionResult.getPartitions()));
    assertEquals(splits.size(), this.partitions.size());

    for (ConnectorSplit split : splits) {
        HiveSplit hiveSplit = (HiveSplit) split;

        List<HivePartitionKey> partitionKeys = hiveSplit.getPartitionKeys();
        String ds = partitionKeys.get(0).getValue();
        String fileType = partitionKeys.get(1).getValue();
        long dummy = Long.parseLong(partitionKeys.get(2).getValue());

        long baseValue = getBaseValueForFileType(fileType);

        long rowNumber = 0;
        try (RecordCursor cursor = recordSetProvider.getRecordSet(hiveSplit, columnHandles).cursor()) {
            assertRecordCursorType(cursor, fileType);
            while (cursor.advanceNextPosition()) {
                rowNumber++;

                assertEquals(cursor.getDouble(columnIndex.get("t_double")), baseValue + 6.2 + rowNumber);
                assertEquals(cursor.getSlice(columnIndex.get("ds")).toStringUtf8(), ds);
                assertEquals(cursor.getSlice(columnIndex.get("file_format")).toStringUtf8(), fileType);
                assertEquals(cursor.getLong(columnIndex.get("dummy")), dummy);
            }
        }
        assertEquals(rowNumber, 100);
    }
}
@Override
public TupleDomain getTupleDomain()
{
    return TupleDomain.all();
}
private void doCreateSampledTable()
        throws InterruptedException
{
    // begin creating the table
    List<ColumnMetadata> columns = ImmutableList.<ColumnMetadata>builder()
            .add(new ColumnMetadata("sales", BIGINT, 1, false))
            .build();

    ConnectorTableMetadata tableMetadata = new ConnectorTableMetadata(temporaryCreateSampledTable, columns, tableOwner, true);
    ConnectorOutputTableHandle outputHandle = metadata.beginCreateTable(SESSION, tableMetadata);

    // write the records
    RecordSink sink = recordSinkProvider.getRecordSink(outputHandle);

    sink.beginRecord(8);
    sink.appendLong(2);
    sink.finishRecord();

    sink.beginRecord(5);
    sink.appendLong(3);
    sink.finishRecord();

    sink.beginRecord(7);
    sink.appendLong(4);
    sink.finishRecord();

    String fragment = sink.commit();

    // commit the table
    metadata.commitCreateTable(outputHandle, ImmutableList.of(fragment));

    // load the new table
    ConnectorTableHandle tableHandle = getTableHandle(temporaryCreateSampledTable);
    List<ConnectorColumnHandle> columnHandles = ImmutableList.<ConnectorColumnHandle>builder()
            .addAll(metadata.getColumnHandles(tableHandle).values())
            .add(metadata.getSampleWeightColumnHandle(tableHandle))
            .build();
    assertEquals(columnHandles.size(), 2);

    // verify the metadata
    tableMetadata = metadata.getTableMetadata(getTableHandle(temporaryCreateSampledTable));
    assertEquals(tableMetadata.getOwner(), tableOwner);

    Map<String, ColumnMetadata> columnMap = uniqueIndex(tableMetadata.getColumns(), columnNameGetter());
    assertEquals(columnMap.size(), 1);

    assertPrimitiveField(columnMap, 0, "sales", BIGINT, false);

    // verify the data
    ConnectorPartitionResult partitionResult = splitManager.getPartitions(tableHandle, TupleDomain.<ConnectorColumnHandle>all());
    assertEquals(partitionResult.getPartitions().size(), 1);
    ConnectorSplitSource splitSource = splitManager.getPartitionSplits(tableHandle, partitionResult.getPartitions());
    ConnectorSplit split = getOnlyElement(getAllSplits(splitSource));

    try (RecordCursor cursor = recordSetProvider.getRecordSet(split, columnHandles).cursor()) {
        assertRecordCursorType(cursor, "rcfile-binary");

        assertTrue(cursor.advanceNextPosition());
        assertEquals(cursor.getLong(0), 2);
        assertEquals(cursor.getLong(1), 8);

        assertTrue(cursor.advanceNextPosition());
        assertEquals(cursor.getLong(0), 3);
        assertEquals(cursor.getLong(1), 5);

        assertTrue(cursor.advanceNextPosition());
        assertEquals(cursor.getLong(0), 4);
        assertEquals(cursor.getLong(1), 7);

        assertFalse(cursor.advanceNextPosition());
    }
}
@Test
public void testGetRecords()
        throws Exception
{
    ConnectorTableHandle tableHandle = getTableHandle(table);
    ConnectorTableMetadata tableMetadata = metadata.getTableMetadata(tableHandle);
    List<ConnectorColumnHandle> columnHandles = ImmutableList.copyOf(metadata.getColumnHandles(tableHandle).values());
    Map<String, Integer> columnIndex = indexColumns(columnHandles);

    ConnectorPartitionResult partitionResult = splitManager.getPartitions(tableHandle, TupleDomain.<ConnectorColumnHandle>all());
    List<ConnectorSplit> splits = getAllSplits(splitManager.getPartitionSplits(tableHandle, partitionResult.getPartitions()));
    assertEquals(splits.size(), this.partitions.size());

    for (ConnectorSplit split : splits) {
        HiveSplit hiveSplit = (HiveSplit) split;

        List<HivePartitionKey> partitionKeys = hiveSplit.getPartitionKeys();
        String ds = partitionKeys.get(0).getValue();
        String fileType = partitionKeys.get(1).getValue();
        long dummy = Long.parseLong(partitionKeys.get(2).getValue());

        long baseValue = getBaseValueForFileType(fileType);
        assertEquals(dummy * 100, baseValue);

        long rowNumber = 0;
        long completedBytes = 0;
        try (RecordCursor cursor = recordSetProvider.getRecordSet(hiveSplit, columnHandles).cursor()) {
            assertRecordCursorType(cursor, fileType);
            assertEquals(cursor.getTotalBytes(), hiveSplit.getLength());

            while (cursor.advanceNextPosition()) {
                try {
                    assertReadFields(cursor, tableMetadata.getColumns());
                }
                catch (RuntimeException e) {
                    throw new RuntimeException("row " + rowNumber, e);
                }

                rowNumber++;

                if (rowNumber % 19 == 0) {
                    assertTrue(cursor.isNull(columnIndex.get("t_string")));
                }
                else if (rowNumber % 19 == 1) {
                    assertEquals(cursor.getSlice(columnIndex.get("t_string")).toStringUtf8(), "");
                }
                else {
                    assertEquals(cursor.getSlice(columnIndex.get("t_string")).toStringUtf8(), (fileType + " test"));
                }

                assertEquals(cursor.getLong(columnIndex.get("t_tinyint")), (long) ((byte) (baseValue + 1 + rowNumber)));
                assertEquals(cursor.getLong(columnIndex.get("t_smallint")), baseValue + 2 + rowNumber);
                assertEquals(cursor.getLong(columnIndex.get("t_int")), baseValue + 3 + rowNumber);

                if (rowNumber % 13 == 0) {
                    assertTrue(cursor.isNull(columnIndex.get("t_bigint")));
                }
                else {
                    assertEquals(cursor.getLong(columnIndex.get("t_bigint")), baseValue + 4 + rowNumber);
                }

                assertEquals(cursor.getDouble(columnIndex.get("t_float")), baseValue + 5.1 + rowNumber, 0.001);
                assertEquals(cursor.getDouble(columnIndex.get("t_double")), baseValue + 6.2 + rowNumber);

                if (rowNumber % 3 == 2) {
                    assertTrue(cursor.isNull(columnIndex.get("t_boolean")));
                }
                else {
                    assertEquals(cursor.getBoolean(columnIndex.get("t_boolean")), rowNumber % 3 != 0);
                }

                if (rowNumber % 17 == 0) {
                    assertTrue(cursor.isNull(columnIndex.get("t_timestamp")));
                }
                else {
                    long millis = new DateTime(2011, 5, 6, 7, 8, 9, 123, timeZone).getMillis();
                    assertEquals(cursor.getLong(columnIndex.get("t_timestamp")), millis, (fileType + " test"));
                }

                if (rowNumber % 23 == 0) {
                    assertTrue(cursor.isNull(columnIndex.get("t_binary")));
                }
                else {
                    assertEquals(cursor.getSlice(columnIndex.get("t_binary")).toStringUtf8(), (fileType + " test"));
                }

                if (rowNumber % 29 == 0) {
                    assertTrue(cursor.isNull(columnIndex.get("t_map")));
                }
                else {
                    String expectedJson = "{\"format\":\"" + fileType + "\"}";
                    assertEquals(cursor.getSlice(columnIndex.get("t_map")).toStringUtf8(), expectedJson);
                }

                if (rowNumber % 27 == 0) {
                    assertTrue(cursor.isNull(columnIndex.get("t_array_string")));
                }
                else {
                    String expectedJson = "[\"" + fileType + "\",\"test\",\"data\"]";
                    assertEquals(cursor.getSlice(columnIndex.get("t_array_string")).toStringUtf8(), expectedJson);
                }

                if (rowNumber % 31 == 0) {
                    assertTrue(cursor.isNull(columnIndex.get("t_complex")));
                }
                else {
                    String expectedJson = "{\"1\":[{\"s_string\":\"" + fileType + "-a\",\"s_double\":0.1},{\"s_string\":\"" + fileType + "-b\",\"s_double\":0.2}]}";
                    assertEquals(cursor.getSlice(columnIndex.get("t_complex")).toStringUtf8(), expectedJson);
                }

                assertEquals(cursor.getSlice(columnIndex.get("ds")).toStringUtf8(), ds);
                assertEquals(cursor.getSlice(columnIndex.get("file_format")).toStringUtf8(), fileType);
                assertEquals(cursor.getLong(columnIndex.get("dummy")), dummy);

                long newCompletedBytes = cursor.getCompletedBytes();
                assertTrue(newCompletedBytes >= completedBytes);
                assertTrue(newCompletedBytes <= hiveSplit.getLength());
                completedBytes = newCompletedBytes;
            }
        }
        assertTrue(completedBytes <= hiveSplit.getLength());
        assertEquals(rowNumber, 100);
    }
}
@Test(expectedExceptions = TableNotFoundException.class)
public void testGetPartitionNamesException()
        throws Exception
{
    splitManager.getPartitions(invalidTableHandle, TupleDomain.<ConnectorColumnHandle>all());
}
private void doCreateTable()
        throws InterruptedException
{
    // begin creating the table
    List<ColumnMetadata> columns = ImmutableList.<ColumnMetadata>builder()
            .add(new ColumnMetadata("id", BIGINT, 1, false))
            .add(new ColumnMetadata("t_string", VARCHAR, 2, false))
            .add(new ColumnMetadata("t_bigint", BIGINT, 3, false))
            .add(new ColumnMetadata("t_double", DOUBLE, 4, false))
            .add(new ColumnMetadata("t_boolean", BOOLEAN, 5, false))
            .build();

    ConnectorTableMetadata tableMetadata = new ConnectorTableMetadata(temporaryCreateTable, columns, tableOwner);
    ConnectorOutputTableHandle outputHandle = metadata.beginCreateTable(SESSION, tableMetadata);

    // write the records
    RecordSink sink = recordSinkProvider.getRecordSink(outputHandle);

    sink.beginRecord(1);
    sink.appendLong(1);
    sink.appendString("hello".getBytes(UTF_8));
    sink.appendLong(123);
    sink.appendDouble(43.5);
    sink.appendBoolean(true);
    sink.finishRecord();

    sink.beginRecord(1);
    sink.appendLong(2);
    sink.appendNull();
    sink.appendNull();
    sink.appendNull();
    sink.appendNull();
    sink.finishRecord();

    sink.beginRecord(1);
    sink.appendLong(3);
    sink.appendString("bye".getBytes(UTF_8));
    sink.appendLong(456);
    sink.appendDouble(98.1);
    sink.appendBoolean(false);
    sink.finishRecord();

    String fragment = sink.commit();

    // commit the table
    metadata.commitCreateTable(outputHandle, ImmutableList.of(fragment));

    // load the new table
    ConnectorTableHandle tableHandle = getTableHandle(temporaryCreateTable);
    List<ConnectorColumnHandle> columnHandles = ImmutableList.copyOf(metadata.getColumnHandles(tableHandle).values());

    // verify the metadata
    tableMetadata = metadata.getTableMetadata(getTableHandle(temporaryCreateTable));
    assertEquals(tableMetadata.getOwner(), tableOwner);

    Map<String, ColumnMetadata> columnMap = uniqueIndex(tableMetadata.getColumns(), columnNameGetter());

    assertPrimitiveField(columnMap, 0, "id", BIGINT, false);
    assertPrimitiveField(columnMap, 1, "t_string", VARCHAR, false);
    assertPrimitiveField(columnMap, 2, "t_bigint", BIGINT, false);
    assertPrimitiveField(columnMap, 3, "t_double", DOUBLE, false);
    assertPrimitiveField(columnMap, 4, "t_boolean", BOOLEAN, false);

    // verify the data
    ConnectorPartitionResult partitionResult = splitManager.getPartitions(tableHandle, TupleDomain.<ConnectorColumnHandle>all());
    assertEquals(partitionResult.getPartitions().size(), 1);
    ConnectorSplitSource splitSource = splitManager.getPartitionSplits(tableHandle, partitionResult.getPartitions());
    ConnectorSplit split = getOnlyElement(getAllSplits(splitSource));

    try (RecordCursor cursor = recordSetProvider.getRecordSet(split, columnHandles).cursor()) {
        assertRecordCursorType(cursor, "rcfile-binary");

        assertTrue(cursor.advanceNextPosition());
        assertEquals(cursor.getLong(0), 1);
        assertEquals(cursor.getSlice(1).toStringUtf8(), "hello");
        assertEquals(cursor.getLong(2), 123);
        assertEquals(cursor.getDouble(3), 43.5);
        assertEquals(cursor.getBoolean(4), true);

        assertTrue(cursor.advanceNextPosition());
        assertEquals(cursor.getLong(0), 2);
        assertTrue(cursor.isNull(1));
        assertTrue(cursor.isNull(2));
        assertTrue(cursor.isNull(3));
        assertTrue(cursor.isNull(4));

        assertTrue(cursor.advanceNextPosition());
        assertEquals(cursor.getLong(0), 3);
        assertEquals(cursor.getSlice(1).toStringUtf8(), "bye");
        assertEquals(cursor.getLong(2), 456);
        assertEquals(cursor.getDouble(3), 98.1);
        assertEquals(cursor.getBoolean(4), false);

        assertFalse(cursor.advanceNextPosition());
    }
}
@BeforeMethod
public void setUp()
        throws Exception
{
    // resolve the DUAL table and its single column
    DualMetadata dualMetadata = new DualMetadata();
    TableHandle tableHandle = dualMetadata.getTableHandle(new SchemaTableName("default", DualMetadata.NAME));
    assertNotNull(tableHandle, "tableHandle is null");

    ColumnHandle columnHandle = dualMetadata.getColumnHandle(tableHandle, DualMetadata.COLUMN_NAME);
    assertNotNull(columnHandle, "columnHandle is null");
    Symbol symbol = new Symbol(DualMetadata.COLUMN_NAME);

    MetadataManager metadata = new MetadataManager(new FeaturesConfig());
    metadata.addInternalSchemaMetadata(MetadataManager.INTERNAL_CONNECTOR_ID, dualMetadata);

    // the DUAL table produces exactly one split
    DualSplitManager dualSplitManager = new DualSplitManager(new InMemoryNodeManager());
    PartitionResult partitionResult = dualSplitManager.getPartitions(tableHandle, TupleDomain.all());

    SplitSource splitSource = dualSplitManager.getPartitionSplits(tableHandle, partitionResult.getPartitions());
    split = Iterables.getOnlyElement(splitSource.getNextBatch(1));
    assertTrue(splitSource.isFinished());

    LocalExecutionPlanner planner = new LocalExecutionPlanner(
            new NodeInfo("test"),
            metadata,
            new DataStreamManager(new DualDataStreamProvider()),
            new MockLocalStorageManager(new File("target/temp")),
            new RecordSinkManager(),
            new MockExchangeClientSupplier(),
            new ExpressionCompiler(metadata));

    taskExecutor = new TaskExecutor(8);
    taskExecutor.start();

    // a single source-distributed fragment that scans the DUAL table
    tableScanNodeId = new PlanNodeId("tableScan");
    PlanFragment testFragment = new PlanFragment(
            new PlanFragmentId("fragment"),
            new TableScanNode(
                    tableScanNodeId,
                    tableHandle,
                    ImmutableList.of(symbol),
                    ImmutableMap.of(symbol, columnHandle),
                    null,
                    Optional.<GeneratedPartitions>absent()),
            ImmutableMap.of(symbol, Type.VARCHAR),
            PlanDistribution.SOURCE,
            tableScanNodeId,
            OutputPartitioning.NONE,
            ImmutableList.<Symbol>of());

    TaskId taskId = new TaskId("query", "stage", "task");
    Session session = new Session("user", "test", "default", "default", "test", "test");

    taskNotificationExecutor = Executors.newCachedThreadPool(threadsNamed("task-notification-%d"));

    outputBuffers = OutputBuffers.INITIAL_EMPTY_OUTPUT_BUFFERS;

    // create the task execution under test
    taskExecution = SqlTaskExecution.createSqlTaskExecution(
            session,
            taskId,
            URI.create("fake://task/" + taskId),
            testFragment,
            ImmutableList.<TaskSource>of(),
            outputBuffers,
            planner,
            new DataSize(32, Unit.MEGABYTE),
            taskExecutor,
            taskNotificationExecutor,
            new DataSize(256, Unit.MEGABYTE),
            new DataSize(8, Unit.MEGABYTE),
            new QueryMonitor(new ObjectMapperProvider().get(), new NullEventClient(), new NodeInfo("test")),
            false);
}