Example #1
  @Test
  public final void testCtasWithMultipleUnions() throws Exception {
    ResultSet res = executeFile("CtasWithMultipleUnions.sql");
    res.close();

    ResultSet res2 = executeQuery();
    String actual = resultSetToString(res2);
    res2.close();

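    // CtasWithMultipleUnions.sql unions the same five-row result twice,
    // so each customer row is expected to appear twice below.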
    String expected =
        "c_custkey,c_nationkey\n"
            + "-------------------------------\n"
            + "1,15\n"
            + "2,13\n"
            + "3,1\n"
            + "4,4\n"
            + "5,3\n"
            + "1,15\n"
            + "2,13\n"
            + "3,1\n"
            + "4,4\n"
            + "5,3\n";

    assertEquals(expected, actual);

    TableDesc desc =
        client.getTableDesc(CatalogUtil.normalizeIdentifier(res2.getMetaData().getTableName(1)));
    assertNotNull(desc);
  }
Example #2
  @Test
  public final void testIsJoinQual() {
    FieldEval f1 = new FieldEval("part.p_partkey", CatalogUtil.newSimpleDataType(Type.INT4));
    FieldEval f2 = new FieldEval("partsupp.ps_partkey", CatalogUtil.newSimpleDataType(Type.INT4));

    BinaryEval[] joinQuals = new BinaryEval[5];
    int idx = 0;
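    // EQUAL, LEQ, LTH, GEQ, and GTH between two column references all qualify as join quals.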
    joinQuals[idx++] = new BinaryEval(EvalType.EQUAL, f1, f2);
    joinQuals[idx++] = new BinaryEval(EvalType.LEQ, f1, f2);
    joinQuals[idx++] = new BinaryEval(EvalType.LTH, f1, f2);
    joinQuals[idx++] = new BinaryEval(EvalType.GEQ, f1, f2);
    joinQuals[idx] = new BinaryEval(EvalType.GTH, f1, f2);
    for (int i = 0; i <= idx; i++) {
      assertTrue(PlannerUtil.isJoinQual(joinQuals[i]));
    }

    BinaryEval[] wrongJoinQuals = new BinaryEval[5];
    idx = 0;
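    // OR, PLUS, LIKE, and a comparison against a constant are not join quals.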
    wrongJoinQuals[idx++] = new BinaryEval(EvalType.OR, f1, f2);
    wrongJoinQuals[idx++] = new BinaryEval(EvalType.PLUS, f1, f2);
    wrongJoinQuals[idx++] = new BinaryEval(EvalType.LIKE, f1, f2);

    ConstEval f3 = new ConstEval(DatumFactory.createInt4(1));
    wrongJoinQuals[idx] = new BinaryEval(EvalType.EQUAL, f1, f3);

    for (int i = 0; i <= idx; i++) {
      assertFalse(PlannerUtil.isJoinQual(wrongJoinQuals[i]));
    }
  }
Example #3
  private List<String> executeDDL(
      String ddlFileName,
      @Nullable String dataFileName,
      boolean isLocalTable,
      @Nullable String[] args)
      throws Exception {

    Path ddlFilePath = new Path(currentQueryPath, ddlFileName);
    FileSystem fs = ddlFilePath.getFileSystem(conf);
    assertTrue(ddlFilePath + " existence check", fs.exists(ddlFilePath));

    String template = FileUtil.readTextFile(new File(ddlFilePath.toUri()));
    String dataFilePath = null;
    if (dataFileName != null) {
      dataFilePath = getDataSetFile(dataFileName).toString();
    }
    String compiled = compileTemplate(template, dataFilePath, args);

    List<ParsedResult> parsedResults = SimpleParser.parseScript(compiled);
    List<String> createdTableNames = new ArrayList<String>();

    for (ParsedResult parsedResult : parsedResults) {
      // parse a statement
      Expr expr = sqlParser.parse(parsedResult.getStatement());
      assertNotNull(ddlFilePath + " cannot be parsed", expr);

      if (expr.getType() == OpType.CreateTable) {
        CreateTable createTable = (CreateTable) expr;
        String tableName = createTable.getTableName();
        assertTrue("Table creation is failed.", client.updateQuery(parsedResult.getStatement()));
        TableDesc createdTable = client.getTableDesc(tableName);
        String createdTableName = createdTable.getName();

        assertTrue(
            "table '" + createdTableName + "' creation check", client.existTable(createdTableName));
        if (isLocalTable) {
          createdTableGlobalSet.add(createdTableName);
          createdTableNames.add(tableName);
        }
      } else if (expr.getType() == OpType.DropTable) {
        DropTable dropTable = (DropTable) expr;
        String tableName = dropTable.getTableName();
        assertTrue(
            "table '" + tableName + "' existence check",
            client.existTable(CatalogUtil.buildFQName(currentDatabase, tableName)));
        assertTrue("table drop is failed.", client.updateQuery(parsedResult.getStatement()));
        assertFalse(
            "table '" + tableName + "' dropped check",
            client.existTable(CatalogUtil.buildFQName(currentDatabase, tableName)));
        if (isLocalTable) {
          createdTableGlobalSet.remove(tableName);
        }
      } else {
        fail(ddlFilePath + " is not a Create or Drop Table statement");
      }
    }

    return createdTableNames;
  }
Example #4
  public void upgradeBaseSchema(Connection conn, int currentVersion) {
    if (!isLoaded()) {
      throw new TajoInternalError("Database schema files are not loaded.");
    }

    final List<SchemaPatch> candidatePatches = new ArrayList<>();
    Statement stmt;

    for (SchemaPatch patch : this.catalogStore.getPatches()) {
      if (currentVersion >= patch.getPriorVersion()) {
        candidatePatches.add(patch);
      }
    }

    Collections.sort(candidatePatches);
    try {
      stmt = conn.createStatement();
    } catch (SQLException e) {
      throw new TajoInternalError(e);
    }

    try {
      for (SchemaPatch patch : candidatePatches) {
        for (DatabaseObject object : patch.getObjects()) {
          try {
            stmt.executeUpdate(object.getSql());
            LOG.info(object.getName() + " " + object.getType() + " was created or altered.");
          } catch (SQLException e) {
            throw new TajoInternalError(e);
          }
        }
      }
    } finally {
      // Close the statement even when a patch fails, to avoid leaking it.
      CatalogUtil.closeQuietly(stmt);
    }
  }
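For reference, here is a minimal, self-contained sketch of the version-gating logic above, with a hypothetical Patch record standing in for Tajo's SchemaPatch (illustrative only, not Tajo's actual API):

  import java.util.ArrayList;
  import java.util.Collections;
  import java.util.List;

  public class PatchSelectionSketch {
    // Hypothetical stand-in for Tajo's SchemaPatch, ordered by prior version.
    record Patch(int priorVersion, String sql) implements Comparable<Patch> {
      @Override
      public int compareTo(Patch other) {
        return Integer.compare(priorVersion, other.priorVersion);
      }
    }

    public static void main(String[] args) {
      List<Patch> available =
          List.of(new Patch(3, "ALTER ..."), new Patch(1, "CREATE ..."), new Patch(5, "DROP ..."));
      int currentVersion = 3;

      // Same gate as upgradeBaseSchema(): keep patches whose prior version
      // is at or below the currently stored schema version.
      List<Patch> candidates = new ArrayList<>();
      for (Patch patch : available) {
        if (currentVersion >= patch.priorVersion()) {
          candidates.add(patch);
        }
      }

      Collections.sort(candidates); // apply the oldest applicable patches first
      candidates.forEach(patch -> System.out.println("would execute: " + patch.sql()));
    }
  }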
Example #5
  @Test
  public void testSkippingHeaderWithText() throws IOException {
    TableMeta meta = CatalogUtil.newTableMeta("TEXT");
    meta.putOption(StorageConstants.TEXT_SKIP_HEADER_LINE, "1");
    meta.putOption(StorageConstants.TEXT_DELIMITER, ",");
    FileFragment fragment = getFileFragment("testSkip.txt");
    Scanner scanner = TablespaceManager.getLocalFs().getScanner(meta, schema, fragment);

    scanner.init();

    int lines = 0;

    try {
      Tuple tuple;
      while ((tuple = scanner.next()) != null) {
        assertEquals(17 + lines, tuple.getInt2(2));
        lines++;
      }
    } finally {
      assertEquals(6, lines);
      scanner.close();
    }
  }
Example #6
  private boolean checkExistenceByQuery(
      PreparedStatement pstmt, BaseSchema baseSchema, String... params) throws SQLException {
    int paramIdx = 1;
    boolean result = false;

    if (baseSchema.getSchemaName() != null && !baseSchema.getSchemaName().isEmpty()) {
      pstmt.setString(paramIdx++, baseSchema.getSchemaName().toUpperCase());
    }

    for (; paramIdx <= pstmt.getParameterMetaData().getParameterCount(); paramIdx++) {
      pstmt.setString(paramIdx, params[paramIdx - 1].toUpperCase());
    }

    ResultSet rs = null;
    try {
      rs = pstmt.executeQuery();
      while (rs.next()) {
        if (rs.getString(1).equalsIgnoreCase(params[params.length - 1])) {
          result = true;
          break;
        }
      }
    } finally {
      CatalogUtil.closeQuietly(rs);
    }

    return result;
  }
Example #7
  private List<Tuple> fetchSystemTable(TableDesc tableDesc, Schema inSchema) {
    List<Tuple> tuples = null;
    String tableName = CatalogUtil.extractSimpleName(tableDesc.getName());

    if ("tablespace".equalsIgnoreCase(tableName)) {
      tuples = getTablespaces(inSchema);
    } else if ("databases".equalsIgnoreCase(tableName)) {
      tuples = getDatabases(inSchema);
    } else if ("tables".equalsIgnoreCase(tableName)) {
      tuples = getTables(inSchema);
    } else if ("columns".equalsIgnoreCase(tableName)) {
      tuples = getColumns(inSchema);
    } else if ("indexes".equalsIgnoreCase(tableName)) {
      tuples = getIndexes(inSchema);
    } else if ("table_options".equalsIgnoreCase(tableName)) {
      tuples = getAllTableOptions(inSchema);
    } else if ("table_stats".equalsIgnoreCase(tableName)) {
      tuples = getAllTableStats(inSchema);
    } else if ("partitions".equalsIgnoreCase(tableName)) {
      tuples = getAllPartitions(inSchema);
    } else if ("cluster".equalsIgnoreCase(tableName)) {
      tuples = getClusterInfo(inSchema);
    } else if ("session".equalsIgnoreCase(tableName)) {
      tuples = getSessionInfo(inSchema);
    }

    return tuples;
  }
Example #8
  @BeforeClass
  public static void setUp() throws Exception {
    util = new TajoTestingCluster();
    util.startCatalogCluster();
    catalog = util.getMiniCatalogCluster().getCatalog();

    Schema schema = new Schema();
    schema.addColumn("name", Type.TEXT);
    schema.addColumn("empId", CatalogUtil.newSimpleDataType(Type.INT4));
    schema.addColumn("deptName", Type.TEXT);

    Schema schema2 = new Schema();
    schema2.addColumn("deptName", Type.TEXT);
    schema2.addColumn("manager", Type.TEXT);

    Schema schema3 = new Schema();
    schema3.addColumn("deptName", Type.TEXT);
    schema3.addColumn("score", CatalogUtil.newSimpleDataType(Type.INT4));

    TableMeta meta = CatalogUtil.newTableMeta(StoreType.CSV);
    TableDesc employee = new TableDesc("employee", schema, meta, CommonTestingUtil.getTestDir());
    catalog.addTable(employee);

    TableDesc dept =
        new TableDesc(
            "dept", schema2, StoreType.CSV, new Options(), CommonTestingUtil.getTestDir());
    catalog.addTable(dept);

    TableDesc score =
        new TableDesc(
            "score", schema3, StoreType.CSV, new Options(), CommonTestingUtil.getTestDir());
    catalog.addTable(score);

    FunctionDesc funcDesc =
        new FunctionDesc(
            "sumtest",
            SumInt.class,
            FunctionType.AGGREGATION,
            CatalogUtil.newSimpleDataType(Type.INT4),
            CatalogUtil.newSimpleDataTypeArray(Type.INT4));

    catalog.createFunction(funcDesc);
    analyzer = new SQLAnalyzer();
    planner = new LogicalPlanner(catalog);
  }
Example #9
 public ResultSet getQueryResult(QueryId queryId) throws ServiceException, IOException {
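   // A NULL_QUERY_ID means no materialized result exists; return an empty result set instead.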
   if (queryId.equals(QueryIdFactory.NULL_QUERY_ID)) {
     return createNullResultSet(queryId);
   }
   GetQueryResultResponse response = getResultResponse(queryId);
   TableDesc tableDesc = CatalogUtil.newTableDesc(response.getTableDesc());
   conf.setVar(ConfVars.USERNAME, response.getTajoUserName());
   return new TajoResultSet(this, queryId, conf, tableDesc);
 }
Example #10
  private void checkIndexExistence(
      final QueryContext queryContext, final CreateIndexNode createIndexNode)
      throws DuplicateIndexException {

    String databaseName, simpleIndexName, qualifiedIndexName;
    if (CatalogUtil.isFQTableName(createIndexNode.getIndexName())) {
      String[] splits = CatalogUtil.splitFQTableName(createIndexNode.getIndexName());
      databaseName = splits[0];
      simpleIndexName = splits[1];
      qualifiedIndexName = createIndexNode.getIndexName();
    } else {
      databaseName = queryContext.getCurrentDatabase();
      simpleIndexName = createIndexNode.getIndexName();
      qualifiedIndexName = CatalogUtil.buildFQName(databaseName, simpleIndexName);
    }

    if (catalog.existIndexByName(databaseName, simpleIndexName)) {
      throw new DuplicateIndexException(qualifiedIndexName);
    }
  }
Example #11
  @Test
  public final void testComparatorsFromJoinQual() {
    Schema outerSchema = new Schema();
    outerSchema.addColumn("employee.id1", CatalogUtil.newSimpleDataType(Type.INT4));
    outerSchema.addColumn("employee.id2", CatalogUtil.newSimpleDataType(Type.INT4));
    Schema innerSchema = new Schema();
    innerSchema.addColumn("people.fid1", CatalogUtil.newSimpleDataType(Type.INT4));
    innerSchema.addColumn("people.fid2", CatalogUtil.newSimpleDataType(Type.INT4));

    FieldEval f1 = new FieldEval("employee.id1", CatalogUtil.newSimpleDataType(Type.INT4));
    FieldEval f2 = new FieldEval("people.fid1", CatalogUtil.newSimpleDataType(Type.INT4));
    FieldEval f3 = new FieldEval("employee.id2", CatalogUtil.newSimpleDataType(Type.INT4));
    FieldEval f4 = new FieldEval("people.fid2", CatalogUtil.newSimpleDataType(Type.INT4));

    EvalNode joinQual = new BinaryEval(EvalType.EQUAL, f1, f2);
    TupleComparator[] comparators =
        PlannerUtil.getComparatorsFromJoinQual(joinQual, outerSchema, innerSchema);

    Tuple t1 = new VTuple(2);
    t1.put(0, DatumFactory.createInt4(1));
    t1.put(1, DatumFactory.createInt4(2));

    Tuple t2 = new VTuple(2);
    t2.put(0, DatumFactory.createInt4(2));
    t2.put(1, DatumFactory.createInt4(3));

    TupleComparator outerComparator = comparators[0];
    assertTrue(outerComparator.compare(t1, t2) < 0);
    assertTrue(outerComparator.compare(t2, t1) > 0);

    TupleComparator innerComparator = comparators[1];
    assertTrue(innerComparator.compare(t1, t2) < 0);
    assertTrue(innerComparator.compare(t2, t1) > 0);

    // tests for composited join key
    EvalNode joinQual2 = new BinaryEval(EvalType.EQUAL, f3, f4);
    EvalNode compositedJoinQual = new BinaryEval(EvalType.AND, joinQual, joinQual2);
    comparators =
        PlannerUtil.getComparatorsFromJoinQual(compositedJoinQual, outerSchema, innerSchema);

    outerComparator = comparators[0];
    assertTrue(outerComparator.compare(t1, t2) < 0);
    assertTrue(outerComparator.compare(t2, t1) > 0);

    innerComparator = comparators[1];
    assertTrue(innerComparator.compare(t1, t2) < 0);
    assertTrue(innerComparator.compare(t2, t1) > 0);
  }
Example #12
  public HashShuffleFileWriteExec(
      TaskAttemptContext context, final ShuffleFileWriteNode plan, final PhysicalExec child)
      throws IOException {
    super(context, plan.getInSchema(), plan.getOutSchema(), child);
    Preconditions.checkArgument(plan.hasShuffleKeys());
    this.plan = plan;
    if (plan.hasOptions()) {
      this.meta = CatalogUtil.newTableMeta(plan.getStorageType(), plan.getOptions());
    } else {
      this.meta = CatalogUtil.newTableMeta(plan.getStorageType());
    }
    // Shuffle setup: resolve each shuffle key to its column index in the input schema.
    this.numShuffleOutputs = this.plan.getNumOutputs();
    int i = 0;
    this.shuffleKeyIds = new int[this.plan.getShuffleKeys().length];
    for (Column key : this.plan.getShuffleKeys()) {
      shuffleKeyIds[i] = inSchema.getColumnId(key.getQualifiedName());
      i++;
    }
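    // Route each row to one of numShuffleOutputs partitions by hashing its shuffle key columns.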
    this.partitioner = new HashPartitioner(shuffleKeyIds, numShuffleOutputs);
    this.hashShuffleAppenderManager = context.getHashShuffleAppenderManager();
    this.maxBufferSize =
        context.getQueryContext().getInt(SessionVars.HASH_SHUFFLE_BUFFER_SIZE) * StorageUnit.MB;
    this.bufferThreshold = (int) (maxBufferSize * BUFFER_THRESHOLD_FACTOR);
    this.dataTypes = SchemaUtil.toDataTypes(outSchema);

    if (numShuffleOutputs > 0) {
      // Calculate the initial buffer size from the total number of partitions; each buffer is 4KB ~ 1MB.
      this.initialBufferSize =
          Math.min(
              MAXIMUM_INITIAL_BUFFER_SIZE,
              Math.max(maxBufferSize / numShuffleOutputs, MINIMUM_INITIAL_BUFFER_SIZE));
    } else {
      this.initialBufferSize = MINIMUM_INITIAL_BUFFER_SIZE;
    }

    this.partitionMemoryMap = Maps.newHashMap();
  }
Example #13
  @Test
  public final void testCtasWithTextFile() throws Exception {
    ResultSet res = executeFile("CtasWithTextFile.sql");
    res.close();

    ResultSet res2 = executeQuery();
    resultSetToString(res2);
    res2.close();

    TableDesc desc =
        client.getTableDesc(CatalogUtil.normalizeIdentifier(res2.getMetaData().getTableName(1)));
    assertNotNull(desc);
    assertTrue("TEXT".equalsIgnoreCase(desc.getMeta().getStoreType()));
  }
Example #14
  @Test
  public void testIgnoreTruncatedValueErrorTolerance() throws IOException {
    TajoConf conf = new TajoConf();
    TableMeta meta = CatalogUtil.newTableMeta("JSON");
    meta.putOption(StorageUtil.TEXT_ERROR_TOLERANCE_MAXNUM, "1");
    FileFragment fragment = getFileFragment("testErrorTolerance3.json");
    Scanner scanner = TablespaceManager.getLocalFs().getScanner(meta, schema, fragment);
    scanner.init();

    try {
      Tuple tuple = scanner.next();
      assertNull(tuple);
    } finally {
      scanner.close();
    }
  }
Example #15
  @Test
  public final void testGetJoinKeyPairs() {
    Schema outerSchema = new Schema();
    outerSchema.addColumn("employee.id1", CatalogUtil.newSimpleDataType(Type.INT4));
    outerSchema.addColumn("employee.id2", CatalogUtil.newSimpleDataType(Type.INT4));
    Schema innerSchema = new Schema();
    innerSchema.addColumn("people.fid1", CatalogUtil.newSimpleDataType(Type.INT4));
    innerSchema.addColumn("people.fid2", CatalogUtil.newSimpleDataType(Type.INT4));

    FieldEval f1 = new FieldEval("employee.id1", CatalogUtil.newSimpleDataType(Type.INT4));
    FieldEval f2 = new FieldEval("people.fid1", CatalogUtil.newSimpleDataType(Type.INT4));
    FieldEval f3 = new FieldEval("employee.id2", CatalogUtil.newSimpleDataType(Type.INT4));
    FieldEval f4 = new FieldEval("people.fid2", CatalogUtil.newSimpleDataType(Type.INT4));

    EvalNode joinQual = new BinaryEval(EvalType.EQUAL, f1, f2);

    // the case where employee is the outer and people is the inner relation.
    List<Column[]> pairs = PlannerUtil.getJoinKeyPairs(joinQual, outerSchema, innerSchema);
    assertEquals(1, pairs.size());
    assertEquals("employee.id1", pairs.get(0)[0].getQualifiedName());
    assertEquals("people.fid1", pairs.get(0)[1].getQualifiedName());

    // after exchange of outer and inner
    pairs = PlannerUtil.getJoinKeyPairs(joinQual, innerSchema, outerSchema);
    assertEquals("people.fid1", pairs.get(0)[0].getQualifiedName());
    assertEquals("employee.id1", pairs.get(0)[1].getQualifiedName());

    // composited join key test
    EvalNode joinQual2 = new BinaryEval(EvalType.EQUAL, f3, f4);
    EvalNode compositedJoinQual = new BinaryEval(EvalType.AND, joinQual, joinQual2);
    pairs = PlannerUtil.getJoinKeyPairs(compositedJoinQual, outerSchema, innerSchema);
    assertEquals(2, pairs.size());
    assertEquals("employee.id1", pairs.get(0)[0].getQualifiedName());
    assertEquals("people.fid1", pairs.get(0)[1].getQualifiedName());
    assertEquals("employee.id2", pairs.get(1)[0].getQualifiedName());
    assertEquals("people.fid2", pairs.get(1)[1].getQualifiedName());

    // after exchange of outer and inner
    pairs = PlannerUtil.getJoinKeyPairs(compositedJoinQual, innerSchema, outerSchema);
    assertEquals(2, pairs.size());
    assertEquals("people.fid1", pairs.get(0)[0].getQualifiedName());
    assertEquals("employee.id1", pairs.get(0)[1].getQualifiedName());
    assertEquals("people.fid2", pairs.get(1)[0].getQualifiedName());
    assertEquals("employee.id2", pairs.get(1)[1].getQualifiedName());
  }
Example #16
  @Before
  public void setUp() throws Exception {
    this.conf = new TajoConf();
    util = new TajoTestingCluster();
    catalog = util.startCatalogCluster().getCatalog();
    testDir = CommonTestingUtil.getTestDir(TEST_PATH);
    conf.setVar(TajoConf.ConfVars.WORKER_TEMPORAL_DIR, testDir.toString());
    sm = StorageManagerFactory.getStorageManager(conf, testDir);

    Schema schema = new Schema();
    schema.addColumn("managerId", Type.INT4);
    schema.addColumn("empId", Type.INT4);
    schema.addColumn("deptName", Type.TEXT);

    TableMeta employeeMeta = CatalogUtil.newTableMeta(StoreType.CSV);
    Path employeePath = new Path(testDir, "employee.csv");
    Appender appender =
        StorageManagerFactory.getStorageManager(conf)
            .getAppender(employeeMeta, schema, employeePath);
    appender.enableStats();
    appender.init();
    Tuple tuple = new VTuple(schema.getColumnNum());
    for (int i = 0; i < numTuple; i++) {
      tuple.put(
          new Datum[] {
            DatumFactory.createInt4(rnd.nextInt(50)),
            DatumFactory.createInt4(rnd.nextInt(100)),
            DatumFactory.createText("dept_" + i),
          });
      appender.addTuple(tuple);
    }
    appender.flush();
    appender.close();

    System.out.println(
        appender.getStats().getNumRows()
            + " rows ("
            + (appender.getStats().getNumBytes() / 1048576)
            + " MB)");

    employee = new TableDesc("employee", schema, employeeMeta, employeePath);
    catalog.addTable(employee);
    analyzer = new SQLAnalyzer();
    planner = new LogicalPlanner(catalog);
  }
Example #17
  @Test
  public void testNoErrorTolerance() throws IOException {
    TajoConf conf = new TajoConf();
    TableMeta meta = CatalogUtil.newTableMeta("JSON");
    meta.putOption(StorageUtil.TEXT_ERROR_TOLERANCE_MAXNUM, "0");
    FileFragment fragment = getFileFragment("testErrorTolerance2.json");
    Scanner scanner = TablespaceManager.getLocalFs().getScanner(meta, schema, fragment);
    scanner.init();

    try {
      scanner.next();
    } catch (IOException ioe) {
      return;
    } finally {
      scanner.close();
    }
    fail();
  }
Example #18
  @Test
  public final void testCtasWithoutTableDefinition() throws Exception {
    ResultSet res = executeQuery();
    res.close();

    String tableName = CatalogUtil.normalizeIdentifier("testCtasWithoutTableDefinition");
    CatalogService catalog = testBase.getTestingCluster().getMaster().getCatalog();
    String qualifiedTableName = buildFQName(DEFAULT_DATABASE_NAME, tableName);
    TableDesc desc = catalog.getTableDesc(qualifiedTableName);
    assertTrue(catalog.existsTable(qualifiedTableName));

    assertTrue(desc.getSchema().contains("default.testctaswithouttabledefinition.col1"));
    PartitionMethodDesc partitionDesc = desc.getPartitionMethod();
    assertEquals(partitionDesc.getPartitionType(), CatalogProtos.PartitionType.COLUMN);
    assertEquals(
        "key", partitionDesc.getExpressionSchema().getRootColumns().get(0).getSimpleName());

    FileSystem fs = FileSystem.get(testBase.getTestingCluster().getConfiguration());
    Path path = new Path(desc.getUri());
    assertTrue(fs.isDirectory(path));
    assertTrue(fs.isDirectory(new Path(path.toUri() + "/key=17.0")));
    assertTrue(fs.isDirectory(new Path(path.toUri() + "/key=36.0")));
    assertTrue(fs.isDirectory(new Path(path.toUri() + "/key=38.0")));
    assertTrue(fs.isDirectory(new Path(path.toUri() + "/key=45.0")));
    assertTrue(fs.isDirectory(new Path(path.toUri() + "/key=49.0")));
    if (!testingCluster.isHiveCatalogStoreRunning()) {
      assertEquals(5, desc.getStats().getNumRows().intValue());
    }

    ResultSet res2 = executeFile("check1.sql");

    Map<Double, int[]> resultRows1 = Maps.newHashMap();
    resultRows1.put(45.0d, new int[] {3, 2});
    resultRows1.put(38.0d, new int[] {2, 2});

    int i = 0;
    while (res2.next()) {
      assertEquals(resultRows1.get(res2.getDouble(3))[0], res2.getInt(1));
      assertEquals(resultRows1.get(res2.getDouble(3))[1], res2.getInt(2));
      i++;
    }
    res2.close();
    assertEquals(2, i);
  }
Example #19
  @Test
  public void testIgnoreAllErrors() throws IOException {
    TajoConf conf = new TajoConf();

    TableMeta meta = CatalogUtil.newTableMeta("JSON");
    meta.putOption(StorageUtil.TEXT_ERROR_TOLERANCE_MAXNUM, "-1");
    FileFragment fragment = getFileFragment("testErrorTolerance1.json");
    Scanner scanner = TablespaceManager.getLocalFs().getScanner(meta, schema, fragment);
    scanner.init();

    Tuple tuple;
    int i = 0;
    while ((tuple = scanner.next()) != null) {
      assertEquals(baseTuple, tuple);
      i++;
    }
    assertEquals(3, i);
    scanner.close();
  }
Example #20
  @Test
  public final void testCtasWithOptions() throws Exception {
    ResultSet res = executeFile("CtasWithOptions.sql");
    res.close();

    ResultSet res2 = executeQuery();
    resultSetToString(res2);
    res2.close();

    TableDesc desc =
        client.getTableDesc(CatalogUtil.normalizeIdentifier(res2.getMetaData().getTableName(1)));
    assertNotNull(desc);
    assertTrue("CSV".equalsIgnoreCase(desc.getMeta().getStoreType()));

    KeyValueSet options = desc.getMeta().getOptions();
    assertNotNull(options);
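    // The delimiter option is stored in Java-escaped form, so the U+0001 delimiter is compared via escapeJava.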
    assertEquals(
        StringEscapeUtils.escapeJava("\u0001"), options.get(StorageConstants.TEXT_DELIMITER));
  }
Example #21
  public void dropBaseSchema(Connection conn) {
    if (!isLoaded()) {
      throw new TajoInternalError("Schema files are not loaded yet.");
    }

    List<DatabaseObject> failedObjects = new ArrayList<>();
    Statement stmt = null;

    try {
      stmt = conn.createStatement();
    } catch (SQLException e) {
      throw new TajoInternalError(e);
    }

    for (DatabaseObject object : catalogStore.getSchema().getObjects()) {
      try {
        if (DatabaseObjectType.TABLE == object.getType()
            || DatabaseObjectType.SEQUENCE == object.getType()
            || DatabaseObjectType.VIEW == object.getType()) {
          stmt.executeUpdate(getDropSQL(object.getType(), object.getName()));
        }
      } catch (SQLException e) {
        failedObjects.add(object);
      }
    }
    CatalogUtil.closeQuietly(stmt);

    if (failedObjects.size() > 0) {
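      // Summarize every failed drop in one warning instead of aborting midway.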
      StringBuilder errorMessage = new StringBuilder(64);
      errorMessage.append("Failed to drop database objects ");

      for (int idx = 0; idx < failedObjects.size(); idx++) {
        DatabaseObject object = failedObjects.get(idx);
        errorMessage.append(object.getType().toString()).append(" ").append(object.getName());
        if ((idx + 1) < failedObjects.size()) {
          errorMessage.append(',');
        }
      }

      LOG.warn(errorMessage.toString());
    }
  }
Example #22
  @Test
  public final void testGetSortKeysFromJoinQual() {
    Schema outerSchema = new Schema();
    outerSchema.addColumn("employee.id1", CatalogUtil.newSimpleDataType(Type.INT4));
    outerSchema.addColumn("employee.id2", CatalogUtil.newSimpleDataType(Type.INT4));
    Schema innerSchema = new Schema();
    innerSchema.addColumn("people.fid1", CatalogUtil.newSimpleDataType(Type.INT4));
    innerSchema.addColumn("people.fid2", CatalogUtil.newSimpleDataType(Type.INT4));

    FieldEval f1 = new FieldEval("employee.id1", CatalogUtil.newSimpleDataType(Type.INT4));
    FieldEval f2 = new FieldEval("people.fid1", CatalogUtil.newSimpleDataType(Type.INT4));
    FieldEval f3 = new FieldEval("employee.id2", CatalogUtil.newSimpleDataType(Type.INT4));
    FieldEval f4 = new FieldEval("people.fid2", CatalogUtil.newSimpleDataType(Type.INT4));

    EvalNode joinQual = new BinaryEval(EvalType.EQUAL, f1, f2);
    SortSpec[][] sortSpecs =
        PlannerUtil.getSortKeysFromJoinQual(joinQual, outerSchema, innerSchema);
    assertEquals(2, sortSpecs.length);
    assertEquals(1, sortSpecs[0].length);
    assertEquals(1, sortSpecs[1].length);
    assertEquals(outerSchema.getColumn("id1"), sortSpecs[0][0].getSortKey());
    assertEquals(innerSchema.getColumn("fid1"), sortSpecs[1][0].getSortKey());

    // tests for composited join key
    EvalNode joinQual2 = new BinaryEval(EvalType.EQUAL, f3, f4);
    EvalNode compositedJoinQual = new BinaryEval(EvalType.AND, joinQual, joinQual2);

    sortSpecs = PlannerUtil.getSortKeysFromJoinQual(compositedJoinQual, outerSchema, innerSchema);
    assertEquals(2, sortSpecs.length);
    assertEquals(2, sortSpecs[0].length);
    assertEquals(2, sortSpecs[1].length);
    assertEquals(outerSchema.getColumn("id1"), sortSpecs[0][0].getSortKey());
    assertEquals(outerSchema.getColumn("id2"), sortSpecs[0][1].getSortKey());
    assertEquals(innerSchema.getColumn("fid1"), sortSpecs[1][0].getSortKey());
    assertEquals(innerSchema.getColumn("fid2"), sortSpecs[1][1].getSortKey());
  }
Example #23
  @Test
  public void testAlterTableAddDropPartition() throws Exception {
    String tableName = CatalogUtil.normalizeIdentifier("testAlterTableAddPartition");

    tajoCli.executeScript(
        "create table " + tableName + " (col1 int4, col2 int4) partition by column(key float8)");
    tajoCli.executeScript("alter table " + tableName + " add partition (key2 = 0.1)");
    tajoCli.executeScript("alter table " + tableName + " add partition (key = 0.1)");
    tajoCli.executeScript("alter table " + tableName + " drop partition (key = 0.1)");
    tajoCli.executeScript("alter table " + tableName + " drop partition (key = 0.1)");

    tajoCli.executeScript("drop table " + tableName);
    tajoCli.executeScript(
        "create table "
            + tableName
            + " (col1 int4, col2 int4) partition by column(col3 float8, col4 int4)");

    TajoClient client = testBase.getTestingCluster().newTajoClient();
    TableDesc tableDesc = client.getTableDesc(tableName);

    String partitionLocation = tableDesc.getUri().toString() + "/col5=0.1/col6=10";
    tajoCli.executeScript(
        "alter table "
            + tableName
            + " add partition (col3 = 0.1, col4 = 10)"
            + " location '"
            + partitionLocation
            + "'");

    Path partitionPath = new Path(partitionLocation);
    FileSystem fs = testBase.getTestingCluster().getDefaultFileSystem();
    assertTrue(fs.exists(partitionPath));

    tajoCli.executeScript("alter table " + tableName + " drop partition (col3 = 0.1, col4 = 10)");

    String consoleResult = new String(out.toByteArray());
    assertOutputResult(consoleResult);
  }
Example #24
  public void createBaseSchema(Connection conn) {
    Statement stmt;

    if (!isLoaded()) {
      throw new TajoInternalError("Database schema files are not loaded.");
    }

    try {
      stmt = conn.createStatement();
    } catch (SQLException e) {
      throw new TajoInternalError(e);
    }

    try {
      for (DatabaseObject object : catalogStore.getSchema().getObjects()) {
        try {
          String[] params;
          if (DatabaseObjectType.INDEX == object.getType()) {
            params = new String[2];
            params[0] = object.getDependsOn();
            params[1] = object.getName();
          } else {
            params = new String[1];
            params[0] = object.getName();
          }

          if (checkExistence(conn, object.getType(), params)) {
            LOG.info("Skipping creation of " + object.getName() + " database object: it already exists.");
          } else {
            stmt.executeUpdate(object.getSql());
            LOG.info(object.getName() + " " + object.getType() + " is created.");
          }
        } catch (SQLException e) {
          throw new TajoInternalError(e);
        }
      }
    } finally {
      // Close the statement even when object creation fails, to avoid leaking it.
      CatalogUtil.closeQuietly(stmt);
    }
  }
Example #25
  private final void assertPathOfCreatedTable(
      final String databaseName,
      final String originalTableName,
      final String newTableName,
      String createTableStmt)
      throws Exception {
    // create one table
    executeString("CREATE DATABASE " + CatalogUtil.denormalizeIdentifier(databaseName)).close();
    assertTrue(getClient().existDatabase(CatalogUtil.denormalizeIdentifier(databaseName)));
    final String oldFQTableName = CatalogUtil.buildFQName(databaseName, originalTableName);

    ResultSet res = executeString(createTableStmt);
    res.close();
    assertTableExists(oldFQTableName);
    TableDesc oldTableDesc = client.getTableDesc(oldFQTableName);

    // checking the existence of the table directory and validating the path
    Path warehouseDir = TajoConf.getWarehouseDir(testingCluster.getConfiguration());
    FileSystem fs = warehouseDir.getFileSystem(testingCluster.getConfiguration());
    assertTrue(fs.exists(new Path(oldTableDesc.getUri())));
    assertEquals(
        StorageUtil.concatPath(warehouseDir, databaseName, originalTableName),
        new Path(oldTableDesc.getUri()));

    // Rename
    client.executeQuery(
        "ALTER TABLE "
            + CatalogUtil.denormalizeIdentifier(oldFQTableName)
            + " RENAME to "
            + CatalogUtil.denormalizeIdentifier(newTableName));

    // checking the existence of the new table directory and validating the path
    final String newFQTableName = CatalogUtil.buildFQName(databaseName, newTableName);
    TableDesc newTableDesc = client.getTableDesc(newFQTableName);
    assertTrue(fs.exists(new Path(newTableDesc.getUri())));
    assertEquals(
        StorageUtil.concatPath(warehouseDir, databaseName, newTableName),
        new Path(newTableDesc.getUri()));
  }
Example #26
 @Override
 public DataType getPartialResultType() {
   return CatalogUtil.newSimpleDataType(Type.INT8);
 }
Example #28
public class TestRightOuterMergeJoinExec {
  private TajoConf conf;
  private final String TEST_PATH =
      TajoTestingCluster.DEFAULT_TEST_DIRECTORY + "/TestRightOuterMergeJoinExec";
  private TajoTestingCluster util;
  private CatalogService catalog;
  private SQLAnalyzer analyzer;
  private LogicalPlanner planner;
  private Path testDir;
  private QueryContext defaultContext;

  private TableDesc dep3;
  private TableDesc dep4;
  private TableDesc job3;
  private TableDesc emp3;
  private TableDesc phone3;

  private final String DEP3_NAME = CatalogUtil.buildFQName(DEFAULT_DATABASE_NAME, "dep3");
  private final String DEP4_NAME = CatalogUtil.buildFQName(DEFAULT_DATABASE_NAME, "dep4");
  private final String JOB3_NAME = CatalogUtil.buildFQName(DEFAULT_DATABASE_NAME, "job3");
  private final String EMP3_NAME = CatalogUtil.buildFQName(DEFAULT_DATABASE_NAME, "emp3");
  private final String PHONE3_NAME = CatalogUtil.buildFQName(DEFAULT_DATABASE_NAME, "phone3");

  @Before
  public void setUp() throws Exception {
    util = new TajoTestingCluster();
    util.initTestDir();
    util.startCatalogCluster();
    catalog = util.getCatalogService();
    testDir = CommonTestingUtil.getTestDir(TEST_PATH);
    catalog.createTablespace(DEFAULT_TABLESPACE_NAME, testDir.toUri().toString());
    catalog.createDatabase(DEFAULT_DATABASE_NAME, DEFAULT_TABLESPACE_NAME);
    conf = util.getConfiguration();

    // ----------------- dep3 ------------------------------
    // dep_id | dep_name  | loc_id
    // --------------------------------
    //  0     | dep_0     | 1000
    //  1     | dep_1     | 1001
    //  2     | dep_2     | 1002
    //  3     | dep_3     | 1003
    //  4     | dep_4     | 1004
    //  5     | dep_5     | 1005
    //  6     | dep_6     | 1006
    //  7     | dep_7     | 1007
    //  8     | dep_8     | 1008
    //  9     | dep_9     | 1009
    Schema dep3Schema = new Schema();
    dep3Schema.addColumn("dep_id", Type.INT4);
    dep3Schema.addColumn("dep_name", Type.TEXT);
    dep3Schema.addColumn("loc_id", Type.INT4);

    TableMeta dep3Meta = CatalogUtil.newTableMeta("TEXT");
    Path dep3Path = new Path(testDir, "dep3.csv");
    Appender appender1 =
        ((FileTablespace) TablespaceManager.getLocalFs())
            .getAppender(dep3Meta, dep3Schema, dep3Path);
    appender1.init();
    VTuple tuple = new VTuple(dep3Schema.size());
    for (int i = 0; i < 10; i++) {
      tuple.put(
          new Datum[] {
            DatumFactory.createInt4(i),
            DatumFactory.createText("dept_" + i),
            DatumFactory.createInt4(1000 + i)
          });
      appender1.addTuple(tuple);
    }

    appender1.flush();
    appender1.close();
    dep3 = CatalogUtil.newTableDesc(DEP3_NAME, dep3Schema, dep3Meta, dep3Path);
    catalog.createTable(dep3);

    // ----------------- dep4 ------------------------------
    // dep_id | dep_name  | loc_id
    // --------------------------------
    //  0     | dep_0     | 1000
    //  1     | dep_1     | 1001
    //  2     | dep_2     | 1002
    //  3     | dep_3     | 1003
    //  4     | dep_4     | 1004
    //  5     | dep_5     | 1005
    //  6     | dep_6     | 1006
    //  7     | dep_7     | 1007
    //  8     | dep_8     | 1008
    //  9     | dep_9     | 1009
    // 10     | dep_10    | 1010
    Schema dep4Schema = new Schema();
    dep4Schema.addColumn("dep_id", Type.INT4);
    dep4Schema.addColumn("dep_name", Type.TEXT);
    dep4Schema.addColumn("loc_id", Type.INT4);

    TableMeta dep4Meta = CatalogUtil.newTableMeta("TEXT");
    Path dep4Path = new Path(testDir, "dep4.csv");
    Appender appender4 =
        ((FileTablespace) TablespaceManager.getLocalFs())
            .getAppender(dep4Meta, dep4Schema, dep4Path);
    appender4.init();
    VTuple tuple4 = new VTuple(dep4Schema.size());
    for (int i = 0; i < 11; i++) {
      tuple4.put(
          new Datum[] {
            DatumFactory.createInt4(i),
            DatumFactory.createText("dept_" + i),
            DatumFactory.createInt4(1000 + i)
          });
      appender4.addTuple(tuple4);
    }

    appender4.flush();
    appender4.close();
    dep4 = CatalogUtil.newTableDesc(DEP4_NAME, dep4Schema, dep4Meta, dep4Path);
    catalog.createTable(dep4);

    // ----------------- job3 ------------------------------
    //  job_id  | job_title
    // ----------------------
    //   101    |  job_101
    //   102    |  job_102
    //   103    |  job_103

    Schema job3Schema = new Schema();
    job3Schema.addColumn("job_id", Type.INT4);
    job3Schema.addColumn("job_title", Type.TEXT);

    TableMeta job3Meta = CatalogUtil.newTableMeta("TEXT");
    Path job3Path = new Path(testDir, "job3.csv");
    Appender appender2 =
        ((FileTablespace) TablespaceManager.getLocalFs())
            .getAppender(job3Meta, job3Schema, job3Path);
    appender2.init();
    VTuple tuple2 = new VTuple(job3Schema.size());
    for (int i = 1; i < 4; i++) {
      int x = 100 + i;
      tuple2.put(
          new Datum[] {DatumFactory.createInt4(100 + i), DatumFactory.createText("job_" + x)});
      appender2.addTuple(tuple2);
    }

    appender2.flush();
    appender2.close();
    job3 = CatalogUtil.newTableDesc(JOB3_NAME, job3Schema, job3Meta, job3Path);
    catalog.createTable(job3);

    // ---------------------emp3 --------------------
    // emp_id  | first_name | last_name | dep_id | salary | job_id
    // ------------------------------------------------------------
    //  11     |  fn_11     |  ln_11    |  1     | 123    | 101
    //  13     |  fn_13     |  ln_13    |  3     | 369    | 103
    //  15     |  fn_15     |  ln_15    |  5     | 615    | null
    //  17     |  fn_17     |  ln_17    |  7     | 861    | null
    //  19     |  fn_19     |  ln_19    |  9     | 1107   | null
    //  21     |  fn_21     |  ln_21    |  1     | 123    | 101
    //  23     |  fn_23     |  ln_23    |  3     | 369    | 103

    Schema emp3Schema = new Schema();
    emp3Schema.addColumn("emp_id", Type.INT4);
    emp3Schema.addColumn("first_name", Type.TEXT);
    emp3Schema.addColumn("last_name", Type.TEXT);
    emp3Schema.addColumn("dep_id", Type.INT4);
    emp3Schema.addColumn("salary", Type.FLOAT4);
    emp3Schema.addColumn("job_id", Type.INT4);

    TableMeta emp3Meta = CatalogUtil.newTableMeta("TEXT");
    Path emp3Path = new Path(testDir, "emp3.csv");
    Appender appender3 =
        ((FileTablespace) TablespaceManager.getLocalFs())
            .getAppender(emp3Meta, emp3Schema, emp3Path);
    appender3.init();
    VTuple tuple3 = new VTuple(emp3Schema.size());

    for (int i = 1; i < 4; i += 2) {
      int x = 10 + i;
      tuple3.put(
          new Datum[] {
            DatumFactory.createInt4(10 + i),
            DatumFactory.createText("firstname_" + x),
            DatumFactory.createText("lastname_" + x),
            DatumFactory.createInt4(i),
            DatumFactory.createFloat4(123 * i),
            DatumFactory.createInt4(100 + i)
          });
      appender3.addTuple(tuple3);

      int y = 20 + i;
      tuple3.put(
          new Datum[] {
            DatumFactory.createInt4(20 + i),
            DatumFactory.createText("firstname_" + y),
            DatumFactory.createText("lastname_" + y),
            DatumFactory.createInt4(i),
            DatumFactory.createFloat4(123 * i),
            DatumFactory.createInt4(100 + i)
          });
      appender3.addTuple(tuple3);
    }

    for (int i = 5; i < 10; i += 2) {
      int x = 10 + i;
      tuple3.put(
          new Datum[] {
            DatumFactory.createInt4(10 + i),
            DatumFactory.createText("firstname_" + x),
            DatumFactory.createText("lastname_" + x),
            DatumFactory.createInt4(i),
            DatumFactory.createFloat4(123 * i),
            DatumFactory.createNullDatum()
          });
      appender3.addTuple(tuple3);
    }

    appender3.flush();
    appender3.close();
    emp3 = CatalogUtil.newTableDesc(EMP3_NAME, emp3Schema, emp3Meta, emp3Path);
    catalog.createTable(emp3);

    // ---------------------phone3 --------------------
    // emp_id  | phone_number
    // -----------------------------------------------
    // this table is empty, no rows

    Schema phone3Schema = new Schema();
    phone3Schema.addColumn("emp_id", Type.INT4);
    phone3Schema.addColumn("phone_number", Type.TEXT);

    TableMeta phone3Meta = CatalogUtil.newTableMeta("TEXT");
    Path phone3Path = new Path(testDir, "phone3.csv");
    Appender appender5 =
        ((FileTablespace) TablespaceManager.getLocalFs())
            .getAppender(phone3Meta, phone3Schema, phone3Path);
    appender5.init();

    appender5.flush();
    appender5.close();
    phone3 = CatalogUtil.newTableDesc(PHONE3_NAME, phone3Schema, phone3Meta, phone3Path);
    catalog.createTable(phone3);

    analyzer = new SQLAnalyzer();
    planner = new LogicalPlanner(catalog, TablespaceManager.getInstance());

    defaultContext = LocalTajoTestingUtility.createDummyContext(conf);
  }

  @After
  public void tearDown() throws Exception {
    util.shutdownCatalogCluster();
  }

  String[] QUERIES = {
    // [0] no nulls
    "select dep3.dep_id, dep_name, emp_id, salary from emp3 right outer join dep3 on dep3.dep_id = emp3.dep_id",
    // [1] nulls on the left operand
    "select job3.job_id, job_title, emp_id, salary from emp3 right outer join job3 on job3.job_id=emp3.job_id",
    // [2] nulls on the right side
    "select job3.job_id, job_title, emp_id, salary from job3 right outer join emp3 on job3.job_id=emp3.job_id",
    // [3] no nulls, right continues after left
    "select dep4.dep_id, dep_name, emp_id, salary from emp3 right outer join dep4 on dep4.dep_id = emp3.dep_id",
    // [4] one operand is empty
    "select emp3.emp_id, first_name, phone_number from emp3 right outer join phone3 on emp3.emp_id = phone3.emp_id",
    // [5] one operand is empty
    "select phone_number, emp3.emp_id, first_name from phone3 right outer join emp3 on emp3.emp_id = phone3.emp_id"
  };

  @Test
  public final void testRightOuterMergeJoin0() throws IOException, TajoException {
    Expr expr = analyzer.parse(QUERIES[0]);
    LogicalNode plan = planner.createPlan(defaultContext, expr).getRootBlock().getRoot();
    JoinNode joinNode = PlannerUtil.findTopNode(plan, NodeType.JOIN);
    Enforcer enforcer = new Enforcer();
    enforcer.enforceJoinAlgorithm(joinNode.getPID(), JoinAlgorithm.MERGE_JOIN);

    FileFragment[] emp3Frags =
        FileTablespace.splitNG(
            conf, EMP3_NAME, emp3.getMeta(), new Path(emp3.getUri()), Integer.MAX_VALUE);
    FileFragment[] dep3Frags =
        FileTablespace.splitNG(
            conf, DEP3_NAME, dep3.getMeta(), new Path(dep3.getUri()), Integer.MAX_VALUE);
    FileFragment[] merged = TUtil.concat(emp3Frags, dep3Frags);

    Path workDir =
        CommonTestingUtil.getTestDir(
            TajoTestingCluster.DEFAULT_TEST_DIRECTORY + "/testRightOuterMergeJoin0");
    TaskAttemptContext ctx =
        new TaskAttemptContext(
            new QueryContext(conf), LocalTajoTestingUtility.newTaskAttemptId(), merged, workDir);
    ctx.setEnforcer(enforcer);

    PhysicalPlanner phyPlanner = new PhysicalPlannerImpl(conf);
    PhysicalExec exec = phyPlanner.createPlan(ctx, plan);

    ProjectionExec proj = (ProjectionExec) exec;
    assertTrue(proj.getChild() instanceof RightOuterMergeJoinExec);

    int count = 0;
    exec.init();
    while (exec.next() != null) {
      // TODO check contents
      count = count + 1;
    }
    assertNull(exec.next());
    exec.close();
    assertEquals(12, count);
  }

  @Test
  public final void testRightOuter_MergeJoin1() throws IOException, TajoException {
    Expr expr = analyzer.parse(QUERIES[1]);
    LogicalNode plan = planner.createPlan(defaultContext, expr).getRootBlock().getRoot();
    JoinNode joinNode = PlannerUtil.findTopNode(plan, NodeType.JOIN);
    Enforcer enforcer = new Enforcer();
    enforcer.enforceJoinAlgorithm(joinNode.getPID(), JoinAlgorithm.MERGE_JOIN);

    FileFragment[] emp3Frags =
        FileTablespace.splitNG(
            conf, EMP3_NAME, emp3.getMeta(), new Path(emp3.getUri()), Integer.MAX_VALUE);
    FileFragment[] job3Frags =
        FileTablespace.splitNG(
            conf, JOB3_NAME, job3.getMeta(), new Path(job3.getUri()), Integer.MAX_VALUE);
    FileFragment[] merged = TUtil.concat(job3Frags, emp3Frags);

    Path workDir =
        CommonTestingUtil.getTestDir(
            TajoTestingCluster.DEFAULT_TEST_DIRECTORY + "/testRightOuterMergeJoin1");
    TaskAttemptContext ctx =
        new TaskAttemptContext(
            new QueryContext(conf), LocalTajoTestingUtility.newTaskAttemptId(), merged, workDir);
    ctx.setEnforcer(enforcer);

    PhysicalPlanner phyPlanner = new PhysicalPlannerImpl(conf);
    PhysicalExec exec = phyPlanner.createPlan(ctx, plan);

    ProjectionExec proj = (ProjectionExec) exec;
    assertTrue(proj.getChild() instanceof RightOuterMergeJoinExec);

    int count = 0;
    exec.init();
    while (exec.next() != null) {
      // TODO check contents
      count = count + 1;
    }
    assertNull(exec.next());
    exec.close();
    assertEquals(5, count);
  }

  @Test
  public final void testRightOuterMergeJoin2() throws IOException, TajoException {
    Expr expr = analyzer.parse(QUERIES[2]);
    LogicalNode plan = planner.createPlan(defaultContext, expr).getRootBlock().getRoot();
    JoinNode joinNode = PlannerUtil.findTopNode(plan, NodeType.JOIN);
    Enforcer enforcer = new Enforcer();
    enforcer.enforceJoinAlgorithm(joinNode.getPID(), JoinAlgorithm.MERGE_JOIN);

    FileFragment[] emp3Frags =
        FileTablespace.splitNG(
            conf, EMP3_NAME, emp3.getMeta(), new Path(emp3.getUri()), Integer.MAX_VALUE);
    FileFragment[] job3Frags =
        FileTablespace.splitNG(
            conf, JOB3_NAME, job3.getMeta(), new Path(job3.getUri()), Integer.MAX_VALUE);
    FileFragment[] merged = TUtil.concat(job3Frags, emp3Frags);

    Path workDir =
        CommonTestingUtil.getTestDir(
            TajoTestingCluster.DEFAULT_TEST_DIRECTORY + "/testRightOuterMergeJoin2");
    TaskAttemptContext ctx =
        new TaskAttemptContext(
            new QueryContext(conf), LocalTajoTestingUtility.newTaskAttemptId(), merged, workDir);
    ctx.setEnforcer(enforcer);

    PhysicalPlanner phyPlanner = new PhysicalPlannerImpl(conf);
    PhysicalExec exec = phyPlanner.createPlan(ctx, plan);
    ProjectionExec proj = (ProjectionExec) exec;
    assertTrue(proj.getChild() instanceof RightOuterMergeJoinExec);

    int count = 0;
    exec.init();
    while (exec.next() != null) {
      // TODO check contents
      count = count + 1;
    }
    assertNull(exec.next());
    exec.close();
    assertEquals(7, count);
  }

  @Test
  public final void testRightOuter_MergeJoin3() throws IOException, TajoException {
    Expr expr = analyzer.parse(QUERIES[3]);
    LogicalNode plan = planner.createPlan(defaultContext, expr).getRootBlock().getRoot();
    JoinNode joinNode = PlannerUtil.findTopNode(plan, NodeType.JOIN);
    Enforcer enforcer = new Enforcer();
    enforcer.enforceJoinAlgorithm(joinNode.getPID(), JoinAlgorithm.MERGE_JOIN);

    FileFragment[] emp3Frags =
        FileTablespace.splitNG(
            conf, EMP3_NAME, emp3.getMeta(), new Path(emp3.getUri()), Integer.MAX_VALUE);
    FileFragment[] dep4Frags =
        FileTablespace.splitNG(
            conf, DEP4_NAME, dep4.getMeta(), new Path(dep4.getUri()), Integer.MAX_VALUE);
    FileFragment[] merged = TUtil.concat(emp3Frags, dep4Frags);

    Path workDir =
        CommonTestingUtil.getTestDir(
            TajoTestingCluster.DEFAULT_TEST_DIRECTORY + "/testRightOuter_MergeJoin3");
    TaskAttemptContext ctx =
        new TaskAttemptContext(
            new QueryContext(conf), LocalTajoTestingUtility.newTaskAttemptId(), merged, workDir);
    ctx.setEnforcer(enforcer);

    PhysicalPlanner phyPlanner = new PhysicalPlannerImpl(conf);
    PhysicalExec exec = phyPlanner.createPlan(ctx, plan);

    ProjectionExec proj = (ProjectionExec) exec;
    assertTrue(proj.getChild() instanceof RightOuterMergeJoinExec);

    int count = 0;
    exec.init();

    while (exec.next() != null) {
      // TODO check contents
      count = count + 1;
    }
    assertNull(exec.next());
    exec.close();
    assertEquals(13, count);
  }

  @Test
  public final void testRightOuter_MergeJoin4() throws IOException, TajoException {
    Expr expr = analyzer.parse(QUERIES[4]);
    LogicalNode plan = planner.createPlan(defaultContext, expr).getRootBlock().getRoot();
    JoinNode joinNode = PlannerUtil.findTopNode(plan, NodeType.JOIN);
    Enforcer enforcer = new Enforcer();
    enforcer.enforceJoinAlgorithm(joinNode.getPID(), JoinAlgorithm.MERGE_JOIN);

    FileFragment[] emp3Frags =
        FileTablespace.splitNG(
            conf, EMP3_NAME, emp3.getMeta(), new Path(emp3.getUri()), Integer.MAX_VALUE);
    FileFragment[] phone3Frags =
        FileTablespace.splitNG(
            conf, PHONE3_NAME, phone3.getMeta(), new Path(phone3.getUri()), Integer.MAX_VALUE);
    FileFragment[] merged = TUtil.concat(emp3Frags, phone3Frags);

    Path workDir =
        CommonTestingUtil.getTestDir(
            TajoTestingCluster.DEFAULT_TEST_DIRECTORY + "/testRightOuter_MergeJoin4");
    TaskAttemptContext ctx =
        new TaskAttemptContext(
            new QueryContext(conf), LocalTajoTestingUtility.newTaskAttemptId(), merged, workDir);
    ctx.setEnforcer(enforcer);

    PhysicalPlanner phyPlanner = new PhysicalPlannerImpl(conf);
    PhysicalExec exec = phyPlanner.createPlan(ctx, plan);
    ProjectionExec proj = (ProjectionExec) exec;
    assertTrue(proj.getChild() instanceof RightOuterMergeJoinExec);

    int count = 0;
    exec.init();

    while (exec.next() != null) {
      // TODO check contents
      count = count + 1;
    }
    assertNull(exec.next());
    exec.close();
    assertEquals(0, count);
  }

  @Test
  public final void testRightOuterMergeJoin5() throws IOException, TajoException {
    Expr expr = analyzer.parse(QUERIES[5]);
    LogicalNode plan = planner.createPlan(defaultContext, expr).getRootBlock().getRoot();
    JoinNode joinNode = PlannerUtil.findTopNode(plan, NodeType.JOIN);
    Enforcer enforcer = new Enforcer();
    enforcer.enforceJoinAlgorithm(joinNode.getPID(), JoinAlgorithm.MERGE_JOIN);

    FileFragment[] emp3Frags =
        FileTablespace.splitNG(
            conf, EMP3_NAME, emp3.getMeta(), new Path(emp3.getUri()), Integer.MAX_VALUE);
    FileFragment[] phone3Frags =
        FileTablespace.splitNG(
            conf, PHONE3_NAME, phone3.getMeta(), new Path(phone3.getUri()), Integer.MAX_VALUE);
    FileFragment[] merged = TUtil.concat(phone3Frags, emp3Frags);

    Path workDir =
        CommonTestingUtil.getTestDir(
            TajoTestingCluster.DEFAULT_TEST_DIRECTORY + "/testRightOuterMergeJoin5");
    TaskAttemptContext ctx =
        new TaskAttemptContext(
            new QueryContext(conf), LocalTajoTestingUtility.newTaskAttemptId(), merged, workDir);
    ctx.setEnforcer(enforcer);

    PhysicalPlanner phyPlanner = new PhysicalPlannerImpl(conf);
    PhysicalExec exec = phyPlanner.createPlan(ctx, plan);
    ProjectionExec proj = (ProjectionExec) exec;
    assertTrue(proj.getChild() instanceof RightOuterMergeJoinExec);

    int count = 0;
    exec.init();

    while (exec.next() != null) {
      // TODO check contents
      count = count + 1;
    }
    exec.close();
    assertEquals(7, count);
  }
}
Example #29
  protected boolean checkExistence(Connection conn, DatabaseObjectType type, String... params)
      throws SQLException {
    boolean result = false;
    DatabaseMetaData metadata = null;
    PreparedStatement pstmt = null;
    BaseSchema baseSchema = catalogStore.getSchema();

    if (params == null || params.length < 1) {
      throw new IllegalArgumentException("checkExistence function needs at least one argument.");
    }

    switch (type) {
      case DATA:
        metadata = conn.getMetaData();
        ResultSet data =
            metadata.getUDTs(
                null,
                baseSchema.getSchemaName() != null && !baseSchema.getSchemaName().isEmpty()
                    ? baseSchema.getSchemaName().toUpperCase()
                    : null,
                params[0].toUpperCase(),
                null);
        result = data.next();
        CatalogUtil.closeQuietly(data);
        break;
      case FUNCTION:
        metadata = conn.getMetaData();
        ResultSet functions =
            metadata.getFunctions(
                null,
                baseSchema.getSchemaName() != null && !baseSchema.getSchemaName().isEmpty()
                    ? baseSchema.getSchemaName().toUpperCase()
                    : null,
                params[0].toUpperCase());
        result = functions.next();
        CatalogUtil.closeQuietly(functions);
        break;
      case INDEX:
        if (params.length != 2) {
          throw new IllegalArgumentException(
              "Finding an index object requires two strings: a table name and an index name.");
        }

        pstmt = getExistQuery(conn, type);
        if (pstmt != null) {
          result = checkExistenceByQuery(pstmt, baseSchema, params);
        } else {
          metadata = conn.getMetaData();
          ResultSet indexes =
              metadata.getIndexInfo(
                  null,
                  baseSchema.getSchemaName() != null && !baseSchema.getSchemaName().isEmpty()
                      ? baseSchema.getSchemaName().toUpperCase()
                      : null,
                  params[0].toUpperCase(),
                  false,
                  true);
          while (indexes.next()) {
            if (indexes.getString("INDEX_NAME").equals(params[1].toUpperCase())) {
              result = true;
              break;
            }
          }
          CatalogUtil.closeQuietly(indexes);
        }
        break;
      case TABLE:
        pstmt = getExistQuery(conn, type);
        if (pstmt != null) {
          result = checkExistenceByQuery(pstmt, baseSchema, params);
        } else {
          metadata = conn.getMetaData();
          ResultSet tables =
              metadata.getTables(
                  null,
                  baseSchema.getSchemaName() != null && !baseSchema.getSchemaName().isEmpty()
                      ? baseSchema.getSchemaName().toUpperCase()
                      : null,
                  params[0].toUpperCase(),
                  new String[] {"TABLE"});
          result = tables.next();
          CatalogUtil.closeQuietly(tables);
        }
        break;
      case DOMAIN:
      case OPERATOR:
      case RULE:
      case SEQUENCE:
      case TRIGGER:
      case VIEW:
        pstmt = getExistQuery(conn, type);

        if (pstmt == null) {
          throw new TajoInternalError(
              "Finding a " + type + " database object is not supported on this database system.");
        }

        result = checkExistenceByQuery(pstmt, baseSchema, params);
        break;
    }

    return result;
  }
Example #30
 public QueryTestCaseBase() {
   this.currentDatabase = CatalogUtil.normalizeIdentifier(getClass().getSimpleName());
   init();
 }