Example #1
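  // All three statements are expected to be rejected (updateQuery returns false):
  // per the test name, anything other than column partitioning (RANGE, LIST, HASH) should fail.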
  @Test
  public final void testFailCreateTablePartitionedOtherExceptColumn()
      throws IOException, ServiceException, SQLException {
    TajoConf conf = cluster.getConfiguration();
    final String tableName = "testFailCreateTablePartitionedOtherExceptColumn";

    assertFalse(client.existTable(tableName));

    String rangeSql = "create table " + tableName + " (deptname text, score int4)";
    rangeSql += "PARTITION BY RANGE (score)";
    rangeSql += "( PARTITION sub_part1 VALUES LESS THAN (2),";
    rangeSql += "PARTITION sub_part2 VALUES LESS THAN (5),";
    rangeSql += "PARTITION sub_part2 VALUES LESS THAN (MAXVALUE) )";

    assertFalse(client.updateQuery(rangeSql));

    String listSql = "create table " + tableName + " (deptname text, score int4)";
    listSql += "PARTITION BY LIST (deptname)";
    listSql += "( PARTITION sub_part1 VALUES('r&d', 'design'),";
    listSql += "PARTITION sub_part2 VALUES('sales', 'hr') )";

    assertFalse(client.updateQuery(listSql));

    String hashSql = "create table " + tableName + " (deptname text, score int4)";
    hashSql += "PARTITION BY HASH (deptname)";
    hashSql += "PARTITIONS 2";

    assertFalse(client.updateQuery(hashSql));
  }
Example #2
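 // Shared fixture: obtains the TPC-H testing cluster and opens a TajoClient before any test in the class runs.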
 @BeforeClass
 public static void setUp() throws Exception {
   cluster = TpchTestBase.getInstance().getTestingCluster();
   conf = cluster.getConfiguration();
   client = new TajoClient(conf);
   testDir = CommonTestingUtil.getTestDir();
 }
Example #3
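  // Creates an external CSV table over a temporary file, then drops it with PURGE and
  // checks that both the catalog entry and the local data file are gone.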
  @Test
  public final void testCreateAndPurgeExternalTableByExecuteQuery()
      throws IOException, ServiceException {
    TajoConf conf = cluster.getConfiguration();
    final String tableName = "testCreateAndPurgeExternalTableByExecuteQuery";

    Path tablePath = writeTmpTable(tableName);
    assertFalse(client.existTable(tableName));

    String sql =
        "create external table "
            + tableName
            + " (deptname text, score int4) "
            + "using csv location '"
            + tablePath
            + "'";

    client.executeQueryAndGetResult(sql);
    assertTrue(client.existTable(tableName));

    client.updateQuery("drop table " + tableName + " purge");
    assertFalse(client.existTable(tableName));
    FileSystem localFS = FileSystem.getLocal(conf);
    assertFalse(localFS.exists(tablePath));
  }
Example #4
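  // Boots a standalone catalog, registers three CSV tables (employee, dept, score) and a
  // user-defined aggregation function, then prepares the SQL analyzer and logical planner.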
  @BeforeClass
  public static void setUp() throws Exception {
    util = new TajoTestingCluster();
    util.startCatalogCluster();
    catalog = util.getMiniCatalogCluster().getCatalog();

    Schema schema = new Schema();
    schema.addColumn("name", Type.TEXT);
    schema.addColumn("empId", CatalogUtil.newSimpleDataType(Type.INT4));
    schema.addColumn("deptName", Type.TEXT);

    Schema schema2 = new Schema();
    schema2.addColumn("deptName", Type.TEXT);
    schema2.addColumn("manager", Type.TEXT);

    Schema schema3 = new Schema();
    schema3.addColumn("deptName", Type.TEXT);
    schema3.addColumn("score", CatalogUtil.newSimpleDataType(Type.INT4));

    TableMeta meta = CatalogUtil.newTableMeta(StoreType.CSV);
    TableDesc people = new TableDesc("employee", schema, meta, CommonTestingUtil.getTestDir());
    catalog.addTable(people);

    TableDesc student =
        new TableDesc(
            "dept", schema2, StoreType.CSV, new Options(), CommonTestingUtil.getTestDir());
    catalog.addTable(student);

    TableDesc score =
        new TableDesc(
            "score", schema3, StoreType.CSV, new Options(), CommonTestingUtil.getTestDir());
    catalog.addTable(score);

    FunctionDesc funcDesc =
        new FunctionDesc(
            "sumtest",
            SumInt.class,
            FunctionType.AGGREGATION,
            CatalogUtil.newSimpleDataType(Type.INT4),
            CatalogUtil.newSimpleDataTypeArray(Type.INT4));

    catalog.createFunction(funcDesc);
    analyzer = new SQLAnalyzer();
    planner = new LogicalPlanner(catalog);
  }
Example #5
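  // CTAS with a column partition on 'key': verifies the partition metadata, the key=<value>
  // subdirectories on disk, the row count (skipped for HiveCatalogStore), and the
  // aggregated results for two partition keys.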
  @Test
  public final void testCtasWithColumnedPartition() throws Exception {
    ResultSet res = executeQuery();
    res.close();

    String tableName = CatalogUtil.normalizeIdentifier("testCtasWithColumnedPartition");

    TajoTestingCluster cluster = testBase.getTestingCluster();
    CatalogService catalog = cluster.getMaster().getCatalog();
    TableDesc desc = catalog.getTableDesc(DEFAULT_DATABASE_NAME, tableName);
    assertTrue(catalog.existsTable(DEFAULT_DATABASE_NAME, tableName));
    PartitionMethodDesc partitionDesc = desc.getPartitionMethod();
    assertEquals(partitionDesc.getPartitionType(), CatalogProtos.PartitionType.COLUMN);
    assertEquals(
        "key", partitionDesc.getExpressionSchema().getRootColumns().get(0).getSimpleName());

    FileSystem fs = FileSystem.get(cluster.getConfiguration());
    Path path = new Path(desc.getUri());
    assertTrue(fs.isDirectory(path));
    assertTrue(fs.isDirectory(new Path(path.toUri() + "/key=17.0")));
    assertTrue(fs.isDirectory(new Path(path.toUri() + "/key=36.0")));
    assertTrue(fs.isDirectory(new Path(path.toUri() + "/key=38.0")));
    assertTrue(fs.isDirectory(new Path(path.toUri() + "/key=45.0")));
    assertTrue(fs.isDirectory(new Path(path.toUri() + "/key=49.0")));
    if (!cluster.isHiveCatalogStoreRunning()) {
      assertEquals(5, desc.getStats().getNumRows().intValue());
    }

    ResultSet res2 = executeFile("check2.sql");

    Map<Double, int[]> resultRows1 = Maps.newHashMap();
    resultRows1.put(45.0d, new int[] {3, 2});
    resultRows1.put(38.0d, new int[] {2, 2});

    int i = 0;
    while (res2.next()) {
      assertEquals(resultRows1.get(res2.getDouble(3))[0], res2.getInt(1));
      assertEquals(resultRows1.get(res2.getDouble(3))[1], res2.getInt(2));
      i++;
    }
    res2.close();
    assertEquals(2, i);
  }
Example #6
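  // The expected table name is lower-cased when HiveCatalogStore is running, since Hive
  // stores identifiers in lower case; the default catalog store keeps the quoted case.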
  @Test
  public void testDescTable() throws Exception {
    String tableName;
    if (cluster.isHiveCatalogStoreRunning()) {
      tableName = "TEST_DESC_TABLE".toLowerCase();
    } else {
      tableName = "TEST_DESC_TABLE";
    }

    String sql = "create table \"" + tableName + "\" (col1 int4, col2 int4);";
    verifyDescTable(sql, tableName, "testDescTable.result");
  }
Example #7
  @Test
  public void testDescTableForNestedSchema() throws Exception {
    String tableName;
    if (cluster.isHiveCatalogStoreRunning()) {
      tableName = "TEST_DESC_TABLE_NESTED".toLowerCase();
    } else {
      tableName = "TEST_DESC_TABLE_NESTED";
    }

    String sql =
        "create table \""
            + tableName
            + "\" (col1 int4, col2 int4, col3 record (col4 record (col5 text)));";
    verifyDescTable(sql, tableName, "testDescTableForNestedSchema.result");
  }
Example #8
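  // Pages a non-forward query two rows at a time and feeds the CLI a quit keystroke so
  // paging stops; the 3-second timeout guards against the CLI blocking on input.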
  @Test(timeout = 3000)
  public void testNonForwardQueryPause() throws Exception {
    final String sql = "select * from default.lineitem";
    TajoCli cli = null;
    try {
      TableDesc tableDesc = cluster.getMaster().getCatalog().getTableDesc("default", "lineitem");
      assertNotNull(tableDesc);
      assertEquals(0L, tableDesc.getStats().getNumRows().longValue());

      InputStream testInput =
          new ByteArrayInputStream(new byte[] {(byte) DefaultTajoCliOutputFormatter.QUIT_COMMAND});
      cli = new TajoCli(cluster.getConfiguration(), new String[] {}, testInput, out);
      setVar(cli, SessionVars.CLI_PAGE_ROWS, "2");
      setVar(cli, SessionVars.CLI_FORMATTER_CLASS, TajoCliOutputTestFormatter.class.getName());

      cli.executeScript(sql);

      String consoleResult = new String(out.toByteArray());
      assertOutputResult(consoleResult);
    } finally {
      if (cli != null) {
        cli.close();
      }
    }
  }
Example #9
  @Before
  public void setUp() throws Exception {
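    // Writes numTuple random employee rows to a local CSV file with statistics enabled
    // and registers the resulting table in the catalog.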
    this.conf = new TajoConf();
    util = new TajoTestingCluster();
    catalog = util.startCatalogCluster().getCatalog();
    testDir = CommonTestingUtil.getTestDir(TEST_PATH);
    conf.setVar(TajoConf.ConfVars.WORKER_TEMPORAL_DIR, testDir.toString());
    sm = StorageManagerFactory.getStorageManager(conf, testDir);

    Schema schema = new Schema();
    schema.addColumn("managerId", Type.INT4);
    schema.addColumn("empId", Type.INT4);
    schema.addColumn("deptName", Type.TEXT);

    TableMeta employeeMeta = CatalogUtil.newTableMeta(StoreType.CSV);
    Path employeePath = new Path(testDir, "employee.csv");
    Appender appender =
        StorageManagerFactory.getStorageManager(conf)
            .getAppender(employeeMeta, schema, employeePath);
    appender.enableStats();
    appender.init();
    Tuple tuple = new VTuple(schema.getColumnNum());
    for (int i = 0; i < numTuple; i++) {
      tuple.put(
          new Datum[] {
            DatumFactory.createInt4(rnd.nextInt(50)),
            DatumFactory.createInt4(rnd.nextInt(100)),
            DatumFactory.createText("dept_" + i),
          });
      appender.addTuple(tuple);
    }
    appender.flush();
    appender.close();

    System.out.println(
        appender.getStats().getNumRows()
            + " rows ("
            + (appender.getStats().getNumBytes() / 1048576)
            + " MB)");

    employee = new TableDesc("employee", schema, employeeMeta, employeePath);
    catalog.addTable(employee);
    analyzer = new SQLAnalyzer();
    planner = new LogicalPlanner(catalog);
  }
Example #10
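  // Counts the catalog's 'sum' functions, then checks that the client returns the same
  // count for that name, nothing for an unknown name, and every function when no name is given.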
  @Test
  public final void testGetFunctions() throws IOException, ServiceException, SQLException {
    Collection<FunctionDesc> catalogFunctions = cluster.getMaster().getCatalog().getFunctions();
    String functionName = "sum";
    int numFunctions = 0;
    for (FunctionDesc eachFunction : catalogFunctions) {
      if (functionName.equals(eachFunction.getSignature())) {
        numFunctions++;
      }
    }

    List<CatalogProtos.FunctionDescProto> functions = client.getFunctions(functionName);
    assertEquals(numFunctions, functions.size());

    functions = client.getFunctions("notmatched");
    assertEquals(0, functions.size());

    functions = client.getFunctions(null);
    assertEquals(catalogFunctions.size(), functions.size());
  }
Example #11
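  // Helper: creates the table, runs \d with and without quoting the name, and compares the
  // captured console output against the expected result file (the comparison is skipped
  // under HiveCatalogStore).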
  private void verifyDescTable(String sql, String tableName, String resultFileName)
      throws Exception {
    setVar(tajoCli, SessionVars.CLI_FORMATTER_CLASS, TajoCliOutputTestFormatter.class.getName());
    tajoCli.executeScript(sql);

    tajoCli.executeMetaCommand("\\d " + tableName);
    tajoCli.executeMetaCommand("\\d \"" + tableName + "\"");

    String consoleResult = new String(out.toByteArray());

    if (!cluster.isHiveCatalogStoreRunning()) {
      assertOutputResult(
          resultFileName,
          consoleResult,
          new String[] {"${table.path}"},
          new String[] {
            TablespaceManager.getDefault().getTableUri("default", tableName).toString()
          });
    }
  }
Example #12
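  // Managed-table counterpart of the external-table test: DROP TABLE ... PURGE must remove
  // both the catalog entry and the table directory in HDFS.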
  @Test
  public final void testCreateAndPurgeTableByExecuteQuery()
      throws IOException, ServiceException, SQLException {
    TajoConf conf = cluster.getConfiguration();
    final String tableName = "testCreateAndPurgeTableByExecuteQuery";

    assertFalse(client.existTable(tableName));

    String sql = "create table " + tableName + " (deptname text, score int4)";

    client.updateQuery(sql);
    assertTrue(client.existTable(tableName));

    Path tablePath = client.getTableDesc(tableName).getPath();
    FileSystem hdfs = tablePath.getFileSystem(conf);
    assertTrue(hdfs.exists(tablePath));

    client.updateQuery("drop table " + tableName + " purge");
    assertFalse(client.existTable(tableName));
    assertFalse(hdfs.exists(tablePath));
  }
Example #13
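  // Currently disabled (@Test is commented out): would create a HASH-partitioned table and
  // verify that DROP without PURGE removes the catalog entry but keeps the data directory.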
  // @Test
  public final void testCreateAndDropTablePartitionedHash1ByExecuteQuery()
      throws IOException, ServiceException, SQLException {
    TajoConf conf = cluster.getConfiguration();
    final String tableName = "testCreateAndDropTablePartitionedHash1ByExecuteQuery";

    assertFalse(client.existTable(tableName));

    String sql = "create table " + tableName + " (deptname text, score int4)";
    sql += " PARTITION BY HASH (deptname)";
    sql += " (PARTITION sub_part1, PARTITION sub_part2, PARTITION sub_part3)";

    client.updateQuery(sql);
    assertTrue(client.existTable(tableName));

    Path tablePath = client.getTableDesc(tableName).getPath();
    FileSystem hdfs = tablePath.getFileSystem(conf);
    assertTrue(hdfs.exists(tablePath));

    client.updateQuery("drop table " + tableName);
    assertFalse(client.existTable(tableName));
    assertTrue(hdfs.exists(tablePath));
  }
Example #14
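  // Switches the CLI session between databases with \c, using both unquoted and quoted
  // identifiers, and verifies the current database each time.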
  @Test
  public void testConnectDatabase() throws Exception {
    String databaseName;

    if (cluster.isHiveCatalogStoreRunning()) {
      databaseName = "TEST_CONNECTION_DATABASE".toLowerCase();
    } else {
      databaseName = "TEST_CONNECTION_DATABASE";
    }
    String sql = "create database \"" + databaseName + "\";";

    tajoCli.executeScript(sql);

    tajoCli.executeMetaCommand("\\c " + databaseName);
    assertEquals(databaseName, tajoCli.getContext().getCurrentDatabase());

    tajoCli.executeMetaCommand("\\c default");
    assertEquals("default", tajoCli.getContext().getCurrentDatabase());

    tajoCli.executeMetaCommand("\\c \"" + databaseName + "\"");
    assertEquals(databaseName, tajoCli.getContext().getCurrentDatabase());
  }
Example #15
  @Before
  public void setUp() throws Exception {
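    // Builds four small TEXT tables (dep3, dep4, job3, emp3) plus an empty phone3 table in a
    // local tablespace; the comment blocks above each section list the expected rows.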
    util = new TajoTestingCluster();
    util.initTestDir();
    util.startCatalogCluster();
    catalog = util.getCatalogService();
    testDir = CommonTestingUtil.getTestDir(TEST_PATH);
    catalog.createTablespace(DEFAULT_TABLESPACE_NAME, testDir.toUri().toString());
    catalog.createDatabase(DEFAULT_DATABASE_NAME, DEFAULT_TABLESPACE_NAME);
    conf = util.getConfiguration();

    // ----------------- dep3 ------------------------------
    // dep_id | dep_name  | loc_id
    // --------------------------------
    //  0     | dep_0     | 1000
    //  1     | dep_1     | 1001
    //  2     | dep_2     | 1002
    //  3     | dep_3     | 1003
    //  4     | dep_4     | 1004
    //  5     | dep_5     | 1005
    //  6     | dep_6     | 1006
    //  7     | dep_7     | 1007
    //  8     | dep_8     | 1008
    //  9     | dep_9     | 1009
    Schema dep3Schema = new Schema();
    dep3Schema.addColumn("dep_id", Type.INT4);
    dep3Schema.addColumn("dep_name", Type.TEXT);
    dep3Schema.addColumn("loc_id", Type.INT4);

    TableMeta dep3Meta = CatalogUtil.newTableMeta("TEXT");
    Path dep3Path = new Path(testDir, "dep3.csv");
    Appender appender1 =
        ((FileTablespace) TablespaceManager.getLocalFs())
            .getAppender(dep3Meta, dep3Schema, dep3Path);
    appender1.init();
    VTuple tuple = new VTuple(dep3Schema.size());
    for (int i = 0; i < 10; i++) {
      tuple.put(
          new Datum[] {
            DatumFactory.createInt4(i),
            DatumFactory.createText("dept_" + i),
            DatumFactory.createInt4(1000 + i)
          });
      appender1.addTuple(tuple);
    }

    appender1.flush();
    appender1.close();
    dep3 = CatalogUtil.newTableDesc(DEP3_NAME, dep3Schema, dep3Meta, dep3Path);
    catalog.createTable(dep3);

    // ----------------- dep4 ------------------------------
    // dep_id | dep_name  | loc_id
    // --------------------------------
    //  0     | dep_0     | 1000
    //  1     | dep_1     | 1001
    //  2     | dep_2     | 1002
    //  3     | dep_3     | 1003
    //  4     | dep_4     | 1004
    //  5     | dep_5     | 1005
    //  6     | dep_6     | 1006
    //  7     | dep_7     | 1007
    //  8     | dep_8     | 1008
    //  9     | dep_9     | 1009
    // 10     | dep_10    | 1010
    Schema dep4Schema = new Schema();
    dep4Schema.addColumn("dep_id", Type.INT4);
    dep4Schema.addColumn("dep_name", Type.TEXT);
    dep4Schema.addColumn("loc_id", Type.INT4);

    TableMeta dep4Meta = CatalogUtil.newTableMeta("TEXT");
    Path dep4Path = new Path(testDir, "dep4.csv");
    Appender appender4 =
        ((FileTablespace) TablespaceManager.getLocalFs())
            .getAppender(dep4Meta, dep4Schema, dep4Path);
    appender4.init();
    VTuple tuple4 = new VTuple(dep4Schema.size());
    for (int i = 0; i < 11; i++) {
      tuple4.put(
          new Datum[] {
            DatumFactory.createInt4(i),
            DatumFactory.createText("dept_" + i),
            DatumFactory.createInt4(1000 + i)
          });
      appender4.addTuple(tuple4);
    }

    appender4.flush();
    appender4.close();
    dep4 = CatalogUtil.newTableDesc(DEP4_NAME, dep4Schema, dep4Meta, dep4Path);
    catalog.createTable(dep4);

    // ----------------- job3 ------------------------------
    //  job_id  | job_title
    // ----------------------
    //   101    |  job_101
    //   102    |  job_102
    //   103    |  job_103

    Schema job3Schema = new Schema();
    job3Schema.addColumn("job_id", Type.INT4);
    job3Schema.addColumn("job_title", Type.TEXT);

    TableMeta job3Meta = CatalogUtil.newTableMeta("TEXT");
    Path job3Path = new Path(testDir, "job3.csv");
    Appender appender2 =
        ((FileTablespace) TablespaceManager.getLocalFs())
            .getAppender(job3Meta, job3Schema, job3Path);
    appender2.init();
    VTuple tuple2 = new VTuple(job3Schema.size());
    for (int i = 1; i < 4; i++) {
      int x = 100 + i;
      tuple2.put(
          new Datum[] {DatumFactory.createInt4(100 + i), DatumFactory.createText("job_" + x)});
      appender2.addTuple(tuple2);
    }

    appender2.flush();
    appender2.close();
    job3 = CatalogUtil.newTableDesc(JOB3_NAME, job3Schema, job3Meta, job3Path);
    catalog.createTable(job3);

    // ---------------------emp3 --------------------
    // emp_id  | first_name | last_name | dep_id | salary | job_id
    // ------------------------------------------------------------
    //  11     |  fn_11     |  ln_11    |  1     | 123    | 101
    //  13     |  fn_13     |  ln_13    |  3     | 369    | 103
    //  15     |  fn_15     |  ln_15    |  5     | 615    | null
    //  17     |  fn_17     |  ln_17    |  7     | 861    | null
    //  19     |  fn_19     |  ln_19    |  9     | 1107   | null
    //  21     |  fn_21     |  ln_21    |  1     | 123    | 101
    //  23     |  fn_23     |  ln_23    |  3     | 369    | 103

    Schema emp3Schema = new Schema();
    emp3Schema.addColumn("emp_id", Type.INT4);
    emp3Schema.addColumn("first_name", Type.TEXT);
    emp3Schema.addColumn("last_name", Type.TEXT);
    emp3Schema.addColumn("dep_id", Type.INT4);
    emp3Schema.addColumn("salary", Type.FLOAT4);
    emp3Schema.addColumn("job_id", Type.INT4);

    TableMeta emp3Meta = CatalogUtil.newTableMeta("TEXT");
    Path emp3Path = new Path(testDir, "emp3.csv");
    Appender appender3 =
        ((FileTablespace) TablespaceManager.getLocalFs())
            .getAppender(emp3Meta, emp3Schema, emp3Path);
    appender3.init();
    VTuple tuple3 = new VTuple(emp3Schema.size());

    for (int i = 1; i < 4; i += 2) {
      int x = 10 + i;
      tuple3.put(
          new Datum[] {
            DatumFactory.createInt4(10 + i),
            DatumFactory.createText("firstname_" + x),
            DatumFactory.createText("lastname_" + x),
            DatumFactory.createInt4(i),
            DatumFactory.createFloat4(123 * i),
            DatumFactory.createInt4(100 + i)
          });
      appender3.addTuple(tuple3);

      int y = 20 + i;
      tuple3.put(
          new Datum[] {
            DatumFactory.createInt4(20 + i),
            DatumFactory.createText("firstname_" + y),
            DatumFactory.createText("lastname_" + y),
            DatumFactory.createInt4(i),
            DatumFactory.createFloat4(123 * i),
            DatumFactory.createInt4(100 + i)
          });
      appender3.addTuple(tuple3);
    }

    for (int i = 5; i < 10; i += 2) {
      int x = 10 + i;
      tuple3.put(
          new Datum[] {
            DatumFactory.createInt4(10 + i),
            DatumFactory.createText("firstname_" + x),
            DatumFactory.createText("lastname_" + x),
            DatumFactory.createInt4(i),
            DatumFactory.createFloat4(123 * i),
            DatumFactory.createNullDatum()
          });
      appender3.addTuple(tuple3);
    }

    appender3.flush();
    appender3.close();
    emp3 = CatalogUtil.newTableDesc(EMP3_NAME, emp3Schema, emp3Meta, emp3Path);
    catalog.createTable(emp3);

    // ---------------------phone3 --------------------
    // emp_id  | phone_number
    // -----------------------------------------------
    // this table is empty, no rows

    Schema phone3Schema = new Schema();
    phone3Schema.addColumn("emp_id", Type.INT4);
    phone3Schema.addColumn("phone_number", Type.TEXT);

    TableMeta phone3Meta = CatalogUtil.newTableMeta("TEXT");
    Path phone3Path = new Path(testDir, "phone3.csv");
    Appender appender5 =
        ((FileTablespace) TablespaceManager.getLocalFs())
            .getAppender(phone3Meta, phone3Schema, phone3Path);
    appender5.init();

    appender5.flush();
    appender5.close();
    phone3 = CatalogUtil.newTableDesc(PHONE3_NAME, phone3Schema, phone3Meta, phone3Path);
    catalog.createTable(phone3);

    analyzer = new SQLAnalyzer();
    planner = new LogicalPlanner(catalog, TablespaceManager.getInstance());

    defaultContext = LocalTajoTestingUtility.createDummyContext(conf);
  }
Example #16
 @After
 public void tearDown() throws Exception {
   util.shutdownCatalogCluster();
 }
Example #17
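  // Writes 10,000 random employee rows while building a two-level BST index on managerid,
  // tracking in randomValues how many times each key value was generated.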
  @Before
  public void setup() throws Exception {
    this.randomValues = new HashMap<Integer, Integer>();
    this.conf = new TajoConf();
    util = new TajoTestingCluster();
    util.startCatalogCluster();
    catalog = util.getMiniCatalogCluster().getCatalog();

    Path workDir = CommonTestingUtil.getTestDir();
    catalog.createTablespace(DEFAULT_TABLESPACE_NAME, workDir.toUri().toString());
    catalog.createDatabase(TajoConstants.DEFAULT_DATABASE_NAME, DEFAULT_TABLESPACE_NAME);
    sm = StorageManagerFactory.getStorageManager(conf, workDir);

    idxPath = new Path(workDir, "test.idx");

    Schema schema = new Schema();
    schema.addColumn("managerid", Type.INT4);
    schema.addColumn("empid", Type.INT4);
    schema.addColumn("deptname", Type.TEXT);

    this.idxSchema = new Schema();
    idxSchema.addColumn("managerid", Type.INT4);
    SortSpec[] sortKeys = new SortSpec[1];
    sortKeys[0] = new SortSpec(idxSchema.getColumn("managerid"), true, false);
    this.comp = new TupleComparator(idxSchema, sortKeys);

    this.writer =
        new BSTIndex(conf)
            .getIndexWriter(idxPath, BSTIndex.TWO_LEVEL_INDEX, this.idxSchema, this.comp);
    writer.setLoadNum(100);
    writer.open();
    long offset;

    meta = CatalogUtil.newTableMeta(StoreType.CSV);
    tablePath = StorageUtil.concatPath(workDir, "employee", "table.csv");
    fs = tablePath.getFileSystem(conf);
    fs.mkdirs(tablePath.getParent());

    FileAppender appender =
        (FileAppender)
            StorageManagerFactory.getStorageManager(conf).getAppender(meta, schema, tablePath);
    appender.init();
    Tuple tuple = new VTuple(schema.size());
    for (int i = 0; i < 10000; i++) {

      Tuple key = new VTuple(this.idxSchema.size());
      int rndKey = rnd.nextInt(250);
      if (this.randomValues.containsKey(rndKey)) {
        int t = this.randomValues.remove(rndKey) + 1;
        this.randomValues.put(rndKey, t);
      } else {
        this.randomValues.put(rndKey, 1);
      }

      key.put(new Datum[] {DatumFactory.createInt4(rndKey)});
      tuple.put(
          new Datum[] {
            DatumFactory.createInt4(rndKey),
            DatumFactory.createInt4(rnd.nextInt(10)),
            DatumFactory.createText("dept_" + rnd.nextInt(10))
          });
      offset = appender.getOffset();
      appender.addTuple(tuple);
      writer.write(key, offset);
    }
    appender.flush();
    appender.close();
    writer.close();

    TableDesc desc =
        new TableDesc(
            CatalogUtil.buildFQName(TajoConstants.DEFAULT_DATABASE_NAME, "employee"),
            schema,
            meta,
            sm.getTablePath("employee"));
    catalog.createTable(desc);

    analyzer = new SQLAnalyzer();
    planner = new LogicalPlanner(catalog);
    optimizer = new LogicalOptimizer(conf);
  }
Example #18
 @After
 public void tearDown() {
   util.shutdownCatalogCluster();
 }
Example #19
 @Before
 public void setUp() throws Exception {
   out = new ByteArrayOutputStream();
   tajoCli = new TajoCli(cluster.getConfiguration(), new String[] {}, System.in, out);
 }
 @After
 public void tearDown() throws Exception {
   CommonTestingUtil.cleanupTestDir(TEST_PATH);
   util.shutdownCatalogCluster();
 }