/*
   * (non-Javadoc)
   *
   * @see
   * com.impetus.kunderahbase.dao.user.UserDao#findUserByUserName(java.lang
   * .String, boolean, java.util.List)
   */
  @Override
  public void findUserByUserName(String userName, boolean isBulk, List<UserHBaseDTO> users) {
    for (UserHBaseDTO user : users) {
      Filter filter =
          new SingleColumnValueFilter(
              Bytes.toBytes("user_name"),
              Bytes.toBytes("user_name"),
              CompareOp.EQUAL,
              Bytes.toBytes(user.getUserNameCounter()));
      Scan scan = new Scan();
      scan.setFilter(filter);
      scan.addColumn(Bytes.toBytes("user_name"), Bytes.toBytes("user_name"));
      scan.addColumn(Bytes.toBytes("user_nameCnt"), Bytes.toBytes("user_nameCnt"));
      scan.addColumn(Bytes.toBytes("password"), Bytes.toBytes("password"));
      scan.addColumn(Bytes.toBytes("relation"), Bytes.toBytes("relation"));

      ResultScanner scanner = null;
      int counter = 0;
      try {
        scanner = hTablePool.getTable("User").getScanner(scan);
      } catch (IOException e) {
        // Log and continue; the assertions below will fail if no scanner was obtained.
        e.printStackTrace();
      }
      assert scanner != null;
      for (Result result : scanner) {
        counter++;
        assert result != null;
      }
      assert counter != 0;
    }
  }
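For contrast, here is a minimal sketch (not part of the original DAO) of the same filtered scan written against the newer Connection/Table API, closing the table and scanner with try-with-resources; the open connection and the "User" table layout are assumptions carried over from the example above.

  // Hedged sketch: assumes an open Connection named connection and the same
  // "User" table with a "user_name" family/qualifier as in the example above.
  public int countUsersByName(Connection connection, String name) throws IOException {
    Scan scan = new Scan();
    scan.setFilter(
        new SingleColumnValueFilter(
            Bytes.toBytes("user_name"),
            Bytes.toBytes("user_name"),
            CompareOp.EQUAL,
            Bytes.toBytes(name)));
    scan.addColumn(Bytes.toBytes("user_name"), Bytes.toBytes("user_name"));
    int matches = 0;
    try (Table table = connection.getTable(TableName.valueOf("User"));
        ResultScanner scanner = table.getScanner(scan)) {
      for (Result result : scanner) {
        matches++; // each Result is one matching user row
      }
    }
    return matches;
  }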
Example No. 2
  @Override
  protected List<RawResource> getAllResources(String rangeStart, String rangeEnd)
      throws IOException {
    byte[] startRow = Bytes.toBytes(rangeStart);
    byte[] endRow = plusZero(Bytes.toBytes(rangeEnd));

    Scan scan = new Scan(startRow, endRow);
    scan.addColumn(B_FAMILY, B_COLUMN_TS);
    scan.addColumn(B_FAMILY, B_COLUMN);

    HTableInterface table = getConnection().getTable(getAllInOneTableName());
    List<RawResource> result = Lists.newArrayList();
    try {
      ResultScanner scanner = table.getScanner(scan);
      for (Result r : scanner) {
        result.add(new RawResource(getInputStream(Bytes.toString(r.getRow()), r), getTimestamp(r)));
      }
    } catch (IOException e) {
      for (RawResource rawResource : result) {
        IOUtils.closeQuietly(rawResource.resource);
      }
      throw e;
    } finally {
      IOUtils.closeQuietly(table);
    }
    return result;
  }
  /*
   * (non-Javadoc)
   *
   * @see com.impetus.kunderahbase.dao.user.UserDao#findAllByUserName(int)
   */
  @Override
  public void findAllByUserName(int count) {
    Filter filter =
        new SingleColumnValueFilter(
            Bytes.toBytes("user_name"),
            Bytes.toBytes("user_name"),
            CompareOp.EQUAL,
            Bytes.toBytes("Amry"));
    Scan scan = new Scan();
    scan.setFilter(filter);
    scan.addColumn(Bytes.toBytes("user_name"), Bytes.toBytes("user_name"));
    scan.addColumn(Bytes.toBytes("user_nameCnt"), Bytes.toBytes("user_nameCnt"));
    scan.addColumn(Bytes.toBytes("password"), Bytes.toBytes("password"));
    scan.addColumn(Bytes.toBytes("relation"), Bytes.toBytes("relation"));

    ResultScanner scanner = null;
    int counter = 0;
    try {
      scanner = hTablePool.getTable("User").getScanner(scan);
    } catch (IOException e) {
      // Log and continue; the assertions below will fail if no scanner was obtained.
      e.printStackTrace();
    }
    assert scanner != null;
    for (Result result : scanner) {
      counter++;
      assert result != null;
    }
    assert counter != 0;
    assert counter == count;
  }
      @Override
      public void process(long now, HRegion region, List<Mutation> mutations, WALEdit walEdit)
          throws IOException {
        List<Cell> kvs = new ArrayList<Cell>();
        { // First scan to get friends of the person
          Scan scan = new Scan(row, row);
          scan.addColumn(FAM, person);
          doScan(region, scan, kvs);
        }

        // Second scan to get friends of friends
        Scan scan = new Scan(row, row);
        for (Cell kv : kvs) {
          byte[] friends = CellUtil.cloneValue(kv);
          for (byte f : friends) {
            scan.addColumn(FAM, new byte[] {f});
          }
        }
        doScan(region, scan, kvs);

        // Collect result
        result.clear();
        for (Cell kv : kvs) {
          for (byte b : CellUtil.cloneValue(kv)) {
            result.add((char) b + "");
          }
        }
      }
 /**
  * Get the QueueIds belonging to the named server from the ReplicationTableBase
  *
  * @param server name of the server
  * @return a ResultScanner over the QueueIds belonging to the server
  * @throws IOException
  */
 private ResultScanner getAllQueuesScanner(String server) throws IOException {
   Scan scan = new Scan();
   SingleColumnValueFilter filterMyQueues =
       new SingleColumnValueFilter(
           CF_QUEUE, COL_QUEUE_OWNER, CompareFilter.CompareOp.EQUAL, Bytes.toBytes(server));
   scan.setFilter(filterMyQueues);
   scan.addColumn(CF_QUEUE, COL_QUEUE_OWNER);
   scan.addColumn(CF_QUEUE, COL_QUEUE_OWNER_HISTORY);
   ResultScanner results = replicationTable.getScanner(scan);
   return results;
 }
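A hypothetical usage sketch (not in the original class) for the scanner returned above; it assumes the same CF_QUEUE and COL_QUEUE_OWNER constants and simply iterates the queue rows owned by a server, closing the scanner when done.

 // Hedged sketch: called from within the same class, since getAllQueuesScanner is private.
 private void printQueuesFor(String server) throws IOException {
   try (ResultScanner queues = getAllQueuesScanner(server)) {
     for (Result queue : queues) {
       byte[] owner = queue.getValue(CF_QUEUE, COL_QUEUE_OWNER);
       System.out.println(Bytes.toString(queue.getRow()) + " owned by " + Bytes.toString(owner));
     }
   }
 }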
  /**
   * Sets up the actual job.
   *
   * @param conf The current configuration.
   * @param args The command line parameters.
   * @return The newly created job.
   * @throws IOException When setting up the job fails.
   */
  public static Job createSubmittableJob(Configuration conf, String[] args) throws IOException {
    String tableName = args[0];
    Job job = new Job(conf, NAME + "_" + tableName);
    job.setJarByClass(CachingRowCounter.class);
    // Columns are space delimited
    StringBuilder sb = new StringBuilder();
    final int columnoffset = 1;
    for (int i = columnoffset; i < args.length; i++) {
      if (i > columnoffset) {
        sb.append(" ");
      }
      sb.append(args[i]);
    }

    Scan scan = new Scan();
    scan.setFilter(new FirstKeyOnlyFilter());
    if (sb.length() > 0) {
      for (String columnName : sb.toString().split(" ")) {
        String[] fields = columnName.split(":");
        if (fields.length == 1) {
          scan.addFamily(Bytes.toBytes(fields[0]));
        } else {
          scan.addColumn(Bytes.toBytes(fields[0]), Bytes.toBytes(fields[1]));
        }
      }
    }
    scan.setCaching(100);

    // The job produces no output files, so use NullOutputFormat.
    job.setOutputFormatClass(NullOutputFormat.class);
    TableMapReduceUtil.initTableMapperJob(
        tableName, scan, RowCounterMapper.class, ImmutableBytesWritable.class, Result.class, job);
    job.setNumReduceTasks(0);
    return job;
  }
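A hedged driver sketch for createSubmittableJob: args[0] is the table name and each remaining argument is a family or family:qualifier column; the HBaseConfiguration.create() and GenericOptionsParser steps are assumptions, not shown in the original.

  // Hypothetical entry point, e.g. invoked with {"mytable", "cf:qual1", "cf:qual2"}.
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    String[] otherArgs = new GenericOptionsParser(conf, args).getRemainingArgs();
    Job job = createSubmittableJob(conf, otherArgs);
    System.exit(job.waitForCompletion(true) ? 0 : 1);
  }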
Example No. 7
  @Override
  public Map<String, Integer> giveAvailableBikes(
      long milliseconds, List<String> stationIds, Scan scan) throws IOException {
    // The passed-in scan already encodes the requested timestamp, i.e., its start
    // and stop rows have been set accordingly.

    for (String qualifier : stationIds) {
      log.debug("adding qualifier: " + qualifier);
      scan.addColumn(colFamily, qualifier.getBytes());
    }
    InternalScanner scanner =
        ((RegionCoprocessorEnvironment) getEnvironment()).getRegion().getScanner(scan);
    List<KeyValue> res = new ArrayList<KeyValue>();
    Map<String, Integer> result = new HashMap<String, Integer>();
    boolean hasMoreResult = false;
    try {
      do {
        hasMoreResult = scanner.next(res);
        for (KeyValue kv : res) {
          // log.debug("got a kv: " + kv);
          int availBikes = getFreeBikes(kv);
          String id = Bytes.toString(kv.getQualifier());
          // log.debug("result to be added is: " + availBikes + " id: " + id);
          result.put(id, availBikes);
        }
        res.clear();
      } while (hasMoreResult);
    } finally {
      scanner.close();
    }
    return result;
  }
Example No. 8
  public Scan generateScan(
      String[] rowRange, FilterList filterList, String[] family, String[] columns, int maxVersion)
      throws Exception {
    if (table == null) throw new Exception("No table handler");
    if (cacheSize < 0) throw new Exception("should set cache size before scanning");

    Scan scan = null;

    try {
      scan = new Scan();
      scan.setCaching(this.cacheSize);
      scan.setCacheBlocks(this.blockCached);
      scan.setFilter(filterList);
      if (maxVersion > 0) scan.setMaxVersions(maxVersion);
      if (rowRange != null) {
        scan.setStartRow(rowRange[0].getBytes());
        if (rowRange.length == 2) scan.setStopRow(rowRange[1].getBytes());
      }

      if (columns != null) {
        for (int i = 0; i < columns.length; i++) {
          scan.addColumn(family[0].getBytes(), columns[i].getBytes());
          // System.out.println(family[i]+";"+columns[i]);
        }
      } else {
        scan.addFamily(family[0].getBytes());
      }

    } catch (Exception e) {
      e.printStackTrace();
    }

    return scan;
  }
Example No. 9
 /* (non-Javadoc)
  * @see com.hazelcast.core.MapLoader#loadAllKeys()
  */
 @Override
 public Set<String> loadAllKeys() {
   Set<String> keySet = null;
   if (allowLoadAll) {
     keySet = new HashSet<String>();
     HTableInterface hti = null;
     try {
       hti = pool.getTable(tableName);
       Scan s = new Scan();
       s.addColumn(family, qualifier);
       ResultScanner rs = hti.getScanner(s);
       Result r = null;
       while ((r = rs.next()) != null) {
         String k = new String(r.getRow());
         keySet.add(k);
       }
     } catch (IOException e) {
       LOG.error("IOException while loading all keys", e);
     } finally {
       if (hti != null) {
         pool.putTable(hti);
       }
     }
   }
   return keySet;
 }
Example No. 10
      @Override
      public void process(long now, HRegion region, List<Mutation> mutations, WALEdit walEdit)
          throws IOException {
        // Scan current counter
        List<Cell> kvs = new ArrayList<Cell>();
        Scan scan = new Scan(row, row);
        scan.addColumn(FAM, COUNTER);
        doScan(region, scan, kvs);
        counter = kvs.size() == 0 ? 0 : Bytes.toInt(CellUtil.cloneValue(kvs.iterator().next()));

        // Assert counter value
        assertEquals(expectedCounter, counter);

        // Increment counter and send it to both memstore and wal edit
        counter += 1;
        expectedCounter += 1;

        Put p = new Put(row);
        KeyValue kv = new KeyValue(row, FAM, COUNTER, now, Bytes.toBytes(counter));
        p.add(kv);
        mutations.add(p);
        walEdit.add(kv);

        // We can also inject some meta data to the walEdit
        KeyValue metaKv =
            new KeyValue(
                row,
                WALEdit.METAFAMILY,
                Bytes.toBytes("I just increment counter"),
                Bytes.toBytes(counter));
        walEdit.add(metaKv);
      }
Example No. 11
 private Scan createScan(final String columnFamily, final String[] headers) {
   Scan scan = new Scan();
   for (String header : headers) {
     scan.addColumn(Bytes.toBytes(columnFamily), Bytes.toBytes(header));
   }
   return scan;
 }
Example No. 12
 /*
  * Add a value to each of the regions in .META.. The key is the start row of the
  * region (except that it is 'aaa' for the first region); the actual value is the row name.
  * @param expected
  * @return
  * @throws IOException
  */
 private static int addToEachStartKey(final int expected) throws IOException {
   HTable t = new HTable(TEST_UTIL.getConfiguration(), TABLENAME);
   HTable meta = new HTable(TEST_UTIL.getConfiguration(), HConstants.META_TABLE_NAME);
   int rows = 0;
   Scan scan = new Scan();
   scan.addColumn(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
   ResultScanner s = meta.getScanner(scan);
   for (Result r = null; (r = s.next()) != null; ) {
     byte[] b = r.getValue(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
     if (b == null || b.length <= 0) break;
     HRegionInfo hri = Writables.getHRegionInfo(b);
     // If start key, add 'aaa'.
     byte[] row = getStartKey(hri);
     Put p = new Put(row);
     p.setWriteToWAL(false);
     p.add(getTestFamily(), getTestQualifier(), row);
     t.put(p);
     rows++;
   }
   s.close();
   Assert.assertEquals(expected, rows);
   t.close();
   meta.close();
   return rows;
 }
 /** @throws Throwable */
 @Test
 public void testStdWithValidRange() throws Throwable {
   AggregationClient aClient = new AggregationClient(conf);
   Scan scan = new Scan();
   scan.addColumn(TEST_FAMILY, TEST_QUALIFIER);
   final ColumnInterpreter<Long, Long, EmptyMsg, LongMsg, LongMsg> ci =
       new LongColumnInterpreter();
   double std = aClient.std(TEST_TABLE, ci, scan);
   assertEquals(5.766, std, 0.05d);
 }
 /**
  * This will test the row count on the entire table. Startrow and endrow will be null.
  *
  * @throws Throwable
  */
 @Test
 public void testRowCountAllTable() throws Throwable {
   AggregationClient aClient = new AggregationClient(conf);
   Scan scan = new Scan();
   scan.addColumn(TEST_FAMILY, TEST_QUALIFIER);
   final ColumnInterpreter<Long, Long, EmptyMsg, LongMsg, LongMsg> ci =
       new LongColumnInterpreter();
   long rowCount = aClient.rowCount(TEST_TABLE, ci, scan);
   assertEquals(ROWSIZE, rowCount);
 }
 /** @throws Throwable */
 @Test
 public void testAvgWithValidRange() throws Throwable {
   AggregationClient aClient = new AggregationClient(conf);
   Scan scan = new Scan();
   scan.addColumn(TEST_FAMILY, TEST_QUALIFIER);
   final ColumnInterpreter<Long, Long, EmptyMsg, LongMsg, LongMsg> ci =
       new LongColumnInterpreter();
   double avg = aClient.avg(TEST_TABLE, ci, scan);
   assertEquals(9.5, avg, 0);
 }
 /** @throws Throwable */
 @Test
 public void testSumWithValidRange() throws Throwable {
   AggregationClient aClient = new AggregationClient(conf);
   Scan scan = new Scan();
   scan.addColumn(TEST_FAMILY, TEST_QUALIFIER);
   final ColumnInterpreter<Long, Long, EmptyMsg, LongMsg, LongMsg> ci =
       new LongColumnInterpreter();
   long sum = aClient.sum(TEST_TABLE, ci, scan);
   assertEquals(190, sum);
 }
 @Test
 public void testRowCountWithPrefixFilter() throws Throwable {
   AggregationClient aClient = new AggregationClient(conf);
   Scan scan = new Scan();
   scan.addColumn(TEST_FAMILY, TEST_QUALIFIER);
   final ColumnInterpreter<Long, Long, EmptyMsg, LongMsg, LongMsg> ci =
       new LongColumnInterpreter();
   Filter f = new PrefixFilter(Bytes.toBytes("foo:bar"));
   scan.setFilter(f);
   long rowCount = aClient.rowCount(TEST_TABLE, ci, scan);
   assertEquals(0, rowCount);
 }
 /** @throws Throwable */
 @Test
 public void testMinWithValidRange2() throws Throwable {
   AggregationClient aClient = new AggregationClient(conf);
   Scan scan = new Scan();
   scan.addColumn(TEST_FAMILY, TEST_QUALIFIER);
   scan.setStartRow(ROWS[5]);
   scan.setStopRow(ROWS[15]);
   final ColumnInterpreter<Long, Long, EmptyMsg, LongMsg, LongMsg> ci =
       new LongColumnInterpreter();
   long min = aClient.min(TEST_TABLE, ci, scan);
   assertEquals(5, min);
 }
 /** @throws Throwable */
 @Test
 public void testMinWithValidRange() throws Throwable {
   AggregationClient aClient = new AggregationClient(conf);
   Scan scan = new Scan();
   scan.addColumn(TEST_FAMILY, TEST_QUALIFIER);
   scan.setStartRow(HConstants.EMPTY_START_ROW);
   scan.setStopRow(HConstants.EMPTY_END_ROW);
   final ColumnInterpreter<Long, Long, EmptyMsg, LongMsg, LongMsg> ci =
       new LongColumnInterpreter();
   Long min = aClient.min(TEST_TABLE, ci, scan);
   assertEquals(0l, min.longValue());
 }
 @Test
 public void testMaxWithFilter() throws Throwable {
   Long max = 0l;
   AggregationClient aClient = new AggregationClient(conf);
   Scan scan = new Scan();
   scan.addColumn(TEST_FAMILY, TEST_QUALIFIER);
   Filter f = new PrefixFilter(Bytes.toBytes("foo:bar"));
   scan.setFilter(f);
   final ColumnInterpreter<Long, Long, EmptyMsg, LongMsg, LongMsg> ci =
       new LongColumnInterpreter();
   max = aClient.max(TEST_TABLE, ci, scan);
   assertEquals(null, max);
 }
 @Test
 public void testAvgWithFilter() throws Throwable {
   AggregationClient aClient = new AggregationClient(conf);
   Scan scan = new Scan();
   scan.addColumn(TEST_FAMILY, TEST_QUALIFIER);
   Filter f = new PrefixFilter(Bytes.toBytes("foo:bar"));
   scan.setFilter(f);
   final ColumnInterpreter<Long, Long, EmptyMsg, LongMsg, LongMsg> ci =
       new LongColumnInterpreter();
   Double avg = null;
   avg = aClient.avg(TEST_TABLE, ci, scan);
   assertEquals(Double.NaN, avg, 0);
 }
  public static void main(String[] args) throws IOException {

    Configuration conf = HBaseClientHelper.loadDefaultConfiguration();

    Connection connection = ConnectionFactory.createConnection(conf);

    try {

      Table table = connection.getTable(TableName.valueOf("testtable"));
      try {
        // 1 Put
        Put p = new Put(Bytes.toBytes("row1"));
        p.addColumn(Bytes.toBytes("colfam1"), Bytes.toBytes("qual1"), Bytes.toBytes("val1"));

        table.put(p);

        // 2 Get
        Get g = new Get(Bytes.toBytes("row1"));
        Result r = table.get(g);
        byte[] value = r.getValue(Bytes.toBytes("colfam1"), Bytes.toBytes("qual1"));
        String valueStr = Bytes.toString(value);
        System.out.println("GET: " + valueStr);

        // 3 Scan
        Scan s = new Scan();
        s.addColumn(Bytes.toBytes("colfam1"), Bytes.toBytes("qual1"));
        ResultScanner scanner = table.getScanner(s);
        try {
          for (Result rr = scanner.next(); rr != null; rr = scanner.next()) {
            System.out.println("Found row: " + rr);
          }

          // The other approach is to use a foreach loop. Scanners are
          // iterable!
          // for (Result rr : scanner) {
          // System.out.println("Found row: " + rr);
          // }
        } finally {
          scanner.close();
        }

        // Close your table and cluster connection.
      } finally {
        if (table != null) table.close();
      }
    } finally {
      connection.close();
    }
  }
Example No. 23
  public List<RowLogMessage> next(String subscription, Long minimalTimestamp, boolean problematic)
      throws RowLogException {
    byte[] rowPrefix;
    byte[] subscriptionBytes = Bytes.toBytes(subscription);
    if (problematic) {
      rowPrefix = PROBLEMATIC_MARKER;
      rowPrefix = Bytes.add(rowPrefix, subscriptionBytes);
    } else {
      rowPrefix = subscriptionBytes;
    }
    byte[] startRow = rowPrefix;
    if (minimalTimestamp != null) startRow = Bytes.add(startRow, Bytes.toBytes(minimalTimestamp));
    try {
      List<RowLogMessage> rowLogMessages = new ArrayList<RowLogMessage>();
      Scan scan = new Scan(startRow);
      if (minimalTimestamp != null) scan.setTimeRange(minimalTimestamp, Long.MAX_VALUE);
      scan.addColumn(MESSAGES_CF, MESSAGE_COLUMN);
      ResultScanner scanner = table.getScanner(scan);
      boolean keepScanning = problematic;
      do {
        Result[] results = scanner.next(batchSize);
        if (results.length == 0) {
          keepScanning = false;
        }
        for (Result next : results) {
          byte[] rowKey = next.getRow();
          if (!Bytes.startsWith(rowKey, rowPrefix)) {
            keepScanning = false;
            break; // There were no messages for this subscription
          }
          if (problematic) {
            rowKey = Bytes.tail(rowKey, rowKey.length - PROBLEMATIC_MARKER.length);
          }
          byte[] value = next.getValue(MESSAGES_CF, MESSAGE_COLUMN);
          byte[] messageId = Bytes.tail(rowKey, rowKey.length - subscriptionBytes.length);
          rowLogMessages.add(decodeMessage(messageId, value));
        }
      } while (keepScanning);

      // The scanner is not closed in a finally block, since when we get an IOException from
      // HBase, it is likely that closing the scanner will give problems too. Not closing
      // the scanner is not fatal since HBase will expire it after a while.
      Closer.close(scanner);

      return rowLogMessages;
    } catch (IOException e) {
      throw new RowLogException("Failed to fetch next message from RowLogShard", e);
    }
  }
 @Test
 public void testAvgWithInvalidRange() {
   AggregationClient aClient = new AggregationClient(conf);
   Scan scan = new Scan();
   scan.addColumn(TEST_FAMILY, TEST_QUALIFIER);
   scan.setStartRow(ROWS[5]);
   scan.setStopRow(ROWS[1]);
   final ColumnInterpreter<Long, Long, EmptyMsg, LongMsg, LongMsg> ci =
       new LongColumnInterpreter();
   Double avg = null;
   try {
     avg = aClient.avg(TEST_TABLE, ci, scan);
   } catch (Throwable e) {
   }
   assertEquals(null, avg); // control should go to the catch block
 }
Example No. 25
    OfflineMerger(Configuration conf, FileSystem fs) throws IOException {
      super(conf, fs, HConstants.META_TABLE_NAME);

      Path rootTableDir =
          HTableDescriptor.getTableDir(
              fs.makeQualified(new Path(conf.get(HConstants.HBASE_DIR))),
              HConstants.ROOT_TABLE_NAME);

      // Scan root region to find all the meta regions

      root =
          HRegion.newHRegion(
              rootTableDir,
              hlog,
              fs,
              conf,
              HRegionInfo.ROOT_REGIONINFO,
              HTableDescriptor.ROOT_TABLEDESC,
              null);
      root.initialize();

      Scan scan = new Scan();
      scan.addColumn(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
      InternalScanner rootScanner = root.getScanner(scan);

      try {
        List<KeyValue> results = new ArrayList<KeyValue>();
        boolean hasMore;
        do {
          hasMore = rootScanner.next(results);
          for (KeyValue kv : results) {
            HRegionInfo info = Writables.getHRegionInfoOrNull(kv.getValue());
            if (info != null) {
              metaRegions.add(info);
            }
          }
        } while (hasMore);
      } finally {
        rootScanner.close();
        try {
          root.close();

        } catch (IOException e) {
          LOG.error(e);
        }
      }
    }
 @Test
 public void testMaxWithInvalidRange2() throws Throwable {
   long max = Long.MIN_VALUE;
   Scan scan = new Scan();
   scan.addColumn(TEST_FAMILY, TEST_QUALIFIER);
   scan.setStartRow(ROWS[4]);
   scan.setStopRow(ROWS[4]);
   try {
     AggregationClient aClient = new AggregationClient(conf);
     final ColumnInterpreter<Long, Long, EmptyMsg, LongMsg, LongMsg> ci =
         new LongColumnInterpreter();
     max = aClient.max(TEST_TABLE, ci, scan);
   } catch (Exception e) {
     max = 0;
   }
   assertEquals(0, max); // control should go to the catch block
 }
Example No. 27
  @Override
  public int run(String[] strings) throws Exception {
    Configuration conf = new Configuration();
    // String inputFileName = "/cluster/gmm.seq";
    String outputFileName = "/cluster/matrix_intermediate_" + level + ".seq";

    int result;
    System.out.println("level:" + level);
    conf.set("level", level + "");
    String table = "ClusterDatabase";
    // String seqFileName = "/cluster/gmm.seq";

    Scan scan = new Scan();
    scan.setStartRow((level + "|").getBytes());
    scan.setStopRow(
        Bytes.add((level + "|").getBytes(), Bytes.toBytes("ffffffffffffffffffffffffffffffff")));
    scan.addColumn("Cluster".getBytes(), "GMM".getBytes());

    // try (FileSystem fileSystem = FileSystem.get(conf)) {
    FileSystem fileSystem = FileSystem.get(conf);
    Path outputpath = new Path(outputFileName);
    if (fileSystem.exists(outputpath)) {
      fileSystem.delete(outputpath, true);
    }

    Job job = new Job(conf, "Matrix Creation I From HBase");
    job.setJarByClass(MatrixCreationI.class);
    TableMapReduceUtil.initTableMapperJob(
        table, scan, MatrixMapper.class, IntWritable.class, Text.class, job);
    job.setReducerClass(MatrixReducer.class);
    job.setMapOutputKeyClass(IntWritable.class);
    job.setMapOutputValueClass(Text.class);
    job.setOutputKeyClass(Text.class);
    job.setOutputValueClass(Text.class);

    //            job.setInputFormatClass(TableInputFormat.class);
    // job.setOutputFormatClass(TextOutputFormat.class);
    job.setOutputFormatClass(SequenceFileOutputFormat.class);

    job.setNumReduceTasks(1);
    // FileInputFormat.addInputPath(job, new Path(inputFileName + "/part*"));
    FileOutputFormat.setOutputPath(job, outputpath);
    result = job.waitForCompletion(true) ? 0 : 1;
    // }
    return result;
  }
  /**
   * This will test the row count with startrow > endrow. The result should be -1.
   *
   * @throws Throwable
   */
  @Test
  public void testRowCountWithInvalidRange1() {
    AggregationClient aClient = new AggregationClient(conf);
    Scan scan = new Scan();
    scan.addColumn(TEST_FAMILY, TEST_QUALIFIER);
    scan.setStartRow(ROWS[5]);
    scan.setStopRow(ROWS[2]);

    final ColumnInterpreter<Long, Long, EmptyMsg, LongMsg, LongMsg> ci =
        new LongColumnInterpreter();
    long rowCount = -1;
    try {
      rowCount = aClient.rowCount(TEST_TABLE, ci, scan);
    } catch (Throwable e) {
      myLog.error("Exception thrown in the invalidRange method", e);
    }
    assertEquals(-1, rowCount);
  }
  /**
   * This will test the row count with startrow = endrow, both non-null. The result should be 0,
   * since the scan is treated as a non-Get query.
   *
   * @throws Throwable
   */
  @Test
  public void testRowCountWithInvalidRange2() {
    AggregationClient aClient = new AggregationClient(conf);
    Scan scan = new Scan();
    scan.addColumn(TEST_FAMILY, TEST_QUALIFIER);
    scan.setStartRow(ROWS[5]);
    scan.setStopRow(ROWS[5]);

    final ColumnInterpreter<Long, Long, EmptyMsg, LongMsg, LongMsg> ci =
        new LongColumnInterpreter();
    long rowCount = -1;
    try {
      rowCount = aClient.rowCount(TEST_TABLE, ci, scan);
    } catch (Throwable e) {
      rowCount = 0;
    }
    assertEquals(0, rowCount);
  }
Example No. 30
  @SuppressWarnings({"rawtypes", "unchecked"})
  public void Read() throws IOException {
    List list = new ArrayList();
    Scan scan = new Scan();
    scan.setBatch(0);
    scan.setCaching(10000);
    scan.setMaxVersions();
    scan.addColumn(Bytes.toBytes("cf1"), Bytes.toBytes("total"));
    ResultScanner rsScanner = table.getScanner(scan);
    for (Result rs : rsScanner) {
      String date = Bytes.toString(rs.getRow());
      String total = Bytes.toString(rs.getValue(Bytes.toBytes("cf1"), Bytes.toBytes("total")));
      list.add(date + "\t" + total);
    }

    for (int i = 0; i < 7; i++)
      System.out.println((String) list.get(i) + "\t" + (String) list.get(i + 7));
  }