private HBaseConfiguration getConf() {
    HBaseConfiguration conf = new HBaseConfiguration();

    // disable compactions in this test.
    conf.setInt("hbase.hstore.compactionThreshold", 10000);
    return conf;
  }
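The no-argument HBaseConfiguration constructor used above has long been deprecated. A minimal sketch of the same helper written against the non-deprecated factory method (same compaction-threshold key as above):

  private Configuration getConf() {
    Configuration conf = HBaseConfiguration.create();
    // Disable compactions in this test by setting the threshold very high.
    conf.setInt("hbase.hstore.compactionThreshold", 10000);
    return conf;
  }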
Example 2
  private static void createTable() throws Exception {
    try {
      Configuration configuration = HBaseConfiguration.create();
      // Load hbase-site.xml and the other HBase resources before using the configuration
      HBaseConfiguration.addHbaseResources(configuration);
      HBaseAdmin.checkHBaseAvailable(configuration);
      Connection connection = ConnectionFactory.createConnection(configuration);

      // Instantiating the Admin class
      Admin admin = connection.getAdmin();

      // Instantiating the table descriptor class
      HTableDescriptor stockTableDesc =
          new HTableDescriptor(TableName.valueOf(Constants.STOCK_DATES_TABLE));

      // Adding column families to the table descriptor
      HColumnDescriptor stock_0414 = new HColumnDescriptor(Constants.STOCK_DATES_CF);
      stockTableDesc.addFamily(stock_0414);

      // Create the table through the admin if it does not already exist
      if (!admin.tableExists(stockTableDesc.getTableName())) {
        admin.createTable(stockTableDesc);
        System.out.println("Stock table created!");
      }

      admin.close();
      connection.close();
    } catch (ServiceException e) {
      log.error("Error occurred while creating HBase tables", e);
      throw new Exception("Error occurred while creating HBase tables", e);
    }
  }
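HTableDescriptor and HColumnDescriptor are deprecated as of HBase 2.0. A hedged sketch of the same table creation using the builder API, reusing the snippet's Constants (try-with-resources closes the connection and admin):

  try (Connection connection = ConnectionFactory.createConnection(HBaseConfiguration.create());
      Admin admin = connection.getAdmin()) {
    TableName tableName = TableName.valueOf(Constants.STOCK_DATES_TABLE);
    if (!admin.tableExists(tableName)) {
      TableDescriptor desc =
          TableDescriptorBuilder.newBuilder(tableName)
              .setColumnFamily(ColumnFamilyDescriptorBuilder.of(Constants.STOCK_DATES_CF))
              .build();
      admin.createTable(desc);
    }
  }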
Example 3
 public static void main(String[] args) throws IOException {
   @SuppressWarnings("deprecation")
   HBaseConfiguration conf = new HBaseConfiguration();
   conf.addResource("/usr/local/hbase-0.98.2/conf/hbase-site.xml");
   conf.set("hbase.master", "localhost:60000");
   CreateTable(conf);
   SeedData(conf);
 }
Example 4
  @Test
  public void testV1CodecV2Compat() throws Exception {

    long now = System.currentTimeMillis();

    // NOTE: set visibilityUpperBound to 0, as this is the expected default when decoding an older
    // version that doesn't store it
    TreeMap<Long, TransactionManager.InProgressTx> inProgress =
        Maps.newTreeMap(
            ImmutableSortedMap.of(
                16L, new TransactionManager.InProgressTx(0L, now + 1000),
                17L, new TransactionManager.InProgressTx(0L, now + 1000)));

    TransactionSnapshot snapshot =
        new TransactionSnapshot(
            now,
            15,
            18,
            Lists.newArrayList(5L, 7L),
            inProgress,
            ImmutableMap.<Long, Set<ChangeId>>of(
                17L,
                Sets.newHashSet(
                    new ChangeId(Bytes.toBytes("ch1")), new ChangeId(Bytes.toBytes("ch2")))),
            ImmutableMap.<Long, Set<ChangeId>>of(
                16L,
                Sets.newHashSet(
                    new ChangeId(Bytes.toBytes("ch2")), new ChangeId(Bytes.toBytes("ch3")))));

    Configuration configV1 = HBaseConfiguration.create();
    configV1.setStrings(
        TxConstants.Persist.CFG_TX_SNAPHOT_CODEC_CLASSES, SnapshotCodecV1.class.getName());

    SnapshotCodecProvider codecV1 = new SnapshotCodecProvider(configV1);

    // encoding with codec of v1
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    try {
      codecV1.encode(out, snapshot);
    } finally {
      out.close();
    }

    // decoding
    Configuration configV1V2 = HBaseConfiguration.create();
    configV1V2.setStrings(
        TxConstants.Persist.CFG_TX_SNAPHOT_CODEC_CLASSES,
        SnapshotCodecV1.class.getName(),
        SnapshotCodecV2.class.getName());
    SnapshotCodecProvider codecV1V2 = new SnapshotCodecProvider(configV1V2);
    TransactionSnapshot decoded = codecV1V2.decode(new ByteArrayInputStream(out.toByteArray()));

    assertEquals(snapshot, decoded);
  }
Example 5
 /**
  * @param args :
  *     <ol>
  *       <li>table name
  *     </ol>
  */
 public static void main(String[] args) {
   try {
     ToolRunner.run(HBaseConfiguration.create(), new CountRowsMR(), args);
   } catch (Exception e) {
     e.printStackTrace();
   }
 }
Example 6
  public static void main(String[] args) {
    if (args.length < 3) {
      System.out.println("JavaHBaseBulkPutExample  {master} {tableName} {columnFamily}");
      return;
    }

    String master = args[0];
    String tableName = args[1];
    String columnFamily = args[2];

    JavaSparkContext jsc = new JavaSparkContext(master, "JavaHBaseBulkPutExample");

    List<String> list = new ArrayList<String>();
    list.add("1," + columnFamily + ",a,1");
    list.add("2," + columnFamily + ",a,2");
    list.add("3," + columnFamily + ",a,3");
    list.add("4," + columnFamily + ",a,4");
    list.add("5," + columnFamily + ",a,5");
    JavaRDD<String> rdd = jsc.parallelize(list);

    Configuration conf = HBaseConfiguration.create();
    conf.addResource(new Path("/opt/hadoop-2.6.0/etc/hadoop/core-site.xml"));
    conf.addResource(new Path("/opt/hbase/conf/hbase-site.xml"));

    JavaHBaseContext hbaseContext = new JavaHBaseContext(jsc, conf);

    hbaseContext.bulkPut(rdd, tableName, new PutFunction(), true);
  }
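The example above references a PutFunction that is not shown here. A plausible sketch, assuming each input string carries "rowKey,family,qualifier,value" as built in the list above (Function is org.apache.spark.api.java.function.Function):

  public static class PutFunction implements Function<String, Put> {
    private static final long serialVersionUID = 1L;

    @Override
    public Put call(String v) throws Exception {
      String[] cells = v.split(",");
      Put put = new Put(Bytes.toBytes(cells[0]));
      // Old-style Put.add, matching the HBase version used by the surrounding example.
      put.add(Bytes.toBytes(cells[1]), Bytes.toBytes(cells[2]), Bytes.toBytes(cells[3]));
      return put;
    }
  }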
Example 7
  /**
   * Check duplicated tweet IDs in <b>tweetIdDir</b>, and output the duplicates to stdout.
   *
   * @param tweetIdDir directory containing the tweet ID files to check
   * @throws Exception
   */
  public static void checkTidDuplicates(String tweetIdDir) throws Exception {
    // First change path strings to URI strings starting with 'file:' or 'hdfs:'
    tweetIdDir = MultiFileFolderWriter.getUriStrForPath(tweetIdDir);

    Set<String> tidSet = new HashSet<String>();
    Configuration conf = HBaseConfiguration.create();
    FileSystem fs = FileSystem.get(new URI(tweetIdDir), conf);
    int dupCount = 0;
    for (FileStatus srcFileStatus : fs.listStatus(new Path(tweetIdDir))) {
      String srcFileName = srcFileStatus.getPath().getName();
      if (srcFileName.endsWith(".txt") && srcFileName.contains("tweetIds")) {
        BufferedReader brTid =
            new BufferedReader(new InputStreamReader(fs.open(srcFileStatus.getPath())));
        String tid = brTid.readLine();
        while (tid != null) {
          if (tidSet.contains(tid)) {
            System.out.println("Duplicated tweet ID: " + tid);
            dupCount++;
          } else {
            tidSet.add(tid);
          }
          tid = brTid.readLine();
        }
        brTid.close();
      }
    }
    System.out.println(
        "Number of unique tweet IDs: " + tidSet.size() + ", number of duplicates: " + dupCount);
  }
Example 8
  public static void deleteTest(String tableStr) {
    try {
      Configuration conf = HBaseConfiguration.create();
      byte[] tableName = Bytes.toBytes(tableStr);

      HConnection hConnection = HConnectionManager.createConnection(conf);
      HTableInterface table = hConnection.getTable(tableName);

      byte[] startRow = Bytes.toBytes("rowKey_1");
      byte[] stopRow = Bytes.toBytes("rowKey_3");
      byte[] family = f0;

      Scan scan = new Scan();
      scan.addFamily(family);
      scan.setMaxVersions(1);

      //            scan.setStartRow(startRow);
      //            scan.setStopRow(stopRow);

      ResultScanner scanner = table.getScanner(scan);
      Result result = scanner.next();
      List<Delete> deletes = new ArrayList<Delete>();
      while (result != null) {
        deletes.add(new Delete(result.getRow()));
        result = scanner.next();
      }
      scanner.close();
      table.delete(deletes);
      System.out.println("delete done");
      table.close(); // very important
      hConnection.close();
    } catch (IOException e) {
      e.printStackTrace();
    }
  }
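HConnectionManager and HTableInterface are deprecated in later HBase releases. A hedged sketch of the same scan-and-delete against the Connection/Table API (the table name is an assumption):

  try (Connection connection = ConnectionFactory.createConnection(HBaseConfiguration.create());
      Table table = connection.getTable(TableName.valueOf("demo_table"))) { // hypothetical table
    List<Delete> deletes = new ArrayList<Delete>();
    try (ResultScanner scanner = table.getScanner(new Scan())) {
      for (Result result : scanner) {
        deletes.add(new Delete(result.getRow()));
      }
    }
    table.delete(deletes);
  }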
Example 9
  public static void main(String[] args) throws IOException, NoSuchAlgorithmException {
    Configuration config = HBaseConfiguration.create();
    config.set("hbase.zookeeper.quorum", "distillery");
    HTable meadcam = new HTable(config, "meadcam");

    if (args[0].equals("delta")) {
      MeadRows meadrows = new MeadRows(meadcam);
      System.out.println("Average Delta: " + averageDeltas(meadrows));
    }

    if (args[0].equals("get")) {
      MeadRow row = new MeadRow(meadcam, args[1]);
      System.out.println("==== " + row.getBrewId() + " ====");
      System.out.println("Date:\t " + row.getDate());
      System.out.println("Delta:\t " + row.getDelta());
      System.out.println("RMS:\t " + row.getRMS());
    }

    if (args[0].equals("latest")) {
      System.out.println("Getting latest...");
      String brewId = args[1];
      // long minutes = Long.parseLong(args[2]);
      MeadRows meadrows = new MeadRows(meadcam, brewId);
      System.out.println("Average: " + Float.toString(averageDeltas(meadrows)));
    }

    meadcam.close();
  }
Example 10
  public static void main(String... args) throws IOException {
    Configuration config = HBaseConfiguration.create();
    // Add any necessary configuration files (hbase-site.xml, core-site.xml)
    //		config.addResource(new Path(System.getenv("HBASE_CONF_DIR"), "hbase-site.xml"));
    //		config.addResource(new Path(System.getenv("HADOOP_CONF_DIR"), "core-site.xml"));

    System.out.println("Argumentos: " + args.length);
    if (args.length == 2) {
      CF_DEFAULT = args[0];
      TABLE_NAME = args[1];

      TABLE_NAME1 = TableName.valueOf(TABLE_NAME);
      CF = Bytes.toBytes(CF_DEFAULT);
    } else {
      System.out.println("Invalid arguments");
      System.out.println(
          "Example invocation: hadoop jar /home/cloudera/Desktop/loadtohbase-0.0.1-SNAPSHOT.jar loadtohbase.ToHBase csvFile bdu");
      System.exit(0);
    }

    //		putTable(config);
    try {
      scanTable(config);

    } catch (URISyntaxException e) {
      e.printStackTrace();
      System.out.println(e.getMessage());
    }
    }
  }
Example 11
  public static void main(String[] args) throws Exception {
    final Configuration configuration = HBaseConfiguration.create();
    configuration.addResource("grade.xml");
    String tables = configuration.get("hbase.cdn.tables");
    if (Strings.isNullOrEmpty(tables)) {
      return;
    }
    List<String> list = Lists.newArrayList(Splitter.on(",").split(tables));
    List<String> results =
        Lists.transform(
            list,
            new Function<String, String>() {
              @Override
              public String apply(@Nullable String input) {
                return String.format(
                    configuration.get("hdfs.directory.base.db"), new Date(), input);
              }
            });

    String[] arrays =
        new String[] {
          Joiner.on(",").join(results),
          String.format(configuration.get("hdfs.directory.num.middle"), new Date()),
          String.format(configuration.get("hdfs.directory.num.result"), new Date())
        };
    AbstractJob job = new TopNJob();
    //        job.setStart(true);
    int i = ToolRunner.run(configuration, job, arrays);
    System.exit(i);
  }
Example 12
 /**
  * Test that operation timeout prevails over rpc default timeout and retries, etc.
  *
  * @throws IOException
  */
 @Test
 public void testRpcTimeout() throws IOException {
   Configuration localConfig = HBaseConfiguration.create(this.conf);
   // This override mocks up our exists/get call to throw a RegionServerStoppedException.
   localConfig.set("hbase.client.connection.impl", RpcTimeoutConnection.class.getName());
   int pause = 10;
   localConfig.setInt("hbase.client.pause", pause);
   localConfig.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 10);
   // Set the operation timeout to be < the pause.  Expectation is that after the first pause, we
   // will fail out of the rpc because the rpc timeout will have been set to the operation timeout
   // and it has expired.  Otherwise, if this functionality is broken, all retries will be run --
   // all ten of them -- and we'll get the RetriesExhaustedException.
   localConfig.setInt(HConstants.HBASE_CLIENT_META_OPERATION_TIMEOUT, pause - 1);
   HTable table = new HTable(localConfig, TableName.META_TABLE_NAME);
   Throwable t = null;
   try {
     // An exists call turns into a get w/ a flag.
     table.exists(new Get(Bytes.toBytes("abc")));
   } catch (SocketTimeoutException e) {
     // I expect this exception.
     LOG.info("Got expected exception", e);
     t = e;
   } catch (RetriesExhaustedException e) {
     // This is the old, unwanted behavior.  If we get here FAIL!!!
     fail();
   } finally {
     table.close();
   }
   assertTrue(t != null);
 }
Example 13
  private static void addDirectoryToClassPath(File directory) {
    try {
      // Get the classloader actually used by HBaseConfiguration
      ClassLoader classLoader = HBaseConfiguration.create().getClassLoader();
      if (!(classLoader instanceof URLClassLoader)) {
        fail("We should get a URLClassLoader");
      }

      // Make the addURL method accessible
      Method method = URLClassLoader.class.getDeclaredMethod("addURL", URL.class);
      method.setAccessible(true);

      // Add the directory where we put the hbase-site.xml to the classpath
      method.invoke(classLoader, directory.toURI().toURL());
    } catch (MalformedURLException
        | NoSuchMethodException
        | IllegalAccessException
        | InvocationTargetException e) {
      fail(
          "Unable to add "
              + directory
              + " to classpath because of this exception: "
              + e.getMessage());
    }
  }
Example 14
  @Override
  public void init(Context context) throws IOException {
    super.init(context);

    this.conf = HBaseConfiguration.create(context.getConfiguration());
    this.tableDescriptors = context.getTableDescriptors();

    // HRS multiplies client retries by 10 globally for meta operations, but we do not want this.
    // We are resetting it here because we want default number of retries (35) rather than 10 times
    // that which makes very long retries for disabled tables etc.
    int defaultNumRetries =
        conf.getInt(
            HConstants.HBASE_CLIENT_RETRIES_NUMBER, HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER);
    if (defaultNumRetries > 10) {
      int mult = conf.getInt("hbase.client.serverside.retries.multiplier", 10);
      defaultNumRetries = defaultNumRetries / mult; // reset if HRS has multiplied this already
    }

    conf.setInt("hbase.client.serverside.retries.multiplier", 1);
    int numRetries = conf.getInt(CLIENT_RETRIES_NUMBER, defaultNumRetries);
    conf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, numRetries);

    this.numWriterThreads = this.conf.getInt("hbase.region.replica.replication.writer.threads", 3);
    controller = new PipelineController();
    entryBuffers =
        new EntryBuffers(
            controller,
            this.conf.getInt("hbase.region.replica.replication.buffersize", 128 * 1024 * 1024));

    // use the regular RPC timeout for replica replication RPC's
    this.operationTimeout =
        conf.getInt(
            HConstants.HBASE_CLIENT_OPERATION_TIMEOUT,
            HConstants.DEFAULT_HBASE_CLIENT_OPERATION_TIMEOUT);
  }
Example 15
 /**
  * Constructor to set maximum versions and use the specified configuration, table factory and pool
  * type. The HTablePool supports the {@link PoolType#Reusable} and {@link PoolType#ThreadLocal}.
  * If the pool type is null or not one of those two values, then it will default to {@link
  * PoolType#Reusable}.
  *
  * @param config configuration
  * @param maxSize maximum number of references to keep for each table
  * @param tableFactory table factory
  * @param poolType pool type which is one of {@link PoolType#Reusable} or {@link
  *     PoolType#ThreadLocal}
  */
 public HTablePool(
     final Configuration config,
     final int maxSize,
     final HTableInterfaceFactory tableFactory,
     PoolType poolType) {
   // Make a new configuration instance so I can safely cleanup when
   // done with the pool.
   this.config = config == null ? HBaseConfiguration.create() : config;
   this.maxSize = maxSize;
   this.tableFactory = tableFactory == null ? new HTableFactory() : tableFactory;
   if (poolType == null) {
     this.poolType = PoolType.Reusable;
   } else {
     switch (poolType) {
       case Reusable:
       case ThreadLocal:
         this.poolType = poolType;
         break;
       default:
         this.poolType = PoolType.Reusable;
         break;
     }
   }
   this.tables = new PoolMap<String, HTableInterface>(this.poolType, this.maxSize);
 }
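A brief usage sketch for the pool above (the table name is hypothetical; the null tableFactory and ThreadLocal pool type exercise the defaulting logic described in the javadoc):

  HTablePool pool = new HTablePool(HBaseConfiguration.create(), 10, null, PoolType.ThreadLocal);
  HTableInterface table = pool.getTable("demo_table"); // hypothetical table name
  try {
    Put put = new Put(Bytes.toBytes("row1"));
    put.add(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v"));
    table.put(put);
  } finally {
    table.close(); // in later HTablePool versions this returns the table to the pool
  }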
Example 16
 public static Path[] getInputPaths(String rootPath) {
   try {
     Configuration conf = HBaseConfiguration.create();
     Path root = new Path(rootPath);
     ArrayList<Path> paths = new ArrayList<Path>();
     FileSystem fs = root.getFileSystem(conf);
     LinkedList<Path> list = new LinkedList<Path>();
     list.push(root);
     if (!fs.exists(root)) {
       System.out.println("path not exists: " + root.toString());
       return new Path[0];
     }
     while (!list.isEmpty()) {
       Path path = list.pop();
       if (fs.isFile(path)) {
         if (path.getName().matches("^.*part-r-\\d{5}.*$")) {
           paths.add(path);
           System.out.println("something is wrong with path" + path.toString());
         }
       } else {
         FileStatus[] statuses = fs.listStatus(path);
         for (FileStatus status : statuses) {
           if (status.isDir()) {
             list.add(status.getPath());
           } else if (status.getPath().getName().matches("^.*part-r-\\d{5}.*$")) {
             paths.add(status.getPath());
           }
         }
       }
     }
     return paths.toArray(new Path[paths.size()]);
   } catch (IOException ignored) {
     return new Path[0];
   }
 }
Example 17
  @Test
  public void testRegionObserverFlushTimeStacking() throws Exception {
    byte[] ROW = Bytes.toBytes("testRow");
    byte[] TABLE = Bytes.toBytes(getClass().getName());
    byte[] A = Bytes.toBytes("A");
    byte[][] FAMILIES = new byte[][] {A};

    Configuration conf = HBaseConfiguration.create();
    HRegion region = initHRegion(TABLE, getClass().getName(), conf, FAMILIES);
    RegionCoprocessorHost h = region.getCoprocessorHost();
    h.load(NoDataFromFlush.class, Coprocessor.PRIORITY_HIGHEST, conf);
    h.load(EmptyRegionObsever.class, Coprocessor.PRIORITY_USER, conf);

    // put a row and flush it to disk
    Put put = new Put(ROW);
    put.add(A, A, A);
    region.put(put);
    region.flushcache();
    Get get = new Get(ROW);
    Result r = region.get(get);
    assertNull(
        "Got an unexpected number of rows - no data should be returned with the NoDataFromFlush coprocessor. Found: "
            + r,
        r.listCells());
  }
Example 18
 /**
  * @param args
  * @author Nagamallikarjuna
  * @throws IOException
  */
 public static void main(String[] args) throws IOException {
   Configuration conf = HBaseConfiguration.create();
   HTable table = new HTable(conf, "stocks");
   File file = new File("/home/naga/bigdata/hadoop-1.0.3/daily");
   BufferedReader br = new BufferedReader(new FileReader(file));
   String line = br.readLine();
   Put data = null;
   while (line != null) {
      String[] parts = line.trim().split("\\t");
     if (parts.length == 9) {
       String key = parts[1] + ":" + parts[2];
       data = new Put(key.getBytes());
       data.add("cf".getBytes(), "exchange".getBytes(), parts[0].getBytes());
       data.add("cf".getBytes(), "open".getBytes(), parts[3].getBytes());
       data.add("cf".getBytes(), "high".getBytes(), parts[4].getBytes());
       data.add("cf".getBytes(), "low".getBytes(), parts[5].getBytes());
       data.add("cf".getBytes(), "close".getBytes(), parts[6].getBytes());
       data.add("cf".getBytes(), "volume".getBytes(), parts[7].getBytes());
       data.add("cf".getBytes(), "adj_close".getBytes(), parts[8].getBytes());
       table.put(data);
     }
     line = br.readLine();
   }
   br.close();
   table.close();
 }
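For reference, the loader above expects nine tab-separated fields per line: exchange, then two key fields (presumably symbol and date, joined into the row key), followed by open, high, low, close, volume, and adj_close. A made-up example row (fields separated by tabs):

  NYSE	AAA	2010-02-08	2.01	2.05	1.99	2.03	124000	2.03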
Example 19
  public static void main(String[] args) throws Exception {
    if (args.length < 2) {
      throw new Exception("Usage: <table name> <start key>");
    }
    Configuration conf = HBaseConfiguration.create();
    HTable table = new HTable(conf, args[0]);
    String startKey = args[1];

    TimeCounter executeTimer = new TimeCounter();
    executeTimer.begin();
    executeTimer.enter();

    Expression exp =
        ExpressionFactory.eq(
            ExpressionFactory.toLong(
                ExpressionFactory.toString(ExpressionFactory.columnValue("family", "longStr2"))),
            ExpressionFactory.constant(Long.parseLong("99")));
    ExpressionFilter expressionFilter = new ExpressionFilter(exp);
    Scan scan = new Scan(Bytes.toBytes(startKey), expressionFilter);
    int count = 0;
    ResultScanner scanner = table.getScanner(scan);
    Result r = scanner.next();
    while (r != null) {
      count++;
      r = scanner.next();
    }
    System.out.println("++ Scanning finished with count : " + count + " ++");
    scanner.close();

    executeTimer.leave();
    executeTimer.end();
    System.out.println("++ Time cost for scanning: " + executeTimer.getTimeString() + " ++");
  }
Example 20
/**
 * Implementation notes for HBaseFactory.java: TODO class description.
 *
 * @author zhaoheng Jul 28, 2015 4:53:52 PM
 */
public class HBaseFactory {

  protected static final Configuration CONF = HBaseConfiguration.create();
  protected static final Map<String, CMap> CMPS = new HashMap<String, CMap>();
  protected static final Map<String, HTable> TABLES = new HashMap<String, HTable>();

  public static synchronized HTable getTable(Table tb) throws IOException {
    if (TABLES.get(tb.tbName) != null) {
      return TABLES.get(tb.tbName);
    } else {
      HTable table = new HTable(CONF, tb.tbName);
      table.setAutoFlush(true, false);
      TABLES.put(tb.tbName, table);
      return table;
    }
  }

  public static synchronized CMap getHBaseCMap(Table tb) throws IOException {
    if (CMPS.get(tb.tbName) != null) {
      return CMPS.get(tb.tbName);
    } else {
      CMap table = new HBaseCMap(tb);
      CMPS.put(tb.tbName, table);
      return table;
    }
  }
}
Example 21
 /**
  * Remove the @Ignore to try out timeout and retry settings.
  *
  * @throws IOException
  */
 @Ignore
 @Test
 public void testTimeoutAndRetries() throws IOException {
   Configuration localConfig = HBaseConfiguration.create(this.conf);
   // This override mocks up our exists/get call to throw a RegionServerStoppedException.
   localConfig.set("hbase.client.connection.impl", RpcTimeoutConnection.class.getName());
   HTable table = new HTable(localConfig, TableName.META_TABLE_NAME);
   Throwable t = null;
   LOG.info("Start");
   try {
     // An exists call turns into a get w/ a flag.
     table.exists(new Get(Bytes.toBytes("abc")));
   } catch (SocketTimeoutException e) {
     // I expect this exception.
     LOG.info("Got expected exception", e);
     t = e;
   } catch (RetriesExhaustedException e) {
     // This is the old, unwanted behavior.  If we get here FAIL!!!
     fail();
   } finally {
     table.close();
   }
   LOG.info("Stop");
   assertTrue(t != null);
 }
Example 22
  /**
   * @param conn The HBase connection.
   * @param conf The HBase configuration
   * @param perRegionServerBufferQueueSize determines the max number of the buffered Put ops for
   *     each region server before dropping the request.
   */
  public HTableMultiplexer(
      Connection conn, Configuration conf, int perRegionServerBufferQueueSize) {
    this.conn = (ClusterConnection) conn;
    this.pool = HTable.getDefaultExecutor(conf);
    // how many times we could try in total, one more than retry number
    this.maxAttempts =
        conf.getInt(
                HConstants.HBASE_CLIENT_RETRIES_NUMBER,
                HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER)
            + 1;
    this.perRegionServerBufferQueueSize = perRegionServerBufferQueueSize;
    this.maxKeyValueSize = HTable.getMaxKeyValueSize(conf);
    this.flushPeriod = conf.getLong(TABLE_MULTIPLEXER_FLUSH_PERIOD_MS, 100);
    int initThreads = conf.getInt(TABLE_MULTIPLEXER_INIT_THREADS, 10);
    this.executor =
        Executors.newScheduledThreadPool(
            initThreads,
            new ThreadFactoryBuilder()
                .setDaemon(true)
                .setNameFormat("HTableFlushWorker-%d")
                .build());

    this.workerConf = HBaseConfiguration.create(conf);
    // We do not do the retry because we need to reassign puts to different queues if regions are
    // moved.
    this.workerConf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 0);
  }
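A minimal usage sketch for the multiplexer (the connection and table name are assumptions; put() is non-blocking and returns false when the per-regionserver buffer queue is full):

  Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
  HTableMultiplexer multiplexer = new HTableMultiplexer(conn, conn.getConfiguration(), 1000);
  Put put = new Put(Bytes.toBytes("row1"));
  put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v"));
  boolean queued = multiplexer.put(TableName.valueOf("demo_table"), put); // hypothetical table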
Example 23
 public static void addColumnFamily(String keyspace, String columnFamily) throws Exception {
   Configuration conf = HBaseConfiguration.create();
   HBaseAdmin admin = new HBaseAdmin(conf);
   HColumnDescriptor column = new HColumnDescriptor(columnFamily);
   // admin.deleteColumn(keyspace, columnFamily);
   admin.addColumn(keyspace, column);
   admin.close();
 }
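The HBaseAdmin constructor above is deprecated in later releases. A hedged sketch of the same operation through the HBase 2.x Admin API:

  try (Connection connection = ConnectionFactory.createConnection(HBaseConfiguration.create());
      Admin admin = connection.getAdmin()) {
    admin.addColumnFamily(
        TableName.valueOf(keyspace), ColumnFamilyDescriptorBuilder.of(columnFamily));
  }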
Example 24
  @Override
  public void configure(HTraceConfiguration conf) {
    this.conf = conf;
    this.hconf = HBaseConfiguration.create();
    this.table = Bytes.toBytes(conf.get(TABLE_KEY, DEFAULT_TABLE));
    this.cf = Bytes.toBytes(conf.get(COLUMNFAMILY_KEY, DEFAULT_COLUMNFAMILY));
    this.maxSpanBatchSize = conf.getInt(MAX_SPAN_BATCH_SIZE_KEY, DEFAULT_MAX_SPAN_BATCH_SIZE);
    String quorum = conf.get(COLLECTOR_QUORUM_KEY, DEFAULT_COLLECTOR_QUORUM);
    hconf.set(HConstants.ZOOKEEPER_QUORUM, quorum);
    String znodeParent = conf.get(ZOOKEEPER_ZNODE_PARENT_KEY, DEFAULT_ZOOKEEPER_ZNODE_PARENT);
    hconf.set(HConstants.ZOOKEEPER_ZNODE_PARENT, znodeParent);
    int clientPort = conf.getInt(ZOOKEEPER_CLIENT_PORT_KEY, DEFAULT_ZOOKEEPER_CLIENT_PORT);
    hconf.setInt(HConstants.ZOOKEEPER_CLIENT_PORT, clientPort);

    // If there are already threads running, tear them down.
    if (this.service != null) {
      this.service.shutdownNow();
      this.service = null;
    }
    int numThreads = conf.getInt(NUM_THREADS_KEY, DEFAULT_NUM_THREADS);
    this.service = Executors.newFixedThreadPool(numThreads, tf);
    for (int i = 0; i < numThreads; i++) {
      this.service.submit(new WriteSpanRunnable());
    }
  }
Example 25
  @Override
  public Event intercept(Event event) {
    Map<String, String> headers = event.getHeaders();
    String fileName = headers.get("file");
    String fileType = getFileType(new String(event.getBody()));
    Configuration conf = HBaseConfiguration.create();
    HTable table = null;
    try {
      table = new HTable(conf, "fs");
      Put put = new Put(Bytes.toBytes(fileType + "_" + fileName));
      put.add(Bytes.toBytes("fn"), Bytes.toBytes("ST"), Bytes.toBytes("PICKED"));
      table.put(put);
    } catch (IOException e) {
      // Covers RetriesExhaustedWithDetailsException and InterruptedIOException as well.
      e.printStackTrace();
    } finally {
      if (table != null) {
        try {
          table.close();
        } catch (IOException e) {
          e.printStackTrace();
        }
      }
    }
    return event;
  }
Example 26
  public static void main(String[] args) {
    if (args.length < 2) {
      System.out.println("JavaHBaseDistributedScan  {master} {tableName}");
      return;
    }

    String master = args[0];
    String tableName = args[1];

    JavaSparkContext jsc = new JavaSparkContext(master, "JavaHBaseDistributedScan");
    jsc.addJar("SparkHBase.jar");

    Configuration conf = HBaseConfiguration.create();
    conf.addResource(new Path("/etc/hbase/conf/core-site.xml"));
    conf.addResource(new Path("/etc/hbase/conf/hbase-site.xml"));

    JavaHBaseContext hbaseContext = new JavaHBaseContext(jsc, conf);

    Scan scan = new Scan();
    scan.setCaching(100);

    JavaRDD<Tuple2<byte[], List<Tuple3<byte[], byte[], byte[]>>>> javaRdd =
        hbaseContext.hbaseRDD(tableName, scan);

    List<Tuple2<byte[], List<Tuple3<byte[], byte[], byte[]>>>> results = javaRdd.collect();

    System.out.println("Scanned " + results.size() + " rows");
  }
Example 27
  public synchronized Configuration configuration() {
    if (conf == null) {
      conf = HBaseConfiguration.create();
    }

    return conf;
  }
Example 28
 @Before
 public void setUp() {
   conf = HBaseConfiguration.create();
   rpcServices = Mockito.mock(RSRpcServices.class);
   when(rpcServices.getConfiguration()).thenReturn(conf);
   qosFunction = new AnnotationReadingPriorityFunction(rpcServices, RSRpcServices.class);
 }
Example 29
  public Blog(String blogid) throws IOException {

    Configuration conf = HBaseConfiguration.create();
    table = new HTable(conf, "blogs");

    // 1. Get the row whose row key is blogid from above
    Get g = new Get(Bytes.toBytes(blogid));
    Result r = table.get(g);

    // 2. Extract the rowkey, blog text (column "body") and blog title
    // (column "meta:title")
    key = r.getRow();
    keyStr = Bytes.toString(key);
    blogText = Bytes.toString(r.getValue(Bytes.toBytes("body"), Bytes.toBytes("")));
    blogTitle = Bytes.toString(r.getValue(Bytes.toBytes("meta"), Bytes.toBytes("title")));
    Long reverseTimestamp = Long.parseLong(keyStr.substring(4));
    Long epoch = Math.abs(reverseTimestamp - Long.MAX_VALUE);
    dateOfPost = new Date(epoch);

    // Get an iterator for the comments
    Scan s = new Scan();
    s.addFamily(Bytes.toBytes("comment"));
    // Use a PrefixFilter
    PrefixFilter filter = new PrefixFilter(key);
    s.setFilter(filter);
    scanner = table.getScanner(s);
    resultIterator = scanner.iterator();
  }
Example 30
  public static void main(String[] args)
      throws MasterNotRunningException, ZooKeeperConnectionException, IOException {
    Configuration conf = HBaseConfiguration.create();
    conf.set(
        "hbase.zookeeper.quorum", "192.168.10.163:2181,192.168.10.164:2181,192.168.10.165:2181");

    // admin client
    HBaseAdmin admin = new HBaseAdmin(conf);
    // table descriptor
    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("people"));
    // the "info" column family
    HColumnDescriptor hcd_info = new HColumnDescriptor("info");
    hcd_info.setMaxVersions(3);
    // the "data" column family
    HColumnDescriptor hcd_data = new HColumnDescriptor("data");

    // add the column families to the table descriptor
    htd.addFamily(hcd_info);
    htd.addFamily(hcd_data);

    // create the table
    admin.createTable(htd);

    // close the connection
    admin.close();
  }