Example #1
  public static void run(
      String instanceName, String zookeepers, AuthenticationToken rootPassword, String[] args)
      throws Exception {
    // edit this method to play with Accumulo

    Instance instance = new ZooKeeperInstance(instanceName, zookeepers);

    Connector conn = instance.getConnector("root", rootPassword);

    conn.tableOperations().create("foo");

    BatchWriterConfig bwConfig = new BatchWriterConfig();
    bwConfig.setMaxLatency(60000L, java.util.concurrent.TimeUnit.MILLISECONDS);
    bwConfig.setMaxWriteThreads(3);
    bwConfig.setMaxMemory(50000000);
    BatchWriter bw = conn.createBatchWriter("foo", bwConfig);
    Mutation m = new Mutation("r1");
    m.put("cf1", "cq1", "v1");
    m.put("cf1", "cq2", "v3");
    bw.addMutation(m);
    bw.close();

    Scanner scanner = conn.createScanner("foo", Constants.NO_AUTHS);
    for (Entry<Key, Value> entry : scanner) {
      System.out.println(entry.getKey() + " " + entry.getValue());
    }
  }
Example #2
  @Override
  public void setup() {

    Connector conn = getConnector();
    String tableName = getTestProperty("TABLE");

    // delete existing table
    if (conn.tableOperations().exists(tableName)) {
      System.out.println("Deleting existing table: " + tableName);
      try {
        conn.tableOperations().delete(tableName);
      } catch (Exception e) {
        log.error("Failed to delete table '" + tableName + "'.", e);
      }
    }

    // create table
    try {
      conn.tableOperations().create(tableName);
      conn.tableOperations().addSplits(tableName, calculateSplits());
      conn.tableOperations().setProperty(tableName, "table.split.threshold", "256M");
    } catch (Exception e) {
      log.error("Failed to create table '" + tableName + "'.", e);
    }
  }
Example #3
  @Override
  public void tearDown(State state, Environment env) throws Exception {
    // We have resources we need to clean up
    if (env.isMultiTableBatchWriterInitialized()) {
      MultiTableBatchWriter mtbw = env.getMultiTableBatchWriter();
      try {
        mtbw.close();
      } catch (MutationsRejectedException e) {
        log.error("Ignoring mutations that weren't flushed", e);
      }

      // Reset the MTBW on the state to null
      env.resetMultiTableBatchWriter();
    }

    // Now we can safely delete the tables
    log.debug("Dropping tables: " + imageTableName + " " + indexTableName);

    Connector conn = env.getConnector();

    conn.tableOperations().delete(imageTableName);
    conn.tableOperations().delete(indexTableName);

    log.debug("Final total of writes: " + state.getLong("totalWrites"));
  }
Example #4
    public void addTable(Text tableName) throws AccumuloException, AccumuloSecurityException {
      if (simulate) {
        log.info("Simulating adding table: " + tableName);
        return;
      }

      log.debug("Adding table: " + tableName);
      BatchWriter bw = null;
      String table = tableName.toString();

      if (createTables && !conn.tableOperations().exists(table)) {
        try {
          conn.tableOperations().create(table);
        } catch (AccumuloSecurityException e) {
          log.error("Accumulo security violation creating " + table, e);
          throw e;
        } catch (TableExistsException e) {
          // Shouldn't happen
        }
      }

      try {
        bw = mtbw.getBatchWriter(table);
      } catch (TableNotFoundException e) {
        log.error("Accumulo table " + table + " doesn't exist and cannot be created.", e);
        throw new AccumuloException(e);
      } catch (AccumuloException e) {
        throw e;
      } catch (AccumuloSecurityException e) {
        throw e;
      }

      if (bw != null) bws.put(tableName, bw);
    }
Example #5
  @Override
  public void visit(State state, Properties props) throws Exception {
    Connector conn = state.getConnector();

    Random rand = (Random) state.get("rand");

    @SuppressWarnings("unchecked")
    List<String> tableNames = (List<String>) state.get("tables");

    String tableName = tableNames.get(rand.nextInt(tableNames.size()));

    // TODO need to sometimes do null start and end ranges

    TreeSet<Text> range = new TreeSet<Text>();
    range.add(new Text(String.format("%016x", Math.abs(rand.nextLong()))));
    range.add(new Text(String.format("%016x", Math.abs(rand.nextLong()))));

    try {
      boolean wait = rand.nextBoolean();
      conn.tableOperations().compact(tableName, range.first(), range.last(), false, wait);
      log.debug((wait ? "compacted " : "initiated compaction ") + tableName);
    } catch (TableNotFoundException tne) {
      log.debug("compact " + tableName + " failed, doesnt exist");
    } catch (TableOfflineException toe) {
      log.debug("compact " + tableName + " failed, offline");
    }
  }
Example #6
  @Before
  public void init()
      throws AccumuloException, AccumuloSecurityException, TableNotFoundException,
          TableExistsException {

    mock = new MockInstance("accumulo");
    PasswordToken pToken = new PasswordToken("pass".getBytes());
    conn = mock.getConnector("user", pToken);

    config = new BatchWriterConfig();
    config.setMaxMemory(1000);
    config.setMaxLatency(1000, TimeUnit.SECONDS);
    config.setMaxWriteThreads(10);

    if (conn.tableOperations().exists("rya_prospects")) {
      conn.tableOperations().delete("rya_prospects");
    }
    if (conn.tableOperations().exists("rya_selectivity")) {
      conn.tableOperations().delete("rya_selectivity");
    }

    arc = new AccumuloRdfConfiguration();
    arc.setTableLayoutStrategy(new TablePrefixLayoutStrategy());
    arc.setMaxRangesForScanner(300);
    res = new ProspectorServiceEvalStatsDAO(conn, arc);
  }
Example #7
  @Override
  public void visit(State state, Properties props) throws Exception {
    Connector conn = state.getConnector();

    Random rand = (Random) state.get("rand");

    @SuppressWarnings("unchecked")
    List<String> tableNames = (List<String>) state.get("tables");

    String tableName = tableNames.get(rand.nextInt(tableNames.size()));

    try {
      Scanner scanner = conn.createScanner(tableName, Constants.NO_AUTHS);
      Iterator<Entry<Key, Value>> iter = scanner.iterator();
      while (iter.hasNext()) {
        iter.next();
      }
      log.debug("Scanned " + tableName);
    } catch (TableDeletedException e) {
      log.debug("Scan " + tableName + " failed, table deleted");
    } catch (TableNotFoundException e) {
      log.debug("Scan " + tableName + " failed, doesnt exist");
    } catch (TableOfflineException e) {
      log.debug("Scan " + tableName + " failed, offline");
    } catch (RuntimeException e) {
      if (e.getCause() instanceof AccumuloSecurityException) {
        log.debug("BatchScan " + tableName + " failed, permission error");
      } else {
        throw e;
      }
    }
  }
Example #8
  @Test
  public void test() throws Exception {
    Connector c = getConnector();
    // make a table
    String tableName = getUniqueNames(1)[0];
    c.tableOperations().create(tableName);
    // write to it
    BatchWriter bw = c.createBatchWriter(tableName, new BatchWriterConfig());
    Mutation m = new Mutation("row");
    m.put("cf", "cq", "value");
    bw.addMutation(m);
    bw.close();

    // create a fake _tmp file in its directory
    String id = c.tableOperations().tableIdMap().get(tableName);
    FileSystem fs = getCluster().getFileSystem();
    Path tmp = new Path("/accumulo/tables/" + id + "/default_tablet/junk.rf_tmp");
    fs.create(tmp).close();
    for (ProcessReference tserver : getCluster().getProcesses().get(ServerType.TABLET_SERVER)) {
      getCluster().killProcess(ServerType.TABLET_SERVER, tserver);
    }
    getCluster().start();

    Scanner scanner = c.createScanner(tableName, Authorizations.EMPTY);
    FunctionalTestUtils.count(scanner);
    assertFalse(fs.exists(tmp));
  }
Example #9
 @Test(timeout = 2 * 60 * 1000)
 public void test() throws Exception {
   Connector c = getConnector();
   c.tableOperations().create("test_ingest");
   deleteTest(c, cluster);
   assertEquals(0, cluster.exec(Admin.class, "stopAll").waitFor());
 }
Example #10
  /**
   * Sets the connector information needed to communicate with Accumulo in this job.
   *
   * <p><b>WARNING:</b> Some tokens, when serialized, divulge sensitive information in the
   * configuration as a means to pass the token to MapReduce tasks. This information is BASE64
   * encoded to provide a charset-safe conversion to a string, but this conversion is not intended
   * to be secure. {@link PasswordToken} is one example that is insecure in this way; however,
   * {@link DelegationToken}s, acquired using
   * {@link SecurityOperations#getDelegationToken(DelegationTokenConfig)}, are not subject to this
   * concern.
   *
   * @param job the Hadoop job instance to be configured
   * @param principal a valid Accumulo user name (user must have Table.CREATE permission)
   * @param token the user's password
   * @since 1.5.0
   */
  public static void setConnectorInfo(JobConf job, String principal, AuthenticationToken token)
      throws AccumuloSecurityException {
    if (token instanceof KerberosToken) {
      log.info("Received KerberosToken, attempting to fetch DelegationToken");
      try {
        Instance instance = getInstance(job);
        Connector conn = instance.getConnector(principal, token);
        token = conn.securityOperations().getDelegationToken(new DelegationTokenConfig());
      } catch (Exception e) {
        log.warn(
            "Failed to automatically obtain DelegationToken, Mappers/Reducers will likely fail to communicate with Accumulo",
            e);
      }
    }
    // DelegationTokens can be passed securely from user to task without serializing insecurely in
    // the configuration
    if (token instanceof DelegationTokenImpl) {
      DelegationTokenImpl delegationToken = (DelegationTokenImpl) token;

      // Convert it into a Hadoop Token
      AuthenticationTokenIdentifier identifier = delegationToken.getIdentifier();
      Token<AuthenticationTokenIdentifier> hadoopToken =
          new Token<>(
              identifier.getBytes(),
              delegationToken.getPassword(),
              identifier.getKind(),
              delegationToken.getServiceName());

      // Add the Hadoop Token to the Job so it gets serialized and passed along.
      job.getCredentials().addToken(hadoopToken.getService(), hadoopToken);
    }

    InputConfigurator.setConnectorInfo(CLASS, job, principal, token);
  }
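A minimal driver-side sketch of calling the method above, assuming it is exposed through the mapred AccumuloInputFormat (the enclosing class is not shown in the source) and using placeholder user and password values:

  // Hedged sketch: set connector info on a JobConf before submitting the MapReduce job.
  static void configureConnector(JobConf job) throws AccumuloSecurityException {
    // A PasswordToken is BASE64-serialized into the job configuration, so it is readable by
    // anyone who can read that configuration; fine for tests, not for real secrets.
    AccumuloInputFormat.setConnectorInfo(job, "mapreduceUser", new PasswordToken("notSecret"));

    // With Kerberos, pass a KerberosToken instead: as the Javadoc notes, setConnectorInfo then
    // fetches a DelegationToken and adds it to the job's Credentials so it reaches the tasks
    // without being serialized insecurely, e.g.:
    // AccumuloInputFormat.setConnectorInfo(job, "user@EXAMPLE.COM", new KerberosToken());
  }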
Example #11
 @Test
 public void test() throws Exception {
   Connector c = getConnector();
   String tableName = getUniqueNames(1)[0];
   c.tableOperations().create(tableName);
   BatchWriter bw = c.createBatchWriter(tableName, new BatchWriterConfig());
   Mutation m = new Mutation("row1");
   m.put("cf", "col1", "Test");
   bw.addMutation(m);
   bw.close();
   scanCheck(c, tableName, "Test");
   FileSystem fs = getCluster().getFileSystem();
   Path jarPath = new Path(rootPath + "/lib/ext/Test.jar");
   copyStreamToFileSystem(fs, "/TestCombinerX.jar", jarPath);
   sleepUninterruptibly(1, TimeUnit.SECONDS);
   IteratorSetting is =
       new IteratorSetting(10, "TestCombiner", "org.apache.accumulo.test.functional.TestCombiner");
   Combiner.setColumns(is, Collections.singletonList(new IteratorSetting.Column("cf")));
   c.tableOperations().attachIterator(tableName, is, EnumSet.of(IteratorScope.scan));
   sleepUninterruptibly(ZOOKEEPER_PROPAGATION_TIME, TimeUnit.MILLISECONDS);
   scanCheck(c, tableName, "TestX");
   fs.delete(jarPath, true);
   copyStreamToFileSystem(fs, "/TestCombinerY.jar", jarPath);
   sleepUninterruptibly(5, TimeUnit.SECONDS);
   scanCheck(c, tableName, "TestY");
   fs.delete(jarPath, true);
 }
Example #12
  private static void runTest(
      Connector conn, int numlg, ArrayList<byte[]> cfset, Map<String, Stat> stats)
      throws Exception {
    String table = "immlgb";

    try {
      conn.tableOperations().delete(table);
    } catch (TableNotFoundException tnfe) {
      // table does not exist yet, so there is nothing to delete
    }

    conn.tableOperations().create(table);
    conn.tableOperations()
        .setProperty(table, Property.TABLE_FILE_COMPRESSION_TYPE.getKey(), "snappy");

    setupLocalityGroups(conn, numlg, cfset, table);

    addStat(stats, "write", write(conn, cfset, table));
    addStat(stats, "scan cf", scan(conn, cfset, table, false));
    addStat(stats, "scan cf:cq", scan(conn, cfset, table, true));
    // TODO time reading all data

    long t1 = System.currentTimeMillis();
    conn.tableOperations().flush(table, null, null, true);
    long t2 = System.currentTimeMillis();

    addStat(stats, "flush", t2 - t1);
  }
Example #13
  @Override
  public int run(String[] args) {
    Opts opts = new Opts();
    opts.parseArgs(BulkIngestExample.class.getName(), args);

    Configuration conf = getConf();
    PrintStream out = null;
    try {
      Job job = JobUtil.getJob(conf);
      job.setJobName("bulk ingest example");
      job.setJarByClass(this.getClass());

      job.setInputFormatClass(TextInputFormat.class);

      job.setMapperClass(MapClass.class);
      job.setMapOutputKeyClass(Text.class);
      job.setMapOutputValueClass(Text.class);

      job.setReducerClass(ReduceClass.class);
      job.setOutputFormatClass(AccumuloFileOutputFormat.class);
      opts.setAccumuloConfigs(job);

      Connector connector = opts.getConnector();

      TextInputFormat.setInputPaths(job, new Path(opts.inputDir));
      AccumuloFileOutputFormat.setOutputPath(job, new Path(opts.workDir + "/files"));

      FileSystem fs = FileSystem.get(conf);
      out =
          new PrintStream(
              new BufferedOutputStream(fs.create(new Path(opts.workDir + "/splits.txt"))));

      Collection<Text> splits = connector.tableOperations().listSplits(opts.getTableName(), 100);
      for (Text split : splits)
        out.println(new String(Base64.encodeBase64(TextUtil.getBytes(split))));

      job.setNumReduceTasks(splits.size() + 1);
      out.close();

      job.setPartitionerClass(RangePartitioner.class);
      RangePartitioner.setSplitFile(job, opts.workDir + "/splits.txt");

      job.waitForCompletion(true);
      Path failures = new Path(opts.workDir, "failures");
      fs.delete(failures, true);
      fs.mkdirs(new Path(opts.workDir, "failures"));
      connector
          .tableOperations()
          .importDirectory(
              opts.getTableName(), opts.workDir + "/files", opts.workDir + "/failures", false);

    } catch (Exception e) {
      throw new RuntimeException(e);
    } finally {
      if (out != null) out.close();
    }

    return 0;
  }
Example #14
  @Test
  public void testBulkSplitOptimization() throws Exception {
    final Connector c = getConnector();
    final String tableName = getUniqueNames(1)[0];
    c.tableOperations().create(tableName);
    c.tableOperations().setProperty(tableName, Property.TABLE_MAJC_RATIO.getKey(), "1000");
    c.tableOperations().setProperty(tableName, Property.TABLE_FILE_MAX.getKey(), "1000");
    c.tableOperations().setProperty(tableName, Property.TABLE_SPLIT_THRESHOLD.getKey(), "1G");
    FileSystem fs = cluster.getFileSystem();
    Path testDir = new Path(getUsableDir(), "testmf");
    FunctionalTestUtils.createRFiles(c, fs, testDir.toString(), ROWS, SPLITS, 8);
    FileStatus[] stats = fs.listStatus(testDir);

    System.out.println("Number of generated files: " + stats.length);
    FunctionalTestUtils.bulkImport(c, fs, tableName, testDir.toString());
    FunctionalTestUtils.checkSplits(c, tableName, 0, 0);
    FunctionalTestUtils.checkRFiles(c, tableName, 1, 1, 100, 100);

    // initiate splits
    getConnector()
        .tableOperations()
        .setProperty(tableName, Property.TABLE_SPLIT_THRESHOLD.getKey(), "100K");

    sleepUninterruptibly(2, TimeUnit.SECONDS);

    // wait until over split threshold -- should be 78 splits
    while (getConnector().tableOperations().listSplits(tableName).size() < 75) {
      sleepUninterruptibly(500, TimeUnit.MILLISECONDS);
    }

    FunctionalTestUtils.checkSplits(c, tableName, 50, 100);
    VerifyIngest.Opts opts = new VerifyIngest.Opts();
    opts.timestamp = 1;
    opts.dataSize = 50;
    opts.random = 56;
    opts.rows = 100000;
    opts.startRow = 0;
    opts.cols = 1;
    opts.setTableName(tableName);

    AuthenticationToken adminToken = getAdminToken();
    if (adminToken instanceof PasswordToken) {
      PasswordToken token = (PasswordToken) getAdminToken();
      opts.setPassword(new Password(new String(token.getPassword(), UTF_8)));
      opts.setPrincipal(getAdminPrincipal());
    } else if (adminToken instanceof KerberosToken) {
      ClientConfiguration clientConf = cluster.getClientConfig();
      opts.updateKerberosCredentials(clientConf);
    } else {
      Assert.fail("Unknown token type");
    }

    VerifyIngest.verifyIngest(c, opts, new ScannerOpts());

    // ensure each tablet does not have all map files, should be ~2.5 files per tablet
    FunctionalTestUtils.checkRFiles(c, tableName, 50, 100, 1, 4);
  }
Example #15
 @After
 public void resetConfig() throws Exception {
   if (null != majcDelay) {
     Connector conn = getConnector();
     conn.instanceOperations().setProperty(Property.TSERV_MAJC_DELAY.getKey(), majcDelay);
     getClusterControl().stopAllServers(ServerType.TABLET_SERVER);
     getClusterControl().startAllServers(ServerType.TABLET_SERVER);
   }
 }
Example #16
 @Test
 public void run() throws Exception {
   Connector c = getConnector();
   String tableName = getUniqueNames(1)[0];
   c.tableOperations().create(tableName);
   for (int i = 0; i < 100000; i++) {
     c.createScanner(tableName, Authorizations.EMPTY);
   }
 }
Example #17
  @Before
  public void createMockKeyValues() throws Exception {
    // Make a MockInstance here; by setting the instance name in the job conf to match this mock
    // instance, we can "trick" the InputFormat into using a MockInstance
    mockInstance = new MockInstance(test.getMethodName());
    inputformat = new HiveAccumuloTableInputFormat();
    conf = new JobConf();
    conf.set(AccumuloSerDeParameters.TABLE_NAME, TEST_TABLE);
    conf.set(AccumuloSerDeParameters.USE_MOCK_INSTANCE, "true");
    conf.set(AccumuloSerDeParameters.INSTANCE_NAME, test.getMethodName());
    conf.set(AccumuloSerDeParameters.USER_NAME, USER);
    conf.set(AccumuloSerDeParameters.USER_PASS, PASS);
    conf.set(AccumuloSerDeParameters.ZOOKEEPERS, "localhost:2181"); // not used for mock, but
    // required by input format.

    columnNames = Arrays.asList("name", "sid", "dgrs", "mills");
    columnTypes =
        Arrays.<TypeInfo>asList(
            TypeInfoFactory.stringTypeInfo,
            TypeInfoFactory.intTypeInfo,
            TypeInfoFactory.doubleTypeInfo,
            TypeInfoFactory.longTypeInfo);
    conf.set(AccumuloSerDeParameters.COLUMN_MAPPINGS, "cf:name,cf:sid,cf:dgrs,cf:mills");
    conf.set(serdeConstants.LIST_COLUMNS, "name,sid,dgrs,mills");
    conf.set(serdeConstants.LIST_COLUMN_TYPES, "string,int,double,bigint");

    con = mockInstance.getConnector(USER, new PasswordToken(PASS.getBytes()));
    con.tableOperations().create(TEST_TABLE);
    con.securityOperations().changeUserAuthorizations(USER, new Authorizations("blah"));
    BatchWriterConfig writerConf = new BatchWriterConfig();
    BatchWriter writer = con.createBatchWriter(TEST_TABLE, writerConf);

    Mutation m1 = new Mutation(new Text("r1"));
    m1.put(COLUMN_FAMILY, NAME, new Value("brian".getBytes()));
    m1.put(COLUMN_FAMILY, SID, new Value(parseIntBytes("1")));
    m1.put(COLUMN_FAMILY, DEGREES, new Value(parseDoubleBytes("44.5")));
    m1.put(COLUMN_FAMILY, MILLIS, new Value(parseLongBytes("555")));

    Mutation m2 = new Mutation(new Text("r2"));
    m2.put(COLUMN_FAMILY, NAME, new Value("mark".getBytes()));
    m2.put(COLUMN_FAMILY, SID, new Value(parseIntBytes("2")));
    m2.put(COLUMN_FAMILY, DEGREES, new Value(parseDoubleBytes("55.5")));
    m2.put(COLUMN_FAMILY, MILLIS, new Value(parseLongBytes("666")));

    Mutation m3 = new Mutation(new Text("r3"));
    m3.put(COLUMN_FAMILY, NAME, new Value("dennis".getBytes()));
    m3.put(COLUMN_FAMILY, SID, new Value(parseIntBytes("3")));
    m3.put(COLUMN_FAMILY, DEGREES, new Value(parseDoubleBytes("65.5")));
    m3.put(COLUMN_FAMILY, MILLIS, new Value(parseLongBytes("777")));

    writer.addMutation(m1);
    writer.addMutation(m2);
    writer.addMutation(m3);

    writer.close();
  }
Example #18
 @Override
 public Connector getConnector(String principal, AuthenticationToken token)
     throws AccumuloException, AccumuloSecurityException {
   Connector conn = new MockConnector(new Credentials(principal, token), acu, this);
   if (!acu.users.containsKey(principal))
     conn.securityOperations().createLocalUser(principal, (PasswordToken) token);
   else if (!acu.users.get(principal).token.equals(token))
     throw new AccumuloSecurityException(principal, SecurityErrorCode.BAD_CREDENTIALS);
   return conn;
 }
Example #19
  /** Sets up the job configuration and starts the MapReduce job. */
  public int run(String[] args) throws Exception {

    if (USE_MINI_ACCUMULO) {
      Connector connector = LocalEnvUtil.getConnector(userPass);
      userName = "******";
      instanceName = connector.getInstance().getInstanceName();
      zookeepers = connector.getInstance().getZooKeepers();
    }

    // Create and initialize a MapReduce Job
    Job job = Job.getInstance(getConf(), "tweetIndexer");
    job.setJarByClass(IndexedDocIndexer.class);

    // Set the AccumuloInputFormat so the mapper can read from Accumulo

    AccumuloInputFormat.setConnectorInfo(job, userName, new PasswordToken(userPass));

    AccumuloInputFormat.setInputTableName(job, twitterDataTable);

    AccumuloInputFormat.setScanAuthorizations(job, new Authorizations());

    ClientConfiguration clientConfig = new ClientConfiguration();
    clientConfig.withInstance(instanceName);
    clientConfig.withZkHosts(zookeepers);

    AccumuloInputFormat.setZooKeeperInstance(job, clientConfig);

    AccumuloOutputFormat.setConnectorInfo(job, userName, new PasswordToken(userPass));

    AccumuloOutputFormat.setCreateTables(job, createTables);

    AccumuloOutputFormat.setDefaultTableName(job, tweetDocIndex);

    AccumuloOutputFormat.setZooKeeperInstance(job, clientConfig);

    // Set the map and reduce classes
    job.setMapperClass(TweetMapper.class);
    job.setReducerClass(TweetReducer.class);

    // Set the output key and value class for the mapper
    job.setMapOutputKeyClass(Text.class);
    job.setMapOutputValueClass(Text.class);

    // Set the output key and value class for the reducer
    job.setOutputKeyClass(Text.class);
    job.setOutputValueClass(Mutation.class);

    // Set the InputFormat and OutputFormat for the job
    job.setInputFormatClass(AccumuloInputFormat.class);
    job.setOutputFormatClass(AccumuloOutputFormat.class);

    // Run the MapReduce job and return 0 for success, 1 otherwise
    return job.waitForCompletion(true) ? 0 : 1;
  }
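Since run(String[]) above returns an exit code and reads getConf(), it follows Hadoop's Tool pattern; a small sketch of how such a driver is typically launched, assuming IndexedDocIndexer implements org.apache.hadoop.util.Tool (the main method below is not shown in the source):

  public static void main(String[] args) throws Exception {
    // ToolRunner handles the generic Hadoop options (-D, -conf, -libjars, ...) and then
    // passes the remaining arguments to run(args).
    int exitCode = ToolRunner.run(new Configuration(), new IndexedDocIndexer(), args);
    System.exit(exitCode);
  }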
Example #20
 @Test
 public void interleaveSplit() throws Exception {
   Connector c = getConnector();
   c.tableOperations().create("test_ingest");
   c.tableOperations().setProperty("test_ingest", Property.TABLE_SPLIT_THRESHOLD.getKey(), "10K");
   c.tableOperations()
       .setProperty("test_ingest", Property.TABLE_FILE_COMPRESSION_TYPE.getKey(), "none");
   ReadWriteIT.interleaveTest(c);
   UtilWaitThread.sleep(5 * 1000);
   assertTrue(c.tableOperations().listSplits("test_ingest").size() > 20);
 }
Example #21
 @Before
 public void alterConfig() throws Exception {
   Connector conn = getConnector();
   majcDelay =
       conn.instanceOperations().getSystemConfiguration().get(Property.TSERV_MAJC_DELAY.getKey());
   if (!"1s".equals(majcDelay)) {
     conn.instanceOperations().setProperty(Property.TSERV_MAJC_DELAY.getKey(), "1s");
     getClusterControl().stopAllServers(ServerType.TABLET_SERVER);
     getClusterControl().startAllServers(ServerType.TABLET_SERVER);
   }
 }
Example #22
  @Override
  public void visit(State state, Properties props) throws Exception {
    Connector conn = state.getConnector();

    Random rand = (Random) state.get("rand");

    @SuppressWarnings("unchecked")
    List<String> tableNames = (List<String>) state.get("tables");

    String tableName = tableNames.get(rand.nextInt(tableNames.size()));

    Configuration conf = CachedConfiguration.getInstance();
    FileSystem fs = FileSystem.get(conf);

    String bulkDir = "/tmp/concurrent_bulk/b_" + String.format("%016x", Math.abs(rand.nextLong()));

    fs.mkdirs(new Path(bulkDir));
    fs.mkdirs(new Path(bulkDir + "_f"));

    try {
      BatchWriter bw = new RFileBatchWriter(conf, fs, bulkDir + "/file01.rf");
      try {
        TreeSet<Long> rows = new TreeSet<Long>();
        int numRows = rand.nextInt(100000);
        for (int i = 0; i < numRows; i++) {
          rows.add(Math.abs(rand.nextLong()));
        }

        for (Long row : rows) {
          Mutation m = new Mutation(String.format("%016x", row));
          long val = Math.abs(rand.nextLong());
          for (int j = 0; j < 10; j++) {
            m.put("cf", "cq" + j, new Value(String.format("%016x", val).getBytes()));
          }

          bw.addMutation(m);
        }
      } finally {
        bw.close();
      }

      conn.tableOperations()
          .importDirectory(tableName, bulkDir, bulkDir + "_f", rand.nextBoolean());

      log.debug("BulkImported to " + tableName);
    } catch (TableNotFoundException e) {
      log.debug("BulkImport " + tableName + " failed, doesnt exist");
    } catch (TableOfflineException toe) {
      log.debug("BulkImport " + tableName + " failed, offline");
    } finally {
      fs.delete(new Path(bulkDir), true);
      fs.delete(new Path(bulkDir + "_f"), true);
    }
  }
Example #23
  @Override
  public void teardown() {

    Connector conn = getConnector();
    String tableName = getTestProperty("TABLE");

    try {
      conn.tableOperations().delete(tableName);
    } catch (Exception e) {
      log.error("Failed to delete table '" + tableName + "'", e);
    }
  }
Example #24
 @Test(timeout = 4 * 60 * 1000)
 public void binaryStressTest() throws Exception {
   Connector c = getConnector();
   c.tableOperations().create("bt");
   c.tableOperations().setProperty("bt", Property.TABLE_SPLIT_THRESHOLD.getKey(), "10K");
   BinaryIT.runTest(c);
   String id = c.tableOperations().tableIdMap().get("bt");
   FileSystem fs = FileSystem.get(CachedConfiguration.getInstance());
   FileStatus[] dir =
       fs.listStatus(new Path(cluster.getConfig().getDir() + "/accumulo/tables/" + id));
   assertTrue(dir.length > 7);
 }
Example #25
  @Test
  public void testGetProtectedField() throws Exception {
    FileInputFormat.addInputPath(conf, new Path("unused"));

    BatchWriterConfig writerConf = new BatchWriterConfig();
    BatchWriter writer = con.createBatchWriter(TEST_TABLE, writerConf);

    Authorizations origAuths = con.securityOperations().getUserAuthorizations(USER);
    con.securityOperations()
        .changeUserAuthorizations(USER, new Authorizations(origAuths.toString() + ",foo"));

    Mutation m = new Mutation("r4");
    m.put(COLUMN_FAMILY, NAME, new ColumnVisibility("foo"), new Value("frank".getBytes()));
    m.put(COLUMN_FAMILY, SID, new ColumnVisibility("foo"), new Value(parseIntBytes("4")));
    m.put(COLUMN_FAMILY, DEGREES, new ColumnVisibility("foo"), new Value(parseDoubleBytes("60.6")));
    m.put(COLUMN_FAMILY, MILLIS, new ColumnVisibility("foo"), new Value(parseLongBytes("777")));

    writer.addMutation(m);
    writer.close();

    conf.set(AccumuloSerDeParameters.AUTHORIZATIONS_KEY, "foo");

    InputSplit[] splits = inputformat.getSplits(conf, 0);
    assertEquals(splits.length, 1);
    RecordReader<Text, AccumuloHiveRow> reader = inputformat.getRecordReader(splits[0], conf, null);
    Text rowId = new Text("r1");
    AccumuloHiveRow row = new AccumuloHiveRow();
    assertTrue(reader.next(rowId, row));
    assertEquals(row.getRowId(), rowId.toString());
    assertTrue(row.hasFamAndQual(COLUMN_FAMILY, NAME));
    assertArrayEquals(row.getValue(COLUMN_FAMILY, NAME), "brian".getBytes());

    rowId = new Text("r2");
    assertTrue(reader.next(rowId, row));
    assertEquals(row.getRowId(), rowId.toString());
    assertTrue(row.hasFamAndQual(COLUMN_FAMILY, NAME));
    assertArrayEquals(row.getValue(COLUMN_FAMILY, NAME), "mark".getBytes());

    rowId = new Text("r3");
    assertTrue(reader.next(rowId, row));
    assertEquals(row.getRowId(), rowId.toString());
    assertTrue(row.hasFamAndQual(COLUMN_FAMILY, NAME));
    assertArrayEquals(row.getValue(COLUMN_FAMILY, NAME), "dennis".getBytes());

    rowId = new Text("r4");
    assertTrue(reader.next(rowId, row));
    assertEquals(row.getRowId(), rowId.toString());
    assertTrue(row.hasFamAndQual(COLUMN_FAMILY, NAME));
    assertArrayEquals(row.getValue(COLUMN_FAMILY, NAME), "frank".getBytes());

    assertFalse(reader.next(rowId, row));
  }
Example #26
 @Test
 public void deleteSplit() throws Exception {
   Connector c = getConnector();
   c.tableOperations().create("test_ingest");
   c.tableOperations().setProperty("test_ingest", Property.TABLE_SPLIT_THRESHOLD.getKey(), "10K");
   DeleteIT.deleteTest(c, cluster);
   c.tableOperations().flush("test_ingest", null, null, true);
   for (int i = 0; i < 5; i++) {
     UtilWaitThread.sleep(10 * 1000);
     if (c.tableOperations().listSplits("test_ingest").size() > 20) break;
   }
   assertTrue(c.tableOperations().listSplits("test_ingest").size() > 20);
 }
Example #27
  /** Write entries to a table. */
  public static void writeEntries(
      Connector connector, Map<Key, Value> map, String table, boolean createIfNotExist) {
    if (createIfNotExist && !connector.tableOperations().exists(table))
      try {
        connector.tableOperations().create(table);
      } catch (AccumuloException | AccumuloSecurityException e) {
        log.error("trouble creating " + table, e);
        throw new RuntimeException(e);
      } catch (TableExistsException e) {
        log.error("crazy", e);
        throw new RuntimeException(e);
      }

    BatchWriterConfig bwc = new BatchWriterConfig();
    BatchWriter bw;
    try {
      bw = connector.createBatchWriter(table, bwc);
    } catch (TableNotFoundException e) {
      log.error("tried to write to a non-existant table " + table, e);
      throw new RuntimeException(e);
    }

    try {
      for (Map.Entry<Key, Value> entry : map.entrySet()) {
        Key k = entry.getKey();
        ByteSequence rowData = k.getRowData(),
            cfData = k.getColumnFamilyData(),
            cqData = k.getColumnQualifierData();
        Mutation m = new Mutation(rowData.getBackingArray(), rowData.offset(), rowData.length());
        m.put(
            cfData.getBackingArray(),
            cqData.getBackingArray(),
            k.getColumnVisibilityParsed(),
            entry.getValue().get());
        bw.addMutation(m);
      }

    } catch (MutationsRejectedException e) {
      log.error("mutations rejected", e);
      throw new RuntimeException(e);
    } finally {
      try {
        bw.close();
      } catch (MutationsRejectedException e) {
        log.error("mutations rejected while trying to close BatchWriter", e);
      }
    }
  }
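A short usage sketch for writeEntries; the connector variable, table name, and cell values below are illustrative assumptions rather than values from the source:

    // Build two entries under one row and write them, creating the table if it is missing.
    Map<Key, Value> entries = new TreeMap<>();
    entries.put(new Key("row1", "cf", "cq1"), new Value("v1".getBytes()));
    entries.put(new Key("row1", "cf", "cq2"), new Value("v2".getBytes()));
    writeEntries(connector, entries, "exampleTable", true);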
Example #28
  /**
   * Insert the contents of an NDSI data file into an Accumulo table.
   *
   * @param file Input file
   * @param Atable Table to insert into
   * @param deleteIfExists Delete and recreate Atable first if it already exists
   * @return Number of entries inserted into Atable; equal to 2x the number of rows.
   * @throws IOException
   */
  public long ingestFile(File file, String Atable, boolean deleteIfExists) throws IOException {
    if (deleteIfExists && connector.tableOperations().exists(Atable))
      try {
        connector.tableOperations().delete(Atable);
      } catch (AccumuloException | AccumuloSecurityException e) {
        log.warn("trouble deleting table " + Atable, e);
        throw new RuntimeException(e);
      } catch (TableNotFoundException e) {
        throw new RuntimeException(e);
      }
    if (!connector.tableOperations().exists(Atable))
      try {
        connector.tableOperations().create(Atable);
      } catch (AccumuloException | AccumuloSecurityException e) {
        log.warn("trouble creating table " + Atable, e);
        throw new RuntimeException(e);
      } catch (TableExistsException e) {
        throw new RuntimeException(e);
      }

    BatchWriter bw = null;
    String line = null;
    long entriesProcessed = 0;

    try (BufferedReader fo = new BufferedReader(new FileReader(file))) {
      BatchWriterConfig config = new BatchWriterConfig();
      bw = connector.createBatchWriter(Atable, config);

      // Skip header line
      fo.readLine();

      while ((line = fo.readLine()) != null)
        if (!line.isEmpty()) entriesProcessed += ingestLine(bw, line);

    } catch (TableNotFoundException e) {
      throw new RuntimeException(e);
    } catch (MutationsRejectedException e) {
      log.warn("Mutation rejected on line " + line, e);
    } finally {
      if (bw != null)
        try {
          bw.close();
        } catch (MutationsRejectedException e) {
          log.warn("Mutation rejected at close() on line " + line, e);
        }
    }
    return entriesProcessed;
  }
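And a correspondingly small sketch of calling ingestFile from the same class (the file path and table name are placeholders):

    // Ingest one NDSI file, dropping and recreating the target table if it already exists.
    long entriesIngested = ingestFile(new File("/tmp/ndsi_sample.csv"), "ndsi_raw", true);
    log.info("Ingested " + entriesIngested + " entries into ndsi_raw");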
Example #29
  private static long write(Connector conn, ArrayList<byte[]> cfset, String table)
      throws TableNotFoundException, MutationsRejectedException {
    Random rand = new Random();

    byte[] val = new byte[50];

    BatchWriter bw = conn.createBatchWriter(table, new BatchWriterConfig());

    long t1 = System.currentTimeMillis();

    for (int i = 0; i < 1 << 15; i++) {
      byte[] row = FastFormat.toZeroPaddedString(abs(rand.nextLong()), 16, 16, new byte[0]);

      Mutation m = new Mutation(row);
      for (byte[] cf : cfset) {
        byte[] cq = FastFormat.toZeroPaddedString(rand.nextInt(1 << 16), 4, 16, new byte[0]);
        rand.nextBytes(val);
        m.put(cf, cq, val);
      }

      bw.addMutation(m);
    }

    bw.close();

    long t2 = System.currentTimeMillis();

    return t2 - t1;
  }
Example #30
  private void setupTestUsers(
      final Connector accumuloConn, final String ryaInstanceName, final String pcjId)
      throws AccumuloException, AccumuloSecurityException {
    final PasswordToken pass = new PasswordToken("password");
    final SecurityOperations secOps = accumuloConn.securityOperations();

    // XXX We need the table name so that we can update security for the users.
    final String pcjTableName = new PcjTableNameFactory().makeTableName(ryaInstanceName, pcjId);

    // Give the 'root' user authorizations to see everything.
    secOps.changeUserAuthorizations("root", new Authorizations("A", "B", "C", "D", "E"));

    // Create a user that can see things with A and B.
    secOps.createLocalUser("abUser", pass);
    secOps.changeUserAuthorizations("abUser", new Authorizations("A", "B"));
    secOps.grantTablePermission("abUser", pcjTableName, TablePermission.READ);

    // Create a user that can see things with A, B, and C.
    secOps.createLocalUser("abcUser", pass);
    secOps.changeUserAuthorizations("abcUser", new Authorizations("A", "B", "C"));
    secOps.grantTablePermission("abcUser", pcjTableName, TablePermission.READ);

    // Create a user that can see things with A, D, and E.
    secOps.createLocalUser("adeUser", pass);
    secOps.changeUserAuthorizations("adeUser", new Authorizations("A", "D", "E"));
    secOps.grantTablePermission("adeUser", pcjTableName, TablePermission.READ);

    // Create a user that can't see anything.
    secOps.createLocalUser("noAuth", pass);
    secOps.changeUserAuthorizations("noAuth", new Authorizations());
    secOps.grantTablePermission("noAuth", pcjTableName, TablePermission.READ);
  }