@Test
 public void testHdfs() throws Exception {
   Configuration conf =
       HadoopConfigurationBuilder.buildHdfsConfiguration(clusterSpec, cluster, defaults);
   assertThat(Iterators.size(conf.getKeys()), is(1));
   assertThat(conf.getString("p1"), is("hdfs1"));
 }
 @Test
 public void testCommon() throws Exception {
   Configuration conf =
       HadoopConfigurationBuilder.buildCommonConfiguration(clusterSpec, cluster, defaults);
   assertThat(Iterators.size(conf.getKeys()), is(3));
   assertThat(conf.getString("p1"), is("common1"));
   assertThat(conf.getString("p2"), is("common2"));
   assertThat(conf.getString("fs.default.name"), matches("hdfs://.+:8020/"));
 }
Example #3
 public static int size(Object obj) {
   Preconditions.checkArgument(obj != null);
   if (obj instanceof Traversal) return size(((Traversal) obj).toList());
   else if (obj instanceof Collection) return ((Collection) obj).size();
   else if (obj instanceof Iterable) return Iterables.size((Iterable) obj);
   else if (obj instanceof Iterator) return Iterators.size((Iterator) obj);
   else if (obj.getClass().isArray()) return Array.getLength(obj);
   throw new IllegalArgumentException("Cannot determine size of: " + obj);
 }
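
For reference, a minimal self-contained sketch (hypothetical data) of what each branch of the dispatcher above resolves to; the Traversal branch is project-specific and omitted here:

import com.google.common.collect.Iterators;

import java.lang.reflect.Array;
import java.util.Arrays;
import java.util.Iterator;
import java.util.List;

public class SizeDispatchDemo {
  public static void main(String[] args) {
    List<Integer> list = Arrays.asList(1, 2, 3);   // Collection branch: Collection.size()
    Iterator<Integer> iterator = list.iterator();  // Iterator branch: Iterators.size(...)
    int[] array = {1, 2, 3, 4};                    // array branch: Array.getLength(...)

    System.out.println(list.size());               // 3
    System.out.println(Iterators.size(iterator));  // 3; the iterator is now exhausted
    System.out.println(Array.getLength(array));    // 4
  }
}
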
 @Test
 public void testOverrides() throws Exception {
   Configuration overrides = new PropertiesConfiguration();
   overrides.addProperty("hadoop-common.p1", "overridden1");
   overrides.addProperty("hadoop-common.p2", "overridden2");
   overrides.addProperty("hadoop-common.fs.default.name", "not-overridden");
   clusterSpec = ClusterSpec.withNoDefaults(overrides);
   Configuration conf =
       HadoopConfigurationBuilder.buildCommonConfiguration(clusterSpec, cluster, defaults);
   assertThat(Iterators.size(conf.getKeys()), is(3));
   assertThat(conf.getString("p1"), is("overridden1"));
   assertThat(conf.getString("p2"), is("overridden2"));
   assertThat(
       "Can't override dynamically set properties",
       conf.getString("fs.default.name"),
       matches("hdfs://.+:8020/"));
 }
Example #5
  @Test
  public void testFromXmlWithOnlyAttributes() throws Exception {
    final Agent agent =
        setUpAndRunAgent(
            "testFromXml (state <s> ^superstate nil ^io.input-link <il>) -->"
                + "(<il> ^xml (from-xml |<ignored name='Boo' value='Radley'/>|))");

    final Identifier il = agent.getInputOutput().getInputLink();
    final MatcherBuilder m = Wmes.matcher(agent);
    final Identifier xml = m.attr("xml").find(il).getValue().asIdentifier();
    assertNotNull(xml);
    final Wme attrs = m.attr(DefaultWmeToXml.ATTRS).find(xml);
    assertNotNull(attrs);
    assertEquals(1, Iterators.size(xml.getWmes())); // Only /attrs

    assertEquals("Boo", m.attr("name").find(attrs).getValue().asString().getValue());
    assertEquals("Radley", m.attr("value").find(attrs).getValue().asString().getValue());
  }
Example #6
  protected void reduce(IntWritable key, Iterable<Point> values, Context context)
      throws IOException, InterruptedException {

    Point updatedCenter = new Point(KMeans.centroids.get(0).getDimension());
    // Calling Iterators.size(values.iterator()) up front would exhaust the reducer's
    // single-pass value iterable, leaving nothing to sum, so count while summing instead.
    int size = 0;
    for (Point point : values) {
      updatedCenter = Point.addPoints(updatedCenter, point);
      size++;
    }
    float scalar = 1 / (float) size;

    updatedCenter = Point.multiplyScalar(updatedCenter, scalar);

    context.write(key, updatedCenter);

    KMeans.centroids.set(key.get(), updatedCenter);
  }
Example #7
  @Test
  public void write_issues() {
    // no data yet
    assertThat(underTest.hasComponentData(FileStructure.Domain.ISSUES, 1)).isFalse();

    // write data
    BatchReport.Issue issue =
        BatchReport.Issue.newBuilder().setLine(50).setMsg("the message").build();

    underTest.writeComponentIssues(1, Arrays.asList(issue));

    assertThat(underTest.hasComponentData(FileStructure.Domain.ISSUES, 1)).isTrue();
    File file = underTest.getFileStructure().fileFor(FileStructure.Domain.ISSUES, 1);
    assertThat(file).exists().isFile();
    try (CloseableIterator<BatchReport.Issue> read =
        Protobuf.readStream(file, BatchReport.Issue.PARSER)) {
      assertThat(Iterators.size(read)).isEqualTo(1);
    }
  }
Example #8
  @Test
  public void write_measures() {
    assertThat(underTest.hasComponentData(FileStructure.Domain.MEASURES, 1)).isFalse();

    BatchReport.Measure measure =
        BatchReport.Measure.newBuilder()
            .setStringValue("text-value")
            .setDoubleValue(2.5d)
            .setValueType(Constants.MeasureValueType.DOUBLE)
            .build();

    underTest.writeComponentMeasures(1, Arrays.asList(measure));

    assertThat(underTest.hasComponentData(FileStructure.Domain.MEASURES, 1)).isTrue();
    File file = underTest.getFileStructure().fileFor(FileStructure.Domain.MEASURES, 1);
    assertThat(file).exists().isFile();
    try (CloseableIterator<BatchReport.Measure> read =
        Protobuf.readStream(file, BatchReport.Measure.PARSER)) {
      assertThat(Iterators.size(read)).isEqualTo(1);
    }
  }
Example #9
  @Test
  public void oneLevelSearch() throws Exception {
    LdapSearch search =
        new LdapSearch(contextFactory)
            .setBaseDn("dc=example,dc=org")
            .setScope(SearchControls.ONELEVEL_SCOPE)
            .setRequest("(objectClass={0})")
            .setParameters("inetOrgPerson")
            .returns("cn");

    assertThat(search.getBaseDn(), is("dc=example,dc=org"));
    assertThat(search.getScope(), is(SearchControls.ONELEVEL_SCOPE));
    assertThat(search.getRequest(), is("(objectClass={0})"));
    assertThat(search.getParameters(), is(new String[] {"inetOrgPerson"}));
    assertThat(search.getReturningAttributes(), is(new String[] {"cn"}));
    assertThat(
        search.toString(),
        is(
            "LdapSearch{baseDn=dc=example,dc=org, scope=onelevel, request=(objectClass={0}), parameters=[inetOrgPerson], attributes=[cn]}"));
    assertThat(Iterators.size(Iterators.forEnumeration(search.find())), is(0));
    assertThat(search.findUnique(), nullValue());
  }
Example #10
  @Test
  public void objectSearch() throws Exception {
    LdapSearch search =
        new LdapSearch(contextFactory)
            .setBaseDn("cn=bind,ou=users,dc=example,dc=org")
            .setScope(SearchControls.OBJECT_SCOPE)
            .setRequest("(objectClass={0})")
            .setParameters("uidObject")
            .returns("uid");

    assertThat(search.getBaseDn(), is("cn=bind,ou=users,dc=example,dc=org"));
    assertThat(search.getScope(), is(SearchControls.OBJECT_SCOPE));
    assertThat(search.getRequest(), is("(objectClass={0})"));
    assertThat(search.getParameters(), is(new String[] {"uidObject"}));
    assertThat(search.getReturningAttributes(), is(new String[] {"uid"}));
    assertThat(
        search.toString(),
        is(
            "LdapSearch{baseDn=cn=bind,ou=users,dc=example,dc=org, scope=object, request=(objectClass={0}), parameters=[uidObject], attributes=[uid]}"));
    assertThat(Iterators.size(Iterators.forEnumeration(search.find())), is(1));
    assertThat(search.findUnique(), not(nullValue()));
  }
Example #11
  @Test
  public void subtreeSearch() throws Exception {
    LdapSearch search =
        new LdapSearch(contextFactory)
            .setBaseDn("dc=example,dc=org")
            .setRequest("(objectClass={0})")
            .setParameters("inetOrgPerson")
            .returns("objectClass");

    assertThat(search.getBaseDn(), is("dc=example,dc=org"));
    assertThat(search.getScope(), is(SearchControls.SUBTREE_SCOPE));
    assertThat(search.getRequest(), is("(objectClass={0})"));
    assertThat(search.getParameters(), is(new String[] {"inetOrgPerson"}));
    assertThat(search.getReturningAttributes(), is(new String[] {"objectClass"}));
    assertThat(
        search.toString(),
        is(
            "LdapSearch{baseDn=dc=example,dc=org, scope=subtree, request=(objectClass={0}), parameters=[inetOrgPerson], attributes=[objectClass]}"));
    assertThat(Iterators.size(Iterators.forEnumeration(search.find())), is(3));
    thrown.expect(NamingException.class);
    thrown.expectMessage("Non unique result for " + search.toString());
    search.findUnique();
  }
Example #12
 private int sizeOf(final Iterable<?> iterable) {
   return Iterators.size(iterable.iterator());
 }
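
A small self-contained usage sketch (hypothetical data and class name) for a helper like the one above; Iterators.size walks the entire iterator, so each call is O(n), but because the helper asks the Iterable for a fresh iterator every time, repeated calls keep working:

import com.google.common.collect.Iterators;

import java.util.Arrays;
import java.util.List;

public class SizeOfDemo {
  private static int sizeOf(final Iterable<?> iterable) {
    return Iterators.size(iterable.iterator());
  }

  public static void main(String[] args) {
    List<String> names = Arrays.asList("a", "b", "c"); // hypothetical data
    System.out.println(sizeOf(names)); // 3
    System.out.println(sizeOf(names)); // 3 again: each call walks a fresh iterator
  }
}
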
Example #13
  public void
      doShouldRunReadOnlyLdbcWorkloadWithNothingDbWhileIgnoringScheduledStartTimesAndReturnExpectedMetrics(
          int threadCount,
          long operationCount,
          CompletionTimeService completionTimeService,
          ConcurrentErrorReporter errorReporter)
          throws InterruptedException, DbException, WorkloadException, IOException,
              MetricsCollectionException, CompletionTimeException, DriverConfigurationException,
              ExecutionException {
    ControlService controlService = null;
    Db db = null;
    Workload workload = null;
    MetricsService metricsService = null;
    try {
      Map<String, String> paramsMap =
          LdbcSnbInteractiveWorkloadConfiguration.defaultReadOnlyConfigSF1();
      paramsMap.put(
          LdbcSnbInteractiveWorkloadConfiguration.PARAMETERS_DIRECTORY,
          TestUtils.getResource("/snb/interactive/").getAbsolutePath());
      paramsMap.put(
          LdbcSnbInteractiveWorkloadConfiguration.UPDATES_DIRECTORY,
          TestUtils.getResource("/snb/interactive/").getAbsolutePath());
      // Driver-specific parameters
      String name = null;
      String dbClassName = DummyLdbcSnbInteractiveDb.class.getName();
      String workloadClassName = LdbcSnbInteractiveWorkload.class.getName();
      int statusDisplayInterval = 1;
      TimeUnit timeUnit = TimeUnit.NANOSECONDS;
      String resultDirPath = temporaryFolder.newFolder().getAbsolutePath();
      double timeCompressionRatio = 1.0;
      Set<String> peerIds = new HashSet<>();
      ConsoleAndFileDriverConfiguration.ConsoleAndFileValidationParamOptions validationParams =
          null;
      String dbValidationFilePath = null;
      boolean calculateWorkloadStatistics = false;
      long spinnerSleepDuration = 0L;
      boolean printHelp = false;
      boolean ignoreScheduledStartTimes = true;
      long warmupCount = 100;

      ConsoleAndFileDriverConfiguration configuration =
          new ConsoleAndFileDriverConfiguration(
              paramsMap,
              name,
              dbClassName,
              workloadClassName,
              operationCount,
              threadCount,
              statusDisplayInterval,
              timeUnit,
              resultDirPath,
              timeCompressionRatio,
              peerIds,
              validationParams,
              dbValidationFilePath,
              calculateWorkloadStatistics,
              spinnerSleepDuration,
              printHelp,
              ignoreScheduledStartTimes,
              warmupCount);

      configuration =
          (ConsoleAndFileDriverConfiguration)
              configuration.applyArgs(
                  MapUtils.loadPropertiesToMap(
                      TestUtils.getResource("/snb/interactive/updateStream.properties")));

      controlService =
          new LocalControlService(
              timeSource.nowAsMilli() + 1000,
              configuration,
              new Log4jLoggingServiceFactory(false),
              timeSource);
      LoggingService loggingService =
          new Log4jLoggingServiceFactory(false).loggingServiceFor("Test");
      workload = new LdbcSnbInteractiveWorkload();
      workload.init(configuration);
      db = new DummyLdbcSnbInteractiveDb();
      db.init(configuration.asMap(), loggingService, workload.operationTypeToClassMapping());
      GeneratorFactory gf = new GeneratorFactory(new RandomDataGeneratorFactory(42L));
      Iterator<Operation> operations =
          gf.limit(
              WorkloadStreams.mergeSortedByStartTimeExcludingChildOperationGenerators(
                  gf, workload.streams(gf, true)),
              configuration.operationCount());
      Iterator<Operation> timeMappedOperations =
          gf.timeOffsetAndCompress(operations, controlService.workloadStartTimeAsMilli(), 1.0);
      WorkloadStreams workloadStreams = new WorkloadStreams();
      workloadStreams.setAsynchronousStream(
          new HashSet<Class<? extends Operation>>(),
          new HashSet<Class<? extends Operation>>(),
          Collections.<Operation>emptyIterator(),
          timeMappedOperations,
          null);

      File resultsLog = temporaryFolder.newFile();
      SimpleCsvFileWriter csvResultsLogWriter =
          new SimpleCsvFileWriter(resultsLog, SimpleCsvFileWriter.DEFAULT_COLUMN_SEPARATOR);
      metricsService =
          ThreadedQueuedMetricsService.newInstanceUsingBlockingBoundedQueue(
              timeSource,
              errorReporter,
              configuration.timeUnit(),
              ThreadedQueuedMetricsService.DEFAULT_HIGHEST_EXPECTED_RUNTIME_DURATION_AS_NANO,
              csvResultsLogWriter,
              workload.operationTypeToClassMapping(),
              LOGGING_SERVICE_FACTORY);

      int boundedQueueSize = DefaultQueues.DEFAULT_BOUND_1000;
      WorkloadRunner runner =
          new WorkloadRunner(
              timeSource,
              db,
              workloadStreams,
              metricsService,
              errorReporter,
              completionTimeService,
              controlService.loggingServiceFactory(),
              controlService.configuration().threadCount(),
              controlService.configuration().statusDisplayIntervalAsSeconds(),
              controlService.configuration().spinnerSleepDurationAsMilli(),
              controlService.configuration().ignoreScheduledStartTimes(),
              boundedQueueSize);

      runner.getFuture().get();

      WorkloadResultsSnapshot workloadResults = metricsService.getWriter().results();
      SimpleDetailedWorkloadMetricsFormatter metricsFormatter =
          new SimpleDetailedWorkloadMetricsFormatter();

      assertThat(
          errorReporter.toString() + "\n" + metricsFormatter.format(workloadResults),
          errorReporter.errorEncountered(),
          is(false));
      assertThat(
          errorReporter.toString() + "\n" + metricsFormatter.format(workloadResults),
          workloadResults.latestFinishTimeAsMilli() >= workloadResults.startTimeAsMilli(),
          is(true));
      assertThat(
          errorReporter.toString() + "\n" + metricsFormatter.format(workloadResults),
          workloadResults.totalOperationCount(),
          is(operationCount));

      WorkloadResultsSnapshot workloadResultsFromJson =
          WorkloadResultsSnapshot.fromJson(workloadResults.toJson());

      assertThat(errorReporter.toString(), workloadResults, equalTo(workloadResultsFromJson));
      assertThat(
          errorReporter.toString(),
          workloadResults.toJson(),
          equalTo(workloadResultsFromJson.toJson()));

      csvResultsLogWriter.close();
      SimpleCsvFileReader csvResultsLogReader =
          new SimpleCsvFileReader(
              resultsLog, SimpleCsvFileReader.DEFAULT_COLUMN_SEPARATOR_REGEX_STRING);
      assertThat(
          (long) Iterators.size(csvResultsLogReader),
          is(configuration.operationCount())); // not +1: no CSV header row was written
      csvResultsLogReader.close();

      operationCount = metricsService.getWriter().results().totalOperationCount();
      double operationsPerSecond =
          Math.round(
              ((double) operationCount / workloadResults.totalRunDurationAsNano())
                  * ONE_SECOND_AS_NANO);
      double microSecondPerOperation =
          (double) TimeUnit.NANOSECONDS.toMicros(workloadResults.totalRunDurationAsNano())
              / operationCount;
      System.out.println(
          format(
              "[%s threads] Completed %s operations in %s = %s op/sec = 1 op/%s us",
              threadCount,
              numberFormatter.format(operationCount),
              TEMPORAL_UTIL.nanoDurationToString(workloadResults.totalRunDurationAsNano()),
              doubleNumberFormatter.format(operationsPerSecond),
              doubleNumberFormatter.format(microSecondPerOperation)));
    } finally {
      System.out.println(errorReporter.toString());
      if (null != controlService) {
        controlService.shutdown();
      }
      if (null != db) {
        db.close();
      }
      if (null != workload) {
        workload.close();
      }
      if (null != metricsService) {
        metricsService.shutdown();
      }
      if (null != completionTimeService) {
        completionTimeService.shutdown();
      }
    }
  }
Example #14
 @Override
 public int size() {
   return Iterators.size(iterator());
 }
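
A compact, hypothetical illustration of the same pattern as the override above: a read-only view that does not track an element count and instead derives size() by walking a fresh iterator:

import com.google.common.collect.Iterators;

import java.util.Arrays;
import java.util.Iterator;

// Hypothetical wrapper; names are illustrative only.
public class LazyView implements Iterable<String> {
  private final Iterable<String> source;

  LazyView(Iterable<String> source) {
    this.source = source;
  }

  @Override
  public Iterator<String> iterator() {
    return source.iterator();
  }

  // Same idiom as the override above: count by walking a fresh iterator.
  public int size() {
    return Iterators.size(iterator());
  }

  public static void main(String[] args) {
    System.out.println(new LazyView(Arrays.asList("a", "b", "c")).size()); // 3
  }
}
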
Example #15
 @Override
 public void finish() {
   Iterators.size(pages);
 }
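
In the finish() method above the return value of Iterators.size is discarded, so the call appears to serve only to exhaust the remaining pages. A minimal self-contained sketch of that drain idiom (hypothetical names):

import com.google.common.collect.Iterators;

import java.util.Arrays;
import java.util.Iterator;

public class DrainDemo {
  public static void main(String[] args) {
    Iterator<String> pages = Arrays.asList("p1", "p2", "p3").iterator(); // hypothetical pages
    Iterators.size(pages);               // walks every remaining element; the count is ignored
    System.out.println(pages.hasNext()); // false: the iterator has been fully consumed
  }
}
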
  @Test
  public void dataWasReplicatedToThePeer() throws Exception {
    MiniAccumuloConfigImpl peerCfg =
        new MiniAccumuloConfigImpl(
            createTestDir(
                this.getClass().getName() + "_" + this.testName.getMethodName() + "_peer"),
            ROOT_PASSWORD);
    peerCfg.setNumTservers(1);
    peerCfg.setInstanceName("peer");
    updatePeerConfigFromPrimary(getCluster().getConfig(), peerCfg);
    peerCfg.setProperty(Property.REPLICATION_NAME, "peer");
    MiniAccumuloClusterImpl peerCluster = new MiniAccumuloClusterImpl(peerCfg);

    peerCluster.start();

    try {
      final Connector connMaster = getConnector();
      final Connector connPeer = peerCluster.getConnector("root", new PasswordToken(ROOT_PASSWORD));

      ReplicationTable.setOnline(connMaster);

      String peerUserName = "******", peerPassword = "******";

      String peerClusterName = "peer";

      connPeer.securityOperations().createLocalUser(peerUserName, new PasswordToken(peerPassword));

      connMaster
          .instanceOperations()
          .setProperty(Property.REPLICATION_PEER_USER.getKey() + peerClusterName, peerUserName);
      connMaster
          .instanceOperations()
          .setProperty(Property.REPLICATION_PEER_PASSWORD.getKey() + peerClusterName, peerPassword);

      // ...peer = AccumuloReplicaSystem,instanceName,zookeepers
      connMaster
          .instanceOperations()
          .setProperty(
              Property.REPLICATION_PEERS.getKey() + peerClusterName,
              ReplicaSystemFactory.getPeerConfigurationValue(
                  AccumuloReplicaSystem.class,
                  AccumuloReplicaSystem.buildConfiguration(
                      peerCluster.getInstanceName(), peerCluster.getZooKeepers())));

      final String masterTable = "master", peerTable = "peer";

      connMaster.tableOperations().create(masterTable);
      String masterTableId = connMaster.tableOperations().tableIdMap().get(masterTable);
      Assert.assertNotNull(masterTableId);

      connPeer.tableOperations().create(peerTable);
      String peerTableId = connPeer.tableOperations().tableIdMap().get(peerTable);
      Assert.assertNotNull(peerTableId);

      connPeer
          .securityOperations()
          .grantTablePermission(peerUserName, peerTable, TablePermission.WRITE);

      // Replicate this table to the peerClusterName in a table with the peerTableId table id
      connMaster
          .tableOperations()
          .setProperty(masterTable, Property.TABLE_REPLICATION.getKey(), "true");
      connMaster
          .tableOperations()
          .setProperty(
              masterTable,
              Property.TABLE_REPLICATION_TARGET.getKey() + peerClusterName,
              peerTableId);

      // Wait for zookeeper updates (configuration) to propagate
      sleepUninterruptibly(3, TimeUnit.SECONDS);

      // Write some data to the master table
      BatchWriter bw = connMaster.createBatchWriter(masterTable, new BatchWriterConfig());
      for (int rows = 0; rows < 5000; rows++) {
        Mutation m = new Mutation(Integer.toString(rows));
        for (int cols = 0; cols < 100; cols++) {
          String value = Integer.toString(cols);
          m.put(value, "", value);
        }
        bw.addMutation(m);
      }

      bw.close();

      log.info("Wrote all data to master cluster");

      final Set<String> filesNeedingReplication =
          connMaster.replicationOperations().referencedFiles(masterTable);

      for (ProcessReference proc : cluster.getProcesses().get(ServerType.TABLET_SERVER)) {
        cluster.killProcess(ServerType.TABLET_SERVER, proc);
      }
      cluster.exec(TabletServer.class);

      log.info("TabletServer restarted");
      Iterators.size(ReplicationTable.getScanner(connMaster).iterator());
      log.info("TabletServer is online");

      log.info("");
      log.info("Fetching metadata records:");
      for (Entry<Key, Value> kv :
          connMaster.createScanner(MetadataTable.NAME, Authorizations.EMPTY)) {
        if (ReplicationSection.COLF.equals(kv.getKey().getColumnFamily())) {
          log.info(
              kv.getKey().toStringNoTruncate()
                  + " "
                  + ProtobufUtil.toString(Status.parseFrom(kv.getValue().get())));
        } else {
          log.info(kv.getKey().toStringNoTruncate() + " " + kv.getValue());
        }
      }

      log.info("");
      log.info("Fetching replication records:");
      for (Entry<Key, Value> kv : ReplicationTable.getScanner(connMaster)) {
        log.info(
            kv.getKey().toStringNoTruncate()
                + " "
                + ProtobufUtil.toString(Status.parseFrom(kv.getValue().get())));
      }

      Future<Boolean> future =
          executor.submit(
              new Callable<Boolean>() {

                @Override
                public Boolean call() throws Exception {
                  connMaster.replicationOperations().drain(masterTable, filesNeedingReplication);
                  log.info("Drain completed");
                  return true;
                }
              });

      long timeoutSeconds = timeoutFactor * 30;
      try {
        future.get(timeoutSeconds, TimeUnit.SECONDS);
      } catch (TimeoutException e) {
        future.cancel(true);
        Assert.fail("Drain did not finish within " + timeoutSeconds + " seconds");
      }

      log.info("drain completed");

      log.info("");
      log.info("Fetching metadata records:");
      for (Entry<Key, Value> kv :
          connMaster.createScanner(MetadataTable.NAME, Authorizations.EMPTY)) {
        if (ReplicationSection.COLF.equals(kv.getKey().getColumnFamily())) {
          log.info(
              kv.getKey().toStringNoTruncate()
                  + " "
                  + ProtobufUtil.toString(Status.parseFrom(kv.getValue().get())));
        } else {
          log.info(kv.getKey().toStringNoTruncate() + " " + kv.getValue());
        }
      }

      log.info("");
      log.info("Fetching replication records:");
      for (Entry<Key, Value> kv : ReplicationTable.getScanner(connMaster)) {
        log.info(
            kv.getKey().toStringNoTruncate()
                + " "
                + ProtobufUtil.toString(Status.parseFrom(kv.getValue().get())));
      }

      Scanner master = connMaster.createScanner(masterTable, Authorizations.EMPTY),
          peer = connPeer.createScanner(peerTable, Authorizations.EMPTY);
      Iterator<Entry<Key, Value>> masterIter = master.iterator(), peerIter = peer.iterator();
      Entry<Key, Value> masterEntry = null, peerEntry = null;
      while (masterIter.hasNext() && peerIter.hasNext()) {
        masterEntry = masterIter.next();
        peerEntry = peerIter.next();
        Assert.assertEquals(
            masterEntry.getKey() + " was not equal to " + peerEntry.getKey(),
            0,
            masterEntry
                .getKey()
                .compareTo(peerEntry.getKey(), PartialKey.ROW_COLFAM_COLQUAL_COLVIS));
        Assert.assertEquals(masterEntry.getValue(), peerEntry.getValue());
      }

      log.info("Last master entry: " + masterEntry);
      log.info("Last peer entry: " + peerEntry);

      Assert.assertFalse("Had more data to read from the master", masterIter.hasNext());
      Assert.assertFalse("Had more data to read from the peer", peerIter.hasNext());
    } finally {
      peerCluster.stop();
    }
  }
  @Test
  public void dataReplicatedToCorrectTable() throws Exception {
    MiniAccumuloConfigImpl peerCfg =
        new MiniAccumuloConfigImpl(
            createTestDir(
                this.getClass().getName() + "_" + this.testName.getMethodName() + "_peer"),
            ROOT_PASSWORD);
    peerCfg.setNumTservers(1);
    peerCfg.setInstanceName("peer");
    updatePeerConfigFromPrimary(getCluster().getConfig(), peerCfg);
    peerCfg.setProperty(Property.REPLICATION_NAME, "peer");
    MiniAccumuloClusterImpl peer1Cluster = new MiniAccumuloClusterImpl(peerCfg);

    peer1Cluster.start();

    try {
      Connector connMaster = getConnector();
      Connector connPeer = peer1Cluster.getConnector("root", new PasswordToken(ROOT_PASSWORD));

      String peerClusterName = "peer";
      String peerUserName = "******", peerPassword = "******";

      // Create local user
      connPeer.securityOperations().createLocalUser(peerUserName, new PasswordToken(peerPassword));

      connMaster
          .instanceOperations()
          .setProperty(Property.REPLICATION_PEER_USER.getKey() + peerClusterName, peerUserName);
      connMaster
          .instanceOperations()
          .setProperty(Property.REPLICATION_PEER_PASSWORD.getKey() + peerClusterName, peerPassword);

      // ...peer = AccumuloReplicaSystem,instanceName,zookeepers
      connMaster
          .instanceOperations()
          .setProperty(
              Property.REPLICATION_PEERS.getKey() + peerClusterName,
              ReplicaSystemFactory.getPeerConfigurationValue(
                  AccumuloReplicaSystem.class,
                  AccumuloReplicaSystem.buildConfiguration(
                      peer1Cluster.getInstanceName(), peer1Cluster.getZooKeepers())));

      String masterTable1 = "master1",
          peerTable1 = "peer1",
          masterTable2 = "master2",
          peerTable2 = "peer2";

      // Create tables
      connMaster.tableOperations().create(masterTable1);
      String masterTableId1 = connMaster.tableOperations().tableIdMap().get(masterTable1);
      Assert.assertNotNull(masterTableId1);

      connMaster.tableOperations().create(masterTable2);
      String masterTableId2 = connMaster.tableOperations().tableIdMap().get(masterTable2);
      Assert.assertNotNull(masterTableId2);

      connPeer.tableOperations().create(peerTable1);
      String peerTableId1 = connPeer.tableOperations().tableIdMap().get(peerTable1);
      Assert.assertNotNull(peerTableId1);

      connPeer.tableOperations().create(peerTable2);
      String peerTableId2 = connPeer.tableOperations().tableIdMap().get(peerTable2);
      Assert.assertNotNull(peerTableId2);

      // Grant write permission
      connPeer
          .securityOperations()
          .grantTablePermission(peerUserName, peerTable1, TablePermission.WRITE);
      connPeer
          .securityOperations()
          .grantTablePermission(peerUserName, peerTable2, TablePermission.WRITE);

      // Replicate this table to the peerClusterName in a table with the peerTableId table id
      connMaster
          .tableOperations()
          .setProperty(masterTable1, Property.TABLE_REPLICATION.getKey(), "true");
      connMaster
          .tableOperations()
          .setProperty(
              masterTable1,
              Property.TABLE_REPLICATION_TARGET.getKey() + peerClusterName,
              peerTableId1);

      connMaster
          .tableOperations()
          .setProperty(masterTable2, Property.TABLE_REPLICATION.getKey(), "true");
      connMaster
          .tableOperations()
          .setProperty(
              masterTable2,
              Property.TABLE_REPLICATION_TARGET.getKey() + peerClusterName,
              peerTableId2);

      // Wait for zookeeper updates (configuration) to propagate
      sleepUninterruptibly(3, TimeUnit.SECONDS);

      // Write some data to table1
      BatchWriter bw = connMaster.createBatchWriter(masterTable1, new BatchWriterConfig());
      long masterTable1Records = 0L;
      for (int rows = 0; rows < 2500; rows++) {
        Mutation m = new Mutation(masterTable1 + rows);
        for (int cols = 0; cols < 100; cols++) {
          String value = Integer.toString(cols);
          m.put(value, "", value);
          masterTable1Records++;
        }
        bw.addMutation(m);
      }

      bw.close();

      // Write some data to table2
      bw = connMaster.createBatchWriter(masterTable2, new BatchWriterConfig());
      long masterTable2Records = 0L;
      for (int rows = 0; rows < 2500; rows++) {
        Mutation m = new Mutation(masterTable2 + rows);
        for (int cols = 0; cols < 100; cols++) {
          String value = Integer.toString(cols);
          m.put(value, "", value);
          masterTable2Records++;
        }
        bw.addMutation(m);
      }

      bw.close();

      log.info("Wrote all data to master cluster");

      Set<String> filesFor1 = connMaster.replicationOperations().referencedFiles(masterTable1),
          filesFor2 = connMaster.replicationOperations().referencedFiles(masterTable2);

      while (!ReplicationTable.isOnline(connMaster)) {
        Thread.sleep(500);
      }

      // Restart the tserver to force a close on the WAL
      for (ProcessReference proc : cluster.getProcesses().get(ServerType.TABLET_SERVER)) {
        cluster.killProcess(ServerType.TABLET_SERVER, proc);
      }
      cluster.exec(TabletServer.class);

      log.info("Restarted the tserver");

      // Read the data -- the tserver is back up and running
      Iterators.size(connMaster.createScanner(masterTable1, Authorizations.EMPTY).iterator());

      // Wait for both tables to be replicated
      log.info("Waiting for {} for {}", filesFor1, masterTable1);
      connMaster.replicationOperations().drain(masterTable1, filesFor1);

      log.info("Waiting for {} for {}", filesFor2, masterTable2);
      connMaster.replicationOperations().drain(masterTable2, filesFor2);

      long countTable = 0L;
      for (int i = 0; i < 5; i++) {
        countTable = 0L;
        for (Entry<Key, Value> entry : connPeer.createScanner(peerTable1, Authorizations.EMPTY)) {
          countTable++;
          Assert.assertTrue(
              "Found unexpected key-value "
                  + entry.getKey().toStringNoTruncate()
                  + " "
                  + entry.getValue(),
              entry.getKey().getRow().toString().startsWith(masterTable1));
        }

        log.info("Found {} records in {}", countTable, peerTable1);

        if (masterTable1Records != countTable) {
          log.warn(
              "Did not find {} expected records in {}, only found {}",
              masterTable1Records,
              peerTable1,
              countTable);
        }
      }

      Assert.assertEquals(masterTable1Records, countTable);

      for (int i = 0; i < 5; i++) {
        countTable = 0L;
        for (Entry<Key, Value> entry : connPeer.createScanner(peerTable2, Authorizations.EMPTY)) {
          countTable++;
          Assert.assertTrue(
              "Found unexpected key-value "
                  + entry.getKey().toStringNoTruncate()
                  + " "
                  + entry.getValue(),
              entry.getKey().getRow().toString().startsWith(masterTable2));
        }

        log.info("Found {} records in {}", countTable, peerTable2);

        if (masterTable2Records != countTable) {
          log.warn(
              "Did not find {} expected records in {}, only found {}",
              masterTable2Records,
              peerTable2,
              countTable);
        }
      }

      Assert.assertEquals(masterTable2Records, countTable);

    } finally {
      peer1Cluster.stop();
    }
  }
  @Test
  public void dataWasReplicatedToThePeerWithoutDrain() throws Exception {
    MiniAccumuloConfigImpl peerCfg =
        new MiniAccumuloConfigImpl(
            createTestDir(
                this.getClass().getName() + "_" + this.testName.getMethodName() + "_peer"),
            ROOT_PASSWORD);
    peerCfg.setNumTservers(1);
    peerCfg.setInstanceName("peer");
    updatePeerConfigFromPrimary(getCluster().getConfig(), peerCfg);
    peerCfg.setProperty(Property.REPLICATION_NAME, "peer");
    MiniAccumuloClusterImpl peerCluster = new MiniAccumuloClusterImpl(peerCfg);

    peerCluster.start();

    Connector connMaster = getConnector();
    Connector connPeer = peerCluster.getConnector("root", new PasswordToken(ROOT_PASSWORD));

    String peerUserName = "******";
    String peerPassword = "******";

    // Create a user on the peer for replication to use
    connPeer.securityOperations().createLocalUser(peerUserName, new PasswordToken(peerPassword));

    String peerClusterName = "peer";

    // ...peer = AccumuloReplicaSystem,instanceName,zookeepers
    connMaster
        .instanceOperations()
        .setProperty(
            Property.REPLICATION_PEERS.getKey() + peerClusterName,
            ReplicaSystemFactory.getPeerConfigurationValue(
                AccumuloReplicaSystem.class,
                AccumuloReplicaSystem.buildConfiguration(
                    peerCluster.getInstanceName(), peerCluster.getZooKeepers())));

    // Configure the credentials we should use to authenticate ourselves to the peer for replication
    connMaster
        .instanceOperations()
        .setProperty(Property.REPLICATION_PEER_USER.getKey() + peerClusterName, peerUserName);
    connMaster
        .instanceOperations()
        .setProperty(Property.REPLICATION_PEER_PASSWORD.getKey() + peerClusterName, peerPassword);

    String masterTable = "master", peerTable = "peer";

    connMaster.tableOperations().create(masterTable);
    String masterTableId = connMaster.tableOperations().tableIdMap().get(masterTable);
    Assert.assertNotNull(masterTableId);

    connPeer.tableOperations().create(peerTable);
    String peerTableId = connPeer.tableOperations().tableIdMap().get(peerTable);
    Assert.assertNotNull(peerTableId);

    // Give our replication user the ability to write to the table
    connPeer
        .securityOperations()
        .grantTablePermission(peerUserName, peerTable, TablePermission.WRITE);

    // Replicate this table to the peerClusterName in a table with the peerTableId table id
    connMaster
        .tableOperations()
        .setProperty(masterTable, Property.TABLE_REPLICATION.getKey(), "true");
    connMaster
        .tableOperations()
        .setProperty(
            masterTable, Property.TABLE_REPLICATION_TARGET.getKey() + peerClusterName, peerTableId);

    // Write some data to the master table
    BatchWriter bw = connMaster.createBatchWriter(masterTable, new BatchWriterConfig());
    for (int rows = 0; rows < 5000; rows++) {
      Mutation m = new Mutation(Integer.toString(rows));
      for (int cols = 0; cols < 100; cols++) {
        String value = Integer.toString(cols);
        m.put(value, "", value);
      }
      bw.addMutation(m);
    }

    bw.close();

    log.info("Wrote all data to master cluster");

    Set<String> files = connMaster.replicationOperations().referencedFiles(masterTable);
    for (String s : files) {
      log.info("Found referenced file for " + masterTable + ": " + s);
    }

    for (ProcessReference proc : cluster.getProcesses().get(ServerType.TABLET_SERVER)) {
      cluster.killProcess(ServerType.TABLET_SERVER, proc);
    }

    cluster.exec(TabletServer.class);

    Iterators.size(connMaster.createScanner(masterTable, Authorizations.EMPTY).iterator());

    for (Entry<Key, Value> kv :
        connMaster.createScanner(ReplicationTable.NAME, Authorizations.EMPTY)) {
      log.debug(
          kv.getKey().toStringNoTruncate()
              + " "
              + ProtobufUtil.toString(Status.parseFrom(kv.getValue().get())));
    }

    connMaster.replicationOperations().drain(masterTable, files);

    Scanner master = connMaster.createScanner(masterTable, Authorizations.EMPTY),
        peer = connPeer.createScanner(peerTable, Authorizations.EMPTY);
    Iterator<Entry<Key, Value>> masterIter = master.iterator(), peerIter = peer.iterator();
    Assert.assertTrue("No data in master table", masterIter.hasNext());
    Assert.assertTrue("No data in peer table", peerIter.hasNext());
    while (masterIter.hasNext() && peerIter.hasNext()) {
      Entry<Key, Value> masterEntry = masterIter.next(), peerEntry = peerIter.next();
      Assert.assertEquals(
          masterEntry.getKey() + " was not equal to " + peerEntry.getKey(),
          0,
          masterEntry.getKey().compareTo(peerEntry.getKey(), PartialKey.ROW_COLFAM_COLQUAL_COLVIS));
      Assert.assertEquals(masterEntry.getValue(), peerEntry.getValue());
    }

    Assert.assertFalse("Had more data to read from the master", masterIter.hasNext());
    Assert.assertFalse("Had more data to read from the peer", peerIter.hasNext());

    peerCluster.stop();
  }
Example #19
  @Test
  public void testSwap() throws Exception {
    final DataSchema schema =
        new DataSchema(
            "test",
            null,
            new AggregatorFactory[] {new CountAggregatorFactory("rows")},
            new UniformGranularitySpec(Granularity.HOUR, QueryGranularities.MINUTE, null),
            new DefaultObjectMapper());

    final Interval interval = new Interval("2013-01-01/2013-01-02");
    final String version = new DateTime().toString();
    RealtimeTuningConfig tuningConfig =
        new RealtimeTuningConfig(
            100,
            new Period("P1Y"),
            null,
            null,
            null,
            null,
            null,
            null,
            null,
            null,
            0,
            0,
            null,
            null);
    final Sink sink =
        new Sink(
            interval,
            schema,
            tuningConfig.getShardSpec(),
            version,
            tuningConfig.getMaxRowsInMemory(),
            tuningConfig.isReportParseExceptions());

    sink.add(
        new InputRow() {
          @Override
          public List<String> getDimensions() {
            return Lists.newArrayList();
          }

          @Override
          public long getTimestampFromEpoch() {
            return new DateTime("2013-01-01").getMillis();
          }

          @Override
          public DateTime getTimestamp() {
            return new DateTime("2013-01-01");
          }

          @Override
          public List<String> getDimension(String dimension) {
            return Lists.newArrayList();
          }

          @Override
          public float getFloatMetric(String metric) {
            return 0;
          }

          @Override
          public long getLongMetric(String metric) {
            return 0L;
          }

          @Override
          public Object getRaw(String dimension) {
            return null;
          }

          @Override
          public int compareTo(Row o) {
            return 0;
          }
        });

    FireHydrant currHydrant = sink.getCurrHydrant();
    Assert.assertEquals(new Interval("2013-01-01/PT1M"), currHydrant.getIndex().getInterval());

    FireHydrant swapHydrant = sink.swap();

    sink.add(
        new InputRow() {
          @Override
          public List<String> getDimensions() {
            return Lists.newArrayList();
          }

          @Override
          public long getTimestampFromEpoch() {
            return new DateTime("2013-01-01").getMillis();
          }

          @Override
          public DateTime getTimestamp() {
            return new DateTime("2013-01-01");
          }

          @Override
          public List<String> getDimension(String dimension) {
            return Lists.newArrayList();
          }

          @Override
          public float getFloatMetric(String metric) {
            return 0;
          }

          @Override
          public long getLongMetric(String metric) {
            return 0L;
          }

          @Override
          public Object getRaw(String dimension) {
            return null;
          }

          @Override
          public int compareTo(Row o) {
            return 0;
          }
        });

    Assert.assertEquals(currHydrant, swapHydrant);
    Assert.assertNotSame(currHydrant, sink.getCurrHydrant());
    Assert.assertEquals(
        new Interval("2013-01-01/PT1M"), sink.getCurrHydrant().getIndex().getInterval());

    Assert.assertEquals(2, Iterators.size(sink.iterator()));
  }
Example #20
 /** Returns the number of elements in {@code iterable}. */
 public static int size(Iterable<?> iterable) {
   return (iterable instanceof Collection)
       ? ((Collection<?>) iterable).size()
       : Iterators.size(iterable.iterator());
 }
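
A short usage sketch (hypothetical data) showing both paths of the helper above, the constant-time Collection shortcut and the linear iterator count; the helper is repeated locally so the sketch compiles on its own:

import com.google.common.collect.Iterators;

import java.util.Arrays;
import java.util.Collection;
import java.util.List;

public class IterableSizeDemo {
  static int size(Iterable<?> iterable) {
    return (iterable instanceof Collection)
        ? ((Collection<?>) iterable).size()
        : Iterators.size(iterable.iterator());
  }

  public static void main(String[] args) {
    List<String> list = Arrays.asList("a", "b");                           // Collection: size() directly
    Iterable<String> lazy = () -> Arrays.asList("x", "y", "z").iterator(); // not a Collection
    System.out.println(size(list)); // 2, via Collection.size()
    System.out.println(size(lazy)); // 3, counted by walking a fresh iterator
  }
}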