/**
   * Create the completion node for the snapshot identified by the txnId. All hosts may race to
   * call this, so it does not fail if the node already exists.
   *
   * @param path Path where the snapshot is written
   * @param nonce Nonce of the snapshot
   * @param txnId The snapshot txnId
   * @param isTruncation Whether or not this is a truncation snapshot
   * @param truncReqId Optional unique ID fed back to the monitor for identification
   * @return a callback that is completed when the asynchronous create finishes; the create
   *     succeeds only if the node did not already exist
   */
  public static ZKUtil.StringCallback createSnapshotCompletionNode(
      String path, String nonce, long txnId, boolean isTruncation, String truncReqId) {
    if (txnId <= 0) {
      VoltDB.crashGlobalVoltDB("TxnId must be greater than 0", true, null);
    }

    byte[] nodeBytes = null;
    try {
      JSONStringer stringer = new JSONStringer();
      stringer.object();
      stringer.key("txnId").value(txnId);
      stringer.key("isTruncation").value(isTruncation);
      stringer.key("didSucceed").value(false);
      stringer.key("hostCount").value(-1);
      stringer.key("path").value(path);
      stringer.key("nonce").value(nonce);
      stringer.key("truncReqId").value(truncReqId);
      stringer.key("exportSequenceNumbers").object().endObject();
      stringer.endObject();
      JSONObject jsonObj = new JSONObject(stringer.toString());
      nodeBytes = jsonObj.toString(4).getBytes(Charsets.UTF_8);
    } catch (Exception e) {
      VoltDB.crashLocalVoltDB("Error serializing snapshot completion node JSON", true, e);
    }

    ZKUtil.StringCallback cb = new ZKUtil.StringCallback();
    final String snapshotPath = VoltZK.completed_snapshots + "/" + txnId;
    VoltDB.instance()
        .getHostMessenger()
        .getZK()
        .create(snapshotPath, nodeBytes, Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT, cb, null);

    return cb;
  }
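  // Not VoltDB source: a minimal sketch of the create-if-absent race described in the
  // javadoc above, using only the stock ZooKeeper API. Every host issues the same create
  // and treats NodeExists as "another host already won", which is benign. All names in
  // this sketch are illustrative assumptions.
  private static void createIfAbsent(ZooKeeper zk, String path, byte[] bytes)
      throws KeeperException, InterruptedException {
    try {
      zk.create(path, bytes, Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
    } catch (KeeperException.NodeExistsException e) {
      // Another host created the node first; that is the expected, benign outcome.
    }
  }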
  ProcedureRunner(
      VoltProcedure procedure,
      SiteProcedureConnection site,
      SystemProcedureExecutionContext sysprocContext,
      Procedure catProc,
      CatalogSpecificPlanner csp) {
    assert (m_inputCRC.getValue() == 0L);

    if (procedure instanceof StmtProcedure) {
      m_procedureName = catProc.getTypeName().intern();
    } else {
      m_procedureName = procedure.getClass().getSimpleName();
    }
    m_procedure = procedure;
    m_isSysProc = procedure instanceof VoltSystemProcedure;
    m_catProc = catProc;
    m_site = site;
    m_systemProcedureContext = sysprocContext;
    m_csp = csp;

    m_procedure.init(this);

    m_statsCollector =
        new ProcedureStatsCollector(
            m_site.getCorrespondingSiteId(), m_site.getCorrespondingPartitionId(), m_catProc);
    VoltDB.instance()
        .getStatsAgent()
        .registerStatsSource(
            SysProcSelector.PROCEDURE, site.getCorrespondingSiteId(), m_statsCollector);

    reflect();
  }
  @Override
  public void run() {
    try {
      m_rvdb
          .getAsyncCompilerAgent()
          .createMailbox(
              VoltDB.instance().getHostMessenger(),
              m_rvdb
                  .getHostMessenger()
                  .getHSIdForLocalSite(HostMessenger.ASYNC_COMPILER_SITE_ID));
    } catch (Exception e) {
      hostLog.fatal("Unable to create the async compiler mailbox", e);
      System.exit(-1);
    }
  }
  @Override
  public ListenableFuture<?> write(final Callable<BBContainer> tupleData, int tableId) {
    final ListenableFuture<BBContainer> computedData =
        VoltDB.instance().getComputationService().submit(tupleData);

    return m_es.submit(
        new Callable<Object>() {
          @Override
          public Object call() throws Exception {
            try {
              final BBContainer data = computedData.get();
              /*
               * If a filter nulled out the buffer do nothing.
               */
              if (data == null) return null;
              if (m_writeFailed) {
                data.discard();
                return null;
              }
              try {
                int totalWritten = 0;

                final ByteBuffer dataBuf = data.b();
                DefaultSnapshotDataTarget.enforceSnapshotRateLimit(dataBuf.remaining());

                while (dataBuf.hasRemaining()) {
                  int written = m_fc.write(dataBuf);
                  if (written > 0) {
                    m_bytesWritten += written;
                    totalWritten += written;
                  }
                }
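                // Batch fsyncs: force to disk only after m_bytesAllowedBeforeSync bytes
                // have accumulated since the last sync, trading sync frequency for throughput.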
                if (m_bytesSinceLastSync.addAndGet(totalWritten) > m_bytesAllowedBeforeSync) {
                  m_fc.force(false);
                  m_bytesSinceLastSync.set(0);
                }
              } finally {
                data.discard();
              }
            } catch (Throwable t) {
              m_writeException = t;
              m_writeFailed = true;
              throw Throwables.propagate(t);
            }
            return null;
          }
        });
  }
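  // Not VoltDB source: a hedged usage sketch of write(). The caller supplies a Callable
  // that produces the buffer container; the returned future can be blocked on, and a
  // failed disk write surfaces as an ExecutionException. "target", "buf", and "tableId"
  // are illustrative assumptions.
  //
  //   ListenableFuture<?> done = target.write(new Callable<BBContainer>() {
  //     @Override
  //     public BBContainer call() { return DBBPool.wrapBB(buf); }
  //   }, tableId);
  //   done.get(); // rethrows any write error wrapped by the executor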
 @Test
 public void testUpdateCatalog() throws IOException {
   // only makes sense in pro (sysproc suite has a complementary test for community)
   if (VoltDB.instance().getConfig().m_isEnterprise) {
     String catalogHex = Encoder.hexEncode("blah");
     ByteBuffer msg = createMsg("@UpdateApplicationCatalog", catalogHex, "blah");
     ClientResponseImpl resp = m_ci.handleRead(msg, m_handler, m_cxn);
     assertNull(resp);
     ArgumentCaptor<LocalObjectMessage> captor = ArgumentCaptor.forClass(LocalObjectMessage.class);
     verify(m_messenger)
         .send(
             eq(32L), // A fixed number set in setUpOnce()
             captor.capture());
     assertTrue(captor.getValue().payload instanceof CatalogChangeWork);
   }
 }
  public void testRejoinPropogateAdminMode() throws Exception {
    // Reset the VoltFile prefix that may have been set by previous tests in this suite
    org.voltdb.utils.VoltFile.resetSubrootForThisProcess();
    VoltProjectBuilder builder = getBuilderForTest();
    builder.setSecurityEnabled(true);

    LocalCluster cluster =
        new LocalCluster("rejoin.jar", 2, 3, 1, BackendTarget.NATIVE_EE_JNI, true);
    boolean success = cluster.compileWithAdminMode(builder, 9998, false);
    assertTrue(success);
    MiscUtils.copyFile(
        builder.getPathToDeployment(), Configuration.getPathToCatalogForTest("rejoin.xml"));
    cluster.setHasLocalServer(false);

    cluster.startUp();

    ClientResponse response;
    Client client;

    client = ClientFactory.createClient(m_cconfig);
    client.createConnection("localhost", 9997);

    response = client.callProcedure("@Pause");
    assertEquals(ClientResponse.SUCCESS, response.getStatus());
    client.close();

    cluster.shutDownSingleHost(0);
    Thread.sleep(100);

    VoltDB.Configuration config = new VoltDB.Configuration();
    config.m_pathToCatalog = Configuration.getPathToCatalogForTest("rejoin.jar");
    config.m_pathToDeployment = Configuration.getPathToCatalogForTest("rejoin.xml");
    config.m_rejoinToHostAndPort = m_username + ":" + m_password + "@localhost:9996";
    config.m_isRejoinTest = true;
    ServerThread localServer = new ServerThread(config);

    localServer.start();
    localServer.waitForInitialization();

    Thread.sleep(1000);

    assertEquals(OperationMode.PAUSED, VoltDB.instance().getMode());

    localServer.shutdown();
    cluster.shutDown();
  }
  /**
   * Once the participating host count is set, SnapshotCompletionMonitor can check this ZK node to
   * determine whether the snapshot has finished.
   *
   * <p>This should only be called when all participants have responded. It is possible that some
   * hosts finish taking the snapshot before the coordinator logs the participating host count. In
   * this case, the host count would have been decremented multiple times already. To make sure
   * finished hosts are accounted for correctly, this method adds participantCount + 1 to the
   * current host count.
   *
   * @param txnId The snapshot txnId
   * @param participantCount The number of hosts participating in this snapshot
   */
  public static void logParticipatingHostCount(long txnId, int participantCount) {
    ZooKeeper zk = VoltDB.instance().getHostMessenger().getZK();
    final String snapshotPath = VoltZK.completed_snapshots + "/" + txnId;

    boolean success = false;
    while (!success) {
      Stat stat = new Stat();
      byte[] data = null;
      try {
        data = zk.getData(snapshotPath, false, stat);
      } catch (KeeperException e) {
        if (e.code() == KeeperException.Code.NONODE) {
          // If snapshot creation failed for some reason, the node won't exist. Ignore it.
          return;
        }
        VoltDB.crashLocalVoltDB("Failed to get snapshot completion node", true, e);
      } catch (InterruptedException e) {
        VoltDB.crashLocalVoltDB("Interrupted getting snapshot completion node", true, e);
      }
      if (data == null) {
        VoltDB.crashLocalVoltDB("Data should not be null if the node exists", false, null);
      }

      try {
        JSONObject jsonObj = new JSONObject(new String(data, Charsets.UTF_8));
        if (jsonObj.getLong("txnId") != txnId) {
          VoltDB.crashLocalVoltDB("TxnId should match", false, null);
        }

        int hostCount = jsonObj.getInt("hostCount");
        // +1 because hostCount was initialized to -1
        jsonObj.put("hostCount", hostCount + participantCount + 1);
        zk.setData(snapshotPath, jsonObj.toString(4).getBytes(Charsets.UTF_8), stat.getVersion());
      } catch (KeeperException.BadVersionException e) {
        continue;
      } catch (Exception e) {
        VoltDB.crashLocalVoltDB("This ZK call should never fail", true, e);
      }

      success = true;
    }
  }
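  // Not VoltDB source: a worked example of the hostCount accounting described above.
  // The node starts at -1, and each host that finishes decrements the count. Suppose 5
  // hosts participate and 2 of them finish before the coordinator logs the count:
  //   hostCount at logging time = -1 - 2     = -3
  //   after adding 5 + 1        = -3 + 5 + 1 =  3
  // leaving exactly the 3 hosts that have not yet finished, as intended.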
  /**
   * Load the full pro subclass if licensing and configuration allow it; otherwise load the no-op
   * stub.
   *
   * @param partitionId The partition id
   * @param nodeGateway The node-level DR gateway, or null when DR is not configured
   * @param isRejoin Whether this site is initializing as part of a rejoin
   * @return Instance of PartitionDRGateway
   */
  public static PartitionDRGateway getInstance(
      int partitionId, NodeDRGateway nodeGateway, boolean isRejoin) {
    final VoltDBInterface vdb = VoltDB.instance();
    LicenseApi api = vdb.getLicenseApi();
    final boolean licensedToDR = api.isDrReplicationAllowed();

    // if this is a primary cluster in a DR-enabled scenario
    // try to load the real version of this class
    PartitionDRGateway pdrg = null;
    if (licensedToDR && nodeGateway != null) {
      pdrg = tryToLoadProVersion();
    }
    if (pdrg == null) {
      pdrg = new PartitionDRGateway();
    }

    // init the instance and return
    try {
      pdrg.init(partitionId, nodeGateway, isRejoin);
    } catch (IOException e) {
      VoltDB.crashLocalVoltDB(e.getMessage(), false, e);
    }
    return pdrg;
  }
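  // Not VoltDB source: tryToLoadProVersion() is not shown in this snippet. A loader of
  // this shape is typically a reflective lookup with a null fallback; the class name
  // below is an illustrative assumption, not the real pro class.
  private static PartitionDRGateway tryToLoadProVersionSketch() {
    try {
      Class<?> klass = Class.forName("org.voltdb.dr.PartitionDRGatewayImpl"); // assumed name
      return (PartitionDRGateway) klass.newInstance();
    } catch (Exception e) {
      return null; // pro code absent or failed to load; caller falls back to the stub
    }
  }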
  @SuppressWarnings("deprecation")
  @Override
  public void setUp() throws IOException, InterruptedException {
    VoltDB.instance().readBuildInfo("Test");

    // compile a catalog
    String testDir = BuildDirectoryUtils.getBuildDirectoryPath();
    String catalogJar = testDir + File.separator + JAR;

    TPCCProjectBuilder pb = new TPCCProjectBuilder();
    pb.addDefaultSchema();
    pb.addDefaultPartitioning();
    pb.addProcedures(MultiSiteSelect.class, InsertNewOrder.class);

    pb.compile(catalogJar, 2, 0);

    // load a catalog
    byte[] bytes = CatalogUtil.toBytes(new File(catalogJar));
    String serializedCatalog = CatalogUtil.loadCatalogFromJar(bytes, null);

    // create the catalog (that will be passed to the ClientInterface
    catalog = new Catalog();
    catalog.execute(serializedCatalog);

    // update the catalog with the data from the deployment file
    String pathToDeployment = pb.getPathToDeployment();
    assertTrue(CatalogUtil.compileDeploymentAndGetCRC(catalog, pathToDeployment, true) >= 0);

    cluster = catalog.getClusters().get("cluster");
    CatalogMap<Procedure> procedures = cluster.getDatabases().get("database").getProcedures();
    Procedure insertProc = procedures.get("InsertNewOrder");
    assert (insertProc != null);
    selectProc = procedures.get("MultiSiteSelect");
    assert (selectProc != null);

    // Each EE needs its own thread for correct initialization.
    final AtomicReference<ExecutionEngine> site1Reference = new AtomicReference<ExecutionEngine>();
    final byte[] configBytes = LegacyHashinator.getConfigureBytes(2);
    Thread site1Thread =
        new Thread() {
          @Override
          public void run() {
            site1Reference.set(
                new ExecutionEngineJNI(
                    cluster.getRelativeIndex(),
                    1,
                    0,
                    0,
                    "",
                    100,
                    HashinatorType.LEGACY,
                    configBytes));
          }
        };
    site1Thread.start();
    site1Thread.join();

    final AtomicReference<ExecutionEngine> site2Reference = new AtomicReference<ExecutionEngine>();
    Thread site2Thread =
        new Thread() {
          @Override
          public void run() {
            site2Reference.set(
                new ExecutionEngineJNI(
                    cluster.getRelativeIndex(),
                    2,
                    1,
                    0,
                    "",
                    100,
                    HashinatorType.LEGACY,
                    configBytes));
          }
        };
    site2Thread.start();
    site2Thread.join();

    // create two EEs
    site1 = new ExecutionSite(0); // site 0
    ee1 = site1Reference.get();
    ee1.loadCatalog(0, catalog.serialize());
    site2 = new ExecutionSite(1); // site 1
    ee2 = site2Reference.get();
    ee2.loadCatalog(0, catalog.serialize());

    // cache some plan fragments
    selectStmt = selectProc.getStatements().get("selectAll");
    assert (selectStmt != null);
    int i = 0;
    // the fragment order is not guaranteed; the swap below corrects it if needed
    for (PlanFragment f : selectStmt.getFragments()) {
      if (i == 0) selectTopFrag = f;
      else selectBottomFrag = f;
      i++;
    }
    assert (selectTopFrag != null);
    assert (selectBottomFrag != null);

    if (!selectTopFrag.getHasdependencies()) {
      PlanFragment temp = selectTopFrag;
      selectTopFrag = selectBottomFrag;
      selectBottomFrag = temp;
    }

    // get the insert frag
    Statement insertStmt = insertProc.getStatements().get("insert");
    assert (insertStmt != null);

    for (PlanFragment f : insertStmt.getFragments()) insertFrag = f;

    // populate plan cache
    ActivePlanRepository.clear();
    ActivePlanRepository.addFragmentForTest(
        CatalogUtil.getUniqueIdForFragment(selectBottomFrag),
        Encoder.base64Decode(selectBottomFrag.getPlannodetree()));
    ActivePlanRepository.addFragmentForTest(
        CatalogUtil.getUniqueIdForFragment(selectTopFrag),
        Encoder.base64Decode(selectTopFrag.getPlannodetree()));
    ActivePlanRepository.addFragmentForTest(
        CatalogUtil.getUniqueIdForFragment(insertFrag),
        Encoder.base64Decode(insertFrag.getPlannodetree()));

    // insert some data
    ParameterSet params = ParameterSet.fromArrayNoCopy(1L, 1L, 1L);

    VoltTable[] results =
        ee2.executePlanFragments(
            1,
            new long[] {CatalogUtil.getUniqueIdForFragment(insertFrag)},
            null,
            new ParameterSet[] {params},
            1,
            0,
            42,
            Long.MAX_VALUE);
    assert (results.length == 1);
    assert (results[0].asScalarLong() == 1L);

    params = ParameterSet.fromArrayNoCopy(2L, 2L, 2L);

    results =
        ee1.executePlanFragments(
            1,
            new long[] {CatalogUtil.getUniqueIdForFragment(insertFrag)},
            null,
            new ParameterSet[] {params},
            2,
            1,
            42,
            Long.MAX_VALUE);
    assert (results.length == 1);
    assert (results[0].asScalarLong() == 1L);
  }
  private void createSetupIv2(
      final String file_path,
      final String file_nonce,
      SnapshotFormat format,
      final long txnId,
      final Map<Integer, Long> partitionTransactionIds,
      String data,
      final SystemProcedureExecutionContext context,
      final VoltTable result,
      Map<String, Map<Integer, Pair<Long, Long>>> exportSequenceNumbers,
      SiteTracker tracker,
      HashinatorSnapshotData hashinatorData,
      long timestamp) {
    JSONObject jsData = null;
    if (data != null && !data.isEmpty()) {
      try {
        jsData = new JSONObject(data);
      } catch (JSONException e) {
        SNAP_LOG.error(String.format("JSON exception on snapshot data \"%s\".", data), e);
      }
    }

    SnapshotWritePlan plan;
    if (format == SnapshotFormat.NATIVE) {
      plan = new NativeSnapshotWritePlan();
    } else if (format == SnapshotFormat.CSV) {
      plan = new CSVSnapshotWritePlan();
    } else if (format == SnapshotFormat.STREAM) {
      plan = new StreamSnapshotWritePlan();
    } else if (format == SnapshotFormat.INDEX) {
      plan = new IndexSnapshotWritePlan();
    } else {
      throw new RuntimeException("BAD BAD BAD");
    }
    final Callable<Boolean> deferredSetup =
        plan.createSetup(
            file_path,
            file_nonce,
            txnId,
            partitionTransactionIds,
            jsData,
            context,
            result,
            exportSequenceNumbers,
            tracker,
            hashinatorData,
            timestamp);
    m_deferredSetupFuture =
        VoltDB.instance()
            .submitSnapshotIOWork(
                new DeferredSnapshotSetup(plan, deferredSetup, txnId, partitionTransactionIds));

    synchronized (m_createLock) {
      // Clear any stale task lists just in case, and log if there was actually anything
      // to clear, since lingering tasks here are unexpected.
      if (!m_taskListsForHSIds.isEmpty()) {
        SNAP_LOG.warn("Found lingering snapshot tasks while setting up a snapshot");
      }
      m_taskListsForHSIds.clear();
      m_createSuccess.set(true);
      m_createResult.set(result);

      m_taskListsForHSIds.putAll(plan.getTaskListsForHSIds());

      // HACK HACK HACK.  If the task list is empty, this host has no work to do for
      // this snapshot.  We're going to create an empty list of tasks for one of the sites to do
      // so that we'll have a SnapshotSiteProcessor which will do the logSnapshotCompleteToZK.
      if (m_taskListsForHSIds.isEmpty()) {
        SNAP_LOG.debug(
            "Node had no snapshot work to do.  Creating a null task to drive completion.");
        m_taskListsForHSIds.put(context.getSiteId(), new ArrayDeque<SnapshotTableTask>());
      }
      SNAP_LOG.debug(
          "Planned tasks: "
              + CoreUtils.hsIdCollectionToString(plan.getTaskListsForHSIds().keySet()));
      SNAP_LOG.debug(
          "Created tasks for HSIds: "
              + CoreUtils.hsIdCollectionToString(m_taskListsForHSIds.keySet()));
    }
  }
  public void testRejoinWithExport() throws Exception {
    VoltProjectBuilder builder = getBuilderForTest();
    // builder.setTableAsExportOnly("blah", false);
    // builder.setTableAsExportOnly("blah_replicated", false);
    // builder.setTableAsExportOnly("PARTITIONED", false);
    // builder.setTableAsExportOnly("PARTITIONED_LARGE", false);
    builder.addExport(
        "org.voltdb.export.processors.RawProcessor",
        true, // enabled
        null); // authGroups (off)

    LocalCluster cluster =
        new LocalCluster("rejoin.jar", 2, 3, 1, BackendTarget.NATIVE_EE_JNI, true);
    boolean success = cluster.compile(builder);
    assertTrue(success);
    MiscUtils.copyFile(
        builder.getPathToDeployment(), Configuration.getPathToCatalogForTest("rejoin.xml"));
    cluster.setHasLocalServer(false);

    cluster.startUp();

    ClientResponse response;
    Client client;

    client = ClientFactory.createClient(m_cconfig);
    client.createConnection("localhost");

    response = client.callProcedure("InsertSinglePartition", 0);
    assertEquals(ClientResponse.SUCCESS, response.getStatus());
    response = client.callProcedure("Insert", 1);
    assertEquals(ClientResponse.SUCCESS, response.getStatus());
    response = client.callProcedure("InsertReplicated", 0);
    assertEquals(ClientResponse.SUCCESS, response.getStatus());
    client.close();

    client = ClientFactory.createClient(m_cconfig);
    client.createConnection("localhost", 21213);
    response = client.callProcedure("InsertSinglePartition", 2);
    assertEquals(ClientResponse.SUCCESS, response.getStatus());
    response = client.callProcedure("Insert", 3);
    assertEquals(ClientResponse.SUCCESS, response.getStatus());
    client.close();

    TrivialExportClient exportClient = new TrivialExportClient();
    exportClient.work();
    exportClient.work();

    Thread.sleep(4000);

    exportClient.work();

    Thread.sleep(4000);

    exportClient.work();

    cluster.shutDownSingleHost(0);
    Thread.sleep(100);

    VoltDB.Configuration config = new VoltDB.Configuration();
    config.m_pathToCatalog = Configuration.getPathToCatalogForTest("rejoin.jar");
    config.m_pathToDeployment = Configuration.getPathToCatalogForTest("rejoin.xml");
    config.m_rejoinToHostAndPort = m_username + ":" + m_password + "@localhost:21213";
    config.m_isRejoinTest = true;
    ServerThread localServer = new ServerThread(config);

    localServer.start();
    localServer.waitForInitialization();

    Thread.sleep(1000);
    while (VoltDB.instance().recovering()) {
      Thread.sleep(100);
    }

    client = ClientFactory.createClient(m_cconfig);
    client.createConnection("localhost");

    response = client.callProcedure("InsertSinglePartition", 5);
    assertEquals(ClientResponse.SUCCESS, response.getStatus());
    response = client.callProcedure("Insert", 6);
    assertEquals(ClientResponse.SUCCESS, response.getStatus());
    response = client.callProcedure("InsertReplicated", 7);
    assertEquals(ClientResponse.SUCCESS, response.getStatus());
    client.close();

    client = ClientFactory.createClient(m_cconfig);
    client.createConnection("localhost", 21213);
    response = client.callProcedure("InsertSinglePartition", 8);
    assertEquals(ClientResponse.SUCCESS, response.getStatus());
    response = client.callProcedure("Insert", 9);
    assertEquals(ClientResponse.SUCCESS, response.getStatus());
    client.close();

    exportClient = new TrivialExportClient();
    exportClient.work();

    Thread.sleep(4000);

    exportClient.work();

    Thread.sleep(4000);

    exportClient.work();

    localServer.shutdown();
    cluster.shutDown();
  }