Example #1
  /**
   * Create the completion node for the snapshot identified by the txnId. It assumes that all hosts
   * will race to call this, so it doesn't fail if the node already exists.
   *
   * @param path Path of the snapshot
   * @param nonce Nonce of the snapshot
   * @param txnId Transaction ID of the snapshot
   * @param isTruncation Whether or not this is a truncation snapshot
   * @param truncReqId Optional unique ID fed back to the monitor for identification
   * @return the ZooKeeper string callback tracking the asynchronous create of the completion node
   */
  public static ZKUtil.StringCallback createSnapshotCompletionNode(
      String path, String nonce, long txnId, boolean isTruncation, String truncReqId) {
    if (txnId <= 0) {
      VoltDB.crashGlobalVoltDB("Txnid must be greater than 0", true, null);
    }

    byte nodeBytes[] = null;
    try {
      JSONStringer stringer = new JSONStringer();
      stringer.object();
      stringer.key("txnId").value(txnId);
      stringer.key("isTruncation").value(isTruncation);
      stringer.key("didSucceed").value(false);
      stringer.key("hostCount").value(-1);
      stringer.key("path").value(path);
      stringer.key("nonce").value(nonce);
      stringer.key("truncReqId").value(truncReqId);
      stringer.key("exportSequenceNumbers").object().endObject();
      stringer.endObject();
      JSONObject jsonObj = new JSONObject(stringer.toString());
      nodeBytes = jsonObj.toString(4).getBytes(Charsets.UTF_8);
    } catch (Exception e) {
      VoltDB.crashLocalVoltDB("Error serializing snapshot completion node JSON", true, e);
    }

    ZKUtil.StringCallback cb = new ZKUtil.StringCallback();
    final String snapshotPath = VoltZK.completed_snapshots + "/" + txnId;
    VoltDB.instance()
        .getHostMessenger()
        .getZK()
        .create(snapshotPath, nodeBytes, Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT, cb, null);

    return cb;
  }
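The completion node is created with ZooKeeper's asynchronous create, and the "all hosts race" contract means a NODEEXISTS result is expected rather than an error. Below is a minimal sketch of that pattern against the stock Apache ZooKeeper client (not the VoltDB-patched one), assuming an already-connected handle; the class name, latch, and error handling are illustrative only.

import java.util.concurrent.CountDownLatch;

import org.apache.zookeeper.AsyncCallback;
import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.ZooDefs.Ids;
import org.apache.zookeeper.ZooKeeper;

public class RaceToCreateSketch {
  /** Asynchronously create the node; NODEEXISTS from a losing racer counts as success. */
  static void createIgnoringExisting(ZooKeeper zk, String path, byte[] payload)
      throws InterruptedException {
    final CountDownLatch done = new CountDownLatch(1);
    zk.create(
        path,
        payload,
        Ids.OPEN_ACL_UNSAFE,
        CreateMode.PERSISTENT,
        new AsyncCallback.StringCallback() {
          @Override
          public void processResult(int rc, String p, Object ctx, String name) {
            KeeperException.Code code = KeeperException.Code.get(rc);
            if (code != KeeperException.Code.OK && code != KeeperException.Code.NODEEXISTS) {
              System.err.println("Unexpected result creating " + p + ": " + code);
            }
            done.countDown();
          }
        },
        null);
    // Equivalent to the caller blocking on the returned StringCallback.
    done.await();
  }
}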
Example #2
    @Override
    public void run() {
      CatalogAndIds catalogStuff = null;
      do {
        try {
          catalogStuff = CatalogUtil.getCatalogFromZK(m_rvdb.getHostMessenger().getZK());
        } catch (org.apache.zookeeper_voltpatches.KeeperException.NoNodeException e) {
          // The catalog node hasn't been published yet; keep polling.
        } catch (Exception e) {
          VoltDB.crashLocalVoltDB(
              "System was interrupted while waiting for a catalog.", false, null);
        }
      } while (catalogStuff == null || catalogStuff.catalogBytes.length == 0);

      String serializedCatalog = null;
      byte[] catalogJarBytes = catalogStuff.catalogBytes;
      try {
        Pair<InMemoryJarfile, String> loadResults =
            CatalogUtil.loadAndUpgradeCatalogFromJar(catalogStuff.catalogBytes);
        serializedCatalog = CatalogUtil.getSerializedCatalogStringFromJar(loadResults.getFirst());
        catalogJarBytes = loadResults.getFirst().getFullJarBytes();
      } catch (IOException e) {
        VoltDB.crashLocalVoltDB("Unable to load catalog", false, e);
      }

      if ((serializedCatalog == null) || (serializedCatalog.length() == 0)) {
        VoltDB.crashLocalVoltDB("Catalog loading failure", false, null);
      }

      /* N.B. node recovery requires discovering the current catalog version. */
      Catalog catalog = new Catalog();
      catalog.execute(serializedCatalog);
      serializedCatalog = null;

      // Note: if this fails it will print an error first.
      // This is where we compile the real catalog and create the runtime
      // catalog context. To validate the deployment we compile and create
      // a starter context that uses a placeholder catalog.
      String result = CatalogUtil.compileDeployment(catalog, m_deployment, false);
      if (result != null) {
        hostLog.fatal(result);
        VoltDB.crashLocalVoltDB(result);
      }

      try {
        m_rvdb.m_catalogContext =
            new CatalogContext(
                catalogStuff.txnId,
                catalogStuff.uniqueId,
                catalog,
                catalogJarBytes,
                // Our starter catalog has set the deployment stuff, just yoink it out for now
                m_rvdb.m_catalogContext.getDeploymentBytes(),
                catalogStuff.version,
                -1);
      } catch (Exception e) {
        VoltDB.crashLocalVoltDB("Error agreeing on starting catalog version", true, e);
      }
    }
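The do/while above is a poll-until-published loop: NoNodeException means the leader has not written the catalog node yet, so the host keeps retrying. A stripped-down sketch of that loop against the stock ZooKeeper client, assuming a connected handle; the path and back-off interval are placeholders.

import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.ZooKeeper;

public class WaitForNodeSketch {
  /** Block until the node exists and carries a non-empty payload, treating NONODE as "not yet". */
  static byte[] waitForData(ZooKeeper zk, String path) throws InterruptedException {
    while (true) {
      try {
        byte[] data = zk.getData(path, false, null);
        if (data != null && data.length > 0) {
          return data;
        }
      } catch (KeeperException.NoNodeException e) {
        // The publisher hasn't created the node yet; fall through and retry.
      } catch (KeeperException e) {
        throw new RuntimeException("Unexpected failure reading " + path, e);
      }
      Thread.sleep(100); // placeholder back-off
    }
  }
}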
Example #3
    @Override
    public void run() {
      // if I'm the leader, send out the catalog
      if (m_rvdb.m_myHostId == m_rvdb.m_hostIdWithStartupCatalog) {

        try {
          // If no catalog was supplied, provide an empty one.
          if (m_rvdb.m_pathToStartupCatalog == null) {
            try {
              File emptyJarFile = CatalogUtil.createTemporaryEmptyCatalogJarFile();
              if (emptyJarFile == null) {
                VoltDB.crashLocalVoltDB("Failed to generate empty catalog.");
              }
              m_rvdb.m_pathToStartupCatalog = emptyJarFile.getAbsolutePath();
            } catch (IOException e) {
              VoltDB.crashLocalVoltDB(
                  "I/O exception while creating empty catalog jar file.", false, e);
            }
          }

          // Get the catalog bytes and byte count.
          byte[] catalogBytes = readCatalog(m_rvdb.m_pathToStartupCatalog);

          // Export needs a cluster global unique id for the initial catalog version
          long catalogUniqueId =
              UniqueIdGenerator.makeIdFromComponents(
                  System.currentTimeMillis(), 0, MpInitiator.MP_INIT_PID);
          hostLog.debug(String.format("Sending %d catalog bytes", catalogBytes.length));

          long catalogTxnId = TxnEgo.makeZero(MpInitiator.MP_INIT_PID).getTxnId();

          // Need to get the deployment bytes from the starter catalog context
          byte[] deploymentBytes = m_rvdb.getCatalogContext().getDeploymentBytes();

          // publish the catalog bytes to ZK
          CatalogUtil.updateCatalogToZK(
              m_rvdb.getHostMessenger().getZK(),
              0,
              catalogTxnId,
              catalogUniqueId,
              catalogBytes,
              deploymentBytes);
        } catch (IOException e) {
          VoltDB.crashGlobalVoltDB("Unable to distribute catalog.", false, e);
        } catch (org.apache.zookeeper_voltpatches.KeeperException e) {
          VoltDB.crashGlobalVoltDB("Unable to publish catalog.", false, e);
        } catch (InterruptedException e) {
          VoltDB.crashGlobalVoltDB("Interrupted while publishing catalog.", false, e);
        }
      }
    }
Example #4
    @Override
    public void run() {
      try {
        JSONStringer js = new JSONStringer();
        js.object();
        js.key("role").value(m_config.m_replicationRole.ordinal());
        js.key("active").value(m_rvdb.getReplicationActive());
        js.endObject();

        ZooKeeper zk = m_rvdb.getHostMessenger().getZK();
        // rejoining nodes figure out the replication role from other nodes
        if (!m_isRejoin) {
          try {
            zk.create(
                VoltZK.replicationconfig,
                js.toString().getBytes("UTF-8"),
                Ids.OPEN_ACL_UNSAFE,
                CreateMode.PERSISTENT);
          } catch (KeeperException.NodeExistsException e) {
            // Another host already published the replication config; read back the winner below.
          }
          String discoveredReplicationConfig =
              new String(zk.getData(VoltZK.replicationconfig, false, null), "UTF-8");
          JSONObject discoveredjsObj = new JSONObject(discoveredReplicationConfig);
          ReplicationRole discoveredRole =
              ReplicationRole.get((byte) discoveredjsObj.getLong("role"));
          if (!discoveredRole.equals(m_config.m_replicationRole)) {
            VoltDB.crashGlobalVoltDB(
                "Discovered replication role "
                    + discoveredRole
                    + " doesn't match locally specified replication role "
                    + m_config.m_replicationRole,
                true,
                null);
          }

          // See if we should bring the server up in WAN replication mode
          m_rvdb.setReplicationRole(discoveredRole);
        } else {
          String discoveredReplicationConfig =
              new String(zk.getData(VoltZK.replicationconfig, false, null), "UTF-8");
          JSONObject discoveredjsObj = new JSONObject(discoveredReplicationConfig);
          ReplicationRole discoveredRole =
              ReplicationRole.get((byte) discoveredjsObj.getLong("role"));
          boolean replicationActive = discoveredjsObj.getBoolean("active");
          // See if we should bring the server up in WAN replication mode
          m_rvdb.setReplicationRole(discoveredRole);
          m_rvdb.setReplicationActive(replicationActive);
        }
      } catch (Exception e) {
        VoltDB.crashGlobalVoltDB("Error discovering replication role", false, e);
      }
    }
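The non-rejoin branch is a publish-or-adopt handshake: every node proposes its local replication config, losers swallow NodeExistsException, and everyone then reads back whichever copy actually landed so the cluster agrees on a single value. A compact sketch of that shape with the stock ZooKeeper client; the class name and string payload are illustrative.

import java.nio.charset.StandardCharsets;

import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.ZooDefs.Ids;
import org.apache.zookeeper.ZooKeeper;

public class PublishOrAdoptSketch {
  /** Propose our config; if another host won the race, adopt the copy already in ZooKeeper. */
  static String publishOrAdopt(ZooKeeper zk, String path, String localConfig)
      throws KeeperException, InterruptedException {
    try {
      zk.create(
          path,
          localConfig.getBytes(StandardCharsets.UTF_8),
          Ids.OPEN_ACL_UNSAFE,
          CreateMode.PERSISTENT);
    } catch (KeeperException.NodeExistsException e) {
      // Someone else published first; their copy is authoritative.
    }
    return new String(zk.getData(path, false, null), StandardCharsets.UTF_8);
  }
}

The caller would then compare the adopted copy against its local setting and fail fast on a mismatch, mirroring the crashGlobalVoltDB branch above.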
Example #5
  ProcedureRunner(
      VoltProcedure procedure,
      SiteProcedureConnection site,
      SystemProcedureExecutionContext sysprocContext,
      Procedure catProc,
      CatalogSpecificPlanner csp) {
    assert (m_inputCRC.getValue() == 0L);

    if (procedure instanceof StmtProcedure) {
      m_procedureName = catProc.getTypeName().intern();
    } else {
      m_procedureName = procedure.getClass().getSimpleName();
    }
    m_procedure = procedure;
    m_isSysProc = procedure instanceof VoltSystemProcedure;
    m_catProc = catProc;
    m_site = site;
    m_systemProcedureContext = sysprocContext;
    m_csp = csp;

    m_procedure.init(this);

    m_statsCollector =
        new ProcedureStatsCollector(
            m_site.getCorrespondingSiteId(), m_site.getCorrespondingPartitionId(), m_catProc);
    VoltDB.instance()
        .getStatsAgent()
        .registerStatsSource(
            SysProcSelector.PROCEDURE, site.getCorrespondingSiteId(), m_statsCollector);

    reflect();
  }
Example #6
 @After
 public void tearDown() throws Exception {
   MockStatsSource.delay = 0;
   StatsAgent.OPS_COLLECTION_TIMEOUT = 60 * 1000;
   m_mvoltdb.shutdown(null);
   VoltDB.replaceVoltDBInstanceForTest(null);
 }
Example #7
 void shutdown() {
   try {
     m_leaderElector.shutdown();
   } catch (Exception e) {
     VoltDB.crashLocalVoltDB("Error shutting down GlobalServiceElector's LeaderElector", true, e);
   }
 }
Example #8
 /** Add a service to be notified if this node becomes the global leader */
 synchronized void registerService(Promotable service) {
   m_services.add(service);
   if (m_isLeader) {
     try {
       service.acceptPromotion();
     } catch (Exception e) {
       VoltDB.crashLocalVoltDB("Unable to promote global service.", true, e);
     }
   }
 }
Example #9
    @Override
    public void run() {

      boolean logEnabled = m_rvdb.m_catalogContext.cluster.getLogconfig().get("log").getEnabled();

      if (logEnabled) {
        if (m_config.m_isEnterprise) {
          try {
            Class<?> loggerClass =
                MiscUtils.loadProClass("org.voltdb.CommandLogImpl", "Command logging", false);
            if (loggerClass != null) {
              m_rvdb.m_commandLog = (CommandLog) loggerClass.newInstance();
            }
          } catch (InstantiationException e) {
            VoltDB.crashLocalVoltDB("Unable to instantiate command log", true, e);
          } catch (IllegalAccessException e) {
            VoltDB.crashLocalVoltDB("Unable to instantiate command log", true, e);
          }
        }
      }
    }
Example #10
 @Override
 public synchronized void becomeLeader() {
   hostLog.info("Host " + m_hostId + " promoted to be the global service provider");
   m_isLeader = true;
   for (Promotable service : m_services) {
     try {
       service.acceptPromotion();
     } catch (Exception e) {
       VoltDB.crashLocalVoltDB("Unable to promote global service.", true, e);
     }
   }
 }
Example #11
  @Test
  public void testLoader() {
    VoltDB.Configuration configuration = new VoltDB.Configuration();
    configuration.m_noLoadLibVOLTDB = true;
    MockVoltDB mockvolt = new MockVoltDB();
    VoltDB.ignoreCrash = true;
    VoltDB.replaceVoltDBInstanceForTest(mockvolt);
    mockvolt.m_noLoadLib = true;
    assertFalse(EELibraryLoader.loadExecutionEngineLibrary(false));
    assertFalse(VoltDB.wasCrashCalled);
    boolean threw = false;
    try {
      assertFalse(EELibraryLoader.loadExecutionEngineLibrary(true));
    } catch (AssertionError ae) {
      threw = true;
    }
    assertTrue(threw);
    assertTrue(VoltDB.wasCrashCalled);
    VoltDB.wasCrashCalled = false;
    VoltDB.initialize(configuration);
    assertFalse(EELibraryLoader.loadExecutionEngineLibrary(true));
    assertFalse(VoltDB.wasCrashCalled);

    // Now test SUCCESS case
    configuration = new VoltDB.Configuration();
    VoltDBInterface mockitovolt = mock(VoltDBInterface.class);
    VoltDBInterface realvolt = new RealVoltDB();
    when(mockitovolt.getEELibraryVersionString()).thenReturn(realvolt.getEELibraryVersionString());
    CatalogContext catContext = mock(CatalogContext.class);
    Cluster cluster = mock(Cluster.class);
    when(cluster.getVoltroot()).thenReturn(System.getProperty("java.io.tmpdir"));
    when(catContext.getCluster()).thenReturn(cluster);
    when(mockitovolt.getCatalogContext()).thenReturn(catContext);

    VoltDB.replaceVoltDBInstanceForTest(mockitovolt);
    VoltDB.initialize(configuration);
    assertTrue(EELibraryLoader.loadExecutionEngineLibrary(true));
  }
Example #12
 @Override
 public void run() {
   // Let the Export system read its configuration from the catalog.
   try {
     ExportManager.initialize(
         m_rvdb.m_myHostId,
         m_rvdb.m_catalogContext,
         m_isRejoin,
         m_rvdb.m_messenger,
         m_rvdb.m_partitionsToSitesAtStartupForExportInit);
   } catch (Throwable t) {
     VoltDB.crashLocalVoltDB("Error setting up export", true, t);
   }
 }
Example #13
  @Before
  public void setUp() throws Exception {
    // Set up CI with the mock objects.
    m_volt = mock(VoltDBInterface.class);
    m_statsAgent = mock(StatsAgent.class);
    m_sysinfoAgent = mock(SystemInformationAgent.class);
    m_messenger = mock(HostMessenger.class);
    m_handler = mock(ClientInputHandler.class);
    m_cartographer = mock(Cartographer.class);
    m_zk = mock(ZooKeeper.class);
    m_cxn = mock(Connection.class);

    /*
     * Set up the mock objects so that they return the expected objects during CI
     * construction.
     */
    VoltDB.replaceVoltDBInstanceForTest(m_volt);
    doReturn(m_statsAgent).when(m_volt).getStatsAgent();
    doReturn(m_statsAgent).when(m_volt).getOpsAgent(OpsSelector.STATISTICS);
    doReturn(m_sysinfoAgent).when(m_volt).getOpsAgent(OpsSelector.SYSTEMINFORMATION);
    doReturn(mock(SnapshotCompletionMonitor.class)).when(m_volt).getSnapshotCompletionMonitor();
    doReturn(m_messenger).when(m_volt).getHostMessenger();
    doReturn(mock(VoltNetworkPool.class)).when(m_messenger).getNetwork();
    doReturn(m_zk).when(m_messenger).getZK();
    doReturn(mock(Configuration.class)).when(m_volt).getConfig();
    doReturn(32L).when(m_messenger).getHSIdForLocalSite(HostMessenger.ASYNC_COMPILER_SITE_ID);
    doAnswer(
            new Answer<Object>() {
              @Override
              public Object answer(InvocationOnMock invocation) {
                return null;
              }
            })
        .when(m_cxn)
        .queueTask(any(Runnable.class));
    m_ci =
        spy(
            new ClientInterface(
                null,
                VoltDB.DEFAULT_PORT,
                VoltDB.DEFAULT_ADMIN_PORT,
                m_context,
                m_messenger,
                ReplicationRole.NONE,
                m_cartographer,
                m_allPartitions));
    m_ci.bindAdapter(m_cxn);

    // m_mb = m_ci.m_mailbox;
  }
Example #14
  Inits(RealVoltDB rvdb, int threadCount) {
    m_rvdb = rvdb;
    m_config = rvdb.m_config;
    // determine if this is a rejoining node
    // (used for license check and later the actual rejoin)
    m_isRejoin = m_config.m_startAction.doesRejoin();
    m_threadCount = threadCount;
    m_deployment = rvdb.m_catalogContext.getDeployment();

    // find all the InitWork subclasses using reflection and load them up
    Class<?>[] declaredClasses = Inits.class.getDeclaredClasses();
    for (Class<?> cls : declaredClasses) {
      // skip base classes and fake classes
      if (cls == InitWork.class) continue;
      if (cls == COMPLETION_WORK.class) continue;

      if (InitWork.class.isAssignableFrom(cls)) {
        InitWork instance = null;
        try {
          Constructor<?> constructor = cls.getDeclaredConstructor(Inits.class);
          instance = (InitWork) constructor.newInstance(this);
        } catch (Exception e) {
          VoltDB.crashLocalVoltDB("Critical error loading class " + cls.getName(), true, e);
        }
        m_jobs.put(instance.getClass(), instance);
      }
    }

    // make blockers and blockees symmetrical
    for (InitWork iw : m_jobs.values()) {
      for (Class<? extends InitWork> cls : iw.m_blockers) {
        InitWork blocker = m_jobs.get(cls);
        blocker.m_blockees.add(iw.getClass());
      }
    }

    // collect initially ready jobs
    List<Class<? extends InitWork>> toRemove = new ArrayList<Class<? extends InitWork>>();
    for (Entry<Class<? extends InitWork>, InitWork> e : m_jobs.entrySet()) {
      if (e.getValue().m_blockers.size() == 0) {
        toRemove.add(e.getKey());
        m_readyJobs.add(e.getValue());
      }
    }
  }
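The constructor builds a small dependency graph of InitWork jobs: blockers are mirrored into blockees, and jobs with no blockers seed the ready queue that the worker loop in Example #17 drains. A self-contained sketch of that bookkeeping with plain strings and hypothetical names, under the assumption that completing a job releases anything that was only waiting on it.

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;

public class InitGraphSketch {
  static class Job {
    final String name;
    final List<String> blockers;                      // jobs that must finish before this one
    final List<String> blockees = new ArrayList<>();  // jobs waiting on this one
    int remainingBlockers;

    Job(String name, List<String> blockers) {
      this.name = name;
      this.blockers = blockers;
    }
  }

  private final Map<String, Job> jobs = new HashMap<>();
  private final BlockingQueue<Job> readyJobs = new LinkedBlockingQueue<>();

  void add(Job job) {
    jobs.put(job.name, job);
  }

  /** Mirror of the "make blockers and blockees symmetrical" pass, then seed the ready queue. */
  void seed() {
    for (Job job : jobs.values()) {
      job.remainingBlockers = job.blockers.size();
      for (String blocker : job.blockers) {
        jobs.get(blocker).blockees.add(job.name);
      }
    }
    for (Job job : jobs.values()) {
      if (job.remainingBlockers == 0) {
        readyJobs.add(job);
      }
    }
  }

  /** Called when a job finishes: release anything that was only waiting on it. */
  void complete(Job job) {
    for (String name : job.blockees) {
      Job blockee = jobs.get(name);
      if (--blockee.remainingBlockers == 0) {
        readyJobs.add(blockee);
      }
    }
  }
}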
Example #15
  /**
   * Load the full (pro) subclass if DR is licensed and configured; otherwise load the no-op stub.
   *
   * @param partitionId partition id
   * @param nodeGateway the node-level DR gateway (may be null when DR is not configured)
   * @param isRejoin whether this node is rejoining the cluster
   * @return Instance of PartitionDRGateway
   */
  public static PartitionDRGateway getInstance(
      int partitionId, NodeDRGateway nodeGateway, boolean isRejoin) {
    final VoltDBInterface vdb = VoltDB.instance();
    LicenseApi api = vdb.getLicenseApi();
    final boolean licensedToDR = api.isDrReplicationAllowed();

    // if this is a primary cluster in a DR-enabled scenario
    // try to load the real version of this class
    PartitionDRGateway pdrg = null;
    if (licensedToDR && nodeGateway != null) {
      pdrg = tryToLoadProVersion();
    }
    if (pdrg == null) {
      pdrg = new PartitionDRGateway();
    }

    // init the instance and return
    try {
      pdrg.init(partitionId, nodeGateway, isRejoin);
    } catch (IOException e) {
      VoltDB.crashLocalVoltDB(e.getMessage(), false, e);
    }
    return pdrg;
  }
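tryToLoadProVersion() follows the common load-by-name-or-fall-back idiom: the enterprise class is looked up reflectively so the community build has no compile-time dependency on it, and any failure degrades to the no-op stub. A minimal sketch of that idiom with a hypothetical interface and class name.

public class OptionalImplSketch {
  interface Gateway {
    void init();
  }

  static class NoopGateway implements Gateway {
    @Override
    public void init() {
      // community build: nothing to do
    }
  }

  /** Try to instantiate the optional implementation by name; fall back to the no-op stub. */
  static Gateway load(String proClassName) {
    try {
      // The class name is a hypothetical stand-in for the pro-only implementation.
      Class<?> cls = Class.forName(proClassName);
      return (Gateway) cls.getDeclaredConstructor().newInstance();
    } catch (Exception e) {
      return new NoopGateway();
    }
  }
}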
Example #16
 @Override
 public void run() {
   try {
     m_rvdb
         .getAsyncCompilerAgent()
         .createMailbox(
             VoltDB.instance().getHostMessenger(),
             m_rvdb
                 .getHostMessenger()
                 .getHSIdForLocalSite(HostMessenger.ASYNC_COMPILER_SITE_ID));
   } catch (Exception e) {
     hostLog.fatal(null, e);
     System.exit(-1);
   }
 }
Example #17
 @Override
 public void run() {
   while (true) {
     InitWork iw = null;
     try {
       iw = m_readyJobs.take();
     } catch (InterruptedException e) {
       VoltDB.crashLocalVoltDB(e.getMessage(), true, e);
     }
     if (iw instanceof COMPLETION_WORK) return;
     // hostLog.info("Running InitWorker: " + iw.getClass().getName());
     iw.run();
     completeInitWork(iw);
   }
 }
Example #18
  @Override
  public ListenableFuture<?> write(final Callable<BBContainer> tupleData, int tableId) {
    final ListenableFuture<BBContainer> computedData =
        VoltDB.instance().getComputationService().submit(tupleData);

    return m_es.submit(
        new Callable<Object>() {
          @Override
          public Object call() throws Exception {
            try {
              final BBContainer data = computedData.get();
              /*
               * If a filter nulled out the buffer do nothing.
               */
              if (data == null) return null;
              if (m_writeFailed) {
                data.discard();
                return null;
              }
              try {
                int totalWritten = 0;

                final ByteBuffer dataBuf = data.b();
                DefaultSnapshotDataTarget.enforceSnapshotRateLimit(dataBuf.remaining());

                while (dataBuf.hasRemaining()) {
                  int written = m_fc.write(dataBuf);
                  if (written > 0) {
                    m_bytesWritten += written;
                    totalWritten += written;
                  }
                }
                if (m_bytesSinceLastSync.addAndGet(totalWritten) > m_bytesAllowedBeforeSync) {
                  m_fc.force(false);
                  m_bytesSinceLastSync.set(0);
                }
              } finally {
                data.discard();
              }
            } catch (Throwable t) {
              m_writeException = t;
              m_writeFailed = true;
              throw Throwables.propagate(t);
            }
            return null;
          }
        });
  }
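The write() path above throttles durability cost by calling force() only after a configurable number of unsynced bytes has accumulated. The sketch below isolates that policy, assuming an open FileChannel; the field names and threshold are placeholders, and rate limiting is left out.

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.util.concurrent.atomic.AtomicLong;

public class BatchedSyncSketch {
  private final FileChannel fc;
  private final long bytesAllowedBeforeSync;            // placeholder policy knob
  private final AtomicLong bytesSinceLastSync = new AtomicLong();

  BatchedSyncSketch(FileChannel fc, long bytesAllowedBeforeSync) {
    this.fc = fc;
    this.bytesAllowedBeforeSync = bytesAllowedBeforeSync;
  }

  /** Drain the buffer to disk, then fsync only once enough unsynced bytes have accumulated. */
  void write(ByteBuffer buf) throws IOException {
    int written = 0;
    while (buf.hasRemaining()) {
      int n = fc.write(buf);
      if (n > 0) {
        written += n;
      }
    }
    if (bytesSinceLastSync.addAndGet(written) > bytesAllowedBeforeSync) {
      fc.force(false); // metadata not required, matching force(false) above
      bytesSinceLastSync.set(0);
    }
  }
}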
Example #19
 @Test
 public void testUpdateCatalog() throws IOException {
   // only makes sense in pro (sysproc suite has a complementary test for community)
   if (VoltDB.instance().getConfig().m_isEnterprise) {
     String catalogHex = Encoder.hexEncode("blah");
     ByteBuffer msg = createMsg("@UpdateApplicationCatalog", catalogHex, "blah");
     ClientResponseImpl resp = m_ci.handleRead(msg, m_handler, m_cxn);
     assertNull(resp);
     ArgumentCaptor<LocalObjectMessage> captor = ArgumentCaptor.forClass(LocalObjectMessage.class);
     verify(m_messenger)
         .send(
             eq(32L), // A fixed number set in setUpOnce()
             captor.capture());
     assertTrue(captor.getValue().payload instanceof CatalogChangeWork);
   }
 }
Example #20
  @Test
  public void testLoader() {
    final VoltDB.Configuration configuration = new VoltDB.Configuration();
    configuration.m_noLoadLibVOLTDB = true;
    MockVoltDB mockvolt = new MockVoltDB();

    VoltDB.replaceVoltDBInstanceForTest(mockvolt);

    assert (EELibraryLoader.loadExecutionEngineLibrary(false));
    //        assertEquals(0, mockvolt.getCrashCount());
    assert (EELibraryLoader.loadExecutionEngineLibrary(true));
    //        assertEquals(1, mockvolt.getCrashCount());
    //        VoltDB.initialize(configuration);
    //        assertFalse(EELibraryLoader.loadExecutionEngineLibrary(true));
    //        assertEquals(1, mockvolt.getCrashCount());
  }
Example #21
    @Override
    public void run() {
      // If running commercial code (of value) and not rejoining, enforce licensing.
      // Make the leader the only license enforcer.
      boolean isLeader = (m_rvdb.m_myHostId == 0);
      if (m_config.m_isEnterprise && isLeader && !m_isRejoin) {

        if (!MiscUtils.validateLicense(
            m_rvdb.getLicenseApi(),
            m_deployment.getCluster().getHostcount(),
            m_rvdb.getReplicationRole())) {
          // validateLicense logs. Exit call is here for testability.
          VoltDB.crashGlobalVoltDB("VoltDB license constraints are not met.", false, null);
        }
      }
    }
Example #22
 public ParameterSet getParams() {
   params.run();
   try {
     return params.get();
   } catch (InterruptedException e) {
     VoltDB.crashLocalVoltDB("Interrupted while deserializing a parameter set", false, e);
   } catch (ExecutionException e) {
     // Don't rethrow Errors as RuntimeExceptions because we will eat their
     // delicious goodness later
     if (e.getCause() != null && e.getCause() instanceof Error) {
       throw (Error) e.getCause();
     }
     throw new RuntimeException(e);
   }
   return null;
 }
Example #23
  public void testRejoinPropogateAdminMode() throws Exception {
    // Reset the VoltFile prefix that may have been set by previous tests in this suite
    org.voltdb.utils.VoltFile.resetSubrootForThisProcess();
    VoltProjectBuilder builder = getBuilderForTest();
    builder.setSecurityEnabled(true);

    LocalCluster cluster =
        new LocalCluster("rejoin.jar", 2, 3, 1, BackendTarget.NATIVE_EE_JNI, true);
    boolean success = cluster.compileWithAdminMode(builder, 9998, false);
    assertTrue(success);
    MiscUtils.copyFile(
        builder.getPathToDeployment(), Configuration.getPathToCatalogForTest("rejoin.xml"));
    cluster.setHasLocalServer(false);

    cluster.startUp();

    ClientResponse response;
    Client client;

    client = ClientFactory.createClient(m_cconfig);
    client.createConnection("localhost", 9997);

    response = client.callProcedure("@Pause");
    assertEquals(ClientResponse.SUCCESS, response.getStatus());
    client.close();

    cluster.shutDownSingleHost(0);
    Thread.sleep(100);

    VoltDB.Configuration config = new VoltDB.Configuration();
    config.m_pathToCatalog = Configuration.getPathToCatalogForTest("rejoin.jar");
    config.m_pathToDeployment = Configuration.getPathToCatalogForTest("rejoin.xml");
    config.m_rejoinToHostAndPort = m_username + ":" + m_password + "@localhost:9996";
    config.m_isRejoinTest = true;
    ServerThread localServer = new ServerThread(config);

    localServer.start();
    localServer.waitForInitialization();

    Thread.sleep(1000);

    assertTrue(VoltDB.instance().getMode() == OperationMode.PAUSED);

    localServer.shutdown();
    cluster.shutDown();
  }
Example #24
 public static HsqlBackend initializeHSQLBackend(long siteId, CatalogContext context) {
   synchronized (backendLock) {
     if (m_backend == null) {
       try {
         m_backend = new HsqlBackend(siteId);
         final String binDDL = context.database.getSchema();
         final String ddl = Encoder.decodeBase64AndDecompress(binDDL);
         final String[] commands = ddl.split("\n");
         for (String command : commands) {
           String decoded_cmd = Encoder.hexDecodeToString(command);
           decoded_cmd = decoded_cmd.trim();
           if (decoded_cmd.length() == 0) {
             continue;
           }
           m_backend.runDDL(decoded_cmd);
         }
       } catch (final Exception ex) {
         hostLog.fatal("Unable to construct HSQL backend");
         VoltDB.crashLocalVoltDB(ex.getMessage(), true, ex);
       }
     }
     return m_backend;
   }
 }
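initializeHSQLBackend() is a lock-guarded lazy initializer: the first site to arrive constructs the shared HSQL backend and replays the schema, and later sites just reuse it. A stripped-down sketch of that shape with a hypothetical backend type.

public class LazyBackendSketch {
  static class ExpensiveBackend {
    ExpensiveBackend(long siteId) {
      // stand-in for costly setup such as replaying a schema
    }
  }

  private static final Object lock = new Object();
  private static ExpensiveBackend backend;

  /** First caller constructs the shared backend; later callers reuse it. */
  static ExpensiveBackend get(long siteId) {
    synchronized (lock) {
      if (backend == null) {
        backend = new ExpensiveBackend(siteId);
      }
      return backend;
    }
  }
}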
Example #25
  /**
   * Once participating host count is set, SnapshotCompletionMonitor can check this ZK node to
   * determine whether the snapshot has finished or not.
   *
   * <p>This should only be called when all participants have responded. It is possible that some
   * hosts finish taking the snapshot before the coordinator logs the participating host count. In
   * this case, the host count would have been decremented multiple times already. To make sure
   * finished hosts are logged correctly, this method adds participating host count + 1 to the
   * current host count.
   *
   * @param txnId The snapshot txnId
   * @param participantCount The number of hosts participating in this snapshot
   */
  public static void logParticipatingHostCount(long txnId, int participantCount) {
    ZooKeeper zk = VoltDB.instance().getHostMessenger().getZK();
    final String snapshotPath = VoltZK.completed_snapshots + "/" + txnId;

    boolean success = false;
    while (!success) {
      Stat stat = new Stat();
      byte data[] = null;
      try {
        data = zk.getData(snapshotPath, false, stat);
      } catch (KeeperException e) {
        if (e.code() == KeeperException.Code.NONODE) {
          // If snapshot creation failed for some reason, the node won't exist. Ignore it.
          return;
        }
        VoltDB.crashLocalVoltDB("Failed to get snapshot completion node", true, e);
      } catch (InterruptedException e) {
        VoltDB.crashLocalVoltDB("Interrupted getting snapshot completion node", true, e);
      }
      if (data == null) {
        VoltDB.crashLocalVoltDB("Data should not be null if the node exists", false, null);
      }

      try {
        JSONObject jsonObj = new JSONObject(new String(data, Charsets.UTF_8));
        if (jsonObj.getLong("txnId") != txnId) {
          VoltDB.crashLocalVoltDB("TxnId should match", false, null);
        }

        int hostCount = jsonObj.getInt("hostCount");
        // +1 because hostCount was initialized to -1
        jsonObj.put("hostCount", hostCount + participantCount + 1);
        zk.setData(snapshotPath, jsonObj.toString(4).getBytes(Charsets.UTF_8), stat.getVersion());
      } catch (KeeperException.BadVersionException e) {
        continue;
      } catch (Exception e) {
        VoltDB.crashLocalVoltDB("This ZK call should never fail", true, e);
      }

      success = true;
    }
  }
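logParticipatingHostCount() is an optimistic read-modify-write: read the node along with its Stat, mutate the JSON, write back conditioned on the version, and retry from the top on BadVersionException. A generic sketch of that loop against the stock ZooKeeper client, assuming the node stores a UTF-8 integer rather than JSON.

import java.nio.charset.StandardCharsets;

import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.ZooKeeper;
import org.apache.zookeeper.data.Stat;

public class OptimisticZkUpdateSketch {
  /** Add delta to an integer stored in a ZK node, retrying if another writer got there first. */
  static void addAndSet(ZooKeeper zk, String path, int delta)
      throws KeeperException, InterruptedException {
    while (true) {
      Stat stat = new Stat();
      byte[] data = zk.getData(path, false, stat);
      int current = Integer.parseInt(new String(data, StandardCharsets.UTF_8));
      byte[] updated = Integer.toString(current + delta).getBytes(StandardCharsets.UTF_8);
      try {
        zk.setData(path, updated, stat.getVersion()); // conditional on the version we read
        return;
      } catch (KeeperException.BadVersionException e) {
        // Lost the race; re-read and try again.
      }
    }
  }
}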
Example #26
  @SuppressWarnings("deprecation")
  @Override
  public void setUp() throws IOException, InterruptedException {
    VoltDB.instance().readBuildInfo("Test");

    // compile a catalog
    String testDir = BuildDirectoryUtils.getBuildDirectoryPath();
    String catalogJar = testDir + File.separator + JAR;

    TPCCProjectBuilder pb = new TPCCProjectBuilder();
    pb.addDefaultSchema();
    pb.addDefaultPartitioning();
    pb.addProcedures(MultiSiteSelect.class, InsertNewOrder.class);

    pb.compile(catalogJar, 2, 0);

    // load a catalog
    byte[] bytes = CatalogUtil.toBytes(new File(catalogJar));
    String serializedCatalog = CatalogUtil.loadCatalogFromJar(bytes, null);

    // create the catalog (that will be passed to the ClientInterface)
    catalog = new Catalog();
    catalog.execute(serializedCatalog);

    // update the catalog with the data from the deployment file
    String pathToDeployment = pb.getPathToDeployment();
    assertTrue(CatalogUtil.compileDeploymentAndGetCRC(catalog, pathToDeployment, true) >= 0);

    cluster = catalog.getClusters().get("cluster");
    CatalogMap<Procedure> procedures = cluster.getDatabases().get("database").getProcedures();
    Procedure insertProc = procedures.get("InsertNewOrder");
    assert (insertProc != null);
    selectProc = procedures.get("MultiSiteSelect");
    assert (selectProc != null);

    // Each EE needs its own thread for correct initialization.
    final AtomicReference<ExecutionEngine> site1Reference = new AtomicReference<ExecutionEngine>();
    final byte configBytes[] = LegacyHashinator.getConfigureBytes(2);
    Thread site1Thread =
        new Thread() {
          @Override
          public void run() {
            site1Reference.set(
                new ExecutionEngineJNI(
                    cluster.getRelativeIndex(),
                    1,
                    0,
                    0,
                    "",
                    100,
                    HashinatorType.LEGACY,
                    configBytes));
          }
        };
    site1Thread.start();
    site1Thread.join();

    final AtomicReference<ExecutionEngine> site2Reference = new AtomicReference<ExecutionEngine>();
    Thread site2Thread =
        new Thread() {
          @Override
          public void run() {
            site2Reference.set(
                new ExecutionEngineJNI(
                    cluster.getRelativeIndex(),
                    2,
                    1,
                    0,
                    "",
                    100,
                    HashinatorType.LEGACY,
                    configBytes));
          }
        };
    site2Thread.start();
    site2Thread.join();

    // create two EEs
    site1 = new ExecutionSite(0); // site 0
    ee1 = site1Reference.get();
    ee1.loadCatalog(0, catalog.serialize());
    site2 = new ExecutionSite(1); // site 1
    ee2 = site2Reference.get();
    ee2.loadCatalog(0, catalog.serialize());

    // cache some plan fragments
    selectStmt = selectProc.getStatements().get("selectAll");
    assert (selectStmt != null);
    int i = 0;
    // this kinda assumes the right order
    for (PlanFragment f : selectStmt.getFragments()) {
      if (i == 0) selectTopFrag = f;
      else selectBottomFrag = f;
      i++;
    }
    assert (selectTopFrag != null);
    assert (selectBottomFrag != null);

    if (!selectTopFrag.getHasdependencies()) {
      PlanFragment temp = selectTopFrag;
      selectTopFrag = selectBottomFrag;
      selectBottomFrag = temp;
    }

    // get the insert frag
    Statement insertStmt = insertProc.getStatements().get("insert");
    assert (insertStmt != null);

    for (PlanFragment f : insertStmt.getFragments()) insertFrag = f;

    // populate plan cache
    ActivePlanRepository.clear();
    ActivePlanRepository.addFragmentForTest(
        CatalogUtil.getUniqueIdForFragment(selectBottomFrag),
        Encoder.base64Decode(selectBottomFrag.getPlannodetree()));
    ActivePlanRepository.addFragmentForTest(
        CatalogUtil.getUniqueIdForFragment(selectTopFrag),
        Encoder.base64Decode(selectTopFrag.getPlannodetree()));
    ActivePlanRepository.addFragmentForTest(
        CatalogUtil.getUniqueIdForFragment(insertFrag),
        Encoder.base64Decode(insertFrag.getPlannodetree()));

    // insert some data
    ParameterSet params = ParameterSet.fromArrayNoCopy(1L, 1L, 1L);

    VoltTable[] results =
        ee2.executePlanFragments(
            1,
            new long[] {CatalogUtil.getUniqueIdForFragment(insertFrag)},
            null,
            new ParameterSet[] {params},
            1,
            0,
            42,
            Long.MAX_VALUE);
    assert (results.length == 1);
    assert (results[0].asScalarLong() == 1L);

    params = ParameterSet.fromArrayNoCopy(2L, 2L, 2L);

    results =
        ee1.executePlanFragments(
            1,
            new long[] {CatalogUtil.getUniqueIdForFragment(insertFrag)},
            null,
            new ParameterSet[] {params},
            2,
            1,
            42,
            Long.MAX_VALUE);
    assert (results.length == 1);
    assert (results[0].asScalarLong() == 1L);
  }
Example #27
    @Override
    public void run() {
      if (!m_isRejoin && !m_config.m_isRejoinTest && !m_rvdb.m_joining) {
        String snapshotPath = null;
        if (m_rvdb
                .m_catalogContext
                .cluster
                .getDatabases()
                .get("database")
                .getSnapshotschedule()
                .get("default")
            != null) {
          snapshotPath =
              m_rvdb
                  .m_catalogContext
                  .cluster
                  .getDatabases()
                  .get("database")
                  .getSnapshotschedule()
                  .get("default")
                  .getPath();
        }

        int[] allPartitions = new int[m_rvdb.m_configuredNumberOfPartitions];
        for (int ii = 0; ii < allPartitions.length; ii++) {
          allPartitions[ii] = ii;
        }

        org.voltdb.catalog.CommandLog cl =
            m_rvdb.m_catalogContext.cluster.getLogconfig().get("log");

        try {
          m_rvdb.m_restoreAgent =
              new RestoreAgent(
                  m_rvdb.m_messenger,
                  m_rvdb.getSnapshotCompletionMonitor(),
                  m_rvdb,
                  m_config.m_startAction,
                  cl.getEnabled(),
                  cl.getLogpath(),
                  cl.getInternalsnapshotpath(),
                  snapshotPath,
                  allPartitions,
                  CatalogUtil.getVoltDbRoot(m_deployment.getPaths()).getAbsolutePath());
        } catch (IOException e) {
          VoltDB.crashLocalVoltDB("Unable to construct the RestoreAgent", true, e);
        }

        m_rvdb.m_globalServiceElector.registerService(m_rvdb.m_restoreAgent);
        m_rvdb.m_restoreAgent.setCatalogContext(m_rvdb.m_catalogContext);
        // Generate plans and get (hostID, catalogPath) pair
        Pair<Integer, String> catalog = m_rvdb.m_restoreAgent.findRestoreCatalog();

        // if the restore agent found a catalog, set the following info
        // so the right node can send it out to the others
        if (catalog != null) {
          // Make sure the catalog corresponds to the current server version.
          // Prevent automatic upgrades by rejecting mismatched versions.
          int hostId = catalog.getFirst().intValue();
          String catalogPath = catalog.getSecond();
          // Perform a version check when the catalog jar is available
          // on the current host.
          // Check that this host is the one providing the catalog.
          if (m_rvdb.m_myHostId == hostId) {
            try {
              byte[] catalogBytes = readCatalog(catalogPath);
              InMemoryJarfile inMemoryJar = CatalogUtil.loadInMemoryJarFile(catalogBytes);
              // This call pre-checks and returns the build info/version.
              String[] buildInfo = CatalogUtil.getBuildInfoFromJar(inMemoryJar);
              String catalogVersion = buildInfo[0];
              String serverVersion = m_rvdb.getVersionString();
              if (!catalogVersion.equals(serverVersion)) {
                VoltDB.crashLocalVoltDB(
                    String.format(
                        "Unable to load version %s catalog \"%s\" "
                            + "from snapshot into a version %s server.",
                        catalogVersion, catalogPath, serverVersion),
                    false,
                    null);
              }
            } catch (IOException e) {
              // Make it non-fatal with no check performed.
              hostLog.warn(
                  String.format(
                      "Unable to load catalog for version check due to exception: %s.",
                      e.getMessage()));
            }
          }
          hostLog.debug("Found catalog to load on host " + hostId + ": " + catalogPath);
          m_rvdb.m_hostIdWithStartupCatalog = hostId;
          assert (m_rvdb.m_hostIdWithStartupCatalog >= 0);
          m_rvdb.m_pathToStartupCatalog = catalogPath;
          assert (m_rvdb.m_pathToStartupCatalog != null);
        }
      }
    }
Example #28
 @Before
 public void setUp() throws Exception {
   m_mvoltdb = new MockVoltDB();
   VoltDB.replaceVoltDBInstanceForTest(m_mvoltdb);
 }
Example #29
  /**
   * The only public method: do all the work to start a snapshot. Assumes that a snapshot is
   * feasible, that the caller has validated it can be accomplished, and that the caller knows this
   * is a consistent or useful transaction point at which to snapshot.
   *
   * @param file_path
   * @param file_nonce
   * @param format
   * @param block
   * @param multiPartTxnId
   * @param partitionTxnId
   * @param legacyPerPartitionTxnIds
   * @param data
   * @param context
   * @param hostname
   * @param hashinatorData
   * @param timestamp
   * @return VoltTable describing the results of the snapshot attempt
   */
  public VoltTable startSnapshotting(
      final String file_path,
      final String file_nonce,
      final SnapshotFormat format,
      final byte block,
      final long multiPartTxnId,
      final long partitionTxnId,
      final long legacyPerPartitionTxnIds[],
      final String data,
      final SystemProcedureExecutionContext context,
      final String hostname,
      final HashinatorSnapshotData hashinatorData,
      final long timestamp) {
    TRACE_LOG.trace("Creating snapshot target and handing to EEs");
    final VoltTable result = SnapshotUtil.constructNodeResultsTable();
    final int numLocalSites =
        context.getCluster().getDeployment().get("deployment").getSitesperhost();

    // One site wins the race to create the snapshot targets, populating
    // m_taskListsForSites for the other sites and creating an appropriate
    // number of snapshot permits.
    synchronized (SnapshotSiteProcessor.m_snapshotCreateLock) {
      SnapshotSiteProcessor.m_snapshotCreateSetupBarrierActualAction.set(
          new Runnable() {
            @Override
            public void run() {
              Map<Integer, Long> partitionTransactionIds = m_partitionLastSeenTransactionIds;
              SNAP_LOG.debug("Last seen partition transaction ids " + partitionTransactionIds);
              m_partitionLastSeenTransactionIds = new HashMap<Integer, Long>();
              partitionTransactionIds.put(TxnEgo.getPartitionId(multiPartTxnId), multiPartTxnId);

              /*
               * Do a quick sanity check that the provided IDs
               * don't conflict with currently active partitions. If they do
               * it isn't fatal we can just skip it.
               */
              for (long txnId : legacyPerPartitionTxnIds) {
                final int legacyPartition = TxnEgo.getPartitionId(txnId);
                if (partitionTransactionIds.containsKey(legacyPartition)) {
                  SNAP_LOG.warn(
                      "While saving a snapshot and propagating legacy "
                          + "transaction ids, found an id that matches currently active partition "
                          + partitionTransactionIds.get(legacyPartition));
                } else {
                  partitionTransactionIds.put(legacyPartition, txnId);
                }
              }
              exportSequenceNumbers = SnapshotSiteProcessor.getExportSequenceNumbers();
              createSetupIv2(
                  file_path,
                  file_nonce,
                  format,
                  multiPartTxnId,
                  partitionTransactionIds,
                  data,
                  context,
                  result,
                  exportSequenceNumbers,
                  context.getSiteTrackerForSnapshot(),
                  hashinatorData,
                  timestamp);
            }
          });

      // Create a barrier to use with the current number of sites to wait for
      // or if the barrier is already set up check if it is broken and reset if necessary
      SnapshotSiteProcessor.readySnapshotSetupBarriers(numLocalSites);

      // From within this EE, record the sequence numbers as of the start of the snapshot (now)
      // so that the info can be put in the digest.
      SnapshotSiteProcessor.populateExportSequenceNumbersForExecutionSite(context);
      SNAP_LOG.debug(
          "Registering transaction id "
              + partitionTxnId
              + " for "
              + TxnEgo.getPartitionId(partitionTxnId));
      m_partitionLastSeenTransactionIds.put(TxnEgo.getPartitionId(partitionTxnId), partitionTxnId);
    }

    boolean runPostTasks = false;
    VoltTable earlyResultTable = null;
    try {
      SnapshotSiteProcessor.m_snapshotCreateSetupBarrier.await();
      try {
        synchronized (m_createLock) {
          SNAP_LOG.debug(
              "Found tasks for HSIds: "
                  + CoreUtils.hsIdCollectionToString(m_taskListsForHSIds.keySet()));
          SNAP_LOG.debug("Looking for local HSID: " + CoreUtils.hsIdToString(context.getSiteId()));
          Deque<SnapshotTableTask> taskList = m_taskListsForHSIds.remove(context.getSiteId());
          // If createSetup failed, then the first site to reach here is going
          // to send the results table generated by createSetup, and then empty out the table.
          // All other sites to reach here will send the appropriate empty table.
          // If createSetup was a success but the taskList is null, then we'll use the block
          // switch to figure out what flavor of empty SnapshotSave result table to return.
          if (!m_createSuccess.get()) {
            // There shouldn't be any work for any site if we failed
            assert (m_taskListsForHSIds.isEmpty());
            VoltTable finalresult = m_createResult.get();
            if (finalresult != null) {
              m_createResult.set(null);
              earlyResultTable = finalresult;
            } else {
              // We returned a non-empty NodeResultsTable with the failures in it,
              // every other site needs to return a NodeResultsTable as well.
              earlyResultTable = SnapshotUtil.constructNodeResultsTable();
            }
          } else if (taskList == null) {
            SNAP_LOG.debug("No task for this site, block " + block);
            // This node is participating in the snapshot but this site has nothing to do.
            // Send back an appropriate empty table based on the block flag
            if (block != 0) {
              runPostTasks = true;
              earlyResultTable = SnapshotUtil.constructPartitionResultsTable();
              earlyResultTable.addRow(
                  context.getHostId(),
                  hostname,
                  CoreUtils.getSiteIdFromHSId(context.getSiteId()),
                  "SUCCESS",
                  "");
            } else {
              earlyResultTable = SnapshotUtil.constructNodeResultsTable();
            }
          } else {
            context
                .getSiteSnapshotConnection()
                .initiateSnapshots(format, taskList, multiPartTxnId, exportSequenceNumbers);
          }

          if (m_deferredSetupFuture != null) {
            // Add a listener to the deferred setup so that it can kick off the snapshot
            // task once the setup is done.
            m_deferredSetupFuture.addListener(
                new Runnable() {
                  @Override
                  public void run() {
                    DeferredSnapshotSetup deferredSnapshotSetup = null;
                    try {
                      deferredSnapshotSetup = m_deferredSetupFuture.get();
                    } catch (Exception e) {
                      // it doesn't throw
                    }

                    assert deferredSnapshotSetup != null;
                    context
                        .getSiteSnapshotConnection()
                        .startSnapshotWithTargets(
                            deferredSnapshotSetup.getPlan().getSnapshotDataTargets());
                  }
                },
                CoreUtils.SAMETHREADEXECUTOR);
          }
        }
      } finally {
        SnapshotSiteProcessor.m_snapshotCreateFinishBarrier.await(120, TimeUnit.SECONDS);
      }
    } catch (TimeoutException e) {
      VoltDB.crashLocalVoltDB(
          "Timed out waiting 120 seconds for all threads to arrive and start snapshot", true, null);
    } catch (InterruptedException e) {
      result.addRow(context.getHostId(), hostname, "", "FAILURE", CoreUtils.throwableToString(e));
      earlyResultTable = result;
    } catch (BrokenBarrierException e) {
      result.addRow(context.getHostId(), hostname, "", "FAILURE", CoreUtils.throwableToString(e));
      earlyResultTable = result;
    }

    // If earlyResultTable is set, return here
    if (earlyResultTable != null) {
      if (runPostTasks) {
        // Need to run post-snapshot tasks before finishing
        SnapshotSiteProcessor.runPostSnapshotTasks(context);
      }
      return earlyResultTable;
    }

    if (block != 0) {
      HashSet<Exception> failures = Sets.newHashSet();
      String status = "SUCCESS";
      String err = "";
      try {
        // For a blocking snapshot, propagate the error from deferred setup back to the client
        final DeferredSnapshotSetup deferredSnapshotSetup = m_deferredSetupFuture.get();
        if (deferredSnapshotSetup != null && deferredSnapshotSetup.getError() != null) {
          status = "FAILURE";
          err = deferredSnapshotSetup.getError().toString();
          failures.add(deferredSnapshotSetup.getError());
        }

        failures.addAll(context.getSiteSnapshotConnection().completeSnapshotWork());
        SnapshotSiteProcessor.runPostSnapshotTasks(context);
      } catch (Exception e) {
        status = "FAILURE";
        err = e.toString();
        failures.add(e);
      }
      final VoltTable blockingResult = SnapshotUtil.constructPartitionResultsTable();

      if (failures.isEmpty()) {
        blockingResult.addRow(
            context.getHostId(),
            hostname,
            CoreUtils.getSiteIdFromHSId(context.getSiteId()),
            status,
            err);
      } else {
        status = "FAILURE";
        for (Exception e : failures) {
          err = e.toString();
        }
        blockingResult.addRow(
            context.getHostId(),
            hostname,
            CoreUtils.getSiteIdFromHSId(context.getSiteId()),
            status,
            err);
      }
      return blockingResult;
    }

    return result;
  }
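The setup phase relies on a barrier whose action runs exactly once: every local site arrives at m_snapshotCreateSetupBarrier, one of them executes the setup Runnable, and then all of them proceed to look up their own task lists. java.util.concurrent.CyclicBarrier provides the same shape directly, as sketched below with illustrative names; the last thread to arrive runs the barrier action before any thread is released.

import java.util.concurrent.BrokenBarrierException;
import java.util.concurrent.CyclicBarrier;

public class OneWinnerSetupSketch {
  private final CyclicBarrier setupBarrier;

  OneWinnerSetupSketch(int numLocalSites, Runnable setupAction) {
    // The barrier action runs exactly once per generation, in the last-arriving thread.
    this.setupBarrier = new CyclicBarrier(numLocalSites, setupAction);
  }

  /** Every site calls this; one site's arrival triggers the shared setup, then all proceed. */
  void awaitSetup() throws InterruptedException, BrokenBarrierException {
    setupBarrier.await();
  }
}

Each site thread would call awaitSetup() and then pull its own work, mirroring the per-HSId task-list lookup guarded by m_createLock above.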
Example #30
  private void createSetupIv2(
      final String file_path,
      final String file_nonce,
      SnapshotFormat format,
      final long txnId,
      final Map<Integer, Long> partitionTransactionIds,
      String data,
      final SystemProcedureExecutionContext context,
      final VoltTable result,
      Map<String, Map<Integer, Pair<Long, Long>>> exportSequenceNumbers,
      SiteTracker tracker,
      HashinatorSnapshotData hashinatorData,
      long timestamp) {
    JSONObject jsData = null;
    if (data != null && !data.isEmpty()) {
      try {
        jsData = new JSONObject(data);
      } catch (JSONException e) {
        SNAP_LOG.error(String.format("JSON exception on snapshot data \"%s\".", data), e);
      }
    }

    SnapshotWritePlan plan;
    if (format == SnapshotFormat.NATIVE) {
      plan = new NativeSnapshotWritePlan();
    } else if (format == SnapshotFormat.CSV) {
      plan = new CSVSnapshotWritePlan();
    } else if (format == SnapshotFormat.STREAM) {
      plan = new StreamSnapshotWritePlan();
    } else if (format == SnapshotFormat.INDEX) {
      plan = new IndexSnapshotWritePlan();
    } else {
      throw new RuntimeException("BAD BAD BAD");
    }
    final Callable<Boolean> deferredSetup =
        plan.createSetup(
            file_path,
            file_nonce,
            txnId,
            partitionTransactionIds,
            jsData,
            context,
            result,
            exportSequenceNumbers,
            tracker,
            hashinatorData,
            timestamp);
    m_deferredSetupFuture =
        VoltDB.instance()
            .submitSnapshotIOWork(
                new DeferredSnapshotSetup(plan, deferredSetup, txnId, partitionTransactionIds));

    synchronized (m_createLock) {
      // Seems like this should be cleared out just in case
      // Log if there is actually anything to clear since it is unexpected
      if (!m_taskListsForHSIds.isEmpty()) {
        SNAP_LOG.warn("Found lingering snapshot tasks while setting up a snapshot");
      }
      m_taskListsForHSIds.clear();
      m_createSuccess.set(true);
      m_createResult.set(result);

      m_taskListsForHSIds.putAll(plan.getTaskListsForHSIds());

      // HACK HACK HACK.  If the task list is empty, this host has no work to do for
      // this snapshot.  We're going to create an empty list of tasks for one of the sites to do
      // so that we'll have a SnapshotSiteProcessor which will do the logSnapshotCompleteToZK.
      if (m_taskListsForHSIds.isEmpty()) {
        SNAP_LOG.debug(
            "Node had no snapshot work to do.  Creating a null task to drive completion.");
        m_taskListsForHSIds.put(context.getSiteId(), new ArrayDeque<SnapshotTableTask>());
      }
      SNAP_LOG.debug(
          "Planned tasks: "
              + CoreUtils.hsIdCollectionToString(plan.getTaskListsForHSIds().keySet()));
      SNAP_LOG.debug(
          "Created tasks for HSIds: "
              + CoreUtils.hsIdCollectionToString(m_taskListsForHSIds.keySet()));
    }
  }