Example #1
  /**
   * Pass the VoltMessage to CI's handleRead() and verify that the expected parameters are passed
   * to the initiator's createTransaction() method. This is a convenience method for callers that
   * expect the result of handling this message to be the creation of a new transaction.
   *
   * @param msg
   * @param procName
   * @param partitionParam null if it's a multi-part txn
   * @param isAdmin
   * @param isReadonly
   * @param isSinglePart
   * @param isEverySite
   * @return StoredProcedureInvocation object passed to createTransaction()
   * @throws IOException
   */
  private StoredProcedureInvocation readAndCheck(
      ByteBuffer msg,
      String procName,
      Object partitionParam,
      boolean isAdmin,
      boolean isReadonly,
      boolean isSinglePart,
      boolean isEverySite)
      throws IOException {
    ClientResponseImpl resp = m_ci.handleRead(msg, m_handler, m_cxn);
    assertNull(resp);

    ArgumentCaptor<Long> destinationCaptor = ArgumentCaptor.forClass(Long.class);
    ArgumentCaptor<Iv2InitiateTaskMessage> messageCaptor =
        ArgumentCaptor.forClass(Iv2InitiateTaskMessage.class);
    verify(m_messenger).send(destinationCaptor.capture(), messageCaptor.capture());

    Iv2InitiateTaskMessage message = messageCaptor.getValue();
    // assertEquals(isAdmin, message.); // is admin
    assertEquals(isReadonly, message.isReadOnly()); // readonly
    assertEquals(isSinglePart, message.isSinglePartition()); // single-part
    // assertEquals(isEverySite, message.g); // every site
    assertEquals(procName, message.getStoredProcedureName());
    if (isSinglePart) {
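      // For single-partition work, the partition parameter should hash to the partition whose
      // master initiator HSId is the destination of the message.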
      int expected = TheHashinator.hashToPartition(partitionParam);
      assertEquals(
          Long.valueOf(m_cartographer.getHSIdForMaster(expected)), destinationCaptor.getValue());
    } else {
      assertEquals(
          Long.valueOf(m_cartographer.getHSIdForMultiPartitionInitiator()),
          destinationCaptor.getValue());
    }
    return message.getStoredProcedureInvocation();
  }
Example #2
  private static void buildCatalog() throws IOException {
    // build a real catalog
    File cat = File.createTempFile("temp-log-reinitiator", "catalog");
    cat.deleteOnExit();

    VoltProjectBuilder builder = new VoltProjectBuilder();
    String schema = "create table A (i integer not null, primary key (i));";
    builder.addLiteralSchema(schema);
    builder.addPartitionInfo("A", "i");
    builder.addStmtProcedure("hello", "select * from A where i = ?", "A.i: 0");

    if (!builder.compile(cat.getAbsolutePath())) {
      throw new IOException();
    }

    byte[] bytes = CatalogUtil.toBytes(cat);
    String serializedCat = CatalogUtil.loadCatalogFromJar(bytes, null);
    assertNotNull(serializedCat);
    Catalog catalog = new Catalog();
    catalog.execute(serializedCat);

    String deploymentPath = builder.getPathToDeployment();
    CatalogUtil.compileDeploymentAndGetCRC(catalog, deploymentPath, true);

    m_context = new CatalogContext(0, 0, catalog, bytes, 0, 0, 0);
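    // Install the legacy hashinator as the global hashinator, configured here for 3 partitions.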
    TheHashinator.initialize(LegacyHashinator.class, LegacyHashinator.getConfigureBytes(3));
  }
Example #3
File: Inits.java Project: tnn/voltdb
    @Override
    public void run() {
      // Initialize the complex partitioning scheme
      int partitionCount;
      if (m_config.m_startAction == StartAction.JOIN) {
        // Initialize the hashinator with the existing partition count in the cluster,
        // don't include the partitions that we're adding because they shouldn't contain
        // any ranges yet.
        partitionCount = m_rvdb.m_cartographer.getPartitionCount();
      } else {
        partitionCount = m_rvdb.m_configuredNumberOfPartitions;
      }

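      // Install the configured hashinator implementation, sized for the partition count
      // computed above.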
      TheHashinator.initialize(
          TheHashinator.getConfiguredHashinatorClass(),
          TheHashinator.getConfigureBytes(partitionCount));
    }
Example #4
    @Override
    public void restore(SiteProcedureConnection connection) {
      rejoinLog.debug("Updating the hashinator to version " + version);

      // Update the Java hashinator
      Pair<? extends UndoAction, TheHashinator> hashinatorPair =
          TheHashinator.updateConfiguredHashinator(version, hashinatorConfig);

      // Update the EE hashinator
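      // updateConfiguredHashinator returned a Pair of an UndoAction and the new hashinator;
      // only the hashinator (getSecond) is handed to the EE here.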
      connection.updateHashinator(hashinatorPair.getSecond());
    }
Example #5
  @Test
  public void testFinishedSPAdHocPlanning() throws Exception {
    // Need a batch and a statement
    AdHocPlannedStmtBatch plannedStmtBatch =
        new AdHocPlannedStmtBatch(
            "select * from a where i = 3",
            3,
            0,
            0,
            "localhost",
            false,
            ProcedureInvocationType.ORIGINAL,
            0,
            0,
            null);
    AdHocPlannedStatement s =
        new AdHocPlannedStatement(
            "select * from a where i = 3".getBytes(Constants.UTF8ENCODING),
            new CorePlan(
                new byte[0], null, new byte[20], null, false, false, true, new VoltType[0], 0),
            ParameterSet.fromArrayNoCopy(new Object[0]),
            null,
            null,
            3);
    plannedStmtBatch.addStatement(s);
    m_ci.processFinishedCompilerWork(plannedStmtBatch).run();

    ArgumentCaptor<Long> destinationCaptor = ArgumentCaptor.forClass(Long.class);
    ArgumentCaptor<Iv2InitiateTaskMessage> messageCaptor =
        ArgumentCaptor.forClass(Iv2InitiateTaskMessage.class);
    verify(m_messenger).send(destinationCaptor.capture(), messageCaptor.capture());
    Iv2InitiateTaskMessage message = messageCaptor.getValue();

    assertTrue(message.isReadOnly()); // readonly
    assertTrue(message.isSinglePartition()); // single-part
    assertEquals("@AdHoc_RO_SP", message.getStoredProcedureName());

    // SP AdHoc should have partitioning parameter serialized in the parameter set
    Object partitionParam = message.getStoredProcedureInvocation().getParameterAtIndex(0);
    assertTrue(partitionParam instanceof byte[]);
    VoltType type =
        VoltType.get((Byte) message.getStoredProcedureInvocation().getParameterAtIndex(1));
    assertTrue(type.isInteger());
    byte[] serializedData = (byte[]) message.getStoredProcedureInvocation().getParameterAtIndex(2);
    AdHocPlannedStatement[] statements =
        AdHocPlannedStmtBatch.planArrayFromBuffer(ByteBuffer.wrap(serializedData));
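    // The serialized partition parameter should match TheHashinator.valueToBytes(3),
    // the byte form of the partitioning value used in the ad hoc statement.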
    assertTrue(Arrays.equals(TheHashinator.valueToBytes(3), (byte[]) partitionParam));
    assertEquals(1, statements.length);
    String sql = new String(statements[0].sql, Constants.UTF8ENCODING);
    assertEquals("select * from a where i = 3", sql);
  }
Example #6
  // produce the contents of the repair log.
  public List<Iv2RepairLogResponseMessage> contents(long requestId, boolean forMPI) {
    List<Item> items = new LinkedList<Item>();
    // All cases include the log of MP transactions
    items.addAll(m_logMP);
    // SP repair requests also want the SP transactions
    if (!forMPI) {
      items.addAll(m_logSP);
    }

    // Contents need to be sorted in increasing spHandle order
    Collections.sort(items, m_handleComparator);

    int ofTotal = items.size() + 1;
    if (tmLog.isDebugEnabled()) {
      tmLog.debug("Responding with " + ofTotal + " repair log parts.");
    }
    List<Iv2RepairLogResponseMessage> responses = new LinkedList<Iv2RepairLogResponseMessage>();

    // this constructor sets its sequence no to 0 as ack
    // messages are first in the sequence
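    // The header response also carries the current versioned hashinator configuration
    // (getCurrentVersionedConfigCooked) alongside the last SP and MP handles.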
    Iv2RepairLogResponseMessage hheader =
        new Iv2RepairLogResponseMessage(
            requestId,
            ofTotal,
            m_lastSpHandle,
            m_lastMpHandle,
            TheHashinator.getCurrentVersionedConfigCooked());
    responses.add(hheader);

    int seq = responses.size(); // = 1, as the first sequence

    Iterator<Item> itemator = items.iterator();
    while (itemator.hasNext()) {
      Item item = itemator.next();
      Iv2RepairLogResponseMessage response =
          new Iv2RepairLogResponseMessage(
              requestId, seq++, ofTotal, item.getHandle(), item.getTxnId(), item.getMessage());
      responses.add(response);
    }
    return responses;
  }
Example #7
  @Override
  public void run() {
    // ratio of upsert for @Load*Table
    final float upsertratio = 0.50F;
    // ratio of upsert to an existing table for @Load*Table
    final float upserthitratio = 0.20F;

    CopyAndDeleteDataTask cdtask = new CopyAndDeleteDataTask();
    cdtask.start();
    try {
      while (m_shouldContinue.get()) {
        // 1 in 3 gets copied and then deleted after leaving some data
        byte shouldCopy = (byte) (m_random.nextInt(3) == 0 ? 1 : 0);
        byte upsertMode = (byte) (m_random.nextFloat() < upsertratio ? 1 : 0);
        byte upsertHitMode =
            (byte) ((upsertMode != 0) && (m_random.nextFloat() < upserthitratio) ? 1 : 0);

        CountDownLatch latch = new CountDownLatch(batchSize);
        final ArrayList<Long> lcpDelQueue = new ArrayList<Long>();

        // try to insert batchSize random rows
        for (int i = 0; i < batchSize; i++) {
          m_table.clearRowData();
          m_permits.acquire();
          long p = Math.abs(r.nextLong());
          m_table.addRow(p, p, Calendar.getInstance().getTimeInMillis());
          boolean success = false;
          if (!m_isMP) {
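            // Single-partition path: derive the partition parameter by serializing the
            // partitioned column's value with TheHashinator.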
            Object rpartitionParam =
                TheHashinator.valueToBytes(
                    m_table.fetchRow(0).get(m_partitionedColumnIndex, VoltType.BIGINT));
            // To test upserting an existing row, insert it and then upsert the same row again.
            if (upsertHitMode != 0) {
              success =
                  client.callProcedure(
                      new InsertCallback(latch, p, shouldCopy),
                      m_procName,
                      rpartitionParam,
                      m_tableName,
                      (byte) 1,
                      m_table);
            }
            success =
                client.callProcedure(
                    new InsertCallback(latch, p, shouldCopy),
                    m_procName,
                    rpartitionParam,
                    m_tableName,
                    (byte) 1,
                    m_table);
          } else {
            if (upsertHitMode != 0) {
              success =
                  client.callProcedure(
                      new InsertCallback(latch, p, shouldCopy),
                      m_procName,
                      m_tableName,
                      (byte) 1,
                      m_table);
            }
            success =
                client.callProcedure(
                    new InsertCallback(latch, p, shouldCopy),
                    m_procName,
                    m_tableName,
                    (byte) 1,
                    m_table);
          }
          // Add if successfully queued, but remove if the proc fails.
          if (success) {
            if (shouldCopy != 0) {
              lcpDelQueue.add(p);
            } else {
              onlyDelQueue.add(p);
            }
          }
        }
        // Wait for all @Load{SP|MP}Done
        latch.await();
        cpDelQueue.addAll(lcpDelQueue);
        long nextRowCount = 0;
        try {
          nextRowCount = TxnId2Utils.getRowCount(client, m_tableName);
        } catch (Exception e) {
          hardStop("getrowcount exception", e);
        }
        // if no progress, throttle a bit
        if (nextRowCount == currentRowCount.get()) {
          Thread.sleep(1000);
        }
        if (onlyDelQueue.size() > 0 && m_shouldContinue.get()) {
          List<Long> workList = new ArrayList<Long>();
          onlyDelQueue.drainTo(workList);
          CountDownLatch odlatch = new CountDownLatch(workList.size());
          for (Long lcid : workList) {
            client.callProcedure(new DeleteCallback(odlatch, 1), m_onlydelprocName, lcid);
          }
          odlatch.await();
        }
      }
      // Any accumulated in p/mp tables are left behind.
    } catch (Exception e) {
      // on exception, log and end the thread, but don't kill the process
      log.error(
          "LoadTableLoader failed a procedure call for table "
              + m_tableName
              + " and the thread will now stop.",
          e);
    } finally {
      cdtask.shutdown();
      try {
        cdtask.join();
      } catch (InterruptedException ex) {
        log.error("CopyDelete Task was stopped.", ex);
      }
    }
  }