Example #1
  private ModuleManager(File cacheRoot) {

    String systemPackagesSpec =
        FluentIterable.from(SYSTEM_PACKAGES).transform(appendVersion).join(COMMA_JOINER);

    Map<String, String> frameworkProps =
        ImmutableMap.<String, String>builder()
            .put(Constants.FRAMEWORK_SYSTEMPACKAGES_EXTRA, systemPackagesSpec)
            .put("org.osgi.framework.storage.clean", "onFirstInit")
            .put("felix.cache.rootdir", cacheRoot.getAbsolutePath())
            .put("felix.cache.locking", Boolean.FALSE.toString())
            .build();

    LOG.info("Framework properties are: " + frameworkProps);

    FrameworkFactory frameworkFactory =
        ServiceLoader.load(FrameworkFactory.class).iterator().next();

    m_framework = frameworkFactory.newFramework(frameworkProps);

    try {
      m_framework.start();
    } catch (BundleException e) {
      LOG.error("Failed to start the felix OSGi framework", e);
      throw new SetUpException("Failed to start the felix OSGi framework", e);
    }

    m_bundles = new BundleRef(m_framework);
  }
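The constructor above relies on two helpers that are referenced but not shown, appendVersion and COMMA_JOINER. A minimal sketch of what they could look like with Guava, assuming a simple ";version=" qualifier; the class name, VERSION constant, and method below are illustrative, not from the original source:

import com.google.common.base.Function;
import com.google.common.base.Joiner;
import com.google.common.collect.FluentIterable;
import java.util.List;

class SystemPackagesSpecSketch {
  // Hypothetical export version; the real class presumably derives this elsewhere.
  private static final String VERSION = "1.0.0";

  private static final Joiner COMMA_JOINER = Joiner.on(",");

  private static final Function<String, String> appendVersion =
      new Function<String, String>() {
        @Override
        public String apply(String packageName) {
          return packageName + ";version=" + VERSION;
        }
      };

  static String buildSystemPackagesSpec(List<String> systemPackages) {
    return FluentIterable.from(systemPackages).transform(appendVersion).join(COMMA_JOINER);
  }
}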
Example #2
 public static void logIv2InitiateTaskMessage(
     Iv2InitiateTaskMessage itask, long localHSId, long txnid, long spHandle) {
   if (iv2log.isTraceEnabled()) {
     String logmsg = "rxInitMsg %s from %s ciHandle %s txnId %s spHandle %s trunc %s";
     if (itask.getTxnId() != Long.MIN_VALUE && itask.getTxnId() != txnid) {
       iv2log.error(
           "Iv2InitiateTaskMessage TXN ID conflict.  Message: "
               + itask.getTxnId()
               + ", locally held: "
               + txnid);
     }
     if (itask.getSpHandle() != Long.MIN_VALUE && itask.getSpHandle() != spHandle) {
       iv2log.error(
           "Iv2InitiateTaskMessage SP HANDLE conflict.  Message: "
               + itask.getSpHandle()
               + ", locally held: "
               + spHandle);
     }
     iv2log.trace(
         String.format(
             logmsg,
             CoreUtils.hsIdToString(localHSId),
             CoreUtils.hsIdToString(itask.m_sourceHSId),
             ClientInterfaceHandleManager.handleToString(itask.getClientInterfaceHandle()),
             txnIdToString(txnid),
             txnIdToString(spHandle),
             txnIdToString(itask.getTruncationHandle())));
   }
 }
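The conflict checks above treat Long.MIN_VALUE as an "unused" sentinel for transaction ids and SP handles. A hedged guess at what a txnIdToString-style helper does with that sentinel (plain Java, not the actual VoltDB implementation):

  private static String txnIdToStringSketch(long txnId) {
    // Long.MIN_VALUE is the "not set" sentinel used in the conflict checks above.
    return (txnId == Long.MIN_VALUE) ? "UNUSED" : Long.toString(txnId);
  }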
Example #3
 @Override
 public void handleResponse(ClientResponse resp) {
   if (resp == null) {
     VoltDB.crashLocalVoltDB(
         "Received a null response to a snapshot initiation request.  "
             + "This should be impossible.",
         true,
         null);
   } else if (resp.getStatus() != ClientResponse.SUCCESS) {
     tmLog.info(
         "Failed to complete partition detection snapshot, status: "
             + resp.getStatus()
             + ", reason: "
             + resp.getStatusString());
     tmLog.info("Retrying partition detection snapshot...");
     SnapshotUtil.requestSnapshot(
         0L,
         m_partSnapshotSchedule.getPath(),
         m_partSnapshotSchedule.getPrefix() + System.currentTimeMillis(),
         true,
         SnapshotFormat.NATIVE,
         null,
         m_snapshotHandler,
         true);
   } else if (!SnapshotUtil.didSnapshotRequestSucceed(resp.getResults())) {
     VoltDB.crashGlobalVoltDB(
         "Unable to complete partition detection snapshot: " + resp.getResults()[0],
         false,
         null);
   } else {
     VoltDB.crashGlobalVoltDB(
         "Partition detection snapshot completed. Shutting down.", false, null);
   }
 }
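The handler above runs through the usual VoltDB client response checks: a null response, a non-SUCCESS status with getStatusString() for the reason, and the returned VoltTable array on success. A stripped-down callback applying the same checks; the class name and the console logging are illustrative:

import org.voltdb.client.ClientResponse;
import org.voltdb.client.ProcedureCallback;

class StatusCheckingCallback implements ProcedureCallback {
  @Override
  public void clientCallback(ClientResponse resp) {
    if (resp == null || resp.getStatus() != ClientResponse.SUCCESS) {
      // A null response or a failed status means the request did not complete.
      System.err.println(
          "Call failed: " + (resp == null ? "null response" : resp.getStatusString()));
      return;
    }
    // On success, the results are available as VoltTables.
    System.out.println("First result has " + resp.getResults()[0].getRowCount() + " rows");
  }
}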
Example #4
 public static void logFragmentTaskMessage(
     FragmentTaskMessage ftask, long localHSId, long spHandle, boolean borrow) {
   if (iv2log.isTraceEnabled()) {
     String label = "rxFragMsg";
     if (borrow) {
       label = "rxBrrwMsg";
     }
     if (ftask.getSpHandle() != Long.MIN_VALUE && ftask.getSpHandle() != spHandle) {
       iv2log.error(
           "FragmentTaskMessage SP HANDLE conflict.  Message: "
               + ftask.getSpHandle()
               + ", locally held: "
               + spHandle);
     }
     String logmsg = "%s %s from %s txnId %s spHandle %s trunc %s";
     iv2log.trace(
         String.format(
             logmsg,
             label,
             CoreUtils.hsIdToString(localHSId),
             CoreUtils.hsIdToString(ftask.m_sourceHSId),
             txnIdToString(ftask.getTxnId()),
             txnIdToString(spHandle),
             txnIdToString(ftask.getTruncationHandle())));
   }
 }
Example #5
 public static void logInitiatorRxMsg(VoltMessage msg, long localHSId) {
   if (iv2log.isTraceEnabled()) {
     if (msg instanceof InitiateResponseMessage) {
       InitiateResponseMessage iresp = (InitiateResponseMessage) msg;
       String logmsg = "rxInitRsp %s from %s ciHandle %s txnId %s spHandle %s status %s";
       iv2log.trace(
           String.format(
               logmsg,
               CoreUtils.hsIdToString(localHSId),
               CoreUtils.hsIdToString(iresp.m_sourceHSId),
               ClientInterfaceHandleManager.handleToString(iresp.getClientInterfaceHandle()),
               txnIdToString(iresp.getTxnId()),
               txnIdToString(iresp.getSpHandle()),
               respStatusToString(iresp.getClientResponseData().getStatus())));
     } else if (msg instanceof FragmentResponseMessage) {
       FragmentResponseMessage fresp = (FragmentResponseMessage) msg;
       String logmsg = "rxFragRsp %s from %s txnId %s spHandle %s status %s";
       iv2log.trace(
           String.format(
               logmsg,
               CoreUtils.hsIdToString(localHSId),
               CoreUtils.hsIdToString(fresp.m_sourceHSId),
               txnIdToString(fresp.getTxnId()),
               txnIdToString(fresp.getSpHandle()),
               fragStatusToString(fresp.getStatusCode())));
     }
   }
 }
Example #6
 @Override
 public void logTask(TransactionInfoBaseMessage message) throws IOException {
   assert (!(message instanceof Iv2InitiateTaskMessage));
   if (message instanceof FragmentTaskMessage) {
     if (JOINLOG.isTraceEnabled()) {
       JOINLOG.trace("P" + m_partitionId + " received first fragment");
     }
     m_receivedFirstFragment = true;
   }
   m_taskLog.logTask(message);
 }
Example #7
 public static void logTopology(long leaderHSId, List<Long> replicas, int partitionId) {
   if (iv2log.isTraceEnabled()) {
     String logmsg = "topology partition %d leader %s replicas (%s)";
     iv2log.trace(
         String.format(
             logmsg,
             partitionId,
             CoreUtils.hsIdToString(leaderHSId),
             CoreUtils.hsIdCollectionToString(replicas)));
   }
 }
Example #8
 public static void logSiteTaskerQueueOffer(TransactionTask task) {
   if (iv2queuelog.isTraceEnabled()) {
     String logmsg = "tskQOffer txnId %s spHandle %s type %s";
     iv2queuelog.trace(
         String.format(
             logmsg,
             txnIdToString(task.getTxnId()),
             txnIdToString(task.getSpHandle()),
             task.m_txn.isSinglePartition() ? "SP" : "MP"));
   }
 }
Example #9
 public void runDDL(String ddl) {
   try {
     // LOG.info("Executing " + ddl);
     Statement stmt = dbconn.createStatement();
     /*boolean success =*/ stmt.execute(ddl);
     SQLWarning warn = stmt.getWarnings();
     if (warn != null) sqlLog.warn(warn.getMessage());
     // LOG.info("SQL DDL execute result: " + (success ? "true" : "false"));
   } catch (SQLException e) {
     hostLog.l7dlog(Level.ERROR, LogKeys.host_Backend_RunDDLFailed.name(), new Object[] {ddl}, e);
   }
 }
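The Statement created above is never closed. A hedged variant of the same method using try-with-resources, assuming the surrounding fields (dbconn, sqlLog, hostLog, LogKeys, Level) are the same ones used above:

import java.sql.SQLException;
import java.sql.SQLWarning;
import java.sql.Statement;

  public void runDDLClosingStatement(String ddl) {
    try (Statement stmt = dbconn.createStatement()) {
      stmt.execute(ddl);
      SQLWarning warn = stmt.getWarnings();
      if (warn != null) {
        sqlLog.warn(warn.getMessage());
      }
    } catch (SQLException e) {
      hostLog.l7dlog(Level.ERROR, LogKeys.host_Backend_RunDDLFailed.name(), new Object[] {ddl}, e);
    }
  }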
Example #10
 public static void logFinishTransaction(InitiateResponseMessage msg, long localHSId) {
   if (iv2log.isTraceEnabled()) {
     String logmsg = "finishTxn %s ciHandle %s initHSId %s status %s";
     iv2log.trace(
         String.format(
             logmsg,
             CoreUtils.hsIdToString(localHSId),
             ClientInterfaceHandleManager.handleToString(msg.getClientInterfaceHandle()),
             CoreUtils.hsIdToString(msg.getCoordinatorHSId()),
             respStatusToString(msg.getClientResponseData().getStatus())));
   }
 }
Example #11
 public static void logCreateTransaction(Iv2InitiateTaskMessage msg) {
   if (iv2log.isTraceEnabled()) {
     String logmsg = "createTxn %s ciHandle %s initHSId %s proc %s";
     iv2log.trace(
         String.format(
             logmsg,
             CoreUtils.hsIdToString(msg.getInitiatorHSId()),
             ClientInterfaceHandleManager.handleToString(msg.getClientInterfaceHandle()),
             CoreUtils.hsIdToString(msg.getCoordinatorHSId()),
             msg.getStoredProcedureInvocation().getProcName()));
   }
 }
Example #12
 public static void logIv2MultipartSentinel(
     MultiPartitionParticipantMessage message, long localHSId, long txnId) {
   if (iv2log.isTraceEnabled()) {
     String logmsg = "rxSntlMsg %s from %s txnId %s";
     iv2log.trace(
         String.format(
             logmsg,
             CoreUtils.hsIdToString(localHSId),
             CoreUtils.hsIdToString(message.m_sourceHSId),
             txnIdToString(txnId)));
   }
 }
Example #13
 /**
  * Call VoltDB.crashVoltDB on behalf of the EE
  *
  * @param reason Reason the EE crashed
  */
  public static void crashVoltDB(String reason, String[] traces, String filename, int lineno) {
   VoltLogger hostLog = new VoltLogger("HOST");
   String fn = (filename == null) ? "unknown" : filename;
   String re = (reason == null) ? "Fatal EE error." : reason;
   hostLog.fatal(re + " In " + fn + ":" + lineno);
   if (traces != null) {
     for (String trace : traces) {
       hostLog.fatal(trace);
     }
   }
   VoltDB.crashLocalVoltDB(re + " In " + fn + ":" + lineno, true, null);
 }
Example #14
 /** Process a new repair log response */
 @Override
 public void deliver(VoltMessage message) {
   if (message instanceof Iv2RepairLogResponseMessage) {
     Iv2RepairLogResponseMessage response = (Iv2RepairLogResponseMessage) message;
     if (response.getRequestId() != m_requestId) {
       tmLog.debug(
           m_whoami
               + "rejecting stale repair response."
               + " Current request id is: "
               + m_requestId
               + " Received response for request id: "
               + response.getRequestId());
       return;
     }
     ReplicaRepairStruct rrs = m_replicaRepairStructs.get(response.m_sourceHSId);
     if (rrs.m_expectedResponses < 0) {
       tmLog.debug(
           m_whoami
               + "collecting "
               + response.getOfTotal()
               + " repair log entries from "
               + CoreUtils.hsIdToString(response.m_sourceHSId));
     }
     // Long.MAX_VALUE has rejoin semantics
     if (response.getHandle() != Long.MAX_VALUE) {
       m_maxSeenTxnId = Math.max(m_maxSeenTxnId, response.getHandle());
     }
     if (response.getPayload() != null) {
       m_repairLogUnion.add(response);
       if (tmLog.isTraceEnabled()) {
         tmLog.trace(
             m_whoami
                 + " collected from "
                 + CoreUtils.hsIdToString(response.m_sourceHSId)
                 + ", message: "
                 + response.getPayload());
       }
     }
     if (rrs.update(response)) {
       tmLog.debug(
           m_whoami
               + "collected "
               + rrs.m_receivedResponses
               + " responses for "
               + rrs.m_expectedResponses
               + " repair log entries from "
               + CoreUtils.hsIdToString(response.m_sourceHSId));
       if (areRepairLogsComplete()) {
         repairSurvivors();
       }
     }
   }
 }
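The trace call above is guarded by isTraceEnabled(), but the debug calls build their messages unconditionally. A sketch of the same guard applied to the last debug message above, so the string concatenation is skipped when debug logging is off:

      if (tmLog.isDebugEnabled()) {
        tmLog.debug(
            m_whoami
                + "collected "
                + rrs.m_receivedResponses
                + " responses for "
                + rrs.m_expectedResponses
                + " repair log entries from "
                + CoreUtils.hsIdToString(response.m_sourceHSId));
      }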
Example #15
 @Override
 public void run(ImmutableMap<Integer, Long> cache) {
   Set<Long> currentLeaders = new HashSet<Long>(cache.values());
   tmLog.debug("Updated leaders: " + currentLeaders);
   if (m_state.get() == AppointerState.CLUSTER_START) {
     if (currentLeaders.size() == m_partitionCount) {
       tmLog.debug("Leader appointment complete, promoting MPI and unblocking.");
       m_state.set(AppointerState.DONE);
       m_MPI.acceptPromotion();
       m_startupLatch.countDown();
     }
   }
 }
Example #16
 /**
   * Indicate to all associated {@link ExportDataSource}s that they should assume the mastership
   * role for the given partition id.
  *
  * @param partitionId
  */
 public void acceptMastershipTask(int partitionId) {
   HashMap<String, ExportDataSource> partitionDataSourceMap =
       m_dataSourcesByPartition.get(partitionId);
   exportLog.info(
       "Export generation " + m_timestamp + " accepting mastership for partition " + partitionId);
   for (ExportDataSource eds : partitionDataSourceMap.values()) {
     try {
       eds.acceptMastership();
     } catch (Exception e) {
       exportLog.error("Unable to start exporting", e);
     }
   }
 }
Example #17
 /** Notify the coordinator that this site has received the first fragment message */
 private void sendFirstFragResponse() {
   if (JOINLOG.isDebugEnabled()) {
     JOINLOG.debug(
         "P"
             + m_partitionId
             + " sending first fragment response to coordinator "
             + CoreUtils.hsIdToString(m_coordinatorHsId));
   }
   RejoinMessage msg =
       new RejoinMessage(m_mailbox.getHSId(), RejoinMessage.Type.FIRST_FRAGMENT_RECEIVED);
   m_mailbox.send(m_coordinatorHsId, msg);
   m_firstFragResponseSent = true;
 }
Example #18
 /**
   * By default returns HashinatorType.LEGACY, but for development another hashinator can be
   * selected via the HASHINATOR environment variable or Java system property.
  */
 public static HashinatorType getConfiguredHashinatorType() {
   if (configuredHashinatorType != null) {
     return configuredHashinatorType;
   }
   String hashinatorType = System.getenv("HASHINATOR");
   if (hashinatorType == null) {
     hashinatorType = System.getProperty("HASHINATOR", HashinatorType.LEGACY.name());
   }
   if (hostLogger.isDebugEnabled()) {
     hostLogger.debug("Overriding hashinator to use " + hashinatorType);
   }
   configuredHashinatorType = HashinatorType.valueOf(hashinatorType.trim().toUpperCase());
   return configuredHashinatorType;
 }
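Usage note: because the method reads the HASHINATOR environment variable first and then the HASHINATOR system property, a development run or test can switch hashinators without code changes. A minimal sketch, assuming "ELASTIC" is another HashinatorType constant (substitute whatever the enum actually defines):

    // Must be set before the first call, since configuredHashinatorType caches the result.
    System.setProperty("HASHINATOR", "ELASTIC"); // "ELASTIC" assumed; use a real constant name
    HashinatorType type = getConfiguredHashinatorType();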
Example #19
 public static void logCompleteTransactionMessage(
     CompleteTransactionMessage ctask, long localHSId) {
   if (iv2log.isTraceEnabled()) {
     String logmsg = "rxCompMsg %s from %s txnId %s %s %s";
     iv2log.trace(
         String.format(
             logmsg,
             CoreUtils.hsIdToString(localHSId),
             CoreUtils.hsIdToString(ctask.m_sourceHSId),
             txnIdToString(ctask.getTxnId()),
             ctask.isRollback() ? "ROLLBACK" : "COMMIT",
             ctask.isRestart() ? "RESTART" : ""));
   }
 }
Example #20
  /**
   * Given a set of the known host IDs before a fault, and the known host IDs in the post-fault
   * cluster, determine whether or not we think a network partition may have happened. NOTE: this
   * assumes that we have already done the k-safety validation for every partition and already
   * failed if we weren't a viable cluster. ALSO NOTE: not private so it may be unit-tested.
   */
  static boolean makePPDDecision(Set<Integer> previousHosts, Set<Integer> currentHosts) {
    // Real partition detection stuff would go here
    // find the lowest hostId between the still-alive hosts and the
    // failed hosts. Which set contains the lowest hostId?
    int blessedHostId = Integer.MAX_VALUE;
    boolean blessedHostIdInFailedSet = true;

     // This should be all the pre-partition host IDs. Any new host IDs
     // (say, if this was triggered by rejoin) will be greater than any surviving
     // host ID, so don't worry about including them in this search.
    for (Integer hostId : previousHosts) {
      if (hostId < blessedHostId) {
        blessedHostId = hostId;
      }
    }

    for (Integer hostId : currentHosts) {
      if (hostId.equals(blessedHostId)) {
        blessedHostId = hostId;
        blessedHostIdInFailedSet = false;
      }
    }

    // Evaluate PPD triggers.
    boolean partitionDetectionTriggered = false;
     // Exact 50-50 splits: the set containing the lowest surviving host ID doesn't trigger PPD.
     // If the blessed host is in the failed set, this survivor set is not blessed.
    if (currentHosts.size() * 2 == previousHosts.size()) {
      if (blessedHostIdInFailedSet) {
        tmLog.info(
            "Partition detection triggered for 50/50 cluster failure. "
                + "This survivor set is shutting down.");
        partitionDetectionTriggered = true;
      } else {
        tmLog.info(
            "Partition detected for 50/50 failure. "
                + "This survivor set is continuing execution.");
      }
    }

    // A strict, viable minority is always a partition.
    if (currentHosts.size() * 2 < previousHosts.size()) {
      tmLog.info(
          "Partition detection triggered. " + "This minority survivor set is shutting down.");
      partitionDetectionTriggered = true;
    }

    return partitionDetectionTriggered;
  }
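Since the comment notes the method is left package-private for unit testing, here is an illustrative JUnit-style check of the 50/50 rule (assumed test code, not from the original suite): a four-host cluster splits in half, and only the half that lost the lowest pre-fault host id shuts down.

import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;

import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;

  public void testFiftyFiftySplit() {
    Set<Integer> previousHosts = new HashSet<Integer>(Arrays.asList(0, 1, 2, 3));
    // Survivors {2, 3} do not contain the lowest pre-fault host id (0): PPD triggers.
    assertTrue(makePPDDecision(previousHosts, new HashSet<Integer>(Arrays.asList(2, 3))));
    // Survivors {0, 1} keep the blessed host: this side continues running.
    assertFalse(makePPDDecision(previousHosts, new HashSet<Integer>(Arrays.asList(0, 1))));
  }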
Example #21
  private long assignLeader(int partitionId, List<Long> children) {
    // We used masterHostId = -1 as a way to force the leader choice to be
    // the first replica in the list, if we don't have some other mechanism
    // which has successfully overridden it.
    int masterHostId = -1;
    if (m_state.get() == AppointerState.CLUSTER_START) {
      try {
        // find master in topo
        JSONArray parts = m_topo.getJSONArray("partitions");
        for (int p = 0; p < parts.length(); p++) {
          JSONObject aPartition = parts.getJSONObject(p);
          int pid = aPartition.getInt("partition_id");
          if (pid == partitionId) {
            masterHostId = aPartition.getInt("master");
          }
        }
      } catch (JSONException jse) {
        tmLog.error("Failed to find master for partition " + partitionId + ", defaulting to 0");
        jse.printStackTrace();
        masterHostId = -1; // stupid default
      }
    } else {
      // For now, if we're appointing a new leader as a result of a
      // failure, just pick the first replica in the children list.
      // Could eventually do something more complex here to try to keep a
      // semi-balance, but it's unclear that this has much utility until
      // we add rebalancing on rejoin as well.
      masterHostId = -1;
    }

    long masterHSId = children.get(0);
    for (Long child : children) {
      if (CoreUtils.getHostIdFromHSId(child) == masterHostId) {
        masterHSId = child;
        break;
      }
    }
    tmLog.info(
        "Appointing HSId "
            + CoreUtils.hsIdToString(masterHSId)
            + " as leader for partition "
            + partitionId);
    try {
      m_iv2appointees.put(partitionId, masterHSId);
    } catch (Exception e) {
      VoltDB.crashLocalVoltDB("Unable to appoint new master for partition " + partitionId, true, e);
    }
    return masterHSId;
  }
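For reference, a sketch of the topology shape the CLUSTER_START branch expects, built with the org.json-style API used above. The field names come straight from the getJSONArray/getInt calls; the values are made up, and the fragment belongs in a method that declares throws JSONException because put() can throw it.

    JSONArray partitions = new JSONArray();

    JSONObject partitionZero = new JSONObject();
    partitionZero.put("partition_id", 0);
    partitionZero.put("master", 2); // host id 2 should lead partition 0
    partitions.put(partitionZero);

    JSONObject topo = new JSONObject();
    topo.put("partitions", partitions);
    // m_topo.getJSONArray("partitions") in assignLeader() walks exactly this structure.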
Example #22
  /**
   * Blocking transfer all partitioned table data and notify the coordinator.
   *
   * @param siteConnection
   */
  private void runForBlockingDataTransfer(SiteProcedureConnection siteConnection) {
    boolean sourcesReady = false;
    RestoreWork restoreWork = m_dataSink.poll(m_snapshotBufferAllocator);
    if (restoreWork != null) {
      restoreBlock(restoreWork, siteConnection);
      sourcesReady = true;
    }

     // The completion monitor may fire even if m_dataSink has not reached EOF in the case
     // that there's no replicated table in the database, so check for both conditions.
    if (m_dataSink.isEOF() || m_snapshotCompletionMonitor.isDone()) {
      // No more data from this data sink, close and remove it from the list
      m_dataSink.close();

      if (m_streamSnapshotMb != null) {
        VoltDB.instance().getHostMessenger().removeMailbox(m_streamSnapshotMb.getHSId());
      }

      JOINLOG.debug(m_whoami + " data transfer is finished");

      if (m_snapshotCompletionMonitor.isDone()) {
        try {
          SnapshotCompletionEvent event = m_snapshotCompletionMonitor.get();
          assert (event != null);
          JOINLOG.debug("P" + m_partitionId + " noticed data transfer completion");
          m_completionAction.setSnapshotTxnId(event.multipartTxnId);

          setJoinComplete(
              siteConnection,
              event.exportSequenceNumbers,
              event.drSequenceNumbers,
              false /* requireExistingSequenceNumbers */);
        } catch (InterruptedException e) {
          // isDone() already returned true, this shouldn't happen
          VoltDB.crashLocalVoltDB("Impossible interruption happend", true, e);
        } catch (ExecutionException e) {
          VoltDB.crashLocalVoltDB("Error waiting for snapshot to finish", true, e);
        }
      } else {
        m_taskQueue.offer(this);
      }
    } else {
      // The sources are not set up yet, don't block the site,
      // return here and retry later.
      returnToTaskQueue(sourcesReady);
    }
  }
Example #23
  /** Runs when the RejoinCoordinator decides this site should start rejoin. */
  void doInitiation(RejoinMessage message) {
    m_coordinatorHsId = message.m_sourceHSId;
    m_streamSnapshotMb = VoltDB.instance().getHostMessenger().createMailbox();
    m_rejoinSiteProcessor = new StreamSnapshotSink(m_streamSnapshotMb);

    // MUST choose the leader as the source.
    long sourceSite = m_mailbox.getMasterHsId(m_partitionId);
    long hsId =
        m_rejoinSiteProcessor.initialize(
            message.getSnapshotSourceCount(), message.getSnapshotBufferPool());

    REJOINLOG.debug(
        m_whoami
            + "received INITIATION message. Doing rejoin"
            + ". Source site is: "
            + CoreUtils.hsIdToString(sourceSite)
            + " and destination rejoin processor is: "
            + CoreUtils.hsIdToString(hsId)
            + " and snapshot nonce is: "
            + message.getSnapshotNonce());

    registerSnapshotMonitor(message.getSnapshotNonce());
    // Tell the RejoinCoordinator everything it will need to know to get us our snapshot stream.
    RejoinMessage initResp = new RejoinMessage(m_mailbox.getHSId(), sourceSite, hsId);
    m_mailbox.send(m_coordinatorHsId, initResp);

    // Start waiting for snapshot data
    m_taskQueue.offer(this);
  }
Example #24
 public void dump(long hsId) {
   final String who = CoreUtils.hsIdToString(hsId);
   tmLog.info(
       String.format(
           "%s: REPLAY SEQUENCER DUMP, LAST POLLED FRAGMENT %d (%s), LAST SEEN TXNID %d (%s), %s%s",
           who,
           m_lastPolledFragmentTxnId,
           TxnEgo.txnIdToString(m_lastPolledFragmentTxnId),
           m_lastSeenTxnId,
           TxnEgo.txnIdToString(m_lastSeenTxnId),
           m_mpiEOLReached ? "MPI EOL, " : "",
           m_mustDrain ? "MUST DRAIN" : ""));
   for (Entry<Long, ReplayEntry> e : m_replayEntries.entrySet()) {
     tmLog.info(String.format("%s: REPLAY ENTRY %s: %s", who, e.getKey(), e.getValue()));
   }
 }
Example #25
  @Override
  public void run() {
    byte[] data = new byte[rowSize];
    r.nextBytes(data);

    try {
      long currentRowCount = getRowCount();
      while ((currentRowCount < targetCount) && (m_shouldContinue.get())) {
        CountDownLatch latch = new CountDownLatch(batchSize);
        // try to insert batchSize random rows
        for (int i = 0; i < batchSize; i++) {
          long p = Math.abs(r.nextLong());
          m_permits.acquire();
          client.callProcedure(
              new InsertCallback(latch), tableName.toUpperCase() + "TableInsert", p, data);
        }
        latch.await(10, TimeUnit.SECONDS);
        long nextRowCount = getRowCount();
        // if no progress, throttle a bit
        if (nextRowCount == currentRowCount) {
          Thread.sleep(1000);
        }
        currentRowCount = nextRowCount;
      }

    } catch (Exception e) {
      // on exception, log and end the thread, but don't kill the process
      log.error(
          "BigTableLoader failed a procedure call for table "
              + tableName
              + " and the thread will now stop.",
          e);
      return;
    }
  }
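InsertCallback is referenced above but not shown. A hedged sketch of what it minimally has to do, given how the loop uses it: count the batch latch down exactly once per call, success or failure. The class name and the console logging are assumptions.

import java.util.concurrent.CountDownLatch;
import org.voltdb.client.ClientResponse;
import org.voltdb.client.ProcedureCallback;

class InsertCallbackSketch implements ProcedureCallback {
  private final CountDownLatch latch;

  InsertCallbackSketch(CountDownLatch latch) {
    this.latch = latch;
  }

  @Override
  public void clientCallback(ClientResponse resp) {
    try {
      if (resp.getStatus() != ClientResponse.SUCCESS) {
        // The loop above detects lack of progress via the row count, so just note the failure.
        System.err.println("Insert failed: " + resp.getStatusString());
      }
    } finally {
      // Always count down, or latch.await(10, SECONDS) in the loop would stall for the timeout.
      latch.countDown();
    }
  }
}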
Example #26
    NavigableMap<URI, Bundle> stopBundles(Set<URI> bundles) {
      NavigableMap<URI, Bundle> expect, update;
      do {
        expect = get();
        update =
            ImmutableSortedMap.<URI, Bundle>naturalOrder()
                .putAll(Maps.filterKeys(expect, not(in(bundles))))
                .build();
      } while (!compareAndSet(expect, update));

      List<URI> couldNotStop = new ArrayList<>();
      NavigableMap<URI, Bundle> stopped = Maps.filterKeys(expect, in(bundles));
      for (Map.Entry<URI, Bundle> e : stopped.entrySet()) {
        URI bundleURI = e.getKey();
        Bundle bundle = e.getValue();
        try {
          bundle.stop();
        } catch (BundleException exc) {
          LOG.error("Failed to stop bundle " + bundleURI, exc);
          couldNotStop.add(bundleURI);
        }
      }
      if (!couldNotStop.isEmpty()) {
        throw new ModularException("Failed to stop bundles %s", couldNotStop);
      }
      return stopped;
    }
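stopBundles() updates what appears to be an AtomicReference-backed immutable map via a read/derive/compareAndSet retry loop. The same pattern in isolation, as a generic helper (assumes Java 8+ for UnaryOperator; names are illustrative):

import java.util.concurrent.atomic.AtomicReference;
import java.util.function.UnaryOperator;

final class CasUpdate {
  // Applies step atomically and returns the snapshot the successful update was based on.
  static <T> T update(AtomicReference<T> ref, UnaryOperator<T> step) {
    T expect;
    T next;
    do {
      expect = ref.get();
      next = step.apply(expect);
    } while (!ref.compareAndSet(expect, next));
    return expect;
  }
}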
Example #27
 public List<Integer> getIv2PartitionsToReplace(JSONObject topology) throws JSONException {
   ClusterConfig clusterConfig = new ClusterConfig(topology);
   hostLog.info(
       "Computing partitions to replace.  Total partitions: " + clusterConfig.getPartitionCount());
   Map<Integer, Integer> repsPerPart = new HashMap<Integer, Integer>();
   for (int i = 0; i < clusterConfig.getPartitionCount(); i++) {
     repsPerPart.put(i, getReplicaCountForPartition(i));
   }
   List<Integer> partitions =
       computeReplacementPartitions(
           repsPerPart,
           clusterConfig.getReplicationFactor(),
           clusterConfig.getSitesPerHost(),
           clusterConfig.getPartitionCount());
   hostLog.info("IV2 Sites will replicate the following partitions: " + partitions);
   return partitions;
 }
Example #28
  public void processKafkaMessages() throws Exception {
    // Split server list
    final String[] serverlist = m_config.servers.split(",");

    // Create connection
    final ClientConfig c_config = new ClientConfig(m_config.user, m_config.password);
    c_config.setProcedureCallTimeout(0); // Set procedure all to infinite

    m_client = getClient(c_config, serverlist, m_config.port);

    if (m_config.useSuppliedProcedure) {
      m_loader =
          new CSVTupleDataLoader(
              (ClientImpl) m_client, m_config.procedure, new KafkaBulkLoaderCallback());
    } else {
      m_loader =
          new CSVBulkDataLoader(
              (ClientImpl) m_client, m_config.table, m_config.batch, new KafkaBulkLoaderCallback());
    }
    m_loader.setFlushInterval(m_config.flush, m_config.flush);
    m_consumer =
        new KafkaConsumerConnector(
            m_config.zookeeper,
            m_config.useSuppliedProcedure ? m_config.procedure : m_config.table);
    try {
      m_es = getConsumerExecutor(m_consumer, m_loader);
      if (m_config.useSuppliedProcedure) {
        m_log.info(
            "Kafka Consumer from topic: "
                + m_config.topic
                + " Started using procedure: "
                + m_config.procedure);
      } else {
        m_log.info(
            "Kafka Consumer from topic: "
                + m_config.topic
                + " Started for table: "
                + m_config.table);
      }
      m_es.awaitTermination(365, TimeUnit.DAYS);
    } catch (Exception ex) {
      m_log.error("Error in Kafka Consumer", ex);
      System.exit(-1);
    }
    close();
  }
Example #29
  /**
   * Constructor for benchmark instance. Configures VoltDB client and prints configuration.
   *
   * @param config Parsed & validated CLI options.
   */
  Benchmark(Config config) {
    this.config = config;

    processor =
        new PayloadProcessor(
            config.minvaluesize, config.maxvaluesize,
            config.entropy, config.usecompression);

    log.info(HORIZONTAL_RULE);
    log.info(" Command Line Configuration");
    log.info(HORIZONTAL_RULE);
    log.info(config.getConfigDumpString());

    StatusListener statusListener = new StatusListener();
    ClientConfig clientConfig = new ClientConfig("", "", statusListener);
    client = ClientFactory.createClient(clientConfig);
  }
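The constructor above only creates the client; a hedged sketch of the connection step that typically follows, using createConnection from the VoltDB client API. The connect method name and the comma-separated server list are assumptions, not part of the original class.

import java.io.IOException;

  void connect(String commaSeparatedServers) throws IOException {
    for (String server : commaSeparatedServers.split(",")) {
      // client is the field initialized in the constructor above.
      client.createConnection(server.trim());
    }
  }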
Example #30
  public void pushExportBuffer(
      int partitionId,
      String signature,
      long uso,
      long bufferPtr,
      ByteBuffer buffer,
      boolean sync,
      boolean endOfStream) {
    // System.out.println("In generation " + m_timestamp + " partition " + partitionId
    //     + " signature " + signature
    //     + (buffer == null ? " null buffer " : (" buffer length " + buffer.remaining())));
    // for (Integer i : m_dataSourcesByPartition.keySet()) {
    //   System.out.println("Have partition " + i);
    // }
    assert (m_dataSourcesByPartition.containsKey(partitionId));
    assert (m_dataSourcesByPartition.get(partitionId).containsKey(signature));
    HashMap<String, ExportDataSource> sources = m_dataSourcesByPartition.get(partitionId);

    if (sources == null) {
      exportLog.error(
          "Could not find export data sources for partition "
              + partitionId
              + " generation "
              + m_timestamp
              + " the export data is being discarded");
      DBBPool.deleteCharArrayMemory(bufferPtr);
      return;
    }

    ExportDataSource source = sources.get(signature);
    if (source == null) {
      exportLog.error(
          "Could not find export data source for partition "
              + partitionId
              + " signature "
              + signature
              + " generation "
              + m_timestamp
              + " the export data is being discarded");
      DBBPool.deleteCharArrayMemory(bufferPtr);
      return;
    }

    source.pushExportBuffer(uso, bufferPtr, buffer, sync, endOfStream);
  }