Code example #1
File: Iv2Trace.java Project: taharafiq/voltdb
 public static void logIv2InitiateTaskMessage(
     Iv2InitiateTaskMessage itask, long localHSId, long txnid, long spHandle) {
   if (iv2log.isTraceEnabled()) {
     String logmsg = "rxInitMsg %s from %s ciHandle %s txnId %s spHandle %s trunc %s";
     if (itask.getTxnId() != Long.MIN_VALUE && itask.getTxnId() != txnid) {
       iv2log.error(
           "Iv2InitiateTaskMessage TXN ID conflict.  Message: "
               + itask.getTxnId()
               + ", locally held: "
               + txnid);
     }
     if (itask.getSpHandle() != Long.MIN_VALUE && itask.getSpHandle() != spHandle) {
       iv2log.error(
           "Iv2InitiateTaskMessage SP HANDLE conflict.  Message: "
               + itask.getSpHandle()
               + ", locally held: "
               + spHandle);
     }
     iv2log.trace(
         String.format(
             logmsg,
             CoreUtils.hsIdToString(localHSId),
             CoreUtils.hsIdToString(itask.m_sourceHSId),
             ClientInterfaceHandleManager.handleToString(itask.getClientInterfaceHandle()),
             txnIdToString(txnid),
             txnIdToString(spHandle),
             txnIdToString(itask.getTruncationHandle())));
   }
 }
Code example #2
File: ModuleManager.java Project: jmptrader/voltdb
    NavigableMap<URI, Bundle> stopBundles(Set<URI> bundles) {
      NavigableMap<URI, Bundle> expect, update;
      do {
        expect = get();
        update =
            ImmutableSortedMap.<URI, Bundle>naturalOrder()
                .putAll(Maps.filterKeys(expect, not(in(bundles))))
                .build();
      } while (!compareAndSet(expect, update));

      List<URI> couldNotStop = new ArrayList<>();
      NavigableMap<URI, Bundle> stopped = Maps.filterKeys(expect, in(bundles));
      for (Map.Entry<URI, Bundle> e : stopped.entrySet()) {
        URI bundleURI = e.getKey();
        Bundle bundle = e.getValue();
        try {
          bundle.stop();
        } catch (BundleException exc) {
          LOG.error("Failed to stop bundle " + bundleURI, exc);
          couldNotStop.add(bundleURI);
        }
      }
      if (!couldNotStop.isEmpty()) {
        throw new ModularException("Failed to stop bundles %s", couldNotStop);
      }
      return stopped;
    }
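The do/while around compareAndSet above is a lock-free copy-on-write pattern: the bundle map lives in an AtomicReference (the get and compareAndSet calls are presumably inherited from an AtomicReference base class), and every mutation publishes a fresh immutable snapshot. Below is a minimal self-contained sketch of the same pattern using plain java.util collections instead of Guava; class and method names are invented for illustration.

import java.util.Collections;
import java.util.NavigableMap;
import java.util.TreeMap;
import java.util.concurrent.atomic.AtomicReference;

final class CopyOnWriteNavigableMap<K extends Comparable<K>, V> {
  private final AtomicReference<NavigableMap<K, V>> ref =
      new AtomicReference<>(Collections.unmodifiableNavigableMap(new TreeMap<K, V>()));

  // Retry until the snapshot we copied is still the current one,
  // then publish the updated copy atomically.
  V put(K key, V value) {
    NavigableMap<K, V> expect;
    NavigableMap<K, V> update;
    V previous;
    do {
      expect = ref.get();
      TreeMap<K, V> copy = new TreeMap<>(expect);
      previous = copy.put(key, value);
      update = Collections.unmodifiableNavigableMap(copy);
    } while (!ref.compareAndSet(expect, update));
    return previous;
  }

  // Readers get an immutable snapshot; no locking required.
  NavigableMap<K, V> snapshot() {
    return ref.get();
  }
}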
Code example #3
File: ModuleManager.java Project: jmptrader/voltdb
  private ModuleManager(File cacheRoot) {

    String systemPackagesSpec =
        FluentIterable.from(SYSTEM_PACKAGES).transform(appendVersion).join(COMMA_JOINER);

    Map<String, String> frameworkProps =
        ImmutableMap.<String, String>builder()
            .put(Constants.FRAMEWORK_SYSTEMPACKAGES_EXTRA, systemPackagesSpec)
            .put("org.osgi.framework.storage.clean", "onFirstInit")
            .put("felix.cache.rootdir", cacheRoot.getAbsolutePath())
            .put("felix.cache.locking", Boolean.FALSE.toString())
            .build();

    LOG.info("Framework properties are: " + frameworkProps);

    FrameworkFactory frameworkFactory =
        ServiceLoader.load(FrameworkFactory.class).iterator().next();

    m_framework = frameworkFactory.newFramework(frameworkProps);

    try {
      m_framework.start();
    } catch (BundleException e) {
      LOG.error("Failed to start the felix OSGi framework", e);
      throw new SetUpException("Failed to start the felix OSGi framework", e);
    }

    m_bundles = new BundleRef(m_framework);
  }
Code example #4
  @Override
  public void run() {
    byte[] data = new byte[rowSize];
    r.nextBytes(data);

    try {
      long currentRowCount = getRowCount();
      while ((currentRowCount < targetCount) && (m_shouldContinue.get())) {
        CountDownLatch latch = new CountDownLatch(batchSize);
        // try to insert batchSize random rows
        for (int i = 0; i < batchSize; i++) {
          // note: Math.abs(Long.MIN_VALUE) is still negative, so mask instead
          long p = r.nextLong() & Long.MAX_VALUE;
          m_permits.acquire();
          client.callProcedure(
              new InsertCallback(latch), tableName.toUpperCase() + "TableInsert", p, data);
        }
        latch.await(10, TimeUnit.SECONDS);
        long nextRowCount = getRowCount();
        // if no progress, throttle a bit
        if (nextRowCount == currentRowCount) {
          Thread.sleep(1000);
        }
        currentRowCount = nextRowCount;
      }

    } catch (Exception e) {
      // on exception, log and end the thread, but don't kill the process
      log.error(
          "BigTableLoader failed a procedure call for table "
              + tableName
              + " and the thread will now stop.",
          e);
      return;
    }
  }
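The latch only reaches zero if the callback fires once per callProcedure invocation, success or failure. InsertCallback is not shown in this snippet; here is a hypothetical reconstruction against VoltDB's ProcedureCallback interface (class and field names assumed):

import java.util.concurrent.CountDownLatch;
import org.voltdb.client.ClientResponse;
import org.voltdb.client.ProcedureCallback;

// Hypothetical sketch of the InsertCallback used above.
class InsertCallback implements ProcedureCallback {
  private final CountDownLatch latch;

  InsertCallback(CountDownLatch latch) {
    this.latch = latch;
  }

  @Override
  public void clientCallback(ClientResponse response) {
    // Count down on every outcome so latch.await(...) cannot stall on failed inserts.
    latch.countDown();
  }
}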
Code example #5
File: Iv2Trace.java Project: taharafiq/voltdb
 public static void logFragmentTaskMessage(
     FragmentTaskMessage ftask, long localHSId, long spHandle, boolean borrow) {
   if (iv2log.isTraceEnabled()) {
     String label = "rxFragMsg";
     if (borrow) {
       label = "rxBrrwMsg";
     }
     if (ftask.getSpHandle() != Long.MIN_VALUE && ftask.getSpHandle() != spHandle) {
       iv2log.error(
           "FragmentTaskMessage SP HANDLE conflict.  Message: "
               + ftask.getSpHandle()
               + ", locally held: "
               + spHandle);
     }
     String logmsg = "%s %s from %s txnId %s spHandle %s trunc %s";
     iv2log.trace(
         String.format(
             logmsg,
             label,
             CoreUtils.hsIdToString(localHSId),
             CoreUtils.hsIdToString(ftask.m_sourceHSId),
             txnIdToString(ftask.getTxnId()),
             txnIdToString(spHandle),
             txnIdToString(ftask.getTruncationHandle())));
   }
 }
Code example #6
 public void log(String message, long now, Level level) {
   if (now - m_lastLogTime > m_maxLogIntervalMillis) {
     synchronized (this) {
       if (now - m_lastLogTime > m_maxLogIntervalMillis) {
         switch (level) {
           case DEBUG:
             m_logger.debug(message);
             break;
           case ERROR:
             m_logger.error(message);
             break;
           case FATAL:
             m_logger.fatal(message);
             break;
           case INFO:
             m_logger.info(message);
             break;
           case TRACE:
             m_logger.trace(message);
             break;
           case WARN:
             m_logger.warn(message);
             break;
         }
         m_lastLogTime = now;
       }
     }
   }
 }
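This is double-checked locking on m_lastLogTime: the unsynchronized outer test is a cheap fast path, and the synchronized re-check decides which caller actually logs. For the outer read to be safe, m_lastLogTime should be volatile (a plain long read is not even guaranteed to be atomic on 32-bit JVMs). A self-contained sketch of the pattern with a stand-in logger; field and class names are assumptions, not VoltDB's:

final class RateLimitedLog {
  private final long maxIntervalMillis;
  // volatile so the unsynchronized fast-path read sees the latest write,
  // and so the 64-bit read is atomic even on 32-bit JVMs
  private volatile long lastLogTime;

  RateLimitedLog(long maxIntervalMillis) {
    this.maxIntervalMillis = maxIntervalMillis;
  }

  void log(String message, long now) {
    if (now - lastLogTime > maxIntervalMillis) {      // cheap, racy pre-check
      synchronized (this) {
        if (now - lastLogTime > maxIntervalMillis) {  // authoritative re-check
          System.out.println(message);                // stand-in for m_logger
          lastLogTime = now;
        }
      }
    }
  }
}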
Code example #7
 /**
  * This variant delays the formatting of the string message until it is actually logged
  *
   * @param now current time in milliseconds
  * @param level a {@link Level debug level}
  * @param cause evidentiary exception
  * @param stemformat a {@link String#format(String, Object...) string format}
  * @param args format arguments
  */
 public void log(long now, Level level, Throwable cause, String stemformat, Object... args) {
   if (now - m_lastLogTime > m_maxLogIntervalMillis) {
     synchronized (this) {
       if (now - m_lastLogTime > m_maxLogIntervalMillis) {
         String message = formatMessage(cause, stemformat, args);
         switch (level) {
           case DEBUG:
             m_logger.debug(message);
             break;
           case ERROR:
             m_logger.error(message);
             break;
           case FATAL:
             m_logger.fatal(message);
             break;
           case INFO:
             m_logger.info(message);
             break;
           case TRACE:
             m_logger.trace(message);
             break;
           case WARN:
             m_logger.warn(message);
             break;
         }
         m_lastLogTime = now;
       }
     }
   }
 }
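The point of this overload is that String.format and the stack-trace rendering run only on the rare call that survives the interval check, which matters on hot paths. A hypothetical call site, with all names illustrative rather than taken from VoltDB:

// Hypothetical call site: format work is deferred until the limiter
// actually decides to emit the message.
void reportDrops(RateLimitedLogger limiter, Throwable cause, long droppedCount, String sourceName) {
  limiter.log(
      System.currentTimeMillis(),
      Level.WARN,
      cause,                      // may be null; appended as a stack trace if not
      "dropped %d messages from %s",
      droppedCount, sourceName);
}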
Code example #8
  public void pushExportBuffer(
      int partitionId,
      String signature,
      long uso,
      long bufferPtr,
      ByteBuffer buffer,
      boolean sync,
      boolean endOfStream) {
    // System.out.println("In generation " + m_timestamp + " partition " + partitionId
    //     + " signature " + signature
    //     + (buffer == null ? " null buffer " : (" buffer length " + buffer.remaining())));
    // for (Integer i : m_dataSourcesByPartition.keySet()) {
    //   System.out.println("Have partition " + i);
    // }
    assert (m_dataSourcesByPartition.containsKey(partitionId));
    assert (m_dataSourcesByPartition.get(partitionId).containsKey(signature));
    HashMap<String, ExportDataSource> sources = m_dataSourcesByPartition.get(partitionId);

    if (sources == null) {
      exportLog.error(
          "Could not find export data sources for partition "
              + partitionId
              + " generation "
              + m_timestamp
              + " the export data is being discarded");
      DBBPool.deleteCharArrayMemory(bufferPtr);
      return;
    }

    ExportDataSource source = sources.get(signature);
    if (source == null) {
      exportLog.error(
          "Could not find export data source for partition "
              + partitionId
              + " signature "
              + signature
              + " generation "
              + m_timestamp
              + " the export data is being discarded");
      DBBPool.deleteCharArrayMemory(bufferPtr);
      return;
    }

    source.pushExportBuffer(uso, bufferPtr, buffer, sync, endOfStream);
  }
Code example #9
File: ModuleManager.java Project: jmptrader/voltdb
 static ModularException loggedModularException(Throwable e, String msg, Object... args) {
   ModularException.isCauseFor(e)
       .map(
           me -> {
             throw me;
           });
   LOG.error(String.format(msg, args), e);
   return new ModularException(msg, e, args);
 }
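isCauseFor presumably walks e's cause chain and returns an Optional, so the map with a throwing lambda rethrows an already-constructed ModularException instead of wrapping it a second time. One plausible shape for such a helper, sketched here rather than taken from VoltDB:

import java.util.Optional;

final class Causes {
  // Walk the cause chain and surface the first cause of the requested type.
  static <T extends Throwable> Optional<T> findCause(Throwable t, Class<T> type) {
    for (Throwable c = t; c != null; c = c.getCause()) {
      if (type.isInstance(c)) {
        return Optional.of(type.cast(c));
      }
    }
    return Optional.empty();
  }
}

With a helper like that, Causes.findCause(e, ModularException.class).ifPresent(me -> { throw me; }) reads slightly more directly than map, since the lambda exists only for its side effect; this compiles because the exception type is unchecked.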
Code example #10
File: ExportDataSource.java Project: thaingo/voltdb
 public void close() {
   synchronized (m_committedBuffers) {
     try {
       m_committedBuffers.close();
     } catch (IOException e) {
       exportLog.error(e);
     }
   }
 }
Code example #11
 private String formatMessage(Throwable cause, String stemformat, Object... args) {
   String format = stemformat;
   if (cause != null) {
     format =
         new StringBuilder(stemformat.length() + 8)
             .append(stemformat)
             .append("\n%s")
             .toString()
             .intern();
     args = Arrays.copyOf(args, args.length + 1);
     args[args.length - 1] = Throwables.getStackTraceAsString(cause);
   }
   String msg = null;
   try {
     msg = String.format(format, args);
    } catch (MissingFormatArgumentException
        | IllegalFormatConversionException
        | UnknownFormatConversionException ex) {
      // All three are IllegalFormatException subtypes; one multi-catch covers them.
      m_logger.error(
          "failed to format log message. Format: "
              + format
              + ", arguments: "
              + Arrays.toString(args),
          ex);
   } finally {
     if (msg == null) {
       msg = "Format: " + format + ", arguments: " + Arrays.toString(args);
     }
   }
   return msg;
 }
Code example #12
 @Override
 public Future<Pair<Boolean, Long>> start() {
   try {
     prepareForFaultRecovery();
   } catch (Exception e) {
     tmLog.error(m_whoami + "failed leader promotion:", e);
     m_promotionResult.setException(e);
     m_promotionResult.done(m_maxSeenTxnId);
   }
   return m_promotionResult;
 }
Code example #13
File: TestCSVLoader.java Project: cgvarela/voltdb
  public static void prepare() {
    if (!reportDir.endsWith("/")) reportDir += "/";
    File dir = new File(reportDir);
    try {
      if (!dir.exists()) {
        dir.mkdirs();
      }

    } catch (Exception x) {
      m_log.error(x.getMessage(), x);
      System.exit(-1);
    }
  }
Code example #14
 /**
   * Indicate to all associated {@link ExportDataSource}s that they should assume the mastership
   * role for the given partition id
  *
  * @param partitionId
  */
 public void acceptMastershipTask(int partitionId) {
   HashMap<String, ExportDataSource> partitionDataSourceMap =
       m_dataSourcesByPartition.get(partitionId);
   exportLog.info(
       "Export generation " + m_timestamp + " accepting mastership for partition " + partitionId);
   for (ExportDataSource eds : partitionDataSourceMap.values()) {
     try {
       eds.acceptMastership();
     } catch (Exception e) {
       exportLog.error("Unable to start exporting", e);
     }
   }
 }
Code example #15
File: KafkaLoader.java Project: tnn/voltdb
  /**
   * kafkaloader main
   *
   * @param args command line arguments
   */
  public static void main(String[] args) {

    final KafkaConfig cfg = new KafkaConfig();
    cfg.parse(KafkaLoader.class.getName(), args);
    try {
      KafkaLoader kloader = new KafkaLoader(cfg);
      kloader.processKafkaMessages();
    } catch (Exception e) {
      m_log.error("Failure in kafkaloader", e);
      System.exit(-1);
    }

    System.exit(0);
  }
Code example #16
File: LeaderAppointer.java Project: cheryzcc/voltdb
  private long assignLeader(int partitionId, List<Long> children) {
    // We used masterHostId = -1 as a way to force the leader choice to be
    // the first replica in the list, if we don't have some other mechanism
    // which has successfully overridden it.
    int masterHostId = -1;
    if (m_state.get() == AppointerState.CLUSTER_START) {
      try {
        // find master in topo
        JSONArray parts = m_topo.getJSONArray("partitions");
        for (int p = 0; p < parts.length(); p++) {
          JSONObject aPartition = parts.getJSONObject(p);
          int pid = aPartition.getInt("partition_id");
          if (pid == partitionId) {
            masterHostId = aPartition.getInt("master");
          }
        }
      } catch (JSONException jse) {
        tmLog.error("Failed to find master for partition " + partitionId + ", defaulting to 0");
        jse.printStackTrace();
        masterHostId = -1; // stupid default
      }
    } else {
      // For now, if we're appointing a new leader as a result of a
      // failure, just pick the first replica in the children list.
      // Could eventually do something more complex here to try to keep a
      // semi-balance, but it's unclear that this has much utility until
      // we add rebalancing on rejoin as well.
      masterHostId = -1;
    }

    long masterHSId = children.get(0);
    for (Long child : children) {
      if (CoreUtils.getHostIdFromHSId(child) == masterHostId) {
        masterHSId = child;
        break;
      }
    }
    tmLog.info(
        "Appointing HSId "
            + CoreUtils.hsIdToString(masterHSId)
            + " as leader for partition "
            + partitionId);
    try {
      m_iv2appointees.put(partitionId, masterHSId);
    } catch (Exception e) {
      VoltDB.crashLocalVoltDB("Unable to appoint new master for partition " + partitionId, true, e);
    }
    return masterHSId;
  }
Code example #17
File: KafkaLoader.java Project: tnn/voltdb
 @Override
 public void run() {
   ConsumerIterator<byte[], byte[]> it = m_stream.iterator();
   while (it.hasNext()) {
     MessageAndMetadata<byte[], byte[]> md = it.next();
     byte msg[] = md.message();
     long offset = md.offset();
     String smsg = new String(msg);
     try {
       m_loader.insertRow(new RowWithMetaData(smsg, offset), m_csvParser.parseLine(smsg));
     } catch (Exception ex) {
       m_log.error("Consumer stopped", ex);
       System.exit(1);
     }
   }
 }
Code example #18
File: ModuleManager.java Project: jmptrader/voltdb
 void uninstallBundles(Set<URI> bundles) {
   List<URI> couldNotUninstall = new ArrayList<>();
   for (Map.Entry<URI, Bundle> e : stopBundles(bundles).entrySet()) {
     URI bundleURI = e.getKey();
     Bundle bundle = e.getValue();
     try {
       bundle.uninstall();
     } catch (BundleException exc) {
       LOG.error("Failed to uninstall bundle " + bundleURI, exc);
       couldNotUninstall.add(bundleURI);
     }
    }
    // Check after the loop so that all failing bundles are reported together.
    if (!couldNotUninstall.isEmpty()) {
      throw new ModularException("Failed to uninstall bundles %s", couldNotUninstall);
    }
  }
Code example #19
 public void close() {
   List<ListenableFuture<?>> tasks = new ArrayList<ListenableFuture<?>>();
   for (HashMap<String, ExportDataSource> sources : m_dataSourcesByPartition.values()) {
     for (ExportDataSource source : sources.values()) {
       tasks.add(source.close());
     }
   }
   try {
     Futures.allAsList(tasks).get();
   } catch (Exception e) {
      // Individual task errors are logged inside the tasks themselves;
      // intentionally not failing if there is an issue with close
     exportLog.error("Error closing export data sources", e);
   }
   shutdown = true;
 }
Code example #20
File: KafkaLoader.java Project: tnn/voltdb
  public void processKafkaMessages() throws Exception {
    // Split server list
    final String[] serverlist = m_config.servers.split(",");

    // Create connection
    final ClientConfig c_config = new ClientConfig(m_config.user, m_config.password);
    c_config.setProcedureCallTimeout(0); // Set procedure call timeout to infinite

    m_client = getClient(c_config, serverlist, m_config.port);

    if (m_config.useSuppliedProcedure) {
      m_loader =
          new CSVTupleDataLoader(
              (ClientImpl) m_client, m_config.procedure, new KafkaBulkLoaderCallback());
    } else {
      m_loader =
          new CSVBulkDataLoader(
              (ClientImpl) m_client, m_config.table, m_config.batch, new KafkaBulkLoaderCallback());
    }
    m_loader.setFlushInterval(m_config.flush, m_config.flush);
    m_consumer =
        new KafkaConsumerConnector(
            m_config.zookeeper,
            m_config.useSuppliedProcedure ? m_config.procedure : m_config.table);
    try {
      m_es = getConsumerExecutor(m_consumer, m_loader);
      if (m_config.useSuppliedProcedure) {
        m_log.info(
            "Kafka Consumer from topic: "
                + m_config.topic
                + " Started using procedure: "
                + m_config.procedure);
      } else {
        m_log.info(
            "Kafka Consumer from topic: "
                + m_config.topic
                + " Started for table: "
                + m_config.table);
      }
      m_es.awaitTermination(365, TimeUnit.DAYS);
    } catch (Exception ex) {
      m_log.error("Error in Kafka Consumer", ex);
      System.exit(-1);
    }
    close();
  }
Code example #21
File: ProcedureRunner.java Project: taharafiq/voltdb
 private void updateCRC(QueuedSQL queuedSQL) {
   if (!queuedSQL.stmt.isReadOnly()) {
     m_inputCRC.update(queuedSQL.stmt.sqlCRC);
     try {
       queuedSQL.params.addToCRC(m_inputCRC);
     } catch (IOException e) {
       log.error(
           "Unable to compute CRC of parameters to "
               + "a SQL statement in procedure: "
               + m_procedureName,
           e);
       // don't crash
       // presumably, this will fail deterministically at all replicas
       // just log the error and hope people report it
     }
   }
 }
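The accumulated CRC is what lets replicas cross-check that they queued identical SQL with identical parameters; any divergence signals non-determinism. A minimal illustration of the same idea with java.util.zip.CRC32 (the real code presumably uses VoltDB's own CRC type and parameter serialization; names here are invented):

import java.nio.ByteBuffer;
import java.util.zip.CRC32;

final class DeterminismCrc {
  // Fold a statement's CRC and its serialized parameters into one checksum,
  // so an identical (statement, params) sequence hashes identically on every replica.
  static long checksum(int sqlCrc, byte[] serializedParams) {
    CRC32 crc = new CRC32();
    crc.update(ByteBuffer.allocate(4).putInt(sqlCrc).array());
    crc.update(serializedParams);
    return crc.getValue();
  }
}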
Code example #22
  private void handleLeaderChildrenUpdate(Integer partition, List<String> children) {
    if (m_drainedSources.get() == m_numSources || children.isEmpty()) {
      return;
    }

    String leader = Collections.min(children);
    if (m_partitionLeaderZKName.get(partition).equals(leader)) {
      if (m_partitionsIKnowIAmTheLeader.add(partition)) {
        for (ExportDataSource eds : m_dataSourcesByPartition.get(partition).values()) {
          try {
            eds.acceptMastership();
          } catch (Exception e) {
            exportLog.error("Unable to start exporting", e);
          }
        }
      }
    }
  }
Code example #23
  /*
   * Returns true if the generation was completely truncated away
   */
  public boolean truncateExportToTxnId(long txnId, long[] perPartitionTxnIds) {
    // create an easy partitionId:txnId lookup.
    HashMap<Integer, Long> partitionToTxnId = new HashMap<Integer, Long>();
    for (long tid : perPartitionTxnIds) {
      partitionToTxnId.put(TxnEgo.getPartitionId(tid), tid);
    }

    List<ListenableFuture<?>> tasks = new ArrayList<ListenableFuture<?>>();

    // Pre-iv2, the truncation point is the snapshot transaction id.
    // In iv2, truncation happens at the per-partition txn id recorded in the snapshot.
    for (HashMap<String, ExportDataSource> dataSources : m_dataSourcesByPartition.values()) {
      for (ExportDataSource source : dataSources.values()) {
        if (VoltDB.instance().isIV2Enabled()) {
          Long truncationPoint = partitionToTxnId.get(source.getPartitionId());
          if (truncationPoint == null) {
            exportLog.error(
                "Snapshot "
                    + txnId
                    + " does not include truncation point for partition "
                    + source.getPartitionId());
          } else {
            tasks.add(source.truncateExportToTxnId(truncationPoint));
          }
        } else {
          tasks.add(source.truncateExportToTxnId(txnId));
        }
      }
    }

    try {
      Futures.allAsList(tasks).get();
    } catch (Exception e) {
      VoltDB.crashLocalVoltDB(
          "Unexpected exception truncating export data during snapshot restore. "
              + "You can back up export overflow data and start the "
              + "DB without it to get past this error",
          true,
          e);
    }

    return m_drainedSources.get() == m_numSources;
  }
Code example #24
File: CSVFileReader.java Project: jmptrader/voltdb
 private boolean checkHeader() {
   try {
     String[] firstline = m_listReader.getHeader(false);
     Set<String> firstset = new HashSet<String>();
     BiMap<Integer, String> colNames = HashBiMap.create(m_loader.getColumnNames());
     headerlen = firstline.length;
      // collect distinct upper-cased names; a null header entry is invalid.
     for (String name : firstline) {
       if (name != null) {
         firstset.add(name.toUpperCase());
       } else {
         return false;
       }
     }
      // check that the file supplies at least as many columns as the table.
     if (headerlen < m_columnCount) {
       return false;
     } else {
        // check that the header names cover every table column.
       int matchColCount = 0;
       for (String name : firstset) {
         if (colNames.containsValue(name)) {
           matchColCount++;
         }
       }
       if (matchColCount != m_columnCount) {
         return false;
       }
     }
      // build the mapping from file column index to table column index.
     order = new Integer[headerlen];
     for (int fileCol = 0; fileCol < headerlen; fileCol++) {
       String name = firstline[fileCol];
       Integer tableCol = colNames.inverse().get(name.toUpperCase());
       order[fileCol] = tableCol;
     }
   } catch (IOException ex) {
     m_log.error("Failed to read CSV line from file: " + ex);
   }
   return true;
 }
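The BiMap does the real work in checkHeader: the forward map goes from table-column index to name, and inverse() provides the name-to-index lookup used to build order. A small standalone illustration with Guava's HashBiMap; the column names are invented for the example:

import com.google.common.collect.BiMap;
import com.google.common.collect.HashBiMap;

public class BiMapDemo {
  public static void main(String[] args) {
    // Forward map: table column index -> upper-cased column name.
    BiMap<Integer, String> colNames = HashBiMap.create();
    colNames.put(0, "ID");
    colNames.put(1, "NAME");

    // inverse() is a live view keyed the other way: name -> index.
    Integer tableCol = colNames.inverse().get("NAME"); // 1
    boolean known = colNames.containsValue("ID");      // true
    System.out.println(tableCol + " " + known);
  }
}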
Code example #25
File: ExportDataSource.java Project: thaingo/voltdb
  /**
   * Obtain next block of data from source
   *
   * @throws MessagingException
   */
  public void exportAction(RawProcessor.ExportInternalMessage m) throws MessagingException {
    assert (m.m_m.getGeneration() == m_generation);
    ExportProtoMessage message = m.m_m;
    ExportProtoMessage result =
        new ExportProtoMessage(message.getGeneration(), message.m_partitionId, message.m_signature);
    ExportInternalMessage mbp = new ExportInternalMessage(m.m_sb, result);
    StreamBlock first_unpolled_block = null;

    // Assemble a list of blocks to delete so that they can be deleted
    // outside of the m_committedBuffers critical section
    ArrayList<StreamBlock> blocksToDelete = new ArrayList<StreamBlock>();

    boolean hitEndOfStreamWithNoRunnable = false;
    try {
      // Perform all interaction with m_committedBuffers under lock
      // because pushExportBuffer may be called from an ExecutionSite at any time
      synchronized (m_committedBuffers) {
        // Process the ack if any and add blocks to the delete list or move the released USO pointer
        if (message.isAck() && message.getAckOffset() > 0) {
          try {
            releaseExportBytes(message.getAckOffset(), blocksToDelete);
          } catch (IOException e) {
            VoltDB.crashLocalVoltDB("Error attempting to release export bytes", true, e);
            return;
          }
        }

        if (m_endOfStream && m_committedBuffers.sizeInBytes() == 0) {
          if (m_onDrain != null) {
            try {
              m_onDrain.run();
            } finally {
              m_onDrain = null;
            }
          } else {
            hitEndOfStreamWithNoRunnable = true;
          }
          return;
        }

        // Reset the first unpolled uso so that blocks that have already been polled will
        // be served up to the next connection
        if (message.isClose()) {
          try {
            resetPollMarker();
          } catch (IOException e) {
            exportLog.error(e);
          }
        }

        // Inside this critical section do the work to find out
        // what block should be returned by the next poll.
        // Copying and sending the data will take place outside the critical section
        try {
          if (message.isPoll()) {
            Iterator<StreamBlock> iter = m_committedBuffers.iterator();
            while (iter.hasNext()) {
              StreamBlock block = iter.next();
              // find the first block that has unpolled data
              if (m_firstUnpolledUso < block.uso() + block.totalUso()) {
                first_unpolled_block = block;
                m_firstUnpolledUso = block.uso() + block.totalUso();
                break;
              } else {
                blocksToDelete.add(block);
                iter.remove();
              }
            }
          }
        } catch (RuntimeException e) {
          if (e.getCause() instanceof IOException) {
            VoltDB.crashLocalVoltDB("Error attempting to find unpolled export data", true, e);
          } else {
            throw e;
          }
        }
      }
    } finally {
      // Try hard not to leak memory
      for (StreamBlock sb : blocksToDelete) {
        sb.deleteContent();
      }
      // Cheesy hack for now where we serve info about old
      // data sources from previous generations. In reality accessing
      // this generation is something of an error
      if (hitEndOfStreamWithNoRunnable) {
        ByteBuffer buf = ByteBuffer.allocate(4);
        buf.putInt(0).flip();
        result.pollResponse(m_firstUnpolledUso, buf);
        mbp.m_sb.event(result);
      }
    }

    if (message.isPoll()) {
      // If there are no unpolled blocks return the firstUnpolledUSO with no data
      if (first_unpolled_block == null) {
        ByteBuffer buf = ByteBuffer.allocate(4);
        buf.putInt(0).flip();
        result.pollResponse(m_firstUnpolledUso, buf);
      } else {
        // Otherwise return the block with the USO for the end of the block
        // since the entire remainder of the block is being sent.
        result.pollResponse(
            first_unpolled_block.uso() + first_unpolled_block.totalUso(),
            first_unpolled_block.unreleasedBuffer());
      }
      mbp.m_sb.event(result);
    }
  }
Code example #26
File: ExportDataSource.java Project: thaingo/voltdb
  public void pushExportBuffer(
      long uso, final long bufferPtr, ByteBuffer buffer, boolean sync, boolean endOfStream) {
    final java.util.concurrent.atomic.AtomicBoolean deleted =
        new java.util.concurrent.atomic.AtomicBoolean(false);
    synchronized (m_committedBuffers) {
      if (endOfStream) {
        assert (!m_endOfStream);
        assert (bufferPtr == 0);
        assert (buffer == null);
        assert (!sync);
        m_endOfStream = endOfStream;

        if (m_committedBuffers.sizeInBytes() == 0) {
          exportLog.info("Pushed EOS buffer with 0 bytes remaining");
          try {
            m_onDrain.run();
          } finally {
            m_onDrain = null;
          }
        }
        return;
      }
      assert (!m_endOfStream);
      if (buffer != null) {
        if (buffer.capacity() > 0) {
          try {
            m_committedBuffers.offer(
                new StreamBlock(
                    new BBContainer(buffer, bufferPtr) {
                      @Override
                      public void discard() {
                        DBBPool.deleteCharArrayMemory(address);
                        deleted.set(true);
                      }
                    },
                    uso,
                    false));
          } catch (IOException e) {
            exportLog.error(e);
            if (!deleted.get()) {
              DBBPool.deleteCharArrayMemory(bufferPtr);
            }
          }
        } else {
          /*
           * TupleStreamWrapper::setBytesUsed propagates the USO by sending
           * over an empty stream block. The block will be deleted
           * on the native side when this method returns
           */
          exportLog.info(
              "Syncing first unpolled USO to "
                  + uso
                  + " for table "
                  + m_tableName
                  + " partition "
                  + m_partitionId);
          m_firstUnpolledUso = uso;
        }
      }
      if (sync) {
        try {
          // Don't do a real sync, just write the in memory buffers
          // to a file. @Quiesce or blocking snapshot will do the sync
          m_committedBuffers.sync(true);
        } catch (IOException e) {
          exportLog.error(e);
        }
      }
    }
  }
Code example #27
  /**
   * The only public method: do all the work to start a snapshot. Assumes that a snapshot is
   * feasible, that the caller has validated it can be accomplished, that the caller knows this is a
   * consistent or useful transaction point at which to snapshot.
   *
   * @param file_path
   * @param pathType
   * @param file_nonce
   * @param format
   * @param block
   * @param multiPartTxnId
   * @param partitionTxnId
   * @param legacyPerPartitionTxnIds
   * @param data
   * @param context
   * @param hostname
   * @param hashinatorData
   * @param timestamp
   * @return VoltTable describing the results of the snapshot attempt
   */
  public VoltTable startSnapshotting(
      final String file_path,
      final String pathType,
      final String file_nonce,
      final SnapshotFormat format,
      final byte block,
      final long multiPartTxnId,
      final long partitionTxnId,
      final long legacyPerPartitionTxnIds[],
      final String data,
      final SystemProcedureExecutionContext context,
      final String hostname,
      final HashinatorSnapshotData hashinatorData,
      final long timestamp) {
    TRACE_LOG.trace("Creating snapshot target and handing to EEs");
    final VoltTable result = SnapshotUtil.constructNodeResultsTable();
    final int numLocalSites =
        context.getCluster().getDeployment().get("deployment").getSitesperhost();
    JSONObject jsData = null;
    if (data != null && !data.isEmpty()) {
      try {
        jsData = new JSONObject(data);
      } catch (JSONException e) {
        SNAP_LOG.error(String.format("JSON exception on snapshot data \"%s\".", data), e);
      }
    }
    final JSONObject finalJsData = jsData;

    JSONObject perSiteRemoteDataCenterDrIds;
    try {
      perSiteRemoteDataCenterDrIds =
          ExtensibleSnapshotDigestData.serializeSiteConsumerDrIdTrackersToJSON(
              context.getDrAppliedTrackers());
    } catch (JSONException e) {
      SNAP_LOG.warn("Failed to serialize the Remote DataCenter's Last applied DRIds");
      perSiteRemoteDataCenterDrIds = new JSONObject();
    }

    // One site wins the race to create the snapshot targets, populating
    // m_taskListsForHSIds for the other sites and creating an appropriate
    // number of snapshot permits.
    synchronized (SnapshotSiteProcessor.m_snapshotCreateLock) {
      SnapshotSiteProcessor.m_snapshotCreateSetupBarrierActualAction.set(
          new Runnable() {
            @Override
            public void run() {
              Map<Integer, Long> partitionTransactionIds = m_partitionLastSeenTransactionIds;

              SNAP_LOG.debug("Last seen partition transaction ids " + partitionTransactionIds);
              m_partitionLastSeenTransactionIds = new HashMap<Integer, Long>();
              partitionTransactionIds.put(TxnEgo.getPartitionId(multiPartTxnId), multiPartTxnId);

              Map<Integer, JSONObject> remoteDataCenterLastIds = m_remoteDataCenterLastIds;
              m_remoteDataCenterLastIds = new HashMap<Integer, JSONObject>();

              /*
               * Do a quick sanity check that the provided IDs
               * don't conflict with currently active partitions. If they do
               * it isn't fatal we can just skip it.
               */
              for (long txnId : legacyPerPartitionTxnIds) {
                final int legacyPartition = TxnEgo.getPartitionId(txnId);
                if (partitionTransactionIds.containsKey(legacyPartition)) {
                  SNAP_LOG.warn(
                      "While saving a snapshot and propagating legacy "
                          + "transaction ids, found an id that matches a currently active partition: "
                          + partitionTransactionIds.get(legacyPartition));
                } else {
                  partitionTransactionIds.put(legacyPartition, txnId);
                }
              }

              m_allLocalSiteSnapshotDigestData =
                  new ExtensibleSnapshotDigestData(
                      SnapshotSiteProcessor.getExportSequenceNumbers(),
                      SnapshotSiteProcessor.getDRTupleStreamStateInfo(),
                      remoteDataCenterLastIds);
              createSetupIv2(
                  file_path,
                  pathType,
                  file_nonce,
                  format,
                  multiPartTxnId,
                  partitionTransactionIds,
                  finalJsData,
                  context,
                  result,
                  m_allLocalSiteSnapshotDigestData,
                  context.getSiteTrackerForSnapshot(),
                  hashinatorData,
                  timestamp);
            }
          });

      // Create a barrier to use with the current number of sites to wait for
      // or if the barrier is already set up check if it is broken and reset if necessary
      SnapshotSiteProcessor.readySnapshotSetupBarriers(numLocalSites);

      // From within this EE, record the sequence numbers as of the start of the snapshot (now)
      // so that the info can be put in the digest.
      SnapshotSiteProcessor.populateSequenceNumbersForExecutionSite(context);
      Integer partitionId = TxnEgo.getPartitionId(partitionTxnId);
      SNAP_LOG.debug(
          "Registering transaction id "
              + partitionTxnId
              + " for "
              + TxnEgo.getPartitionId(partitionTxnId));
      m_partitionLastSeenTransactionIds.put(partitionId, partitionTxnId);
      m_remoteDataCenterLastIds.put(partitionId, perSiteRemoteDataCenterDrIds);
    }

    boolean runPostTasks = false;
    VoltTable earlyResultTable = null;
    try {
      SnapshotSiteProcessor.m_snapshotCreateSetupBarrier.await();
      try {
        synchronized (m_createLock) {
          SNAP_LOG.debug(
              "Found tasks for HSIds: "
                  + CoreUtils.hsIdCollectionToString(m_taskListsForHSIds.keySet()));
          SNAP_LOG.debug("Looking for local HSID: " + CoreUtils.hsIdToString(context.getSiteId()));
          Deque<SnapshotTableTask> taskList = m_taskListsForHSIds.remove(context.getSiteId());
          // If createSetup failed, then the first site to reach here is going
          // to send the results table generated by createSetup, and then empty out the table.
          // All other sites to reach here will send the appropriate empty table.
          // If createSetup was a success but the taskList is null, then we'll use the block
          // switch to figure out what flavor of empty SnapshotSave result table to return.
          if (!m_createSuccess.get()) {
            // There shouldn't be any work for any site if we failed
            assert (m_taskListsForHSIds.isEmpty());
            VoltTable finalresult = m_createResult.get();
            if (finalresult != null) {
              m_createResult.set(null);
              earlyResultTable = finalresult;
            } else {
              // We returned a non-empty NodeResultsTable with the failures in it,
              // every other site needs to return a NodeResultsTable as well.
              earlyResultTable = SnapshotUtil.constructNodeResultsTable();
            }
          } else if (taskList == null) {
            SNAP_LOG.debug("No task for this site, block " + block);
            // This node is participating in the snapshot but this site has nothing to do.
            // Send back an appropriate empty table based on the block flag
            if (block != 0) {
              runPostTasks = true;
              earlyResultTable = SnapshotUtil.constructPartitionResultsTable();
              earlyResultTable.addRow(
                  context.getHostId(),
                  hostname,
                  CoreUtils.getSiteIdFromHSId(context.getSiteId()),
                  "SUCCESS",
                  "");
            } else {
              // If doing snapshot for only replicated table(s), earlyResultTable here
              // may not be empty even if the taskList of this site is null.
              // In that case, snapshot result is preserved by earlyResultTable.
              earlyResultTable = result;
            }
          } else {
            context
                .getSiteSnapshotConnection()
                .initiateSnapshots(
                    format, taskList, multiPartTxnId, m_allLocalSiteSnapshotDigestData);
          }

          if (m_deferredSetupFuture != null && taskList != null) {
            // Add a listener to the deferred setup so that it can kick off the snapshot
            // task once the setup is done.
            m_deferredSetupFuture.addListener(
                new Runnable() {
                  @Override
                  public void run() {
                    DeferredSnapshotSetup deferredSnapshotSetup = null;
                    try {
                      deferredSnapshotSetup = m_deferredSetupFuture.get();
                    } catch (Exception e) {
                      // it doesn't throw
                    }

                    assert deferredSnapshotSetup != null;
                    context
                        .getSiteSnapshotConnection()
                        .startSnapshotWithTargets(
                            deferredSnapshotSetup.getPlan().getSnapshotDataTargets());
                  }
                },
                CoreUtils.SAMETHREADEXECUTOR);
          }
        }
      } finally {
        SnapshotSiteProcessor.m_snapshotCreateFinishBarrier.await(120, TimeUnit.SECONDS);
      }
    } catch (TimeoutException e) {
      VoltDB.crashLocalVoltDB(
          "Timed out waiting 120 seconds for all threads to arrive and start snapshot", true, null);
    } catch (InterruptedException | BrokenBarrierException | IllegalArgumentException e) {
      result.addRow(context.getHostId(), hostname, "", "FAILURE", CoreUtils.throwableToString(e));
      earlyResultTable = result;
    }

    // If earlyResultTable is set, return here
    if (earlyResultTable != null) {
      if (runPostTasks) {
        // Need to run post-snapshot tasks before finishing
        SnapshotSiteProcessor.runPostSnapshotTasks(context);
      }
      return earlyResultTable;
    }

    if (block != 0) {
      HashSet<Exception> failures = Sets.newHashSet();
      String status = "SUCCESS";
      String err = "";
      try {
        // For blocking snapshot, propagate the error from deferred setup back to the client
        final DeferredSnapshotSetup deferredSnapshotSetup = m_deferredSetupFuture.get();
        if (deferredSnapshotSetup != null && deferredSnapshotSetup.getError() != null) {
          status = "FAILURE";
          err = deferredSnapshotSetup.getError().toString();
          failures.add(deferredSnapshotSetup.getError());
        }

        failures.addAll(context.getSiteSnapshotConnection().completeSnapshotWork());
        SnapshotSiteProcessor.runPostSnapshotTasks(context);
      } catch (Exception e) {
        status = "FAILURE";
        err = e.toString();
        failures.add(e);
      }
      final VoltTable blockingResult = SnapshotUtil.constructPartitionResultsTable();

      if (failures.isEmpty()) {
        blockingResult.addRow(
            context.getHostId(),
            hostname,
            CoreUtils.getSiteIdFromHSId(context.getSiteId()),
            status,
            err);
      } else {
        status = "FAILURE";
        for (Exception e : failures) {
          err = e.toString();
        }
        blockingResult.addRow(
            context.getHostId(),
            hostname,
            CoreUtils.getSiteIdFromHSId(context.getSiteId()),
            status,
            err);
      }
      return blockingResult;
    }

    return result;
  }
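The setup choreography above (m_snapshotCreateSetupBarrierActualAction plus two barriers) amounts to "every site waits; exactly one thread runs the setup". The JDK's CyclicBarrier expresses that idiom directly with a barrier action, sketched here for contrast; all names are invented:

import java.util.concurrent.BrokenBarrierException;
import java.util.concurrent.CyclicBarrier;

final class OneShotSetup {
  // The barrier action runs exactly once, on the last thread to arrive.
  private final CyclicBarrier barrier;

  OneShotSetup(int numSites, Runnable setup) {
    this.barrier = new CyclicBarrier(numSites, setup);
  }

  void arriveAndSetup() throws InterruptedException, BrokenBarrierException {
    barrier.await(); // every site blocks here; the barrier triggers setup once
  }
}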
Code example #28
  private void createSetupIv2(
      final String file_path,
      final String file_nonce,
      SnapshotFormat format,
      final long txnId,
      final Map<Integer, Long> partitionTransactionIds,
      String data,
      final SystemProcedureExecutionContext context,
      final VoltTable result,
      Map<String, Map<Integer, Pair<Long, Long>>> exportSequenceNumbers,
      SiteTracker tracker,
      HashinatorSnapshotData hashinatorData,
      long timestamp) {
    JSONObject jsData = null;
    if (data != null && !data.isEmpty()) {
      try {
        jsData = new JSONObject(data);
      } catch (JSONException e) {
        SNAP_LOG.error(String.format("JSON exception on snapshot data \"%s\".", data), e);
      }
    }

    SnapshotWritePlan plan;
    if (format == SnapshotFormat.NATIVE) {
      plan = new NativeSnapshotWritePlan();
    } else if (format == SnapshotFormat.CSV) {
      plan = new CSVSnapshotWritePlan();
    } else if (format == SnapshotFormat.STREAM) {
      plan = new StreamSnapshotWritePlan();
    } else if (format == SnapshotFormat.INDEX) {
      plan = new IndexSnapshotWritePlan();
    } else {
      throw new RuntimeException("BAD BAD BAD");
    }
    final Callable<Boolean> deferredSetup =
        plan.createSetup(
            file_path,
            file_nonce,
            txnId,
            partitionTransactionIds,
            jsData,
            context,
            result,
            exportSequenceNumbers,
            tracker,
            hashinatorData,
            timestamp);
    m_deferredSetupFuture =
        VoltDB.instance()
            .submitSnapshotIOWork(
                new DeferredSnapshotSetup(plan, deferredSetup, txnId, partitionTransactionIds));

    synchronized (m_createLock) {
      // Seems like this should be cleared out just in case
      // Log if there is actually anything to clear since it is unexpected
      if (!m_taskListsForHSIds.isEmpty()) {
        SNAP_LOG.warn("Found lingering snapshot tasks while setting up a snapshot");
      }
      m_taskListsForHSIds.clear();
      m_createSuccess.set(true);
      m_createResult.set(result);

      m_taskListsForHSIds.putAll(plan.getTaskListsForHSIds());

      // HACK HACK HACK.  If the task list is empty, this host has no work to do for
      // this snapshot.  We're going to create an empty list of tasks for one of the sites to do
      // so that we'll have a SnapshotSiteProcessor which will do the logSnapshotCompleteToZK.
      if (m_taskListsForHSIds.isEmpty()) {
        SNAP_LOG.debug(
            "Node had no snapshot work to do.  Creating a null task to drive completion.");
        m_taskListsForHSIds.put(context.getSiteId(), new ArrayDeque<SnapshotTableTask>());
      }
      SNAP_LOG.debug(
          "Planned tasks: "
              + CoreUtils.hsIdCollectionToString(plan.getTaskListsForHSIds().keySet()));
      SNAP_LOG.debug(
          "Created tasks for HSIds: "
              + CoreUtils.hsIdCollectionToString(m_taskListsForHSIds.keySet()));
    }
  }
Code example #29
File: CSVFileReader.java Project: jmptrader/voltdb
  @Override
  public void run() {
    List<String> lineList;

    // if header option is true, check whether csv first line is valid
    if (m_config.header) {
      if (!checkHeader()) {
        m_log.error(
            "In the CSV file "
                + m_config.file
                + ", the header "
                + m_listReader.getUntokenizedRow()
                + " does not match "
                + "an existing column in the table "
                + m_config.table
                + ".");
        System.exit(-1);
      }
    }

    while (m_config.limitrows-- > 0) {
      if (m_errHandler.hasReachedErrorLimit()) {
        break;
      }

      try {
        // Initial setting of m_totalLineCount
        if (m_listReader.getLineNumber() == 0) {
          m_totalLineCount.set(m_config.skip);
        } else {
          m_totalLineCount.set(m_listReader.getLineNumber());
        }
        long st = System.nanoTime();
        lineList = m_listReader.read();
        long end = System.nanoTime();
        m_parsingTime += (end - st);
        if (lineList == null) {
          if (m_totalLineCount.get() > m_listReader.getLineNumber()) {
            m_totalLineCount.set(m_listReader.getLineNumber());
          }
          break;
        }
        m_totalRowCount.incrementAndGet();

        if (lineList.isEmpty()) {
          continue;
        }

        String[] lineValues = lineList.toArray(new String[0]);
        String lineCheckResult;
        String[] reorderValues = new String[m_columnCount];
        if ((lineCheckResult = checkparams_trimspace_reorder(lineValues, reorderValues)) != null) {
          final RowWithMetaData metaData =
              new RowWithMetaData(m_listReader.getUntokenizedRow(), m_totalLineCount.get() + 1);
          if (m_errHandler.handleError(metaData, null, lineCheckResult)) {
            break;
          }
          continue;
        }

        RowWithMetaData lineData =
            new RowWithMetaData(m_listReader.getUntokenizedRow(), m_listReader.getLineNumber());
        m_loader.insertRow(lineData, reorderValues);
      } catch (SuperCsvException e) {
        // Catch rows that cannot be read by the SuperCSV m_listReader,
        // e.g. items without quotes when strictquotes is enabled.
        final RowWithMetaData metaData =
            new RowWithMetaData(m_listReader.getUntokenizedRow(), m_totalLineCount.get() + 1);
        if (m_errHandler.handleError(metaData, null, e.getMessage())) {
          break;
        }
      } catch (IOException ex) {
        m_log.error("Failed to read CSV line from file: " + ex);
        break;
      } catch (InterruptedException e) {
        m_log.error("CSVLoader interrupted: " + e);
        break;
      }
    }

    // Now wait for processors to see endOfData and count down. After that drain to finish all
    // callbacks
    try {
      m_log.debug("Waiting for CSVDataLoader to finish.");
      m_loader.close();
      m_log.debug("CSVDataLoader Done.");
    } catch (Exception ex) {
      m_log.warn(
          "Stopped processing because of connection error. "
              + "A report will be generated with what we processed so far. Error: "
              + ex);
    }
  }