Example #1
 public EventExecutorImpl() {
   /** Initialize the executor thread pool */
   System.out.println("init Thread Pool for " + Runtime.getRuntime().availableProcessors());
   this.executorService =
       Executors.newFixedThreadPool(
           Runtime.getRuntime().availableProcessors(),
           new EventThreadFactory(EventUtil.EventContainerName));
   //        this.executorService = Executors.newCachedThreadPool();
 }
Example #2
/**
 * An abstraction for a Scheduler functionality - which can later be replaced by a light-weight
 * Thread
 */
final class Timer {
  private static final ScheduledThreadPoolExecutor executor =
      new ScheduledThreadPoolExecutor(Math.min(Runtime.getRuntime().availableProcessors(), 2));

  private Timer() {}

  /** @param runFrequency the run frequency; only second-level ({@code TimeUnit.SECONDS}) granularity is supported */
  public static void schedule(Runnable runnable, Duration runFrequency, TimerType timerType) {
    switch (timerType) {
      case OneTimeRun:
        long seconds = runFrequency.getSeconds();
        if (seconds > 0) executor.schedule(runnable, seconds, TimeUnit.SECONDS);
        else executor.schedule(runnable, runFrequency.toMillis(), TimeUnit.MILLISECONDS);
        break;

      case RepeatRun:
        executor.scheduleWithFixedDelay(
            runnable, runFrequency.getSeconds(), runFrequency.getSeconds(), TimeUnit.SECONDS);
        break;

      default:
        throw new UnsupportedOperationException("Unsupported timer pattern.");
    }
  }
}
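A minimal usage sketch for the Timer above (the TimerType values OneTimeRun and RepeatRun are the ones handled in the switch; the runnables here are placeholders):

// Run a cleanup task once, five seconds from now.
Timer.schedule(() -> System.out.println("cleanup"), Duration.ofSeconds(5), TimerType.OneTimeRun);
// Log a heartbeat every 30 seconds (fixed delay between runs).
Timer.schedule(() -> System.out.println("heartbeat"), Duration.ofSeconds(30), TimerType.RepeatRun);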
Example #3
  private ExecutorService createCustomExecutorService(int poolSize, final String method) {
    int coreSize = Runtime.getRuntime().availableProcessors();
    if (poolSize < coreSize) {
      coreSize = poolSize;
    }
    ThreadFactory tf =
        new ThreadFactory() {
          public Thread newThread(Runnable r) {
            Thread t =
                new Thread(r, "thread created at ShardSqlSessionTemplate method [" + method + "]");
            t.setDaemon(true);
            return t;
          }
        };
    BlockingQueue<Runnable> queueToUse = new LinkedBlockingQueue<Runnable>();
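    // Note: with an unbounded queue, the pool never grows past coreSize, so the
    // CallerRunsPolicy below only takes effect for tasks submitted after shutdown.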
    final ThreadPoolExecutor executor =
        new ThreadPoolExecutor(
            coreSize,
            poolSize,
            60,
            TimeUnit.SECONDS,
            queueToUse,
            tf,
            new ThreadPoolExecutor.CallerRunsPolicy());

    return executor;
  }
Example #4
  private ExecutorService createExecutorForSpecificDataSource(ShardDataSourceSpec dataSourceSpec) {
    final String identity = dataSourceSpec.getIdentity();
    final ExecutorService executor =
        createCustomExecutorService(
            dataSourceSpec.getPoolSize(),
            "createExecutorForSpecificDataSource-" + identity + " data source");
    // 1. register executor for disposing explicitly
    internalExecutorServiceRegistry.add(executor);
    // 2. dispose executor implicitly
    Runtime.getRuntime()
        .addShutdownHook(
            new Thread() {
              @Override
              public void run() {
                if (executor == null) {
                  return;
                }

                try {
                  executor.shutdown();
                  executor.awaitTermination(5, TimeUnit.MINUTES);
                } catch (InterruptedException e) {
                  logger.warn("interrupted when shuting down the query executor:\n{}", e);
                }
              }
            });
    return executor;
  }
Example #5
  /**
   * This is called when JPM runs in the background to start jobs
   *
   * @throws Exception
   */
  public void daemon() throws Exception {
    Runtime.getRuntime()
        .addShutdownHook(
            new Thread("Daemon shutdown") {
              public void run() {

                for (Service service : startedByDaemon) {
                  try {
                    reporter.error("Stopping " + service);
                    service.stop();
                    reporter.error("Stopped " + service);
                  } catch (Exception e) {
                    // Ignore
                  }
                }
              }
            });
    List<ServiceData> services = getServices();
    Map<String, ServiceData> map = new HashMap<String, ServiceData>();
    for (ServiceData d : services) {
      map.put(d.name, d);
    }
    List<ServiceData> start = new ArrayList<ServiceData>();
    Set<ServiceData> set = new HashSet<ServiceData>();
    for (ServiceData sd : services) {
      checkStartup(map, start, sd, set);
    }

    if (start.isEmpty()) reporter.warning("No services to start");

    for (ServiceData sd : start) {
      try {
        Service service = getService(sd.name);
        reporter.trace("Starting " + service);
        String result = service.start();
        if (result != null) reporter.error("Started error " + result);
        else startedByDaemon.add(service);
        reporter.trace("Started " + service);
      } catch (Exception e) {
        reporter.error("Cannot start daemon %s, due to %s", sd.name, e);
      }
    }

    while (true) {
      for (Service sd : startedByDaemon) {
        try {
          if (!sd.isRunning()) {
            reporter.error("Starting due to failure " + sd);
            String result = sd.start();
            if (result != null) reporter.error("Started error " + result);
          }
        } catch (Exception e) {
          reporter.error("Cannot start daemon %s, due to %s", sd, e);
        }
      }
      Thread.sleep(10000);
    }
  }
Example #6
  /**
   * Calls the garbage collector and waits.
   *
   * @throws Exception if any thread has interrupted the current thread while waiting.
   */
  private void gc() throws Exception {
    Runtime rt = Runtime.getRuntime();

    long freeMem0 = rt.freeMemory();
    long freeMem = Long.MAX_VALUE;

    int cnt = 0;

    while (freeMem0 < freeMem && cnt < GC_CALL_CNT) {
      System.gc();

      U.sleep(WAIT_TIME);

      cnt++;

      freeMem = freeMem0;
      freeMem0 = rt.freeMemory();
    }
  }
Example #7
  @Test
  @Ignore
  public void testCHMAcquirePerf()
      throws IOException, ClassNotFoundException, IllegalAccessException, InstantiationException,
          InterruptedException {
    for (int runs : new int[] {10, 50, 250, 1000, 2500}) {
      System.out.println("Testing " + runs + " million entries");
      final long entries = runs * 1000 * 1000L;
      final ConcurrentMap<String, AtomicInteger> map =
          new ConcurrentHashMap<String, AtomicInteger>((int) (entries * 5 / 4), 1.0f, 1024);

      int procs = Runtime.getRuntime().availableProcessors();
      int threads = procs * 2;
      int count = runs > 500 ? runs > 1200 ? 1 : 2 : 3;
      final int independence = Math.min(procs, runs > 500 ? 8 : 4);
      for (int j = 0; j < count; j++) {
        long start = System.currentTimeMillis();
        ExecutorService es = Executors.newFixedThreadPool(procs);
        for (int i = 0; i < threads; i++) {
          final int t = i;
          es.submit(
              new Runnable() {
                @Override
                public void run() {
                  StringBuilder sb = new StringBuilder();
                  int next = 50 * 1000 * 1000;
                  // use a factor to give up to 10 digit numbers.
                  int factor = Math.max(1, (int) ((10 * 1000 * 1000 * 1000L - 1) / entries));
                  for (long i = t % independence; i < entries; i += independence) {
                    sb.setLength(0);
                    sb.append("u:");
                    sb.append(i * factor);
                    String key = sb.toString();
                    AtomicInteger count = map.get(key);
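                    // Note: this check-then-put is racy; two threads can both insert a new
                    // counter and an increment can be lost (putIfAbsent would avoid that).
                    // The imprecision is acceptable for a rough throughput benchmark.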
                    if (count == null) {
                      map.put(key, new AtomicInteger());
                      count = map.get(key);
                    }
                    count.getAndIncrement();
                    if (t == 0 && i == next) {
                      System.out.println(i);
                      next += 50 * 1000 * 1000;
                    }
                  }
                }
              });
        }
        es.shutdown();
        es.awaitTermination(10, TimeUnit.MINUTES);
        printStatus();
        long time = System.currentTimeMillis() - start;
        System.out.printf("Throughput %.1f M ops/sec%n", threads * entries / 1000.0 / time);
      }
    }
  }
Example #8
  private void checkAndLaunchUpdate() {
    Log.i(LOG_FILE_NAME, "Checking for an update");

    int statusCode = 8; // UNEXPECTED_ERROR
    File baseUpdateDir = null;
    if (Build.VERSION.SDK_INT >= 8)
      baseUpdateDir = getExternalFilesDir(Environment.DIRECTORY_DOWNLOADS);
    else baseUpdateDir = new File(Environment.getExternalStorageDirectory().getPath(), "download");

    File updateDir = new File(new File(baseUpdateDir, "updates"), "0");

    File updateFile = new File(updateDir, "update.apk");
    File statusFile = new File(updateDir, "update.status");

    if (!statusFile.exists() || !readUpdateStatus(statusFile).equals("pending")) return;

    if (!updateFile.exists()) return;

    Log.i(LOG_FILE_NAME, "Update is available!");

    // Launch APK
    File updateFileToRun = new File(updateDir, getPackageName() + "-update.apk");
    try {
      if (updateFile.renameTo(updateFileToRun)) {
        String amCmd =
            "/system/bin/am start -a android.intent.action.VIEW "
                + "-n com.android.packageinstaller/.PackageInstallerActivity -d file://"
                + updateFileToRun.getPath();
        Log.i(LOG_FILE_NAME, amCmd);
        Runtime.getRuntime().exec(amCmd);
        statusCode = 0; // OK
      } else {
        Log.i(LOG_FILE_NAME, "Cannot rename the update file!");
        statusCode = 7; // WRITE_ERROR
      }
    } catch (Exception e) {
      Log.i(LOG_FILE_NAME, "error launching installer to update", e);
    }

    // Update the status file
    String status = statusCode == 0 ? "succeeded\n" : "failed: " + statusCode + "\n";

    OutputStream outStream;
    try {
      byte[] buf = status.getBytes("UTF-8");
      outStream = new FileOutputStream(statusFile);
      outStream.write(buf, 0, buf.length);
      outStream.close();
    } catch (Exception e) {
      Log.i(LOG_FILE_NAME, "error writing status file", e);
    }

    if (statusCode == 0) System.exit(0);
  }
Example #9
 protected void die(ShellBoltException exception) {
   String processInfo =
       _process.getProcessInfoString() + _process.getProcessTerminationInfoString();
   _exception = new RuntimeException(processInfo, exception);
   LOG.error("Halting process: ShellBolt died.", _exception);
   _collector.reportError(_exception);
   if (_running
       || (exception.getCause()
           instanceof Error)) { // don't exit if not running, unless it is an Error
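     // halt() terminates the JVM immediately, without running shutdown hooks.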
     Runtime.getRuntime().halt(11);
   }
 }
Example #10
  /**
   * Create a pool with the specified number of threads.
   *
   * @param nThreads the number of threads in the pool
   */
  public WorkerPool(int nThreads) {
    executorService = Executors.newFixedThreadPool(nThreads);

    Runtime.getRuntime()
        .addShutdownHook(
            new Thread() {
              @Override
              public void run() {
                shutdownAndAwaitTermination(10);
              }
            });
  }
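The shutdown hook above calls shutdownAndAwaitTermination, which is not included in this snippet; a minimal sketch of the standard two-phase shutdown idiom it presumably follows (the timeout is taken to be in seconds, and this body is an assumption, not the original implementation):

  private void shutdownAndAwaitTermination(int timeoutSeconds) {
    executorService.shutdown(); // stop accepting new tasks
    try {
      if (!executorService.awaitTermination(timeoutSeconds, TimeUnit.SECONDS)) {
        executorService.shutdownNow(); // cancel tasks still running
        if (!executorService.awaitTermination(timeoutSeconds, TimeUnit.SECONDS)) {
          System.err.println("worker pool did not terminate");
        }
      }
    } catch (InterruptedException e) {
      executorService.shutdownNow();
      Thread.currentThread().interrupt();
    }
  }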
Example #11
  @Test
  @Ignore
  public void testAcquirePerf()
      throws IOException, ClassNotFoundException, IllegalAccessException, InstantiationException,
          InterruptedException {
    //        int runs = Integer.getInteger("runs", 10);
    for (int runs : new int[] {10, 50, 250, 1000, 2500}) {
      final long entries = runs * 1000 * 1000L;
      final SharedHashMap<CharSequence, LongValue> map = getSharedMap(entries * 4 / 3, 1024, 24);

      int procs = Runtime.getRuntime().availableProcessors();
      int threads = procs * 2;
      int count = runs > 500 ? runs > 1200 ? 1 : 2 : 3;
      final int independence = Math.min(procs, runs > 500 ? 8 : 4);
      for (int j = 0; j < count; j++) {
        long start = System.currentTimeMillis();
        ExecutorService es = Executors.newFixedThreadPool(procs);
        for (int i = 0; i < threads; i++) {
          final int t = i;
          es.submit(
              new Runnable() {
                @Override
                public void run() {
                  LongValue value = nativeLongValue();
                  StringBuilder sb = new StringBuilder();
                  int next = 50 * 1000 * 1000;
                  // use a factor to give up to 10 digit numbers.
                  int factor = Math.max(1, (int) ((10 * 1000 * 1000 * 1000L - 1) / entries));
                  for (long i = t % independence; i < entries; i += independence) {
                    sb.setLength(0);
                    sb.append("u:");
                    sb.append(i * factor);
                    map.acquireUsing(sb, value);
                    long n = value.addAtomicValue(1);
                    assert n >= 0 && n < 1000 : "Counter corrupted " + n;
                    if (t == 0 && i == next) {
                      System.out.println(i);
                      next += 50 * 1000 * 1000;
                    }
                  }
                }
              });
        }
        es.shutdown();
        es.awaitTermination(runs / 10 + 1, TimeUnit.MINUTES);
        long time = System.currentTimeMillis() - start;
        System.out.printf(
            "Throughput %.1f M ops/sec%n", threads * entries / independence / 1000.0 / time);
      }
      printStatus();
      map.close();
    }
  }
Example #12
 public static boolean init() {
   synchronized (cudaEngines) {
     System.err.println("---------Initializing Cuda----------------");
     try {
       extractAndLoadNativeLibs();
       JCudaDriver.setExceptionsEnabled(true);
       JCudaDriver.cuInit(0);
       compileKernelsPtx();
       // Obtain the number of devices
       int deviceCountArray[] = {0};
       JCudaDriver.cuDeviceGetCount(deviceCountArray);
       availableDevicesNb = deviceCountArray[0];
       if (availableDevicesNb == 0) return false;
       availableDevicesNb = NB_OF_DEVICE_TO_USE; // TODO
       initialization = Executors.newCachedThreadPool();
       System.out.println("Found " + availableDevicesNb + " GPU devices");
       for (int i = 0 /*-NB_OF_DEVICE_TO_USE*/; i < availableDevicesNb; i++) {
         final int index = i;
         Future<?> initJob =
             initialization.submit(
                 new Runnable() {
                   public void run() {
                     System.err.println("Initializing device n°" + index);
                     cudaEngines.put(index, new CudaEngine(index));
                   }
                 });
         initJob.get();
        }
        // Shut down the init pool only after every device has been initialized;
        // shutting it down inside the loop would reject the remaining submissions.
        initialization.shutdown();
     } catch (InterruptedException
         | ExecutionException
         | IOException
         | CudaException
         | UnsatisfiedLinkError e) {
       e.printStackTrace();
       System.err.println("---------Cannot initialize Cuda !!! ----------------");
       return false;
     }
     Runtime.getRuntime()
         .addShutdownHook(
             new Thread() {
               @Override
               public void run() {
                 CudaEngine.stop();
               }
             });
     System.out.println("---------Cuda Initialized----------------");
     return true;
   }
 }
Example #13
  private static void build(Iterable<String> in) throws IOException, InterruptedException {
    final List<String> args = newArrayList(in);
    Collections.sort(args);

    final int threads = Runtime.getRuntime().availableProcessors();

    final ExecutorService ex = Executors.newFixedThreadPool(threads);

    final String base = "base";
    new WithVm("base").createIfNotPresent();

    for (String pkg : args)
      ex.submit(
          () -> {
            final WithVm newVm = new WithVm("fbuild-" + pkg, TimeUnit.MINUTES.toMillis(30));

            final File rbuild = new File("wip-" + pkg + ".rbuild");
            try {
              newVm.cloneFrom(base);
              newVm.start();
              newVm.inTee(rbuild, "apt-get", "-oAPT::Get::Only-Source=true", "source", pkg);
              newVm.inTee(rbuild, "apt-get", "build-dep", "-y", "--force-yes", pkg);
              newVm.inTee(rbuild, "ifdown", "eth0");
              final boolean success =
                  0
                      == newVm.inTee(
                          rbuild, "sh", "-c", "cd " + pkg + "-* && dpkg-buildpackage -us -uc");
              newVm.stopNow();
              if (success) {
                rbuild.renameTo(new File("success-" + pkg + ".rbuild"));
                newVm.destroy();
                System.out.println("success: " + pkg);
              } else {
                rbuild.renameTo(new File("failure-" + pkg + ".rbuild"));
                System.out.println("failure: " + pkg);
              }
            } catch (Exception e) {
              rbuild.renameTo(new File("error-" + pkg + ".rbuild"));
              System.err.println("build error: " + pkg);
              e.printStackTrace();
              newVm.stopNow();
            }
            return null;
          });

    ex.shutdown();
    ex.awaitTermination(Long.MAX_VALUE, TimeUnit.MILLISECONDS);
  }
Example #14
  public static void main(final String[] args) throws Exception {

    final int length = (1 << 14);
    final ICircularLongsBuffer buffer = new UnsafeCircularLongsBuffer(length);

    final File file = new File("output.log");
    log.info("File " + file);
    file.delete();
    //		file.deleteOnExit();
    final FastLoggerImpl logger =
        new FastLoggerImpl(
            new ThreadFactory() {
              @Override
              public Thread newThread(final Runnable r) {
                return new Thread(r);
              }
            },
            buffer,
            WaitingStrategy.SPINNING,
            new RawFileWriter(file));

    logger.startDraining();

    final int workers = Runtime.getRuntime().availableProcessors() - 1;
    final ExecutorService workersPool = Executors.newFixedThreadPool(workers);

    for (int i = 0; i < workers; i++) {
      workersPool.submit(
          new Runnable() {
            @Override
            public void run() {
              try {
                for (int i = 0; i < 100000000; i++) {
                  logger.log("Message %f -- %d ").with(25.98 + i).with(100 - i).submit();
                }
              } catch (Exception e) {
                log.error("Error", e);
              }
            }
          });
    }

    workersPool.shutdown();
    workersPool.awaitTermination(1000, TimeUnit.SECONDS);
    log.info("Finished");
    System.exit(1);
  }
Example #15
  /**
   * Create a pool with the specified number of threads.
   *
   * @param nThreads the number of threads in the pool
   */
  public WorkerPool(int nThreads) {
    LOGGER.debug("initializing worker pool with %d threads", nThreads);

    executorService =
        LOGGER.isDebugEnabled()
            ? new MetricReportingExecutorService(LOGGER, nThreads)
            : Executors.newFixedThreadPool(nThreads);

    Runtime.getRuntime()
        .addShutdownHook(
            new Thread() {
              @Override
              public void run() {
                shutdownAndAwaitTermination(10);
              }
            });
  }
Example #16
final class CarRace implements Runnable {
  private final Logger log = LogManager.getLogger(this.getClass().getName());
  private final int numberOfCars = Runtime.getRuntime().availableProcessors();
  private final BlockingQueue<Car> fleet;
  private final int trackLength;
  private final ExecutorService pool;
  private final Random random;
  private CyclicBarrier barrier;

  public CarRace() {
    fleet = new LinkedBlockingQueue<Car>(numberOfCars);
    trackLength = 20000;
    pool = Executors.newFixedThreadPool(numberOfCars);
    random = new Random();
  }

  public int getTrackLength() {
    return this.trackLength;
  }

  public CyclicBarrier getBarrier() {
    return this.barrier;
  }

  public BlockingQueue<Car> getFleet() {
    return this.fleet;
  }

  public void start() throws InterruptedException {
    barrier = new CyclicBarrier(numberOfCars, this);
    for (int i = 0; i < numberOfCars; i++) {
      pool.execute(new Car("Car" + i, 100 + random.nextInt(100), this));
    }
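    // Note: awaitTermination does not stop the pool; without a preceding pool.shutdown()
    // this simply blocks until the 300 ms timeout elapses and then returns false.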
    pool.awaitTermination(300, TimeUnit.MILLISECONDS);
  }

  public void report() {
    this.log.info("fleet: " + Ordering.natural().immutableSortedCopy(fleet));
  }

  @Override
  public void run() {
    report();
  }
}
Example #17
  public DLDFSolver(
      int[] s, int[] learning_startfactors, boolean singleThreaded, boolean useBacktracking) {
    log_info("Heuristic weights: %s", Arrays.deepToString(choicefactors));
    log_info("Reported number of processors: %d", Runtime.getRuntime().availableProcessors());
    nThreads = (singleThreaded || THREAD_COUNT == 1) ? 1 : THREAD_COUNT;
    log_info("No. of threads to be used: %d\n", nThreads);
    this.useBacktracking = useBacktracking;
    log_info("Use backtracking?: %s", useBacktracking ? "Yes" : "No");

    this.tileSequence = new int[s.length];
    System.arraycopy(s, 0, this.tileSequence, 0, s.length);

    if (learning_startfactors != null) {
      if (learning_startfactors.length != learning_starts.length) {
        throw new IllegalArgumentException("Invalid learning factor size");
      }
      System.arraycopy(learning_startfactors, 0, learning_starts, 0, learning_starts.length);
    }
  }
Example #18
  /**
   * Checks if the address is reachable using the one-argument InetAddress.isReachable() check,
   * falling back to the ping command if that fails.
   *
   * @param addr Address to check.
   * @param reachTimeout Timeout for the check.
   * @return {@code True} if address is reachable.
   */
  public static boolean reachableByPing(InetAddress addr, int reachTimeout) {
    try {
      if (addr.isReachable(reachTimeout)) return true;

      String cmd = String.format("ping -%s 1 %s", U.isWindows() ? "n" : "c", addr.getHostAddress());

      Process myProc = Runtime.getRuntime().exec(cmd);

      myProc.waitFor();

      return myProc.exitValue() == 0;
    } catch (IOException ignore) {
      return false;
    } catch (InterruptedException ignored) {
      Thread.currentThread().interrupt();

      return false;
    }
  }
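For comparison, a sketch of the same ping fallback written with ProcessBuilder and a bounded wait (Process.waitFor(timeout, unit), Java 8+). pingOnce is a hypothetical helper, U.isWindows() is reused from the snippet above, and reachTimeout is assumed to be in milliseconds, as InetAddress.isReachable expects:

  private static boolean pingOnce(InetAddress addr, int reachTimeout)
      throws IOException, InterruptedException {
    // Build the ping command as an argument list instead of a single command string.
    Process proc =
        new ProcessBuilder("ping", U.isWindows() ? "-n" : "-c", "1", addr.getHostAddress())
            .start();
    if (!proc.waitFor(reachTimeout, TimeUnit.MILLISECONDS)) {
      proc.destroyForcibly(); // timed out waiting for ping to finish
      return false;
    }
    return proc.exitValue() == 0;
  }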
Example #19
 static {
   int nt = Runtime.getRuntime().availableProcessors();
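   // clamp the detected processor count to the range [1, 4]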
   nt = nt <= 1 ? 1 : nt > 4 ? 4 : nt;
   THREAD_COUNT = nt;
 }
Example #20
/**
 * A FullPrunedBlockChain works in conjunction with a {@link FullPrunedBlockStore} to verify all the
 * rules of the Bitcoin system, with the downside being a large cost in system resources. Fully
 * verifying means all unspent transaction outputs are stored. Once a transaction output is spent
 * and that spend is buried deep enough, the data related to it is deleted to ensure disk space
 * usage doesn't grow forever. For this reason a pruning node cannot serve the full block chain to
 * other clients, but it nevertheless provides the same security guarantees as a regular Satoshi
 * client does.
 */
public class FullPrunedBlockChain extends AbstractBlockChain {
  private static final Logger log = LoggerFactory.getLogger(FullPrunedBlockChain.class);

  /** Keeps a map of block hashes to StoredBlocks. */
  protected final FullPrunedBlockStore blockStore;

  // Whether or not to execute scriptPubKeys before accepting a transaction (i.e. check signatures).
  private boolean runScripts = true;

  /**
   * Constructs a BlockChain connected to the given wallet and store. To obtain a {@link Wallet} you
   * can construct one from scratch, or you can deserialize a saved wallet from disk using {@link
   * Wallet#loadFromFile(java.io.File)}
   */
  public FullPrunedBlockChain(
      NetworkParameters params, Wallet wallet, FullPrunedBlockStore blockStore)
      throws BlockStoreException {
    this(params, new ArrayList<BlockChainListener>(), blockStore);
    if (wallet != null) addWallet(wallet);
  }

  /**
   * Constructs a BlockChain that has no wallet at all. This is helpful when you don't actually care
   * about sending and receiving coins but rather, just want to explore the network data structures.
   */
  public FullPrunedBlockChain(NetworkParameters params, FullPrunedBlockStore blockStore)
      throws BlockStoreException {
    this(params, new ArrayList<BlockChainListener>(), blockStore);
  }

  /** Constructs a BlockChain connected to the given list of wallets and a store. */
  public FullPrunedBlockChain(
      NetworkParameters params, List<BlockChainListener> listeners, FullPrunedBlockStore blockStore)
      throws BlockStoreException {
    super(params, listeners, blockStore);
    this.blockStore = blockStore;
    // Ignore upgrading for now
    this.chainHead = blockStore.getVerifiedChainHead();
  }

  @Override
  protected StoredBlock addToBlockStore(
      StoredBlock storedPrev, Block header, TransactionOutputChanges txOutChanges)
      throws BlockStoreException, VerificationException {
    StoredBlock newBlock = storedPrev.build(header);
    blockStore.put(newBlock, new StoredUndoableBlock(newBlock.getHeader().getHash(), txOutChanges));
    return newBlock;
  }

  @Override
  protected StoredBlock addToBlockStore(StoredBlock storedPrev, Block block)
      throws BlockStoreException, VerificationException {
    StoredBlock newBlock = storedPrev.build(block);
    blockStore.put(
        newBlock, new StoredUndoableBlock(newBlock.getHeader().getHash(), block.transactions));
    return newBlock;
  }

  @Override
  protected void rollbackBlockStore(int height) throws BlockStoreException {
    throw new BlockStoreException("Unsupported");
  }

  @Override
  protected boolean shouldVerifyTransactions() {
    return true;
  }

  /**
   * Whether or not to run scripts whilst accepting blocks (i.e. checking signatures, for most
   * transactions). If you're accepting data from an untrusted node, such as one found via the P2P
   * network, this should be set to true (which is the default). If you're downloading a chain from
   * a node you control, script execution is redundant because you know the connected node won't
   * relay bad data to you. In that case it's safe to set this to false and obtain a significant
   * speedup.
   */
  public void setRunScripts(boolean value) {
    this.runScripts = value;
  }

  // TODO: Remove lots of duplicated code in the two connectTransactions

  // TODO: execute in order of largest transaction (by input count) first
  ExecutorService scriptVerificationExecutor =
      Executors.newFixedThreadPool(Runtime.getRuntime().availableProcessors());

  /** A job submitted to the executor which verifies signatures. */
  private static class Verifier implements Callable<VerificationException> {
    final Transaction tx;
    final List<Script> prevOutScripts;
    final boolean enforcePayToScriptHash;

    public Verifier(
        final Transaction tx,
        final List<Script> prevOutScripts,
        final boolean enforcePayToScriptHash) {
      this.tx = tx;
      this.prevOutScripts = prevOutScripts;
      this.enforcePayToScriptHash = enforcePayToScriptHash;
    }

    @Nullable
    @Override
    public VerificationException call() throws Exception {
      try {
        ListIterator<Script> prevOutIt = prevOutScripts.listIterator();
        for (int index = 0; index < tx.getInputs().size(); index++) {
          tx.getInputs()
              .get(index)
              .getScriptSig()
              .correctlySpends(tx, index, prevOutIt.next(), enforcePayToScriptHash);
        }
      } catch (VerificationException e) {
        return e;
      }
      return null;
    }
  }

  @Override
  protected TransactionOutputChanges connectTransactions(int height, Block block)
      throws VerificationException, BlockStoreException {
    checkState(lock.isHeldByCurrentThread());
    if (block.transactions == null)
      throw new RuntimeException(
          "connectTransactions called with Block that didn't have transactions!");
    if (!params.passesCheckpoint(height, block.getHash()))
      throw new VerificationException("Block failed checkpoint lockin at " + height);

    blockStore.beginDatabaseBatchWrite();

    LinkedList<StoredTransactionOutput> txOutsSpent = new LinkedList<StoredTransactionOutput>();
    LinkedList<StoredTransactionOutput> txOutsCreated = new LinkedList<StoredTransactionOutput>();
    long sigOps = 0;
    final boolean enforcePayToScriptHash =
        block.getTimeSeconds() >= NetworkParameters.BIP16_ENFORCE_TIME;

    if (scriptVerificationExecutor.isShutdown())
      scriptVerificationExecutor =
          Executors.newFixedThreadPool(Runtime.getRuntime().availableProcessors());

    List<Future<VerificationException>> listScriptVerificationResults =
        new ArrayList<Future<VerificationException>>(block.transactions.size());
    try {
      if (!params.isCheckpoint(height)) {
        // BIP30 violator blocks are ones that contain a duplicated transaction. They are all in the
        // checkpoints list and we therefore only check non-checkpoints for duplicated transactions
        // here. See the
        // BIP30 document for more details on this:
        // https://github.com/bitcoin/bips/blob/master/bip-0030.mediawiki
        for (Transaction tx : block.transactions) {
          Sha256Hash hash = tx.getHash();
          // If we already have unspent outputs for this hash, we saw the tx already. Either the
          // block is
          // being added twice (bug) or the block is a BIP30 violator.
          if (blockStore.hasUnspentOutputs(hash, tx.getOutputs().size()))
            throw new VerificationException("Block failed BIP30 test!");
          if (enforcePayToScriptHash) // We already check non-BIP16 sigops in
            // Block.verifyTransactions(true)
            sigOps += tx.getSigOpCount();
        }
      }
      Coin totalFees = Coin.ZERO;
      Coin coinbaseValue = null;
      for (final Transaction tx : block.transactions) {
        boolean isCoinBase = tx.isCoinBase();
        Coin valueIn = Coin.ZERO;
        Coin valueOut = Coin.ZERO;
        final List<Script> prevOutScripts = new LinkedList<Script>();
        if (!isCoinBase) {
          // For each input of the transaction remove the corresponding output from the set of
          // unspent
          // outputs.
          for (int index = 0; index < tx.getInputs().size(); index++) {
            TransactionInput in = tx.getInputs().get(index);
            StoredTransactionOutput prevOut =
                blockStore.getTransactionOutput(
                    in.getOutpoint().getHash(), in.getOutpoint().getIndex());
            if (prevOut == null)
              throw new VerificationException(
                  "Attempted to spend a non-existent or already spent output!");
            // Coinbases can't be spent until they mature, to avoid re-orgs destroying entire
            // transaction
            // chains. The assumption is there will ~never be re-orgs deeper than the spendable
            // coinbase
            // chain depth.
            if (height - prevOut.getHeight() < params.getSpendableCoinbaseDepth())
              throw new VerificationException(
                  "Tried to spend coinbase at depth " + (height - prevOut.getHeight()));
            // TODO: Check we're not spending the genesis transaction here. Satoshis code won't
            // allow it.
            valueIn = valueIn.add(prevOut.getValue());
            if (enforcePayToScriptHash) {
              if (new Script(prevOut.getScriptBytes()).isPayToScriptHash())
                sigOps += Script.getP2SHSigOpCount(in.getScriptBytes());
              if (sigOps > Block.MAX_BLOCK_SIGOPS)
                throw new VerificationException("Too many P2SH SigOps in block");
            }

            prevOutScripts.add(new Script(prevOut.getScriptBytes()));

            // in.getScriptSig().correctlySpends(tx, index, new Script(params,
            // prevOut.getScriptBytes(), 0, prevOut.getScriptBytes().length));

            blockStore.removeUnspentTransactionOutput(prevOut);
            txOutsSpent.add(prevOut);
          }
        }
        Sha256Hash hash = tx.getHash();
        for (TransactionOutput out : tx.getOutputs()) {
          valueOut = valueOut.add(out.getValue());
          // For each output, add it to the set of unspent outputs so it can be consumed in future.
          StoredTransactionOutput newOut =
              new StoredTransactionOutput(
                  hash, out.getIndex(), out.getValue(), height, isCoinBase, out.getScriptBytes());
          blockStore.addUnspentTransactionOutput(newOut);
          txOutsCreated.add(newOut);
        }
        // All values were already checked for being non-negative (as it is verified in
        // Transaction.verify())
        // but we check again here just for defence in depth. Transactions with zero output value
        // are OK.
        if (valueOut.signum() < 0 || valueOut.compareTo(NetworkParameters.MAX_MONEY) > 0)
          throw new VerificationException("Transaction output value out of range");
        if (isCoinBase) {
          coinbaseValue = valueOut;
        } else {
          if (valueIn.compareTo(valueOut) < 0 || valueIn.compareTo(NetworkParameters.MAX_MONEY) > 0)
            throw new VerificationException("Transaction input value out of range");
          totalFees = totalFees.add(valueIn.subtract(valueOut));
        }

        if (!isCoinBase && runScripts) {
          // Because correctlySpends modifies transactions, this must come after we are done with tx
          FutureTask<VerificationException> future =
              new FutureTask<VerificationException>(
                  new Verifier(tx, prevOutScripts, enforcePayToScriptHash));
          scriptVerificationExecutor.execute(future);
          listScriptVerificationResults.add(future);
        }
      }
      if (totalFees.compareTo(NetworkParameters.MAX_MONEY) > 0
          || block.getBlockInflation(height).add(totalFees).compareTo(coinbaseValue) < 0)
        throw new VerificationException("Transaction fees out of range");
      for (Future<VerificationException> future : listScriptVerificationResults) {
        VerificationException e;
        try {
          e = future.get();
        } catch (InterruptedException thrownE) {
          throw new RuntimeException(thrownE); // Shouldn't happen
        } catch (ExecutionException thrownE) {
          log.error("Script.correctlySpends threw a non-normal exception: " + thrownE.getCause());
          throw new VerificationException(
              "Bug in Script.correctlySpends, likely script malformed in some new and interesting way.",
              thrownE);
        }
        if (e != null) throw e;
      }
    } catch (VerificationException e) {
      scriptVerificationExecutor.shutdownNow();
      blockStore.abortDatabaseBatchWrite();
      throw e;
    } catch (BlockStoreException e) {
      scriptVerificationExecutor.shutdownNow();
      blockStore.abortDatabaseBatchWrite();
      throw e;
    }
    return new TransactionOutputChanges(txOutsCreated, txOutsSpent);
  }

  @Override
  /** Used during reorgs to connect a block previously on a fork */
  protected synchronized TransactionOutputChanges connectTransactions(StoredBlock newBlock)
      throws VerificationException, BlockStoreException, PrunedException {
    checkState(lock.isHeldByCurrentThread());
    if (!params.passesCheckpoint(newBlock.getHeight(), newBlock.getHeader().getHash()))
      throw new VerificationException("Block failed checkpoint lockin at " + newBlock.getHeight());

    blockStore.beginDatabaseBatchWrite();
    StoredUndoableBlock block = blockStore.getUndoBlock(newBlock.getHeader().getHash());
    if (block == null) {
      // We're trying to re-org too deep and the data needed has been deleted.
      blockStore.abortDatabaseBatchWrite();
      throw new PrunedException(newBlock.getHeader().getHash());
    }
    TransactionOutputChanges txOutChanges;
    try {
      List<Transaction> transactions = block.getTransactions();
      if (transactions != null) {
        LinkedList<StoredTransactionOutput> txOutsSpent = new LinkedList<StoredTransactionOutput>();
        LinkedList<StoredTransactionOutput> txOutsCreated =
            new LinkedList<StoredTransactionOutput>();
        long sigOps = 0;
        final boolean enforcePayToScriptHash =
            newBlock.getHeader().getTimeSeconds() >= NetworkParameters.BIP16_ENFORCE_TIME;
        if (!params.isCheckpoint(newBlock.getHeight())) {
          for (Transaction tx : transactions) {
            Sha256Hash hash = tx.getHash();
            if (blockStore.hasUnspentOutputs(hash, tx.getOutputs().size()))
              throw new VerificationException("Block failed BIP30 test!");
          }
        }
        Coin totalFees = Coin.ZERO;
        Coin coinbaseValue = null;

        if (scriptVerificationExecutor.isShutdown())
          scriptVerificationExecutor =
              Executors.newFixedThreadPool(Runtime.getRuntime().availableProcessors());
        List<Future<VerificationException>> listScriptVerificationResults =
            new ArrayList<Future<VerificationException>>(transactions.size());
        for (final Transaction tx : transactions) {
          boolean isCoinBase = tx.isCoinBase();
          Coin valueIn = Coin.ZERO;
          Coin valueOut = Coin.ZERO;
          final List<Script> prevOutScripts = new LinkedList<Script>();
          if (!isCoinBase) {
            for (int index = 0; index < tx.getInputs().size(); index++) {
              final TransactionInput in = tx.getInputs().get(index);
              final StoredTransactionOutput prevOut =
                  blockStore.getTransactionOutput(
                      in.getOutpoint().getHash(), in.getOutpoint().getIndex());
              if (prevOut == null)
                throw new VerificationException(
                    "Attempted spend of a non-existent or already spent output!");
              if (newBlock.getHeight() - prevOut.getHeight() < params.getSpendableCoinbaseDepth())
                throw new VerificationException(
                    "Tried to spend coinbase at depth "
                        + (newBlock.getHeight() - prevOut.getHeight()));
              valueIn = valueIn.add(prevOut.getValue());
              if (enforcePayToScriptHash) {
                Script script = new Script(prevOut.getScriptBytes());
                if (script.isPayToScriptHash())
                  sigOps += Script.getP2SHSigOpCount(in.getScriptBytes());
                if (sigOps > Block.MAX_BLOCK_SIGOPS)
                  throw new VerificationException("Too many P2SH SigOps in block");
              }

              prevOutScripts.add(new Script(prevOut.getScriptBytes()));

              blockStore.removeUnspentTransactionOutput(prevOut);
              txOutsSpent.add(prevOut);
            }
          }
          Sha256Hash hash = tx.getHash();
          for (TransactionOutput out : tx.getOutputs()) {
            valueOut = valueOut.add(out.getValue());
            StoredTransactionOutput newOut =
                new StoredTransactionOutput(
                    hash,
                    out.getIndex(),
                    out.getValue(),
                    newBlock.getHeight(),
                    isCoinBase,
                    out.getScriptBytes());
            blockStore.addUnspentTransactionOutput(newOut);
            txOutsCreated.add(newOut);
          }
          // All values were already checked for being non-negative (as it is verified in
          // Transaction.verify())
          // but we check again here just for defence in depth. Transactions with zero output value
          // are OK.
          if (valueOut.signum() < 0 || valueOut.compareTo(NetworkParameters.MAX_MONEY) > 0)
            throw new VerificationException("Transaction output value out of range");
          if (isCoinBase) {
            coinbaseValue = valueOut;
          } else {
            if (valueIn.compareTo(valueOut) < 0
                || valueIn.compareTo(NetworkParameters.MAX_MONEY) > 0)
              throw new VerificationException("Transaction input value out of range");
            totalFees = totalFees.add(valueIn.subtract(valueOut));
          }

          if (!isCoinBase) {
            // Because correctlySpends modifies transactions, this must come after we are done with
            // tx
            FutureTask<VerificationException> future =
                new FutureTask<VerificationException>(
                    new Verifier(tx, prevOutScripts, enforcePayToScriptHash));
            scriptVerificationExecutor.execute(future);
            listScriptVerificationResults.add(future);
          }
        }
        if (totalFees.compareTo(NetworkParameters.MAX_MONEY) > 0
            || newBlock
                    .getHeader()
                    .getBlockInflation(newBlock.getHeight())
                    .add(totalFees)
                    .compareTo(coinbaseValue)
                < 0) throw new VerificationException("Transaction fees out of range");
        txOutChanges = new TransactionOutputChanges(txOutsCreated, txOutsSpent);
        for (Future<VerificationException> future : listScriptVerificationResults) {
          VerificationException e;
          try {
            e = future.get();
          } catch (InterruptedException thrownE) {
            throw new RuntimeException(thrownE); // Shouldn't happen
          } catch (ExecutionException thrownE) {
            log.error("Script.correctlySpends threw a non-normal exception: " + thrownE.getCause());
            throw new VerificationException(
                "Bug in Script.correctlySpends, likely script malformed in some new and interesting way.",
                thrownE);
          }
          if (e != null) throw e;
        }
      } else {
        txOutChanges = block.getTxOutChanges();
        if (!params.isCheckpoint(newBlock.getHeight()))
          for (StoredTransactionOutput out : txOutChanges.txOutsCreated) {
            Sha256Hash hash = out.getHash();
            if (blockStore.getTransactionOutput(hash, out.getIndex()) != null)
              throw new VerificationException("Block failed BIP30 test!");
          }
        for (StoredTransactionOutput out : txOutChanges.txOutsCreated)
          blockStore.addUnspentTransactionOutput(out);
        for (StoredTransactionOutput out : txOutChanges.txOutsSpent)
          blockStore.removeUnspentTransactionOutput(out);
      }
    } catch (VerificationException e) {
      scriptVerificationExecutor.shutdownNow();
      blockStore.abortDatabaseBatchWrite();
      throw e;
    } catch (BlockStoreException e) {
      scriptVerificationExecutor.shutdownNow();
      blockStore.abortDatabaseBatchWrite();
      throw e;
    }
    return txOutChanges;
  }

  /**
   * This is broken for blocks that do not pass BIP30, so all BIP30-failing blocks which are allowed
   * to fail BIP30 must be checkpointed.
   */
  @Override
  protected void disconnectTransactions(StoredBlock oldBlock)
      throws PrunedException, BlockStoreException {
    checkState(lock.isHeldByCurrentThread());
    blockStore.beginDatabaseBatchWrite();
    try {
      StoredUndoableBlock undoBlock = blockStore.getUndoBlock(oldBlock.getHeader().getHash());
      if (undoBlock == null) throw new PrunedException(oldBlock.getHeader().getHash());
      TransactionOutputChanges txOutChanges = undoBlock.getTxOutChanges();
      for (StoredTransactionOutput out : txOutChanges.txOutsSpent)
        blockStore.addUnspentTransactionOutput(out);
      for (StoredTransactionOutput out : txOutChanges.txOutsCreated)
        blockStore.removeUnspentTransactionOutput(out);
    } catch (PrunedException e) {
      blockStore.abortDatabaseBatchWrite();
      throw e;
    } catch (BlockStoreException e) {
      blockStore.abortDatabaseBatchWrite();
      throw e;
    }
  }

  @Override
  protected void doSetChainHead(StoredBlock chainHead) throws BlockStoreException {
    checkState(lock.isHeldByCurrentThread());
    blockStore.setVerifiedChainHead(chainHead);
    blockStore.commitDatabaseBatchWrite();
  }

  @Override
  protected void notSettingChainHead() throws BlockStoreException {
    blockStore.abortDatabaseBatchWrite();
  }

  @Override
  protected StoredBlock getStoredBlockInCurrentScope(Sha256Hash hash) throws BlockStoreException {
    checkState(lock.isHeldByCurrentThread());
    return blockStore.getOnceUndoableStoredBlock(hash);
  }
}
Example #21
  @Override
  /** Used during reorgs to connect a block previously on a fork */
  protected synchronized TransactionOutputChanges connectTransactions(StoredBlock newBlock)
      throws VerificationException, BlockStoreException, PrunedException {
    checkState(lock.isHeldByCurrentThread());
    if (!params.passesCheckpoint(newBlock.getHeight(), newBlock.getHeader().getHash()))
      throw new VerificationException("Block failed checkpoint lockin at " + newBlock.getHeight());

    blockStore.beginDatabaseBatchWrite();
    StoredUndoableBlock block = blockStore.getUndoBlock(newBlock.getHeader().getHash());
    if (block == null) {
      // We're trying to re-org too deep and the data needed has been deleted.
      blockStore.abortDatabaseBatchWrite();
      throw new PrunedException(newBlock.getHeader().getHash());
    }
    TransactionOutputChanges txOutChanges;
    try {
      List<Transaction> transactions = block.getTransactions();
      if (transactions != null) {
        LinkedList<StoredTransactionOutput> txOutsSpent = new LinkedList<StoredTransactionOutput>();
        LinkedList<StoredTransactionOutput> txOutsCreated =
            new LinkedList<StoredTransactionOutput>();
        long sigOps = 0;
        final boolean enforcePayToScriptHash =
            newBlock.getHeader().getTimeSeconds() >= NetworkParameters.BIP16_ENFORCE_TIME;
        if (!params.isCheckpoint(newBlock.getHeight())) {
          for (Transaction tx : transactions) {
            Sha256Hash hash = tx.getHash();
            if (blockStore.hasUnspentOutputs(hash, tx.getOutputs().size()))
              throw new VerificationException("Block failed BIP30 test!");
          }
        }
        Coin totalFees = Coin.ZERO;
        Coin coinbaseValue = null;

        if (scriptVerificationExecutor.isShutdown())
          scriptVerificationExecutor =
              Executors.newFixedThreadPool(Runtime.getRuntime().availableProcessors());
        List<Future<VerificationException>> listScriptVerificationResults =
            new ArrayList<Future<VerificationException>>(transactions.size());
        for (final Transaction tx : transactions) {
          boolean isCoinBase = tx.isCoinBase();
          Coin valueIn = Coin.ZERO;
          Coin valueOut = Coin.ZERO;
          final List<Script> prevOutScripts = new LinkedList<Script>();
          if (!isCoinBase) {
            for (int index = 0; index < tx.getInputs().size(); index++) {
              final TransactionInput in = tx.getInputs().get(index);
              final StoredTransactionOutput prevOut =
                  blockStore.getTransactionOutput(
                      in.getOutpoint().getHash(), in.getOutpoint().getIndex());
              if (prevOut == null)
                throw new VerificationException(
                    "Attempted spend of a non-existent or already spent output!");
              if (newBlock.getHeight() - prevOut.getHeight() < params.getSpendableCoinbaseDepth())
                throw new VerificationException(
                    "Tried to spend coinbase at depth "
                        + (newBlock.getHeight() - prevOut.getHeight()));
              valueIn = valueIn.add(prevOut.getValue());
              if (enforcePayToScriptHash) {
                Script script = new Script(prevOut.getScriptBytes());
                if (script.isPayToScriptHash())
                  sigOps += Script.getP2SHSigOpCount(in.getScriptBytes());
                if (sigOps > Block.MAX_BLOCK_SIGOPS)
                  throw new VerificationException("Too many P2SH SigOps in block");
              }

              prevOutScripts.add(new Script(prevOut.getScriptBytes()));

              blockStore.removeUnspentTransactionOutput(prevOut);
              txOutsSpent.add(prevOut);
            }
          }
          Sha256Hash hash = tx.getHash();
          for (TransactionOutput out : tx.getOutputs()) {
            valueOut = valueOut.add(out.getValue());
            StoredTransactionOutput newOut =
                new StoredTransactionOutput(
                    hash,
                    out.getIndex(),
                    out.getValue(),
                    newBlock.getHeight(),
                    isCoinBase,
                    out.getScriptBytes());
            blockStore.addUnspentTransactionOutput(newOut);
            txOutsCreated.add(newOut);
          }
          // All values were already checked for being non-negative (as it is verified in
          // Transaction.verify())
          // but we check again here just for defence in depth. Transactions with zero output value
          // are OK.
          if (valueOut.signum() < 0 || valueOut.compareTo(NetworkParameters.MAX_MONEY) > 0)
            throw new VerificationException("Transaction output value out of range");
          if (isCoinBase) {
            coinbaseValue = valueOut;
          } else {
            if (valueIn.compareTo(valueOut) < 0
                || valueIn.compareTo(NetworkParameters.MAX_MONEY) > 0)
              throw new VerificationException("Transaction input value out of range");
            totalFees = totalFees.add(valueIn.subtract(valueOut));
          }

          if (!isCoinBase) {
            // Because correctlySpends modifies transactions, this must come after we are done with
            // tx
            FutureTask<VerificationException> future =
                new FutureTask<VerificationException>(
                    new Verifier(tx, prevOutScripts, enforcePayToScriptHash));
            scriptVerificationExecutor.execute(future);
            listScriptVerificationResults.add(future);
          }
        }
        if (totalFees.compareTo(NetworkParameters.MAX_MONEY) > 0
            || newBlock
                    .getHeader()
                    .getBlockInflation(newBlock.getHeight())
                    .add(totalFees)
                    .compareTo(coinbaseValue)
                < 0) throw new VerificationException("Transaction fees out of range");
        txOutChanges = new TransactionOutputChanges(txOutsCreated, txOutsSpent);
        for (Future<VerificationException> future : listScriptVerificationResults) {
          VerificationException e;
          try {
            e = future.get();
          } catch (InterruptedException thrownE) {
            throw new RuntimeException(thrownE); // Shouldn't happen
          } catch (ExecutionException thrownE) {
            log.error("Script.correctlySpends threw a non-normal exception: " + thrownE.getCause());
            throw new VerificationException(
                "Bug in Script.correctlySpends, likely script malformed in some new and interesting way.",
                thrownE);
          }
          if (e != null) throw e;
        }
      } else {
        txOutChanges = block.getTxOutChanges();
        if (!params.isCheckpoint(newBlock.getHeight()))
          for (StoredTransactionOutput out : txOutChanges.txOutsCreated) {
            Sha256Hash hash = out.getHash();
            if (blockStore.getTransactionOutput(hash, out.getIndex()) != null)
              throw new VerificationException("Block failed BIP30 test!");
          }
        for (StoredTransactionOutput out : txOutChanges.txOutsCreated)
          blockStore.addUnspentTransactionOutput(out);
        for (StoredTransactionOutput out : txOutChanges.txOutsSpent)
          blockStore.removeUnspentTransactionOutput(out);
      }
    } catch (VerificationException e) {
      scriptVerificationExecutor.shutdownNow();
      blockStore.abortDatabaseBatchWrite();
      throw e;
    } catch (BlockStoreException e) {
      scriptVerificationExecutor.shutdownNow();
      blockStore.abortDatabaseBatchWrite();
      throw e;
    }
    return txOutChanges;
  }
Example #22
  @Override
  protected TransactionOutputChanges connectTransactions(int height, Block block)
      throws VerificationException, BlockStoreException {
    checkState(lock.isHeldByCurrentThread());
    if (block.transactions == null)
      throw new RuntimeException(
          "connectTransactions called with Block that didn't have transactions!");
    if (!params.passesCheckpoint(height, block.getHash()))
      throw new VerificationException("Block failed checkpoint lockin at " + height);

    blockStore.beginDatabaseBatchWrite();

    LinkedList<StoredTransactionOutput> txOutsSpent = new LinkedList<StoredTransactionOutput>();
    LinkedList<StoredTransactionOutput> txOutsCreated = new LinkedList<StoredTransactionOutput>();
    long sigOps = 0;
    final boolean enforcePayToScriptHash =
        block.getTimeSeconds() >= NetworkParameters.BIP16_ENFORCE_TIME;

    if (scriptVerificationExecutor.isShutdown())
      scriptVerificationExecutor =
          Executors.newFixedThreadPool(Runtime.getRuntime().availableProcessors());

    List<Future<VerificationException>> listScriptVerificationResults =
        new ArrayList<Future<VerificationException>>(block.transactions.size());
    try {
      if (!params.isCheckpoint(height)) {
        // BIP30 violator blocks are ones that contain a duplicated transaction. They are all in the
        // checkpoints list and we therefore only check non-checkpoints for duplicated transactions
        // here. See the
        // BIP30 document for more details on this:
        // https://github.com/bitcoin/bips/blob/master/bip-0030.mediawiki
        for (Transaction tx : block.transactions) {
          Sha256Hash hash = tx.getHash();
          // If we already have unspent outputs for this hash, we saw the tx already. Either the
          // block is
          // being added twice (bug) or the block is a BIP30 violator.
          if (blockStore.hasUnspentOutputs(hash, tx.getOutputs().size()))
            throw new VerificationException("Block failed BIP30 test!");
          if (enforcePayToScriptHash) // We already check non-BIP16 sigops in
            // Block.verifyTransactions(true)
            sigOps += tx.getSigOpCount();
        }
      }
      Coin totalFees = Coin.ZERO;
      Coin coinbaseValue = null;
      for (final Transaction tx : block.transactions) {
        boolean isCoinBase = tx.isCoinBase();
        Coin valueIn = Coin.ZERO;
        Coin valueOut = Coin.ZERO;
        final List<Script> prevOutScripts = new LinkedList<Script>();
        if (!isCoinBase) {
          // For each input of the transaction remove the corresponding output from the set of
          // unspent
          // outputs.
          for (int index = 0; index < tx.getInputs().size(); index++) {
            TransactionInput in = tx.getInputs().get(index);
            StoredTransactionOutput prevOut =
                blockStore.getTransactionOutput(
                    in.getOutpoint().getHash(), in.getOutpoint().getIndex());
            if (prevOut == null)
              throw new VerificationException(
                  "Attempted to spend a non-existent or already spent output!");
            // Coinbases can't be spent until they mature, to avoid re-orgs destroying entire
            // transaction
            // chains. The assumption is there will ~never be re-orgs deeper than the spendable
            // coinbase
            // chain depth.
            if (height - prevOut.getHeight() < params.getSpendableCoinbaseDepth())
              throw new VerificationException(
                  "Tried to spend coinbase at depth " + (height - prevOut.getHeight()));
            // TODO: Check we're not spending the genesis transaction here. Satoshis code won't
            // allow it.
            valueIn = valueIn.add(prevOut.getValue());
            if (enforcePayToScriptHash) {
              if (new Script(prevOut.getScriptBytes()).isPayToScriptHash())
                sigOps += Script.getP2SHSigOpCount(in.getScriptBytes());
              if (sigOps > Block.MAX_BLOCK_SIGOPS)
                throw new VerificationException("Too many P2SH SigOps in block");
            }

            prevOutScripts.add(new Script(prevOut.getScriptBytes()));

            // in.getScriptSig().correctlySpends(tx, index, new Script(params,
            // prevOut.getScriptBytes(), 0, prevOut.getScriptBytes().length));

            blockStore.removeUnspentTransactionOutput(prevOut);
            txOutsSpent.add(prevOut);
          }
        }
        Sha256Hash hash = tx.getHash();
        for (TransactionOutput out : tx.getOutputs()) {
          valueOut = valueOut.add(out.getValue());
          // For each output, add it to the set of unspent outputs so it can be consumed in future.
          StoredTransactionOutput newOut =
              new StoredTransactionOutput(
                  hash, out.getIndex(), out.getValue(), height, isCoinBase, out.getScriptBytes());
          blockStore.addUnspentTransactionOutput(newOut);
          txOutsCreated.add(newOut);
        }
        // All values were already checked for being non-negative (as it is verified in
        // Transaction.verify()) but we check again here just for defence in depth.
        // Transactions with zero output value are OK.
        if (valueOut.signum() < 0 || valueOut.compareTo(NetworkParameters.MAX_MONEY) > 0)
          throw new VerificationException("Transaction output value out of range");
        if (isCoinBase) {
          coinbaseValue = valueOut;
        } else {
          if (valueIn.compareTo(valueOut) < 0 || valueIn.compareTo(NetworkParameters.MAX_MONEY) > 0)
            throw new VerificationException("Transaction input value out of range");
          totalFees = totalFees.add(valueIn.subtract(valueOut));
        }

        if (!isCoinBase && runScripts) {
          // Because correctlySpends modifies transactions, this must come after we are done with tx
          FutureTask<VerificationException> future =
              new FutureTask<VerificationException>(
                  new Verifier(tx, prevOutScripts, enforcePayToScriptHash));
          scriptVerificationExecutor.execute(future);
          listScriptVerificationResults.add(future);
        }
      }
      if (totalFees.compareTo(NetworkParameters.MAX_MONEY) > 0
          || block.getBlockInflation(height).add(totalFees).compareTo(coinbaseValue) < 0)
        throw new VerificationException("Transaction fees out of range");
      for (Future<VerificationException> future : listScriptVerificationResults) {
        VerificationException e;
        try {
          e = future.get();
        } catch (InterruptedException thrownE) {
          throw new RuntimeException(thrownE); // Shouldn't happen
        } catch (ExecutionException thrownE) {
          log.error("Script.correctlySpends threw a non-normal exception: " + thrownE.getCause());
          throw new VerificationException(
              "Bug in Script.correctlySpends, likely script malformed in some new and interesting way.",
              thrownE);
        }
        if (e != null) throw e;
      }
    } catch (VerificationException e) {
      scriptVerificationExecutor.shutdownNow();
      blockStore.abortDatabaseBatchWrite();
      throw e;
    } catch (BlockStoreException e) {
      scriptVerificationExecutor.shutdownNow();
      blockStore.abortDatabaseBatchWrite();
      throw e;
    }
    return new TransactionOutputChanges(txOutsCreated, txOutsSpent);
  }
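The fee check above enforces that the coinbase may claim at most the block subsidy plus the fees collected from the other transactions in the block. A worked example of that arithmetic, using plain long satoshi values and hypothetical numbers rather than bitcoinj's Coin type:

// Worked example of the subsidy-plus-fees rule, with hypothetical values in satoshis.
public class CoinbaseValueCheck {
  public static void main(String[] args) {
    long subsidy = 25L * 100_000_000L;          // hypothetical 25 BTC block subsidy
    long totalFees = 15_000_000L;               // hypothetical 0.15 BTC of fees paid in the block
    long coinbaseValue = subsidy + 10_000_000L; // the miner claims the subsidy plus 0.10 BTC

    // Valid as long as the claimed coinbase value does not exceed subsidy + fees.
    boolean valid = coinbaseValue <= subsidy + totalFees;
    System.out.println("coinbase within subsidy + fees: " + valid); // prints true
  }
}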
 /**
  * Creates the default implementation of {@link MemoryCache} ({@link LruMemoryCache}).<br>
  * Default cache size = 1/8 of the available app memory.
  */
 public static MemoryCache createMemoryCache(int memoryCacheSize) {
   if (memoryCacheSize == 0) {
     memoryCacheSize = (int) (Runtime.getRuntime().maxMemory() / 8);
   }
   return new LruMemoryCache(memoryCacheSize);
 }
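A minimal, self-contained sketch of the same sizing convention, not taken from the library above: a LinkedHashMap-based LRU byte cache whose default budget is 1/8 of the JVM's max memory. All class and method names here are illustrative.

import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.Map;

// Illustrative LRU byte cache that defaults its budget to 1/8 of the JVM's max memory.
public class SimpleLruByteCache {
  private final int capacityBytes;
  private int usedBytes;
  // accessOrder = true makes the LinkedHashMap iterate least-recently-used entries first
  private final LinkedHashMap<String, byte[]> map = new LinkedHashMap<>(16, 0.75f, true);

  public SimpleLruByteCache(int capacityBytes) {
    // Same convention as above: 0 means "use 1/8 of the available app memory".
    this.capacityBytes =
        capacityBytes == 0 ? (int) (Runtime.getRuntime().maxMemory() / 8) : capacityBytes;
  }

  public synchronized void put(String key, byte[] value) {
    byte[] old = map.put(key, value);
    usedBytes += value.length - (old == null ? 0 : old.length);
    // Evict least-recently-used entries until the cache fits its budget again.
    Iterator<Map.Entry<String, byte[]>> it = map.entrySet().iterator();
    while (usedBytes > capacityBytes && it.hasNext()) {
      usedBytes -= it.next().getValue().length;
      it.remove();
    }
  }

  public synchronized byte[] get(String key) {
    return map.get(key);
  }
}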
예제 #24
0
  protected void handleCommand(String command) {
    if (command == null) {
      return;
    }
    if (command.contains("__")) {
      namespace = command.split("__")[0];
      command = command.substring(command.indexOf("__") + 2);
    }

    if (echo) {
      if (Thread.currentThread().getName().toLowerCase().indexOf("main") < 0)
        println(" [" + Thread.currentThread().getName() + "] " + command);
      else println(command);
    }
    if (command.startsWith("//")) return;
    command = command.trim();
    if (command.length() == 0) {
      return;
    }
    String first = command;
    int spaceIndex = command.indexOf(' ');
    String[] argsSplit = command.split(" ");
    String[] args = new String[argsSplit.length];
    for (int i = 0; i < argsSplit.length; i++) {
      args[i] = argsSplit[i].trim();
    }
    if (spaceIndex != -1) {
      first = args[0];
    }
    if (command.startsWith("help")) {
      handleHelp(command);
    } else if (first.startsWith("#") && first.length() > 1) {
      int repeat = Integer.parseInt(first.substring(1));
      long t0 = Clock.currentTimeMillis();
      for (int i = 0; i < repeat; i++) {
        handleCommand(command.substring(first.length()).replaceAll("\\$i", "" + i));
      }
      long elapsed = Math.max(1, Clock.currentTimeMillis() - t0); // guard against division by zero
      println("ops/s = " + repeat * 1000 / elapsed);
      return;
    } else if (first.startsWith("&") && first.length() > 1) {
      final int fork = Integer.parseInt(first.substring(1));
      ExecutorService pool = Executors.newFixedThreadPool(fork);
      final String threadCommand = command.substring(first.length());
      for (int i = 0; i < fork; i++) {
        final int threadID = i;
        pool.submit(
            new Runnable() {
              public void run() {
                String command = threadCommand;
                String[] threadArgs = command.replaceAll("\\$t", "" + threadID).trim().split(" ");
                // TODO &t #4 m.putmany x k
                if ("m.putmany".equals(threadArgs[0]) || "m.removemany".equals(threadArgs[0])) {
                  if (threadArgs.length < 4) {
                    command += " " + Integer.parseInt(threadArgs[1]) * threadID;
                  }
                }
                handleCommand(command);
              }
            });
      }
      pool.shutdown();
      try {
        pool.awaitTermination(60 * 60, TimeUnit.SECONDS); // wait 1h
      } catch (Exception e) {
        e.printStackTrace();
      }
    } else if (first.startsWith("@")) {
      if (first.length() == 1) {
        println("usage: @<file-name>");
        return;
      }
      File f = new File(first.substring(1));
      println("Executing script file " + f.getAbsolutePath());
      if (f.exists()) {
        try (BufferedReader br = new BufferedReader(new FileReader(f))) {
          String l = br.readLine();
          while (l != null) {
            handleCommand(l);
            l = br.readLine();
          }
        } catch (IOException e) {
          e.printStackTrace();
        }
      } else {
        println("File not found! " + f.getAbsolutePath());
      }
    } else if (command.indexOf(';') != -1) {
      StringTokenizer st = new StringTokenizer(command, ";");
      while (st.hasMoreTokens()) {
        handleCommand(st.nextToken());
      }
      return;
    } else if ("silent".equals(first)) {
      silent = Boolean.parseBoolean(args[1]);
    } else if ("shutdown".equals(first)) {
      hazelcast.getLifecycleService().shutdown();
    } else if ("echo".equals(first)) {
      echo = Boolean.parseBoolean(args[1]);
      println("echo: " + echo);
    } else if ("ns".equals(first)) {
      if (args.length > 1) {
        namespace = args[1];
        println("namespace: " + namespace);
        //                init();
      }
    } else if ("whoami".equals(first)) {
      println(hazelcast.getCluster().getLocalMember());
    } else if ("who".equals(first)) {
      println(hazelcast.getCluster());
    } else if ("jvm".equals(first)) {
      System.gc();
      println("Memory max: " + Runtime.getRuntime().maxMemory() / 1024 / 1024 + "M");
      println(
          "Memory free: "
              + Runtime.getRuntime().freeMemory() / 1024 / 1024
              + "M "
              + (int) (Runtime.getRuntime().freeMemory() * 100 / Runtime.getRuntime().maxMemory())
              + "%");
      long total = Runtime.getRuntime().totalMemory();
      long free = Runtime.getRuntime().freeMemory();
      println("Used Memory:" + ((total - free) / 1024 / 1024) + "MB");
      println("# procs: " + Runtime.getRuntime().availableProcessors());
      println(
          "OS info: "
              + ManagementFactory.getOperatingSystemMXBean().getArch()
              + " "
              + ManagementFactory.getOperatingSystemMXBean().getName()
              + " "
              + ManagementFactory.getOperatingSystemMXBean().getVersion());
      println(
          "JVM: "
              + ManagementFactory.getRuntimeMXBean().getVmVendor()
              + " "
              + ManagementFactory.getRuntimeMXBean().getVmName()
              + " "
              + ManagementFactory.getRuntimeMXBean().getVmVersion());
    } else if (first.indexOf("ock") != -1 && first.indexOf(".") == -1) {
      handleLock(args);
    } else if (first.indexOf(".size") != -1) {
      handleSize(args);
    } else if (first.indexOf(".clear") != -1) {
      handleClear(args);
    } else if (first.indexOf(".destroy") != -1) {
      handleDestroy(args);
    } else if (first.indexOf(".iterator") != -1) {
      handleIterator(args);
    } else if (first.indexOf(".contains") != -1) {
      handleContains(args);
    } else if (first.indexOf(".stats") != -1) {
      handStats(args);
    } else if ("t.publish".equals(first)) {
      handleTopicPublish(args);
    } else if ("q.offer".equals(first)) {
      handleQOffer(args);
    } else if ("q.take".equals(first)) {
      handleQTake(args);
    } else if ("q.poll".equals(first)) {
      handleQPoll(args);
    } else if ("q.peek".equals(first)) {
      handleQPeek(args);
    } else if ("q.capacity".equals(first)) {
      handleQCapacity(args);
    } else if ("q.offermany".equals(first)) {
      handleQOfferMany(args);
    } else if ("q.pollmany".equals(first)) {
      handleQPollMany(args);
    } else if ("s.add".equals(first)) {
      handleSetAdd(args);
    } else if ("s.remove".equals(first)) {
      handleSetRemove(args);
    } else if ("s.addmany".equals(first)) {
      handleSetAddMany(args);
    } else if ("s.removemany".equals(first)) {
      handleSetRemoveMany(args);
    } else if (first.equals("m.replace")) {
      handleMapReplace(args);
    } else if (first.equalsIgnoreCase("m.putIfAbsent")) {
      handleMapPutIfAbsent(args);
    } else if (first.equals("m.putAsync")) {
      handleMapPutAsync(args);
    } else if (first.equals("m.getAsync")) {
      handleMapGetAsync(args);
    } else if (first.equals("m.put")) {
      handleMapPut(args);
    } else if (first.equals("m.get")) {
      handleMapGet(args);
    } else if (first.equalsIgnoreCase("m.getMapEntry")) {
      handleMapGetMapEntry(args);
    } else if (first.equals("m.remove")) {
      handleMapRemove(args);
    } else if (first.equals("m.evict")) {
      handleMapEvict(args);
    } else if (first.equals("m.putmany") || first.equalsIgnoreCase("m.putAll")) {
      handleMapPutMany(args);
    } else if (first.equals("m.getmany")) {
      handleMapGetMany(args);
    } else if (first.equals("m.removemany")) {
      handleMapRemoveMany(args);
    } else if (command.equalsIgnoreCase("m.localKeys")) {
      handleMapLocalKeys();
    } else if (command.equals("m.keys")) {
      handleMapKeys();
    } else if (command.equals("m.values")) {
      handleMapValues();
    } else if (command.equals("m.entries")) {
      handleMapEntries();
    } else if (first.equals("m.lock")) {
      handleMapLock(args);
    } else if (first.equalsIgnoreCase("m.tryLock")) {
      handleMapTryLock(args);
    } else if (first.equals("m.unlock")) {
      handleMapUnlock(args);
    } else if (first.indexOf(".addListener") != -1) {
      handleAddListener(args);
    } else if (first.equals("m.removeMapListener")) {
      handleRemoveListener(args);
    } else if (first.equals("m.unlock")) {
      handleMapUnlock(args);
    } else if (first.equals("l.add")) {
      handleListAdd(args);
    } else if (first.equals("l.set")) {
      handleListSet(args);
    } else if ("l.addmany".equals(first)) {
      handleListAddMany(args);
    } else if (first.equals("l.remove")) {
      handleListRemove(args);
    } else if (first.equals("l.contains")) {
      handleListContains(args);
    } else if ("a.get".equals(first)) {
      handleAtomicNumberGet(args);
    } else if ("a.set".equals(first)) {
      handleAtomicNumberSet(args);
    } else if ("a.inc".equals(first)) {
      handleAtomicNumberInc(args);
    } else if ("a.dec".equals(first)) {
      handleAtomicNumberDec(args);
      //        } else if (first.equals("execute")) {
      //            execute(args);
    } else if (first.equals("partitions")) {
      handlePartitions(args);
      //        } else if (first.equals("txn")) {
      //            hazelcast.getTransaction().begin();
      //        } else if (first.equals("commit")) {
      //            hazelcast.getTransaction().commit();
      //        } else if (first.equals("rollback")) {
      //            hazelcast.getTransaction().rollback();
      //        } else if (first.equalsIgnoreCase("executeOnKey")) {
      //            executeOnKey(args);
      //        } else if (first.equalsIgnoreCase("executeOnMember")) {
      //            executeOnMember(args);
      //        } else if (first.equalsIgnoreCase("executeOnMembers")) {
      //            executeOnMembers(args);
      //        } else if (first.equalsIgnoreCase("longOther") ||
      // first.equalsIgnoreCase("executeLongOther")) {
      //            executeLongTaskOnOtherMember(args);
      //        } else if (first.equalsIgnoreCase("long") || first.equalsIgnoreCase("executeLong"))
      // {
      //            executeLong(args);
    } else if (first.equalsIgnoreCase("instances")) {
      handleInstances(args);
    } else if (first.equalsIgnoreCase("quit") || first.equalsIgnoreCase("exit")) {
      System.exit(0);
    } else {
      println("type 'help' for help");
    }
  }
예제 #25
0
 /**
  * Create a thread group to share among multiple fat pipes.
  *
  * @param poolSize the number of threads to create. A value less than zero uses all available
  *     cores minus one. At least one thread is always used.
  * @param listener invoked in case an exception is thrown while executing one of the fat pipes
  */
 public Pool(int poolSize, String name, ExceptionListener listener) {
   if (poolSize < 0) poolSize = Runtime.getRuntime().availableProcessors() - 1;
   poolSize = Math.max(poolSize, 1);
   producer = new ParentProducer(poolSize, listener);
   parent = new PooledFatPipe<>(producer, poolSize, null, name);
 }
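A standalone sketch of the same sizing rule, with illustrative names that are not part of the fat-pipe library above: negative pool sizes fall back to available cores minus one, clamped to at least one thread.

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

// Hypothetical helper reproducing the sizing rule documented above.
public final class PoolSizing {
  private PoolSizing() {}

  public static ExecutorService newSizedPool(int requestedSize, final String name) {
    // A negative request means "all cores minus one"; never drop below one thread.
    int size = requestedSize < 0 ? Runtime.getRuntime().availableProcessors() - 1 : requestedSize;
    size = Math.max(size, 1);
    return Executors.newFixedThreadPool(
        size,
        r -> {
          Thread t = new Thread(r, name);
          t.setDaemon(true); // daemon threads so an idle pool never blocks JVM shutdown
          return t;
        });
  }
}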
예제 #26
0
  /**
   * Reads conll sentences, parses them, and writes the json-serialized results.
   *
   * @param inputSupplier where to read conll sentences from
   * @param outputSupplier where to write the results to
   * @param numThreads the number of threads to use
   * @throws IOException
   * @throws InterruptedException
   */
  public void runParser(
      final InputSupplier<? extends Readable> inputSupplier,
      final OutputSupplier<? extends Writer> outputSupplier,
      final int numThreads)
      throws IOException, InterruptedException {
    // use the producer-worker-consumer pattern to parse all sentences in multiple threads, while
    // keeping output in order.
    final BlockingQueue<Future<Optional<SemaforParseResult>>> results =
        Queues.newLinkedBlockingDeque(5 * numThreads);
    final ExecutorService workerThreadPool = newFixedThreadPool(numThreads);
    // try to shutdown gracefully. don't worry too much if it doesn't work
    Runtime.getRuntime()
        .addShutdownHook(
            new Thread(
                new Runnable() {
                  @Override
                  public void run() {
                    try {
                      workerThreadPool.shutdown();
                      workerThreadPool.awaitTermination(5, TimeUnit.SECONDS);
                    } catch (InterruptedException ignored) {
                    }
                  }
                }));

    final PrintWriter output = new PrintWriter(outputSupplier.getOutput());
    try {
      // Start thread to fetch computed results and write to file
      final Thread consumer =
          new Thread(
              new Runnable() {
                @Override
                public void run() {
                  while (!Thread.currentThread().isInterrupted()) {
                    try {
                      final Optional<SemaforParseResult> oResult = results.take().get();
                      if (!oResult.isPresent()) break; // got poison pill. we're done
                      output.println(oResult.get().toJson());
                      output.flush();
                    } catch (Exception e) {
                      e.printStackTrace();
                      throw new RuntimeException(e);
                    }
                  }
                }
              });
      consumer.start();
      // in main thread, put placeholders on results queue (so results stay in order), then
      // tell a worker thread to fill up the placeholder
      final SentenceCodec.SentenceIterator sentences =
          ConllCodec.readInput(inputSupplier.getInput());
      try {
        int i = 0;
        while (sentences.hasNext()) {
          final Sentence sentence = sentences.next();
          final int sentenceId = i;
          results.put(
              workerThreadPool.submit(
                  new Callable<Optional<SemaforParseResult>>() {
                    @Override
                    public Optional<SemaforParseResult> call() throws Exception {
                      final long start = System.currentTimeMillis();
                      try {
                        final SemaforParseResult result = parseSentence(sentence);
                        final long end = System.currentTimeMillis();
                        System.err.printf(
                            "parsed sentence %d in %d millis.%n", sentenceId, end - start);
                        return Optional.of(result);
                      } catch (Exception e) {
                        e.printStackTrace();
                        throw e;
                      }
                    }
                  }));
          i++;
        }
        // put a poison pill on the queue to signal that we're done
        results.put(
            workerThreadPool.submit(
                new Callable<Optional<SemaforParseResult>>() {
                  @Override
                  public Optional<SemaforParseResult> call() throws Exception {
                    return Optional.absent();
                  }
                }));
        workerThreadPool.shutdown();
      } finally {
        closeQuietly(sentences);
      }
      // wait for consumer to finish
      consumer.join();
    } finally {
      closeQuietly(output);
    }
    System.err.println("Done.");
  }
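A minimal, runnable sketch of the ordering trick described above, with illustrative names: placeholder Futures are enqueued in input order, a pool of workers fills them in parallel, and a single consumer drains the queue so output order matches input order. A null result plays the role of the poison pill that Optional.absent() plays above.

import java.util.concurrent.BlockingQueue;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.LinkedBlockingQueue;

// Minimal sketch of the producer-worker-consumer pattern with ordered output.
public class OrderedParallelPipeline {
  public static void main(String[] args) throws Exception {
    final int numThreads = Runtime.getRuntime().availableProcessors();
    final ExecutorService workers = Executors.newFixedThreadPool(numThreads);
    // Bounded queue: the producer blocks when the consumer falls too far behind.
    final BlockingQueue<Future<String>> results = new LinkedBlockingQueue<>(5 * numThreads);

    Thread consumer =
        new Thread(
            () -> {
              try {
                while (true) {
                  String line = results.take().get(); // blocks until the next result is ready
                  if (line == null) break; // poison pill: no more results
                  System.out.println(line);
                }
              } catch (InterruptedException | ExecutionException e) {
                throw new RuntimeException(e);
              }
            });
    consumer.start();

    for (int i = 0; i < 20; i++) {
      final int id = i;
      // The Future placeholder is enqueued immediately, so output order == input order.
      results.put(workers.submit(() -> "processed item " + id));
    }
    results.put(workers.submit(() -> (String) null)); // poison pill
    workers.shutdown();
    consumer.join();
  }
}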
예제 #27
0
/**
 * @author <a href="http://tfox.org">Tim Fox</a>
 */
public class DefaultVertx extends VertxInternal {

  private static final Logger log = LoggerFactory.getLogger(DefaultVertx.class);

  private final FileSystem fileSystem = getFileSystem();
  private final EventBus eventBus;
  private final SharedData sharedData = new SharedData();

  private ThreadPoolExecutor backgroundPool =
      VertxExecutorFactory.workerPool("vert.x-worker-thread-");
  private OrderedExecutorFactory orderedFact = new OrderedExecutorFactory(backgroundPool);

  private int corePoolSize = Runtime.getRuntime().availableProcessors();
  private NioWorkerPool corePool =
      new NioWorkerPool(VertxExecutorFactory.eventPool("vert.x-core-thread-"), corePoolSize);

  private ThreadPoolExecutor acceptorPool =
      VertxExecutorFactory.acceptorPool("vert.x-acceptor-thread-");

  private Map<ServerID, DefaultHttpServer> sharedHttpServers = new HashMap<>();
  private Map<ServerID, DefaultNetServer> sharedNetServers = new HashMap<>();

  private final ThreadLocal<Context> contextTL = new ThreadLocal<>();

  // For now we use a hashed wheel with its own thread for timeouts - ideally the event loop would
  // have its own hashed wheel
  private HashedWheelTimer timer =
      new HashedWheelTimer(
          new VertxThreadFactory("vert.x-timer-thread"), 1, TimeUnit.MILLISECONDS, 8192);

  {
    timer.start();
  }

  private final AtomicLong timeoutCounter = new AtomicLong(0);
  private final Map<Long, TimeoutHolder> timeouts = new ConcurrentHashMap<>();

  public DefaultVertx() {
    this.eventBus = new DefaultEventBus(this);
  }

  public DefaultVertx(String hostname) {
    this.eventBus = new DefaultEventBus(this, hostname);
  }

  public DefaultVertx(int port, String hostname) {
    this.eventBus = new DefaultEventBus(this, port, hostname);
  }

  /** @return The FileSystem implementation for the OS */
  protected FileSystem getFileSystem() {
    return Windows.isWindows() ? new WindowsFileSystem(this) : new DefaultFileSystem(this);
  }

  public Timer getTimer() {
    return timer;
  }

  public NetServer createNetServer() {
    return new DefaultNetServer(this);
  }

  public NetClient createNetClient() {
    return new DefaultNetClient(this);
  }

  public FileSystem fileSystem() {
    return fileSystem;
  }

  public SharedData sharedData() {
    return sharedData;
  }

  public HttpServer createHttpServer() {
    return new DefaultHttpServer(this);
  }

  public HttpClient createHttpClient() {
    return new DefaultHttpClient(this);
  }

  public SockJSServer createSockJSServer(HttpServer httpServer) {
    return new DefaultSockJSServer(this, httpServer);
  }

  public EventBus eventBus() {
    return eventBus;
  }

  public Context startOnEventLoop(final Runnable runnable) {
    Context context = createEventLoopContext();
    context.execute(runnable);
    return context;
  }

  public Context startInBackground(final Runnable runnable) {
    Context context = createWorkerContext();
    context.execute(runnable);
    return context;
  }

  public boolean isEventLoop() {
    Context context = getContext();
    if (context != null) {
      return context instanceof EventLoopContext;
    }
    return false;
  }

  public boolean isWorker() {
    Context context = getContext();
    if (context != null) {
      return context instanceof WorkerContext;
    }
    return false;
  }

  public long setPeriodic(long delay, final Handler<Long> handler) {
    return setTimeout(delay, true, handler);
  }

  public long setTimer(long delay, final Handler<Long> handler) {
    return setTimeout(delay, false, handler);
  }

  public void runOnLoop(final Handler<Void> handler) {
    Context context = getOrAssignContext();
    context.execute(
        new Runnable() {
          public void run() {
            handler.handle(null);
          }
        });
  }

  // The background pool is used for making blocking calls to legacy synchronous APIs
  public ExecutorService getBackgroundPool() {
    return backgroundPool;
  }

  public NioWorkerPool getCorePool() {
    return corePool;
  }

  // We use a cached pool, but it will never get large since it is only used for acceptors.
  // There will be one thread for each port being listened on.
  public Executor getAcceptorPool() {
    return acceptorPool;
  }

  public Context getOrAssignContext() {
    Context ctx = getContext();
    if (ctx == null) {
      // Assign a context
      ctx = createEventLoopContext();
    }
    return ctx;
  }

  public void reportException(Throwable t) {
    Context ctx = getContext();
    if (ctx != null) {
      ctx.reportException(t);
    } else {
      log.error(" default vertx Unhandled exception ", t);
    }
  }

  public Map<ServerID, DefaultHttpServer> sharedHttpServers() {
    return sharedHttpServers;
  }

  public Map<ServerID, DefaultNetServer> sharedNetServers() {
    return sharedNetServers;
  }

  private long setTimeout(final long delay, boolean periodic, final Handler<Long> handler) {
    final Context context = getOrAssignContext();

    InternalTimerHandler myHandler;
    if (periodic) {
      myHandler =
          new InternalTimerHandler(handler) {
            public void run() {
              super.run();
              scheduleTimeout(timerID, context, this, delay); // And reschedule
            }
          };
    } else {
      myHandler =
          new InternalTimerHandler(handler) {
            public void run() {
              super.run();
              timeouts.remove(timerID);
            }
          };
    }
    long timerID = scheduleTimeout(-1, context, myHandler, delay);
    myHandler.timerID = timerID;
    return timerID;
  }

  public boolean cancelTimer(long id) {
    return cancelTimeout(id);
  }

  public Context createEventLoopContext() {
    getBackgroundPool();
    NioWorker worker = getCorePool().nextWorker();
    return new EventLoopContext(this, orderedFact.getExecutor(), worker);
  }

  private boolean cancelTimeout(long id) {
    TimeoutHolder holder = timeouts.remove(id);
    if (holder != null) {
      holder.timeout.cancel();
      return true;
    } else {
      return false;
    }
  }

  private long scheduleTimeout(long id, final Context context, final Runnable task, long delay) {
    TimerTask ttask =
        new TimerTask() {
          public void run(Timeout timeout) throws Exception {
            context.execute(task);
          }
        };
    if (id != -1 && timeouts.get(id) == null) {
      // Been cancelled
      return -1;
    }
    Timeout timeout = timer.newTimeout(ttask, delay, TimeUnit.MILLISECONDS);
    id = id != -1 ? id : timeoutCounter.getAndIncrement();
    timeouts.put(id, new TimeoutHolder(timeout));
    return id;
  }

  private Context createWorkerContext() {
    getBackgroundPool();
    return new WorkerContext(this, orderedFact.getExecutor());
  }

  public void setContext(Context context) {
    contextTL.set(context);
    if (context != null) {
      context.setTCCL();
    }
  }

  public Context getContext() {
    return contextTL.get();
  }

  @Override
  public void stop() {

    if (sharedHttpServers != null) {
      for (HttpServer server : sharedHttpServers.values()) {
        server.close();
      }
      sharedHttpServers = null;
    }

    if (sharedNetServers != null) {
      for (NetServer server : sharedNetServers.values()) {
        server.close();
      }
      sharedNetServers = null;
    }

    if (timer != null) {
      timer.stop();
      timer = null;
    }

    if (eventBus != null) {
      eventBus.close(null);
    }

    if (backgroundPool != null) {
      backgroundPool.shutdown();
    }

    if (acceptorPool != null) {
      acceptorPool.shutdown();
    }

    try {
      if (backgroundPool != null) {
        backgroundPool.awaitTermination(20, TimeUnit.SECONDS);
        backgroundPool = null;
      }
    } catch (InterruptedException ex) {
      // ignore
    }

    try {
      if (acceptorPool != null) {
        acceptorPool.awaitTermination(20, TimeUnit.SECONDS);
        acceptorPool = null;
      }
    } catch (InterruptedException ex) {
      // ignore
    }

    // log.info("Release external resources from worker pool");
    if (corePool != null) {
      corePool.releaseExternalResources();
      corePool = null;
    }
    // log.info("Release external resources: done");

    setContext(null);
  }

  private static class InternalTimerHandler implements Runnable {
    final Handler<Long> handler;
    long timerID;

    InternalTimerHandler(Handler<Long> runnable) {
      this.handler = runnable;
    }

    public void run() {
      handler.handle(timerID);
    }
  }

  private static class TimeoutHolder {
    final Timeout timeout;

    TimeoutHolder(Timeout timeout) {
      this.timeout = timeout;
    }
  }
}
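A minimal sketch of the timer bookkeeping used above, with the JDK's ScheduledExecutorService standing in for Netty's HashedWheelTimer (an assumption made only to keep the example self-contained, all names illustrative): periodic timers reschedule themselves at the end of each run, and cancellation removes the id so the next firing becomes a no-op.

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;

// Illustrative timer registry: ids map to "still active" markers, periodic tasks reschedule themselves.
public class SimpleTimers {
  private final ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor();
  private final AtomicLong idCounter = new AtomicLong();
  private final ConcurrentMap<Long, Boolean> active = new ConcurrentHashMap<>();

  public long setTimer(final long delayMs, final boolean periodic, final Runnable handler) {
    final long id = idCounter.getAndIncrement();
    active.put(id, Boolean.TRUE);
    schedule(id, delayMs, periodic, handler);
    return id;
  }

  private void schedule(final long id, final long delayMs, final boolean periodic, final Runnable handler) {
    scheduler.schedule(
        () -> {
          if (!active.containsKey(id)) return; // cancelled in the meantime
          handler.run();
          if (periodic) {
            schedule(id, delayMs, periodic, handler); // reschedule, like the periodic branch above
          } else {
            active.remove(id);
          }
        },
        delayMs,
        TimeUnit.MILLISECONDS);
  }

  public boolean cancelTimer(long id) {
    return active.remove(id) != null;
  }
}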
예제 #28
0
  /*
   * Solves for times by processing samples in the active list in parallel.
   */
  private void solveParallelX(
      final ActiveList al,
      final float[][][] t,
      final int m,
      final float[][][] times,
      final int[][][] marks) {
    int nthread = Runtime.getRuntime().availableProcessors();
    ExecutorService es = Executors.newFixedThreadPool(nthread);
    CompletionService<Void> cs = new ExecutorCompletionService<Void>(es);
    ActiveList[] bl = new ActiveList[nthread];
    float[][] d = new float[nthread][];
    for (int ithread = 0; ithread < nthread; ++ithread) {
      bl[ithread] = new ActiveList();
      d[ithread] = new float[6];
    }
    final AtomicInteger ai = new AtomicInteger();
    int ntotal = 0;
    // int niter = 0;
    while (!al.isEmpty()) {
      ai.set(0); // initialize the shared block index to zero
      final int n = al.size(); // number of samples in active (A) list
      ntotal += n;
      final int mb = 32; // size of blocks of samples
      final int nb = 1 + (n - 1) / mb; // number of blocks of samples
      int ntask = min(nb, nthread); // number of tasks (threads to be used)
      for (int itask = 0; itask < ntask; ++itask) { // for each task, ...
        final ActiveList bltask = bl[itask]; // task-specific B list
        final float[] dtask = d[itask]; // task-specific work array
        cs.submit(
            new Callable<Void>() { // submit new task
              public Void call() {
                for (int ib = ai.getAndIncrement(); ib < nb; ib = ai.getAndIncrement()) {
                  int i = ib * mb; // beginning of block
                  int j = min(i + mb, n); // beginning of next block (or end)
                  for (int k = i; k < j; ++k) { // for each sample in block, ...
                    Sample s = al.get(k); // get k'th sample from A list
                    solveOne(t, m, times, marks, s, bltask, dtask); // process sample
                  }
                }
                bltask.setAllAbsent(); // needed when merging B lists below
                return null;
              }
            });
      }
      try {
        for (int itask = 0; itask < ntask; ++itask) cs.take();
      } catch (InterruptedException e) {
        throw new RuntimeException(e);
      }

      // Merge samples from all B lists to a new A list. As samples
      // are appended, their absent flags are set to false, so that
      // each sample is appended no more than once to the new A list.
      al.clear();
      for (int itask = 0; itask < ntask; ++itask) {
        al.appendIfAbsent(bl[itask]);
        bl[itask].clear();
      }
      // ++niter;
    }
    es.shutdown();
    // trace("solveParallel: ntotal="+ntotal);
    // trace("               nratio="+(float)ntotal/(float)(_n1*_n2*_n3));
  }
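The loop above hands out blocks of samples to a fixed number of tasks through a shared AtomicInteger. A self-contained sketch of that block-claiming pattern, applied to a plain array sum (names, sizes, and the workload are illustrative):

import java.util.Arrays;
import java.util.concurrent.CompletionService;
import java.util.concurrent.ExecutorCompletionService;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.atomic.AtomicInteger;

// Minimal sketch: worker tasks repeatedly claim the next block index from a shared counter.
public class BlockedParallelSum {
  public static void main(String[] args) throws Exception {
    final float[] data = new float[1_000_000];
    Arrays.fill(data, 1.0f);

    final int blockSize = 32 * 1024;
    final int numBlocks = 1 + (data.length - 1) / blockSize;
    final int numThreads = Runtime.getRuntime().availableProcessors();
    final int numTasks = Math.min(numBlocks, numThreads);

    ExecutorService es = Executors.newFixedThreadPool(numThreads);
    CompletionService<Double> cs = new ExecutorCompletionService<>(es);
    final AtomicInteger nextBlock = new AtomicInteger(); // shared block index

    for (int task = 0; task < numTasks; task++) {
      cs.submit(
          () -> {
            double sum = 0.0;
            for (int b = nextBlock.getAndIncrement(); b < numBlocks; b = nextBlock.getAndIncrement()) {
              int begin = b * blockSize;
              int end = Math.min(begin + blockSize, data.length);
              for (int i = begin; i < end; i++) sum += data[i];
            }
            return sum; // partial sum for the blocks this task claimed
          });
    }

    double total = 0.0;
    for (int task = 0; task < numTasks; task++) total += cs.take().get();
    es.shutdown();
    System.out.println("total = " + total); // expect 1000000.0
  }
}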
/**
 * CaffeineCachedDispatcher
 *
 * @author Ivan Voroshilin
 * @email [email protected]
 * @since 1.8
 */
@ThreadSafe
public class CaffeineCachedDispatcher implements Dispatcher {

  private static final Logger log = LoggerFactory.getLogger(CaffeineCachedDispatcher.class);

  private ExecutorService service;

  private IdGenerator idGenerator = new IdGenerator("ID_", new SystemDateSource());
  private int queueSize = 1024;
  private int threadsCount = Runtime.getRuntime().availableProcessors();
  private ConcurrentMap<Object, Object> cachedDispatchQueues;

  private volatile boolean started;
  private volatile boolean stopped;

  private CaffeineCachedDispatcher() {}

  public static Builder newBuilder() {
    return new CaffeineCachedDispatcher().new Builder();
  }

  public class Builder {

    private Builder() {}

    public Builder setQueueSize(int queueSize) {
      CaffeineCachedDispatcher.this.queueSize = queueSize;
      return this;
    }

    public Builder setIdGenerator(IdGenerator idGenerator) {
      CaffeineCachedDispatcher.this.idGenerator = idGenerator;
      return this;
    }

    public Builder setThreadsCount(int threadsCount) {
      CaffeineCachedDispatcher.this.threadsCount = threadsCount;
      return this;
    }

    public Builder setExecutorService(ExecutorService service) {
      CaffeineCachedDispatcher.this.service = service;
      return this;
    }

    public CaffeineCachedDispatcher build() {
      return CaffeineCachedDispatcher.this;
    }
  }

  @Override
  public CompletableFuture<Void> dispatchAsync(Runnable task) {
    return dispatchAsync(idGenerator.nextId(), task);
  }

  @Override
  @SuppressWarnings("unchecked")
  public CompletableFuture<Void> dispatchAsync(String dispatchId, Runnable task) {

    try {
      return (CompletableFuture<Void>)
          cachedDispatchQueues.compute(
              dispatchId,
              (k, queue) -> {
                CompletableFuture<Void> voidCompletableFuture =
                    (queue == null)
                        ? CompletableFuture.runAsync(task)
                        : ((CompletableFuture<Void>) queue).thenRunAsync(task);
                return voidCompletableFuture;
              });
    } catch (Throwable t) {
      log.warn(
          "Exception thrown when calling dispatchAsync for dispatchId[{}]", dispatchId, t);
      throw t;
    }
  }

  private static ExecutorService newDefaultForkJoinPool(int threadsCount) {
    return Executors.newWorkStealingPool(threadsCount);
  }

  public void start() {

    if (started) {
      throw new RuntimeException("Already started or in progress");
    }

    started = true;

    if (service == null) {
      service = newDefaultForkJoinPool(threadsCount);
    }
    cachedDispatchQueues =
        Caffeine.newBuilder().weakValues().executor(service).maximumSize(queueSize).build().asMap();
  }

  public void stop() {

    if (stopped) {
      throw new RuntimeException("Already stopped or in progress");
    }

    stopped = true;

    CompletableFuture<?>[] futures =
        cachedDispatchQueues
            .values()
            .stream()
            .filter(v -> v != null)
            .toArray(CompletableFuture<?>[]::new);

    CompletableFuture.allOf(futures).join();

    service.shutdown();
  }
}
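A minimal sketch of the same per-key ordering idea without Caffeine (class name illustrative, and unlike the dispatcher above it never evicts idle keys): ConcurrentMap.compute chains each new task onto the tail CompletableFuture for its dispatchId, so tasks sharing an id run in submission order while different ids run concurrently.

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

// Illustrative per-key ordered dispatcher built directly on ConcurrentHashMap.
public class KeyOrderedDispatcher {
  private final ConcurrentMap<String, CompletableFuture<Void>> queues = new ConcurrentHashMap<>();

  public CompletableFuture<Void> dispatchAsync(String dispatchId, Runnable task) {
    // compute() runs atomically per key: append the task to that key's chain.
    return queues.compute(
        dispatchId,
        (id, tail) -> tail == null ? CompletableFuture.runAsync(task) : tail.thenRunAsync(task));
  }

  public static void main(String[] args) {
    KeyOrderedDispatcher d = new KeyOrderedDispatcher();
    for (int i = 0; i < 5; i++) {
      final int n = i;
      d.dispatchAsync("user-42", () -> System.out.println("user-42 task " + n)); // runs in order
      d.dispatchAsync("user-43", () -> System.out.println("user-43 task " + n)); // runs in order
    }
    // Wait for both chains to drain before the JVM exits.
    CompletableFuture.allOf(
            d.dispatchAsync("user-42", () -> {}), d.dispatchAsync("user-43", () -> {}))
        .join();
  }
}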
  @SuppressWarnings({"unchecked"})
  public final void execute(final Callback callback)
      throws MojoExecutionException, MojoFailureException {
    if (!skip) {
      if (header == null) {
        warn("No header file specified to check for license");
        return;
      }
      if (!strictCheck) {
        warn(
            "Property 'strictCheck' is not enabled. Please consider adding <strictCheck>true</strictCheck> in your pom.xml file.");
        warn("See http://mycila.github.io/license-maven-plugin for more information.");
      }

      finder = new ResourceFinder(basedir);
      try {
        finder.setCompileClassPath(project.getCompileClasspathElements());
      } catch (DependencyResolutionRequiredException e) {
        throw new MojoExecutionException(e.getMessage(), e);
      }
      finder.setPluginClassPath(getClass().getClassLoader());

      final Header h = new Header(finder.findResource(this.header), encoding, headerSections);
      debug("Header %s:\n%s", h.getLocation(), h);

      if (this.validHeaders == null) {
        this.validHeaders = new String[0];
      }
      final List<Header> validHeaders = new ArrayList<Header>(this.validHeaders.length);
      for (String validHeader : this.validHeaders) {
        validHeaders.add(new Header(finder.findResource(validHeader), encoding, headerSections));
      }

      final List<PropertiesProvider> propertiesProviders = new LinkedList<PropertiesProvider>();
      for (PropertiesProvider provider :
          ServiceLoader.load(
              PropertiesProvider.class, Thread.currentThread().getContextClassLoader())) {
        propertiesProviders.add(provider);
      }
      final DocumentPropertiesLoader propertiesLoader =
          new DocumentPropertiesLoader() {
            @Override
            public Properties load(Document document) {
              Properties props = new Properties();

              for (Map.Entry<String, String> entry : mergeProperties(document).entrySet()) {
                if (entry.getValue() != null) {
                  props.setProperty(entry.getKey(), entry.getValue());
                } else {
                  props.remove(entry.getKey());
                }
              }
              for (PropertiesProvider provider : propertiesProviders) {
                try {
                  final Map<String, String> providerProperties =
                      provider.getAdditionalProperties(AbstractLicenseMojo.this, props, document);
                  if (getLog().isDebugEnabled()) {
                    getLog()
                        .debug(
                            "provider: "
                                + provider.getClass()
                                + " brought new properties\n"
                                + providerProperties);
                  }
                  for (Map.Entry<String, String> entry : providerProperties.entrySet()) {
                    if (entry.getValue() != null) {
                      props.setProperty(entry.getKey(), entry.getValue());
                    } else {
                      props.remove(entry.getKey());
                    }
                  }
                } catch (Exception e) {
                  getLog().warn("failure occurred while calling " + provider.getClass(), e);
                }
              }
              return props;
            }
          };

      final DocumentFactory documentFactory =
          new DocumentFactory(
              basedir,
              buildMapping(),
              buildHeaderDefinitions(),
              encoding,
              keywords,
              propertiesLoader);

      int nThreads = (int) (Runtime.getRuntime().availableProcessors() * concurrencyFactor);
      ExecutorService executorService = Executors.newFixedThreadPool(nThreads);
      CompletionService<Void> completionService =
          new ExecutorCompletionService<Void>(executorService);
      int count = 0;
      debug("Number of execution threads: %s", nThreads);

      try {
        for (final String file : listSelectedFiles()) {
          completionService.submit(
              new Runnable() {
                @Override
                public void run() {
                  Document document = documentFactory.createDocuments(file);
                  debug(
                      "Selected file: %s [header style: %s]",
                      document.getFilePath(), document.getHeaderDefinition());
                  if (document.isNotSupported()) {
                    callback.onUnknownFile(document, h);
                  } else if (document.is(h)) {
                    debug("Skipping header file: %s", document.getFilePath());
                  } else if (document.hasHeader(h, strictCheck)) {
                    callback.onExistingHeader(document, h);
                  } else {
                    boolean headerFound = false;
                    for (Header validHeader : validHeaders) {
                      if (headerFound = document.hasHeader(validHeader, strictCheck)) {
                        callback.onExistingHeader(document, h);
                        break;
                      }
                    }
                    if (!headerFound) {
                      callback.onHeaderNotFound(document, h);
                    }
                  }
                }
              },
              null);
          count++;
        }

        while (count-- > 0) {
          try {
            completionService.take().get();
          } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
          } catch (ExecutionException e) {
            Throwable cause = e.getCause();
            if (cause instanceof Error) {
              throw (Error) cause;
            }
            if (cause instanceof MojoExecutionException) {
              throw (MojoExecutionException) cause;
            }
            if (cause instanceof MojoFailureException) {
              throw (MojoFailureException) cause;
            }
            if (cause instanceof RuntimeException) {
              throw (RuntimeException) cause;
            }
            throw new RuntimeException(cause.getMessage(), cause);
          }
        }

      } finally {
        executorService.shutdownNow();
      }
    }
  }
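A compact, self-contained sketch of the submit/count/drain pattern used above (the file names and the 1.5 concurrency factor are illustrative, not taken from the plugin): submit one Runnable per input, remember how many were submitted, then take() exactly that many completions and unwrap the cause of any ExecutionException.

import java.util.concurrent.CompletionService;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorCompletionService;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

// Illustrative fan-out: one task per file, drained through a CompletionService.
public class FanOutChecker {
  public static void main(String[] args) throws Exception {
    int nThreads = (int) (Runtime.getRuntime().availableProcessors() * 1.5); // concurrency factor
    ExecutorService executor = Executors.newFixedThreadPool(nThreads);
    CompletionService<Void> completion = new ExecutorCompletionService<>(executor);

    String[] files = {"a.java", "b.java", "c.java"}; // illustrative inputs
    int count = 0;
    try {
      for (final String file : files) {
        // Runnable plus a null result: we only care about completion and possible failure.
        completion.submit(() -> System.out.println("checking " + file), null);
        count++;
      }
      while (count-- > 0) {
        try {
          completion.take().get(); // blocks until some task finishes; rethrows its failure
        } catch (ExecutionException e) {
          Throwable cause = e.getCause();
          if (cause instanceof Error) throw (Error) cause;
          if (cause instanceof RuntimeException) throw (RuntimeException) cause;
          throw new RuntimeException(cause.getMessage(), cause);
        }
      }
    } finally {
      executor.shutdownNow(); // nothing useful is left to run once all results are drained
    }
  }
}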