private void initFromDatabase() throws SQLException, BlockStoreException {
    Statement s = conn.get().createStatement();
    ResultSet rs;

    rs = s.executeQuery("SELECT value FROM settings WHERE name = '" + CHAIN_HEAD_SETTING + "'");
    if (!rs.next()) {
      throw new BlockStoreException("corrupt Postgres block store - no chain head pointer");
    }
    Sha256Hash hash = new Sha256Hash(rs.getBytes(1));
    rs.close();
    this.chainHeadBlock = get(hash);
    this.chainHeadHash = hash;
    if (this.chainHeadBlock == null) {
      throw new BlockStoreException("corrupt Postgres block store - head block not found");
    }

    rs =
        s.executeQuery(
            "SELECT value FROM settings WHERE name = '" + VERIFIED_CHAIN_HEAD_SETTING + "'");
    if (!rs.next()) {
      throw new BlockStoreException(
          "corrupt Postgres block store - no verified chain head pointer");
    }
    hash = new Sha256Hash(rs.getBytes(1));
    rs.close();
    s.close();
    this.verifiedChainHeadBlock = get(hash);
    this.verifiedChainHeadHash = hash;
    if (this.verifiedChainHeadBlock == null) {
      throw new BlockStoreException("corrupt Postgres block store - verified head block not found");
    }
  }
 public StoredTransactionOutput getTransactionOutput(Sha256Hash hash, long index)
     throws BlockStoreException {
   maybeConnect();
   PreparedStatement s = null;
   try {
     s =
         conn.get()
             .prepareStatement(
                 "SELECT height, value, scriptBytes FROM openOutputs "
                     + "WHERE hash = ? AND index = ?");
     s.setBytes(1, hash.getBytes());
     // index is actually an unsigned int
     s.setInt(2, (int) index);
     ResultSet results = s.executeQuery();
     if (!results.next()) {
       return null;
     }
     // Parse it.
     int height = results.getInt(1);
     BigInteger value = new BigInteger(results.getBytes(2));
     // Tell the StoredTransactionOutput that we are a coinbase, as that is encoded in height
     StoredTransactionOutput txout =
         new StoredTransactionOutput(hash, index, value, height, true, results.getBytes(3));
     return txout;
   } catch (SQLException ex) {
     throw new BlockStoreException(ex);
   } finally {
     if (s != null)
       try {
         s.close();
       } catch (SQLException e) {
         throw new BlockStoreException("Failed to close PreparedStatement");
       }
   }
 }
  /**
   * Switches the builder's proxyBuilder during the execution of a closure.<br>
    * This is useful to temporarily change the building context to another builder without the need for
   * a contrived setup. It will also take care of restoring the previous proxyBuilder when the
   * execution finishes, even if an exception was thrown from inside the closure.
   *
   * @param builder the temporary builder to switch to as proxyBuilder.
   * @param closure the closure to be executed under the temporary builder.
   * @return the execution result of the closure.
   * @throws RuntimeException - any exception the closure might have thrown during execution.
   */
  public Object withBuilder(FactoryBuilderSupport builder, Closure closure) {
    if (builder == null || closure == null) {
      return null;
    }

    Object result = null;
    Object previousContext = getProxyBuilder().getContext();
    FactoryBuilderSupport previousProxyBuilder = localProxyBuilder.get();
    try {
      localProxyBuilder.set(builder);
      closure.setDelegate(builder);
      result = closure.call();
    } catch (RuntimeException e) {
      // remove contexts created after we started
      localProxyBuilder.set(previousProxyBuilder);
      if (getProxyBuilder().getContexts().contains(previousContext)) {
        Map<String, Object> context = getProxyBuilder().getContext();
        while (context != null && context != previousContext) {
          getProxyBuilder().popContext();
          context = getProxyBuilder().getContext();
        }
      }
      throw e;
    } finally {
      localProxyBuilder.set(previousProxyBuilder);
    }

    return result;
  }
    public void invoke(MethodInvocation invocation) throws Throwable {
      if (current.get() != null) {
        // Already invoking a method on the mix-in
        return;
      }

      if (instance == null) {
        instance = DirectInstantiator.INSTANCE.newInstance(mixInClass, proxy);
      }
      MethodInvocation beanInvocation =
          new MethodInvocation(
              invocation.getName(),
              invocation.getReturnType(),
              invocation.getGenericReturnType(),
              invocation.getParameterTypes(),
              instance,
              invocation.getParameters());
      current.set(beanInvocation);
      try {
        next.invoke(beanInvocation);
      } finally {
        current.set(null);
      }
      if (beanInvocation.found()) {
        invocation.setResult(beanInvocation.getResult());
      }
    }
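    // A minimal sketch (field shapes assumed, not the original source) of the state invoke()
    // relies on: the ThreadLocal holds the invocation currently being dispatched to the mix-in,
    // so a re-entrant call made from inside the mix-in is detected and short-circuited.
    private final ThreadLocal<MethodInvocation> current = new ThreadLocal<MethodInvocation>();
    private final Class<?> mixInClass; // class supplying the mixed-in behaviour
    private final Object proxy;        // object the mix-in is attached to
    private Object instance;           // lazily instantiated mix-in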
  private void putUpdateStoredBlock(StoredBlock storedBlock, boolean wasUndoable)
      throws SQLException {
    try {
      PreparedStatement s =
          conn.get()
              .prepareStatement(
                  "INSERT INTO headers(hash, chainWork, height, header, wasUndoable)"
                      + " VALUES(?, ?, ?, ?, ?)");
      // We skip the first 4 bytes because (on prodnet) the minimum target has 4 0-bytes
      byte[] hashBytes = new byte[28];
      System.arraycopy(storedBlock.getHeader().getHash().getBytes(), 3, hashBytes, 0, 28);
      s.setBytes(1, hashBytes);
      s.setBytes(2, storedBlock.getChainWork().toByteArray());
      s.setInt(3, storedBlock.getHeight());
      s.setBytes(4, storedBlock.getHeader().unsafeRimbitSerialize());
      s.setBoolean(5, wasUndoable);
      s.executeUpdate();
      s.close();
    } catch (SQLException e) {
      // It is possible we try to add a duplicate StoredBlock if we upgraded
      // In that case, we just update the entry to mark it wasUndoable
      if (!(e.getSQLState().equals(POSTGRES_DUPLICATE_KEY_ERROR_CODE)) || !wasUndoable) throw e;

      PreparedStatement s =
          conn.get().prepareStatement("UPDATE headers SET wasUndoable=? WHERE hash=?");
      s.setBoolean(1, true);
      // We skip the first 4 bytes because (on prodnet) the minimum target has 4 0-bytes
      byte[] hashBytes = new byte[28];
      System.arraycopy(storedBlock.getHeader().getHash().getBytes(), 3, hashBytes, 0, 28);
      s.setBytes(2, hashBytes);
      s.executeUpdate();
      s.close();
    }
  }
  /**
   * Returns the association with the given {@link ObjectAssociation#getId() id}.
   *
   * <p>This is overridable because {@link
   * org.apache.isis.core.metamodel.specloader.specimpl.standalonelist.ObjectSpecificationOnStandaloneList}
   * simply returns <tt>null</tt>.
   *
   * <p>TODO put fields into hash.
   *
   * <p>TODO: could this be made final? (ie does the framework ever call this method for an {@link
   * org.apache.isis.core.metamodel.specloader.specimpl.standalonelist.ObjectSpecificationOnStandaloneList})
   */
  @Override
  public ObjectAssociation getAssociation(final String id) {
    ObjectAssociation oa = getAssociationWithId(id);
    if (oa != null) {
      return oa;
    }
    if (!getDeploymentCategory().isProduction()) {
      // automatically refresh if not in production
      // (better support for jrebel)

      LOG.warn("Could not find association with id '" + id + "'; invalidating cache automatically");
      if (!invalidatingCache.get()) {
        // make sure we don't go into an infinite loop, though.
        try {
          invalidatingCache.set(true);
          getSpecificationLookup().invalidateCache(getCorrespondingClass());
        } finally {
          invalidatingCache.set(false);
        }
      } else {
        LOG.warn("... already invalidating cache earlier in stacktrace, so skipped this time");
      }
      oa = getAssociationWithId(id);
      if (oa != null) {
        return oa;
      }
    }
    throw new ObjectSpecificationException(
        "No association called '" + id + "' in '" + getSingularName() + "'");
  }
  protected void send(Message message, boolean waitForConnected) {
    checkRunning();

    if (waitForConnected) {
      // Make sure we aren't still connecting
      waitForConnected();
    }

    ByteBuffer buffer = dataBuffer.get();
    if (buffer == null) {
      buffer = ByteBuffer.allocate(65536 + 2);
      dataBuffer.set(buffer);
    }
    buffer.clear();

    // Convert the message to bytes
    buffer = MessageProtocol.messageToBuffer(message, buffer);

    // Since we share the buffer between invocations, we will need to
    // copy this message's part out of it.  This is because we actually
    // do the send on a background thread.
    byte[] temp = new byte[buffer.remaining()];
    System.arraycopy(buffer.array(), buffer.position(), temp, 0, buffer.remaining());
    buffer = ByteBuffer.wrap(temp);

    if (message.isReliable() || fast == null) {
      if (reliable == null) throw new RuntimeException("No reliable connector configured");
      reliableAdapter.write(buffer);
    } else {
      fastAdapter.write(buffer);
    }
  }
 /**
  * Returns a thread-local instance of JSON ObjectMapper.
  *
  * @return ObjectMapper.
  */
 public static ObjectMapper getObjectMapper() {
   ObjectMapper objectMapper = tlObjectMapper.get();
   if (objectMapper == null) {
     objectMapper = initObjectMapper();
     tlObjectMapper.set(objectMapper);
   }
   return objectMapper;
 }
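 // A minimal sketch, assuming a holder like the one below backs getObjectMapper(); the
 // initObjectMapper() body shown here is hypothetical, only its name comes from the snippet.
 private static final ThreadLocal<ObjectMapper> tlObjectMapper = new ThreadLocal<ObjectMapper>();

 private static ObjectMapper initObjectMapper() {
   // Hypothetical default configuration; a real implementation would register modules,
   // date formats, serialization features, etc.
   return new ObjectMapper();
 }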
  public static Object runInTransaction(Callable fn) throws Exception {
    LockingTransaction t = transaction.get();
    if (t == null) transaction.set(t = new LockingTransaction());

    if (t.info != null) return fn.call();

    return t.run(fn);
  }
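  // Sketch of the per-thread transaction slot assumed by runInTransaction(): if the thread's
  // LockingTransaction is already running (t.info != null) the body joins it, otherwise a new
  // transaction drives the call. The declaration below is an assumption.
  static final ThreadLocal<LockingTransaction> transaction = new ThreadLocal<LockingTransaction>();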
 public synchronized void addBuild(JobInvocation build)
     throws ExecutionException, InterruptedException {
   builds.addVertex(build);
   for (JobInvocation up : state.get().getLastCompleted()) {
     String edge = up.toString() + " => " + build.toString();
     LOGGER.fine("added build to execution graph " + edge);
     builds.addEdge(up, build, edge);
   }
   state.get().setLastCompleted(build);
 }
  public static void provideRequestDuring(HttpServletRequest request, Callable<Void> callable)
      throws Exception {
    CURRENT_REQUEST.set(request);

    try {
      callable.call();
    } finally {
      CURRENT_REQUEST.set(null);
    }
  }
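  // Likely shape of the request holder used above (an assumption): the request is published to
  // the current thread for the duration of the callable and always cleared again in finally.
  private static final ThreadLocal<HttpServletRequest> CURRENT_REQUEST =
      new ThreadLocal<HttpServletRequest>();

  // Hypothetical usage:
  // provideRequestDuring(request, () -> { handle(request); return null; });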
 protected void setup(Slot s, Rendered r) {
   s.r = r;
   Slot pp = s.p = curp;
   if (pp == null) curref.set(this);
   try {
     curp = s;
     s.d = r.setup(this);
   } finally {
     if ((curp = pp) == null) curref.remove();
   }
 }
  private boolean startLoading(String name) {
    Set classesAndResources = (Set) beingLoaded.get();
    if (classesAndResources != null && classesAndResources.contains(name)) return false;

    if (classesAndResources == null) {
      classesAndResources = new HashSet(3);
      beingLoaded.set(classesAndResources);
    }
    classesAndResources.add(name);
    return true;
  }
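  // Sketch of the per-thread "currently loading" set assumed by startLoading(): it breaks
  // recursive attempts to load a class or resource that this thread is already resolving.
  // The raw type mirrors the snippet; the declaration itself is an assumption.
  private static final ThreadLocal beingLoaded = new ThreadLocal();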
  public void beginDatabaseBatchWrite() throws BlockStoreException {

    maybeConnect();
    if (log.isDebugEnabled())
      log.debug("Starting database batch write with connection: " + conn.get().toString());

    try {
      conn.get().setAutoCommit(false);
    } catch (SQLException e) {
      throw new BlockStoreException(e);
    }
  }
 /** Closes the connection bound to the current thread. */
 public static final void closeConnection() {
   Connection conn = conns.get();
   try {
     if (conn != null && !conn.isClosed()) {
       conn.setAutoCommit(true);
       conn.close();
       connectionContext.remove(Thread.currentThread().getId());
     }
   } catch (SQLException e) {
     log.error("Unabled to close connection!!! ", e);
   }
   conns.set(null);
 }
 /** Returns the default value for a new LogRecord's threadID. */
 private int defaultThreadID() {
   long tid = Thread.currentThread().getId();
   if (tid < MIN_SEQUENTIAL_THREAD_ID) {
     return (int) tid;
   } else {
     Integer id = threadIds.get();
     if (id == null) {
       id = nextThreadId.getAndIncrement();
       threadIds.set(id);
     }
     return id;
   }
 }
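  // Sketch of the supporting fields assumed by defaultThreadID(): small native thread ids are
  // used directly, larger ones are mapped to a compact per-thread id handed out sequentially.
  // Names follow the snippet; the constant's value and initialisation are assumptions.
  private static final int MIN_SEQUENTIAL_THREAD_ID = Integer.MAX_VALUE / 2;
  private static final java.util.concurrent.atomic.AtomicInteger nextThreadId =
      new java.util.concurrent.atomic.AtomicInteger(MIN_SEQUENTIAL_THREAD_ID);
  private static final ThreadLocal<Integer> threadIds = new ThreadLocal<Integer>();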
  /** Rolls back the current transaction. */
  private void rollbackCurrentTx() {
    try {
      TxContext ctx = txCtx.get();

      if (ctx != null) {
        txCtx.remove();

        GridCacheTx tx = cache.tx();

        if (tx != null) tx.rollback();
      }
    } catch (GridException e) {
      log.error("Failed to rollback cache transaction.", e);
    }
  }
 // When finished, invoker must ensure that selector is empty
 // by cancelling any related keys and explicitly releasing
 // the selector by invoking releaseTemporarySelector()
 static Selector getTemporarySelector(SelectableChannel sc) throws IOException {
   SoftReference ref = (SoftReference) localSelector.get();
   SelectorWrapper selWrapper = null;
   Selector sel = null;
   if (ref == null
       || ((selWrapper = (SelectorWrapper) ref.get()) == null)
       || ((sel = selWrapper.get()) == null)
       || (sel.provider() != sc.provider())) {
     sel = sc.provider().openSelector();
     localSelector.set(new SoftReference(new SelectorWrapper(sel)));
   } else {
     localSelectorWrapper.set(selWrapper);
   }
   return sel;
 }
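  // Sketch of the per-thread selector cache assumed above (declarations assumed): the
  // SoftReference lets the cached Selector be reclaimed under memory pressure, while the
  // wrapper stored in localSelectorWrapper keeps it strongly reachable while in use.
  private static final ThreadLocal localSelector = new ThreadLocal();
  private static final ThreadLocal localSelectorWrapper = new ThreadLocal();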
  /**
   * Calculate the balance for a coinbase, to-address, or p2sh address.
   *
   * @param address The address to calculate the balance of
   * @return The balance of the address supplied. If the address has not been seen, or there are no
   *     outputs open for this address, the return value is 0
    * @throws BlockStoreException if the balance lookup fails or the database cannot be accessed
   */
  public BigInteger calculateBalanceForAddress(Address address) throws BlockStoreException {
    maybeConnect();
    PreparedStatement s = null;

    try {
      s =
          conn.get()
              .prepareStatement(
                  "select sum(('x'||lpad(substr(value::text, 3, 50),16,'0'))::bit(64)::bigint) "
                      + "from openoutputs where toaddress = ?");
      s.setString(1, address.toString());
      ResultSet rs = s.executeQuery();
      if (rs.next()) {
        return BigInteger.valueOf(rs.getLong(1));
      } else {
        throw new BlockStoreException("Failed to execute balance lookup");
      }

    } catch (SQLException ex) {
      throw new BlockStoreException(ex);
    } finally {
      if (s != null)
        try {
          s.close();
        } catch (SQLException e) {
          throw new BlockStoreException("Could not close statement");
        }
    }
  }
  /** Returns a temporary buffer of at least the given size */
  public static ByteBuffer getTemporaryDirectBuffer(int size) {
    // If a buffer of this size is too large for the cache, there
    // should not be a buffer in the cache that is at least as
    // large. So we'll just create a new one. Also, we don't have
    // to remove the buffer from the cache (as this method does
    // below) given that we won't put the new buffer in the cache.
    if (isBufferTooLarge(size)) {
      return ByteBuffer.allocateDirect(size);
    }

    BufferCache cache = bufferCache.get();
    ByteBuffer buf = cache.get(size);
    if (buf != null) {
      return buf;
    } else {
      // No suitable buffer in the cache so we need to allocate a new
      // one. To avoid the cache growing then we remove the first
      // buffer from the cache and free it.
      if (!cache.isEmpty()) {
        buf = cache.removeFirst();
        free(buf);
      }
      return ByteBuffer.allocateDirect(size);
    }
  }
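  // Sketch of the per-thread buffer cache assumed by getTemporaryDirectBuffer(). Overriding
  // initialValue() guarantees bufferCache.get() never returns null, which the method relies on.
  // BufferCache is the accompanying helper class; this declaration is an assumption.
  private static final ThreadLocal<BufferCache> bufferCache =
      new ThreadLocal<BufferCache>() {
        @Override
        protected BufferCache initialValue() {
          return new BufferCache();
        }
      };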
  private void createTables() throws SQLException, BlockStoreException {
    Statement s = conn.get().createStatement();
    if (log.isDebugEnabled()) log.debug("PostgresFullPrunedBlockStore : CREATE headers table");
    s.executeUpdate(CREATE_HEADERS_TABLE);

    if (log.isDebugEnabled()) log.debug("PostgresFullPrunedBlockStore : CREATE settings table");
    s.executeUpdate(CREATE_SETTINGS_TABLE);

    if (log.isDebugEnabled())
      log.debug("PostgresFullPrunedBlockStore : CREATE undoable block table");
    s.executeUpdate(CREATE_UNDOABLE_TABLE);

    if (log.isDebugEnabled())
      log.debug("PostgresFullPrunedBlockStore : CREATE undoable block index");
    s.executeUpdate(CREATE_UNDOABLE_TABLE_INDEX);
    if (log.isDebugEnabled()) log.debug("PostgresFullPrunedBlockStore : CREATE open output table");
    s.executeUpdate(CREATE_OPEN_OUTPUT_TABLE);

    // Create indexes..
    s.executeUpdate(CREATE_HEADERS_HASH_INDEX);
    s.executeUpdate(CREATE_OUTPUT_ADDRESS_TYPE_INDEX);
    s.executeUpdate(CREATE_OUTPUTS_ADDRESS_INDEX);
    s.executeUpdate(CREATE_OUTPUTS_HASH_INDEX);
    s.executeUpdate(CREATE_OUTPUTS_HASH_INDEX_INDEX);
    s.executeUpdate(CREATE_UNDOABLE_HASH_INDEX);

    s.executeUpdate("INSERT INTO settings(name, value) VALUES('" + CHAIN_HEAD_SETTING + "', NULL)");
    s.executeUpdate(
        "INSERT INTO settings(name, value) VALUES('" + VERIFIED_CHAIN_HEAD_SETTING + "', NULL)");
    s.executeUpdate("INSERT INTO settings(name, value) VALUES('" + VERSION_SETTING + "', '03')");
    s.close();
    createNewStore(params);
  }
 /** Retrieves the current HTTP context, for the current thread. */
 public static Context current() {
   Context c = current.get();
   if (c == null) {
     throw new RuntimeException("There is no HTTP Context available from here.");
   }
   return c;
 }
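 // Sketch of the holder behind current() (an assumption based on the snippet): framework code
 // is expected to bind the Context to the thread before user code runs, and clear it afterwards.
 public static final ThreadLocal<Context> current = new ThreadLocal<Context>();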
 public void close() {
   cache.remove();
   rootObjectMapper.close();
   for (RootMapper rootMapper : rootMappersOrdered) {
     rootMapper.close();
   }
 }
 protected static HttpServletRequest currentRequest() {
   if (Stapler.getCurrentRequest() == null) {
     return CURRENT_REQUEST.get();
   } else {
     return Stapler.getCurrentRequest();
   }
 }
  public long getCurrentVersion() {
    if (_currentVersion.get() != null) {
      return _currentVersion.get();
    }

    long version =
        exec(
            new Callable<Long>() {
              public Long call(ConfStoreConnection conn) {
                return conn.getNextVersion();
              }
            });

    _currentVersion.set(version);
    return _currentVersion.get();
  }
    private double[] setFutureCosts(
        int sourceInputId,
        Derivation<TK, FV> hyp,
        CombinedFeaturizer<TK, FV> featurizer,
        Scorer<FV> scorer) {

      // Clear the future-cost cache if we have moved on to a new source input.
      MutableInteger lastId = tlTranslationId.get();
      @SuppressWarnings("rawtypes")
      Map<SegId, Double> fcCache = tlCache.get();
      if (lastId.intValue() != sourceInputId) {
        fcCache.clear();
        lastId.set(sourceInputId);
      }

      DTURule<TK> opt = (DTURule<TK>) concreteOpt.abstractRule;
      double[] fc = new double[opt.dtus.length];

      assert (segmentIdx == 0);
      for (int i = segmentIdx + 1; i < opt.dtus.length; ++i) {
        SegId<TK> id = new SegId<TK>(opt, i);
        Double score = fcCache.get(id);
        if (score == null) {
          Featurizable<TK, FV> f =
              new DTUFeaturizable<TK, FV>(hyp.sourceSequence, concreteOpt, sourceInputId, i);
          List<FeatureValue<FV>> phraseFeatures = featurizer.ruleFeaturize(f);
          score = scorer.getIncrementalScore(phraseFeatures);
          fcCache.put(id, score);
        }
        fc[i] = score;
        // System.err.printf("Future cost: id=%d phrase={%s} features=%s fc=%.3f\n",
        // translationId, opt.dtus[i], phraseFeatures, fc[i]);
      }
      return fc;
    }
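    // Sketch of the per-thread future-cost cache assumed by setFutureCosts() (declarations and
    // the initial value are assumptions): initialValue() guarantees non-null objects so the
    // method can read and mutate them directly.
    private static final ThreadLocal<MutableInteger> tlTranslationId =
        new ThreadLocal<MutableInteger>() {
          @Override
          protected MutableInteger initialValue() {
            return new MutableInteger(-1); // -1 = "no source input seen yet" (assumed sentinel)
          }
        };

    @SuppressWarnings("rawtypes")
    private static final ThreadLocal<Map<SegId, Double>> tlCache =
        new ThreadLocal<Map<SegId, Double>>() {
          @Override
          protected Map<SegId, Double> initialValue() {
            return new HashMap<SegId, Double>();
          }
        };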
 private void pagination(Invocation invocation, StatementHandler target) throws SQLException {
   final MetaObject metaStatementHandler = getMetaObject(target);
   final BoundSql boundSql = target.getBoundSql();
   Page<?> page = PAGE_THREAD_LOCAL.get();
   if (page == null) {
     page = findPageParameter(boundSql.getParameterObject());
   }
    // Only apply pagination when the parameters carry a Page object and the SQL statement is a SELECT
   String sql = boundSql.getSql().toLowerCase();
   if (sql.startsWith("select") && page != null) {
      // With physical (database-level) pagination, MyBatis's in-memory paging is no longer needed, so reset the two RowBounds values below
     metaStatementHandler.setValue("delegate.rowBounds.offset", RowBounds.NO_ROW_OFFSET);
     metaStatementHandler.setValue("delegate.rowBounds.limit", RowBounds.NO_ROW_LIMIT);
      // Populate the Page object with the total record count (and total page count)
     Connection connection = (Connection) invocation.getArgs()[0];
     MappedStatement mappedStatement =
         (MappedStatement) metaStatementHandler.getValue("delegate.mappedStatement");
     if (page.isCountTotal()) {
       int recordsTotal = getTotalCount(sql, connection, mappedStatement, boundSql);
       page.setTotalNum(recordsTotal);
     }
      // Finally, rewrite the SQL to apply the paging clause
     String pageSql = buildPageSql(sql, page);
     metaStatementHandler.setValue("delegate.boundSql.sql", pageSql);
   }
 }
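  // Sketch of how the Page object might reach the interceptor (an assumption): callers bind the
  // Page to the current thread before executing the query; the helpers below are hypothetical.
  private static final ThreadLocal<Page<?>> PAGE_THREAD_LOCAL = new ThreadLocal<Page<?>>();

  public static void startPage(Page<?> page) { // hypothetical helper
    PAGE_THREAD_LOCAL.set(page);
  }

  public static void clearPage() { // hypothetical helper, call in a finally block
    PAGE_THREAD_LOCAL.remove();
  }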
  private synchronized void maybeConnect() throws BlockStoreException {
    try {
      if (conn.get() != null) return;

      Properties props = new Properties();
      props.setProperty("user", this.username);
      props.setProperty("password", this.password);

      conn.set(DriverManager.getConnection(connectionURL, props));

      Connection connection = conn.get();
      allConnections.add(connection);
      log.info("Made a new connection to database " + connectionURL);
    } catch (SQLException ex) {
      throw new BlockStoreException(ex);
    }
  }
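  // Sketch of the connection bookkeeping assumed by maybeConnect() (field shapes assumed): each
  // thread gets its own JDBC Connection, while allConnections tracks every connection opened so
  // the store can close them all on shutdown.
  private final ThreadLocal<Connection> conn = new ThreadLocal<Connection>();
  private final List<Connection> allConnections = new LinkedList<Connection>();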
 /**
  * Proxy builders are useful for changing the building context, thus enabling mix &amp; match
  * builders.
  *
  * @return the current builder that serves as a proxy.
  */
 protected FactoryBuilderSupport getProxyBuilder() {
   FactoryBuilderSupport proxy = localProxyBuilder.get();
   if (proxy == null) {
     return globalProxyBuilder;
   } else {
     return proxy;
   }
 }
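  // Sketch of the builder fields assumed by getProxyBuilder() and withBuilder() above
  // (declarations assumed): the thread-local override, when set, shadows the global builder.
  protected FactoryBuilderSupport globalProxyBuilder;
  protected final ThreadLocal<FactoryBuilderSupport> localProxyBuilder =
      new ThreadLocal<FactoryBuilderSupport>();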
  public void abortDatabaseBatchWrite() throws BlockStoreException {

    maybeConnect();
    if (log.isDebugEnabled())
      log.debug("Rollback database batch write with connection: " + conn.get().toString());

    try {
      if (!conn.get().getAutoCommit()) {
        conn.get().rollback();
        conn.get().setAutoCommit(true);
      } else {
        log.warn("Warning: Rollback attempt without transaction");
      }
    } catch (SQLException e) {
      throw new BlockStoreException(e);
    }
  }