Example #1
  // An inner class for account
  private static class Account {
    private static Lock lock = new ReentrantLock(); // Create a new lock

    // Create a condition
    private static Condition newDeposit = lock.newCondition();

    private int balance = 0;

    public int getBalance() {
      return balance;
    }

    public void withdraw(int amount) {
      lock.lock(); // Acquire the lock
      try {
        while (balance < amount) {
          System.out.println("\t\t\tWait for a deposit");
          newDeposit.await();
        }

        balance -= amount;
        System.out.println("\t\t\tWithdraw " + amount + "\t\t" + getBalance());
      } catch (InterruptedException ex) {
        ex.printStackTrace();
      } finally {
        lock.unlock(); // Release the lock
      }
    }

    public void deposit(int amount) {
      lock.lock(); // Acquire the lock
      try {
        balance += amount;
        System.out.println("Deposit " + amount + "\t\t\t\t\t" + getBalance());

        newDeposit.signalAll();
      } finally {
        lock.unlock(); // Release the lock
      }
    }
  }
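The class above is the Account this example revolves around; a minimal driver sketch follows. The task bodies, amounts, and the ThreadCooperation name are illustrative assumptions (in the textbook layout, Account is nested inside a driver class like this one):

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

// Hypothetical driver: one task repeatedly deposits while the other withdraws,
// blocking on the newDeposit condition whenever the balance is too low.
public class ThreadCooperation {
  private static Account account = new Account();

  public static void main(String[] args) {
    System.out.println("Thread 1\t\tThread 2\t\tBalance");

    ExecutorService executor = Executors.newFixedThreadPool(2);
    executor.execute(() -> {
      try {
        while (true) {
          account.deposit((int) (Math.random() * 10) + 1);
          Thread.sleep(1000); // slow the producer so withdrawals sometimes block
        }
      } catch (InterruptedException ex) {
        Thread.currentThread().interrupt();
      }
    });
    executor.execute(() -> {
      while (true) account.withdraw((int) (Math.random() * 10) + 1);
    });
    executor.shutdown();
  }
}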
public class RandomCharacterGenerator extends Thread implements CharacterSource {
  private static char[] chars;
  private static String charArray = "abcdefghijklmnopqrstuvwxyz0123456789";

  static {
    chars = charArray.toCharArray();
  }

  private Random random;
  private CharacterEventHandler handler;
  private boolean done = true; // start "paused": run() awaits until setDone(false)
  private Lock lock = new ReentrantLock();
  private Condition cv = lock.newCondition();

  public RandomCharacterGenerator() {
    random = new Random();
    handler = new CharacterEventHandler();
  }

  public int getPauseTime() {
    return (int) (Math.max(1000, 5000 * random.nextDouble()));
  }

  public void addCharacterListener(CharacterListener cl) {
    handler.addCharacterListener(cl);
  }

  public void removeCharacterListener(CharacterListener cl) {
    handler.removeCharacterListener(cl);
  }

  public void nextCharacter() {
    handler.fireNewCharacter(this, (int) chars[random.nextInt(chars.length)]);
  }

  public void run() {
    lock.lock();
    try {
      while (true) {
        try {
          if (done) {
            cv.await();
          } else {
            nextCharacter();
            cv.await(getPauseTime(), TimeUnit.MILLISECONDS);
          }
        } catch (InterruptedException ie) {
          return;
        }
      }
    } finally {
      lock.unlock();
    }
  }

  public void setDone(boolean b) {
    lock.lock();
    try {
      done = b;

      if (!done) cv.signal();
    } finally {
      lock.unlock();
    }
  }
}
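A hedged usage sketch for the generator: it assumes the book's companion types, i.e. that CharacterListener is a one-method interface whose event exposes a public character field. start() immediately parks the thread on the condition because done is initially true.

// Hypothetical driver for RandomCharacterGenerator; the listener lambda and
// the ev.character field are assumptions about the companion CharacterEvent type.
public class GeneratorDemo {
  public static void main(String[] args) throws InterruptedException {
    RandomCharacterGenerator gen = new RandomCharacterGenerator();
    gen.addCharacterListener(ev -> System.out.print((char) ev.character));

    gen.start();        // run() awaits immediately: done starts out true
    gen.setDone(false); // signal the condition; characters begin to flow
    Thread.sleep(10_000);
    gen.setDone(true);  // generator parks on the condition again
    gen.interrupt();    // await() throws InterruptedException and run() returns
  }
}

Note that run() never explicitly releases the lock between iterations; this works only because Condition.await() atomically releases the lock while the thread is parked, which is what lets setDone() acquire it and flip the flag.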
/** IGFS worker for removal from the trash directory. */
public class IgfsDeleteWorker extends IgfsThread {
  /** Awake frequency. */
  private static final long FREQUENCY = 1000;

  /** How many files/folders to delete at once (i.e. in a single transaction). */
  private static final int MAX_DELETE_BATCH = 100;

  /** IGFS context. */
  private final IgfsContext igfsCtx;

  /** Metadata manager. */
  private final IgfsMetaManager meta;

  /** Data manager. */
  private final IgfsDataManager data;

  /** Event manager. */
  private final GridEventStorageManager evts;

  /** Logger. */
  private final IgniteLogger log;

  /** Lock. */
  private final Lock lock = new ReentrantLock();

  /** Condition. */
  private final Condition cond = lock.newCondition();

  /** Force worker to perform actual delete. */
  private boolean force;

  /** Cancellation flag. */
  private volatile boolean cancelled;

  /** Message topic. */
  private Object topic;

  /**
   * Constructor.
   *
   * @param igfsCtx IGFS context.
   */
  IgfsDeleteWorker(IgfsContext igfsCtx) {
    super(
        "igfs-delete-worker%"
            + igfsCtx.igfs().name()
            + "%"
            + igfsCtx.kernalContext().localNodeId()
            + "%");

    this.igfsCtx = igfsCtx;

    meta = igfsCtx.meta();
    data = igfsCtx.data();

    evts = igfsCtx.kernalContext().event();

    String igfsName = igfsCtx.igfs().name();

    topic = F.isEmpty(igfsName) ? TOPIC_IGFS : TOPIC_IGFS.topic(igfsName);

    assert meta != null;
    assert data != null;

    log = igfsCtx.kernalContext().log(IgfsDeleteWorker.class);
  }

  /** {@inheritDoc} */
  @Override
  protected void body() throws InterruptedException {
    if (log.isDebugEnabled()) log.debug("Delete worker started.");

    while (!cancelled) {
      lock.lock();

      try {
        if (!cancelled && !force) cond.await(FREQUENCY, TimeUnit.MILLISECONDS);

        force = false; // Reset force flag.
      } finally {
        lock.unlock();
      }

      if (!cancelled) delete();
    }
  }

  /** Notify the worker that a new entry to delete has appeared. */
  void signal() {
    lock.lock();

    try {
      force = true;

      cond.signalAll();
    } finally {
      lock.unlock();
    }
  }

  /** Cancel the worker. */
  void cancel() {
    cancelled = true;

    interrupt();
  }

  /** Perform cleanup of the trash directory. */
  private void delete() {
    IgfsFileInfo info = null;

    try {
      info = meta.info(TRASH_ID);
    } catch (ClusterTopologyServerNotFoundException e) {
      LT.warn(log, e, "Server nodes not found.");
    } catch (IgniteCheckedException e) {
      U.error(log, "Cannot obtain trash directory info.", e);
    }

    if (info != null) {
      for (Map.Entry<String, IgfsListingEntry> entry : info.listing().entrySet()) {
        IgniteUuid fileId = entry.getValue().fileId();

        if (log.isDebugEnabled())
          log.debug(
              "Deleting IGFS trash entry [name=" + entry.getKey() + ", fileId=" + fileId + ']');

        try {
          if (!cancelled) {
            if (delete(entry.getKey(), fileId)) {
              if (log.isDebugEnabled())
                log.debug(
                    "Sending delete confirmation message [name="
                        + entry.getKey()
                        + ", fileId="
                        + fileId
                        + ']');

              sendDeleteMessage(new IgfsDeleteMessage(fileId));
            }
          } else break;
        } catch (IgniteInterruptedCheckedException ignored) {
          // Ignore this exception while stopping.
        } catch (IgniteCheckedException e) {
          U.error(log, "Failed to delete entry from the trash directory: " + entry.getKey(), e);

          sendDeleteMessage(new IgfsDeleteMessage(fileId, e));
        }
      }
    }
  }

  /**
   * Remove particular entry from the TRASH directory.
   *
   * @param name Entry name.
   * @param id Entry ID.
   * @return {@code True} if the entry was actually deleted from the file system by this call.
   * @throws IgniteCheckedException If failed.
   */
  private boolean delete(String name, IgniteUuid id) throws IgniteCheckedException {
    assert name != null;
    assert id != null;

    while (true) {
      IgfsFileInfo info = meta.info(id);

      if (info != null) {
        if (info.isDirectory()) {
          deleteDirectory(TRASH_ID, id);

          if (meta.delete(TRASH_ID, name, id)) return true;
        } else {
          assert info.isFile();

          // Delete file content first.
          // In case this node crashes, other node will re-delete the file.
          data.delete(info).get();

          boolean ret = meta.delete(TRASH_ID, name, id);

          if (evts.isRecordable(EVT_IGFS_FILE_PURGED)) {
            if (info.path() != null)
              evts.record(
                  new IgfsEvent(
                      info.path(),
                      igfsCtx.kernalContext().discovery().localNode(),
                      EVT_IGFS_FILE_PURGED));
            else LT.warn(log, null, "Removing file without path info: " + info);
          }

          return ret;
        }
      } else return false; // Entry was deleted concurrently.
    }
  }

  /**
   * Recursively delete the contents of a trash directory or subdirectory.
   *
   * @param parentId Parent ID.
   * @param id Entry ID.
   * @throws IgniteCheckedException If delete failed for some reason.
   */
  private void deleteDirectory(IgniteUuid parentId, IgniteUuid id) throws IgniteCheckedException {
    assert parentId != null;
    assert id != null;

    while (true) {
      IgfsFileInfo info = meta.info(id);

      if (info != null) {
        assert info.isDirectory();

        Map<String, IgfsListingEntry> listing = info.listing();

        if (listing.isEmpty()) return; // Directory is empty.

        Map<String, IgfsListingEntry> delListing;

        if (listing.size() <= MAX_DELETE_BATCH) delListing = listing;
        else {
          delListing = new HashMap<>(MAX_DELETE_BATCH, 1.0f);

          int i = 0;

          for (Map.Entry<String, IgfsListingEntry> entry : listing.entrySet()) {
            delListing.put(entry.getKey(), entry.getValue());

            if (++i == MAX_DELETE_BATCH) break;
          }
        }

        GridCompoundFuture<Object, ?> fut = new GridCompoundFuture<>();

        // Delegate to child folders.
        for (IgfsListingEntry entry : delListing.values()) {
          if (!cancelled) {
            if (entry.isDirectory()) deleteDirectory(id, entry.fileId());
            else {
              IgfsFileInfo fileInfo = meta.info(entry.fileId());

              if (fileInfo != null) {
                assert fileInfo.isFile();

                fut.add(data.delete(fileInfo));
              }
            }
          } else return;
        }

        fut.markInitialized();

        // Wait for data cache to delete values before clearing meta cache.
        try {
          fut.get();
        } catch (IgniteFutureCancelledCheckedException ignore) {
          // This future can be cancelled only due to IGFS shutdown.
          cancelled = true;

          return;
        }

        // Actual delete of folder content.
        Collection<IgniteUuid> delIds = meta.delete(id, delListing);

        if (delListing == listing && delListing.size() == delIds.size())
          break; // All entries were deleted.
      } else break; // Entry was deleted concurrently.
    }
  }

  /**
   * Send delete message to all meta cache nodes in the grid.
   *
   * @param msg Message to send.
   */
  private void sendDeleteMessage(IgfsDeleteMessage msg) {
    assert msg != null;

    Collection<ClusterNode> nodes = meta.metaCacheNodes();

    for (ClusterNode node : nodes) {
      try {
        igfsCtx.send(node, topic, msg, GridIoPolicy.SYSTEM_POOL);
      } catch (IgniteCheckedException e) {
        U.warn(
            log,
            "Failed to send IGFS delete message to node [nodeId="
                + node.id()
                + ", msg="
                + msg
                + ", err="
                + e.getMessage()
                + ']');
      }
    }
  }
}
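Stripped of the IGFS specifics, the worker's loop is a reusable idiom: a timed await gives a periodic wakeup, and a force flag plus signalAll() lets signal() cut the wait short. A standalone sketch under illustrative names (none of these are Ignite APIs):

import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.Condition;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;

// Sketch of IgfsDeleteWorker's wakeup pattern: work every FREQUENCY ms,
// or immediately after signal() sets the force flag.
class PeriodicWorker extends Thread {
  private static final long FREQUENCY = 1000;

  private final Lock lock = new ReentrantLock();
  private final Condition cond = lock.newCondition();
  private boolean force;
  private volatile boolean cancelled;

  @Override public void run() {
    try {
      while (!cancelled) {
        lock.lock();
        try {
          if (!cancelled && !force) cond.await(FREQUENCY, TimeUnit.MILLISECONDS);
          force = false; // reset so the next iteration waits again
        } finally {
          lock.unlock();
        }
        if (!cancelled) doWork();
      }
    } catch (InterruptedException ignored) {
      // Interrupt means cancellation here, mirroring cancel() below.
    }
  }

  void signal() {
    lock.lock();
    try {
      force = true;
      cond.signalAll();
    } finally {
      lock.unlock();
    }
  }

  void cancel() {
    cancelled = true;
    interrupt();
  }

  private void doWork() { /* stands in for the actual batch delete */ }
}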
/**
 * Cache sequence implementation.
 *
 * @author 2012 Copyright (C) GridGain Systems
 * @version 4.0.2c.12042012
 */
public final class GridCacheAtomicSequenceImpl extends GridMetadataAwareAdapter
    implements GridCacheAtomicSequenceEx, Externalizable {
  /** Deserialization stash. */
  private static final ThreadLocal<GridTuple2<GridCacheContext, String>> stash =
      new ThreadLocal<GridTuple2<GridCacheContext, String>>() {
        @Override
        protected GridTuple2<GridCacheContext, String> initialValue() {
          return F.t2();
        }
      };

  /** Logger. */
  private GridLogger log;

  /** Sequence name. */
  private String name;

  /** Removed flag. */
  private volatile boolean rmvd;

  /** Sequence key. */
  private GridCacheInternalStorableKey key;

  /** Sequence projection. */
  private GridCacheProjection<GridCacheInternalStorableKey, GridCacheAtomicSequenceValue> seqView;

  /** Cache context. */
  private volatile GridCacheContext ctx;

  /** Local value of sequence. */
  private long locVal;

  /** Upper bound of local counter. */
  private long upBound;

  /** Sequence batch size. */
  private volatile int batchSize;

  /** Synchronization lock. */
  private final Lock lock = new ReentrantLock();

  /** Await condition. */
  private final Condition cond = lock.newCondition();

  /** Callable for executing the {@link #incrementAndGet} operation in sync and async modes. */
  private final Callable<Long> incAndGetCall = internalUpdate(1, true);

  /** Callable for executing the {@link #getAndIncrement} operation in sync and async modes. */
  private final Callable<Long> getAndIncCall = internalUpdate(1, false);

  /** Add and get cache call guard. */
  private final AtomicBoolean updateGuard = new AtomicBoolean();

  /** Empty constructor required by {@link Externalizable}. */
  public GridCacheAtomicSequenceImpl() {
    // No-op.
  }

  /**
   * Default constructor.
   *
   * @param name Sequence name.
   * @param key Sequence key.
   * @param seqView Sequence projection.
   * @param ctx CacheContext.
   * @param locVal Local counter.
   * @param upBound Upper bound.
   */
  public GridCacheAtomicSequenceImpl(
      String name,
      GridCacheInternalStorableKey key,
      GridCacheProjection<GridCacheInternalStorableKey, GridCacheAtomicSequenceValue> seqView,
      GridCacheContext ctx,
      long locVal,
      long upBound) {
    assert key != null;
    assert seqView != null;
    assert ctx != null;
    assert locVal <= upBound;

    batchSize = ctx.config().getAtomicSequenceReserveSize();
    this.ctx = ctx;
    this.key = key;
    this.seqView = seqView;
    this.upBound = upBound;
    this.locVal = locVal;
    this.name = name;

    log = ctx.gridConfig().getGridLogger().getLogger(getClass());
  }

  /** {@inheritDoc} */
  @Override
  public String name() {
    return name;
  }

  /** {@inheritDoc} */
  @Override
  public long get() throws GridException {
    checkRemoved();

    lock.lock();

    try {
      return locVal;
    } finally {
      lock.unlock();
    }
  }

  /** {@inheritDoc} */
  @Override
  public long incrementAndGet() throws GridException {
    return internalUpdate(1, incAndGetCall, true);
  }

  /** {@inheritDoc} */
  @Override
  public long getAndIncrement() throws GridException {
    return internalUpdate(1, getAndIncCall, false);
  }

  /** {@inheritDoc} */
  @Override
  public GridFuture<Long> incrementAndGetAsync() throws GridException {
    return internalUpdateAsync(1, incAndGetCall, true);
  }

  /** {@inheritDoc} */
  @Override
  public GridFuture<Long> getAndIncrementAsync() throws GridException {
    return internalUpdateAsync(1, getAndIncCall, false);
  }

  /** {@inheritDoc} */
  @Override
  public long addAndGet(long l) throws GridException {
    A.ensure(l > 0, "Parameter must not be less than 1: " + l);

    return internalUpdate(l, null, true);
  }

  /** {@inheritDoc} */
  @Override
  public GridFuture<Long> addAndGetAsync(long l) throws GridException {
    A.ensure(l > 0, "Parameter must not be less than 1: " + l);

    return internalUpdateAsync(l, null, true);
  }

  /** {@inheritDoc} */
  @Override
  public long getAndAdd(long l) throws GridException {
    A.ensure(l > 0, "Parameter must not be less than 1: " + l);

    return internalUpdate(l, null, false);
  }

  /** {@inheritDoc} */
  @Override
  public GridFuture<Long> getAndAddAsync(long l) throws GridException {
    A.ensure(l > 0, "Parameter must not be less than 1: " + l);

    return internalUpdateAsync(l, null, false);
  }

  /**
   * Synchronous sequence update operation. Will add given amount to the sequence value.
   *
   * @param l Increment amount.
   * @param updateCall Cache call that will update the sequence reservation count in accordance
   *     with {@code l}.
   * @param updated If {@code true}, will return sequence value after update, otherwise will return
   *     sequence value prior to update.
   * @return Sequence value.
   * @throws GridException If update failed.
   */
  private long internalUpdate(long l, @Nullable Callable<Long> updateCall, boolean updated)
      throws GridException {
    checkRemoved();

    assert l > 0;

    lock.lock();

    try {
      // If reserved range isn't exhausted.
      if (locVal + l <= upBound) {
        long curVal = locVal;

        locVal += l;

        return updated ? locVal : curVal;
      }
    } finally {
      lock.unlock();
    }

    if (updateCall == null) updateCall = internalUpdate(l, updated);

    while (true) {
      if (updateGuard.compareAndSet(false, true)) {
        try {
          // This call must be outside lock.
          return CU.outTx(updateCall, ctx);
        } finally {
          lock.lock();

          try {
            updateGuard.set(false);

            cond.signalAll();
          } finally {
            lock.unlock();
          }
        }
      } else {
        lock.lock();

        try {
          while (locVal >= upBound && updateGuard.get()) {
            try {
              cond.await(500, MILLISECONDS);
            } catch (InterruptedException e) {
              throw new GridInterruptedException(e);
            }
          }

          checkRemoved();

          // If reserved range isn't exhausted.
          if (locVal + l <= upBound) {
            long curVal = locVal;

            locVal += l;

            return updated ? locVal : curVal;
          }
        } finally {
          lock.unlock();
        }
      }
    }
  }

  /**
   * Asynchronous sequence update operation. Will add given amount to the sequence value.
   *
   * @param l Increment amount.
   * @param updateCall Cache call that will update the sequence reservation count in accordance
   *     with {@code l}.
   * @param updated If {@code true}, will return sequence value after update, otherwise will return
   *     sequence value prior to update.
   * @return Future indicating sequence value.
   * @throws GridException If update failed.
   */
  private GridFuture<Long> internalUpdateAsync(
      long l, @Nullable Callable<Long> updateCall, boolean updated) throws GridException {
    checkRemoved();

    A.ensure(l > 0, "Parameter must not be less than 1: " + l);

    lock.lock();

    try {
      // If reserved range isn't exhausted.
      if (locVal + l <= upBound) {
        long curVal = locVal;

        locVal += l;

        return new GridFinishedFuture<Long>(ctx.kernalContext(), updated ? locVal : curVal);
      }
    } finally {
      lock.unlock();
    }

    if (updateCall == null) updateCall = internalUpdate(l, updated);

    while (true) {
      if (updateGuard.compareAndSet(false, true)) {
        try {
          // This call must be outside lock.
          return ctx.closures().callLocalSafe(updateCall, true);
        } finally {
          lock.lock();

          try {
            updateGuard.set(false);

            cond.signalAll();
          } finally {
            lock.unlock();
          }
        }
      } else {
        lock.lock();

        try {
          while (locVal >= upBound && updateGuard.get()) {
            try {
              cond.await(500, MILLISECONDS);
            } catch (InterruptedException e) {
              throw new GridInterruptedException(e);
            }
          }

          checkRemoved();

          // If reserved range isn't exhausted.
          if (locVal + l <= upBound) {
            long curVal = locVal;

            locVal += l;

            return new GridFinishedFuture<Long>(ctx.kernalContext(), updated ? locVal : curVal);
          }
        } finally {
          lock.unlock();
        }
      }
    }
  }

  /**
   * Get local batch size for this sequence.
   *
   * @return Sequence batch size.
   */
  @Override
  public int batchSize() {
    return batchSize;
  }

  /**
   * Set local batch size for this sequence.
   *
   * @param size Sequence batch size. Must be greater than 0.
   */
  @Override
  public void batchSize(int size) {
    A.ensure(size > 0, "Batch size must be greater than 0: " + size);

    lock.lock();

    try {
      batchSize = size;
    } finally {
      lock.unlock();
    }
  }

  /**
   * Check removed status.
   *
   * @throws GridException If removed.
   */
  private void checkRemoved() throws GridException {
    if (rmvd)
      throw new GridCacheDataStructureRemovedException("Sequence was removed from cache: " + name);
  }

  /** {@inheritDoc} */
  @Override
  public boolean onRemoved() {
    return rmvd = true;
  }

  /** {@inheritDoc} */
  @Override
  public void onInvalid(@Nullable Exception err) {
    // No-op.
  }

  /** {@inheritDoc} */
  @Override
  public GridCacheInternalStorableKey key() {
    return key;
  }

  /** {@inheritDoc} */
  @Override
  public boolean removed() {
    return rmvd;
  }

  /**
   * Returns a callable for executing update operations in both sync and async modes.
   *
   * @param l Value to add to the sequence.
   * @param updated If {@code true}, the callable returns the updated value; if {@code false}, the
   *     previous value.
   * @return Callable for execution in async and sync mode.
   */
  @SuppressWarnings("TooBroadScope")
  private Callable<Long> internalUpdate(final long l, final boolean updated) {
    return new Callable<Long>() {
      @Override
      public Long call() throws Exception {
        GridCacheTx tx = CU.txStartInternal(ctx, seqView, PESSIMISTIC, REPEATABLE_READ);

        try {
          GridCacheAtomicSequenceValue seq = seqView.get(key);

          checkRemoved();

          assert seq != null;

          long curLocVal;

          long newUpBound;

          lock.lock();

          try {
            curLocVal = locVal;

            // If local range was already reserved in another thread.
            if (locVal + l <= upBound) {
              long retVal = locVal;

              locVal += l;

              return updated ? locVal : retVal;
            }

            long curGlobalVal = seq.get();

            long newLocVal;

            // Use an offset because the left side of the range is already reserved.
            long off = batchSize > 1 ? batchSize - 1 : 1;

            // Calculate new values for local counter, global counter and upper bound.
            if (curLocVal + l >= curGlobalVal) {
              newLocVal = curLocVal + l;

              newUpBound = newLocVal + off;
            } else {
              newLocVal = curGlobalVal;

              newUpBound = newLocVal + off;
            }

            locVal = newLocVal;
            upBound = newUpBound;

            if (updated) curLocVal = newLocVal;
          } finally {
            lock.unlock();
          }

          // Global counter must be greater than the reserved upper bound.
          seq.set(newUpBound + 1);

          seqView.put(key, seq);

          tx.commit();

          return curLocVal;
        } catch (Error e) {
          log.error("Failed to get and add: " + this, e);

          throw e;
        } catch (Exception e) {
          log.error("Failed to get and add: " + this, e);

          throw e;
        } finally {
          tx.end();
        }
      }
    };
  }

  /** {@inheritDoc} */
  @Override
  public void writeExternal(ObjectOutput out) throws IOException {
    out.writeObject(ctx);
    out.writeUTF(name);
  }

  /** {@inheritDoc} */
  @Override
  public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
    stash.get().set1((GridCacheContext) in.readObject());
    stash.get().set2(in.readUTF());
  }

  /**
   * Reconstructs object on demarshalling.
   *
   * @return Reconstructed object.
   * @throws ObjectStreamException Thrown in case of demarshalling error.
   */
  private Object readResolve() throws ObjectStreamException {
    GridTuple2<GridCacheContext, String> t = stash.get();

    try {
      return t.get1().dataStructures().sequence(t.get2(), 0L, false, false);
    } catch (GridException e) {
      throw U.withCause(new InvalidObjectException(e.getMessage()), e);
    }
  }

  /** {@inheritDoc} */
  @Override
  public String toString() {
    return S.toString(GridCacheAtomicSequenceImpl.class, this);
  }
}
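The sequence implementation above boils down to a locally reserved range with a single-flight refill: the fast path hands out values under the lock, and when the range is exhausted exactly one thread (gated by updateGuard) reserves the next batch outside the lock while the others wait on the condition. A simplified, single-JVM sketch, with an AtomicLong standing in for the cache-backed global counter (names are illustrative):

import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.locks.Condition;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;

// Sketch of the batch-reserving sequence; the AtomicLong plays the role of the
// distributed cache entry that GridCacheAtomicSequenceImpl updates in a transaction.
class BatchSequence {
  private final AtomicLong global = new AtomicLong();
  private final Lock lock = new ReentrantLock();
  private final Condition cond = lock.newCondition();
  private final AtomicBoolean updateGuard = new AtomicBoolean();
  private final long batchSize;
  private long locVal;  // last value handed out
  private long upBound; // inclusive upper bound of the reserved range

  BatchSequence(long batchSize) {
    this.batchSize = batchSize;
  }

  long incrementAndGet() throws InterruptedException {
    while (true) {
      lock.lock();
      try {
        if (locVal < upBound) return ++locVal; // fast path: range not exhausted
      } finally {
        lock.unlock();
      }

      if (updateGuard.compareAndSet(false, true)) {
        try {
          // Single-flight refill, performed outside the lock (like CU.outTx above).
          // Unlike the original, we don't re-check the range first, so a racing
          // winner may occasionally reserve a batch early, skipping unused values.
          long start = global.getAndAdd(batchSize);
          lock.lock();
          try {
            locVal = start;
            upBound = start + batchSize;
          } finally {
            lock.unlock();
          }
        } finally {
          lock.lock();
          try {
            updateGuard.set(false);
            cond.signalAll(); // wake threads waiting for the refill
          } finally {
            lock.unlock();
          }
        }
      } else {
        lock.lock();
        try {
          // Wait until the refiller finishes or the range has values again.
          while (locVal >= upBound && updateGuard.get()) cond.await(500, TimeUnit.MILLISECONDS);
        } finally {
          lock.unlock();
        }
      }
    }
  }
}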
Example #5
 Barrier(int num) {
   lock = new ReentrantLock();
   numWait = num; // number of threads that must arrive before the barrier opens
   workers = lock.newCondition();
 }
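Only the constructor survives in this fragment, so the rest is an assumption: a self-contained sketch of how such a lock-and-condition barrier is typically completed, with the last arriving thread signalling the rest. The await() logic below follows the standard pattern and is not recovered from the original source.

import java.util.concurrent.locks.Condition;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;

// Hypothetical completion of the Barrier fragment above. Single-use: making it
// reusable would require a generation counter, as in java.util.concurrent.CyclicBarrier.
class Barrier {
  private final Lock lock;
  private final Condition workers;
  private int numWait; // threads still to arrive

  Barrier(int num) {
    lock = new ReentrantLock();
    numWait = num;
    workers = lock.newCondition();
  }

  void await() throws InterruptedException {
    lock.lock();
    try {
      if (--numWait == 0) {
        workers.signalAll(); // last arrival releases everyone
      } else {
        while (numWait > 0) workers.await();
      }
    } finally {
      lock.unlock();
    }
  }
}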
class HostConnectionPool {

  private static final Logger logger = LoggerFactory.getLogger(HostConnectionPool.class);

  private static final int MAX_SIMULTANEOUS_CREATION = 1;

  public final Host host;
  public volatile HostDistance hostDistance;
  private final Session.Manager manager;

  private final List<Connection> connections;
  private final AtomicInteger open;
  private final AtomicBoolean isShutdown = new AtomicBoolean();
  private final Set<Connection> trash = new CopyOnWriteArraySet<Connection>();

  private volatile int waiter = 0;
  private final Lock waitLock = new ReentrantLock(true);
  private final Condition hasAvailableConnection = waitLock.newCondition();

  private final Runnable newConnectionTask;

  private final AtomicInteger scheduledForCreation = new AtomicInteger();

  public HostConnectionPool(Host host, HostDistance hostDistance, Session.Manager manager)
      throws ConnectionException {
    assert hostDistance != HostDistance.IGNORED;
    this.host = host;
    this.hostDistance = hostDistance;
    this.manager = manager;

    this.newConnectionTask =
        new Runnable() {
          @Override
          public void run() {
            addConnectionIfUnderMaximum();
            scheduledForCreation.decrementAndGet();
          }
        };

    // Create initial core connections
    List<Connection> l =
        new ArrayList<Connection>(options().getCoreConnectionsPerHost(hostDistance));
    try {
      for (int i = 0; i < options().getCoreConnectionsPerHost(hostDistance); i++)
        l.add(manager.connectionFactory().open(host));
    } catch (InterruptedException e) {
      Thread.currentThread().interrupt();
      // If interrupted, we can skip opening the core connections; the pool will still work.
      // Otherwise we ignore the interrupt, as it's unclear we could do much better currently.
    }
    this.connections = new CopyOnWriteArrayList<Connection>(l);
    this.open = new AtomicInteger(connections.size());

    logger.trace("Created connection pool to host {}", host);
  }

  private PoolingOptions options() {
    return manager.configuration().getPoolingOptions();
  }

  public Connection borrowConnection(long timeout, TimeUnit unit)
      throws ConnectionException, TimeoutException {
    if (isShutdown.get())
      // Note: throwing a ConnectionException is probably fine in practice as it will trigger the
      // creation of a new host.
      // That being said, maybe having a specific exception could be cleaner.
      throw new ConnectionException(host.getAddress(), "Pool is shutdown");

    if (connections.isEmpty()) {
      for (int i = 0; i < options().getCoreConnectionsPerHost(hostDistance); i++) {
        // We don't respect MAX_SIMULTANEOUS_CREATION here because it's only meant to
        // protect against creating connections in excess of core too quickly.
        scheduledForCreation.incrementAndGet();
        manager.executor().submit(newConnectionTask);
      }
      Connection c = waitForConnection(timeout, unit);
      c.setKeyspace(manager.poolsState.keyspace);
      return c;
    }

    int minInFlight = Integer.MAX_VALUE;
    Connection leastBusy = null;
    for (Connection connection : connections) {
      int inFlight = connection.inFlight.get();
      if (inFlight < minInFlight) {
        minInFlight = inFlight;
        leastBusy = connection;
      }
    }

    if (minInFlight >= options().getMaxSimultaneousRequestsPerConnectionThreshold(hostDistance)
        && connections.size() < options().getMaxConnectionsPerHost(hostDistance))
      maybeSpawnNewConnection();

    while (true) {
      int inFlight = leastBusy.inFlight.get();

      if (inFlight >= Connection.MAX_STREAM_PER_CONNECTION) {
        leastBusy = waitForConnection(timeout, unit);
        break;
      }

      if (leastBusy.inFlight.compareAndSet(inFlight, inFlight + 1)) break;
    }
    leastBusy.setKeyspace(manager.poolsState.keyspace);
    return leastBusy;
  }

  private void awaitAvailableConnection(long timeout, TimeUnit unit) throws InterruptedException {
    waitLock.lock();
    waiter++;
    try {
      hasAvailableConnection.await(timeout, unit);
    } finally {
      waiter--;
      waitLock.unlock();
    }
  }

  private void signalAvailableConnection() {
    // Quick check if it's worth signaling to avoid locking
    if (waiter == 0) return;

    waitLock.lock();
    try {
      hasAvailableConnection.signal();
    } finally {
      waitLock.unlock();
    }
  }

  private void signalAllAvailableConnection() {
    // Quick check if it's worth signaling to avoid locking
    if (waiter == 0) return;

    waitLock.lock();
    try {
      hasAvailableConnection.signalAll();
    } finally {
      waitLock.unlock();
    }
  }

  private Connection waitForConnection(long timeout, TimeUnit unit)
      throws ConnectionException, TimeoutException {
    long start = System.nanoTime();
    long remaining = timeout;
    do {
      try {
        awaitAvailableConnection(remaining, unit);
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        // If we're interrupted, still check whether a connection is available, but stop waiting
        // otherwise.
        timeout = 0; // makes the loop exit if we don't get a connection right away
      }

      if (isShutdown()) throw new ConnectionException(host.getAddress(), "Pool is shutdown");

      int minInFlight = Integer.MAX_VALUE;
      Connection leastBusy = null;
      for (Connection connection : connections) {
        int inFlight = connection.inFlight.get();
        if (inFlight < minInFlight) {
          minInFlight = inFlight;
          leastBusy = connection;
        }
      }

      while (true) {
        int inFlight = leastBusy.inFlight.get();

        if (inFlight >= Connection.MAX_STREAM_PER_CONNECTION) break;

        if (leastBusy.inFlight.compareAndSet(inFlight, inFlight + 1)) return leastBusy;
      }

      remaining = timeout - Cluster.timeSince(start, unit);
    } while (remaining > 0);

    throw new TimeoutException();
  }

  public void returnConnection(Connection connection) {
    int inFlight = connection.inFlight.decrementAndGet();

    if (connection.isDefunct()) {
      if (host.getMonitor().signalConnectionFailure(connection.lastException())) shutdown();
      else replace(connection);
    } else {

      if (trash.contains(connection) && inFlight == 0) {
        if (trash.remove(connection)) close(connection);
        return;
      }

      if (connections.size() > options().getCoreConnectionsPerHost(hostDistance)
          && inFlight <= options().getMinSimultaneousRequestsPerConnectionThreshold(hostDistance)) {
        trashConnection(connection);
      } else {
        signalAvailableConnection();
      }
    }
  }

  private boolean trashConnection(Connection connection) {
    // First, make sure we don't go below core connections
    for (; ; ) {
      int opened = open.get();
      if (opened <= options().getCoreConnectionsPerHost(hostDistance)) return false;

      if (open.compareAndSet(opened, opened - 1)) break;
    }
    trash.add(connection);
    connections.remove(connection);

    if (connection.inFlight.get() == 0 && trash.remove(connection)) close(connection);
    return true;
  }

  private boolean addConnectionIfUnderMaximum() {

    // First, make sure we don't cross the allowed limit of open connections
    for (; ; ) {
      int opened = open.get();
      if (opened >= options().getMaxConnectionsPerHost(hostDistance)) return false;

      if (open.compareAndSet(opened, opened + 1)) break;
    }

    if (isShutdown()) {
      open.decrementAndGet();
      return false;
    }

    // Now really open the connection
    try {
      connections.add(manager.connectionFactory().open(host));
      signalAvailableConnection();
      return true;
    } catch (InterruptedException e) {
      Thread.currentThread().interrupt();
      // Skip the open but ignore otherwise
      open.decrementAndGet();
      return false;
    } catch (ConnectionException e) {
      open.decrementAndGet();
      logger.debug("Connection error to {} while creating additional connection", host);
      if (host.getMonitor().signalConnectionFailure(e)) shutdown();
      return false;
    } catch (AuthenticationException e) {
      // This shouldn't really happen in theory
      open.decrementAndGet();
      logger.error(
          "Authentication error while creating additional connection (error is: {})",
          e.getMessage());
      shutdown();
      return false;
    }
  }

  private void maybeSpawnNewConnection() {
    while (true) {
      int inCreation = scheduledForCreation.get();
      if (inCreation >= MAX_SIMULTANEOUS_CREATION) return;
      if (scheduledForCreation.compareAndSet(inCreation, inCreation + 1)) break;
    }

    logger.debug("Creating new connection on busy pool to {}", host);
    manager.executor().submit(newConnectionTask);
  }

  private void replace(final Connection connection) {
    connections.remove(connection);

    manager
        .executor()
        .submit(
            new Runnable() {
              @Override
              public void run() {
                connection.close();
                addConnectionIfUnderMaximum();
              }
            });
  }

  private void close(final Connection connection) {
    manager
        .executor()
        .submit(
            new Runnable() {
              @Override
              public void run() {
                connection.close();
              }
            });
  }

  public boolean isShutdown() {
    return isShutdown.get();
  }

  public void shutdown() {
    try {
      shutdown(0, TimeUnit.MILLISECONDS);
    } catch (InterruptedException e) {
      Thread.currentThread().interrupt();
    }
  }

  public boolean shutdown(long timeout, TimeUnit unit) throws InterruptedException {
    if (!isShutdown.compareAndSet(false, true)) return true;

    logger.debug("Shutting down pool");

    // Wake up all waiting threads
    signalAllAvailableConnection();
    return discardAvailableConnections(timeout, unit);
  }

  public int opened() {
    return open.get();
  }

  private boolean discardAvailableConnections(long timeout, TimeUnit unit)
      throws InterruptedException {
    long start = System.nanoTime();
    boolean success = true;
    for (Connection connection : connections) {
      success &= connection.close(timeout - Cluster.timeSince(start, unit), unit);
      open.decrementAndGet();
    }
    return success;
  }

  // Creates connections if we have fewer than core connections (if we have
  // more than core, excess connections will just get trashed when possible).
  public void ensureCoreConnections() {
    if (isShutdown()) return;

    // Note: this process is a bit racy, but it doesn't matter since we're still guaranteed not to
    // create more connections than the maximum (and if a race creates more than core connections
    // without the load to justify them, the excess will be quickly trashed anyway).
    int opened = open.get();
    for (int i = opened; i < options().getCoreConnectionsPerHost(hostDistance); i++) {
      // We don't respect MAX_SIMULTANEOUS_CREATION here because it's only meant to
      // protect against creating connections in excess of core too quickly.
      scheduledForCreation.incrementAndGet();
      manager.executor().submit(newConnectionTask);
    }
  }

  static class PoolState {

    volatile String keyspace;

    public void setKeyspace(String keyspace) {
      this.keyspace = keyspace;
    }
  }
}
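The pool above pairs a fair lock with a volatile waiter count so that returnConnection() can skip locking entirely when nobody is blocked. A standalone sketch of that idiom under generic names (a hypothetical ResourcePool, not a driver API); as in the driver, the timed await bounds the window in which a signal sent between the availability check and the await could be missed:

import java.util.Queue;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.concurrent.locks.Condition;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;

// Sketch of the pool's wait/signal idiom: a waiter count lets releasers skip
// the lock when nobody is blocked, and waits are always bounded in time.
class ResourcePool<T> {
  private final Queue<T> available = new ConcurrentLinkedQueue<>();
  private final Lock waitLock = new ReentrantLock(true); // fair, like the pool above
  private final Condition hasAvailable = waitLock.newCondition();
  private volatile int waiters = 0;

  T borrow(long timeout, TimeUnit unit) throws InterruptedException, TimeoutException {
    long deadline = System.nanoTime() + unit.toNanos(timeout);
    while (true) {
      T item = available.poll();
      if (item != null) return item;

      long remaining = deadline - System.nanoTime();
      if (remaining <= 0) throw new TimeoutException();

      waitLock.lock();
      waiters++;
      try {
        hasAvailable.await(remaining, TimeUnit.NANOSECONDS);
      } finally {
        waiters--;
        waitLock.unlock();
      }
    }
  }

  void release(T item) {
    available.add(item);
    if (waiters == 0) return; // quick unlocked check, as in signalAvailableConnection()
    waitLock.lock();
    try {
      hasAvailable.signal();
    } finally {
      waitLock.unlock();
    }
  }
}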