/**
   * This is called directly when the connection to the node is gone, or when the node sends a
   * disconnect notification. Look for callers of this method!
   */
  public void notifyNodeDown(final long eventTime, final String nodeID) {

    if (!ha) {
      // there's no topology here
      return;
    }

    if (HornetQLogger.LOGGER.isDebugEnabled()) {
      HornetQLogger.LOGGER.debug(
          "nodeDown " + this + " nodeID=" + nodeID + " as being down", new Exception("trace"));
    }

    topology.removeMember(eventTime, nodeID);

    if (clusterConnection) {
      updateArraysAndPairs();
    } else {
      synchronized (this) {
        if (topology.isEmpty()) {
          // Reset the topology to its original condition, as if it were brand new
          receivedTopology = false;
          topologyArray = null;
        } else {
          updateArraysAndPairs();

          if (topology.nodes() == 1 && topology.getMember(this.nodeID) != null) {
            // Reset the topology to its original condition, as if it were brand new
            receivedTopology = false;
          }
        }
      }
    }
  }
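The topology calls above key every change by an event time. The sketch below illustrates one way such ordering can work, assuming (this is an illustration, not the real org.hornetq Topology class) that an update or removal carrying an older event time must never overwrite newer state:

import java.util.HashMap;
import java.util.Map;

// Illustrative event-time-ordered member map; names and semantics are assumptions.
class SimpleTopology<M> {
  private static final class Entry<T> {
    final long eventTime;
    final T member;

    Entry(long eventTime, T member) {
      this.eventTime = eventTime;
      this.member = member;
    }
  }

  private final Map<String, Entry<M>> members = new HashMap<String, Entry<M>>();

  public synchronized boolean updateMember(long eventTime, String nodeID, M member) {
    Entry<M> current = members.get(nodeID);
    if (current != null && current.eventTime > eventTime) {
      return false; // stale update: a newer entry is already in place
    }
    members.put(nodeID, new Entry<M>(eventTime, member));
    return true;
  }

  public synchronized boolean removeMember(long eventTime, String nodeID) {
    Entry<M> current = members.get(nodeID);
    if (current == null || current.eventTime > eventTime) {
      return false; // nothing to remove, or a newer update supersedes this removal
    }
    members.remove(nodeID);
    return true;
  }

  public synchronized boolean isEmpty() {
    return members.isEmpty();
  }

  public synchronized int nodes() {
    return members.size();
  }

  public synchronized M getMember(String nodeID) {
    Entry<M> entry = members.get(nodeID);
    return entry == null ? null : entry.member;
  }
}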
    private void run() {
      try {
        if (HornetQLogger.LOGGER.isDebugEnabled()) {
          HornetQLogger.LOGGER.debug("deleting temporary queue " + bindingName);
        }
        if (postOffice.getBinding(bindingName) != null) {
          postOffice.removeBinding(bindingName);
        }

        queue.deleteAllReferences();
      } catch (Exception e) {
        HornetQLogger.LOGGER.errorRemovingTempQueue(e, bindingName);
      }
    }
  public void notifyNodeUp(
      long uniqueEventID,
      final String nodeID,
      final Pair<TransportConfiguration, TransportConfiguration> connectorPair,
      final boolean last) {
    if (!ha) {
      // there's no topology
      return;
    }

    if (HornetQLogger.LOGGER.isDebugEnabled()) {
      HornetQLogger.LOGGER.debug(
          "NodeUp " + this + "::nodeID=" + nodeID + ", connectorPair=" + connectorPair,
          new Exception("trace"));
    }

    TopologyMember member = new TopologyMember(connectorPair.getA(), connectorPair.getB());

    topology.updateMember(uniqueEventID, nodeID, member);

    TopologyMember actMember = topology.getMember(nodeID);

    if (actMember != null
        && actMember.getConnector().getA() != null
        && actMember.getConnector().getB() != null) {
      HashSet<ClientSessionFactory> clonedFactories = new HashSet<ClientSessionFactory>();
      synchronized (factories) {
        clonedFactories.addAll(factories);
      }

      for (ClientSessionFactory factory : clonedFactories) {
        ((ClientSessionFactoryInternal) factory)
            .setBackupConnector(actMember.getConnector().getA(), actMember.getConnector().getB());
      }
    }

    updateArraysAndPairs();

    if (last) {
      synchronized (this) {
        receivedTopology = true;
        // Notify if waiting on getting topology
        notifyAll();
      }
    }
  }
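The `if (last)` branch above is the notifying half of a guarded wait: elsewhere, a thread blocks until receivedTopology becomes true. A minimal sketch of both halves, using hypothetical names rather than the real ServerLocatorImpl fields:

// Guarded-wait sketch: waitForTopology() blocks until topologyComplete() is called or the
// timeout elapses. Hypothetical class, shown only to illustrate the notifyAll() above.
final class TopologyWaiter {
  private boolean receivedTopology = false;

  // Called by a thread that must not proceed without a complete topology.
  public synchronized boolean waitForTopology(long timeoutMillis) throws InterruptedException {
    long deadline = System.currentTimeMillis() + timeoutMillis;
    while (!receivedTopology) {
      long remaining = deadline - System.currentTimeMillis();
      if (remaining <= 0) {
        return false; // timed out before the topology was complete
      }
      wait(remaining);
    }
    return true;
  }

  // Mirrors the 'if (last)' branch: mark the topology complete and wake every waiter.
  public synchronized void topologyComplete() {
    receivedTopology = true;
    notifyAll();
  }
}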
    public void run() {
      while (true) {
        try {
          if (thread.isInterrupted() && !started) {
            break;
          }

          Thread.sleep(measureInterval);
        } catch (InterruptedException ignore) {
          if (!started) {
            break;
          }
        }

        long maxMemory = runtime.maxMemory();

        long totalMemory = runtime.totalMemory();

        long freeMemory = runtime.freeMemory();

        long availableMemory = freeMemory + maxMemory - totalMemory;

        double availableMemoryPercent = 100.0 * availableMemory / maxMemory;

        StringBuilder info = new StringBuilder();
        info.append(String.format("free memory:      %s%n", SizeFormatterUtil.sizeof(freeMemory)));
        info.append(String.format("max memory:       %s%n", SizeFormatterUtil.sizeof(maxMemory)));
        info.append(String.format("total memory:     %s%n", SizeFormatterUtil.sizeof(totalMemory)));
        info.append(String.format("available memory: %.2f%%%n", availableMemoryPercent));

        if (HornetQLogger.LOGGER.isDebugEnabled()) {
          HornetQLogger.LOGGER.debug(info);
        }

        if (availableMemoryPercent <= memoryWarningThreshold) {
          HornetQLogger.LOGGER.memoryError(memoryWarningThreshold, info.toString());

          low = true;
        } else {
          low = false;
        }
      }
    }
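The arithmetic above treats available memory as the free part of the currently allocated heap plus the headroom the JVM may still claim (max - total). The same calculation as a standalone, runnable sketch using java.lang.Runtime:

// Standalone sketch of the availability calculation used by the measurement loop above.
public final class MemoryCheck {

  // available = free portion of the allocated heap + heap the JVM may still grow into
  public static double availableMemoryPercent(Runtime runtime) {
    long maxMemory = runtime.maxMemory();
    long totalMemory = runtime.totalMemory();
    long freeMemory = runtime.freeMemory();
    long availableMemory = freeMemory + maxMemory - totalMemory;
    return 100.0 * availableMemory / maxMemory;
  }

  public static void main(String[] args) {
    System.out.printf("available memory: %.2f%%%n", availableMemoryPercent(Runtime.getRuntime()));
  }
}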
      public ClientSessionFactory tryConnect() throws HornetQException {
        if (HornetQLogger.LOGGER.isDebugEnabled()) {
          HornetQLogger.LOGGER.debug(this + "::Trying to connect to " + factory);
        }
        try {
          ClientSessionFactoryInternal factoryToUse = factory;
          if (factoryToUse != null) {
            addToConnecting(factoryToUse);

            try {
              factoryToUse.connect(1, false);
            } finally {
              removeFromConnecting(factoryToUse);
            }
          }
          return factoryToUse;
        } catch (HornetQException e) {
          HornetQLogger.LOGGER.debug(
              this + "::Exception establishing the initial connector connection", e);
          return null;
        }
      }
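The addToConnecting/removeFromConnecting pair in the try/finally above tracks factories that are mid-connect, so that a concurrent close can still reach them (see the connectingFactories loop in doClose further down). A small sketch of such a registry, with illustrative names rather than the real ServerLocatorImpl fields:

import java.util.HashSet;
import java.util.Set;

// Illustrative registry of in-flight connection attempts; a concurrent close() can drain it
// and shut the tracked factories down. Hypothetical helper, not part of HornetQ.
final class ConnectingRegistry<F> {
  private final Set<F> connecting = new HashSet<F>();

  public void add(F factory) {
    synchronized (connecting) {
      connecting.add(factory);
    }
  }

  public void remove(F factory) {
    synchronized (connecting) {
      connecting.remove(factory);
    }
  }

  // Snapshot-and-clear, so close() never iterates the live set while connects are in progress.
  public Set<F> drain() {
    synchronized (connecting) {
      Set<F> copy = new HashSet<F>(connecting);
      connecting.clear();
      return copy;
    }
  }
}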
    public ClientSessionFactory connect() throws HornetQException {
      assertOpen();

      initialise();

      ClientSessionFactory csf = null;

      createConnectors();

      try {

        int retryNumber = 0;
        while (csf == null && !isClosed()) {
          retryNumber++;
          for (Connector conn : connectors) {
            if (HornetQLogger.LOGGER.isDebugEnabled()) {
              HornetQLogger.LOGGER.debug(this + "::Submitting connect towards " + conn);
            }

            csf = conn.tryConnect();

            if (csf != null) {
              csf.getConnection()
                  .addFailureListener(
                      new FailureListener() {
                        // If the node the cluster connection was connected to is gone,
                        // we need to restart the connection
                        public void connectionFailed(
                            HornetQException exception, boolean failedOver) {
                          if (clusterConnection
                              && exception.getType() == HornetQExceptionType.DISCONNECTED) {
                            try {
                              ServerLocatorImpl.this.start(startExecutor);
                            } catch (Exception e) {
                              // There isn't much to be done if this happens here
                              HornetQLogger.LOGGER.errorStartingLocator(e);
                            }
                          }
                        }

                        @Override
                        public String toString() {
                          return "FailureListener('restarts cluster connections')";
                        }
                      });

              if (HornetQLogger.LOGGER.isDebugEnabled()) {
                HornetQLogger.LOGGER.debug(
                    "Returning "
                        + csf
                        + " after "
                        + retryNumber
                        + " retries on StaticConnector "
                        + ServerLocatorImpl.this);
              }

              return csf;
            }
          }

          if (initialConnectAttempts >= 0 && retryNumber > initialConnectAttempts) {
            break;
          }

          if (!isClosed()) {
            Thread.sleep(retryInterval);
          }
        }

      } catch (RejectedExecutionException e) {
        HornetQLogger.LOGGER.debug("Rejected execution", e);
        throw e;
      } catch (Exception e) {
        HornetQLogger.LOGGER.errorConnectingToNodes(e);
        throw HornetQMessageBundle.BUNDLE.cannotConnectToStaticConnectors(e);
      }

      if (!isClosed()) {
        // no caught exception is in scope at this point, so log a fresh trace exception
        HornetQLogger.LOGGER.errorConnectingToNodes(new Exception("trace"));
        throw HornetQMessageBundle.BUNDLE.cannotConnectToStaticConnectors2();
      }
      return null;
    }
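Stripped of the HornetQ specifics, connect() above is a bounded retry loop: cycle over the connectors, sleep retryInterval between rounds, and give up after initialConnectAttempts rounds (a negative value meaning retry until closed). A minimal sketch of that policy with hypothetical names; the real method throws cannotConnectToStaticConnectors2 where this sketch returns null:

import java.util.List;
import java.util.concurrent.Callable;

// Bounded retry-loop sketch; each Callable is expected to return null on a failed attempt,
// the way tryConnect() does above.
final class RetryLoop {

  static <T> T connectWithRetry(List<Callable<T>> connectors,
                                int initialConnectAttempts,
                                long retryIntervalMillis) throws Exception {
    int retryNumber = 0;
    while (true) {
      retryNumber++;
      for (Callable<T> connector : connectors) {
        T result = connector.call();
        if (result != null) {
          return result; // first successful attempt wins
        }
      }
      if (initialConnectAttempts >= 0 && retryNumber > initialConnectAttempts) {
        return null; // exhausted the configured number of rounds
      }
      Thread.sleep(retryIntervalMillis);
    }
  }
}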
  private void doClose(final boolean sendClose) {
    if (state == STATE.CLOSED) {
      if (HornetQLogger.LOGGER.isDebugEnabled()) {
        HornetQLogger.LOGGER.debug(this + " is already closed when calling closed");
      }
      return;
    }

    state = STATE.CLOSING;

    if (discoveryGroup != null) {
      synchronized (this) {
        try {
          discoveryGroup.stop();
        } catch (Exception e) {
          HornetQLogger.LOGGER.failedToStopDiscovery(e);
        }
      }
    } else {
      staticConnector.disconnect();
    }

    synchronized (connectingFactories) {
      for (ClientSessionFactoryInternal csf : connectingFactories) {
        csf.close();
      }
      connectingFactories.clear();
    }

    Set<ClientSessionFactoryInternal> clonedFactory;
    synchronized (factories) {
      clonedFactory = new HashSet<ClientSessionFactoryInternal>(factories);

      factories.clear();
    }

    for (ClientSessionFactory factory : clonedFactory) {
      if (sendClose) {
        factory.close();
      } else {
        factory.cleanup();
      }
    }

    if (shutdownPool) {
      if (threadPool != null) {
        threadPool.shutdown();

        try {
          if (!threadPool.awaitTermination(10000, TimeUnit.MILLISECONDS)) {
            HornetQLogger.LOGGER.timedOutWaitingForTermination();
          }
        } catch (InterruptedException ignore) {
        }
      }

      if (scheduledThreadPool != null) {
        scheduledThreadPool.shutdown();

        try {
          if (!scheduledThreadPool.awaitTermination(10000, TimeUnit.MILLISECONDS)) {
            HornetQLogger.LOGGER.timedOutWaitingForScheduledPoolTermination();
          }
        } catch (InterruptedException ignore) {
        }
      }
    }
    readOnly = false;

    state = STATE.CLOSED;
  }
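The two pool shutdowns at the end of doClose follow one pattern: shutdown() to stop accepting work, then a bounded awaitTermination(), logging when the pool fails to drain in time. A reusable sketch of that pattern (a hypothetical helper, not part of HornetQ):

import java.util.concurrent.ExecutorService;
import java.util.concurrent.TimeUnit;

// Graceful-shutdown sketch mirroring the threadPool/scheduledThreadPool handling above.
final class PoolCloser {

  static void shutdownQuietly(ExecutorService pool, long timeoutMillis) {
    if (pool == null) {
      return;
    }
    pool.shutdown();
    try {
      if (!pool.awaitTermination(timeoutMillis, TimeUnit.MILLISECONDS)) {
        System.err.println("Timed out waiting for pool termination");
      }
    } catch (InterruptedException ignore) {
      // unlike the snippet above, restore the interrupt flag for callers further up the stack
      Thread.currentThread().interrupt();
    }
  }
}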