Example #1
  /**
   * This forces a journal duplication on bindings. Even with the scenario that generated it now
   * fixed, the server shouldn't hold off from starting.
   *
   * @throws Exception
   */
  @Test
  public void testForceDuplicationOnBindings() throws Exception {
    queue = server.createQueue(QUEUE, QUEUE, null, true, false);

    ClientSessionFactory factory = locator.createSessionFactory();
    ClientSession session = factory.createSession(false, false, false);

    ClientProducer producer = session.createProducer(QUEUE);

    producer.send(session.createMessage(true));
    session.commit();

    long queueID = server.getStorageManager().generateUniqueID();
    long txID = server.getStorageManager().generateUniqueID();

    // Forcing a situation where the server would unexpectedly create a duplicated queue. The server
    // should still start normally
    LocalQueueBinding newBinding =
        new LocalQueueBinding(
            QUEUE,
            new QueueImpl(queueID, QUEUE, QUEUE, null, true, false, null, null, null, null, null),
            server.getNodeID());
    server.getStorageManager().addQueueBinding(txID, newBinding);
    server.getStorageManager().commitBindings(txID);

    server.stop();

    // a duplicate binding would impede the server from starting
    server.start();
    waitForServer(server);

    server.stop();
  }
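
The test above leans on the transacted core-client send pattern. Below is a minimal, self-contained sketch of that pattern; the in-VM connector and the queue name are illustrative assumptions, not taken from the test.

import org.hornetq.api.core.TransportConfiguration;
import org.hornetq.api.core.client.ClientProducer;
import org.hornetq.api.core.client.ClientSession;
import org.hornetq.api.core.client.ClientSessionFactory;
import org.hornetq.api.core.client.HornetQClient;
import org.hornetq.api.core.client.ServerLocator;
import org.hornetq.core.remoting.impl.invm.InVMConnectorFactory;

public class TransactedSendSketch {
  public static void main(String[] args) throws Exception {
    // Illustrative assumption: an in-VM connector to a locally embedded server
    ServerLocator locator =
        HornetQClient.createServerLocatorWithoutHA(
            new TransportConfiguration(InVMConnectorFactory.class.getName()));
    ClientSessionFactory factory = locator.createSessionFactory();
    // xa=false, autoCommitSends=false, autoCommitAcks=false: a fully transacted session
    ClientSession session = factory.createSession(false, false, false);
    ClientProducer producer = session.createProducer("example.queue"); // hypothetical queue name
    producer.send(session.createMessage(true)); // durable message
    session.commit(); // the send only becomes visible after the commit
    session.close();
    factory.close();
    locator.close();
  }
}
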
Example #2
  public synchronized void deployBridge(final BridgeConfiguration config, final boolean start)
      throws Exception {
    if (config.getName() == null) {
      ClusterManagerImpl.log.warn(
          "Must specify a unique name for each bridge. This one will not be deployed.");

      return;
    }

    if (config.getQueueName() == null) {
      ClusterManagerImpl.log.warn(
          "Must specify a queue name for each bridge. This one will not be deployed.");

      return;
    }

    if (config.getForwardingAddress() == null) {
      ClusterManagerImpl.log.debug(
          "Forward address is not specified. Will use original message address instead");
    }

    if (bridges.containsKey(config.getName())) {
      ClusterManagerImpl.log.warn(
          "There is already a bridge with name "
              + config.getName()
              + " deployed. This one will not be deployed.");

      return;
    }

    Transformer transformer = instantiateTransformer(config.getTransformerClassName());

    Binding binding = postOffice.getBinding(new SimpleString(config.getQueueName()));

    if (binding == null) {
      ClusterManagerImpl.log.warn(
          "No queue found with name " + config.getQueueName() + " bridge will not be deployed.");

      return;
    }

    Queue queue = (Queue) binding.getBindable();

    ServerLocatorInternal serverLocator;

    if (config.getDiscoveryGroupName() != null) {
      DiscoveryGroupConfiguration discoveryGroupConfiguration =
          configuration.getDiscoveryGroupConfigurations().get(config.getDiscoveryGroupName());
      if (discoveryGroupConfiguration == null) {
        ClusterManagerImpl.log.warn(
            "No discovery group configured with name '"
                + config.getDiscoveryGroupName()
                + "'. The bridge will not be deployed.");

        return;
      }

      if (config.isHA()) {
        serverLocator =
            (ServerLocatorInternal)
                HornetQClient.createServerLocatorWithHA(discoveryGroupConfiguration);
      } else {
        serverLocator =
            (ServerLocatorInternal)
                HornetQClient.createServerLocatorWithoutHA(discoveryGroupConfiguration);
      }

    } else {
      TransportConfiguration[] tcConfigs = connectorNameListToArray(config.getStaticConnectors());

      if (tcConfigs == null) {
        return;
      }

      if (config.isHA()) {
        serverLocator = (ServerLocatorInternal) HornetQClient.createServerLocatorWithHA(tcConfigs);
      } else {
        serverLocator =
            (ServerLocatorInternal) HornetQClient.createServerLocatorWithoutHA(tcConfigs);
      }
    }

    serverLocator.setConfirmationWindowSize(config.getConfirmationWindowSize());

    // We are going to manually retry on the bridge in case of failure
    serverLocator.setReconnectAttempts(0);
    serverLocator.setInitialConnectAttempts(-1);
    serverLocator.setRetryInterval(config.getRetryInterval());
    serverLocator.setMaxRetryInterval(config.getMaxRetryInterval());
    serverLocator.setRetryIntervalMultiplier(config.getRetryIntervalMultiplier());
    serverLocator.setClientFailureCheckPeriod(config.getClientFailureCheckPeriod());
    serverLocator.setBlockOnDurableSend(!config.isUseDuplicateDetection());
    serverLocator.setBlockOnNonDurableSend(!config.isUseDuplicateDetection());
    serverLocator.setMinLargeMessageSize(config.getMinLargeMessageSize());
    // disable flow control
    serverLocator.setProducerWindowSize(-1);

    // This will be set to 30s unless it's changed from embedded / testing.
    // There is no reason to expose this timeout in the config,
    // since the Bridge is supposed to be non-blocking and fast.
    // We may expose it if we find a good use case.
    serverLocator.setCallTimeout(config.getCallTimeout());
    if (!config.isUseDuplicateDetection()) {
      log.debug(
          "Bridge "
              + config.getName()
              + " is configured to not use duplicate detecion, it will send messages synchronously");
    }

    clusterLocators.add(serverLocator);

    Bridge bridge =
        new BridgeImpl(
            serverLocator,
            config.getReconnectAttempts(),
            config.getRetryInterval(),
            config.getRetryIntervalMultiplier(),
            config.getMaxRetryInterval(),
            nodeUUID,
            new SimpleString(config.getName()),
            queue,
            executorFactory.getExecutor(),
            SimpleString.toSimpleString(config.getFilterString()),
            SimpleString.toSimpleString(config.getForwardingAddress()),
            scheduledExecutor,
            transformer,
            config.isUseDuplicateDetection(),
            config.getUser(),
            config.getPassword(),
            !backup,
            server.getStorageManager());

    bridges.put(config.getName(), bridge);

    managementService.registerBridge(bridge, config);

    if (start) {
      bridge.start();
    }
  }
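
A hedged sketch of how a BridgeConfiguration might be populated before being handed to deployBridge(config, start). Every name here is illustrative, and the sketch assumes a HornetQ release whose BridgeConfiguration offers a no-arg constructor plus plain setters; older releases populate it through a long constructor instead.

import java.util.Arrays;
import org.hornetq.core.config.BridgeConfiguration;

public class BridgeConfigSketch {
  // Builds a config that satisfies the checks deployBridge() performs:
  // a unique name, an existing local queue, and (optionally) a forwarding address.
  public static BridgeConfiguration exampleBridgeConfig() {
    BridgeConfiguration config = new BridgeConfiguration(); // assumes a no-arg constructor
    config.setName("my-bridge"); // must be unique among deployed bridges
    config.setQueueName("source.queue"); // must match an existing local queue binding
    config.setForwardingAddress("remote.address"); // null would keep the original message address
    config.setStaticConnectors(Arrays.asList("remote-connector")); // or set a discovery group name
    config.setUseDuplicateDetection(true); // otherwise the bridge falls back to blocking sends
    config.setHA(false);
    return config;
  }
}
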
Example #3
  @Test
  public void testSendPackets() throws Exception {
    setupServer(true);

    StorageManager storage = getStorage();

    manager = liveServer.getReplicationManager();
    waitForComponent(manager);

    Journal replicatedJournal = new ReplicatedJournal((byte) 1, new FakeJournal(), manager);

    replicatedJournal.appendPrepareRecord(1, new FakeData(), false);

    replicatedJournal.appendAddRecord(1, (byte) 1, new FakeData(), false);
    replicatedJournal.appendUpdateRecord(1, (byte) 2, new FakeData(), false);
    replicatedJournal.appendDeleteRecord(1, false);
    replicatedJournal.appendAddRecordTransactional(2, 2, (byte) 1, new FakeData());
    replicatedJournal.appendUpdateRecordTransactional(2, 2, (byte) 2, new FakeData());
    replicatedJournal.appendCommitRecord(2, false);

    replicatedJournal.appendDeleteRecordTransactional(3, 4, new FakeData());
    replicatedJournal.appendPrepareRecord(3, new FakeData(), false);
    replicatedJournal.appendRollbackRecord(3, false);

    blockOnReplication(storage, manager);

    Assert.assertTrue(
        "Expecting no active tokens:" + manager.getActiveTokens(),
        manager.getActiveTokens().isEmpty());

    ServerMessage msg = new ServerMessageImpl(1, 1024);

    SimpleString dummy = new SimpleString("dummy");
    msg.setAddress(dummy);

    replicatedJournal.appendAddRecordTransactional(23, 24, (byte) 1, new FakeData());

    PagedMessage pgmsg = new PagedMessageImpl(msg, new long[0]);
    manager.pageWrite(pgmsg, 1);
    manager.pageWrite(pgmsg, 2);
    manager.pageWrite(pgmsg, 3);
    manager.pageWrite(pgmsg, 4);

    blockOnReplication(storage, manager);

    PagingManager pagingManager =
        createPageManager(
            backupServer.getStorageManager(), backupServer.getConfiguration(),
            backupServer.getExecutorFactory(), backupServer.getAddressSettingsRepository());

    PagingStore store = pagingManager.getPageStore(dummy);
    store.start();
    Assert.assertEquals(4, store.getNumberOfPages());
    store.stop();

    manager.pageDeleted(dummy, 1);
    manager.pageDeleted(dummy, 2);
    manager.pageDeleted(dummy, 3);
    manager.pageDeleted(dummy, 4);
    manager.pageDeleted(dummy, 5);
    manager.pageDeleted(dummy, 6);

    blockOnReplication(storage, manager);

    ServerMessageImpl serverMsg = new ServerMessageImpl();
    serverMsg.setMessageID(500);
    serverMsg.setAddress(new SimpleString("tttt"));

    HornetQBuffer buffer = HornetQBuffers.dynamicBuffer(100);
    serverMsg.encodeHeadersAndProperties(buffer);

    manager.largeMessageBegin(500);

    manager.largeMessageWrite(500, new byte[1024]);

    manager.largeMessageDelete(Long.valueOf(500));

    blockOnReplication(storage, manager);

    store.start();

    Assert.assertEquals(0, store.getNumberOfPages());
  }
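
The ReplicatedJournal used above is essentially a decorator: every append goes to the wrapped local journal and is also shipped to the ReplicationManager. A toy sketch of that shape, using simplified hypothetical interfaces rather than HornetQ's real Journal API:

// Simplified, hypothetical interfaces to illustrate the decorator idea behind
// ReplicatedJournal; this is not HornetQ's actual Journal API.
interface MiniJournal {
  void appendAddRecord(long id, byte recordType, byte[] data) throws Exception;
}

interface MiniReplicator {
  void replicateAdd(long id, byte recordType, byte[] data) throws Exception;
}

final class MiniReplicatedJournal implements MiniJournal {
  private final MiniJournal localJournal;
  private final MiniReplicator replicator;

  MiniReplicatedJournal(MiniJournal localJournal, MiniReplicator replicator) {
    this.localJournal = localJournal;
    this.replicator = replicator;
  }

  @Override
  public void appendAddRecord(long id, byte recordType, byte[] data) throws Exception {
    replicator.replicateAdd(id, recordType, data); // mirror the append to the backup
    localJournal.appendAddRecord(id, recordType, data); // then write it locally
  }
}
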
Example #4
  /**
   * This would recreate the scenario where a queue was duplicated
   *
   * @throws Exception
   */
  @Test
  public void testHangDuplicateQueues() throws Exception {
    final Semaphore blocked = new Semaphore(1);
    final CountDownLatch latchDelete = new CountDownLatch(1);
    class MyQueueWithBlocking extends QueueImpl {

      /**
       * @param id
       * @param address
       * @param name
       * @param filter
       * @param pageSubscription
       * @param durable
       * @param temporary
       * @param scheduledExecutor
       * @param postOffice
       * @param storageManager
       * @param addressSettingsRepository
       * @param executor
       */
      public MyQueueWithBlocking(
          final long id,
          final SimpleString address,
          final SimpleString name,
          final Filter filter,
          final PageSubscription pageSubscription,
          final boolean durable,
          final boolean temporary,
          final ScheduledExecutorService scheduledExecutor,
          final PostOffice postOffice,
          final StorageManager storageManager,
          final HierarchicalRepository<AddressSettings> addressSettingsRepository,
          final Executor executor) {
        super(
            id,
            address,
            name,
            filter,
            pageSubscription,
            durable,
            temporary,
            scheduledExecutor,
            postOffice,
            storageManager,
            addressSettingsRepository,
            executor);
      }

      @Override
      public synchronized int deleteMatchingReferences(final int flushLimit, final Filter filter)
          throws Exception {
        latchDelete.countDown();
        blocked.acquire();
        blocked.release();
        return super.deleteMatchingReferences(flushLimit, filter);
      }
    }

    class LocalFactory extends QueueFactoryImpl {
      public LocalFactory(
          final ExecutorFactory executorFactory,
          final ScheduledExecutorService scheduledExecutor,
          final HierarchicalRepository<AddressSettings> addressSettingsRepository,
          final StorageManager storageManager) {
        super(executorFactory, scheduledExecutor, addressSettingsRepository, storageManager);
      }

      @Override
      public Queue createQueue(
          final long persistenceID,
          final SimpleString address,
          final SimpleString name,
          final Filter filter,
          final PageSubscription pageSubscription,
          final boolean durable,
          final boolean temporary) {
        queue =
            new MyQueueWithBlocking(
                persistenceID,
                address,
                name,
                filter,
                pageSubscription,
                durable,
                temporary,
                scheduledExecutor,
                postOffice,
                storageManager,
                addressSettingsRepository,
                executorFactory.getExecutor());
        return queue;
      }
    }

    LocalFactory queueFactory =
        new LocalFactory(
            server.getExecutorFactory(),
            server.getScheduledPool(),
            server.getAddressSettingsRepository(),
            server.getStorageManager());

    queueFactory.setPostOffice(server.getPostOffice());

    ((HornetQServerImpl) server).replaceQueueFactory(queueFactory);

    queue = server.createQueue(QUEUE, QUEUE, null, true, false);

    blocked.acquire();

    ClientSessionFactory factory = locator.createSessionFactory();
    ClientSession session = factory.createSession(false, false, false);

    ClientProducer producer = session.createProducer(QUEUE);

    producer.send(session.createMessage(true));
    session.commit();

    Thread tDelete =
        new Thread() {
          @Override
          public void run() {
            try {
              server.destroyQueue(QUEUE);
            } catch (Exception e) {
              e.printStackTrace();
            }
          }
        };

    tDelete.start();

    Assert.assertTrue(latchDelete.await(10, TimeUnit.SECONDS));

    try {
      server.createQueue(QUEUE, QUEUE, null, true, false);
    } catch (Exception expected) {
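      // expected: the original queue binding still exists while the delete is blocked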
    }

    blocked.release();

    server.stop();

    tDelete.join();

    session.close();

    // a duplicate binding would impede the server from starting
    server.start();
    waitForServer(server);

    server.stop();
  }
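
The latch/semaphore choreography in this test can be isolated into a small, self-contained sketch: the worker signals that it has reached the critical section and then parks until the main thread has finished racing against it. The racing operations themselves are only indicated by comments.

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.Semaphore;
import java.util.concurrent.TimeUnit;

public class BlockAndRaceSketch {
  public static void main(String[] args) throws Exception {
    final Semaphore blocked = new Semaphore(1);
    final CountDownLatch reached = new CountDownLatch(1);

    blocked.acquire(); // pre-acquire so the worker parks inside its critical section

    Thread worker =
        new Thread(
            () -> {
              try {
                reached.countDown(); // tell the main thread we are at the critical point
                blocked.acquire(); // park until the main thread releases us
                blocked.release();
                // ... the operation under test (e.g. the blocked queue delete) finishes here
              } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
              }
            });
    worker.start();

    if (!reached.await(10, TimeUnit.SECONDS)) {
      throw new IllegalStateException("worker never reached the critical section");
    }
    // ... perform the racing operation here (e.g. the duplicate createQueue attempt)
    blocked.release();
    worker.join();
  }
}
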
Example #5
  public synchronized void deployBridge(final BridgeConfiguration config) throws Exception {
    if (config.getName() == null) {
      HornetQServerLogger.LOGGER.bridgeNotUnique();

      return;
    }

    if (config.getQueueName() == null) {
      HornetQServerLogger.LOGGER.bridgeNoQueue(config.getName());

      return;
    }

    if (config.getForwardingAddress() == null) {
      HornetQServerLogger.LOGGER.bridgeNoForwardAddress(config.getName());
    }

    if (bridges.containsKey(config.getName())) {
      HornetQServerLogger.LOGGER.bridgeAlreadyDeployed(config.getName());

      return;
    }

    Transformer transformer = instantiateTransformer(config.getTransformerClassName());

    Binding binding = postOffice.getBinding(new SimpleString(config.getQueueName()));

    if (binding == null) {
      HornetQServerLogger.LOGGER.bridgeQueueNotFound(config.getQueueName(), config.getName());

      return;
    }

    Queue queue = (Queue) binding.getBindable();

    ServerLocatorInternal serverLocator;

    if (config.getDiscoveryGroupName() != null) {
      DiscoveryGroupConfiguration discoveryGroupConfiguration =
          configuration.getDiscoveryGroupConfigurations().get(config.getDiscoveryGroupName());
      if (discoveryGroupConfiguration == null) {
        HornetQServerLogger.LOGGER.bridgeNoDiscoveryGroup(config.getDiscoveryGroupName());

        return;
      }

      if (config.isHA()) {
        serverLocator =
            (ServerLocatorInternal)
                HornetQClient.createServerLocatorWithHA(discoveryGroupConfiguration);
      } else {
        serverLocator =
            (ServerLocatorInternal)
                HornetQClient.createServerLocatorWithoutHA(discoveryGroupConfiguration);
      }

    } else {
      TransportConfiguration[] tcConfigs = connectorNameListToArray(config.getStaticConnectors());

      if (tcConfigs == null) {
        HornetQServerLogger.LOGGER.bridgeCantFindConnectors(config.getName());
        return;
      }

      if (config.isHA()) {
        serverLocator = (ServerLocatorInternal) HornetQClient.createServerLocatorWithHA(tcConfigs);
      } else {
        serverLocator =
            (ServerLocatorInternal) HornetQClient.createServerLocatorWithoutHA(tcConfigs);
      }
    }

    if (config.getForwardingAddress() != null) {
      AddressSettings addressConfig =
          configuration.getAddressesSettings().get(config.getForwardingAddress());

      // The address config could be null on certain test cases or some Embedded environment
      if (addressConfig == null) {
        // We will certainly have this warning on testcases which is ok
        HornetQServerLogger.LOGGER.bridgeCantFindAddressConfig(
            config.getName(), config.getForwardingAddress());
      } else {
        final int windowSize = config.getConfirmationWindowSize();
        final long maxBytes = addressConfig.getMaxSizeBytes();

        if (maxBytes != -1 && maxBytes < windowSize) {
          HornetQServerLogger.LOGGER.bridgeConfirmationWindowTooSmall(
              config.getName(), config.getForwardingAddress(), windowSize, maxBytes);
        }
      }
    }

    serverLocator.setIdentity("Bridge " + config.getName());
    serverLocator.setConfirmationWindowSize(config.getConfirmationWindowSize());

    // We are going to manually retry on the bridge in case of failure
    serverLocator.setReconnectAttempts(0);
    serverLocator.setInitialConnectAttempts(0);
    serverLocator.setRetryInterval(config.getRetryInterval());
    serverLocator.setMaxRetryInterval(config.getMaxRetryInterval());
    serverLocator.setRetryIntervalMultiplier(config.getRetryIntervalMultiplier());
    serverLocator.setClientFailureCheckPeriod(config.getClientFailureCheckPeriod());
    serverLocator.setConnectionTTL(config.getConnectionTTL());
    serverLocator.setBlockOnDurableSend(!config.isUseDuplicateDetection());
    serverLocator.setBlockOnNonDurableSend(!config.isUseDuplicateDetection());
    serverLocator.setMinLargeMessageSize(config.getMinLargeMessageSize());
    // disable flow control
    serverLocator.setProducerWindowSize(-1);

    // This will be set to 30s unless it's changed from embedded / testing.
    // There is no reason to expose this timeout in the config,
    // since the Bridge is supposed to be non-blocking and fast.
    // We may expose it if we find a good use case.
    serverLocator.setCallTimeout(config.getCallTimeout());
    if (!config.isUseDuplicateDetection()) {
      HornetQServerLogger.LOGGER.debug(
          "Bridge "
              + config.getName()
              + " is configured to not use duplicate detecion, it will send messages synchronously");
    }

    clusterLocators.add(serverLocator);

    Bridge bridge =
        new BridgeImpl(
            serverLocator,
            config.getReconnectAttempts(),
            config.getReconnectAttemptsOnSameNode(),
            config.getRetryInterval(),
            config.getRetryIntervalMultiplier(),
            config.getMaxRetryInterval(),
            nodeManager.getUUID(),
            new SimpleString(config.getName()),
            queue,
            executorFactory.getExecutor(),
            FilterImpl.createFilter(config.getFilterString()),
            SimpleString.toSimpleString(config.getForwardingAddress()),
            scheduledExecutor,
            transformer,
            config.isUseDuplicateDetection(),
            config.getUser(),
            config.getPassword(),
            !backup,
            server.getStorageManager());

    bridges.put(config.getName(), bridge);

    managementService.registerBridge(bridge, config);

    bridge.start();
  }
  private void createNewRecord(
      final long eventUID,
      final String targetNodeID,
      final TransportConfiguration connector,
      final SimpleString queueName,
      final Queue queue,
      final boolean start)
      throws Exception {
    String nodeId;

    synchronized (this) {
      if (!started) {
        return;
      }

      if (serverLocator == null) {
        return;
      }

      nodeId = serverLocator.getNodeID();
    }

    final ServerLocatorInternal targetLocator = new ServerLocatorImpl(topology, true, connector);

    targetLocator.setReconnectAttempts(0);

    targetLocator.setInitialConnectAttempts(0);
    targetLocator.setClientFailureCheckPeriod(clientFailureCheckPeriod);
    targetLocator.setConnectionTTL(connectionTTL);

    targetLocator.setConfirmationWindowSize(confirmationWindowSize);
    targetLocator.setBlockOnDurableSend(!useDuplicateDetection);
    targetLocator.setBlockOnNonDurableSend(!useDuplicateDetection);

    targetLocator.setRetryInterval(retryInterval);
    targetLocator.setMaxRetryInterval(maxRetryInterval);
    targetLocator.setRetryIntervalMultiplier(retryIntervalMultiplier);
    targetLocator.setMinLargeMessageSize(minLargeMessageSize);

    // No producer flow control on the bridges, as we don't want to lock the queues
    targetLocator.setProducerWindowSize(-1);

    targetLocator.setAfterConnectionInternalListener(this);

    targetLocator.setNodeID(nodeId);

    targetLocator.setClusterTransportConfiguration(
        serverLocator.getClusterTransportConfiguration());

    if (retryInterval > 0) {
      targetLocator.setRetryInterval(retryInterval);
    }

    targetLocator.disableFinalizeCheck();
    targetLocator.addIncomingInterceptor(
        new IncomingInterceptorLookingForExceptionMessage(manager, executorFactory.getExecutor()));
    MessageFlowRecordImpl record =
        new MessageFlowRecordImpl(
            targetLocator, eventUID, targetNodeID, connector, queueName, queue);

    ClusterConnectionBridge bridge =
        new ClusterConnectionBridge(
            this,
            manager,
            targetLocator,
            serverLocator,
            reconnectAttempts,
            retryInterval,
            retryIntervalMultiplier,
            maxRetryInterval,
            nodeManager.getUUID(),
            record.getEventUID(),
            record.getTargetNodeID(),
            record.getQueueName(),
            record.getQueue(),
            executorFactory.getExecutor(),
            null,
            null,
            scheduledExecutor,
            null,
            useDuplicateDetection,
            clusterUser,
            clusterPassword,
            server.getStorageManager(),
            managementService.getManagementAddress(),
            managementService.getManagementNotificationAddress(),
            record,
            record.getConnector());

    targetLocator.setIdentity(
        "(Cluster-connection-bridge::" + bridge.toString() + "::" + this.toString() + ")");

    if (HornetQServerLogger.LOGGER.isDebugEnabled()) {
      HornetQServerLogger.LOGGER.debug(
          "creating record between " + this.connector + " and " + connector + " with bridge " + bridge);
    }

    record.setBridge(bridge);

    records.put(targetNodeID, record);

    if (start) {
      bridge.start();
    }
  }
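
Both deployBridge(...) and createNewRecord(...) tune their locators the same way: the bridge handles retries itself, sends only block when duplicate detection is off, and producer flow control is disabled. Below is a small hypothetical helper collecting those settings for any ServerLocator; the class and method names are my own.

import org.hornetq.api.core.client.ServerLocator;

final class BridgeLocatorTuning {
  // Applies the locator settings both bridge-deployment paths above use.
  static void apply(ServerLocator locator, boolean useDuplicateDetection, long retryInterval) {
    locator.setReconnectAttempts(0); // the bridge retries manually
    locator.setInitialConnectAttempts(0);
    locator.setRetryInterval(retryInterval);
    locator.setBlockOnDurableSend(!useDuplicateDetection); // block sends only without duplicate detection
    locator.setBlockOnNonDurableSend(!useDuplicateDetection);
    locator.setProducerWindowSize(-1); // disable producer flow control
  }
}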