Example #1
  @Test
  @Ignore
  public void runCanalInstanceTest() {
    ActionFactory<TableAction> actionFactory =
        new SingleSchemaActionFactory<>(
            Schemas.buildSchema("autoparts", TableAction.class)
                .addTable(
                    Schemas.buildTable("db_goods_stock")
                        .action(
                            new AbstractTableAction(
                                new SingleThreadCurrentHandleTable<TableAction>()) {
                              @Override
                              public void onAction(List<? extends RowChangedData> changedData) {
                                System.out.println("currentTable: " + getCurrentTable());
                                System.out.println(changedData);
                              }
                            })
                        .columns("id", "goods_id", "goods_number")
                        .columnCondition(Schemas.DEFAULT_DELETE_COLUMN_CONDITION))
                .create());
    CANAL_EXECUTOR.addInstanceHandle(new TableSectionHandle(LOCAL_ADDRESS, "shop", actionFactory));
    ActionFactory<EventTypeAction> eventTypeFactory =
        new SingleSchemaActionFactory<>(
            Schemas.buildSchema("autoparts", EventTypeAction.class)
                .addTable(
                    Schemas.buildTable("db_goods")
                        .action(
                            new EventTypeAction() {
                              @Override
                              public void onUpdateAction(List<RowChangedData.Update> updatedData) {
                                System.out.println("db_goods.onUpdateAction: " + updatedData);
                              }

                              @Override
                              public void onInsertAction(List<RowChangedData.Insert> insertedData) {
                                System.out.println("db_goods.onInsertAction: " + insertedData);
                              }

                              @Override
                              public void onDeleteAction(List<RowChangedData.Delete> deletedData) {
                                System.out.println("db_goods.onDeleteAction: " + deletedData);
                              }
                            })
                        .columns("goods_id", "goods_name", "cat_id", "new_goods_sn")
                        .columnCondition(
                            Conditions.unmodifiableContainer()
                                .mustCondition(Conditions.equal("is_delete", false))
                                .mustCondition(Conditions.equal("seller_id", 1))
                                .create()))
                .create());
    CANAL_EXECUTOR.addInstanceHandle(
        new EventTypeSectionHandle(LOCAL_ADDRESS, "shop_goods", eventTypeFactory));
    CANAL_EXECUTOR.startAllInstance(0L);
    try {
      // Keep the test alive so the registered canal handlers can receive events.
      TimeUnit.MINUTES.sleep(100);
    } catch (InterruptedException e) {
      Thread.currentThread().interrupt(); // restore the interrupt flag instead of swallowing it
    }
  }
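The trailing sleep exists only to keep the test JVM alive while the canal handlers process events. A minimal sketch of a bounded alternative, assuming only java.util.concurrent; the DONE latch and where it is counted down are hypothetical, not part of the canal API:

  import java.util.concurrent.CountDownLatch;
  import java.util.concurrent.TimeUnit;

  public class BoundedWaitSketch {
    // Hypothetical latch a handler could count down once the expected events arrive.
    private static final CountDownLatch DONE = new CountDownLatch(1);

    public static void main(String[] args) throws InterruptedException {
      // ... register handlers and start the canal instances here ...
      // Wait up to 100 minutes, but return as soon as a handler signals completion.
      if (!DONE.await(100, TimeUnit.MINUTES)) {
        System.err.println("Timed out waiting for canal events");
      }
    }
  }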
Example #2
 @Test
 public void run() throws InterruptedException {
   //        new LogbackConfigurer();
   //        new ClassPathXmlApplicationContext("/jar/config/spring/applicationContext.xml");
   //        new KafkaSpider("3");
   new ActiveMQSpider("3").start();
    // Sleep for 300 million minutes, i.e. keep the spider running indefinitely.
    TimeUnit.MINUTES.sleep(300000000);
 }
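The 300,000,000-minute sleep above is a "block forever" idiom. A minimal sketch of two equivalent ways to state that intent directly, using only the standard library:

  import java.util.concurrent.CountDownLatch;

  public class BlockForeverSketch {
    public static void main(String[] args) throws InterruptedException {
      // Joining the current thread would also block indefinitely:
      // Thread.currentThread().join();

      // Await a latch that is never counted down.
      new CountDownLatch(1).await();
    }
  }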
Example #3
    private void notifyNamespace(PnfsId pnfsid, FileAttributes fileAttributes)
        throws InterruptedException {
      while (true) {
        try {
          pnfs.fileFlushed(pnfsid, fileAttributes);
          break;
        } catch (CacheException e) {
          if (e.getRc() == CacheException.FILE_NOT_FOUND
              || e.getRc() == CacheException.NOT_IN_TRASH) {
            /* In case the file was deleted, we are presented
             * with the problem that the file is now on tape;
             * however, its location has not been registered
             * centrally. Hence the copy on tape will not be
             * removed by the HSM cleaner. The sensible thing
             * seems to be to remove the file from tape here.
             * For now we ignore this issue (REVISIT).
             */
            break;
          }

          /* The message to the PnfsManager failed. There are several
           * possible reasons for this; we may have lost the
           * connection to the PnfsManager; the PnfsManager may have
           * lost its connection to the namespace or otherwise be in
           * trouble; bugs; etc.
           *
           * We keep retrying until we succeed. This will effectively
           * block this thread from flushing any other files, which
           * seems sensible when we have trouble talking to the
           * PnfsManager. If the pool crashes or gets restarted while
           * waiting here, we will end up flushing the file again. We
           * assume that the nearline storage is able to eliminate the
           * duplicate; or at least tolerate the duplicate (given that
           * this situation should be rare, we can live with a little
           * bit of wasted tape).
           */
          LOGGER.error(
              "Error notifying pnfsmanager about a flushed file: {} ({})",
              e.getMessage(),
              e.getRc());
        }
        TimeUnit.MINUTES.sleep(2);
      }
    }
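The loop above is retry-until-success with a fixed two-minute delay and a single non-retryable case. A minimal generic sketch of the same shape; retryIndefinitely and giveUp are illustrative names, not part of the dCache code:

  import java.util.concurrent.Callable;
  import java.util.concurrent.TimeUnit;
  import java.util.function.Predicate;

  public final class RetrySketch {
    // Runs the task until it succeeds; exceptions matching giveUp end the loop
    // early (the code above instead breaks out and deliberately ignores them).
    static <T> T retryIndefinitely(Callable<T> task, Predicate<Exception> giveUp, long delayMinutes)
        throws Exception {
      while (true) {
        try {
          return task.call();
        } catch (Exception e) {
          if (giveUp.test(e)) {
            throw e;
          }
          // Transient failure: wait, then retry. Blocking here is intentional,
          // just as the flush thread above blocks rather than skipping files.
          TimeUnit.MINUTES.sleep(delayMinutes);
        }
      }
    }
  }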
Example #4
 private void replaceBadHosts(int expectedNumHosts) throws Exception {
   Set<Host> goodHosts = Sets.newHashSet();
   for (HostExecutor hostExecutor : ImmutableList.copyOf(hostExecutors)) {
     if (hostExecutor.isBad()) {
       logger.info("Removing host during execution phase: " + hostExecutor.getHost());
       executionContext.addBadHost(hostExecutor.getHost());
       hostExecutors.remove(hostExecutor);
     } else {
       goodHosts.add(hostExecutor.getHost());
     }
   }
   long start = System.currentTimeMillis();
   while (hostExecutors.size() < expectedNumHosts) {
     if (System.currentTimeMillis() - start > FOUR_HOURS) {
       throw new RuntimeException(
           "Waited over four hours for hosts, still have only "
               + hostExecutors.size()
               + " hosts out of an expected "
               + expectedNumHosts);
     }
     logger.warn(
         "Only "
             + hostExecutors.size()
             + " hosts out of an expected "
             + expectedNumHosts
             + ", attempting to replace bad hosts");
     TimeUnit.MINUTES.sleep(1);
     executionContext.replaceBadHosts();
     for (Host host : executionContext.getHosts()) {
       if (!goodHosts.contains(host)) {
         HostExecutor hostExecutor = hostExecutorBuilder.build(host);
         initalizeHost(hostExecutor);
         if (hostExecutor.isBad()) {
           executionContext.addBadHost(hostExecutor.getHost());
         } else {
           logger.info("Adding new host during execution phase: " + host);
           hostExecutors.add(hostExecutor);
         }
       }
     }
   }
 }
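replaceBadHosts is a poll-until-condition loop with a deadline: test, log, sleep a minute, repair, re-test, and fail after four hours. A minimal reusable sketch of that shape; awaitCondition and its parameters are illustrative, not part of the original API:

  import java.util.concurrent.TimeUnit;
  import java.util.function.BooleanSupplier;

  public final class PollSketch {
    // Polls until the condition holds, running a repair action between checks,
    // and throws once the deadline has passed.
    static void awaitCondition(BooleanSupplier condition, Runnable repair,
        long timeoutMillis, long pollMinutes) throws InterruptedException {
      long start = System.currentTimeMillis();
      while (!condition.getAsBoolean()) {
        if (System.currentTimeMillis() - start > timeoutMillis) {
          throw new RuntimeException("Condition not met within " + timeoutMillis + " ms");
        }
        TimeUnit.MINUTES.sleep(pollMinutes);
        repair.run();
      }
    }
  }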
Example #5
  private void run(String topicName) {
    ActiveMQConnectionFactory activeMqFactory =
        new ActiveMQConnectionFactory(ActiveMQConnection.DEFAULT_BROKER_URL);
    CachingConnectionFactory factory = new CachingConnectionFactory(activeMqFactory);

    DefaultMessageListenerContainer container = new DefaultMessageListenerContainer();
    container.setConnectionFactory(factory);
    container.setDestination(new ActiveMQTopic(topicName));
    container.setMessageListener(new LoggerMessageListener());
    container.initialize(); // wire up the container before starting its consumers
    container.start();

    try {
      TimeUnit.MINUTES.sleep(1); // let the listener consume messages for one minute
    } catch (InterruptedException ex) {
      Thread.currentThread().interrupt(); // restore the interrupt flag
      LOGGER.warn("Thread ended with exception", ex);
    }

    LOGGER.info("Done");
  }
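One lifecycle note on the example above: the container is never stopped, so every call leaks consumer threads and JMS connections. A minimal sketch of the bracketed version, assuming the same LoggerMessageListener from the example; the helper name is illustrative:

  import java.util.concurrent.TimeUnit;
  import javax.jms.MessageListener;
  import org.apache.activemq.ActiveMQConnection;
  import org.apache.activemq.ActiveMQConnectionFactory;
  import org.apache.activemq.command.ActiveMQTopic;
  import org.springframework.jms.connection.CachingConnectionFactory;
  import org.springframework.jms.listener.DefaultMessageListenerContainer;

  public class ListenerLifecycleSketch {
    static void runFor(String topicName, MessageListener listener) throws InterruptedException {
      CachingConnectionFactory factory = new CachingConnectionFactory(
          new ActiveMQConnectionFactory(ActiveMQConnection.DEFAULT_BROKER_URL));
      DefaultMessageListenerContainer container = new DefaultMessageListenerContainer();
      container.setConnectionFactory(factory);
      container.setDestination(new ActiveMQTopic(topicName));
      container.setMessageListener(listener);
      container.initialize(); // wire up internals before starting consumers
      container.start();
      try {
        TimeUnit.MINUTES.sleep(1); // consume messages for one minute
      } finally {
        container.shutdown(); // stop consumers and release JMS resources
        factory.destroy();    // close the cached underlying connection
      }
    }
  }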
Example #6
  @Test
  public void testEntriesAddition() throws Exception {

    createSchema(baseDocumentTx);
    createSchema(testDocumentTx);

    System.out.println("Start data propagation");

    List<Future<?>> futures = new ArrayList<>();
    for (int i = 0; i < 5; i++) {
      futures.add(executorService.submit(new DataPropagationTask(baseDocumentTx, testDocumentTx)));
    }

    // Let the writer tasks run for five minutes before killing the server.
    TimeUnit.MINUTES.sleep(5);

    System.out.println("Wait for process to destroy");
    serverProcess.destroy();

    serverProcess.waitFor();
    System.out.println("Process was destroyed");

    for (Future<?> future : futures) {
      try {
        future.get();
      } catch (Exception e) {
        future.cancel(true);
      }
    }

    // Reopen the database that belonged to the killed process; the first open
    // triggers OrientDB's crash-recovery pass, after which the storage can be
    // closed and reopened normally.
    testDocumentTx =
        new ODatabaseDocumentTx(
            "plocal:" + buildDir.getAbsolutePath() + "/testUniqueIndexCrashRestore");
    testDocumentTx.open("admin", "admin");
    testDocumentTx.close();

    testDocumentTx.open("admin", "admin");

    System.out.println("Start data comparison.");
    compareIndexes();
  }
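The future-draining loop in the test above ignores failures, which is expected once the server process has been killed mid-write. A minimal sketch of that drain step as a standalone helper (the name is illustrative):

  import java.util.List;
  import java.util.concurrent.Future;

  public final class DrainSketch {
    // Waits for each task; failures are expected after the server dies, so they
    // are swallowed and any task still running is cancelled with interruption.
    static void drainIgnoringFailures(List<Future<?>> futures) {
      for (Future<?> future : futures) {
        try {
          future.get();
        } catch (Exception e) {
          future.cancel(true);
        }
      }
    }
  }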
Example #7
  @Override
  public void doTest() throws Throwable {
    ThreadMXBean tbean = ManagementFactory.getThreadMXBean();

    int nonDaemonThreadCountA = tbean.getThreadCount() - tbean.getDaemonThreadCount();
    int daemonThreadCountA = tbean.getDaemonThreadCount();
    long[] listA = tbean.getAllThreadIds();
    for (int loopNumber = 0; loopNumber < 4; loopNumber++) {
      cacheManager =
          new CacheManager(
              DaemonThreadsWriteBehindTestClient.class.getResourceAsStream("/ehcache-config.xml"));
      int daemonThreadCountB = tbean.getDaemonThreadCount();
      Assert.assertTrue(daemonThreadCountA < daemonThreadCountB);
      Cache cache = cacheManager.getCache("test");
      cache.registerCacheWriter(new WriteBehindCacheWriter(this));
      Assert.assertNotNull(cache.getWriterManager());
      Assert.assertTrue(cache.getWriterManager() instanceof WriteBehindManager);
      for (int i = 0; i < 10; i++) {
        cache.putWithWriter(new Element(i, i));
      }
      while (getWriteCount() < 10) {
        Thread.sleep(200);
      }
      resetWriteCount();
      cacheManager.shutdown();
      System.out.println("done with iteration " + loopNumber);
    }
    // Give the cache's daemon threads time to terminate after the last shutdown.
    TimeUnit.MINUTES.sleep(1L);
    long[] listC = tbean.getAllThreadIds();
    int daemonThreadCountC = tbean.getDaemonThreadCount();
    int nonDaemonThreadCountC = tbean.getThreadCount() - tbean.getDaemonThreadCount();
    List<Long> listIntA = new ArrayList<Long>();
    for (long threadId : listA) {
      listIntA.add(threadId); // autoboxing instead of the deprecated new Long(...)
    }
    List<Long> listIntC = new ArrayList<Long>();
    for (long threadId : listC) {
      listIntC.add(threadId);
    }
    listIntC.removeAll(listIntA);
    Set<String> knownThreads = getKnownThreads();
    int skipThreadCount = 0;
    StringBuilder threadsInfo = new StringBuilder();
    System.out.println(
        "\n\n" + listIntC.size() + " Start Printing Stack Trace\n--------------------");
    for (int i = 0; i < listIntC.size(); i++) {
      // Request the full stack depth; getThreadInfo(long) alone returns an
      // empty stack trace, so the dump below would otherwise be blank.
      ThreadInfo tinfo = tbean.getThreadInfo(listIntC.get(i), Integer.MAX_VALUE);
      if (tinfo == null) {
        continue; // the thread terminated between the two snapshots
      }
      if (knownThreads.contains(tinfo.getThreadName().trim())) {
        ++skipThreadCount;
        continue;
      }
      String info = "Thread name: " + tinfo.getThreadName() + " | " + tinfo.getThreadId();
      threadsInfo.append(info).append('\n');
      for (StackTraceElement e : tinfo.getStackTrace()) {
        threadsInfo.append(e).append("\n\n");
      }
    }
    System.out.println(threadsInfo + "\n\n-----------------------\n\n");
    Assert.assertEquals(
        threadsInfo.toString(), daemonThreadCountA, daemonThreadCountC - skipThreadCount);
    Assert.assertEquals(nonDaemonThreadCountA, nonDaemonThreadCountC);
  }
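The bookkeeping in doTest (snapshot thread IDs, run the workload, snapshot again, diff, filter known thread names) is a generic thread-leak check. A minimal sketch of it as a standalone helper, assuming only java.lang.management; leakedThreads is an illustrative name:

  import java.lang.management.ManagementFactory;
  import java.lang.management.ThreadInfo;
  import java.lang.management.ThreadMXBean;
  import java.util.ArrayList;
  import java.util.HashSet;
  import java.util.List;
  import java.util.Set;

  public final class ThreadLeakSketch {
    // Returns info for threads alive now that were not in the `before` snapshot
    // and whose names are not in the known/allowed set.
    static List<ThreadInfo> leakedThreads(long[] before, Set<String> knownThreadNames) {
      ThreadMXBean bean = ManagementFactory.getThreadMXBean();
      Set<Long> baseline = new HashSet<>();
      for (long id : before) {
        baseline.add(id);
      }
      List<ThreadInfo> leaked = new ArrayList<>();
      for (long id : bean.getAllThreadIds()) {
        if (baseline.contains(id)) {
          continue;
        }
        ThreadInfo info = bean.getThreadInfo(id, Integer.MAX_VALUE);
        if (info != null && !knownThreadNames.contains(info.getThreadName().trim())) {
          leaked.add(info);
        }
      }
      return leaked;
    }
  }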