Example No. 1
 /**
  * Drives the actual test on an Executor and verifies the result
  *
  * @param maps the caches to be tested
  * @throws IOException
  * @throws InterruptedException
  */
 private void testConcurrentLocking(List<ConcurrentMap<String, String>> maps)
     throws IOException, InterruptedException {
   SharedStats stats = new SharedStats();
   ThreadPoolExecutor executor = (ThreadPoolExecutor) Executors.newFixedThreadPool(NODES_NUM);
   List<StressingThread> threads = new ArrayList<StressingThread>();
   for (ConcurrentMap<String, String> map : maps) {
     StressingThread thread = new StressingThread(stats, map);
     threads.add(thread);
     executor.execute(thread);
   }
   executor.shutdown();
   Thread.sleep(5000);
   int putsAfter5Seconds = stats.succesfullPutsCounter.get();
   System.out.println("\nSituation after 5 seconds:");
   System.out.println(stats.toString());
   executor.awaitTermination(STRESS_TIME_MINUTES, TimeUnit.MINUTES);
   stats.globalQuit = true;
   executor.awaitTermination(10, TimeUnit.SECONDS); // give some time to awake and quit
   executor.shutdownNow();
   System.out.println("\nFinal situation:");
   System.out.println(stats.toString());
   assert !stats.seenFailures : "at least one thread has seen unexpected state";
   assert stats.succesfullPutsCounter.get() > 0 : "the lock should have been taken at least once";
   assert stats.succesfullPutsCounter.get() > putsAfter5Seconds
       : "the lock count didn't improve since the first 5 seconds. Deadlock?";
   assert stats.succesfullPutsCounter.get() == stats.lockReleasedCounter.get()
       : "there's a mismatch in acquires and releases count";
   assert stats.lockOwnersCounter.get() == 0 : "the lock is still held at test finish";
 }
Example No. 2
  private static void realMain(String[] args) throws Throwable {
    final int n = 4;
    final CyclicBarrier barrier = new CyclicBarrier(2 * n + 1);
    final ThreadPoolExecutor pool =
        new ThreadPoolExecutor(
            n, 2 * n, KEEPALIVE_MS, MILLISECONDS, new SynchronousQueue<Runnable>());
    final Runnable r =
        new Runnable() {
          public void run() {
            try {
              barrier.await();
              barrier.await();
            } catch (Throwable t) {
              unexpected(t);
            }
          }
        };

    for (int i = 0; i < 2 * n; i++) pool.execute(r);
    barrier.await();
    checkPoolSizes(pool, 2 * n, n, 2 * n);
    barrier.await();
    long nap = KEEPALIVE_MS + (KEEPALIVE_MS >> 2);
    for (long sleepyTime = 0L; pool.getPoolSize() > n; ) {
      check((sleepyTime += nap) <= LONG_DELAY_MS);
      Thread.sleep(nap);
    }
    checkPoolSizes(pool, n, n, 2 * n);
    Thread.sleep(nap);
    checkPoolSizes(pool, n, n, 2 * n);
    pool.shutdown();
    check(pool.awaitTermination(LONG_DELAY_MS, MILLISECONDS));
  }
Example No. 3
  @Test
  public void testConcurrentPutGet() throws NetInfCheckedException, InterruptedException {
    List<InformationObject> insertedIOs = new ArrayList<InformationObject>();
    ThreadPoolExecutor executor =
        new ThreadPoolExecutor(
            5, 10, 10, TimeUnit.SECONDS, new ArrayBlockingQueue<Runnable>(10000));

    for (int i = 0; i < NODE_NUMBER; i++) {
      for (int j = 0; j < IOS_PER_NODE; j++) {
        InformationObject io = createUniqueIO();
        executor.execute(new NodePutCommand(resolutionServices.get(i), io));
        insertedIOs.add(io);
      }
    }

    for (InformationObject io : insertedIOs) {
      NodeGetCommand getter = new NodeGetCommand(resolutionServices.get(0), io);
      executor.execute(getter);
      getterCommands.add(getter);
    }
    executor.shutdown();
    executor.awaitTermination(30, TimeUnit.SECONDS);

    for (NodeGetCommand getter : getterCommands) {
      Assert.assertTrue(getter.isCorrect());
    }
  }
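Note that the boolean returned by awaitTermination is discarded above, so if the 30-second timeout elapses, the assertions at the end of the test run against whatever work happened to finish. A stricter variant, as a sketch reusing the test's own names:

    // Fail fast if the pool does not drain in time, instead of asserting
    // against partially completed puts and gets.
    Assert.assertTrue(
        "tasks did not finish within 30 seconds",
        executor.awaitTermination(30, TimeUnit.SECONDS));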
Example No. 4
 public static void execute(
     final String fromDirectoryPath,
     final String toFirstPartDirectoryPath,
     final String toSecondPartDirectoryPath) {
   ThreadPoolExecutor executor =
       new ThreadPoolExecutor(
           100, 500, Long.MAX_VALUE, TimeUnit.SECONDS, new LinkedBlockingQueue<Runnable>());
   long start = System.currentTimeMillis();
   List<String> fileNames = produceInput(fromDirectoryPath);
   long mid = System.currentTimeMillis();
    System.out.println(
        "Time Taken for producing input: " + (mid - start) / 1000.0 + " seconds");
   for (String filePath : fileNames) {
     EtlTask readerTask =
         new FileSystemEtlTask(filePath, toFirstPartDirectoryPath, toSecondPartDirectoryPath);
     executor.submit(readerTask);
   }
   long end = System.currentTimeMillis();
    System.out.println(
        "Time Taken for creating tasks: " + (end - start) / 1000.0 + " seconds");
   executor.shutdown();
   try {
     executor.awaitTermination(Long.MAX_VALUE, TimeUnit.SECONDS);
     long end2 = System.currentTimeMillis();
      System.out.println(
          "Time Taken till termination: " + (end2 - start) / 1000.0 + " seconds");
   } catch (InterruptedException e) {
     System.err.println("ThreadPoolExecutor was interrupted");
     e.printStackTrace();
   }
 }
Example No. 5
  @Test
  @Ignore("NXP-20582: timeout waiting termination")
  public void testConcurrency() throws Exception {
    final String seqName = "mt";
    int nbCalls = 5000;

    final UIDSequencer seq = uidGeneratorService.getSequencer();
    ThreadPoolExecutor tpe =
        new ThreadPoolExecutor(
            5, 5, 500L, TimeUnit.MILLISECONDS, new LinkedBlockingQueue<Runnable>(nbCalls + 1));

    for (int i = 0; i < nbCalls; i++) {
      tpe.submit(
          new Runnable() {
            @Override
            public void run() {
              seq.getNext(seqName);
            }
          });
    }

    tpe.shutdown();
    boolean finish = tpe.awaitTermination(20, TimeUnit.SECONDS);
    assertTrue("timeout", finish);

    assertEquals(nbCalls + 1, seq.getNext(seqName));
  }
Example No. 6
 public void close() {
   _executorService.shutdown();
   try {
     _executorService.awaitTermination(60, TimeUnit.SECONDS);
   } catch (final InterruptedException e) {
     log.warn("waiting for termination of executor service interrupted", e);
   }
 }
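One caveat in this pattern: catching InterruptedException without restoring the interrupt flag hides the interruption from callers. A minimal sketch of the same close() that preserves it (assuming the same _executorService and log fields):

  public void close() {
    _executorService.shutdown();
    try {
      _executorService.awaitTermination(60, TimeUnit.SECONDS);
    } catch (final InterruptedException e) {
      log.warn("waiting for termination of executor service interrupted", e);
      // restore the flag so callers can observe the interruption
      Thread.currentThread().interrupt();
    }
  }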
Example No. 7
 public void shutDown() {
   threadPool.shutdownNow();
   try {
     threadPool.awaitTermination(10, TimeUnit.SECONDS);
   } catch (InterruptedException e) {
     throw new RuntimeException("Workers-pool termination failed", e);
   }
 }
Example No. 8
 public synchronized void exit() {
   mStopped = true;
   try {
     mExecutor.shutdown();
     mExecutor.awaitTermination(30, TimeUnit.SECONDS);
     mExecutor.shutdownNow();
   } catch (Exception ignored) {
   }
 }
Example No. 9
  public void shutdown() {
    _shutdown = true;
    try {
      _effectsScheduledThreadPool.shutdown();
      _generalScheduledThreadPool.shutdown();
      _generalPacketsThreadPool.shutdown();
      _ioPacketsThreadPool.shutdown();
      _generalThreadPool.shutdown();
      // initiate shutdown first; otherwise each awaitTermination below
      // simply times out against a still-running pool
      _effectsScheduledThreadPool.awaitTermination(1, TimeUnit.SECONDS);
      _generalScheduledThreadPool.awaitTermination(1, TimeUnit.SECONDS);
      _generalPacketsThreadPool.awaitTermination(1, TimeUnit.SECONDS);
      _ioPacketsThreadPool.awaitTermination(1, TimeUnit.SECONDS);
      _generalThreadPool.awaitTermination(1, TimeUnit.SECONDS);
      _log.info("All ThreadPools are now stopped");

    } catch (InterruptedException e) {
      _log.log(Level.WARNING, "", e);
    }
  }
Example No. 10
  public void destroy() {
    queryExecutor.shutdown(); // stop accepting new tasks; already submitted tasks keep running.

    // Block until shutdown completes, the timeout occurs, or the
    // current thread is interrupted, whichever happens first.
    try {
      queryExecutor.awaitTermination(2L, TimeUnit.MINUTES);
    } catch (InterruptedException e) {
      throw new IllegalStateException(QUERY_POOL_NAME + " graceful shutdown interrupted.", e);
    }
  }
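For comparison, the ThreadPoolExecutor javadoc recommends a two-phase shutdown that first disables new submissions and then escalates to shutdownNow() if the pool does not drain in time. A sketch adapted from the JDK documentation (assuming the usual java.util.concurrent imports):

  static void shutdownAndAwaitTermination(ExecutorService pool) {
    pool.shutdown(); // disable new tasks from being submitted
    try {
      if (!pool.awaitTermination(60, TimeUnit.SECONDS)) {
        pool.shutdownNow(); // cancel currently executing tasks
        if (!pool.awaitTermination(60, TimeUnit.SECONDS)) {
          System.err.println("Pool did not terminate");
        }
      }
    } catch (InterruptedException ie) {
      pool.shutdownNow(); // re-cancel if the current thread was interrupted
      Thread.currentThread().interrupt(); // preserve the interrupt status
    }
  }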
Example No. 11
  public void shutdown() {
    _shutdown = true;
    try {
      _effectsScheduledThreadPool.shutdown();
      _generalScheduledThreadPool.shutdown();
      _generalPacketsThreadPool.shutdown();
      _ioPacketsThreadPool.shutdown();
      _generalThreadPool.shutdown();
      _aiThreadPool.shutdown();
      // initiate shutdown first; otherwise each awaitTermination below
      // simply times out against a still-running pool
      _effectsScheduledThreadPool.awaitTermination(1, TimeUnit.SECONDS);
      _generalScheduledThreadPool.awaitTermination(1, TimeUnit.SECONDS);
      _generalPacketsThreadPool.awaitTermination(1, TimeUnit.SECONDS);
      _ioPacketsThreadPool.awaitTermination(1, TimeUnit.SECONDS);
      _generalThreadPool.awaitTermination(1, TimeUnit.SECONDS);
      _aiThreadPool.awaitTermination(1, TimeUnit.SECONDS);
      System.out.println("All ThreadPools are now stopped");

    } catch (InterruptedException e) {
      _log.log(Level.SEVERE, e.getLocalizedMessage(), e);
    }
  }
Example No. 12
 public void abortConnection() throws SQLException {
   Connection connection = DriverManager.getConnection("jdbc:derby://localhost/java7book");
   ThreadPoolExecutor executor =
       new DebugExecutorService(2, 10, 60, TimeUnit.SECONDS, new LinkedBlockingQueue<Runnable>());
   connection.abort(executor);
   executor.shutdown();
   try {
     executor.awaitTermination(5, TimeUnit.MINUTES);
     System.out.println(executor.getCompletedTaskCount());
   } catch (InterruptedException e) {
     e.printStackTrace();
   }
 }
Example No. 13
 private void waitFor(ThreadPoolExecutor t, String name) {
   boolean done = false;
   while (!done) {
     try {
        LOG.info("Waiting for " + name + " to finish...");
        done = t.awaitTermination(60, TimeUnit.SECONDS);
       if (!done) {
         t.shutdownNow();
       }
     } catch (InterruptedException ie) {
       LOG.warn("Interrupted waiting for " + name + " to finish...");
     }
   }
 }
Example No. 14
  /**
   * Stops the timer, cancelling all pending tasks. An interrupt while waiting
   * for the pool to terminate is silently ignored.
   */
  public void stop() {
    stopRunner();

    List<Runnable> remaining_tasks = pool.shutdownNow();
    for (Runnable task : remaining_tasks) {
      // Tasks submitted via submit() are wrapped in RunnableFuture and can be
      // cancelled here; tasks queued via execute() are plain Runnables.
      if (task instanceof Future) {
        Future<?> future = (Future<?>) task;
        future.cancel(true);
      }
    }
    pool.getQueue().clear();
    try {
      pool.awaitTermination(Global.THREADPOOL_SHUTDOWN_WAIT_TIME, TimeUnit.MILLISECONDS);
    } catch (InterruptedException e) {
      // ignored; shutdown is already under way
    }
  }
Example No. 15
  /** Main entry point. */
  public static void main(String[] args) {

    final ThreadPoolExecutor pool = ApplyThreadPool.getThreadPoolExector();

    Runtime.getRuntime()
        .addShutdownHook(
            new Thread(
                new Runnable() {
                  @Override
                  public void run() {
                    pool.shutdown();
                  }
                }));

    // Fetch Pipi user uids and tokens from the 150 machine
    logger.info("Fetching Pipi user uids from the 150 machine: ");
    TokenService tokenService = new TokenService();

    // Connect to the 48 database
    logger.info("Connecting to the 48 database: ");
    WeiboJDBC weiboJDBC =
        new WeiboJDBC("192.168.1.48", "pp_fenxi", "q#tmuYzC@sqB6!ok@sHd", "pp_fenxi");

    // Create the fan-analysis results table
    try {
      weiboJDBC.createFansAnalysisTable(FansAnalysisInfoUtils.PP_SINA_FANS_ANALYSIS);
    } catch (SQLException e) {
      throw new RuntimeException(e);
    }
    weiboJDBC.dbClose();

    // Run the analysis for each uid
    logger.info("Starting the per-uid analysis loop: ");
    for (Long uid : tokenService.getSinaUids()) {
      if (!pool.isShutdown()) {
        pool.execute(new FansAnalysisRun(uid));
      }
    }

    pool.shutdown();
    try {
      pool.awaitTermination(100, TimeUnit.SECONDS);
    } catch (InterruptedException e) {
      e.printStackTrace();
    }
  }
Example No. 16
 @Test
 public void testShutdownWithReferencesDoesNotStopExecutor() throws Exception {
   Map<String, Object> cache = new HashMap<String, Object>();
   ThreadPoolBuilder builder =
       new ThreadPoolBuilder(name.getTableNameString(), new Configuration(false));
   ThreadPoolExecutor exec = ThreadPoolManager.getExecutor(builder, cache);
    assertNotNull("Got a null executor from the pool!", exec);
   ThreadPoolExecutor exec2 = ThreadPoolManager.getExecutor(builder, cache);
   assertTrue("Should have gotten the same executor", exec2 == exec);
   exec.shutdown();
   assertFalse(
       "Executor is shutting down, even though we have a live reference!",
       exec.isShutdown() || exec.isTerminating());
   exec2.shutdown();
   // wait 5 minutes for thread pool to shutdown
   assertTrue(
       "Executor is NOT shutting down, after releasing live reference!",
       exec.awaitTermination(300, TimeUnit.SECONDS));
 }
Example No. 17
 private void assertConcurrentUpdates(Function<Integer, String> keyGenerator) throws Exception {
   final AtomicInteger safeIndex = new AtomicInteger(-1);
   List<String> keys = Lists.newArrayListWithCapacity(ATTEMPTS);
   ThreadPoolExecutor executor =
       new ThreadPoolExecutor(
           3, 3, 5, TimeUnit.SECONDS, new ArrayBlockingQueue<Runnable>(ATTEMPTS));
   synchronized (indexer) {
     for (int i = 0; i < ATTEMPTS; i++) {
       final String key = keyGenerator.apply(i);
       keys.add(key);
       executor.execute(
           new Runnable() {
             @Override
             public void run() {
               int index = indexer.getOrCreateIndex(key);
               if (safeIndex.get() < index) {
                 safeIndex.set(index);
               }
               indexer.addString(key);
             }
           });
     }
   }
   try {
     while (!executor.getQueue().isEmpty()) {
       // Validate that we can execute concurrent queries too.
       if (safeIndex.get() >= 0) {
         int index = safeIndex.get();
         // Retrieve string using random existing index and validate reverse mapping.
         String key = indexer.getStringForIndex(index);
         assertNotNull(key);
         assertEquals(index, indexer.getIndex(key));
       }
     }
   } finally {
     executor.shutdown();
     executor.awaitTermination(TestUtils.WAIT_TIMEOUT_SECONDS, TimeUnit.SECONDS);
   }
   for (String key : keys) {
     // Validate mapping between keys and indices.
     assertEquals(key, indexer.getStringForIndex(indexer.getIndex(key)));
   }
 }
Example No. 18
  @Override
 public synchronized void stop() {
   super.stop();
   shutdown = true;
   if (backgroundOperationPool != null) {
     backgroundOperationPool.shutdown();
     long timeout =
         hiveConf.getTimeVar(ConfVars.HIVE_SERVER2_ASYNC_EXEC_SHUTDOWN_TIMEOUT, TimeUnit.SECONDS);
     try {
       backgroundOperationPool.awaitTermination(timeout, TimeUnit.SECONDS);
     } catch (InterruptedException e) {
       LOG.warn(
           "HIVE_SERVER2_ASYNC_EXEC_SHUTDOWN_TIMEOUT = "
               + timeout
               + " seconds has been exceeded. RUNNING background operations will be shut down",
           e);
     }
     backgroundOperationPool = null;
   }
   cleanupLoggingRootDir();
 }
Example No. 19
  public void processMTRoot(String path) {
    long t0 = System.currentTimeMillis();
    DateFormat dateFormat = new SimpleDateFormat("yyyy/MM/dd HH:mm:ss");
    Date date = new Date();
    try {
      ts.addIndexPath(new File(path).getCanonicalPath());
      System.out.println("MediaIndexer.processMTRoot()" + path);
      System.out.println("MediaIndexer.processMTRoot() started at time " + dateFormat.format(date));
      System.out.println("MediaIndexer.processMTRoot() computing number of files...");
      totalNumberOfFiles = this.countFiles(path);
      lastProgressTime = System.currentTimeMillis();
      System.out.println("Number of files to explore " + totalNumberOfFiles);
      if (executorService.isShutdown()) {
        executorService =
            new ThreadPoolExecutor(
                maxThreads, maxThreads, 0L, TimeUnit.MILLISECONDS, new LimitedQueue<Runnable>(50));
      }

      this.processedFiles = 0;
      // this.processMT(new File(path));
      TreeWalker t = new TreeWalker(this);
      t.walk(path);
    } catch (IOException e) {
      e.printStackTrace();
    }
    executorService.shutdown();
    try {
      executorService.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS);
    } catch (InterruptedException e) {
      e.printStackTrace();
    }
    long t1 = System.currentTimeMillis();
    date = new Date();
    System.out.println("MediaIndexer.processMTRoot() finished at time " + dateFormat.format(date));
    System.out.println("MediaIndexer.processMTRoot() found " + newFiles + " new files");
    System.out.println("MediaIndexer.processMTRoot() updated " + updatedFiles + " files");
    System.out.println("MediaIndexer.processMTRoot() total " + ts.size() + " files");
    System.out.println("MediaIndexer.processMTRoot took " + (t1 - t0) / 1000 + " s");
  }
Example No. 20
  public void testRehash() throws IOException, InterruptedException {
    EmbeddedCacheManager cacheManager = buildCacheManager();
    cacheManagers.addLast(cacheManager);
    cacheManager.getCache("serviceGroup");

    new AddNodeTask().run();

    new AddNodeTask().run();

    new AddNodeTask().run();

    Thread.sleep(3000);
    log.info("Start testing");

    ThreadPoolExecutor executor = (ThreadPoolExecutor) Executors.newFixedThreadPool(TEST_THREADS);
    executor.prestartAllCoreThreads();
    for (int i = 0; i < TEST_LOOPS; i++) {
      executor.submit(new SimulateTask());
    }

    for (int i = 0; i < 10; i++) {
      try {
        Thread.sleep(3000);
        if (i != 1) {
          new AddNodeTask().run(); // 2
        } else {
          new RemoveNodeTask().run();
        }
      } catch (RuntimeException e) {
        log.warn("Error during add/remove node", e);
      }
    }

    log.info("Rehash phase is completed...");
    executor.shutdown();
    executor.awaitTermination(1, TimeUnit.DAYS);
  }
Example No. 21
 public void loadSavedMetrics() {
   List<RetentionLevel> accessLevels = environment.retentions().getAllAccessLevels();
   int numberOfAccessLevels = accessLevels.size();
   log.info("trying to load Metrics for " + numberOfAccessLevels + " accessLevels");
   List<Future<?>> futures = new ArrayList<>();
   try {
     for (RetentionLevel rlevel : accessLevels) {
       futures.addAll(loadSavedMetricsToCaches(rlevel.name()));
     }
   } catch (Exception e) {
     loadMetricsThreadPool.shutdownNow();
     try {
       loadMetricsThreadPool.awaitTermination(Long.MAX_VALUE, TimeUnit.DAYS);
     } catch (InterruptedException e1) {
       log.error("Waiting for loadMetrics thread pool shutdown was interrupted", e1);
     }
     for (LevelCache cache : caches.values()) {
       cache.shutdown();
     }
     throw e;
   }
   log.info("Submitted a total of {} futures to pre-load metrics", futures.size());
   int logEvery = Math.max(1, futures.size() / 10);
   int futuresDone = 0;
   for (Future<?> f : futures) {
     try {
       f.get();
     } catch (InterruptedException | ExecutionException e) {
       log.warn("Exception while waiting for load metrics threads", e);
     }
     futuresDone++;
     if (futuresDone % logEvery == 0) {
       log.info("Preloaded {} of {} metrics", futuresDone, futures.size());
     }
   }
   log.info("all metrics preloaded!");
 }
Example No. 22
  @Override
  public synchronized void close() throws SecurityException {
    if (pool.isShutdown()) {
      return;
    }

    try {
      // first, anything in the retry queue should be tried one last time and then we give up on it
      allowRetry = false;
      for (LogglySample sample : retryQueue) {
        pool.submit(sample);
      }
      retryQueue.clear();

      System.out.println(
          "Shutting down Loggly handler - waiting 90 seconds for "
              + pool.getQueue().size()
              + " logs to finish");
      pool.shutdown();
      try {
        boolean result = pool.awaitTermination(90, TimeUnit.SECONDS);
        if (!result) {
          System.out.println(
              "Not all Loggly messages sent out - still had "
                  + pool.getQueue().size()
                  + " left :(");
          pool.shutdownNow();
        }
      } catch (InterruptedException e) {
        // ignore
      }
    } finally {
      httpClient.getConnectionManager().shutdown();
      System.out.println("Loggly handler shut down");
    }
  }
Example No. 23
  /**
   * If no surveyId is passed in, this will check for new surveys and, if there are new ones,
   * download them to the DATA_DIR. If a surveyId is passed in, that specific survey will be
   * downloaded; if it's already on the device, it will be replaced with the new one.
   */
  private void checkAndDownload(String surveyId) {
    if (isAbleToRun()) {
      try {
        lock.acquire();
        databaseAdaptor = new SurveyDbAdapter(this);
        databaseAdaptor.open();
        int precacheOption =
            Integer.parseInt(databaseAdaptor.findPreference(ConstantUtil.PRECACHE_SETTING_KEY));
        String serverBase = databaseAdaptor.findPreference(ConstantUtil.SERVER_SETTING_KEY);
        if (serverBase != null && serverBase.trim().length() > 0) {
          serverBase = getResources().getStringArray(R.array.servers)[Integer.parseInt(serverBase)];
        } else {
          serverBase = props.getProperty(ConstantUtil.SERVER_BASE);
        }

        int surveyCheckOption =
            Integer.parseInt(databaseAdaptor.findPreference(ConstantUtil.CHECK_FOR_SURVEYS));
        String deviceId = databaseAdaptor.findPreference(ConstantUtil.DEVICE_IDENT_KEY);
        ArrayList<Survey> surveys = null;

        if (surveyId != null && surveyId.trim().length() > 0) {
          surveys = getSurveyHeader(serverBase, surveyId, deviceId);
          if (surveys != null && surveys.size() > 0) {
            // if we already have the survey, delete it first
            databaseAdaptor.deleteSurvey(surveyId.trim(), true);
          }
        } else {
          if (canDownload(surveyCheckOption)) {
            surveys = checkForSurveys(serverBase, deviceId);
          }
        }
        if (surveys != null && surveys.size() > 0) {
          // if there are surveys for this device, see if we need
          // them
          surveys = databaseAdaptor.checkSurveyVersions(surveys);
          int updateCount = 0;
          if (surveys != null && surveys.size() > 0) {
            for (int i = 0; i < surveys.size(); i++) {
              Survey survey = surveys.get(i);
              try {
                if (downloadSurvey(serverBase, survey)) {
                  databaseAdaptor.saveSurvey(survey);
                  downloadHelp(survey, precacheOption);
                  updateCount++;
                }
              } catch (Exception e) {
                Log.e(TAG, "Could not download survey", e);
                PersistentUncaughtExceptionHandler.recordException(e);
              }
            }
            if (updateCount > 0) {
              fireNotification(updateCount);
            }
          }
        }

        // now check if any previously downloaded surveys still
        // don't have their help media pre-cached
        if (canDownload(precacheOption)) {
          surveys = databaseAdaptor.listSurveys(null);
          if (surveys != null) {
            for (int i = 0; i < surveys.size(); i++) {
              if (!surveys.get(i).isHelpDownloaded()) {
                downloadHelp(surveys.get(i), precacheOption);
              }
            }
          }
        }

      } catch (Exception e) {
        Log.e(TAG, "Could not update surveys", e);
        PersistentUncaughtExceptionHandler.recordException(e);
      } finally {
        databaseAdaptor.close();
        lock.release();
      }
    }
    try {
      downloadExecutor.shutdown();
      // wait up to 30 minutes to download the media
      downloadExecutor.awaitTermination(1800, TimeUnit.SECONDS);
    } catch (InterruptedException e) {
      Log.e(TAG, "Error while waiting for download executor to terminate", e);
    }
    stopSelf();
  }
Example No. 24
  private void start() {
    BimServerConfig config = new BimServerConfig();
    Path homeDir = Paths.get("home");
    try {
      if (Files.isDirectory(homeDir)) {
        PathUtils.removeDirectoryWithContent(homeDir);
      }
    } catch (IOException e) {
      e.printStackTrace();
    }
    config.setClassPath(System.getProperty("java.class.path"));
    config.setHomeDir(homeDir);
    config.setPort(8080);
    config.setStartEmbeddedWebServer(true);
    config.setResourceFetcher(new LocalDevelopmentResourceFetcher(Paths.get("../")));
    final BimServer bimServer = new BimServer(config);
    try {
      LocalDevPluginLoader.loadPlugins(bimServer.getPluginManager(), null);
      bimServer.start();
      if (bimServer.getServerInfo().getServerState() == ServerState.NOT_SETUP) {
        bimServer
            .getService(AdminInterface.class)
            .setup(
                "http://localhost",
                "localhost",
                "*****@*****.**",
                "Administrator",
                "*****@*****.**",
                "admin");
      }
    } catch (PluginException
        | ServerException
        | DatabaseInitException
        | BimserverDatabaseException
        | DatabaseRestartRequiredException
        | UserException e) {
      e.printStackTrace();
    }

    try {
      final ServiceMap serviceMap = bimServer.getServiceFactory().get(AccessMethod.INTERNAL);
      ServiceInterface serviceInterface = serviceMap.get(ServiceInterface.class);
      SettingsInterface settingsInterface = serviceMap.get(SettingsInterface.class);
      final Bimsie1AuthInterface authInterface = serviceMap.get(Bimsie1AuthInterface.class);
      serviceInterface =
          bimServer
              .getServiceFactory()
              .get(authInterface.login("*****@*****.**", "admin"), AccessMethod.INTERNAL)
              .get(ServiceInterface.class);
      settingsInterface.setCacheOutputFiles(true);
      settingsInterface.setGenerateGeometryOnCheckin(false);
      final SProject project =
          serviceMap.getBimsie1ServiceInterface().addProject("test", "ifc2x3tc1");
      SDeserializerPluginConfiguration deserializerByName =
          serviceMap.getBimsie1ServiceInterface().getDeserializerByName("IfcStepDeserializer");
      Path file = Paths.get("../TestData/data/AC11-Institute-Var-2-IFC.ifc");
      serviceInterface.checkin(
          project.getOid(),
          "test",
          deserializerByName.getOid(),
          file.toFile().length(),
          file.getFileName().toString(),
          new DataHandler(new FileDataSource(file.toFile())),
          false,
          true);
      final SProject projectUpdate =
          serviceMap.getBimsie1ServiceInterface().getProjectByPoid(project.getOid());
      ThreadPoolExecutor executor =
          new ThreadPoolExecutor(20, 20, 1, TimeUnit.HOURS, new ArrayBlockingQueue<Runnable>(1000));
      for (int i = 0; i < 20; i++) {
        executor.execute(
            new Runnable() {
              @Override
              public void run() {
                try {
                  ServiceMap serviceMap2 =
                      bimServer
                          .getServiceFactory()
                          .get(
                              authInterface.login("*****@*****.**", "admin"),
                              AccessMethod.INTERNAL);
                  SSerializerPluginConfiguration serializerPluginConfiguration =
                      serviceMap.getBimsie1ServiceInterface().getSerializerByName("Ifc2x3");
                  Long download =
                      serviceMap2
                          .getBimsie1ServiceInterface()
                          .download(
                              projectUpdate.getLastRevisionId(),
                              serializerPluginConfiguration.getOid(),
                              true,
                              true);
                  SDownloadResult downloadData =
                      serviceMap2.getBimsie1ServiceInterface().getDownloadData(download);
                  if (downloadData.getFile().getDataSource()
                      instanceof CacheStoringEmfSerializerDataSource) {
                    CacheStoringEmfSerializerDataSource c =
                        (CacheStoringEmfSerializerDataSource)
                            downloadData.getFile().getDataSource();
                    try {
                      ByteArrayOutputStream baos = new ByteArrayOutputStream();
                      c.writeToOutputStream(baos, null);
                      System.out.println(baos.size());
                    } catch (SerializerException e) {
                      e.printStackTrace();
                    }
                  } else {
                    ByteArrayOutputStream baos = new ByteArrayOutputStream();
                    IOUtils.copy(downloadData.getFile().getInputStream(), baos);
                    System.out.println(baos.size());
                  }
                  serviceMap2.getServiceInterface().cleanupLongAction(download);
                } catch (ServerException
                    | UserException
                    | IOException // also covers FileNotFoundException
                    | PublicInterfaceNotFoundException e) {
                  e.printStackTrace();
                }
              }
            });
      }
      executor.shutdown();
      executor.awaitTermination(1, TimeUnit.HOURS);
      bimServer.stop();
    } catch (ServerException | UserException | InterruptedException e1) {
      e1.printStackTrace();
    }
  }
Example No. 25
  /** Surfaces any controller failure, then polls whether the service has terminated. */
 public void terminate() throws Exception {
   if (controller.getException() != null) {
     throw controller.getException();
   }
   service.awaitTermination(0, TimeUnit.MILLISECONDS);
 }
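A zero timeout turns awaitTermination into a non-blocking poll: it returns immediately with the current termination status, and since nothing here calls shutdown(), the ignored result is only true if the service was shut down elsewhere. Conceptually the call is equivalent to this check (sketch):

    // returns at once; no waiting is performed with a zero timeout
    boolean terminated = service.awaitTermination(0, TimeUnit.MILLISECONDS);
    // the same information is available without the checked exception
    boolean same = service.isTerminated();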
Example No. 26
  @Override
  public void stop() {

    if (sharedHttpServers != null) {
      for (HttpServer server : sharedHttpServers.values()) {
        server.close();
      }
      sharedHttpServers = null;
    }

    if (sharedNetServers != null) {
      for (NetServer server : sharedNetServers.values()) {
        server.close();
      }
      sharedNetServers = null;
    }

    if (timer != null) {
      timer.stop();
      timer = null;
    }

    if (eventBus != null) {
      eventBus.close(null);
    }

    if (backgroundPool != null) {
      backgroundPool.shutdown();
    }

    if (acceptorPool != null) {
      acceptorPool.shutdown();
    }

    try {
      if (backgroundPool != null) {
        backgroundPool.awaitTermination(20, TimeUnit.SECONDS);
        backgroundPool = null;
      }
    } catch (InterruptedException ex) {
      // ignore
    }

    try {
      if (acceptorPool != null) {
        acceptorPool.awaitTermination(20, TimeUnit.SECONDS);
        acceptorPool = null;
      }
    } catch (InterruptedException ex) {
      // ignore
    }

    // log.info("Release external resources from worker pool");
    if (corePool != null) {
      corePool.releaseExternalResources();
      corePool = null;
    }
    // log.info("Release external resources: done");

    setContext(null);
  }
Example No. 27
  @Override
  public synchronized long claimRecords(SDFSEvent evt, LargeBloomFilter bf) throws IOException {
    if (this.isClosed()) throw new IOException("Hashtable " + this.fileName + " is closed");
    executor =
        new ThreadPoolExecutor(
            Main.writeThreads + 1,
            Main.writeThreads + 1,
            10,
            TimeUnit.SECONDS,
            worksQueue,
            new ProcessPriorityThreadFactory(Thread.MIN_PRIORITY),
            executionHandler);
    csz = new AtomicLong(0);
    Lock l = this.gcLock.writeLock();
    l.lock();
    this.runningGC = true;
    lbf = null;
    lbf = new LargeBloomFilter(maxSz, .01);
    l.unlock();
    try {
      SDFSLogger.getLog()
          .info("Claiming Records [" + this.getSize() + "] from [" + this.fileName + "]");
      SDFSEvent tEvt =
          SDFSEvent.claimInfoEvent(
              "Claiming Records [" + this.getSize() + "] from [" + this.fileName + "]", evt);
      tEvt.maxCt = this.maps.size();
      Iterator<ProgressiveFileByteArrayLongMap> iter = maps.iterator();
      while (iter.hasNext()) {
        tEvt.curCt++;
        ProgressiveFileByteArrayLongMap m = null;
        try {
          m = iter.next();
          executor.execute(new ClaimShard(m, bf, lbf, csz));
        } catch (Exception e) {
          tEvt.endEvent(
              "Unable to claim records for " + m + " because : [" + e.toString() + "]",
              SDFSEvent.ERROR);
          SDFSLogger.getLog().error("Unable to claim records for " + m, e);
          throw new IOException(e);
        }
      }
      executor.shutdown();
      try {
        while (!executor.awaitTermination(10, TimeUnit.SECONDS)) {
          SDFSLogger.getLog().debug("Awaiting fdisk completion of threads.");
        }
      } catch (InterruptedException e) {
        throw new IOException(e);
      }
      this.kSz.getAndAdd(-1 * csz.get());
      tEvt.endEvent("removed [" + csz.get() + "] records");
      SDFSLogger.getLog().info("removed [" + csz.get() + "] records");
      iter = maps.iterator();
      while (iter.hasNext()) {
        ProgressiveFileByteArrayLongMap m = null;
        try {
          m = iter.next();
          if (m.isFull() && !m.isActive()) {
            double pf = (double) m.size() / (double) m.maxSize();
            // SDFSLogger.getLog().info("pfull=" + pf);
            if (pf < .4 || Double.isNaN(pf)) {
              // SDFSLogger.getLog().info("deleting " +
              // m.toString());
              m.iterInit();
              KVPair p = m.nextKeyValue();
              while (p != null) {
                ProgressiveFileByteArrayLongMap _m = this.getWriteMap();
                try {
                  _m.put(p.key, p.value);
                } catch (HashtableFullException e) {
                  _m.setActive(false);
                  _m = this.createWriteMap();
                  _m.put(p.key, p.value);
                } finally {
                  this.activeWriteMaps.offer(_m);
                }
                p = m.nextKeyValue();
              }
              int mapsz = maps.size();
              maps.remove(m);
              mapsz = mapsz - maps.size();
              // SDFSLogger.getLog().info(
              // "removing map " + m.toString() + " sz="
              // + maps.size() + " rm=" + mapsz);
              m.vanish();

              m = null;
            }
          }
        } catch (Exception e) {
          tEvt.endEvent(
              "Unable to compact " + m + " because : [" + e.toString() + "]", SDFSEvent.ERROR);
          SDFSLogger.getLog().error("to compact " + m, e);
          throw new IOException(e);
        }
      }
      return csz.get();
    } finally {
      l.lock();
      this.runningGC = false;
      l.unlock();
      executor = null;
    }
  }
Example No. 28
  protected void doStop() throws Exception {
   executor.shutdown();
   if (!executor.awaitTermination(60, TimeUnit.SECONDS)) executor.shutdownNow();
 }
Example No. 29
  public void join() throws InterruptedException {
   executor.awaitTermination(Long.MAX_VALUE, TimeUnit.MILLISECONDS);
 }
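awaitTermination with Long.MAX_VALUE milliseconds is an effectively unbounded wait, but it still completes only after shutdown() or shutdownNow() has been invoked elsewhere; otherwise this join() blocks forever. A variant making that precondition explicit (a sketch, assuming the same executor field):

  public void join() throws InterruptedException {
    if (!executor.isShutdown()) {
      throw new IllegalStateException("call shutdown() before join()");
    }
    executor.awaitTermination(Long.MAX_VALUE, TimeUnit.MILLISECONDS);
  }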
Example No. 30
  /**
   * Initializes the object set of this hash table.
   *
   * @return the size of the table, as a <code>long</code> value
   * @throws Exception if the backing files cannot be created or loaded
   */
  public long setUp() throws Exception {
    File _fs = new File(fileName);
    if (!_fs.getParentFile().exists()) {
      _fs.getParentFile().mkdirs();
    }
    SDFSLogger.getLog().info("Folder = " + _fs.getPath());
    SDFSLogger.getLog().info("Loading freebits bitset");
    long rsz = 0;
    long _tbs = maxSz / (256);
    int max = Integer.MAX_VALUE / ProgressiveFileByteArrayLongMap.EL;
    if (_tbs > max) {
      this.hashTblSz = max;
    } else if (_tbs > this.hashTblSz) {
      this.hashTblSz = (int) _tbs;
    }
    SDFSLogger.getLog()
        .info(
            "table setup max="
                + max
                + " maxsz="
                + this.maxSz
                + " _tbs="
                + _tbs
                + " hashTblSz="
                + this.hashTblSz);
    this.hashTblSz = NextPrime.getNextPrimeI((int) (this.hashTblSz));
    File[] files = _fs.getParentFile().listFiles(new DBFileFilter());
    if (files.length > 0) {
      CommandLineProgressBar bar =
          new CommandLineProgressBar("Loading Existing Hash Tables", files.length, System.out);
      this.loadEvent.maxCt = files.length + 128;

      for (int i = 0; i < files.length; i++) {
        this.loadEvent.curCt = this.loadEvent.curCt + 1;
        int sz = NextPrime.getNextPrimeI((int) (this.hashTblSz));
        // SDFSLogger.getLog().debug("will create byte array of size "
        // + sz + " propsize was " + propsize);
        ProgressiveFileByteArrayLongMap m = null;
        String pth = files[i].getPath();
        String pfx = pth.substring(0, pth.length() - 5);
        m = new ProgressiveFileByteArrayLongMap(pfx, sz);
        long mep = m.setUp();
        if (mep > endPos) endPos = mep;
        maps.add(m);
        rsz = rsz + m.size();
        bar.update(i);
        if (!m.isFull() && this.activeWriteMaps.remainingCapacity() > 0) {
          m.setActive(true);
          this.activeWriteMaps.add(m);
          this.loadCacheExecutor.execute(m);
        } else {
          m.setActive(false);
          m.full = true;
        }
      }
      bar.finish();
    }

    this.loadEvent.shortMsg = "Loading BloomFilters";
    if (maps.size() == 0) lbf = new LargeBloomFilter(maxSz, .01);
    else {
      try {
        lbf = new LargeBloomFilter(_fs.getParentFile(), maxSz, true);
      } catch (Exception e) {
        SDFSLogger.getLog().warn("Recreating BloomFilters...");
        this.loadEvent.shortMsg = "Recreating BloomFilters";
        lbf = new LargeBloomFilter(maxSz, .01);
        executor =
            new ThreadPoolExecutor(
                Main.writeThreads,
                Main.writeThreads,
                10,
                TimeUnit.SECONDS,
                worksQueue,
                new ProcessPriorityThreadFactory(Thread.MIN_PRIORITY),
                executionHandler);
        CommandLineProgressBar bar =
            new CommandLineProgressBar("ReCreating BloomFilters", maps.size(), System.out);
        Iterator<ProgressiveFileByteArrayLongMap> iter = maps.iterator();
        int i = 0;
        ArrayList<LBFReconstructThread> al = new ArrayList<LBFReconstructThread>();
        while (iter.hasNext()) {
          ProgressiveFileByteArrayLongMap m = iter.next();
          LBFReconstructThread th = new LBFReconstructThread(lbf, m);
          executor.execute(th);
          al.add(th);
          i++;
          bar.update(i);
        }
        executor.shutdown();
        bar.finish();
        try {
          System.out.print("Waiting for all BloomFilters creation threads to finish");
          while (!executor.awaitTermination(10, TimeUnit.SECONDS)) {
            SDFSLogger.getLog().debug("Awaiting fdisk completion of threads.");
            System.out.print(".");
          }
          for (LBFReconstructThread th : al) {
            if (th.ex != null) throw th.ex;
          }
          System.out.println(" done");
        } catch (Exception e1) {
          throw new IOException(e1);
        }
      }
    }
    while (this.activeWriteMaps.remainingCapacity() > 0) {
      String guid = null;
      boolean written = false;
      while (!written) {
        guid = RandomGUID.getGuid();

        File f = new File(fileName + "-" + guid + ".keys");
        if (!f.exists()) {
          ProgressiveFileByteArrayLongMap activeWMap =
              new ProgressiveFileByteArrayLongMap(fileName + "-" + guid, this.hashTblSz);
          activeWMap.setUp();
          this.maps.add(activeWMap);
          written = true;
          activeWMap.setActive(true);
          this.activeWriteMaps.offer(activeWMap);
        }
      }
    }
    if (SDFSLogger.isDebug()) {
      long mem = MemoryMeasurer.measureBytes(lbf);
      long mmem = MemoryMeasurer.measureBytes(maps);
      SDFSLogger.getLog().debug("Large BloomFilter Size=" + StorageUnit.of(mem).format(mem));
      SDFSLogger.getLog().debug("Maps Size=" + StorageUnit.of(mmem).format(mmem));
    }
    this.loadEvent.endEvent("Loaded entries " + rsz);
    System.out.println("Loaded entries " + rsz);
    SDFSLogger.getLog().info("Active Maps " + this.activeWriteMaps.size());
    SDFSLogger.getLog().info("Loaded entries " + rsz);
    SDFSLogger.getLog().info("Loading BloomFilters " + rsz);
    this.kSz.set(rsz);
    this.closed = false;
    return size;
  }