/** Updates the current main screen. */
  @Override
  public void run() {

    try {
      long nextUpdateTime = 0;
      long counter = 1;
      final ApplicationMain main = this.main;

      while (true) {
        final long current = counter++;

        Display.getDefault()
            .syncExec(
                new Runnable() {
                  @Override
                  public void run() {
                    // Update the timer
                    TimerContext.get().update();

                    // Update the number of items held
                    new UpdateItemCountTask(main).run();
                    // Update the number of ships held
                    new UpdateShipCountTask(main).run();
                    // Update the fleet tabs
                    new UpdateFleetTabTask(main).run();
                    // Update expeditions and docking
                    new UpdateDeckNdockTask(main).run();

                    try {
                      // Files that track their last-update time are saved immediately
                      ShipGroupConfig.store();
                      MasterData.store();
                      EnemyData.store();
                      ShipParameterRecord.store();
                      ScriptData.store();

                      if ((current % 10) == 0) {
                        // Periodically remember the window positions in case the application is not exited from the menu
                        main.saveWindows();
                      }

                    } catch (IOException e) {
                      LOG.get().fatal("ファイル更新に失敗しました", e);
                    }
                  }
                });

        long currentTime = Calendar.getInstance().getTimeInMillis();
        // Schedule the next update one second after the previous one; if we have
        // fallen behind, reset to one second from now to avoid catch-up bursts.
        nextUpdateTime += TimeUnit.SECONDS.toMillis(1);
        if (nextUpdateTime < currentTime) {
          nextUpdateTime = currentTime + TimeUnit.SECONDS.toMillis(1);
        }

        Thread.sleep(nextUpdateTime - currentTime);
      }
    } catch (Exception e) {
      LOG.get().fatal("スレッドが異常終了しました", e);
      throw new RuntimeException(e);
    }
  }
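  /*
   * A minimal, self-contained sketch of the same drift-compensating fixed-rate
   * loop (class and names here are illustrative, not taken from the snippet above):
   */
  import java.util.concurrent.TimeUnit;

  public class FixedRateLoopSketch {
    public static void main(String[] args) throws InterruptedException {
      long nextUpdateTime = System.currentTimeMillis();
      while (!Thread.currentThread().isInterrupted()) {
        System.out.println("tick " + System.currentTimeMillis()); // the periodic work
        long now = System.currentTimeMillis();
        // Advance the schedule by a fixed period instead of sleeping a fixed
        // amount, so time spent working does not accumulate as drift.
        nextUpdateTime += TimeUnit.SECONDS.toMillis(1);
        if (nextUpdateTime < now) {
          // Fell behind: skip ahead rather than running catch-up iterations.
          nextUpdateTime = now + TimeUnit.SECONDS.toMillis(1);
        }
        Thread.sleep(nextUpdateTime - now);
      }
    }
  }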
  @Test(timeout = 10000)
  public void testTimeoutsSetFromConnectionInfo() throws IOException, JMSException {
    final long CONNECT_TIMEOUT = TimeUnit.SECONDS.toMillis(4);
    final long CLOSE_TIMEOUT = TimeUnit.SECONDS.toMillis(5);
    final long SEND_TIMEOUT = TimeUnit.SECONDS.toMillis(6);
    final long REQUEST_TIMEOUT = TimeUnit.SECONDS.toMillis(7);

    provider = new FailoverProvider(uris, Collections.<String, String>emptyMap());
    provider.setProviderListener(
        new DefaultProviderListener() {

          @Override
          public void onConnectionEstablished(URI remoteURI) {}
        });

    provider.connect();
    provider.start();

    JmsConnectionInfo connectionInfo = createConnectionInfo();

    connectionInfo.setConnectTimeout(CONNECT_TIMEOUT);
    connectionInfo.setCloseTimeout(CLOSE_TIMEOUT);
    connectionInfo.setSendTimeout(SEND_TIMEOUT);
    connectionInfo.setRequestTimeout(REQUEST_TIMEOUT);

    ProviderFuture request = new ProviderFuture();
    provider.create(connectionInfo, request);
    request.sync();

    assertEquals(CONNECT_TIMEOUT, provider.getConnectTimeout());
    assertEquals(CLOSE_TIMEOUT, provider.getCloseTimeout());
    assertEquals(SEND_TIMEOUT, provider.getSendTimeout());
    assertEquals(REQUEST_TIMEOUT, provider.getRequestTimeout());
  }
  private Properties getDefaultProperties() {
    Properties defaultProps = new Properties();

    // testing params, DON'T TOUCH!!!
    defaultProps.setProperty("openkad.keyfactory.keysize", "50");
    defaultProps.setProperty("openkad.keyfactory.hashalgo", "SHA-256");
    defaultProps.setProperty("openkad.bucket.kbuckets.maxsize", "16");
    defaultProps.setProperty("openkad.color.nrcolors", "19");
    defaultProps.setProperty("openkad.scheme.name", "openkad.udp");

    // performance params

    // handling incoming messages
    defaultProps.setProperty("openkad.executors.server.nrthreads", "8");
    defaultProps.setProperty("openkad.executors.server.max_pending", "512");
    // handling registered callback
    defaultProps.setProperty("openkad.executors.client.nrthreads", "1");
    defaultProps.setProperty("openkad.executors.client.max_pending", "20");
    // forwarding find node requests
    defaultProps.setProperty("openkad.executors.forward.nrthreads", "2");
    defaultProps.setProperty("openkad.executors.forward.max_pending", "2");
    // executing the long find node operations
    defaultProps.setProperty("openkad.executors.op.nrthreads", "1");
    defaultProps.setProperty("openkad.executors.op.max_pending", "2");
    // sending back pings
    defaultProps.setProperty("openkad.executors.ping.nrthreads", "1");
    defaultProps.setProperty("openkad.executors.ping.max_pending", "16");
    // cache settings
    defaultProps.setProperty("openkad.cache.validtime", TimeUnit.HOURS.toMillis(10) + "");
    defaultProps.setProperty("openkad.cache.size", "100");
    defaultProps.setProperty("openkad.cache.share", "1");
    // minimum time between successive pings
    defaultProps.setProperty("openkad.bucket.valid_timespan", TimeUnit.MINUTES.toMillis(1) + "");
    // network timeouts and concurrency level
    defaultProps.setProperty("openkad.net.concurrency", "1");
    defaultProps.setProperty("openkad.net.timeout", TimeUnit.SECONDS.toMillis(1000) + "");
    defaultProps.setProperty("openkad.net.forwarded.timeout", TimeUnit.SECONDS.toMillis(1200) + "");

    defaultProps.setProperty("openkad.color.candidates", "1");
    defaultProps.setProperty("openkad.color.slack.size", "1");
    defaultProps.setProperty("openkad.color.allcolors", "95");
    // interval between successive find node operations for refresh buckets
    defaultProps.setProperty("openkad.refresh.interval", TimeUnit.SECONDS.toMillis(30000) + "");

    // local configuration, please touch
    defaultProps.setProperty("openkad.net.udp.port", "-1");
    defaultProps.setProperty("openkad.local.key", "");
    defaultProps.setProperty("openkad.file.nodes.path", "nodes");

    // misc
    defaultProps.setProperty("openkad.seed", "0");

    // S/Kademlia settings

    defaultProps.setProperty("skademlia.siblinglist.size", "8");
    defaultProps.setProperty("dht.storage.checkInterval", "" + TimeUnit.SECONDS.toMillis(30000));

    return defaultProps;
  }
  /** Returns the source to satisfy {@code request} given this cached response. */
  public ResponseSource chooseResponseSource(long nowMillis, RequestHeaders request) {
    /*
     * If this response shouldn't have been stored, it should never be used
     * as a response source. This check should be redundant as long as the
     * persistence store is well-behaved and the rules are constant.
     */
    if (!isCacheable(request)) {
      return ResponseSource.NETWORK;
    }

    if (request.isNoCache() || request.hasConditions()) {
      return ResponseSource.NETWORK;
    }

    long ageMillis = computeAge(nowMillis);
    long freshMillis = computeFreshnessLifetime();

    if (request.getMaxAgeSeconds() != -1) {
      freshMillis = Math.min(freshMillis, TimeUnit.SECONDS.toMillis(request.getMaxAgeSeconds()));
    }

    long minFreshMillis = 0;
    if (request.getMinFreshSeconds() != -1) {
      minFreshMillis = TimeUnit.SECONDS.toMillis(request.getMinFreshSeconds());
    }

    long maxStaleMillis = 0;
    if (!mustRevalidate && request.getMaxStaleSeconds() != -1) {
      maxStaleMillis = TimeUnit.SECONDS.toMillis(request.getMaxStaleSeconds());
    }

    if (!noCache && ageMillis + minFreshMillis < freshMillis + maxStaleMillis) {
      if (ageMillis + minFreshMillis >= freshMillis) {
        headers.add("Warning", "110 HttpURLConnection \"Response is stale\"");
      }
      /*
       * not available in API 8
      if (ageMillis > TimeUnit.HOURS.toMillis(24) && isFreshnessLifetimeHeuristic()) {
      */
      if (ageMillis > 24L * 60L * 60L * 1000L && isFreshnessLifetimeHeuristic()) {
        headers.add("Warning", "113 HttpURLConnection \"Heuristic expiration\"");
      }
      return ResponseSource.CACHE;
    }

    if (etag != null) {
      request.setIfNoneMatch(etag);
    } else if (lastModified != null) {
      request.setIfModifiedSince(lastModified);
    } else if (servedDate != null) {
      request.setIfModifiedSince(servedDate);
    }

    return request.hasConditions() ? ResponseSource.CONDITIONAL_CACHE : ResponseSource.NETWORK;
  }
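  /*
   * A worked example of the freshness arithmetic above, as a standalone sketch
   * (the numbers are illustrative): a response that is 60 seconds old, with a
   * freshness lifetime of 90 seconds, requested with max-stale=120.
   */
  import java.util.concurrent.TimeUnit;

  public class FreshnessExample {
    public static void main(String[] args) {
      long ageMillis = TimeUnit.SECONDS.toMillis(60);
      long freshMillis = TimeUnit.SECONDS.toMillis(90);
      long minFreshMillis = 0; // no min-fresh directive on the request
      long maxStaleMillis = TimeUnit.SECONDS.toMillis(120);

      // Servable from cache: 60s + 0s < 90s + 120s.
      boolean servable = ageMillis + minFreshMillis < freshMillis + maxStaleMillis;
      // No "110 Response is stale" warning: 60s + 0s < 90s.
      boolean stale = ageMillis + minFreshMillis >= freshMillis;
      System.out.println("servable=" + servable + ", stale=" + stale); // true, false
    }
  }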
  /**
   * Tests update of session expiration in memcached (like {@link
   * #testExpirationOfSessionsInMemcachedIfBackupWasSkippedSimple()}) but for the scenario where
   * many readonly requests occur: in this case, we cannot just use <em>maxInactiveInterval -
   * secondsSinceLastBackup</em> (in {@link MemcachedSessionService#updateExpirationInMemcached}) to
   * determine if an expiration update is required, but we must use the last expiration time sent to
   * memcached.
   *
   * @throws Exception if something goes wrong with the http communication with tomcat
   */
  @Test(enabled = true, dataProviderClass = TestUtils.class, dataProvider = STICKYNESS_PROVIDER)
  public void testExpirationOfSessionsInMemcachedIfBackupWasSkippedManyReadonlyRequests(
      final SessionAffinityMode stickyness) throws Exception {

    final SessionManager manager = _tomcat1.getManager();
    setStickyness(stickyness);

    // set to 1 sec above (in setup), default is 10 seconds
    final int delay = manager.getContainer().getBackgroundProcessorDelay();
    manager.setMaxInactiveInterval(delay * 4);

    final String sessionId1 = makeRequest(_httpClient, _portTomcat1, null);
    assertNotNull(sessionId1, "No session created.");
    assertWaitingWithProxy(Predicates.<MemcachedClientIF>notNull(), 200L, _memcached)
        .get(sessionId1);

    /* after 3 seconds make another request without changing the session, so that
     * it's not sent to memcached
     */
    Thread.sleep(TimeUnit.SECONDS.toMillis(delay * 3));
    assertEquals(
        makeRequest(_httpClient, _portTomcat1, sessionId1),
        sessionId1,
        "SessionId should be the same");
    assertNotNull(_memcached.get(sessionId1), "Session should still exist in memcached.");

    /* after another 3 seconds make another request without changing the session
     */
    Thread.sleep(TimeUnit.SECONDS.toMillis(delay * 3));
    assertEquals(
        makeRequest(_httpClient, _portTomcat1, sessionId1),
        sessionId1,
        "SessionId should be the same");
    assertNotNull(_memcached.get(sessionId1), "Session should still exist in memcached.");

    /* after another nearly 4 seconds (maxInactiveInterval) check that the session is still alive in memcached,
     * this would have been expired without an updated expiration
     */
    Thread.sleep(TimeUnit.SECONDS.toMillis(manager.getMaxInactiveInterval()) - 500);
    assertNotNull(_memcached.get(sessionId1), "Session should still exist in memcached.");

    /* after another second in sticky mode (more than 4 seconds since the last request), or after
     * two times the maxInactiveInterval in non-sticky mode (we must keep sessions in memcached
     * with double the expiration time) the session must be expired in memcached
     */
    Thread.sleep(TimeUnit.SECONDS.toMillis(delay) + 500);
    assertNotSame(
        makeRequest(_httpClient, _portTomcat1, sessionId1),
        sessionId1,
        "The sessionId should have changed due to expired sessin");
  }
  /**
   * Used to do scan of test files.
   *
   * @param files the test files to scan
   */
  public void simpleScan(Iterable<File> files) {
    SourceProject project = (SourceProject) index.search("Java Project");
    VisitorContext context = new VisitorContext(project);
    visitor.setContext(context);

    ProgressReport progressReport =
        new ProgressReport(
            "Report about progress of Java AST analyzer", TimeUnit.SECONDS.toMillis(10));
    progressReport.start(Lists.newArrayList(files));

    boolean successfullyCompleted = false;
    try {
      for (File file : files) {
        simpleScan(file, context);
        progressReport.nextFile();
      }
      successfullyCompleted = true;
    } finally {
      if (successfullyCompleted) {
        progressReport.stop();
      } else {
        progressReport.cancel();
      }
    }
  }
 private void sleepForProxyInitRetry() {
   try {
     Thread.sleep(TimeUnit.SECONDS.toMillis(ClientInvocation.RETRY_WAIT_TIME_IN_SECONDS));
   } catch (InterruptedException ignored) {
     EmptyStatement.ignore(ignored);
   }
 }
  protected void showInReplyToTweetDetails() {
    Tweet tweet = null;
    if (this.inReplyToSid != null && this.inReplyToUid > 0L) {
      final View view = findViewById(R.id.tweetReplyToDetails);
      view.setVisibility(View.VISIBLE);
      tweet = getDb().getTweetDetails(this.inReplyToUid);
      if (tweet != null) {
        LOG.i("inReplyTo:%s", tweet.toFullString());
        if (!this.enabledPostToAccounts.isServicesPreSpecified()) {
          final Meta serviceMeta = tweet.getFirstMetaOfType(MetaType.SERVICE);
          if (serviceMeta != null)
            setPostToAccountExclusive(ServiceRef.parseServiceMeta(serviceMeta));
        }

        ((TextView) view.findViewById(R.id.tweetDetailBody)).setText(tweet.getBody());
        if (tweet.getAvatarUrl() != null)
          loadImage(
              new ImageLoadRequest(
                  tweet.getAvatarUrl(), (ImageView) view.findViewById(R.id.tweetDetailAvatar)));
        ((TextView) view.findViewById(R.id.tweetDetailName)).setText(tweet.getFullname());
        ((TextView) view.findViewById(R.id.tweetDetailDate))
            .setText(
                DateFormat.getDateTimeInstance()
                    .format(new Date(TimeUnit.SECONDS.toMillis(tweet.getTime()))));
      }
    }
    initBody(tweet);
    this.txtBody.setSelection(this.txtBody.getText().length());
  }
 public static void setServiceUnavailable() {
   synchronized (serviceAvailableLock) {
     mServiceAvailable = false;
     timeWhenTheServiceWillBeAvailable =
         System.currentTimeMillis() + java.util.concurrent.TimeUnit.SECONDS.toMillis(15 * 60);
   }
 }
  @Override
  public void run() {
    try {
      m_channelManager = new ClientChannelManager();

      long expireTime = System.currentTimeMillis() + TimeUnit.SECONDS.toMillis(1);

      while (m_channelManager.getActiveChannel() == null
          && System.currentTimeMillis() < expireTime) {
        TimeUnit.MILLISECONDS.sleep(1);
      }

      m_warmup.countDown();
      run0();
    } catch (Throwable e) {
      m_logger.error(e.getMessage(), e);
      m_warmup.countDown();
    } finally {
      if (m_channelManager != null) {
        m_channelManager.close();
      }

      m_latch.countDown();
    }
  }
public class Defaults {

  public static final boolean DEBUG = false;

  public static final String HOST = "https://api.segment.io";

  public static final int FLUSH_AT = 20;
  public static final int FLUSH_AFTER = (int) TimeUnit.SECONDS.toMillis(10);

  @SuppressWarnings("serial")
  public static final Map<String, String> ENDPOINTS =
      new HashMap<String, String>() {
        {
          this.put("identify", "/v1/identify");
          this.put("alias", "/v1/alias");
          this.put("track", "/v1/track");
          this.put("import", "/v1/import");
        }
      };

  public static String getSettingsEndpoint(String secret) {
    return "/project/" + secret + "/settings";
  }

  public static final int MAX_QUEUE_SIZE = 10000;

  // cache the settings for 1 hour before reloading
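  // (1000 * 60 * 60 is the same value as (int) TimeUnit.HOURS.toMillis(1))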
  public static final int SETTINGS_CACHE_EXPIRY = 1000 * 60 * 60;
}
  @Override
  public void execute(SensorContext context) {
    List<TreeVisitor> treeVisitors = Lists.newArrayList();
    boolean isAtLeastSq62 = context.getSonarQubeVersion().isGreaterThanOrEqual(V6_2);

    MetricsVisitor metricsVisitor =
        new MetricsVisitor(
            context,
            noSonarFilter,
            context.settings().getBoolean(JavaScriptPlugin.IGNORE_HEADER_COMMENTS),
            fileLinesContextFactory,
            isAtLeastSq62);

    treeVisitors.add(metricsVisitor);
    treeVisitors.add(new HighlighterVisitor(context, fileSystem));
    treeVisitors.add(new SeChecksDispatcher(checks.seChecks()));
    treeVisitors.add(new CpdVisitor(fileSystem, context));
    treeVisitors.addAll(checks.visitorChecks());

    for (TreeVisitor check : treeVisitors) {
      if (check instanceof ParsingErrorCheck) {
        parsingErrorRuleKey = checks.ruleKeyFor((JavaScriptCheck) check);
        break;
      }
    }

    ProgressReport progressReport =
        new ProgressReport(
            "Report about progress of Javascript analyzer", TimeUnit.SECONDS.toMillis(10));
    progressReport.start(Lists.newArrayList(fileSystem.files(mainFilePredicate)));

    analyseFiles(context, treeVisitors, fileSystem.inputFiles(mainFilePredicate), progressReport);

    executeCoverageSensors(context, metricsVisitor.linesOfCode(), isAtLeastSq62);
  }
  private void waitForWorkersStartup(WorkerJvm worker, int workerTimeoutSec) {
    int loopCount =
        (int) (TimeUnit.SECONDS.toMillis(workerTimeoutSec) / WAIT_FOR_WORKER_STARTUP_INTERVAL_MILLIS);
    for (int i = 0; i < loopCount; i++) {
      if (hasExited(worker)) {
        throw new SpawnWorkerFailedException(
            format(
                "Startup of Worker on host %s failed, check log files in %s for more information!",
                agent.getPublicAddress(), worker.getWorkerHome()));
      }

      String address = readAddress(worker);
      if (address != null) {
        worker.setHzAddress(address);
        LOGGER.info(format("Worker %s started", worker.getId()));
        return;
      }

      sleepMillis(WAIT_FOR_WORKER_STARTUP_INTERVAL_MILLIS);
    }

    throw new SpawnWorkerFailedException(
        format(
            "Worker %s of Testsuite %s on Agent %s didn't start within %s seconds",
            worker.getId(),
            agent.getTestSuite().getId(),
            agent.getPublicAddress(),
            workerTimeoutSec));
  }
  private void tryToJoinPossibleAddresses(Collection<Address> possibleAddresses)
      throws InterruptedException {
    long connectionTimeoutMillis = TimeUnit.SECONDS.toMillis(getConnTimeoutSeconds());
    long start = Clock.currentTimeMillis();

    while (!node.joined() && Clock.currentTimeMillis() - start < connectionTimeoutMillis) {
      Address masterAddress = node.getMasterAddress();
      if (isAllBlacklisted(possibleAddresses) && masterAddress == null) {
        return;
      }

      if (masterAddress != null) {
        if (logger.isFinestEnabled()) {
          logger.finest("Sending join request to " + masterAddress);
        }
        clusterJoinManager.sendJoinRequest(masterAddress, true);
      } else {
        sendMasterQuestion(possibleAddresses);
      }

      if (!node.joined()) {
        Thread.sleep(JOIN_RETRY_WAIT_TIME);
      }
    }
  }
 @Override
 protected void lockEntry(
     PersistentEntity persistentEntity, String entityFamily, Serializable id, int timeout) {
   String redisKey = getRedisKey(entityFamily, id);
   final TimeUnit milliUnit = TimeUnit.MILLISECONDS;
   final long waitTime = TimeUnit.SECONDS.toMillis(timeout);
   final String lockName = lockName(redisKey);
   int sleepTime = 0;
   while (true) {
     if (redisTemplate.setnx(lockName, System.currentTimeMillis())
         && redisTemplate.expire(lockName, timeout)) {
       break;
     } else {
       if (redisTemplate.ttl(lockName) > 0) {
         try {
           if (sleepTime > waitTime) {
             throw new CannotAcquireLockException(
                 "Failed to acquire lock on key [" + redisKey + "]. Wait time exceeded timeout.");
           } else {
             // wait for previous lock to expire
             sleepTime += 500;
             milliUnit.sleep(500);
           }
         } catch (InterruptedException e) {
           throw new CannotAcquireLockException(
               "Failed to acquire lock on key [" + redisKey + "]: " + e.getMessage(), e);
         }
       } else {
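          // No TTL on the lock: the previous holder likely died before its
          // expire() call, so take the lock over atomically via GETSET.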
         if (redisTemplate.getset(lockName, System.currentTimeMillis()) != null
             && redisTemplate.expire(lockName, timeout)) break;
       }
     }
   }
 }
 protected final long getMaxJoinTimeToMasterNode() {
    // max join time to find the master node;
    // this should be significantly greater than the MAX_WAIT_SECONDS_BEFORE_JOIN property,
    // hence we add 10 seconds more
   return TimeUnit.SECONDS.toMillis(MIN_WAIT_SECONDS_BEFORE_JOIN)
       + node.getGroupProperties().getMillis(GroupProperty.MAX_WAIT_SECONDS_BEFORE_JOIN);
 }
 /**
  * Indicates if a peer needs a maintenance check.
  *
  * @param peerStatatistic The peer with its statistics
  * @return True if the peer needs a maintenance check
  */
 protected boolean needMaintenance(final PeerStatatistic peerStatatistic) {
   final int onlineSec = peerStatatistic.onlineTime() / 1000;
   int index;
   if (onlineSec <= 0) {
     index = 0;
   } else {
     index = intervalSeconds.length - 1;
     for (int i = 0; i < intervalSeconds.length; i++) {
       // interval is 2,4,8,16,32,64
       // examples
       // I have seen a peer online for 5 sec -> next interval to check is 8
       // I have seen a peer online for 4 sec -> next interval to check is 4
       // I have seen a peer online for 17 sec -> next interval to check is 32
       // I have seen a peer online for 112321 sec -> next interval to check is 64
       if (intervalSeconds[i] >= onlineSec) {
         index = i;
         break;
       }
     }
   }
   final int time = intervalSeconds[index];
   final long lastTimeWhenChecked =
       Timings.currentTimeMillis() - peerStatatistic.getLastSeenOnline();
   return lastTimeWhenChecked > TimeUnit.SECONDS.toMillis(time);
 }
  private void testHash(HashRequester hasher, String path) throws IOException {
    HashRequest hashRequest = new HashRequest();
    hashRequest.setPath(path);

    logger.info(String.format("Hash Request: %s", hashRequest));
    HashResponse hashResponse = hasher.hashDirectory(hashRequest);
    logger.info(String.format("Hash Response: %s", hashResponse));

    if (!hashResponse.isQueued()) {
      throw new ErrorDeletingFileException(
          String.format(
              "Error hashing '%s' in '%s'", hashRequest.getPath(), hashRequest.getBaseDir()));
    }

    try {
      Thread.sleep(TimeUnit.SECONDS.toMillis(5L));
    } catch (InterruptedException ee) {
      // ignored; fall through and check the hash status immediately
    }

    HashStatus hashStatus = hasher.getHashStatus(hashRequest.getId());
    logger.info(String.format("Hash Status: %s", hashStatus));

    if (hashStatus.isFailed()) {
      throw new ErrorHashingDirectoryException(
          String.format(
              "Hashing failed '%s' in '%s': %s",
              hashRequest.getPath(), hashRequest.getBaseDir(), hashStatus.getFailureMessage()));
    } else if (!hashStatus.isStarted()) {
      throw new ErrorHashingDirectoryException(
          String.format(
              "Hashing not started '%s' in '%s'", hashRequest.getPath(), hashRequest.getBaseDir()));
    }
  }
 /**
  * @param id the session id
  * @param allowExpired if true, will also include expired sessions that have not been deleted. If
  *     false, will ensure expired sessions are not returned.
  * @return the session, or null if it does not exist or is expired (when allowExpired is false)
  */
 private RedisSession getSession(String id, boolean allowExpired) {
   Map<Object, Object> entries = getSessionBoundHashOperations(id).entries();
   if (entries.isEmpty()) {
     return null;
   }
   MapSession loaded = new MapSession();
   loaded.setId(id);
   for (Map.Entry<Object, Object> entry : entries.entrySet()) {
     String key = (String) entry.getKey();
     if (CREATION_TIME_ATTR.equals(key)) {
       loaded.setCreationTime((Long) entry.getValue());
     } else if (MAX_INACTIVE_ATTR.equals(key)) {
       loaded.setMaxInactiveIntervalInSeconds((Integer) entry.getValue());
     } else if (LAST_ACCESSED_ATTR.equals(key)) {
       loaded.setLastAccessedTime((Long) entry.getValue());
     } else if (key.startsWith(SESSION_ATTR_PREFIX)) {
       loaded.setAttribute(key.substring(SESSION_ATTR_PREFIX.length()), entry.getValue());
     }
   }
   if (!allowExpired && loaded.isExpired()) {
     return null;
   }
   RedisSession result = new RedisSession(loaded);
   result.originalLastAccessTime =
       loaded.getLastAccessedTime()
           + TimeUnit.SECONDS.toMillis(loaded.getMaxInactiveIntervalInSeconds());
   result.setLastAccessedTime(System.currentTimeMillis());
   return result;
 }
 /*
  * Returns TTL value of ElasticSearch index in milliseconds when TTL
  * specifier is "ms" / "s" / "m" / "h" / "d" / "w". In case of unknown
  * specifier TTL is not set. When specifier is not provided it defaults to
  * days in milliseconds, where the number of days is the integer parsed from the TTL
  * string provided by the user. <p> Elasticsearch supports ttl values being
  * provided in the format: 1d / 1w / 1ms / 1s / 1h / 1m specify a time unit
  * like d (days), m (minutes), h (hours), ms (milliseconds) or w (weeks),
  * milliseconds is used as default unit.
  * http://www.elasticsearch.org/guide/reference/mapping/ttl-field/.
  *
  * @param ttl TTL value provided by user in flume configuration file for the
  * sink
  *
  * @return the ttl value in milliseconds
  */
 private long parseTTL(String ttl) {
   matcher = matcher.reset(ttl);
   while (matcher.find()) {
     if (matcher.group(2).equals("ms")) {
       return Long.parseLong(matcher.group(1));
     } else if (matcher.group(2).equals("s")) {
       return TimeUnit.SECONDS.toMillis(Integer.parseInt(matcher.group(1)));
     } else if (matcher.group(2).equals("m")) {
       return TimeUnit.MINUTES.toMillis(Integer.parseInt(matcher.group(1)));
     } else if (matcher.group(2).equals("h")) {
       return TimeUnit.HOURS.toMillis(Integer.parseInt(matcher.group(1)));
     } else if (matcher.group(2).equals("d")) {
       return TimeUnit.DAYS.toMillis(Integer.parseInt(matcher.group(1)));
     } else if (matcher.group(2).equals("w")) {
       return TimeUnit.DAYS.toMillis(7 * Integer.parseInt(matcher.group(1)));
     } else if (matcher.group(2).equals("")) {
       logger.info("TTL qualifier is empty. Defaulting to day qualifier.");
       return TimeUnit.DAYS.toMillis(Integer.parseInt(matcher.group(1)));
     } else {
       logger.debug("Unknown TTL qualifier provided. Setting TTL to 0.");
       return 0;
     }
   }
   logger.info("TTL not provided. Skipping the TTL config by returning 0.");
   return 0;
 }
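 /*
  * Usage sketch for parseTTL (the regex is an assumption; the method above
  * reuses a matcher field whose pattern is not shown). A pattern like the one
  * below splits "90d" into the numeric count and the unit qualifier:
  */
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;

 public class TtlParseSketch {
   // Hypothetical pattern: digits followed by an optional unit suffix.
   private static final Pattern TTL_PATTERN = Pattern.compile("^(\\d+)(\\D*)$");

   public static void main(String[] args) {
     Matcher matcher = TTL_PATTERN.matcher("90d");
     if (matcher.find()) {
       // prints: count=90, unit=d
       System.out.println("count=" + matcher.group(1) + ", unit=" + matcher.group(2));
     }
   }
 }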
  public void testBasicTargetRemoteDistributedCallable() throws Exception {
    long taskTimeout = TimeUnit.SECONDS.toMillis(15);
    EmbeddedCacheManager cacheManager1 = manager(0);
    final EmbeddedCacheManager cacheManager2 = manager(1);

    Cache<Object, Object> cache1 = cacheManager1.getCache();
    Cache<Object, Object> cache2 = cacheManager2.getCache();
    DistributedExecutorService des = null;

    try {
      des = new DefaultExecutorService(cache1);
      Address target = cache2.getAdvancedCache().getRpcManager().getAddress();

      DistributedTaskBuilder<Integer> builder =
          des.createDistributedTaskBuilder(new SimpleCallable())
              .failoverPolicy(DefaultExecutorService.RANDOM_NODE_FAILOVER)
              .timeout(taskTimeout, TimeUnit.MILLISECONDS);

      Future<Integer> future = des.submit(target, builder.build());
      AssertJUnit.assertEquals((Integer) 1, future.get());
    } catch (Exception ex) {
      AssertJUnit.fail("Task did not failover properly " + ex);
    } finally {
      if (des != null) {
        des.shutdown();
      }
    }
  }
    @Override
    void doRun() {
      long endTime = System.currentTimeMillis() + TimeUnit.SECONDS.toMillis(DURATION_SECONDS);

      Random random = new Random();
      while (true) {
        int key = random.nextInt(itemCount);
        int increment = random.nextInt(100);
        values[key] += increment;

        while (true) {
          Integer value = map.get(key);
          if (value == null) {
            value = 0;
          }
          if (map.replace(key, value, value + increment)) {
            break;
          }
        }

        if (System.currentTimeMillis() > endTime) {
          break;
        }
      }
    }
  /**
   * A wrapper for the function {@code execute(HttpRequestBase request)}. It retries using
   * exponential retry strategy with the following intervals:<br>
   * {@code baseIntervalSeconds, baseIntervalSeconds * 2, baseIntervalSeconds * 2^2,
   * baseIntervalSeconds * 2^3 ...}
   *
   * @param request {@link HttpRequestBase} specifies the HTTP request expected to execute.
   * @param maxRetries specifies the maximum number of retries.
   * @param baseIntervalSeconds specifies the interval base for the exponential retry strategy
   * @return the {@link HttpResponse} if the execution succeeds within the maximum number of retries.
   * @throws IOException if no response could be obtained after all retries
   */
  public HttpResponse executeWithRetries(
      HttpRequestBase request, int maxRetries, long baseIntervalSeconds) throws IOException {
    Preconditions.checkArgument(maxRetries > 0, "maxRetries must be > 0");
    Preconditions.checkArgument(baseIntervalSeconds > 0, "baseIntervalSeconds must be > 0");

    HttpResponse response = null;
    IOException exception = null;
    long sleepMillis = TimeUnit.SECONDS.toMillis(baseIntervalSeconds);
    for (int i = 0; i < maxRetries; ++i) {
      try {
        response = _httpClient.execute(request);
      } catch (IOException e) {
        exception = e;
        response = null;
        try {
          Thread.sleep(sleepMillis);
          sleepMillis *= 2;
        } catch (InterruptedException ie) {
          // no-op
        }
      }
      if (null != response) {
        return response;
      }
    }
    // If no response could be obtained after all retries, re-throw the exception.
    throw new IOException(exception);
  }
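  /*
   * The retry intervals above grow geometrically. A standalone sketch of the
   * same backoff schedule (timing logic only, no HTTP):
   */
  import java.util.concurrent.TimeUnit;

  public class BackoffScheduleSketch {
    public static void main(String[] args) {
      long baseIntervalSeconds = 2;
      long sleepMillis = TimeUnit.SECONDS.toMillis(baseIntervalSeconds);
      for (int attempt = 1; attempt <= 5; attempt++) {
        System.out.println("after failed attempt " + attempt + ": sleep " + sleepMillis + " ms");
        sleepMillis *= 2; // 2s, 4s, 8s, 16s, 32s
      }
    }
  }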
 static {
   NAMESPACE = gi.al("com.google.cast.media");
   Cp = TimeUnit.HOURS.toMillis(24);
   Cq = TimeUnit.HOURS.toMillis(24);
   Cr = TimeUnit.HOURS.toMillis(24);
   Cs = TimeUnit.SECONDS.toMillis(1);
 }
 @Override
 public String call() {
   synchronized (NearlineStorageHandler.this) {
     removeTimeout = TimeUnit.SECONDS.toMillis(timeout);
   }
   return "";
 }
 /**
  * This gathers more information about a vm than is available by querying the host list table
  * directly.
  *
  * @param vm The vm to populate with more information
  * @param items The data for a given vm.
  * @return The vm object with more information attached.
  */
 private VmDeployed fullyDescribeVM(VmDeployed vm, Collection<MetricValue> items) {
   for (MetricValue item : items) {
      if (item.getKey().equals(MEMORY_TOTAL_KPI_NAME)) { // Convert to MB
       // Original value given in bytes. 1024 * 1024 = 1048576
       vm.setRamMb((int) (Double.valueOf(item.getValue()) / 1048576));
     }
      if (item.getKey().equals(DISK_TOTAL_KPI_NAME)) { // Convert to GB
       // Original value given in bytes. 1024 * 1024 * 1024 = 1073741824
       vm.setDiskGb((Double.valueOf(item.getValue()) / 1073741824));
     }
     if (item.getKey().equals(BOOT_TIME_KPI_NAME)) {
       Calendar cal = new GregorianCalendar();
       // This converts from milliseconds into the correct time value
       cal.setTimeInMillis(TimeUnit.SECONDS.toMillis(Long.valueOf(item.getValueAsString())));
       vm.setCreated(cal);
     }
     if (item.getKey().equals(VM_PHYSICAL_HOST_NAME)) {
       vm.setAllocatedTo(getHostByName(item.getValueAsString()));
     }
     if (item.getKey().equals(CPU_COUNT_KPI_NAME)) {
       vm.setCpus(Integer.valueOf(item.getValueAsString()));
     }
     // TODO set the information correctly below!
     vm.setIpAddress("127.0.0.1");
     vm.setState("Work in Progress");
   }
    // A fallback in case the information is not available!
   if (vm.getCpus() == 0) {
     vm.setCpus(Integer.valueOf(1));
   }
   return vm;
 }
 @Override
 public void run() throws HiveException, ExampleException, IOException {
   try {
     hiveClient.authenticate(LOGIN, PASSWORD);
     HiveMessageHandler<DeviceNotification> notificationsHandler =
         new HiveMessageHandler<DeviceNotification>() {
           @Override
           public void handle(DeviceNotification notification) {
             print("Notification received: {}" + notification);
           }
         };
     Timestamp serverTimestamp = hiveClient.getInfo().getServerTimestamp();
     SubscriptionFilter notificationSubscriptionFilter =
         new SubscriptionFilter(null, null, serverTimestamp);
     hiveClient
         .getNotificationsController()
         .subscribeForNotifications(notificationSubscriptionFilter, notificationsHandler);
     ScheduledExecutorService commandsExecutor = Executors.newSingleThreadScheduledExecutor();
     commandsExecutor.scheduleAtFixedRate(new CommandTask(), 3, 3, TimeUnit.SECONDS);
     Thread.currentThread().join(TimeUnit.SECONDS.toMillis(30));
     commandsExecutor.shutdownNow();
   } catch (InterruptedException e) {
     throw new ExampleException(e.getMessage(), e);
   } finally {
     hiveClient.close();
   }
 }
  @Test
  public void testReceivingFiles() throws Throwable {
    final Set<String> files = new ConcurrentSkipListSet<String>();
    integrationTestUtils.createConsumer(
        this.messageChannel,
        new MessageHandler() {
          @Override
          public void handleMessage(Message<?> message) throws MessagingException {
            File file = (File) message.getPayload();
            String filePath = file.getPath();
            files.add(filePath);
          }
        });

    int cnt = 10;
    for (int i = 0; i < cnt; i++) {
      File out = new File(directoryToMonitor, i + ".txt");
      Writer w = new BufferedWriter(new FileWriter(out));
      IOUtils.write("test" + i, w);
      IOUtils.closeQuietly(w);
    }

    Thread.sleep(TimeUnit.SECONDS.toMillis(20));
    Assert.assertEquals(cnt, files.size());
  }
  private void waitUntilServerIsRunning(boolean reconnect)
      throws IOException, InterruptedException, TimeoutException {

    Thread.sleep(500); // this value is taken from implementation of CLI "reload"

    if (reconnect) {
      client.reconnect(timeoutInSeconds);
    }

    long endTime = System.currentTimeMillis() + TimeUnit.SECONDS.toMillis(timeoutInSeconds);
    while (System.currentTimeMillis() < endTime) {
      try {
        if (isServerRunning()) {
          break;
        }
      } catch (Throwable ignored) {
        // server is probably down, will retry
      }

      Thread.sleep(200); // this value is completely arbitrary
    }

    boolean running = false;
    try {
      running = isServerRunning();
    } catch (Throwable ignored) {
      // server probably down
    }
    if (!running) {
      throw new TimeoutException("Waiting for server timed out");
    }
  }
 private void sleep() {
   try {
     Thread.sleep(TimeUnit.SECONDS.toMillis(RETRY_WAIT_TIME_IN_SECONDS));
   } catch (InterruptedException ignored) {
     EmptyStatement.ignore(ignored);
   }
 }