Example No. 1
  public ServerObject serverFor(Object o) {

    ServerObject server = serving.get(o);

    if (server == null) {

      server = new ServerObject(objectsId.getAndIncrement(), o);

      serving.put(o, server);
      servingById.put(server.getId(), server);
    }

    return server;
  }
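serverFor() registers an object under a freshly assigned id and indexes it both by the object itself and by that id, creating the ServerObject lazily on first lookup. The backing fields are not shown in this example; the declarations below are a minimal sketch of what they might look like (field names are taken from the code above, the concrete java.util.concurrent types are assumptions):

  // Sketch of the registry state serverFor() appears to rely on (types assumed, not from the source).
  private final ConcurrentMap<Object, ServerObject> serving = new ConcurrentHashMap<>();
  private final ConcurrentMap<Long, ServerObject> servingById = new ConcurrentHashMap<>();
  private final AtomicLong objectsId = new AtomicLong();

Note that the get/put sequence in serverFor() is not atomic; if it can be called concurrently for the same object, callers would need external synchronization or a putIfAbsent-style registration.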
  /**
   * @param updateSeq Update sequence.
   * @return {@code True} if the partition has been transitioned to state EVICTED.
   */
  boolean tryEvict(boolean updateSeq) {
    if (state.getReference() != RENTING || state.getStamp() != 0 || groupReserved()) return false;

    // Attempt to evict partition entries from cache.
    clearAll();

    if (map.isEmpty() && state.compareAndSet(RENTING, EVICTED, 0, 0)) {
      if (log.isDebugEnabled()) log.debug("Evicted partition: " + this);

      if (!GridQueryProcessor.isEnabled(cctx.config())) clearSwap();

      if (cctx.isDrEnabled()) cctx.dr().partitionEvicted(id);

      cctx.dataStructures().onPartitionEvicted(id);

      rent.onDone();

      ((GridDhtPreloader) cctx.preloader()).onPartitionEvicted(this, updateSeq);

      clearDeferredDeletes();

      return true;
    }

    return false;
  }
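tryEvict() relies on an AtomicStampedReference that packs the partition state together with a reservation count, so the RENTING to EVICTED transition can only succeed while nothing holds a reservation. A self-contained sketch of that guard, with illustrative names rather than the real GridDhtLocalPartition declarations:

  // Minimal sketch of the CAS-guarded state transition used above (names are illustrative).
  import java.util.concurrent.atomic.AtomicStampedReference;

  class PartitionStateSketch {
    enum PartState { OWNING, RENTING, EVICTED }

    // The stamp doubles as a reservation count, mirroring the state.getStamp() != 0 check above.
    private final AtomicStampedReference<PartState> state =
        new AtomicStampedReference<>(PartState.RENTING, 0);

    boolean evictIfUnreserved() {
      // Succeeds only while the partition is still RENTING and holds zero reservations.
      return state.compareAndSet(PartState.RENTING, PartState.EVICTED, 0, 0);
    }
  }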
  /**
   * @param updateSeq Update sequence.
   * @return Future for evict attempt.
   */
  IgniteInternalFuture<Boolean> tryEvictAsync(boolean updateSeq) {
    if (map.isEmpty()
        && !GridQueryProcessor.isEnabled(cctx.config())
        && state.compareAndSet(RENTING, EVICTED, 0, 0)) {
      if (log.isDebugEnabled()) log.debug("Evicted partition: " + this);

      clearSwap();

      if (cctx.isDrEnabled()) cctx.dr().partitionEvicted(id);

      cctx.dataStructures().onPartitionEvicted(id);

      rent.onDone();

      ((GridDhtPreloader) cctx.preloader()).onPartitionEvicted(this, updateSeq);

      clearDeferredDeletes();

      return new GridFinishedFuture<>(true);
    }

    return cctx.closures()
        .callLocalSafe(
            new GPC<Boolean>() {
              @Override
              public Boolean call() {
                return tryEvict(true);
              }
            }, /*system pool*/
            true);
  }
Example No. 4
  /**
   * @param cancel {@code True} to close with cancellation.
   * @throws GridException If failed.
   */
  @Override
  public void close(boolean cancel) throws GridException {
    if (!closed.compareAndSet(false, true)) return;

    busyLock.block();

    if (log.isDebugEnabled())
      log.debug("Closing data loader [ldr=" + this + ", cancel=" + cancel + ']');

    GridException e = null;

    try {
      // Assuming that no methods are called on this loader after this method is called.
      if (cancel) {
        cancelled = true;

        for (Buffer buf : bufMappings.values()) buf.cancelAll();
      } else doFlush();

      ctx.event().removeLocalEventListener(discoLsnr);

      ctx.io().removeMessageListener(topic);
    } catch (GridException e0) {
      e = e0;
    }

    fut.onDone(null, e);

    if (e != null) throw e;
  }
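close() is made idempotent with closed.compareAndSet(false, true): concurrent callers race on a single atomic flag, only the winner runs the shutdown path, and every later call returns immediately. A minimal, self-contained sketch of that pattern (class name and shutdown comments are illustrative, not the loader's real logic):

  import java.util.concurrent.atomic.AtomicBoolean;

  class CloseOnceSketch implements AutoCloseable {
    private final AtomicBoolean closed = new AtomicBoolean();

    @Override
    public void close() {
      // Only the first caller flips the flag; every other call is a no-op.
      if (!closed.compareAndSet(false, true))
        return;

      // ... cancel or flush outstanding work, then remove listeners and release resources ...
    }
  }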
  /** @param entry Entry to add. */
  void onAdded(GridDhtCacheEntry entry) {
    GridDhtPartitionState state = state();

    if (state == EVICTED)
      throw new GridDhtInvalidPartitionException(
          id, "Adding entry to invalid partition [part=" + id + ']');

    map.put(entry.key(), entry);

    if (!entry.isInternal()) mapPubSize.increment();
  }
Example No. 6
  /**
   * Flushes every internal buffer if the buffer was last flushed before the configured flush
   * threshold.
   *
   * <p>Does not wait for the result and does not fail on errors, assuming that this method is
   * called periodically.
   */
  @Override
  public void tryFlush() throws GridInterruptedException {
    if (!busyLock.enterBusy()) return;

    try {
      for (Buffer buf : bufMappings.values()) buf.flush();

      lastFlushTime = U.currentTimeMillis();
    } finally {
      leaveBusy();
    }
  }
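Since tryFlush() neither blocks on the result nor propagates buffer errors, it is meant to be driven periodically. One plausible way to do that is a scheduled task; the sketch below assumes the public GridDataLoader type exposes tryFlush() (the @Override above suggests it does), and the 500 ms period is arbitrary:

  import java.util.concurrent.Executors;
  import java.util.concurrent.ScheduledExecutorService;
  import java.util.concurrent.TimeUnit;

  class PeriodicFlushSketch {
    private final ScheduledExecutorService exec = Executors.newSingleThreadScheduledExecutor();

    void start(final GridDataLoader<?, ?> ldr) {    // loader type assumed from the surrounding API
      exec.scheduleAtFixedRate(new Runnable() {
        @Override public void run() {
          try {
            ldr.tryFlush();                         // per-call errors are intentionally ignored
          } catch (GridInterruptedException e) {
            Thread.currentThread().interrupt();     // restore interrupt status and give up quietly
          }
        }
      }, 500, 500, TimeUnit.MILLISECONDS);
    }
  }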
Example No. 7
  private void processRequest(DataInputStream in) throws IOException {

    long serverId = in.readLong();

    ServerObject server = servingById.get(serverId);

    long requestId = in.readLong();
    Request r = new Request(this, server.getObject());
    r.deserialize(in);
    r.invoke();
    if (requestId != -1) {
      sendResponse(requestId, r.getResult(), r.getResultDeclaredType());
    }
  }
  /** @param entry Entry to remove. */
  @SuppressWarnings("SynchronizationOnLocalVariableOrMethodParameter")
  void onRemoved(GridDhtCacheEntry entry) {
    assert entry.obsolete();

    // Make sure to remove exactly this entry.
    synchronized (entry) {
      map.remove(entry.key(), entry);

      if (!entry.isInternal() && !entry.deleted()) mapPubSize.decrement();
    }

    // Attempt to evict.
    tryEvict(true);
  }
Example No. 9
  protected Request request(long objectId, Method method, Object[] args, boolean expectsResponse)
      throws IOException {
    Request r = new Request(this, method, args);
    long requestId = -1;

    if (expectsResponse) {
      requestId = nextRequest.getAndIncrement();
      requests.put(requestId, r);
    }

    sendRequest(objectId, requestId, r);

    return r;
  }
Example No. 10
  private void processResponse(DataInputStream in) throws IOException {
    long requestId = in.readLong();
    Request r = requests.remove(requestId);

    if (r == null) {
      throw new IllegalStateException(
          "Request " + requestId + " is unknown (last request generated was "
              + nextRequest.get() + ')');
    }

    Object o = null;
    if (in.readBoolean()) {
      o = serializerFor(classForName(in.readUTF()), r.getResultDeclaredType()).deserialize(in);
    }
    r.set(o);
  }
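Read together, request(), processRequest() and processResponse() imply a simple wire format: a request frame carries the target server id, a request id (-1 when no response is expected) and the serialized Request; a response frame carries the request id, a has-result flag and, when a result is present, the result's class name followed by the serialized result. The sketch below spells that layout out; the method names are hypothetical and the payload encoding is left as comments because the serializer helpers are not shown in these examples.

  // Hypothetical framing helpers mirroring what the reader side above expects (java.io.DataOutputStream).
  void writeRequestFrame(DataOutputStream out, long serverId, long requestId) throws IOException {
    out.writeLong(serverId);    // consumed by processRequest() via in.readLong()
    out.writeLong(requestId);   // -1 signals "fire and forget", so no response is sent
    // ... followed by the serialized Request body that r.deserialize(in) reads on the server ...
  }

  void writeResponseFrame(DataOutputStream out, long requestId, Object result, String resultClass)
      throws IOException {
    out.writeLong(requestId);           // matched against the pending 'requests' map on the client
    out.writeBoolean(result != null);   // processResponse() deserializes only when this is true
    if (result != null) {
      out.writeUTF(resultClass);        // resolved on the client via classForName(in.readUTF())
      // ... followed by the serializer-specific encoding of the result value ...
    }
  }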
Example No. 11
 /** {@inheritDoc} */
 @Override
 public String toString() {
    return S.toString(
        GridDhtLocalPartition.class,
        this,
        "state", state(),
        "reservations", reservations(),
        "empty", map.isEmpty(),
        "createTime", U.format(createTime),
        "mapPubSize", mapPubSize);
 }
Example No. 12
  /**
   * Performs flush.
   *
   * @throws GridException If failed.
   */
  private void doFlush() throws GridException {
    lastFlushTime = U.currentTimeMillis();

    List<GridFuture> activeFuts0 = null;

    int doneCnt = 0;

    for (GridFuture<?> f : activeFuts) {
      if (!f.isDone()) {
        if (activeFuts0 == null) activeFuts0 = new ArrayList<>((int) (activeFuts.size() * 1.2));

        activeFuts0.add(f);
      } else {
        f.get();

        doneCnt++;
      }
    }

    if (activeFuts0 == null || activeFuts0.isEmpty()) return;

    while (true) {
      Queue<GridFuture<?>> q = null;

      for (Buffer buf : bufMappings.values()) {
        GridFuture<?> flushFut = buf.flush();

        if (flushFut != null) {
          if (q == null) q = new ArrayDeque<>(bufMappings.size() * 2);

          q.add(flushFut);
        }
      }

      if (q != null) {
        assert !q.isEmpty();

        boolean err = false;

        for (GridFuture fut = q.poll(); fut != null; fut = q.poll()) {
          try {
            fut.get();
          } catch (GridException e) {
            if (log.isDebugEnabled()) log.debug("Failed to flush buffer: " + e);

            err = true;
          }
        }

        if (err)
          // Remaps needed - flush buffers.
          continue;
      }

      doneCnt = 0;

      for (int i = 0; i < activeFuts0.size(); i++) {
        GridFuture f = activeFuts0.get(i);

        if (f == null) doneCnt++;
        else if (f.isDone()) {
          f.get();

          doneCnt++;

          activeFuts0.set(i, null);
        } else break;
      }

      if (doneCnt == activeFuts0.size()) return;
    }
  }
Example No. 13
  /**
   * @param entries Entries.
   * @param resFut Result future.
   * @param activeKeys Active keys.
   * @param remaps Remaps count.
   */
  private void load0(
      Collection<? extends Map.Entry<K, V>> entries,
      final GridFutureAdapter<Object> resFut,
      final Collection<K> activeKeys,
      final int remaps) {
    assert entries != null;

    if (remaps >= MAX_REMAP_CNT) {
      resFut.onDone(new GridException("Failed to finish operation (too many remaps): " + remaps));

      return;
    }

    Map<GridNode, Collection<Map.Entry<K, V>>> mappings = new HashMap<>();

    boolean initPda = ctx.deploy().enabled() && jobPda == null;

    for (Map.Entry<K, V> entry : entries) {
      GridNode node;

      try {
        K key = entry.getKey();

        assert key != null;

        if (initPda) {
          jobPda = new DataLoaderPda(key, entry.getValue(), updater);

          initPda = false;
        }

        node = ctx.affinity().mapKeyToNode(cacheName, key);
      } catch (GridException e) {
        resFut.onDone(e);

        return;
      }

      if (node == null) {
        resFut.onDone(
            new GridTopologyException(
                "Failed to map key to node (no nodes with cache found in topology) [infos="
                    + entries.size() + ", cacheName=" + cacheName + ']'));

        return;
      }

      Collection<Map.Entry<K, V>> col = mappings.get(node);

      if (col == null) mappings.put(node, col = new ArrayList<>());

      col.add(entry);
    }

    for (final Map.Entry<GridNode, Collection<Map.Entry<K, V>>> e : mappings.entrySet()) {
      final UUID nodeId = e.getKey().id();

      Buffer buf = bufMappings.get(nodeId);

      if (buf == null) {
        Buffer old = bufMappings.putIfAbsent(nodeId, buf = new Buffer(e.getKey()));

        if (old != null) buf = old;
      }

      final Collection<Map.Entry<K, V>> entriesForNode = e.getValue();

      GridInClosure<GridFuture<?>> lsnr =
          new GridInClosure<GridFuture<?>>() {
            @Override
            public void apply(GridFuture<?> t) {
              try {
                t.get();

                for (Map.Entry<K, V> e : entriesForNode) activeKeys.remove(e.getKey());

                if (activeKeys.isEmpty()) resFut.onDone();
              } catch (GridException e1) {
                if (log.isDebugEnabled())
                  log.debug("Future finished with error [nodeId=" + nodeId + ", err=" + e1 + ']');

                if (cancelled) {
                  resFut.onDone(
                      new GridException(
                          "Data loader has been cancelled: " + GridDataLoaderImpl.this, e1));
                } else load0(entriesForNode, resFut, activeKeys, remaps + 1);
              }
            }
          };

      GridFutureAdapter<?> f;

      try {
        f = buf.update(entriesForNode, lsnr);
      } catch (GridInterruptedException e1) {
        resFut.onDone(e1);

        return;
      }

      if (ctx.discovery().node(nodeId) == null) {
        if (bufMappings.remove(nodeId, buf)) buf.onNodeLeft();

        if (f != null)
          f.onDone(
              new GridTopologyException(
                  "Failed to wait for request completion " + "(node has left): " + nodeId));
      }
    }
  }
Example No. 14
  /** Clears values for this partition. */
  private void clearAll() {
    GridCacheVersion clearVer = cctx.versions().next();

    boolean swap = cctx.isSwapOrOffheapEnabled();

    boolean rec = cctx.events().isRecordable(EVT_CACHE_REBALANCE_OBJECT_UNLOADED);

    Iterator<GridDhtCacheEntry> it = map.values().iterator();

    GridCloseableIterator<Map.Entry<byte[], GridCacheSwapEntry>> swapIt = null;

    if (swap
        && GridQueryProcessor.isEnabled(cctx.config())) { // Indexing needs to unswap cache values.
      Iterator<GridDhtCacheEntry> unswapIt = null;

      try {
        swapIt = cctx.swap().iterator(id);
        unswapIt = unswapIterator(swapIt);
      } catch (Exception e) {
        U.error(log, "Failed to clear swap for evicted partition: " + this, e);
      }

      if (unswapIt != null) it = F.concat(it, unswapIt);
    }

    try {
      while (it.hasNext()) {
        GridDhtCacheEntry cached = it.next();

        try {
          if (cached.clearInternal(clearVer, swap)) {
            map.remove(cached.key(), cached);

            if (!cached.isInternal()) {
              mapPubSize.decrement();

              if (rec)
                cctx.events()
                    .addEvent(
                        cached.partition(),
                        cached.key(),
                        cctx.localNodeId(),
                        (IgniteUuid) null,
                        null,
                        EVT_CACHE_REBALANCE_OBJECT_UNLOADED,
                        null,
                        false,
                        cached.rawGet(),
                        cached.hasValue(),
                        null,
                        null,
                        null);
            }
          }
        } catch (IgniteCheckedException e) {
          U.error(log, "Failed to clear cache entry for evicted partition: " + cached, e);
        }
      }
    } finally {
      U.close(swapIt, log);
    }
  }
Example No. 15
  /**
   * Grabs local events and detects whether events were lost since the last poll.
   *
   * @param ignite Target grid.
   * @param evtOrderKey Unique key to take the last order key from the node-local map.
   * @param evtThrottleCntrKey Unique key to take the throttle count from the node-local map.
   * @param evtTypes Event types to collect.
   * @param evtMapper Closure to map grid events to Visor data transfer objects.
   * @return Collection of node events.
   */
  public static Collection<VisorGridEvent> collectEvents(
      Ignite ignite,
      String evtOrderKey,
      String evtThrottleCntrKey,
      final int[] evtTypes,
      IgniteClosure<Event, VisorGridEvent> evtMapper) {
    assert ignite != null;
    assert evtTypes != null && evtTypes.length > 0;

    ConcurrentMap<String, Long> nl = ignite.cluster().nodeLocalMap();

    final long lastOrder = getOrElse(nl, evtOrderKey, -1L);
    final long throttle = getOrElse(nl, evtThrottleCntrKey, 0L);

    // When we arrive on a node for the first time to get its local events, we grab only those
    // events that are not older than the given period, to make sure we are not accidentally
    // grabbing gigabytes of data.
    final long notOlderThan = System.currentTimeMillis() - EVENTS_COLLECT_TIME_WINDOW;

    // Flag for detecting gaps between events.
    final AtomicBoolean lastFound = new AtomicBoolean(lastOrder < 0);

    IgnitePredicate<Event> p =
        new IgnitePredicate<Event>() {
          /** */
          private static final long serialVersionUID = 0L;

          @Override
          public boolean apply(Event e) {
            // Detects that events were lost.
            if (!lastFound.get() && (lastOrder == e.localOrder())) lastFound.set(true);

            // Retains events by lastOrder, period and type.
            return e.localOrder() > lastOrder
                && e.timestamp() > notOlderThan
                && F.contains(evtTypes, e.type());
          }
        };

    Collection<Event> evts = ignite.events().localQuery(p);

    // Update the latest order in the node-local map, if events are not empty.
    if (!evts.isEmpty()) {
      Event maxEvt = Collections.max(evts, EVTS_ORDER_COMPARATOR);

      nl.put(evtOrderKey, maxEvt.localOrder());
    }

    // Update throttle counter.
    if (!lastFound.get())
      nl.put(evtThrottleCntrKey, throttle == 0 ? EVENTS_LOST_THROTTLE : throttle - 1);

    boolean lost = !lastFound.get() && throttle == 0;

    Collection<VisorGridEvent> res = new ArrayList<>(evts.size() + (lost ? 1 : 0));

    if (lost) res.add(new VisorGridEventsLost(ignite.cluster().localNode().id()));

    for (Event e : evts) {
      VisorGridEvent visorEvt = evtMapper.apply(e);

      if (visorEvt != null) res.add(visorEvt);
    }

    return res;
  }
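A caller is expected to keep the order and throttle keys stable across polls so the gap detection above can work. A hedged usage sketch, assuming the standard EventType.EVTS_JOB_EXECUTION group and a caller-supplied mapper; the node-local key names are made up for illustration:

  // Hypothetical caller: collect job-execution events seen since the previous poll on this node.
  public static Collection<VisorGridEvent> collectJobEvents(
      Ignite ignite, IgniteClosure<Event, VisorGridEvent> evtMapper) {
    return collectEvents(
        ignite,
        "VISOR_JOB_EVT_ORDER",          // assumed node-local key for the last seen event order
        "VISOR_JOB_EVT_THROTTLE",       // assumed node-local key for the lost-events throttle counter
        EventType.EVTS_JOB_EXECUTION,   // standard Ignite event-type group
        evtMapper);                     // mapping to Visor DTOs is supplied by the caller
  }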
Example No. 16
 /** @return Number of entries in this partition (constant-time method). */
 public int size() {
   return map.size();
 }
Example No. 17
 /** @return {@code True} if partition is empty. */
 public boolean isEmpty() {
   return map.isEmpty();
 }
Example No. 18
 /** @return Entries belonging to partition. */
 public Collection<GridDhtCacheEntry> entries() {
   return map.values();
 }
Example No. 19
 /** @return Keys belonging to partition. */
 public Set<KeyCacheObject> keySet() {
   return map.keySet();
 }
  // For each tweet that is read, check the stock's threshold and raise an alert if it is exceeded.
  @Override
  public void execute(Tuple input) {
    String userId = input.getString(0);
    String displayname = input.getString(1);
    String hashtag_all = input.getString(2);
    String tweet = input.getString(3);
    String created = input.getString(4);
    String longitude = input.getString(5);
    String latitude = input.getString(6);
    String language = input.getString(7);
    String fullTweet = input.getString(8);

    if (hashtag_all.length() == 0) {
      System.out.println("Skipping tweet...unable to find hashtag from it:" + tweet);
      collector.ack(input);
      return;
    }

    String[] hashtags = hashtag_all.split(" ");

    for (String hashtag : hashtags) {

      System.out.println(
          "RuleBolt received event displayname: "
              + displayname
              + " hashtag: "
              + hashtag
              + " tweet: "
              + tweet);
      // double latitude = input.getDoubleByField("latitude");
      // long correlationId = input.getLongByField("correlationId");
      // int truckId = input.getIntegerByField("truckId");

      // Save the event to our map of events and retrieve how many times this stock has been
      // mentioned in tweets.
      twitterEvents.putIfAbsent(hashtag, new AtomicInteger(0));
      int numTimesStockTweeted = twitterEvents.get(hashtag).incrementAndGet();

      // query HBase table for threshold for the stock symbol that was tweeted about
      int threshold = findThresholdForStock(hashtag);
      // int threshold = DEFAULT_ALERT_THRESHOLD;

      System.out.println(
          "\n\n\n\n\n\n\nStock: "
              + hashtag
              + " now has count: "
              + numTimesStockTweeted
              + ", threshold = "
              + threshold
              + " structure: "
              + twitterEvents
              + "\n\n\n\n\n\n\n");

      // check if this event takes the tweet volume for this stock above threshold
      if (numTimesStockTweeted > threshold) {
        int unixTime = (int) (System.currentTimeMillis() / 1000L);
        String timeStamp = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss").format(new Date());
        String upsert =
            String.format(
                "upsert into alerts values(%d, '%s', '%s', %d)",
                unixTime, hashtag, timeStamp, numTimesStockTweeted);
        System.out.println(
            "ALERT!!! Stock: "
                + hashtag
                + " exceeded limit: "
                + threshold
                + " as it has count: "
                + numTimesStockTweeted
                + " on: "
                + timeStamp);

        runHbaseUpsert(upsert);

        createSolrAlert(userId, created, hashtag);
      }
    }

    collector.ack(input);
  }
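execute() reads nine positional fields from the incoming tuple, so whatever spout or bolt feeds it has to declare its output fields in exactly that order. A hedged sketch of such a declaration, with field names mirroring the local variables above (the names themselves are assumptions, not taken from the topology):

  // Hypothetical upstream declaration matching the getString(0..8) reads in execute().
  @Override
  public void declareOutputFields(OutputFieldsDeclarer declarer) {
    declarer.declare(new Fields(
        "userId", "displayname", "hashtags", "tweet", "created",
        "longitude", "latitude", "language", "fullTweet"));
  }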