/** {@inheritDoc} */
    @Nullable
    @Override
    public Map<String, Collection<?>> run(GridStreamerContext ctx, Collection<Bar> bars)
        throws GridException {
      ConcurrentMap<String, Bar> loc = ctx.localSpace();

      GridStreamerWindow win = ctx.window("stage2");

      // Add bars to the window.
      win.enqueueAll(bars);

      Collection<Bar> polled = win.pollEvictedBatch();

      if (!polled.isEmpty()) {
        Map<String, Bar> map = new HashMap<>();

        for (Bar polledBar : polled) {
          String symbol = polledBar.symbol();

          Bar bar = map.get(symbol);

          if (bar == null) map.put(symbol, bar = new Bar(symbol));

          bar.update(polledBar);
        }

        loc.putAll(map);
      }

      return null;
    }
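A minimal, self-contained sketch of the per-symbol reduction this stage performs on the evicted batch. The Bar fields and update() logic below are assumptions for illustration (the original Bar class is not shown); the point is the group-and-merge idiom: one aggregate Bar per symbol, created lazily and folded together.

import java.util.*;

// Sketch only: a hypothetical Bar with a high/low aggregate.
public class BarAggregationSketch {
  static final class Bar {
    final String symbol;
    double high = Double.NEGATIVE_INFINITY;
    double low = Double.POSITIVE_INFINITY;

    Bar(String symbol) { this.symbol = symbol; }
    Bar(String symbol, double price) { this(symbol); high = low = price; }

    void update(Bar other) {              // fold another bar into this aggregate
      high = Math.max(high, other.high);
      low = Math.min(low, other.low);
    }
  }

  static Map<String, Bar> aggregate(Collection<Bar> polled) {
    Map<String, Bar> map = new HashMap<>();
    for (Bar b : polled)
      map.computeIfAbsent(b.symbol, Bar::new).update(b);   // one aggregate per symbol
    return map;
  }

  public static void main(String[] args) {
    Map<String, Bar> agg = aggregate(Arrays.asList(
        new Bar("GOOG", 10), new Bar("GOOG", 12), new Bar("AAPL", 7)));
    System.out.println("GOOG high=" + agg.get("GOOG").high + " low=" + agg.get("GOOG").low);
  }
}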
  /**
   * @param sesId Session ID.
   * @param taskNodeId Task node ID.
   * @param taskName Task name.
   * @param dep Deployment.
   * @param taskClsName Task class name.
   * @param top Topology.
   * @param startTime Execution start time.
   * @param endTime Execution end time.
   * @param siblings Collection of siblings.
   * @param attrs Map of attributes.
   * @param fullSup {@code True} to enable distributed session attributes and checkpoints.
   * @return New session if one did not exist, or existing one.
   */
  public GridTaskSessionImpl createTaskSession(
      GridUuid sesId,
      UUID taskNodeId,
      String taskName,
      @Nullable GridDeployment dep,
      String taskClsName,
      @Nullable Collection<UUID> top,
      long startTime,
      long endTime,
      Collection<GridComputeJobSibling> siblings,
      Map<Object, Object> attrs,
      boolean fullSup) {
    if (!fullSup) {
      return new GridTaskSessionImpl(
          taskNodeId,
          taskName,
          dep,
          taskClsName,
          sesId,
          top,
          startTime,
          endTime,
          siblings,
          attrs,
          ctx,
          fullSup);
    }

    while (true) {
      GridTaskSessionImpl ses = sesMap.get(sesId);

      if (ses == null) {
        GridTaskSessionImpl old =
            sesMap.putIfAbsent(
                sesId,
                ses =
                    new GridTaskSessionImpl(
                        taskNodeId,
                        taskName,
                        dep,
                        taskClsName,
                        sesId,
                        top,
                        startTime,
                        endTime,
                        siblings,
                        attrs,
                        ctx,
                        fullSup));

        if (old != null) ses = old;
        else
          // Return without acquire.
          return ses;
      }

      if (ses.acquire()) return ses;
      else sesMap.remove(sesId, ses);
    }
  }
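The acquire-or-recreate loop above is easy to get wrong, so here is a minimal sketch of the same idiom under assumed names (Session, refs): a session whose reference count has dropped to zero can never be resurrected, so a caller that fails to acquire it removes the dead map entry and retries with a fresh instance.

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.atomic.AtomicInteger;

// Sketch only: hypothetical names, not the GridTaskSessionImpl API.
public class SessionCacheSketch {
  static final class Session {
    private final AtomicInteger refs = new AtomicInteger(1);  // creator holds one reference

    boolean acquire() {
      while (true) {
        int r = refs.get();
        if (r <= 0) return false;                  // already released; do not resurrect
        if (refs.compareAndSet(r, r + 1)) return true;
      }
    }

    boolean release() { return refs.decrementAndGet() <= 0; }
  }

  private final ConcurrentMap<String, Session> sesMap = new ConcurrentHashMap<>();

  Session getOrCreate(String sesId) {
    while (true) {
      Session ses = sesMap.get(sesId);
      if (ses == null) {
        Session created = new Session();
        Session old = sesMap.putIfAbsent(sesId, created);
        if (old == null) return created;           // return without acquire: creator already holds a ref
        ses = old;
      }
      if (ses.acquire()) return ses;               // shared with an existing holder
      sesMap.remove(sesId, ses);                   // dead entry; retry with a new one
    }
  }

  public static void main(String[] args) {
    SessionCacheSketch cache = new SessionCacheSketch();
    System.out.println(cache.getOrCreate("s1") == cache.getOrCreate("s1"));  // true: live session reused
  }
}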
  /**
   * @param p Partition number.
   * @param topVer Topology version.
   * @param create Create flag.
   * @param updateSeq Update sequence.
   * @return Local partition.
   */
  private GridDhtLocalPartition localPartition(
      int p, AffinityTopologyVersion topVer, boolean create, boolean updateSeq) {
    while (true) {
      boolean belongs = cctx.affinity().localNode(p, topVer);

      GridDhtLocalPartition loc = locParts.get(p);

      if (loc != null && loc.state() == EVICTED) {
        locParts.remove(p, loc);

        if (!create) return null;

        if (!belongs)
          throw new GridDhtInvalidPartitionException(
              p,
              "Adding entry to evicted partition [part="
                  + p
                  + ", topVer="
                  + topVer
                  + ", this.topVer="
                  + this.topVer
                  + ']');

        continue;
      }

      if (loc == null && create) {
        if (!belongs)
          throw new GridDhtInvalidPartitionException(
              p,
              "Creating partition which does not belong [part="
                  + p
                  + ", topVer="
                  + topVer
                  + ", this.topVer="
                  + this.topVer
                  + ']');

        lock.writeLock().lock();

        try {
          GridDhtLocalPartition old =
              locParts.putIfAbsent(p, loc = new GridDhtLocalPartition(cctx, p));

          if (old != null) loc = old;
          else {
            if (updateSeq) this.updateSeq.incrementAndGet();

            if (log.isDebugEnabled()) log.debug("Created local partition: " + loc);
          }
        } finally {
          lock.writeLock().unlock();
        }
      }

      return loc;
    }
  }
  @Override
  protected void doSubscribe(Node node, NotifyListener listener) {

    List<NodeType> listenNodeTypes = node.getListenNodeTypes();
    if (CollectionUtils.isEmpty(listenNodeTypes)) {
      return;
    }
    for (NodeType listenNodeType : listenNodeTypes) {
      String listenNodePath = NodeRegistryUtils.getNodeTypePath(clusterName, listenNodeType);

      Notifier notifier = notifiers.get(listenNodePath);
      if (notifier == null) {
        Notifier newNotifier = new Notifier(listenNodePath);
        notifiers.putIfAbsent(listenNodePath, newNotifier);
        notifier = notifiers.get(listenNodePath);
        if (notifier == newNotifier) {
          notifier.start();
        }
      }

      boolean success = false;
      NodeRegistryException exception = null;
      for (Map.Entry<String, JedisPool> entry : jedisPools.entrySet()) {
        JedisPool jedisPool = entry.getValue();
        try {
          Jedis jedis = jedisPool.getResource();
          try {
            doNotify(
                jedis,
                Collections.singletonList(listenNodePath),
                Collections.singletonList(listener));
            success = true;
            break; // Only need to read data from one server.

          } finally {
            jedis.close();
          }
        } catch (Throwable t) {
          exception =
              new NodeRegistryException(
                  "Failed to unregister node to redis registry. registry: "
                      + entry.getKey()
                      + ", node: "
                      + node
                      + ", cause: "
                      + t.getMessage(),
                  t);
        }
      }
      if (exception != null) {
        if (success) {
          LOGGER.warn(exception.getMessage(), exception);
        } else {
          throw exception;
        }
      }
    }
  }
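The notifier bookkeeping in doSubscribe() relies on a start-exactly-once idiom: putIfAbsent elects a single winner per path, and only the winning instance is ever started, so no path ends up with two running notifier threads. A minimal sketch under assumed names (the Notifier here is a bare Thread stand-in, not the real class):

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

// Sketch only: hypothetical Notifier stand-in.
public class NotifierStartSketch {
  static final class Notifier extends Thread {
    Notifier(String path) { super("notifier-" + path); }
    @Override public void run() { /* would poll the path until shut down */ }
  }

  private final ConcurrentMap<String, Notifier> notifiers = new ConcurrentHashMap<>();

  Notifier ensureNotifier(String path) {
    Notifier notifier = notifiers.get(path);
    if (notifier == null) {
      Notifier created = new Notifier(path);
      Notifier existing = notifiers.putIfAbsent(path, created);
      notifier = (existing != null) ? existing : created;
      if (notifier == created)
        notifier.start();   // only the thread that won the race starts it
    }
    return notifier;
  }

  public static void main(String[] args) {
    NotifierStartSketch s = new NotifierStartSketch();
    System.out.println(s.ensureNotifier("/nodes/a") == s.ensureNotifier("/nodes/a"));  // true
  }
}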
  /**
   * Removes session for a given session ID.
   *
   * @param sesId ID of session to remove.
   * @return {@code True} if session was removed.
   */
  public boolean removeSession(GridUuid sesId) {
    GridTaskSessionImpl ses = sesMap.get(sesId);

    assert ses == null || ses.isFullSupport();

    if (ses != null && ses.release()) {
      sesMap.remove(sesId, ses);

      return true;
    }

    return false;
  }
Example #6
  public ServerObject serverFor(Object o) {

    ServerObject server = serving.get(o);

    if (server == null) {

      server = new ServerObject(objectsId.getAndIncrement(), o);

      serving.put(o, server);
      servingById.put(server.getId(), server);
    }

    return server;
  }
Example #7
  protected void handleTaskSubmittedRequest(
      Runnable runnable, Address source, long requestId, long threadId) {
    // We store it in our map so that when the task is finished
    // we can send the results back to the owner
    _running.put(runnable, new Owner(source, requestId));
    // We give the task to the thread that is now waiting for it to be returned
    // If we can't offer then we have to respond back to
    // caller that we can't handle it.  They must have
    // gotten our address when we had a consumer, but
    // they went away between then and now.
    boolean received;
    try {
      _tasks.put(threadId, runnable);

      CyclicBarrier barrier = _taskBarriers.remove(threadId);
      if (received = (barrier != null)) {
        // Only wait 10 milliseconds, in case the consumer was
        // stopped between when we were told it was available and now
        barrier.await(10, TimeUnit.MILLISECONDS);
      }
    } catch (InterruptedException e) {
      if (log.isDebugEnabled()) log.debug("Interrupted while handing off task");
      Thread.currentThread().interrupt();
      received = false;
    } catch (BrokenBarrierException e) {
      if (log.isDebugEnabled())
        log.debug(
            "Consumer " + threadId + " has been interrupted, " + "must retry to submit elsewhere");
      received = false;
    } catch (TimeoutException e) {
      if (log.isDebugEnabled())
        log.debug("Timeout waiting to hand off to barrier, consumer " + threadId + " must be slow");
      // This should only happen if the consumer registered the barrier and then
      // got interrupted but hadn't yet removed it; this should almost never happen
      received = false;
    }

    if (!received) {
      // Clean up the tasks request
      _tasks.remove(threadId);
      if (log.isDebugEnabled()) log.debug("Run rejected not able to pass off to consumer");
      // If we couldn't hand off the task we have to tell the client
      // and also reupdate the coordinator that our consumer is ready
      sendRequest(source, Type.RUN_REJECTED, requestId, null);
      _running.remove(runnable);
    }
  }
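A minimal, runnable sketch of the CyclicBarrier hand-off used above, with hypothetical names: the consumer parks on a two-party barrier, the producer publishes the task and then awaits the same barrier with a short timeout so it can roll back and reject the task if the consumer has gone away (the real code waits only 10 ms; the demo uses a friendlier timeout).

import java.util.concurrent.*;

// Sketch only: illustrates the hand-off, not the JGroups request/response plumbing.
public class BarrierHandoffSketch {
  private static final ConcurrentMap<Long, Runnable> tasks = new ConcurrentHashMap<>();

  public static void main(String[] args) throws Exception {
    long threadId = 1L;
    CyclicBarrier barrier = new CyclicBarrier(2);

    Thread consumer = new Thread(() -> {
      try {
        barrier.await();                              // signal "I am here to pick the task up"
        Runnable task = tasks.remove(threadId);
        if (task != null) task.run();
      } catch (InterruptedException | BrokenBarrierException ignored) {
      }
    });
    consumer.start();

    tasks.put(threadId, () -> System.out.println("task executed by consumer"));
    boolean received;
    try {
      barrier.await(200, TimeUnit.MILLISECONDS);      // hand off, but don't block forever
      received = true;
    } catch (TimeoutException | BrokenBarrierException e) {
      received = false;                               // consumer went away; caller must be told
    }
    if (!received) tasks.remove(threadId);            // roll back the pending task
    System.out.println("handoff succeeded: " + received);
  }
}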
 /**
  * Creates a new {@link Timer} and registers it under the given metric name.
  *
  * @param metricName the name of the metric
  * @param durationUnit the duration scale unit of the new timer
  * @param rateUnit the rate scale unit of the new timer
  * @return a new {@link Timer}, or the existing timer if one is already registered
  */
 public Timer newTimer(MetricName metricName, TimeUnit durationUnit, TimeUnit rateUnit) {
   final Metric existingMetric = metrics.get(metricName);
   if (existingMetric != null) {
     return (Timer) existingMetric;
   }
   return getOrAdd(metricName, new Timer(newMeterTickThreadPool(), durationUnit, rateUnit, clock));
 }
 /**
  * Creates a new {@link Meter} and registers it under the given metric name.
  *
  * @param metricName the name of the metric
  * @param eventType the plural name of the type of events the meter is measuring (e.g., {@code
  *     "requests"})
  * @param unit the rate unit of the new meter
  * @return a new {@link Meter}, or the existing meter if one is already registered
  */
 public Meter newMeter(MetricName metricName, String eventType, TimeUnit unit) {
   final Metric existingMetric = metrics.get(metricName);
   if (existingMetric != null) {
     return (Meter) existingMetric;
   }
   return getOrAdd(metricName, new Meter(newMeterTickThreadPool(), eventType, unit, clock));
 }
  /**
   * Waits for renting partitions.
   *
   * @return {@code True} if mapping was changed.
   * @throws IgniteCheckedException If failed.
   */
  private boolean waitForRent() throws IgniteCheckedException {
    boolean changed = false;

    // Synchronously wait for all renting partitions to complete.
    for (Iterator<GridDhtLocalPartition> it = locParts.values().iterator(); it.hasNext(); ) {
      GridDhtLocalPartition p = it.next();

      GridDhtPartitionState state = p.state();

      if (state == RENTING || state == EVICTED) {
        if (log.isDebugEnabled()) log.debug("Waiting for renting partition: " + p);

        // Wait for partition to empty out.
        p.rent(true).get();

        if (log.isDebugEnabled()) log.debug("Finished waiting for renting partition: " + p);

        // Remove evicted partition.
        it.remove();

        changed = true;
      }
    }

    return changed;
  }
  /** {@inheritDoc} */
  @Override
  public void printMemoryStats() {
    super.printMemoryStats();

    X.println(">>>   threadsSize: " + threads.size());
    X.println(">>>   futsSize: " + futs.size());
  }
Example #12
  /**
   * @param cancel {@code True} to close with cancellation.
   * @throws GridException If failed.
   */
  @Override
  public void close(boolean cancel) throws GridException {
    if (!closed.compareAndSet(false, true)) return;

    busyLock.block();

    if (log.isDebugEnabled())
      log.debug("Closing data loader [ldr=" + this + ", cancel=" + cancel + ']');

    GridException e = null;

    try {
      // Assuming that no methods are called on this loader after this method is called.
      if (cancel) {
        cancelled = true;

        for (Buffer buf : bufMappings.values()) buf.cancelAll();
      } else doFlush();

      ctx.event().removeLocalEventListener(discoLsnr);

      ctx.io().removeMessageListener(topic);
    } catch (GridException e0) {
      e = e0;
    }

    fut.onDone(null, e);

    if (e != null) throw e;
  }
Example #13
    public void prepare(String query, InetAddress toExclude) throws InterruptedException {
      for (Map.Entry<Host, HostConnectionPool> entry : pools.entrySet()) {
        if (entry.getKey().getAddress().equals(toExclude)) continue;

        // Let's not wait too long if we can't get a connection. Things
        // will fix themselves once the user tries a query anyway.
        Connection c = null;
        try {
          c = entry.getValue().borrowConnection(200, TimeUnit.MILLISECONDS);
          c.write(new PrepareMessage(query)).get();
        } catch (ConnectionException e) {
          // Again, not being able to prepare the query right now is no big deal, so just ignore
        } catch (BusyConnectionException e) {
          // Same as above
        } catch (TimeoutException e) {
          // Same as above
        } catch (ExecutionException e) {
          // We shouldn't really get an exception while preparing a
          // query, so log it (but otherwise ignore it as it's not a big deal)
          logger.error(
              String.format(
                  "Unexpected error while preparing query (%s) on %s", query, entry.getKey()),
              e);
        } finally {
          if (c != null) entry.getValue().returnConnection(c);
        }
      }
    }
Example #14
  /**
   * @param updateSeq Update sequence.
   * @return Future for evict attempt.
   */
  IgniteInternalFuture<Boolean> tryEvictAsync(boolean updateSeq) {
    if (map.isEmpty()
        && !GridQueryProcessor.isEnabled(cctx.config())
        && state.compareAndSet(RENTING, EVICTED, 0, 0)) {
      if (log.isDebugEnabled()) log.debug("Evicted partition: " + this);

      clearSwap();

      if (cctx.isDrEnabled()) cctx.dr().partitionEvicted(id);

      cctx.dataStructures().onPartitionEvicted(id);

      rent.onDone();

      ((GridDhtPreloader) cctx.preloader()).onPartitionEvicted(this, updateSeq);

      clearDeferredDeletes();

      return new GridFinishedFuture<>(true);
    }

    return cctx.closures()
        .callLocalSafe(
            new GPC<Boolean>() {
              @Override
              public Boolean call() {
                return tryEvict(true);
              }
            }, /*system pool*/
            true);
  }
Example #15
  /**
   * @param updateSeq Update sequence.
   * @return {@code True} if entry has been transitioned to state EVICTED.
   */
  boolean tryEvict(boolean updateSeq) {
    if (state.getReference() != RENTING || state.getStamp() != 0 || groupReserved()) return false;

    // Attempt to evict partition entries from cache.
    clearAll();

    if (map.isEmpty() && state.compareAndSet(RENTING, EVICTED, 0, 0)) {
      if (log.isDebugEnabled()) log.debug("Evicted partition: " + this);

      if (!GridQueryProcessor.isEnabled(cctx.config())) clearSwap();

      if (cctx.isDrEnabled()) cctx.dr().partitionEvicted(id);

      cctx.dataStructures().onPartitionEvicted(id);

      rent.onDone();

      ((GridDhtPreloader) cctx.preloader()).onPartitionEvicted(this, updateSeq);

      clearDeferredDeletes();

      return true;
    }

    return false;
  }
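Both eviction paths above hinge on an AtomicStampedReference in which the stamp acts as a reservation counter: the RENTING -> EVICTED transition only succeeds while the stamp is zero. A self-contained sketch of that state machine (state names and methods are illustrative, not the real GridDhtLocalPartition API):

import java.util.concurrent.atomic.AtomicStampedReference;

// Sketch only: the stamp counts outstanding reservations.
public class PartitionStateSketch {
  enum State { OWNING, RENTING, EVICTED }

  private final AtomicStampedReference<State> state =
      new AtomicStampedReference<>(State.RENTING, 0);

  boolean reserve() {
    while (true) {
      int[] stamp = new int[1];
      State s = state.get(stamp);
      if (s == State.EVICTED) return false;                       // too late to reserve
      if (state.compareAndSet(s, s, stamp[0], stamp[0] + 1)) return true;
    }
  }

  void release() {
    while (true) {
      int[] stamp = new int[1];
      State s = state.get(stamp);
      if (state.compareAndSet(s, s, stamp[0], stamp[0] - 1)) return;
    }
  }

  boolean tryEvict() {
    // Eviction is only allowed from RENTING with no outstanding reservations.
    return state.compareAndSet(State.RENTING, State.EVICTED, 0, 0);
  }

  public static void main(String[] args) {
    PartitionStateSketch p = new PartitionStateSketch();
    p.reserve();
    System.out.println("evict while reserved: " + p.tryEvict());  // false
    p.release();
    System.out.println("evict after release:  " + p.tryEvict());  // true
  }
}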
Example #16
 @Override
 public void destroy() {
   super.destroy();
   try {
     expireFuture.cancel(true);
   } catch (Throwable t) {
     LOGGER.warn(t.getMessage(), t);
   }
   try {
     for (Notifier notifier : notifiers.values()) {
       notifier.shutdown();
     }
   } catch (Throwable t) {
     LOGGER.warn(t.getMessage(), t);
   }
   for (Map.Entry<String, JedisPool> entry : jedisPools.entrySet()) {
     JedisPool jedisPool = entry.getValue();
     try {
       jedisPool.destroy();
     } catch (Throwable t) {
       LOGGER.warn(
           "Failed to destroy the redis registry client. registry: "
               + entry.getKey()
               + ", cause: "
               + t.getMessage(),
           t);
     }
   }
 }
Example #17
  public void connectionRemoved(Connection connection) {
    if (connection.isClient() && connection instanceof TcpIpConnection && nodeEngine.isActive()) {
      final ClientEndpoint endpoint = endpoints.get(connection);
      if (endpoint != null
          && node.getLocalMember().getUuid().equals(endpoint.getPrincipal().getOwnerUuid())) {
        removeEndpoint(connection, true);
        if (!endpoint.isFirstConnection()) {
          return;
        }
        NodeEngine nodeEngine = node.nodeEngine;
        final Collection<MemberImpl> memberList = nodeEngine.getClusterService().getMemberList();
        for (MemberImpl member : memberList) {
          final ClientDisconnectionOperation op =
              new ClientDisconnectionOperation(endpoint.getUuid());
          op.setNodeEngine(nodeEngine)
              .setServiceName(SERVICE_NAME)
              .setService(this)
              .setResponseHandler(ResponseHandlerFactory.createEmptyResponseHandler());

          if (member.localMember()) {
            nodeEngine.getOperationService().runOperation(op);
          } else {
            nodeEngine.getOperationService().send(op, member.getAddress());
          }
        }
      }
    }
  }
Example #18
  /**
   * Register a tree for the given request to be compared to the appropriate trees in
   * Stage.ANTIENTROPY when they become available.
   */
  private void rendezvous(TreeRequest request, MerkleTree tree) {
    RepairSession session = sessions.get(request.sessionid);
    if (session == null) {
      logger.warn(
          "Got a merkle tree response for unknown repair session {}: either this node has been restarted since the session was started, or the session has been interrupted for an unknown reason. ",
          request.sessionid);
      return;
    }

    RepairSession.RepairJob job = session.jobs.peek();
    if (job == null) {
      assert session.terminated();
      return;
    }

    logger.info(
        String.format(
            "[repair #%s] Received merkle tree for %s from %s",
            session.getName(), request.cf.right, request.endpoint));

    if (job.addTree(request, tree) == 0) {
      logger.debug("All trees received for " + session.getName() + "/" + request.cf.right);
      job.submitDifferencers();

      // This job is complete, switching to next in line (note that only
      // one thread can ever do this)
      session.jobs.poll();
      RepairSession.RepairJob nextJob = session.jobs.peek();
      if (nextJob == null)
        // We are done with this repair session as far as differencing
        // is concerned. Just inform the session
        session.differencingDone.signalAll();
      else nextJob.sendTreeRequests();
    }
  }
  /**
   * Processes cache query request.
   *
   * @param sndId Sender node id.
   * @param req Query request.
   */
  @SuppressWarnings("unchecked")
  @Override
  void processQueryRequest(UUID sndId, GridCacheQueryRequest req) {
    if (req.cancel()) {
      cancelIds.add(new CancelMessageId(req.id(), sndId));

      if (req.fields()) removeFieldsQueryResult(sndId, req.id());
      else removeQueryResult(sndId, req.id());
    } else {
      if (!cancelIds.contains(new CancelMessageId(req.id(), sndId))) {
        if (!F.eq(req.cacheName(), cctx.name())) {
          GridCacheQueryResponse res =
              new GridCacheQueryResponse(
                  cctx.cacheId(),
                  req.id(),
                  new IgniteCheckedException(
                      "Received request for incorrect cache [expected="
                          + cctx.name()
                          + ", actual="
                          + req.cacheName()));

          sendQueryResponse(sndId, res, 0);
        } else {
          threads.put(req.id(), Thread.currentThread());

          try {
            GridCacheQueryInfo info = distributedQueryInfo(sndId, req);

            if (info == null) return;

            if (req.fields()) runFieldsQuery(info);
            else runQuery(info);
          } catch (Throwable e) {
            U.error(log(), "Failed to run query.", e);

            sendQueryResponse(
                sndId, new GridCacheQueryResponse(cctx.cacheId(), req.id(), e.getCause()), 0);

            if (e instanceof Error) throw (Error) e;
          } finally {
            threads.remove(req.id());
          }
        }
      }
    }
  }
  /**
   * Gets any existing metric with the given name or, if none exists, adds the given metric.
   *
   * @param name the metric's name
   * @param metric the new metric
   * @param <T> the type of the metric
   * @return either the existing metric or {@code metric}
   */
  @SuppressWarnings("unchecked")
  protected final <T extends Metric> T getOrAdd(MetricName name, T metric) {
    final Metric existingMetric = metrics.get(name);
    if (existingMetric == null) {
      final Metric justAddedMetric = metrics.putIfAbsent(name, metric);
      if (justAddedMetric == null) {
        notifyMetricAdded(name, metric);
        return metric;
      }

      if (metric instanceof Stoppable) {
        ((Stoppable) metric).stop();
      }

      return (T) justAddedMetric;
    }
    return (T) existingMetric;
  }
Example #21
 public void shutdown() {
   for (ClientEndpoint endpoint : endpoints.values()) {
     try {
       endpoint.destroy();
     } catch (LoginException e) {
       logger.finest(e.getMessage());
     }
     try {
       final Connection conn = endpoint.getConnection();
       if (conn.live()) {
         conn.close();
       }
     } catch (Exception e) {
       logger.finest(e);
     }
   }
   endpoints.clear();
 }
 /**
  * Removes the metric with the given name.
  *
  * @param name the name of the metric
  */
 public void removeMetric(MetricName name) {
   final Metric metric = metrics.remove(name);
   if (metric != null) {
     if (metric instanceof Stoppable) {
       ((Stoppable) metric).stop();
     }
     notifyMetricRemoved(name);
   }
 }
Example #23
 Set<ClientEndpoint> getEndpoints(String uuid) {
   Set<ClientEndpoint> endpointSet = new HashSet<ClientEndpoint>();
   for (ClientEndpoint endpoint : endpoints.values()) {
     if (uuid.equals(endpoint.getUuid())) {
       endpointSet.add(endpoint);
     }
   }
   return endpointSet;
 }
Example #24
    public void onDown(Host host) {
      loadBalancer.onDown(host);
      HostConnectionPool pool = pools.remove(host);

      // This should not be necessary but it's harmless
      if (pool != null) pool.shutdown();

      // If we've removed a host, the load balancer is allowed to change its mind on host distances.
      for (Host h : cluster.getMetadata().allHosts()) {
        if (!h.getMonitor().isUp()) continue;

        HostDistance dist = loadBalancer.distance(h);
        if (dist != HostDistance.IGNORED) {
          HostConnectionPool p = pools.get(h);
          if (p == null) addHost(h);
          else p.hostDistance = dist;
        }
      }
    }
Example #25
    private boolean shutdown(long timeout, TimeUnit unit) throws InterruptedException {

      if (!isShutdown.compareAndSet(false, true)) return true;

      long start = System.nanoTime();
      boolean success = true;
      for (HostConnectionPool pool : pools.values())
        success &= pool.shutdown(timeout - Cluster.timeSince(start, unit), unit);
      return success;
    }
Example #26
 private HostConnectionPool addHost(Host host) {
   try {
     HostDistance distance = loadBalancer.distance(host);
     if (distance == HostDistance.IGNORED) {
       return pools.get(host);
     } else {
       logger.debug("Adding {} to list of queried hosts", host);
       return pools.put(host, new HostConnectionPool(host, distance, this));
     }
   } catch (AuthenticationException e) {
     logger.error("Error creating pool to {} ({})", host, e.getMessage());
     host.getMonitor()
         .signalConnectionFailure(new ConnectionException(e.getHost(), e.getMessage()));
     return pools.get(host);
   } catch (ConnectionException e) {
     logger.debug("Error creating pool to {} ({})", host, e.getMessage());
     host.getMonitor().signalConnectionFailure(e);
     return pools.get(host);
   }
 }
Example #27
  /** @param entry Entry to add. */
  void onAdded(GridDhtCacheEntry entry) {
    GridDhtPartitionState state = state();

    if (state == EVICTED)
      throw new GridDhtInvalidPartitionException(
          id, "Adding entry to invalid partition [part=" + id + ']');

    map.put(entry.key(), entry);

    if (!entry.isInternal()) mapPubSize.increment();
  }
Example #28
  /**
   * Flushes every internal buffer if the buffer was last flushed before the passed-in threshold.
   *
   * <p>Does not wait for result and does not fail on errors assuming that this method should be
   * called periodically.
   */
  @Override
  public void tryFlush() throws GridInterruptedException {
    if (!busyLock.enterBusy()) return;

    try {
      for (Buffer buf : bufMappings.values()) buf.flush();

      lastFlushTime = U.currentTimeMillis();
    } finally {
      leaveBusy();
    }
  }
Example #29
 protected void handleInterruptRequest(Address source, long requestId) {
   Owner owner = new Owner(source, requestId);
   Runnable runnable = removeKeyForValue(_running, owner);
   Thread thread = null;
   if (runnable != null) {
     thread = _runnableThreads.remove(runnable);
   }
   if (thread != null) {
     thread.interrupt();
   } else if (log.isTraceEnabled())
     log.trace("Message could not be interrupted due to it already returned");
 }
Example #30
 protected void handleTaskRejectedResponse(Address source, long requestId) {
   Runnable runnable = _awaitingReturn.remove(new Owner(source, requestId));
   if (runnable != null) {
     _awaitingConsumer.add(runnable);
     Long taskRequestId = _requestId.get(runnable);
     if (taskRequestId != requestId) {
       log.warn("Task Request Id doesn't match in rejection");
     }
     sendToCoordinator(RUN_REQUEST, taskRequestId, local_addr);
   } else {
     log.error(Util.getMessage("ErrorResubmittingTaskForRequestId") + requestId);
   }
 }