Example #1
  void finish(final String tag) {
    if (mRequestQueue != null) {
      mRequestQueue.finish(this);
    }
    if (MarkerLog.ENABLED) {
      final long threadId = Thread.currentThread().getId();
      if (Looper.myLooper() != Looper.getMainLooper()) {
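        // If we finish marking off of the main thread, we need to
        // actually do it on the main thread to ensure correct ordering.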
        Handler mainThread = new Handler(Looper.getMainLooper());
        mainThread.post(
            new Runnable() {
              @Override
              public void run() {
                mEventLog.add(tag, threadId);
                mEventLog.finish(Request.this.toString());
              }
            });
        return;
      }

      mEventLog.add(tag, threadId);
      mEventLog.finish(this.toString());
    } else {
      long requestTime = SystemClock.elapsedRealtime() - mRequestBirthTime;
      if (requestTime >= SLOW_REQUEST_THRESHOLD_MS) {
        VolleyLog.d("%d ms: %s", requestTime, this.toString());
      }
    }
  }
Example #2
  /**
   * Called from {@link Request#finish(String)}, indicating that processing of the given request has
   * finished.
   *
   * <p>Releases waiting requests for <code>request.getCacheKey()</code> if <code>
   * request.shouldCache()</code>.
   */
  <T> void finish(Request<T> request) {
    // Remove from the set of requests currently being processed.
    synchronized (mCurrentRequests) {
      mCurrentRequests.remove(request);
    }
    synchronized (mFinishedListeners) {
      for (RequestFinishedListener<T> listener : mFinishedListeners) {
        listener.onRequestFinished(request);
      }
    }

    if (request.shouldCache()) {
      synchronized (mWaitingRequests) {
        String cacheKey = request.getCacheKey();
        Queue<Request<?>> waitingRequests = mWaitingRequests.remove(cacheKey);
        if (waitingRequests != null) {
          if (VolleyLog.DEBUG) {
            VolleyLog.v(
                "Releasing %d waiting requests for cacheKey=%s.", waitingRequests.size(), cacheKey);
          }
          // Process all queued up requests. They won't be considered as
          // in flight, but that's not a problem as the cache has been
          // primed by 'request'.
          mCacheQueue.addAll(waitingRequests);
        }
      }
    }
  }
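
A minimal registration sketch for the listener fan-out above; it assumes the stock RequestQueue.addRequestFinishedListener API and an android.util.Log import, neither of which appears in this listing:

  queue.addRequestFinishedListener(
      new RequestQueue.RequestFinishedListener<Object>() {
        @Override
        public void onRequestFinished(Request<Object> request) {
          // "queue" is an assumed, already-built RequestQueue instance;
          // this callback fires once per request passed to finish() above.
          Log.d("Volley", "Finished: " + request.getUrl());
        }
      });
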
Example #3
 @Override
 public byte[] getBody() {
   try {
     return mRequestBody == null ? null : mRequestBody.getBytes(PROTOCOL_CHARSET);
   } catch (UnsupportedEncodingException uee) {
     VolleyLog.wtf(
         "Unsupported Encoding while trying to get the bytes of %s using %s",
         mRequestBody, PROTOCOL_CHARSET);
     return null;
   }
 }
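
For context, getBody() above relies on two members that are not part of this listing; in the stock JsonRequest they are declared roughly as follows (an assumption about this variant):

  /** Charset used to encode the JSON body; "utf-8" in stock Volley. */
  protected static final String PROTOCOL_CHARSET = "utf-8";

  /** Serialized JSON payload captured in the constructor; may be null. */
  private final String mRequestBody;
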
Example #4
  /**
   * Adds a Request to the dispatch queue.
   *
   * @param request The request to service
   * @return The passed-in request
   */
  public <T> Request<T> add(Request<T> request) {
    // Tag the request as belonging to this queue and add it to the set of
    // current requests.
    request.setRequestQueue(this);
    synchronized (mCurrentRequests) {
      mCurrentRequests.add(request);
    }

    // Process requests in the order they are added.
    request.setSequence(getSequenceNumber());
    request.addMarker("add-to-queue");

    // If the request is uncacheable, skip the cache queue and go straight
    // to the network.
    if (!request.shouldCache()) {
      mNetworkQueue.add(request);
      return request;
    }

    // Insert request into stage if there's already a request with the same
    // cache key in flight.
    synchronized (mWaitingRequests) {
      String cacheKey = request.getCacheKey();
      if (mWaitingRequests.containsKey(cacheKey)) {
        // There is already a request in flight. Queue up.
        Queue<Request<?>> stagedRequests = mWaitingRequests.get(cacheKey);
        if (stagedRequests == null) {
          stagedRequests = new LinkedList<Request<?>>();
        }
        stagedRequests.add(request);
        mWaitingRequests.put(cacheKey, stagedRequests);
        if (VolleyLog.DEBUG) {
          VolleyLog.v("Request for cacheKey=%s is in flight, putting on hold.", cacheKey);
        }
      } else {
        // Insert 'null' queue for this cacheKey, indicating there is
        // now a request in flight.
        mWaitingRequests.put(cacheKey, null);
        mCacheQueue.add(request);
      }
      return request;
    }
  }
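
A minimal caller-side sketch of add(); Volley.newRequestQueue and StringRequest come from the standard Volley toolbox and are assumptions here, not part of this listing:

  RequestQueue queue = Volley.newRequestQueue(context); // "context" is an assumed android Context
  queue.add(new StringRequest(Request.Method.GET, "https://example.com/data",
      new Response.Listener<String>() {
        @Override
        public void onResponse(String response) {
          // Delivered once the request finishes successfully.
        }
      },
      new Response.ErrorListener() {
        @Override
        public void onErrorResponse(VolleyError error) {
          // Network or parse failures end up here.
        }
      }));
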
Example #5
  @Override
  public void run() {
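    // Cache dispatch loop: take requests off the cache triage queue and either
    // serve them from cache or forward them to the network queue.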
    if (DEBUG) VolleyLog.v("start new dispatcher");
    Process.setThreadPriority(Process.THREAD_PRIORITY_BACKGROUND);

    // Make a blocking call to initialize the cache.
    if (mCache != null) {
      mCache.initialize();
    }

    while (true) {
      try {
        // Get a request from the cache triage queue, blocking until
        // at least one is available.
        final Request<?> request = mCacheQueue.take();
        request.addMarker("cache-queue-take");
        mDelivery.postPreExecute(request);

        // If the request has been canceled, don't bother dispatching it.
        if (request.isCanceled()) {
          request.finish("cache-discard-canceled");
          mDelivery.postCancel(request);
          mDelivery.postFinish(request);
          continue;
        }

        // Attempt to retrieve this item from cache.
        DiskCache.Entry entry = mCache != null ? mCache.getEntry(request.getCacheKey()) : null;
        if (entry == null) {
          request.addMarker("cache-miss");
          // Cache miss; send off to the network dispatcher.
          mNetworkQueue.put(request);
          mDelivery.postNetworking(request);
          continue;
        }

        // If it is completely expired, just send it to the network.
        if (entry.isExpired()) {
          request.addMarker("cache-hit-expired");
          request.setCacheEntry(entry);
          mNetworkQueue.put(request);
          mDelivery.postNetworking(request);
          continue;
        }

        // We have a cache hit; parse its data for delivery back to the
        // request.
        request.addMarker("cache-hit");
        Response<?> response =
            request.parseNetworkResponse(new NetworkResponse(entry.data, entry.responseHeaders));
        request.addMarker("cache-hit-parsed");
        mDelivery.postUsedCache(request);

        if (!entry.refreshNeeded()) {
          // Completely unexpired cache hit. Just deliver the response.
          mDelivery.postResponse(request, response);
        } else {
          // Soft-expired cache hit. We can deliver the cached response,
          // but we need to also send the request to the network for
          // refreshing.
          request.addMarker("cache-hit-refresh-needed");
          request.setCacheEntry(entry);

          // Mark the response as intermediate.
          response.intermediate = true;

          // Post the intermediate response back to the user and have the
          // delivery then forward the request along to the network.
          mDelivery.postResponse(
              request,
              response,
              new Runnable() {
                @Override
                public void run() {
                  try {
                    mNetworkQueue.put(request);
                  } catch (InterruptedException e) {
                    // Not much we can do about this.
                  }
                }
              });
        }
      } catch (InterruptedException e) {
        // We may have been interrupted because it was time to quit.
        if (mQuit) {
          return;
        }
        continue;
      }
    }
  }
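
  // Network dispatch loop: blocks on the network queue, performs the HTTP
  // request, and posts the parsed response or error back for delivery.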
  @Override
  public void run() {
    Process.setThreadPriority(Process.THREAD_PRIORITY_BACKGROUND);
    Request<?> request;
    while (true) {
      try {
        // Take a request from the queue.
        request = mQueue.take();
      } catch (InterruptedException e) {
        // We may have been interrupted because it was time to quit.
        if (mQuit) {
          return;
        }
        continue;
      }

      try {
        request.addMarker("network-queue-take");

        // If the request was cancelled already, do not perform the
        // network request.
        if (request.isCanceled()) {
          request.finish("network-discard-cancelled");
          continue;
        }

        addTrafficStatsTag(request);

        // Perform the network request.
        NetworkResponse networkResponse = mNetwork.performRequest(request);
        request.addMarker("network-http-complete");

        // If the server returned 304 AND we delivered a response already,
        // we're done -- don't deliver a second identical response.
        if (networkResponse.notModified && request.hasHadResponseDelivered()) {
          request.finish("not-modified");
          continue;
        }

        // Parse the response here on the worker thread.
        Response<?> response = request.parseNetworkResponse(networkResponse);
        request.addMarker("network-parse-complete");

        // Write to cache if applicable.
        // TODO: Only update cache metadata instead of entire record for 304s.
        if (request.shouldCache() && response.cacheEntry != null) {
          mCache.put(request.getCacheKey(), response.cacheEntry);
          request.addMarker("network-cache-written");
        }

        // Post the response back.
        request.markDelivered();
        mDelivery.postResponse(request, response);
      } catch (VolleyError volleyError) {
        parseAndDeliverNetworkError(request, volleyError);
      } catch (Exception e) {
        VolleyLog.e(e, "Unhandled exception %s", e.toString());
        mDelivery.postError(request, new VolleyError(e));
      }
    }
  }