Volley's RequestQueue is the entry point of the whole module. When started, it spins up one cache-dispatcher thread and several network-dispatcher threads; when the business layer issues a request, these worker threads take it from the corresponding queue, execute it, and deliver the result back to the UI thread.
public void start() {
stop(); // Make sure any currently running dispatchers are stopped.
// Create the cache dispatcher and start it.
mCacheDispatcher = new CacheDispatcher(mCacheQueue, mNetworkQueue, mCache, mDelivery);
mCacheDispatcher.start();
// Create network dispatchers (and corresponding threads) up to the pool size.
for (int i = 0; i < mDispatchers.length; i++) {
NetworkDispatcher networkDispatcher = new NetworkDispatcher(mNetworkQueue, mNetwork,
mCache, mDelivery);
mDispatchers[i] = networkDispatcher;
networkDispatcher.start();
}
}
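For context, start() is normally called right after the queue is constructed. Below is a minimal sketch, assuming the standard toolbox classes (DiskBasedCache, BasicNetwork, HurlStack); the cache directory name and pool size are illustrative choices, and Volley.newRequestQueue(context) performs essentially the same wiring and calls start() for you.
import java.io.File;

import android.content.Context;

import com.android.volley.Network;
import com.android.volley.RequestQueue;
import com.android.volley.toolbox.BasicNetwork;
import com.android.volley.toolbox.DiskBasedCache;
import com.android.volley.toolbox.HurlStack;

public class QueueFactory {
    // Sketch only: wiring a RequestQueue by hand instead of using Volley.newRequestQueue().
    public static RequestQueue newQueue(Context context) {
        File cacheDir = new File(context.getCacheDir(), "volley"); // illustrative directory name
        Network network = new BasicNetwork(new HurlStack());
        RequestQueue queue = new RequestQueue(new DiskBasedCache(cacheDir), network, 4);
        queue.start(); // starts one CacheDispatcher and four NetworkDispatcher threads
        return queue;
    }
}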
When the business layer later has a request to make, it calls add() to put the request into the appropriate queue:
public <T> Request<T> add(Request<T> request) {
// Tag the request as belonging to this queue and add it to the set of current requests.
request.setRequestQueue(this);
synchronized (mCurrentRequests) {
mCurrentRequests.add(request);
}
// Process requests in the order they are added.
request.setSequence(getSequenceNumber());
request.addMarker("add-to-queue");
// If the request is uncacheable, skip the cache queue and go straight to the network.
// Requests that should not be cached go straight to the network queue;
// shouldCache() returns true by default.
if (!request.shouldCache()) {
mNetworkQueue.add(request);
return request;
}
// Insert request into stage if there's already a request with the same cache key in flight.
synchronized (mWaitingRequests) {
String cacheKey = request.getCacheKey();
if (mWaitingRequests.containsKey(cacheKey)) {
// There is already a request in flight. Queue up.
Queue<Request<?>> stagedRequests = mWaitingRequests.get(cacheKey);
if (stagedRequests == null) {
stagedRequests = new LinkedList<Request<?>>();
}
stagedRequests.add(request);
mWaitingRequests.put(cacheKey, stagedRequests);
if (VolleyLog.DEBUG) {
VolleyLog.v("Request for cacheKey=%s is in flight, putting on hold.", cacheKey);
}
} else {
// Insert 'null' queue for this cacheKey, indicating there is now a request in
// flight.
mWaitingRequests.put(cacheKey, null);
mCacheQueue.add(request);
}
return request;
}
}
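A typical call site of add() looks like the sketch below; the queue variable, URL, and log messages are placeholders for illustration, and StringRequest comes from the Volley toolbox.
// Illustrative usage of add(); the URL and callbacks are placeholders.
StringRequest request = new StringRequest(Request.Method.GET,
        "https://example.com/data",
        response -> Log.d("Volley", "response length=" + response.length()), // runs on the UI thread
        error -> Log.e("Volley", "request failed", error));                  // runs on the UI thread
// request.setShouldCache(false); // uncomment to bypass the cache queue entirely
queue.add(request);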
NetworkDispatcher is a thread running an endless while loop that keeps taking requests from the network queue. The queue is a blocking queue, so take() blocks until a request becomes available, at which point the dispatcher starts processing it.
public void run() {
Process.setThreadPriority(Process.THREAD_PRIORITY_BACKGROUND);
Request<?> request;
while (true) {
try {
// Take a request from the queue (blocks until one is available).
request = mQueue.take();
} catch (InterruptedException e) {
// We may have been interrupted because it was time to quit.
if (mQuit) {
return;
}
continue;
}
try {
request.addMarker("network-queue-take");
// If the request was cancelled already, do not perform the
// network request.
if (request.isCanceled()) {
request.finish("network-discard-cancelled");
continue;
}
addTrafficStatsTag(request);
// Perform the network request and obtain the response data.
NetworkResponse networkResponse = mNetwork
.performRequest(request);
request.addMarker("network-http-complete");
// If the server returned 304 AND we delivered a response
// already,
// we're done -- don't deliver a second identical response.
if (networkResponse.notModified
&& request.hasHadResponseDelivered()) {
request.finish("not-modified");
continue;
}
// Parse the response here on the worker thread.
Response<?> response = request
.parseNetworkResponse(networkResponse);
request.addMarker("network-parse-complete");
// Write to cache if applicable.
// TODO: Only update cache metadata instead of entire record for
// 304s.
if (request.shouldCache() && response.cacheEntry != null) {
mCache.put(request.getCacheKey(), response.cacheEntry);
request.addMarker("network-cache-written");
}
// Post the response back.
request.markDelivered();
mDelivery.postResponse(request, response);
} catch (VolleyError volleyError) {
parseAndDeliverNetworkError(request, volleyError);
} catch (Exception e) {
VolleyLog.e(e, "Unhandled exception %s", e.toString());
mDelivery.postError(request, new VolleyError(e));
}
}
}
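The final mDelivery.postResponse(...) call is what hops back to the UI thread. As a minimal sketch of how that delivery is typically wired, RequestQueue's convenience constructor builds an ExecutorDelivery around a Handler bound to the main Looper; the snippet below only illustrates that idea.
// Sketch of the delivery wiring: ExecutorDelivery posts callbacks through a
// Handler attached to the main Looper, so responses and errors land on the UI thread.
ResponseDelivery delivery = new ExecutorDelivery(new Handler(Looper.getMainLooper()));
// From a worker thread, the dispatchers then only need to call:
//   delivery.postResponse(request, response); // -> request.deliverResponse(...) on the UI thread
//   delivery.postError(request, error);       // -> request.deliverError(...) on the UI thread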