// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "content/browser/loader/resource_scheduler.h"

#include "base/metrics/field_trial.h"
#include "base/metrics/histogram.h"
#include "base/stl_util.h"
#include "base/strings/stringprintf.h"
#include "base/time/time.h"
#include "content/browser/loader/resource_message_delegate.h"
#include "content/common/resource_messages.h"
#include "content/public/browser/resource_controller.h"
#include "content/public/browser/resource_request_info.h"
#include "content/public/browser/resource_throttle.h"
#include "ipc/ipc_message_macros.h"
#include "net/base/host_port_pair.h"
#include "net/base/load_flags.h"
#include "net/base/request_priority.h"
#include "net/http/http_server_properties.h"
#include "net/url_request/url_request.h"
#include "net/url_request/url_request_context.h"
namespace content {

namespace {

void PostHistogram(const char* base_name,
                   const char* suffix,
                   base::TimeDelta time) {
  std::string histogram_name =
      base::StringPrintf("ResourceScheduler.%s.%s", base_name, suffix);
  base::HistogramBase* histogram_counter = base::Histogram::FactoryTimeGet(
      histogram_name,
      base::TimeDelta::FromMilliseconds(1),
      base::TimeDelta::FromMinutes(5),
      50,
      base::Histogram::kUmaTargetedHistogramFlag);
  histogram_counter->AddTime(time);
}

}  // namespace
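
// Scheduling limits used below: kCoalescedTimerPeriod is the coalescing
// heartbeat interval in milliseconds; the other constants cap delayable
// requests in flight per client and per host, and the number of requests a
// THROTTLED client may have in flight before further non-SPDY requests are
// held back.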
static const size_t kCoalescedTimerPeriod = 5000;
static const size_t kMaxNumDelayableRequestsPerClient = 10;
static const size_t kMaxNumDelayableRequestsPerHost = 6;
static const size_t kMaxNumThrottledRequestsPerClient = 1;
struct ResourceScheduler::RequestPriorityParams {
  RequestPriorityParams()
      : priority(net::DEFAULT_PRIORITY),
        intra_priority(0) {
  }

  RequestPriorityParams(net::RequestPriority priority, int intra_priority)
      : priority(priority),
        intra_priority(intra_priority) {
  }

  bool operator==(const RequestPriorityParams& other) const {
    return (priority == other.priority) &&
        (intra_priority == other.intra_priority);
  }

  bool operator!=(const RequestPriorityParams& other) const {
    return !(*this == other);
  }

  bool GreaterThan(const RequestPriorityParams& other) const {
    if (priority != other.priority)
      return priority > other.priority;
    return intra_priority > other.intra_priority;
  }

  net::RequestPriority priority;
  int intra_priority;
};
class ResourceScheduler::RequestQueue {
 public:
  typedef std::multiset<ScheduledResourceRequest*, ScheduledResourceSorter>
      NetQueue;

  RequestQueue() : fifo_ordering_ids_(0) {}

  // Adds |request| to the queue with given |priority|.
  void Insert(ScheduledResourceRequest* request);

  // Removes |request| from the queue.
  void Erase(ScheduledResourceRequest* request) {
    PointerMap::iterator it = pointers_.find(request);
    DCHECK(it != pointers_.end());
    if (it == pointers_.end())
      return;
    queue_.erase(it->second);
    pointers_.erase(it);
  }

  NetQueue::iterator GetNextHighestIterator() {
    return queue_.begin();
  }

  NetQueue::iterator End() {
    return queue_.end();
  }

  // Returns true if |request| is queued.
  bool IsQueued(ScheduledResourceRequest* request) const {
    return ContainsKey(pointers_, request);
  }

  // Returns true if no requests are queued.
  bool IsEmpty() const { return queue_.size() == 0; }

 private:
  typedef std::map<ScheduledResourceRequest*, NetQueue::iterator> PointerMap;

  uint32 MakeFifoOrderingId() {
    fifo_ordering_ids_ += 1;
    return fifo_ordering_ids_;
  }

  // Used to create an ordering ID for scheduled resources so that resources
  // with same priority/intra_priority stay in fifo order.
  uint32 fifo_ordering_ids_;

  NetQueue queue_;
  PointerMap pointers_;
};
// This is the handle we return to the ResourceDispatcherHostImpl so it can
// interact with the request.
class ResourceScheduler::ScheduledResourceRequest
    : public ResourceMessageDelegate,
      public ResourceThrottle {
 public:
  ScheduledResourceRequest(const ClientId& client_id,
                           net::URLRequest* request,
                           ResourceScheduler* scheduler,
                           const RequestPriorityParams& priority)
      : ResourceMessageDelegate(request),
        client_id_(client_id),
        client_state_on_creation_(scheduler->GetClientState(client_id_)),
        request_(request),
        ready_(false),
        deferred_(false),
        classification_(NORMAL_REQUEST),
        scheduler_(scheduler),
        priority_(priority),
        fifo_ordering_(0) {
    TRACE_EVENT_ASYNC_BEGIN1("net", "URLRequest", request_,
                             "url", request->url().spec());
  }

  ~ScheduledResourceRequest() override { scheduler_->RemoveRequest(this); }

  void Start() {
    TRACE_EVENT_ASYNC_STEP_PAST0("net", "URLRequest", request_, "Queued");
    ready_ = true;
    if (!request_->status().is_success())
      return;
    base::TimeTicks time = base::TimeTicks::Now();
    ClientState current_state = scheduler_->GetClientState(client_id_);
    // Note: the client state isn't perfectly accurate since it won't capture
    // tabs which have switched between active and background multiple times.
    // Ex: A tab with the following transitions Active -> Background -> Active
    // will be recorded as Active.
    const char* client_state = "Other";
    if (current_state == client_state_on_creation_ &&
        current_state == ACTIVE) {
      client_state = "Active";
    } else if (current_state == client_state_on_creation_ &&
               current_state == BACKGROUND) {
      client_state = "Background";
    }

    base::TimeDelta time_was_deferred = base::TimeDelta::FromMicroseconds(0);
    if (deferred_) {
      deferred_ = false;
      controller()->Resume();
      time_was_deferred = time - time_deferred_;
    }
    PostHistogram("RequestTimeDeferred", client_state, time_was_deferred);
    PostHistogram(
        "RequestTimeThrottled", client_state, time - request_->creation_time());
    // TODO(aiolos): Remove one of the above histograms after gaining an
    // understanding of the difference between them and which one is more
    // useful.
  }

  void set_request_priority_params(const RequestPriorityParams& priority) {
    priority_ = priority;
  }
  const RequestPriorityParams& get_request_priority_params() const {
    return priority_;
  }
  const ClientId& client_id() const { return client_id_; }
  net::URLRequest* url_request() { return request_; }
  const net::URLRequest* url_request() const { return request_; }
  uint32 fifo_ordering() const { return fifo_ordering_; }
  void set_fifo_ordering(uint32 fifo_ordering) {
    fifo_ordering_ = fifo_ordering;
  }
  RequestClassification classification() const {
    return classification_;
  }
  void set_classification(RequestClassification classification) {
    classification_ = classification;
  }

 private:
  // ResourceMessageDelegate interface:
  bool OnMessageReceived(const IPC::Message& message) override {
    bool handled = true;
    IPC_BEGIN_MESSAGE_MAP(ScheduledResourceRequest, message)
      IPC_MESSAGE_HANDLER(ResourceHostMsg_DidChangePriority, DidChangePriority)
      IPC_MESSAGE_UNHANDLED(handled = false)
    IPC_END_MESSAGE_MAP()
    return handled;
  }

  // ResourceThrottle interface:
  void WillStartRequest(bool* defer) override {
    deferred_ = *defer = !ready_;
    time_deferred_ = base::TimeTicks::Now();
  }

  const char* GetNameForLogging() const override { return "ResourceScheduler"; }

  void DidChangePriority(int request_id, net::RequestPriority new_priority,
                         int intra_priority_value) {
    scheduler_->ReprioritizeRequest(this, new_priority, intra_priority_value);
  }

  const ClientId client_id_;
  const ResourceScheduler::ClientState client_state_on_creation_;
  net::URLRequest* request_;
  bool ready_;
  bool deferred_;
  RequestClassification classification_;
  ResourceScheduler* scheduler_;
  RequestPriorityParams priority_;
  uint32 fifo_ordering_;
  base::TimeTicks time_deferred_;

  DISALLOW_COPY_AND_ASSIGN(ScheduledResourceRequest);
};
bool ResourceScheduler::ScheduledResourceSorter::operator()(
    const ScheduledResourceRequest* a,
    const ScheduledResourceRequest* b) const {
  // Want the set to be ordered first by decreasing priority, then by
  // decreasing intra_priority.
  // ie. with (priority, intra_priority)
  // [(1, 0), (1, 0), (0, 100), (0, 0)]
  if (a->get_request_priority_params() != b->get_request_priority_params())
    return a->get_request_priority_params().GreaterThan(
        b->get_request_priority_params());

  // If priority/intra_priority is the same, fall back to fifo ordering.
  // std::multiset doesn't guarantee this until c++11.
  return a->fifo_ordering() < b->fifo_ordering();
}
void ResourceScheduler::RequestQueue::Insert(
    ScheduledResourceRequest* request) {
  DCHECK(!ContainsKey(pointers_, request));
  request->set_fifo_ordering(MakeFifoOrderingId());
  pointers_[request] = queue_.insert(request);
}
// Each client represents a tab.
class ResourceScheduler::Client {
 public:
  explicit Client(ResourceScheduler* scheduler,
                  bool is_visible,
                  bool is_audible)
      : is_audible_(is_audible),
        is_visible_(is_visible),
        is_loaded_(false),
        is_paused_(false),
        has_body_(false),
        using_spdy_proxy_(false),
        in_flight_delayable_count_(0),
        total_layout_blocking_count_(0),
        throttle_state_(ResourceScheduler::THROTTLED) {
    scheduler_ = scheduler;
  }

  ~Client() {
    // Update to default state and pause to ensure the scheduler has a
    // correct count of relevant types of clients.
    is_visible_ = false;
    is_audible_ = false;
    is_paused_ = true;
    UpdateThrottleState();
  }

  void ScheduleRequest(
      net::URLRequest* url_request,
      ScheduledResourceRequest* request) {
    if (ShouldStartRequest(request) == START_REQUEST)
      StartRequest(request);
    else
      pending_requests_.Insert(request);
    SetRequestClassification(request, ClassifyRequest(request));
  }

  void RemoveRequest(ScheduledResourceRequest* request) {
    if (pending_requests_.IsQueued(request)) {
      pending_requests_.Erase(request);
      DCHECK(!ContainsKey(in_flight_requests_, request));
    } else {
      EraseInFlightRequest(request);

      // Removing this request may have freed up another to load.
      LoadAnyStartablePendingRequests();
    }
  }

  RequestSet RemoveAllRequests() {
    RequestSet unowned_requests;
    for (RequestSet::iterator it = in_flight_requests_.begin();
         it != in_flight_requests_.end(); ++it) {
      unowned_requests.insert(*it);
      (*it)->set_classification(NORMAL_REQUEST);
    }
    ClearInFlightRequests();
    return unowned_requests;
  }
  bool is_active() const { return is_visible_ || is_audible_; }

  bool is_loaded() const { return is_loaded_; }

  bool is_visible() const { return is_visible_; }

  void OnAudibilityChanged(bool is_audible) {
    if (is_audible == is_audible_) {
      return;
    }
    is_audible_ = is_audible;
    UpdateThrottleState();
  }

  void OnVisibilityChanged(bool is_visible) {
    if (is_visible == is_visible_) {
      return;
    }
    is_visible_ = is_visible;
    UpdateThrottleState();
  }

  void OnLoadingStateChanged(bool is_loaded) {
    if (is_loaded == is_loaded_) {
      return;
    }
    is_loaded_ = is_loaded;
    UpdateThrottleState();
  }

  void OnNavigate() {
    has_body_ = false;
    is_loaded_ = false;
    UpdateThrottleState();
  }
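
  // Recomputes throttle_state_ from the current conditions. The branches
  // below are evaluated in order: scheduler-wide throttling disabled, active
  // and still loading, active, paused, some active client still loading,
  // loaded with coalescing enabled, and finally plain background.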
  void UpdateThrottleState() {
    ClientThrottleState old_throttle_state = throttle_state_;
    if (!scheduler_->should_throttle()) {
      SetThrottleState(UNTHROTTLED);
    } else if (is_active() && !is_loaded_) {
      SetThrottleState(ACTIVE_AND_LOADING);
    } else if (is_active()) {
      SetThrottleState(UNTHROTTLED);
    } else if (is_paused_) {
      SetThrottleState(PAUSED);
    } else if (!scheduler_->active_clients_loaded()) {
      SetThrottleState(THROTTLED);
    } else if (is_loaded_ && scheduler_->should_coalesce()) {
      SetThrottleState(COALESCED);
    } else if (!is_active()) {
      SetThrottleState(UNTHROTTLED);
    }

    if (throttle_state_ == old_throttle_state) {
      return;
    }

    if (throttle_state_ == ACTIVE_AND_LOADING) {
      scheduler_->IncrementActiveClientsLoading();
    } else if (old_throttle_state == ACTIVE_AND_LOADING) {
      scheduler_->DecrementActiveClientsLoading();
    }
    if (throttle_state_ == COALESCED) {
      scheduler_->IncrementCoalescedClients();
    } else if (old_throttle_state == COALESCED) {
      scheduler_->DecrementCoalescedClients();
    }
  }
  void OnWillInsertBody() {
    has_body_ = true;
    LoadAnyStartablePendingRequests();
  }

  void OnReceivedSpdyProxiedHttpResponse() {
    if (!using_spdy_proxy_) {
      using_spdy_proxy_ = true;
      LoadAnyStartablePendingRequests();
    }
  }
  void ReprioritizeRequest(ScheduledResourceRequest* request,
                           RequestPriorityParams old_priority_params,
                           RequestPriorityParams new_priority_params) {
    request->url_request()->SetPriority(new_priority_params.priority);
    request->set_request_priority_params(new_priority_params);
    if (!pending_requests_.IsQueued(request)) {
      DCHECK(ContainsKey(in_flight_requests_, request));
      // The priority and SPDY support may have changed, so update the
      // classification.
      SetRequestClassification(request, ClassifyRequest(request));
      // Request has already started.
      return;
    }

    pending_requests_.Erase(request);
    pending_requests_.Insert(request);

    if (new_priority_params.priority > old_priority_params.priority) {
      // Check if this request is now able to load at its new priority.
      LoadAnyStartablePendingRequests();
    }
  }
  // Called on Client creation, when a Client changes user observability,
  // possibly when all observable Clients have finished loading, and
  // possibly when this Client has finished loading.
  //
  // Client became observable.
  //   any state -> UNTHROTTLED
  // Client is unobservable, but all observable clients finished loading.
  //   THROTTLED -> UNTHROTTLED
  // Non-observable client finished loading.
  //   THROTTLED || UNTHROTTLED -> COALESCED
  // Non-observable client, an observable client starts loading.
  //   COALESCED -> THROTTLED
  // A COALESCED client will transition into UNTHROTTLED when the network is
  // woken up by a heartbeat and then transition back into COALESCED.
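  //
  // For example (a sketch, not an exhaustive trace): a background tab that
  // finishes loading while no active client is still loading goes from
  // THROTTLED or UNTHROTTLED to COALESCED; every kCoalescedTimerPeriod it is
  // briefly unthrottled to issue pending requests and then returns to
  // COALESCED (see LoadCoalescedRequests below).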
  void SetThrottleState(ResourceScheduler::ClientThrottleState throttle_state) {
    if (throttle_state == throttle_state_) {
      return;
    }

    throttle_state_ = throttle_state;
    if (throttle_state_ != PAUSED) {
      is_paused_ = false;
    }
    LoadAnyStartablePendingRequests();
    // TODO(aiolos): Stop any started but not in-flight requests when
    // switching to stricter throttle state?
  }

  ResourceScheduler::ClientThrottleState throttle_state() const {
    return throttle_state_;
  }
  void LoadCoalescedRequests() {
    if (throttle_state_ != COALESCED) {
      return;
    }
    if (scheduler_->active_clients_loaded()) {
      SetThrottleState(UNTHROTTLED);
    } else {
      SetThrottleState(THROTTLED);
    }
    LoadAnyStartablePendingRequests();
    SetThrottleState(COALESCED);
  }
 private:
  enum ShouldStartReqResult {
    DO_NOT_START_REQUEST_AND_STOP_SEARCHING,
    DO_NOT_START_REQUEST_AND_KEEP_SEARCHING,
    START_REQUEST,
  };
  void InsertInFlightRequest(ScheduledResourceRequest* request) {
    in_flight_requests_.insert(request);
    SetRequestClassification(request, ClassifyRequest(request));
  }

  void EraseInFlightRequest(ScheduledResourceRequest* request) {
    size_t erased = in_flight_requests_.erase(request);
    DCHECK_EQ(1u, erased);
    // Clear any special state that we were tracking for this request.
    SetRequestClassification(request, NORMAL_REQUEST);
  }

  void ClearInFlightRequests() {
    in_flight_requests_.clear();
    in_flight_delayable_count_ = 0;
    total_layout_blocking_count_ = 0;
  }
  size_t CountRequestsWithClassification(
      const RequestClassification classification, const bool include_pending) {
    size_t classification_request_count = 0;
    for (RequestSet::const_iterator it = in_flight_requests_.begin();
         it != in_flight_requests_.end(); ++it) {
      if ((*it)->classification() == classification)
        classification_request_count++;
    }
    if (include_pending) {
      for (RequestQueue::NetQueue::const_iterator
           it = pending_requests_.GetNextHighestIterator();
           it != pending_requests_.End(); ++it) {
        if ((*it)->classification() == classification)
          classification_request_count++;
      }
    }
    return classification_request_count;
  }
  void SetRequestClassification(ScheduledResourceRequest* request,
                                RequestClassification classification) {
    RequestClassification old_classification = request->classification();
    if (old_classification == classification)
      return;

    if (old_classification == IN_FLIGHT_DELAYABLE_REQUEST)
      in_flight_delayable_count_--;
    if (old_classification == LAYOUT_BLOCKING_REQUEST)
      total_layout_blocking_count_--;

    if (classification == IN_FLIGHT_DELAYABLE_REQUEST)
      in_flight_delayable_count_++;
    if (classification == LAYOUT_BLOCKING_REQUEST)
      total_layout_blocking_count_++;

    request->set_classification(classification);
    DCHECK_EQ(
        CountRequestsWithClassification(IN_FLIGHT_DELAYABLE_REQUEST, false),
        in_flight_delayable_count_);
    DCHECK_EQ(CountRequestsWithClassification(LAYOUT_BLOCKING_REQUEST, true),
              total_layout_blocking_count_);
  }
  RequestClassification ClassifyRequest(ScheduledResourceRequest* request) {
    // If a request is already marked as layout-blocking make sure to keep the
    // classification across redirects unless the priority was lowered.
    if (request->classification() == LAYOUT_BLOCKING_REQUEST &&
        request->url_request()->priority() > net::LOW) {
      return LAYOUT_BLOCKING_REQUEST;
    }

    if (!has_body_ && request->url_request()->priority() > net::LOW)
      return LAYOUT_BLOCKING_REQUEST;

    if (request->url_request()->priority() < net::LOW) {
      net::HostPortPair host_port_pair =
          net::HostPortPair::FromURL(request->url_request()->url());
      net::HttpServerProperties& http_server_properties =
          *request->url_request()->context()->http_server_properties();
      if (!http_server_properties.SupportsSpdy(host_port_pair) &&
          ContainsKey(in_flight_requests_, request)) {
        return IN_FLIGHT_DELAYABLE_REQUEST;
      }
    }
    return NORMAL_REQUEST;
  }
  bool ShouldKeepSearching(
      const net::HostPortPair& active_request_host) const {
    size_t same_host_count = 0;
    for (RequestSet::const_iterator it = in_flight_requests_.begin();
         it != in_flight_requests_.end(); ++it) {
      net::HostPortPair host_port_pair =
          net::HostPortPair::FromURL((*it)->url_request()->url());
      if (active_request_host.Equals(host_port_pair)) {
        same_host_count++;
        if (same_host_count >= kMaxNumDelayableRequestsPerHost)
          return true;
      }
    }
    return false;
  }
  void StartRequest(ScheduledResourceRequest* request) {
    InsertInFlightRequest(request);
    request->Start();
  }
  // ShouldStartRequest is the main scheduling algorithm.
  //
  // Requests are evaluated on five attributes:
  //
  // 1. Non-delayable requests:
  //   * Synchronous requests.
  //   * Non-HTTP[S] requests.
  //
  // 2. Requests to SPDY-capable origin servers.
  //
  // 3. High-priority requests:
  //   * Higher priority requests (>= net::LOW).
  //
  // 4. Layout-blocking requests:
  //   * High-priority requests (> net::LOW) initiated before the renderer has
  //     a <body>.
  //
  // 5. Low priority requests
  //
  // The following rules are followed:
  //
  // ACTIVE_AND_LOADING and UNTHROTTLED Clients follow these rules:
  //   * Non-delayable, High-priority and SPDY capable requests are issued
  //     immediately.
  //   * Low priority requests are delayable.
  //   * Allow one delayable request to load at a time while layout-blocking
  //     requests are loading or the body tag has not yet been parsed.
  //   * If no high priority or layout-blocking requests are in flight, start
  //     loading delayable requests.
  //   * Never exceed 10 delayable requests in flight per client.
  //   * Never exceed 6 delayable requests for a given host.
  //
  // THROTTLED Clients follow these rules:
  //   * Non-delayable and SPDY-capable requests are issued immediately.
  //   * At most one non-SPDY request will be issued per THROTTLED Client.
  //   * If no high priority requests are in flight, start loading low
  //     priority requests.
  //
  // COALESCED Clients never load requests, with the following exceptions:
  //   * Non-delayable requests are issued immediately.
  //   * On a (currently 5 second) heart beat, they load all requests as an
  //     UNTHROTTLED Client, and then return to the COALESCED state.
  //   * When an active Client makes a request, they are THROTTLED until the
  //     active Client finishes loading.
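  //
  // Worked example (a sketch of the rules above, not additional policy): for
  // an UNTHROTTLED Client that has not yet reached <body>, a net::LOW or
  // higher request is issued immediately, while a net::LOWEST request to a
  // non-SPDY host is delayable: it waits if another delayable request is
  // already in flight alongside non-delayable ones, and it always remains
  // subject to the 10-per-client and 6-per-host delayable limits.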
  ShouldStartReqResult ShouldStartRequest(
      ScheduledResourceRequest* request) const {
    const net::URLRequest& url_request = *request->url_request();
    // Synchronous requests could block the entire render, which could impact
    // user-observable Clients.
    if (!ResourceRequestInfo::ForRequest(&url_request)->IsAsync()) {
      return START_REQUEST;
    }

    // TODO(simonjam): This may end up causing disk contention. We should
    // experiment with throttling if that happens.
    // TODO(aiolos): We probably want to Coalesce these as well to avoid
    // waking the disk.
    if (!url_request.url().SchemeIsHTTPOrHTTPS()) {
      return START_REQUEST;
    }

    if (throttle_state_ == COALESCED) {
      return DO_NOT_START_REQUEST_AND_STOP_SEARCHING;
    }

    if (using_spdy_proxy_ && url_request.url().SchemeIs(url::kHttpScheme)) {
      return START_REQUEST;
    }

    net::HostPortPair host_port_pair =
        net::HostPortPair::FromURL(url_request.url());
    net::HttpServerProperties& http_server_properties =
        *url_request.context()->http_server_properties();

    // TODO(willchan): We should really improve this algorithm as described in
    // crbug.com/164101. Also, theoretically we should not count a SPDY request
    // against the delayable requests limit.
    if (http_server_properties.SupportsSpdy(host_port_pair)) {
      return START_REQUEST;
    }

    if (throttle_state_ == THROTTLED &&
        in_flight_requests_.size() >= kMaxNumThrottledRequestsPerClient) {
      // There may still be SPDY-capable requests that should be issued.
      return DO_NOT_START_REQUEST_AND_KEEP_SEARCHING;
    }

    // High-priority and layout-blocking requests.
    if (url_request.priority() >= net::LOW) {
      return START_REQUEST;
    }

    if (in_flight_delayable_count_ >= kMaxNumDelayableRequestsPerClient) {
      return DO_NOT_START_REQUEST_AND_STOP_SEARCHING;
    }

    if (ShouldKeepSearching(host_port_pair)) {
      // There may be other requests for other hosts we'd allow,
      // so keep looking.
      return DO_NOT_START_REQUEST_AND_KEEP_SEARCHING;
    }

    bool have_immediate_requests_in_flight =
        in_flight_requests_.size() > in_flight_delayable_count_;
    if (have_immediate_requests_in_flight &&
        (!has_body_ || total_layout_blocking_count_ != 0) &&
        in_flight_delayable_count_ != 0) {
      return DO_NOT_START_REQUEST_AND_STOP_SEARCHING;
    }

    return START_REQUEST;
  }
  void LoadAnyStartablePendingRequests() {
    // We iterate through all the pending requests, starting with the highest
    // priority one. For each entry, one of three things can happen:
    // 1) We start the request, remove it from the list, and keep checking.
    // 2) We do NOT start the request, but ShouldStartRequest() signals us that
    //    there may be room for other requests, so we keep checking and leave
    //    the previous request still in the list.
    // 3) We do not start the request, same as above, but ShouldStartRequest()
    //    tells us there's no point in checking any further requests.
    RequestQueue::NetQueue::iterator request_iter =
        pending_requests_.GetNextHighestIterator();

    while (request_iter != pending_requests_.End()) {
      ScheduledResourceRequest* request = *request_iter;
      ShouldStartReqResult query_result = ShouldStartRequest(request);

      if (query_result == START_REQUEST) {
        pending_requests_.Erase(request);
        StartRequest(request);

        // StartRequest can modify the pending list, so we (re)start evaluation
        // from the currently highest priority request. Avoid copying a singular
        // iterator, which would trigger undefined behavior.
        if (pending_requests_.GetNextHighestIterator() ==
            pending_requests_.End())
          break;
        request_iter = pending_requests_.GetNextHighestIterator();
      } else if (query_result == DO_NOT_START_REQUEST_AND_KEEP_SEARCHING) {
        ++request_iter;
        continue;
      } else {
        DCHECK(query_result == DO_NOT_START_REQUEST_AND_STOP_SEARCHING);
        break;
      }
    }
  }
  bool is_audible_;
  bool is_visible_;
  bool is_loaded_;
  bool is_paused_;
  bool has_body_;
  bool using_spdy_proxy_;
  RequestQueue pending_requests_;
  RequestSet in_flight_requests_;
  ResourceScheduler* scheduler_;
  // The number of delayable in-flight requests.
  size_t in_flight_delayable_count_;
  // The number of layout-blocking in-flight requests.
  size_t total_layout_blocking_count_;
  ResourceScheduler::ClientThrottleState throttle_state_;
};
ResourceScheduler::ResourceScheduler()
    : should_coalesce_(false),
      should_throttle_(false),
      active_clients_loading_(0),
      coalesced_clients_(0),
      coalescing_timer_(new base::Timer(true /* retain_user_task */,
                                        true /* is_repeating */)) {
  std::string throttling_trial_group =
      base::FieldTrialList::FindFullName("RequestThrottlingAndCoalescing");
  if (throttling_trial_group == "Throttle") {
    should_throttle_ = true;
  } else if (throttling_trial_group == "Coalesce") {
    should_coalesce_ = true;
    should_throttle_ = true;
  }
}
ResourceScheduler::~ResourceScheduler() {
  DCHECK(unowned_requests_.empty());
  DCHECK(client_map_.empty());
}
void ResourceScheduler::SetThrottleOptionsForTesting(bool should_throttle,
                                                     bool should_coalesce) {
  should_coalesce_ = should_coalesce;
  should_throttle_ = should_throttle;
  OnLoadingActiveClientsStateChangedForAllClients();
}
ResourceScheduler::ClientThrottleState
ResourceScheduler::GetClientStateForTesting(int child_id, int route_id) {
  Client* client = GetClient(child_id, route_id);
  DCHECK(client);
  return client->throttle_state();
}
scoped_ptr<ResourceThrottle> ResourceScheduler::ScheduleRequest(
    int child_id,
    int route_id,
    net::URLRequest* url_request) {
  DCHECK(CalledOnValidThread());
  ClientId client_id = MakeClientId(child_id, route_id);
  scoped_ptr<ScheduledResourceRequest> request(new ScheduledResourceRequest(
      client_id,
      url_request,
      this,
      RequestPriorityParams(url_request->priority(), 0)));

  ClientMap::iterator it = client_map_.find(client_id);
  if (it == client_map_.end()) {
    // There are several ways this could happen:
    // 1. <a ping> requests don't have a route_id.
    // 2. Most unittests don't send the IPCs needed to register Clients.
    // 3. The tab is closed while a RequestResource IPC is in flight.
    unowned_requests_.insert(request.get());
    request->Start();
    return request.Pass();
  }

  Client* client = it->second;
  client->ScheduleRequest(url_request, request.get());
  return request.Pass();
}
void ResourceScheduler::RemoveRequest(ScheduledResourceRequest* request) {
  DCHECK(CalledOnValidThread());
  if (ContainsKey(unowned_requests_, request)) {
    unowned_requests_.erase(request);
    return;
  }

  ClientMap::iterator client_it = client_map_.find(request->client_id());
  if (client_it == client_map_.end()) {
    return;
  }

  Client* client = client_it->second;
  client->RemoveRequest(request);
}
void ResourceScheduler::OnClientCreated(int child_id,
                                        int route_id,
                                        bool is_visible,
                                        bool is_audible) {
  DCHECK(CalledOnValidThread());
  ClientId client_id = MakeClientId(child_id, route_id);
  DCHECK(!ContainsKey(client_map_, client_id));

  Client* client = new Client(this, is_visible, is_audible);
  client_map_[client_id] = client;

  // TODO(aiolos): set Client visibility/audibility when signals are added
  // this will UNTHROTTLE Clients as needed
  client->UpdateThrottleState();
}
void ResourceScheduler::OnClientDeleted(int child_id, int route_id) {
  DCHECK(CalledOnValidThread());
  ClientId client_id = MakeClientId(child_id, route_id);
  DCHECK(ContainsKey(client_map_, client_id));
  ClientMap::iterator it = client_map_.find(client_id);
  if (it == client_map_.end())
    return;

  Client* client = it->second;
  // FYI, ResourceDispatcherHost cancels all of the requests after this function
  // is called. It should end up canceling all of the requests except for a
  // cross-renderer navigation.
  RequestSet client_unowned_requests = client->RemoveAllRequests();
  for (RequestSet::iterator it = client_unowned_requests.begin();
       it != client_unowned_requests.end(); ++it) {
    unowned_requests_.insert(*it);
  }

  delete client;
  client_map_.erase(it);
}
void ResourceScheduler::OnLoadingStateChanged(int child_id,
                                              int route_id,
                                              bool is_loaded) {
  Client* client = GetClient(child_id, route_id);
  DCHECK(client);
  client->OnLoadingStateChanged(is_loaded);
}
void ResourceScheduler::OnVisibilityChanged(int child_id,
                                            int route_id,
                                            bool is_visible) {
  Client* client = GetClient(child_id, route_id);
  DCHECK(client);
  client->OnVisibilityChanged(is_visible);
}
void ResourceScheduler::OnAudibilityChanged(int child_id,
                                            int route_id,
                                            bool is_audible) {
  Client* client = GetClient(child_id, route_id);
  // We might get this call after the client has been deleted.
  if (!client)
    return;
  client->OnAudibilityChanged(is_audible);
}
void ResourceScheduler::OnNavigate(int child_id, int route_id) {
  DCHECK(CalledOnValidThread());
  ClientId client_id = MakeClientId(child_id, route_id);

  ClientMap::iterator it = client_map_.find(client_id);
  if (it == client_map_.end()) {
    // The client was likely deleted shortly before we received this IPC.
    return;
  }

  Client* client = it->second;
  client->OnNavigate();
}
void ResourceScheduler::OnWillInsertBody(int child_id, int route_id) {
  DCHECK(CalledOnValidThread());
  ClientId client_id = MakeClientId(child_id, route_id);

  ClientMap::iterator it = client_map_.find(client_id);
  if (it == client_map_.end()) {
    // The client was likely deleted shortly before we received this IPC.
    return;
  }

  Client* client = it->second;
  client->OnWillInsertBody();
}
void ResourceScheduler::OnReceivedSpdyProxiedHttpResponse(
    int child_id,
    int route_id) {
  DCHECK(CalledOnValidThread());
  ClientId client_id = MakeClientId(child_id, route_id);

  ClientMap::iterator client_it = client_map_.find(client_id);
  if (client_it == client_map_.end()) {
    return;
  }

  Client* client = client_it->second;
  client->OnReceivedSpdyProxiedHttpResponse();
}
bool ResourceScheduler::IsClientVisibleForTesting(int child_id, int route_id) {
  Client* client = GetClient(child_id, route_id);
  DCHECK(client);
  return client->is_visible();
}
ResourceScheduler::Client* ResourceScheduler::GetClient(int child_id,
                                                        int route_id) {
  ClientId client_id = MakeClientId(child_id, route_id);
  ClientMap::iterator client_it = client_map_.find(client_id);
  if (client_it == client_map_.end()) {
    return NULL;
  }
  return client_it->second;
}
void ResourceScheduler::DecrementActiveClientsLoading() {
  DCHECK_NE(0u, active_clients_loading_);
  --active_clients_loading_;
  DCHECK_EQ(active_clients_loading_, CountActiveClientsLoading());
  if (active_clients_loading_ == 0) {
    OnLoadingActiveClientsStateChangedForAllClients();
  }
}

void ResourceScheduler::IncrementActiveClientsLoading() {
  ++active_clients_loading_;
  DCHECK_EQ(active_clients_loading_, CountActiveClientsLoading());
  if (active_clients_loading_ == 1) {
    OnLoadingActiveClientsStateChangedForAllClients();
  }
}
void ResourceScheduler::OnLoadingActiveClientsStateChangedForAllClients() {
  ClientMap::iterator client_it = client_map_.begin();
  while (client_it != client_map_.end()) {
    Client* client = client_it->second;
    client->UpdateThrottleState();
    ++client_it;
  }
}
size_t ResourceScheduler::CountActiveClientsLoading() const {
  size_t active_and_loading = 0;
  ClientMap::const_iterator client_it = client_map_.begin();
  while (client_it != client_map_.end()) {
    Client* client = client_it->second;
    if (client->throttle_state() == ACTIVE_AND_LOADING) {
      ++active_and_loading;
    }
    ++client_it;
  }
  return active_and_loading;
}
void ResourceScheduler::IncrementCoalescedClients() {
  ++coalesced_clients_;
  DCHECK(should_coalesce_);
  DCHECK_EQ(coalesced_clients_, CountCoalescedClients());
  if (coalesced_clients_ == 1) {
    coalescing_timer_->Start(
        FROM_HERE,
        base::TimeDelta::FromMilliseconds(kCoalescedTimerPeriod),
        base::Bind(&ResourceScheduler::LoadCoalescedRequests,
                   base::Unretained(this)));
  }
}
void ResourceScheduler::DecrementCoalescedClients() {
  DCHECK(should_coalesce_);
  DCHECK_NE(0U, coalesced_clients_);
  --coalesced_clients_;
  DCHECK_EQ(coalesced_clients_, CountCoalescedClients());
  if (coalesced_clients_ == 0) {
    coalescing_timer_->Stop();
  }
}
size_t ResourceScheduler::CountCoalescedClients() const {
  DCHECK(should_coalesce_);
  size_t coalesced_clients = 0;
  ClientMap::const_iterator client_it = client_map_.begin();
  while (client_it != client_map_.end()) {
    Client* client = client_it->second;
    if (client->throttle_state() == COALESCED) {
      ++coalesced_clients;
    }
    ++client_it;
  }
  return coalesced_clients;
}
void ResourceScheduler::LoadCoalescedRequests() {
  DCHECK(should_coalesce_);
  ClientMap::iterator client_it = client_map_.begin();
  while (client_it != client_map_.end()) {
    Client* client = client_it->second;
    client->LoadCoalescedRequests();
    ++client_it;
  }
}
ResourceScheduler::ClientState ResourceScheduler::GetClientState(
    ClientId client_id) const {
  ClientMap::const_iterator client_it = client_map_.find(client_id);
  if (client_it == client_map_.end())
    return UNKNOWN;
  return client_it->second->is_active() ? ACTIVE : BACKGROUND;
}
void ResourceScheduler::ReprioritizeRequest(ScheduledResourceRequest* request,
                                            net::RequestPriority new_priority,
                                            int new_intra_priority_value) {
  if (request->url_request()->load_flags() & net::LOAD_IGNORE_LIMITS) {
    // We should not be re-prioritizing requests with the
    // IGNORE_LIMITS flag.
    NOTREACHED();
    return;
  }
  RequestPriorityParams new_priority_params(new_priority,
                                            new_intra_priority_value);
  RequestPriorityParams old_priority_params =
      request->get_request_priority_params();

  DCHECK(old_priority_params != new_priority_params);

  ClientMap::iterator client_it = client_map_.find(request->client_id());
  if (client_it == client_map_.end()) {
    // The client was likely deleted shortly before we received this IPC.
    request->url_request()->SetPriority(new_priority_params.priority);
    request->set_request_priority_params(new_priority_params);
    return;
  }

  if (old_priority_params == new_priority_params)
    return;

  Client* client = client_it->second;
  client->ReprioritizeRequest(
      request, old_priority_params, new_priority_params);
}
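
// A ClientId packs the child (renderer process) id into the high 32 bits and
// the route id into the low 32 bits, so e.g. MakeClientId(5, 2) yields
// 0x0000000500000002.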
ResourceScheduler::ClientId ResourceScheduler::MakeClientId(
    int child_id, int route_id) {
  return (static_cast<ResourceScheduler::ClientId>(child_id) << 32) | route_id;
}
}  // namespace content