// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "content/browser/loader/resource_scheduler.h"

#include "base/bind.h"
#include "base/metrics/field_trial.h"
#include "base/metrics/histogram.h"
#include "base/stl_util.h"
#include "base/strings/stringprintf.h"
#include "base/time/time.h"
#include "content/browser/loader/resource_message_delegate.h"
#include "content/common/resource_messages.h"
#include "content/public/browser/resource_controller.h"
#include "content/public/browser/resource_request_info.h"
#include "content/public/browser/resource_throttle.h"
#include "ipc/ipc_message_macros.h"
#include "net/base/host_port_pair.h"
#include "net/base/load_flags.h"
#include "net/base/request_priority.h"
#include "net/http/http_server_properties.h"
#include "net/url_request/url_request.h"
#include "net/url_request/url_request_context.h"

namespace content {

namespace {

// Post ResourceScheduler histograms of the following forms:
// If |histogram_suffix| is NULL or the empty string:
//   ResourceScheduler.base_name.histogram_name
// Otherwise:
//   ResourceScheduler.base_name.histogram_name.histogram_suffix
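//   e.g. PostHistogram("RequestTimeDeferred", "Active", NULL, delta) records
//   to the histogram ResourceScheduler.RequestTimeDeferred.Active.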
void PostHistogram(const char* base_name,
                   const char* histogram_name,
                   const char* histogram_suffix,
                   base::TimeDelta time) {
  std::string histogram =
      base::StringPrintf("ResourceScheduler.%s.%s", base_name, histogram_name);
  if (histogram_suffix && histogram_suffix[0] != '\0')
    histogram = histogram + "." + histogram_suffix;
  base::HistogramBase* histogram_counter = base::Histogram::FactoryTimeGet(
      histogram, base::TimeDelta::FromMilliseconds(1),
      base::TimeDelta::FromMinutes(5), 50,
      base::Histogram::kUmaTargetedHistogramFlag);
  histogram_counter->AddTime(time);
}

// For use with PostHistogram to specify the correct string for histogram
// suffixes based on number of Clients.
const char* GetNumClientsString(size_t num_clients) {
  if (num_clients == 1)
    return "1Client";
  else if (num_clients <= 5)
    return "Max5Clients";
  else if (num_clients <= 15)
    return "Max15Clients";
  else if (num_clients <= 30)
    return "Max30Clients";
  return "Over30Clients";
}

}  // namespace

static const size_t kCoalescedTimerPeriod = 5000;
static const size_t kMaxNumDelayableRequestsPerClient = 10;
static const size_t kMaxNumDelayableRequestsPerHost = 6;
static const size_t kMaxNumThrottledRequestsPerClient = 1;

struct ResourceScheduler::RequestPriorityParams {
  RequestPriorityParams()
      : priority(net::DEFAULT_PRIORITY),
        intra_priority(0) {}

  RequestPriorityParams(net::RequestPriority priority, int intra_priority)
      : priority(priority),
        intra_priority(intra_priority) {}

  bool operator==(const RequestPriorityParams& other) const {
    return (priority == other.priority) &&
        (intra_priority == other.intra_priority);
  }

  bool operator!=(const RequestPriorityParams& other) const {
    return !(*this == other);
  }

  bool GreaterThan(const RequestPriorityParams& other) const {
    if (priority != other.priority)
      return priority > other.priority;
    return intra_priority > other.intra_priority;
  }

  net::RequestPriority priority;
  int intra_priority;
};

class ResourceScheduler::RequestQueue {
 public:
  typedef std::multiset<ScheduledResourceRequest*, ScheduledResourceSorter>
      NetQueue;

  RequestQueue() : fifo_ordering_ids_(0) {}

  // Adds |request| to the queue with given |priority|.
  void Insert(ScheduledResourceRequest* request);

  // Removes |request| from the queue.
  void Erase(ScheduledResourceRequest* request) {
    PointerMap::iterator it = pointers_.find(request);
    DCHECK(it != pointers_.end());
    if (it == pointers_.end())
      return;
    queue_.erase(it->second);
    pointers_.erase(it);
  }

  NetQueue::iterator GetNextHighestIterator() {
    return queue_.begin();
  }

  NetQueue::iterator End() {
    return queue_.end();
  }

  // Returns true if |request| is queued.
  bool IsQueued(ScheduledResourceRequest* request) const {
    return ContainsKey(pointers_, request);
  }

  // Returns true if no requests are queued.
  bool IsEmpty() const { return queue_.size() == 0; }

 private:
  typedef std::map<ScheduledResourceRequest*, NetQueue::iterator> PointerMap;

  uint32 MakeFifoOrderingId() {
    fifo_ordering_ids_ += 1;
    return fifo_ordering_ids_;
  }

  // Used to create an ordering ID for scheduled resources so that resources
  // with same priority/intra_priority stay in fifo order.
  uint32 fifo_ordering_ids_;

  NetQueue queue_;
  PointerMap pointers_;
};

// This is the handle we return to the ResourceDispatcherHostImpl so it can
// interact with the request.
class ResourceScheduler::ScheduledResourceRequest
    : public ResourceMessageDelegate,
      public ResourceThrottle {
 public:
  ScheduledResourceRequest(const ClientId& client_id,
                           net::URLRequest* request,
                           ResourceScheduler* scheduler,
                           const RequestPriorityParams& priority)
      : ResourceMessageDelegate(request),
        client_id_(client_id),
        client_state_on_creation_(scheduler->GetClientState(client_id_)),
        request_(request),
        ready_(false),
        deferred_(false),
        classification_(NORMAL_REQUEST),
        scheduler_(scheduler),
        priority_(priority),
        fifo_ordering_(0) {}

  ~ScheduledResourceRequest() override { scheduler_->RemoveRequest(this); }

  void Start() {
    ready_ = true;
    if (!request_->status().is_success())
      return;
    base::TimeTicks time = base::TimeTicks::Now();
    ClientState current_state = scheduler_->GetClientState(client_id_);
    // Note: the client state isn't perfectly accurate since it won't capture
    // tabs which have switched between active and background multiple times.
    // Ex: A tab with the following transitions Active -> Background -> Active
    // will be recorded as Active.
    const char* client_state = "Other";
    if (current_state == client_state_on_creation_ &&
        current_state == ACTIVE) {
      client_state = "Active";
    } else if (current_state == client_state_on_creation_ &&
               current_state == BACKGROUND) {
      client_state = "Background";
    }

    base::TimeDelta time_was_deferred = base::TimeDelta::FromMicroseconds(0);
    if (deferred_) {
      deferred_ = false;
      controller()->Resume();
      time_was_deferred = time - time_deferred_;
    }
    PostHistogram("RequestTimeDeferred", client_state, NULL, time_was_deferred);
    PostHistogram("RequestTimeThrottled", client_state, NULL,
                  time - request_->creation_time());
    // TODO(aiolos): Remove one of the above histograms after gaining an
    // understanding of the difference between them and which one is more
    // useful.
  }

  void set_request_priority_params(const RequestPriorityParams& priority) {
    priority_ = priority;
  }
  const RequestPriorityParams& get_request_priority_params() const {
    return priority_;
  }
  const ClientId& client_id() const { return client_id_; }
  net::URLRequest* url_request() { return request_; }
  const net::URLRequest* url_request() const { return request_; }
  uint32 fifo_ordering() const { return fifo_ordering_; }
  void set_fifo_ordering(uint32 fifo_ordering) {
    fifo_ordering_ = fifo_ordering;
  }
  RequestClassification classification() const {
    return classification_;
  }
  void set_classification(RequestClassification classification) {
    classification_ = classification;
  }

 private:
  // ResourceMessageDelegate interface:
  bool OnMessageReceived(const IPC::Message& message) override {
    bool handled = true;
    IPC_BEGIN_MESSAGE_MAP(ScheduledResourceRequest, message)
      IPC_MESSAGE_HANDLER(ResourceHostMsg_DidChangePriority, DidChangePriority)
      IPC_MESSAGE_UNHANDLED(handled = false)
    IPC_END_MESSAGE_MAP()
    return handled;
  }

  // ResourceThrottle interface:
  void WillStartRequest(bool* defer) override {
    deferred_ = *defer = !ready_;
    time_deferred_ = base::TimeTicks::Now();
  }

  const char* GetNameForLogging() const override { return "ResourceScheduler"; }

  void DidChangePriority(int request_id, net::RequestPriority new_priority,
                         int intra_priority_value) {
    scheduler_->ReprioritizeRequest(this, new_priority, intra_priority_value);
  }

  const ClientId client_id_;
  const ResourceScheduler::ClientState client_state_on_creation_;
  net::URLRequest* request_;
  bool ready_;
  bool deferred_;
  RequestClassification classification_;
  ResourceScheduler* scheduler_;
  RequestPriorityParams priority_;
  uint32 fifo_ordering_;
  base::TimeTicks time_deferred_;

  DISALLOW_COPY_AND_ASSIGN(ScheduledResourceRequest);
};

bool ResourceScheduler::ScheduledResourceSorter::operator()(
    const ScheduledResourceRequest* a,
    const ScheduledResourceRequest* b) const {
  // Want the set to be ordered first by decreasing priority, then by
  // decreasing intra_priority.
  // i.e. with (priority, intra_priority)
  // [(1, 0), (1, 0), (0, 100), (0, 0)]
  if (a->get_request_priority_params() != b->get_request_priority_params())
    return a->get_request_priority_params().GreaterThan(
        b->get_request_priority_params());

  // If priority/intra_priority is the same, fall back to fifo ordering.
  // std::multiset doesn't guarantee this until c++11.
  return a->fifo_ordering() < b->fifo_ordering();
}

void ResourceScheduler::RequestQueue::Insert(
    ScheduledResourceRequest* request) {
  DCHECK(!ContainsKey(pointers_, request));
  request->set_fifo_ordering(MakeFifoOrderingId());
  pointers_[request] = queue_.insert(request);
}

// Each client represents a tab.
class ResourceScheduler::Client {
 public:
  explicit Client(ResourceScheduler* scheduler,
                  bool is_visible,
                  bool is_audible)
      : is_audible_(is_audible),
        is_visible_(is_visible),
        is_loaded_(false),
        is_paused_(false),
        has_body_(false),
        using_spdy_proxy_(false),
        load_started_time_(base::TimeTicks::Now()),
        scheduler_(scheduler),
        in_flight_delayable_count_(0),
        total_layout_blocking_count_(0),
        throttle_state_(ResourceScheduler::THROTTLED) {}

  ~Client() {
    // Update to default state and pause to ensure the scheduler has a
    // correct count of relevant types of clients.
    is_visible_ = false;
    is_audible_ = false;
    is_paused_ = true;
    UpdateThrottleState();
  }

  void ScheduleRequest(
      net::URLRequest* url_request,
      ScheduledResourceRequest* request) {
    if (ShouldStartRequest(request) == START_REQUEST)
      StartRequest(request);
    else
      pending_requests_.Insert(request);
    SetRequestClassification(request, ClassifyRequest(request));
  }

  void RemoveRequest(ScheduledResourceRequest* request) {
    if (pending_requests_.IsQueued(request)) {
      pending_requests_.Erase(request);
      DCHECK(!ContainsKey(in_flight_requests_, request));
    } else {
      EraseInFlightRequest(request);

      // Removing this request may have freed up another to load.
      LoadAnyStartablePendingRequests();
    }
  }

  RequestSet RemoveAllRequests() {
    RequestSet unowned_requests;
    for (RequestSet::iterator it = in_flight_requests_.begin();
         it != in_flight_requests_.end(); ++it) {
      unowned_requests.insert(*it);
      (*it)->set_classification(NORMAL_REQUEST);
    }
    ClearInFlightRequests();
    return unowned_requests;
  }

  bool is_active() const { return is_visible_ || is_audible_; }

  bool is_loaded() const { return is_loaded_; }

  bool is_visible() const { return is_visible_; }

  void OnAudibilityChanged(bool is_audible) {
    UpdateState(is_audible, &is_audible_);
  }

  void OnVisibilityChanged(bool is_visible) {
    UpdateState(is_visible, &is_visible_);
  }

  // Function to update any client state variable used to determine whether a
  // Client is active or background. Used for is_visible_ and is_audible_.
  void UpdateState(bool new_state, bool* current_state) {
    bool was_active = is_active();
    *current_state = new_state;
    if (was_active == is_active())
      return;
    last_active_switch_time_ = base::TimeTicks::Now();
    UpdateThrottleState();
  }

  void OnLoadingStateChanged(bool is_loaded) {
    if (is_loaded == is_loaded_) {
      return;
    }
    is_loaded_ = is_loaded;
    UpdateThrottleState();
    if (!is_loaded_) {
      load_started_time_ = base::TimeTicks::Now();
      last_active_switch_time_ = base::TimeTicks();
      return;
    }
    base::TimeTicks cur_time = base::TimeTicks::Now();
    const char* num_clients =
        GetNumClientsString(scheduler_->client_map_.size());
    const char* client_category = "Other";
    if (last_active_switch_time_.is_null()) {
      client_category = is_active() ? "Active" : "Background";
    } else if (is_active()) {
      base::TimeDelta time_since_active = cur_time - last_active_switch_time_;
      PostHistogram("ClientLoadedTime", "Other.SwitchedToActive", NULL,
                    time_since_active);
      PostHistogram("ClientLoadedTime", "Other.SwitchedToActive", num_clients,
                    time_since_active);
    }
    base::TimeDelta time_since_load_started = cur_time - load_started_time_;
    PostHistogram("ClientLoadedTime", client_category, NULL,
                  time_since_load_started);
    PostHistogram("ClientLoadedTime", client_category, num_clients,
                  time_since_load_started);
    // TODO(aiolos): The above histograms do not take main resource load time
    // into account with PlzNavigate. The ResourceScheduler will also load the
    // main resources without a Client with the current logic. Find a way to
    // fix both of these issues.
  }

  void SetPaused() {
    is_paused_ = true;
    UpdateThrottleState();
  }

  void UpdateThrottleState() {
    ClientThrottleState old_throttle_state = throttle_state_;
    if (!scheduler_->should_throttle()) {
      SetThrottleState(UNTHROTTLED);
    } else if (is_active() && !is_loaded_) {
      SetThrottleState(ACTIVE_AND_LOADING);
    } else if (is_active()) {
      SetThrottleState(UNTHROTTLED);
    } else if (is_paused_) {
      SetThrottleState(PAUSED);
    } else if (!scheduler_->active_clients_loaded()) {
      SetThrottleState(THROTTLED);
    } else if (is_loaded_ && scheduler_->should_coalesce()) {
      SetThrottleState(COALESCED);
    } else if (!is_active()) {
      SetThrottleState(UNTHROTTLED);
    }

    if (throttle_state_ == old_throttle_state) {
      return;
    }

    if (throttle_state_ == ACTIVE_AND_LOADING) {
      scheduler_->IncrementActiveClientsLoading();
    } else if (old_throttle_state == ACTIVE_AND_LOADING) {
      scheduler_->DecrementActiveClientsLoading();
    }
    if (throttle_state_ == COALESCED) {
      scheduler_->IncrementCoalescedClients();
    } else if (old_throttle_state == COALESCED) {
      scheduler_->DecrementCoalescedClients();
    }
  }

  void OnWillInsertBody() {
    has_body_ = true;
    LoadAnyStartablePendingRequests();
  }

  void OnReceivedSpdyProxiedHttpResponse() {
    if (!using_spdy_proxy_) {
      using_spdy_proxy_ = true;
      LoadAnyStartablePendingRequests();
    }
  }

  void ReprioritizeRequest(ScheduledResourceRequest* request,
                           RequestPriorityParams old_priority_params,
                           RequestPriorityParams new_priority_params) {
    request->url_request()->SetPriority(new_priority_params.priority);
    request->set_request_priority_params(new_priority_params);
    if (!pending_requests_.IsQueued(request)) {
      DCHECK(ContainsKey(in_flight_requests_, request));
      // The priority of the request and priority support of the server may
      // have changed, so update the delayable count.
      SetRequestClassification(request, ClassifyRequest(request));
      // Request has already started.
      return;
    }

    pending_requests_.Erase(request);
    pending_requests_.Insert(request);

    if (new_priority_params.priority > old_priority_params.priority) {
      // Check if this request is now able to load at its new priority.
      LoadAnyStartablePendingRequests();
    }
  }

  // Called on Client creation, when a Client changes user observability,
  // possibly when all observable Clients have finished loading, and
  // possibly when this Client has finished loading.
  //
  // Client became observable.
  //     any state -> UNTHROTTLED
  // Client is unobservable, but all observable clients finished loading.
  //     THROTTLED -> UNTHROTTLED
  // Non-observable client finished loading.
  //     THROTTLED || UNTHROTTLED -> COALESCED
  // Non-observable client, an observable client starts loading.
  //     COALESCED -> THROTTLED
  // A COALESCED client will transition into UNTHROTTLED when the network is
  // woken up by a heartbeat and then transition back into COALESCED.
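  // For example, a background tab that finishes loading while no observable
  // Clients are loading (and coalescing is enabled) moves THROTTLED ->
  // COALESCED; when an observable Client later starts loading, it moves back
  // to THROTTLED until that Client finishes.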
  void SetThrottleState(ResourceScheduler::ClientThrottleState throttle_state) {
    if (throttle_state == throttle_state_) {
      return;
    }
    throttle_state_ = throttle_state;
    if (throttle_state_ != PAUSED) {
      is_paused_ = false;
    }
    LoadAnyStartablePendingRequests();
    // TODO(aiolos): Stop any started but not in-flight requests when
    // switching to stricter throttle state?
  }

  ResourceScheduler::ClientThrottleState throttle_state() const {
    return throttle_state_;
  }

  void LoadCoalescedRequests() {
    if (throttle_state_ != COALESCED) {
      return;
    }
    if (scheduler_->active_clients_loaded()) {
      SetThrottleState(UNTHROTTLED);
    } else {
      SetThrottleState(THROTTLED);
    }
    LoadAnyStartablePendingRequests();
    SetThrottleState(COALESCED);
  }

 private:
  enum ShouldStartReqResult {
    DO_NOT_START_REQUEST_AND_STOP_SEARCHING,
    DO_NOT_START_REQUEST_AND_KEEP_SEARCHING,
    START_REQUEST,
  };

  void InsertInFlightRequest(ScheduledResourceRequest* request) {
    in_flight_requests_.insert(request);
    SetRequestClassification(request, ClassifyRequest(request));
  }

  void EraseInFlightRequest(ScheduledResourceRequest* request) {
    size_t erased = in_flight_requests_.erase(request);
    DCHECK_EQ(1u, erased);
    // Clear any special state that we were tracking for this request.
    SetRequestClassification(request, NORMAL_REQUEST);
  }

  void ClearInFlightRequests() {
    in_flight_requests_.clear();
    in_flight_delayable_count_ = 0;
    total_layout_blocking_count_ = 0;
  }

  size_t CountRequestsWithClassification(
      const RequestClassification classification, const bool include_pending) {
    size_t classification_request_count = 0;
    for (RequestSet::const_iterator it = in_flight_requests_.begin();
         it != in_flight_requests_.end(); ++it) {
      if ((*it)->classification() == classification)
        classification_request_count++;
    }
    if (include_pending) {
      for (RequestQueue::NetQueue::const_iterator
           it = pending_requests_.GetNextHighestIterator();
           it != pending_requests_.End(); ++it) {
        if ((*it)->classification() == classification)
          classification_request_count++;
      }
    }
    return classification_request_count;
  }

  void SetRequestClassification(ScheduledResourceRequest* request,
                                RequestClassification classification) {
    RequestClassification old_classification = request->classification();
    if (old_classification == classification)
      return;

    if (old_classification == IN_FLIGHT_DELAYABLE_REQUEST)
      in_flight_delayable_count_--;
    if (old_classification == LAYOUT_BLOCKING_REQUEST)
      total_layout_blocking_count_--;

    if (classification == IN_FLIGHT_DELAYABLE_REQUEST)
      in_flight_delayable_count_++;
    if (classification == LAYOUT_BLOCKING_REQUEST)
      total_layout_blocking_count_++;

    request->set_classification(classification);
    DCHECK_EQ(
        CountRequestsWithClassification(IN_FLIGHT_DELAYABLE_REQUEST, false),
        in_flight_delayable_count_);
    DCHECK_EQ(CountRequestsWithClassification(LAYOUT_BLOCKING_REQUEST, true),
              total_layout_blocking_count_);
  }

  RequestClassification ClassifyRequest(ScheduledResourceRequest* request) {
    // If a request is already marked as layout-blocking make sure to keep the
    // classification across redirects unless the priority was lowered.
    if (request->classification() == LAYOUT_BLOCKING_REQUEST &&
        request->url_request()->priority() > net::LOW) {
      return LAYOUT_BLOCKING_REQUEST;
    }

    if (!has_body_ && request->url_request()->priority() > net::LOW)
      return LAYOUT_BLOCKING_REQUEST;

    if (request->url_request()->priority() < net::LOW) {
      net::HostPortPair host_port_pair =
          net::HostPortPair::FromURL(request->url_request()->url());
      net::HttpServerProperties& http_server_properties =
          *request->url_request()->context()->http_server_properties();
      if (!http_server_properties.SupportsRequestPriority(host_port_pair) &&
          ContainsKey(in_flight_requests_, request)) {
        return IN_FLIGHT_DELAYABLE_REQUEST;
      }
    }
    return NORMAL_REQUEST;
  }

  bool ShouldKeepSearching(
      const net::HostPortPair& active_request_host) const {
    size_t same_host_count = 0;
    for (RequestSet::const_iterator it = in_flight_requests_.begin();
         it != in_flight_requests_.end(); ++it) {
      net::HostPortPair host_port_pair =
          net::HostPortPair::FromURL((*it)->url_request()->url());
      if (active_request_host.Equals(host_port_pair)) {
        same_host_count++;
        if (same_host_count >= kMaxNumDelayableRequestsPerHost)
          return true;
      }
    }
    return false;
  }

  void StartRequest(ScheduledResourceRequest* request) {
    InsertInFlightRequest(request);
    request->Start();
  }

  // ShouldStartRequest is the main scheduling algorithm.
  //
  // Requests are evaluated on five attributes:
  //
  // 1. Non-delayable requests:
  //   * Synchronous requests.
  //   * Non-HTTP[S] requests.
  //
  // 2. Requests to request-priority-capable origin servers.
  //
  // 3. High-priority requests:
  //   * Higher priority requests (>= net::LOW).
  //
  // 4. Layout-blocking requests:
  //   * High-priority requests (> net::LOW) initiated before the renderer has
  //     a <body>.
  //
  // 5. Low priority requests
  //
  // The following rules are followed:
  //
  // ACTIVE_AND_LOADING and UNTHROTTLED Clients follow these rules:
  //   * Non-delayable, High-priority and request-priority capable requests are
  //     issued immediately.
  //   * Low priority requests are delayable.
  //   * Allow one delayable request to load at a time while layout-blocking
  //     requests are loading or the body tag has not yet been parsed.
  //   * If no high priority or layout-blocking requests are in flight, start
  //     loading delayable requests.
  //   * Never exceed 10 delayable requests in flight per client.
  //   * Never exceed 6 delayable requests for a given host.
  //
  // THROTTLED Clients follow these rules:
  //   * Non-delayable and request-priority-capable requests are issued
  //     immediately.
  //   * At most one non-request-priority-capable request will be issued per
  //     THROTTLED Client.
  //   * If no high priority requests are in flight, start loading low priority
  //     requests.
  //
  // COALESCED Clients never load requests, with the following exceptions:
  //   * Non-delayable requests are issued immediately.
  //   * On a (currently 5 second) heart beat, they load all requests as an
  //     UNTHROTTLED Client, and then return to the COALESCED state.
  //   * When an active Client makes a request, they are THROTTLED until the
  //     active Client finishes loading.
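  //
  // Informal illustration of the rules above (not an exhaustive trace): in an
  // UNTHROTTLED Client whose <body> has not been parsed, a net::LOWEST request
  // to a host without request-priority support is delayable. It starts only if
  // fewer than kMaxNumDelayableRequestsPerClient delayable requests are in
  // flight, fewer than kMaxNumDelayableRequestsPerHost in-flight requests
  // target the same host, and either no non-delayable requests are in flight
  // or no other delayable request is currently in flight.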
  ShouldStartReqResult ShouldStartRequest(
      ScheduledResourceRequest* request) const {
    const net::URLRequest& url_request = *request->url_request();
    // Synchronous requests could block the entire renderer, which could impact
    // user-observable Clients.
    if (!ResourceRequestInfo::ForRequest(&url_request)->IsAsync()) {
      return START_REQUEST;
    }

    // TODO(simonjam): This may end up causing disk contention. We should
    // experiment with throttling if that happens.
    // TODO(aiolos): We probably want to Coalesce these as well to avoid
    // disk contention.
    if (!url_request.url().SchemeIsHTTPOrHTTPS()) {
      return START_REQUEST;
    }

    if (throttle_state_ == COALESCED) {
      return DO_NOT_START_REQUEST_AND_STOP_SEARCHING;
    }

    if (using_spdy_proxy_ && url_request.url().SchemeIs(url::kHttpScheme)) {
      return START_REQUEST;
    }

    net::HostPortPair host_port_pair =
        net::HostPortPair::FromURL(url_request.url());
    net::HttpServerProperties& http_server_properties =
        *url_request.context()->http_server_properties();

    // TODO(willchan): We should really improve this algorithm as described in
    // crbug.com/164101. Also, theoretically we should not count a
    // request-priority capable request against the delayable requests limit.
    if (http_server_properties.SupportsRequestPriority(host_port_pair)) {
      return START_REQUEST;
    }

    if (throttle_state_ == THROTTLED &&
        in_flight_requests_.size() >= kMaxNumThrottledRequestsPerClient) {
      // There may still be request-priority-capable requests that should be
      // issued.
      return DO_NOT_START_REQUEST_AND_KEEP_SEARCHING;
    }

    // High-priority and layout-blocking requests.
    if (url_request.priority() >= net::LOW) {
      return START_REQUEST;
    }

    if (in_flight_delayable_count_ >= kMaxNumDelayableRequestsPerClient) {
      return DO_NOT_START_REQUEST_AND_STOP_SEARCHING;
    }

    if (ShouldKeepSearching(host_port_pair)) {
      // There may be other requests for other hosts we'd allow,
      // so keep checking.
      return DO_NOT_START_REQUEST_AND_KEEP_SEARCHING;
    }

    bool have_immediate_requests_in_flight =
        in_flight_requests_.size() > in_flight_delayable_count_;
    if (have_immediate_requests_in_flight &&
        (!has_body_ || total_layout_blocking_count_ != 0) &&
        in_flight_delayable_count_ != 0) {
      return DO_NOT_START_REQUEST_AND_STOP_SEARCHING;
    }

    return START_REQUEST;
  }

  void LoadAnyStartablePendingRequests() {
    // We iterate through all the pending requests, starting with the highest
    // priority one. For each entry, one of three things can happen:
    // 1) We start the request, remove it from the list, and keep checking.
    // 2) We do NOT start the request, but ShouldStartRequest() signals us that
    //    there may be room for other requests, so we keep checking and leave
    //    the previous request still in the list.
    // 3) We do not start the request, same as above, but ShouldStartRequest()
    //    tells us there's no point in checking any further requests.
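    // For example, if the next-highest pending request is delayable and
    // targets a host that already has kMaxNumDelayableRequestsPerHost requests
    // in flight, ShouldStartRequest() returns
    // DO_NOT_START_REQUEST_AND_KEEP_SEARCHING and the loop moves on to
    // requests for other hosts instead of stopping.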
    RequestQueue::NetQueue::iterator request_iter =
        pending_requests_.GetNextHighestIterator();

    while (request_iter != pending_requests_.End()) {
      ScheduledResourceRequest* request = *request_iter;
      ShouldStartReqResult query_result = ShouldStartRequest(request);

      if (query_result == START_REQUEST) {
        pending_requests_.Erase(request);
        StartRequest(request);

        // StartRequest can modify the pending list, so we (re)start evaluation
        // from the currently highest priority request. Avoid copying a singular
        // iterator, which would trigger undefined behavior.
        if (pending_requests_.GetNextHighestIterator() ==
            pending_requests_.End())
          break;
        request_iter = pending_requests_.GetNextHighestIterator();
      } else if (query_result == DO_NOT_START_REQUEST_AND_KEEP_SEARCHING) {
        ++request_iter;
        continue;
      } else {
        DCHECK(query_result == DO_NOT_START_REQUEST_AND_STOP_SEARCHING);
        break;
      }
    }
  }

  bool is_audible_;
  bool is_visible_;
  bool is_loaded_;
  bool is_paused_;
  bool has_body_;
  bool using_spdy_proxy_;
  RequestQueue pending_requests_;
  RequestSet in_flight_requests_;
  base::TimeTicks load_started_time_;
  // The last time the client switched state between active and background.
  base::TimeTicks last_active_switch_time_;
  ResourceScheduler* scheduler_;
  // The number of delayable in-flight requests.
  size_t in_flight_delayable_count_;
  // The number of layout-blocking in-flight requests.
  size_t total_layout_blocking_count_;
  ResourceScheduler::ClientThrottleState throttle_state_;
};

ResourceScheduler::ResourceScheduler()
    : should_coalesce_(false),
      should_throttle_(false),
      active_clients_loading_(0),
      coalesced_clients_(0),
      coalescing_timer_(new base::Timer(true /* retain_user_task */,
                                        true /* is_repeating */)) {
  std::string throttling_trial_group =
      base::FieldTrialList::FindFullName("RequestThrottlingAndCoalescing");
  if (throttling_trial_group == "Throttle") {
    should_throttle_ = true;
  } else if (throttling_trial_group == "Coalesce") {
    should_coalesce_ = true;
    should_throttle_ = true;
  }
}

ResourceScheduler::~ResourceScheduler() {
  DCHECK(unowned_requests_.empty());
  DCHECK(client_map_.empty());
}

void ResourceScheduler::SetThrottleOptionsForTesting(bool should_throttle,
                                                     bool should_coalesce) {
  should_coalesce_ = should_coalesce;
  should_throttle_ = should_throttle;
  OnLoadingActiveClientsStateChangedForAllClients();
}

ResourceScheduler::ClientThrottleState
ResourceScheduler::GetClientStateForTesting(int child_id, int route_id) {
  Client* client = GetClient(child_id, route_id);
  DCHECK(client);
  return client->throttle_state();
}

scoped_ptr<ResourceThrottle> ResourceScheduler::ScheduleRequest(
    int child_id,
    int route_id,
    net::URLRequest* url_request) {
  DCHECK(CalledOnValidThread());
  ClientId client_id = MakeClientId(child_id, route_id);
  scoped_ptr<ScheduledResourceRequest> request(new ScheduledResourceRequest(
      client_id,
      url_request,
      this,
      RequestPriorityParams(url_request->priority(), 0)));

  ClientMap::iterator it = client_map_.find(client_id);
  if (it == client_map_.end()) {
    // There are several ways this could happen:
    // 1. <a ping> requests don't have a route_id.
    // 2. Most unittests don't send the IPCs needed to register Clients.
    // 3. The tab is closed while a RequestResource IPC is in flight.
    unowned_requests_.insert(request.get());
    request->Start();
    return request.Pass();
  }

  Client* client = it->second;
  client->ScheduleRequest(url_request, request.get());
  return request.Pass();
}

void ResourceScheduler::RemoveRequest(ScheduledResourceRequest* request) {
  DCHECK(CalledOnValidThread());
  if (ContainsKey(unowned_requests_, request)) {
    unowned_requests_.erase(request);
    return;
  }

  ClientMap::iterator client_it = client_map_.find(request->client_id());
  if (client_it == client_map_.end()) {
    return;
  }

  Client* client = client_it->second;
  client->RemoveRequest(request);
}

void ResourceScheduler::OnClientCreated(int child_id,
                                        int route_id,
                                        bool is_visible,
                                        bool is_audible) {
  DCHECK(CalledOnValidThread());
  ClientId client_id = MakeClientId(child_id, route_id);
  DCHECK(!ContainsKey(client_map_, client_id));

  Client* client = new Client(this, is_visible, is_audible);
  client_map_[client_id] = client;

  client->UpdateThrottleState();
}

void ResourceScheduler::OnClientDeleted(int child_id, int route_id) {
  DCHECK(CalledOnValidThread());
  ClientId client_id = MakeClientId(child_id, route_id);
  DCHECK(ContainsKey(client_map_, client_id));
  ClientMap::iterator it = client_map_.find(client_id);
  if (it == client_map_.end())
    return;

  Client* client = it->second;
  // FYI, ResourceDispatcherHost cancels all of the requests after this function
  // is called. It should end up canceling all of the requests except for a
  // cross-renderer navigation.
  RequestSet client_unowned_requests = client->RemoveAllRequests();
  for (RequestSet::iterator it = client_unowned_requests.begin();
       it != client_unowned_requests.end(); ++it) {
    unowned_requests_.insert(*it);
  }

  delete client;
  client_map_.erase(it);
}

void ResourceScheduler::OnLoadingStateChanged(int child_id,
                                              int route_id,
                                              bool is_loaded) {
  Client* client = GetClient(child_id, route_id);
  DCHECK(client);
  client->OnLoadingStateChanged(is_loaded);
}

void ResourceScheduler::OnVisibilityChanged(int child_id,
                                            int route_id,
                                            bool is_visible) {
  Client* client = GetClient(child_id, route_id);
  DCHECK(client);
  client->OnVisibilityChanged(is_visible);
}

void ResourceScheduler::OnAudibilityChanged(int child_id,
                                            int route_id,
                                            bool is_audible) {
  Client* client = GetClient(child_id, route_id);
  // We might get this call after the client has been deleted.
  if (!client)
    return;
  client->OnAudibilityChanged(is_audible);
}

void ResourceScheduler::OnNavigate(int child_id, int route_id) {
  DCHECK(CalledOnValidThread());
  ClientId client_id = MakeClientId(child_id, route_id);

  ClientMap::iterator it = client_map_.find(client_id);
  if (it == client_map_.end()) {
    // The client was likely deleted shortly before we received this IPC.
    return;
  }

  Client* client = it->second;
  client->OnNavigate();
}

void ResourceScheduler::OnWillInsertBody(int child_id, int route_id) {
  DCHECK(CalledOnValidThread());
  ClientId client_id = MakeClientId(child_id, route_id);

  ClientMap::iterator it = client_map_.find(client_id);
  if (it == client_map_.end()) {
    // The client was likely deleted shortly before we received this IPC.
    return;
  }

  Client* client = it->second;
  client->OnWillInsertBody();
}

void ResourceScheduler::OnReceivedSpdyProxiedHttpResponse(
    int child_id,
    int route_id) {
  DCHECK(CalledOnValidThread());
  ClientId client_id = MakeClientId(child_id, route_id);

  ClientMap::iterator client_it = client_map_.find(client_id);
  if (client_it == client_map_.end()) {
    return;
  }

  Client* client = client_it->second;
  client->OnReceivedSpdyProxiedHttpResponse();
}

bool ResourceScheduler::IsClientVisibleForTesting(int child_id, int route_id) {
  Client* client = GetClient(child_id, route_id);
  DCHECK(client);
  return client->is_visible();
}

ResourceScheduler::Client* ResourceScheduler::GetClient(int child_id,
                                                        int route_id) {
  ClientId client_id = MakeClientId(child_id, route_id);
  ClientMap::iterator client_it = client_map_.find(client_id);
  if (client_it == client_map_.end()) {
    return NULL;
  }
  return client_it->second;
}

void ResourceScheduler::DecrementActiveClientsLoading() {
  DCHECK_NE(0u, active_clients_loading_);
  --active_clients_loading_;
  DCHECK_EQ(active_clients_loading_, CountActiveClientsLoading());
  if (active_clients_loading_ == 0) {
    OnLoadingActiveClientsStateChangedForAllClients();
  }
}

void ResourceScheduler::IncrementActiveClientsLoading() {
  ++active_clients_loading_;
  DCHECK_EQ(active_clients_loading_, CountActiveClientsLoading());
  if (active_clients_loading_ == 1) {
    OnLoadingActiveClientsStateChangedForAllClients();
  }
}

void ResourceScheduler::OnLoadingActiveClientsStateChangedForAllClients() {
  ClientMap::iterator client_it = client_map_.begin();
  while (client_it != client_map_.end()) {
    Client* client = client_it->second;
    client->UpdateThrottleState();
    ++client_it;
  }
}

size_t ResourceScheduler::CountActiveClientsLoading() const {
  size_t active_and_loading = 0;
  ClientMap::const_iterator client_it = client_map_.begin();
  while (client_it != client_map_.end()) {
    Client* client = client_it->second;
    if (client->throttle_state() == ACTIVE_AND_LOADING) {
      ++active_and_loading;
    }
    ++client_it;
  }
  return active_and_loading;
}

void ResourceScheduler::IncrementCoalescedClients() {
  ++coalesced_clients_;
  DCHECK(should_coalesce_);
  DCHECK_EQ(coalesced_clients_, CountCoalescedClients());
  if (coalesced_clients_ == 1) {
    coalescing_timer_->Start(
        FROM_HERE,
        base::TimeDelta::FromMilliseconds(kCoalescedTimerPeriod),
        base::Bind(&ResourceScheduler::LoadCoalescedRequests,
                   base::Unretained(this)));
  }
}

void ResourceScheduler::DecrementCoalescedClients() {
  DCHECK(should_coalesce_);
  DCHECK_NE(0U, coalesced_clients_);
  --coalesced_clients_;
  DCHECK_EQ(coalesced_clients_, CountCoalescedClients());
  if (coalesced_clients_ == 0) {
    coalescing_timer_->Stop();
  }
}

size_t ResourceScheduler::CountCoalescedClients() const {
  DCHECK(should_coalesce_);
  size_t coalesced_clients = 0;
  ClientMap::const_iterator client_it = client_map_.begin();
  while (client_it != client_map_.end()) {
    Client* client = client_it->second;
    if (client->throttle_state() == COALESCED) {
      ++coalesced_clients;
    }
    ++client_it;
  }
  // Return the locally computed count; returning the coalesced_clients_
  // member here would make the DCHECK_EQ checks above trivially true.
  return coalesced_clients;
}

void ResourceScheduler::LoadCoalescedRequests() {
  DCHECK(should_coalesce_);
  ClientMap::iterator client_it = client_map_.begin();
  while (client_it != client_map_.end()) {
    Client* client = client_it->second;
    client->LoadCoalescedRequests();
    ++client_it;
  }
}

ResourceScheduler::ClientState ResourceScheduler::GetClientState(
    ClientId client_id) const {
  ClientMap::const_iterator client_it = client_map_.find(client_id);
  if (client_it == client_map_.end())
    return UNKNOWN;  // Assumes a catch-all ClientState value for missing clients.
  return client_it->second->is_active() ? ACTIVE : BACKGROUND;
}

void ResourceScheduler::ReprioritizeRequest(ScheduledResourceRequest* request,
                                            net::RequestPriority new_priority,
                                            int new_intra_priority_value) {
  if (request->url_request()->load_flags() & net::LOAD_IGNORE_LIMITS) {
    // We should not be re-prioritizing requests with the
    // IGNORE_LIMITS flag.
    return;
  }

  RequestPriorityParams new_priority_params(new_priority,
                                            new_intra_priority_value);
  RequestPriorityParams old_priority_params =
      request->get_request_priority_params();

  DCHECK(old_priority_params != new_priority_params);

  ClientMap::iterator client_it = client_map_.find(request->client_id());
  if (client_it == client_map_.end()) {
    // The client was likely deleted shortly before we received this IPC.
    request->url_request()->SetPriority(new_priority_params.priority);
    request->set_request_priority_params(new_priority_params);
    return;
  }

  if (old_priority_params == new_priority_params)
    return;

  Client* client = client_it->second;
  client->ReprioritizeRequest(
      request, old_priority_params, new_priority_params);
}

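// ClientId packs the IPC routing pair into one key: the child (renderer
// process) id in the upper 32 bits and the route id in the lower 32 bits.
// This assumes ClientId (declared in resource_scheduler.h) is a 64-bit
// integer type wide enough to hold both values.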
ResourceScheduler::ClientId ResourceScheduler::MakeClientId(
    int child_id, int route_id) {
  return (static_cast<ResourceScheduler::ClientId>(child_id) << 32) | route_id;
}

}  // namespace content