// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "content/browser/loader/resource_scheduler.h"

#include "base/bind.h"
#include "base/metrics/field_trial.h"
#include "base/metrics/histogram.h"
#include "base/stl_util.h"
#include "base/strings/string_number_conversions.h"
#include "base/strings/string_piece.h"
#include "base/strings/string_split.h"
#include "base/strings/stringprintf.h"
#include "base/time/time.h"
#include "content/browser/loader/resource_message_delegate.h"
#include "content/common/resource_messages.h"
#include "content/public/browser/resource_controller.h"
#include "content/public/browser/resource_request_info.h"
#include "content/public/browser/resource_throttle.h"
#include "ipc/ipc_message_macros.h"
#include "net/base/host_port_pair.h"
#include "net/base/load_flags.h"
#include "net/base/request_priority.h"
#include "net/http/http_server_properties.h"
#include "net/url_request/url_request.h"
#include "net/url_request/url_request_context.h"

namespace content {

namespace {

// Field trial constants
const char kThrottleCoalesceFieldTrial[] = "RequestThrottlingAndCoalescing";
const char kThrottleCoalesceFieldTrialThrottle[] = "Throttle";
const char kThrottleCoalesceFieldTrialCoalesce[] = "Coalesce";

const char kRequestLimitFieldTrial[] = "OutstandingRequestLimiting";
const char kRequestLimitFieldTrialGroupPrefix[] = "Limit";

// Post ResourceScheduler histograms of the following forms:
// If |histogram_suffix| is NULL or the empty string:
//   ResourceScheduler.base_name.histogram_name
// Else:
//   ResourceScheduler.base_name.histogram_name.histogram_suffix
void PostHistogram(const char* base_name,
                   const char* histogram_name,
                   const char* histogram_suffix,
                   base::TimeDelta time) {
  std::string histogram =
      base::StringPrintf("ResourceScheduler.%s.%s", base_name, histogram_name);
  if (histogram_suffix && histogram_suffix[0] != '\0')
    histogram = histogram + "." + histogram_suffix;
  base::HistogramBase* histogram_counter = base::Histogram::FactoryTimeGet(
      histogram, base::TimeDelta::FromMilliseconds(1),
      base::TimeDelta::FromMinutes(5), 50,
      base::Histogram::kUmaTargetedHistogramFlag);
  histogram_counter->AddTime(time);
}
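
// For example, PostHistogram("ClientLoadedTime", "Active", "Max5Clients", delta)
// records |delta| under "ResourceScheduler.ClientLoadedTime.Active.Max5Clients",
// while a NULL or empty |histogram_suffix| drops the final component.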

// For use with PostHistogram to specify the correct string for histogram
// suffixes based on number of Clients.
const char* GetNumClientsString(size_t num_clients) {
  if (num_clients == 1)
    return "1Client";
  else if (num_clients <= 5)
    return "Max5Clients";
  else if (num_clients <= 15)
    return "Max15Clients";
  else if (num_clients <= 30)
    return "Max30Clients";
  return "Over30Clients";
}

}  // namespace

static const size_t kCoalescedTimerPeriod = 5000;
static const size_t kMaxNumDelayableRequestsPerClient = 10;
static const size_t kMaxNumDelayableRequestsPerHost = 6;
static const size_t kMaxNumThrottledRequestsPerClient = 1;
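
// kCoalescedTimerPeriod is in milliseconds (it is passed to
// base::TimeDelta::FromMilliseconds() when the coalescing timer is started);
// the other three constants are request counts enforced by
// Client::ShouldStartRequest() and Client::ShouldKeepSearching().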

struct ResourceScheduler::RequestPriorityParams {
  RequestPriorityParams()
      : priority(net::DEFAULT_PRIORITY),
        intra_priority(0) {
  }

  RequestPriorityParams(net::RequestPriority priority, int intra_priority)
      : priority(priority),
        intra_priority(intra_priority) {
  }

  bool operator==(const RequestPriorityParams& other) const {
    return (priority == other.priority) &&
        (intra_priority == other.intra_priority);
  }

  bool operator!=(const RequestPriorityParams& other) const {
    return !(*this == other);
  }

  bool GreaterThan(const RequestPriorityParams& other) const {
    if (priority != other.priority)
      return priority > other.priority;
    return intra_priority > other.intra_priority;
  }

  net::RequestPriority priority;
  int intra_priority;
};

class ResourceScheduler::RequestQueue {
 public:
  typedef std::multiset<ScheduledResourceRequest*, ScheduledResourceSorter>
      NetQueue;

  RequestQueue() : fifo_ordering_ids_(0) {}

  // Adds |request| to the queue with given |priority|.
  void Insert(ScheduledResourceRequest* request);

  // Removes |request| from the queue.
  void Erase(ScheduledResourceRequest* request) {
    PointerMap::iterator it = pointers_.find(request);
    DCHECK(it != pointers_.end());
    if (it == pointers_.end())
      return;
    queue_.erase(it->second);
    pointers_.erase(it);
  }

  NetQueue::iterator GetNextHighestIterator() {
    return queue_.begin();
  }

  NetQueue::iterator End() {
    return queue_.end();
  }

  // Returns true if |request| is queued.
  bool IsQueued(ScheduledResourceRequest* request) const {
    return ContainsKey(pointers_, request);
  }

  // Returns true if no requests are queued.
  bool IsEmpty() const { return queue_.size() == 0; }

 private:
  typedef std::map<ScheduledResourceRequest*, NetQueue::iterator> PointerMap;

  uint32 MakeFifoOrderingId() {
    fifo_ordering_ids_ += 1;
    return fifo_ordering_ids_;
  }

  // Used to create an ordering ID for scheduled resources so that resources
  // with same priority/intra_priority stay in fifo order.
  uint32 fifo_ordering_ids_;

  NetQueue queue_;
  PointerMap pointers_;
};
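
// The queue keeps two indices over the same set of requests: |queue_| (a
// multiset ordered by ScheduledResourceSorter) provides priority-ordered
// iteration, while |pointers_| maps each request back to its multiset
// iterator so Erase() and IsQueued() do not need a linear scan.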

// This is the handle we return to the ResourceDispatcherHostImpl so it can
// interact with the request.
class ResourceScheduler::ScheduledResourceRequest
    : public ResourceMessageDelegate,
      public ResourceThrottle {
 public:
  ScheduledResourceRequest(const ClientId& client_id,
                           net::URLRequest* request,
                           ResourceScheduler* scheduler,
                           const RequestPriorityParams& priority)
      : ResourceMessageDelegate(request),
        client_id_(client_id),
        client_state_on_creation_(scheduler->GetClientState(client_id_)),
        request_(request),
        ready_(false),
        deferred_(false),
        classification_(NORMAL_REQUEST),
        scheduler_(scheduler),
        priority_(priority),
        fifo_ordering_(0) {
  }

  ~ScheduledResourceRequest() override { scheduler_->RemoveRequest(this); }

  void Start() {
    ready_ = true;
    if (!request_->status().is_success())
      return;
    base::TimeTicks time = base::TimeTicks::Now();
    ClientState current_state = scheduler_->GetClientState(client_id_);
    // Note: the client state isn't perfectly accurate since it won't capture
    // tabs which have switched between active and background multiple times.
    // Ex: A tab with the following transitions Active -> Background -> Active
    // will be recorded as Active.
    const char* client_state = "Other";
    if (current_state == client_state_on_creation_ &&
        current_state == ACTIVE) {
      client_state = "Active";
    } else if (current_state == client_state_on_creation_ &&
               current_state == BACKGROUND) {
      client_state = "Background";
    }

    base::TimeDelta time_was_deferred = base::TimeDelta::FromMicroseconds(0);
    if (deferred_) {
      deferred_ = false;
      controller()->Resume();
      time_was_deferred = time - time_deferred_;
    }
    PostHistogram("RequestTimeDeferred", client_state, NULL, time_was_deferred);
    PostHistogram("RequestTimeThrottled", client_state, NULL,
                  time - request_->creation_time());
    // TODO(aiolos): Remove one of the above histograms after gaining an
    // understanding of the difference between them and which one is more
    // interesting.
  }

  void set_request_priority_params(const RequestPriorityParams& priority) {
    priority_ = priority;
  }
  const RequestPriorityParams& get_request_priority_params() const {
    return priority_;
  }
  const ClientId& client_id() const { return client_id_; }
  net::URLRequest* url_request() { return request_; }
  const net::URLRequest* url_request() const { return request_; }
  uint32 fifo_ordering() const { return fifo_ordering_; }
  void set_fifo_ordering(uint32 fifo_ordering) {
    fifo_ordering_ = fifo_ordering;
  }
  RequestClassification classification() const {
    return classification_;
  }
  void set_classification(RequestClassification classification) {
    classification_ = classification;
  }

 private:
  // ResourceMessageDelegate interface:
  bool OnMessageReceived(const IPC::Message& message) override {
    bool handled = true;
    IPC_BEGIN_MESSAGE_MAP(ScheduledResourceRequest, message)
      IPC_MESSAGE_HANDLER(ResourceHostMsg_DidChangePriority, DidChangePriority)
      IPC_MESSAGE_UNHANDLED(handled = false)
    IPC_END_MESSAGE_MAP()
    return handled;
  }

  // ResourceThrottle interface:
  void WillStartRequest(bool* defer) override {
    deferred_ = *defer = !ready_;
    time_deferred_ = base::TimeTicks::Now();
  }

  const char* GetNameForLogging() const override { return "ResourceScheduler"; }

  void DidChangePriority(int request_id, net::RequestPriority new_priority,
                         int intra_priority_value) {
    scheduler_->ReprioritizeRequest(this, new_priority, intra_priority_value);
  }

  const ClientId client_id_;
  const ResourceScheduler::ClientState client_state_on_creation_;
  net::URLRequest* request_;
  bool ready_;
  bool deferred_;
  RequestClassification classification_;
  ResourceScheduler* scheduler_;
  RequestPriorityParams priority_;
  uint32 fifo_ordering_;
  base::TimeTicks time_deferred_;

  DISALLOW_COPY_AND_ASSIGN(ScheduledResourceRequest);
};
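
// A ScheduledResourceRequest acts as the ResourceThrottle handed back to the
// ResourceDispatcherHostImpl: WillStartRequest() defers the request if the
// scheduler has not yet marked it ready, and a later Start() call resumes it
// and records how long it was deferred/throttled in the histograms above.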

bool ResourceScheduler::ScheduledResourceSorter::operator()(
    const ScheduledResourceRequest* a,
    const ScheduledResourceRequest* b) const {
  // Want the set to be ordered first by decreasing priority, then by
  // decreasing intra_priority.
  // ie. with (priority, intra_priority)
  // [(1, 0), (1, 0), (0, 100), (0, 0)]
  if (a->get_request_priority_params() != b->get_request_priority_params())
    return a->get_request_priority_params().GreaterThan(
        b->get_request_priority_params());

  // If priority/intra_priority is the same, fall back to fifo ordering.
  // std::multiset doesn't guarantee this until c++11.
  return a->fifo_ordering() < b->fifo_ordering();
}
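
// For instance, a (net::MEDIUM, 0) request sorts ahead of a (net::LOW, 100)
// request, and two (net::LOW, 0) requests keep the order in which they were
// inserted, since MakeFifoOrderingId() hands out increasing IDs.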

void ResourceScheduler::RequestQueue::Insert(
    ScheduledResourceRequest* request) {
  DCHECK(!ContainsKey(pointers_, request));
  request->set_fifo_ordering(MakeFifoOrderingId());
  pointers_[request] = queue_.insert(request);
}

// Each client represents a tab.
class ResourceScheduler::Client {
 public:
  explicit Client(ResourceScheduler* scheduler,
                  bool is_visible,
                  bool is_audible)
      : is_audible_(is_audible),
        is_visible_(is_visible),
        is_loaded_(false),
        is_paused_(false),
        has_body_(false),
        using_spdy_proxy_(false),
        load_started_time_(base::TimeTicks::Now()),
        scheduler_(scheduler),
        in_flight_delayable_count_(0),
        total_layout_blocking_count_(0),
        throttle_state_(ResourceScheduler::THROTTLED) {}

  ~Client() {
    // Update to default state and pause to ensure the scheduler has a
    // correct count of relevant types of clients.
    is_visible_ = false;
    is_audible_ = false;
    is_paused_ = true;
    UpdateThrottleState();
  }

  void ScheduleRequest(
      net::URLRequest* url_request,
      ScheduledResourceRequest* request) {
    if (ShouldStartRequest(request) == START_REQUEST)
      StartRequest(request);
    else
      pending_requests_.Insert(request);
    SetRequestClassification(request, ClassifyRequest(request));
  }

  void RemoveRequest(ScheduledResourceRequest* request) {
    if (pending_requests_.IsQueued(request)) {
      pending_requests_.Erase(request);
      DCHECK(!ContainsKey(in_flight_requests_, request));
    } else {
      EraseInFlightRequest(request);

      // Removing this request may have freed up another to load.
      LoadAnyStartablePendingRequests();
    }
  }

  RequestSet StartAndRemoveAllRequests() {
    // First start any pending requests so that they will be moved into
    // in_flight_requests_. This may exceed the limits
    // kMaxNumDelayableRequestsPerClient, kMaxNumDelayableRequestsPerHost and
    // kMaxNumThrottledRequestsPerClient, so this method must not do anything
    // that depends on those limits before calling ClearInFlightRequests()
    // below.
    while (!pending_requests_.IsEmpty()) {
      ScheduledResourceRequest* request =
          *pending_requests_.GetNextHighestIterator();
      pending_requests_.Erase(request);
      // StartRequest() may modify pending_requests_. TODO(ricea): Does it?
      StartRequest(request);
    }
    RequestSet unowned_requests;
    for (RequestSet::iterator it = in_flight_requests_.begin();
         it != in_flight_requests_.end(); ++it) {
      unowned_requests.insert(*it);
      (*it)->set_classification(NORMAL_REQUEST);
    }
    ClearInFlightRequests();
    return unowned_requests;
  }

  bool is_active() const { return is_visible_ || is_audible_; }

  bool is_loaded() const { return is_loaded_; }

  bool is_visible() const { return is_visible_; }

  void OnAudibilityChanged(bool is_audible) {
    UpdateState(is_audible, &is_audible_);
  }

  void OnVisibilityChanged(bool is_visible) {
    UpdateState(is_visible, &is_visible_);
  }

  // Function to update any client state variable used to determine whether a
  // Client is active or background. Used for is_visible_ and is_audible_.
  void UpdateState(bool new_state, bool* current_state) {
    bool was_active = is_active();
    *current_state = new_state;
    if (was_active == is_active())
      return;
    last_active_switch_time_ = base::TimeTicks::Now();
    UpdateThrottleState();
  }

  void OnLoadingStateChanged(bool is_loaded) {
    if (is_loaded == is_loaded_) {
      return;
    }
    is_loaded_ = is_loaded;
    UpdateThrottleState();
    if (!is_loaded_) {
      load_started_time_ = base::TimeTicks::Now();
      last_active_switch_time_ = base::TimeTicks();
      return;
    }
    base::TimeTicks cur_time = base::TimeTicks::Now();
    const char* num_clients =
        GetNumClientsString(scheduler_->client_map_.size());
    const char* client_category = "Other";
    if (last_active_switch_time_.is_null()) {
      client_category = is_active() ? "Active" : "Background";
    } else if (is_active()) {
      base::TimeDelta time_since_active = cur_time - last_active_switch_time_;
      PostHistogram("ClientLoadedTime", "Other.SwitchedToActive", NULL,
                    time_since_active);
      PostHistogram("ClientLoadedTime", "Other.SwitchedToActive", num_clients,
                    time_since_active);
    }
    base::TimeDelta time_since_load_started = cur_time - load_started_time_;
    PostHistogram("ClientLoadedTime", client_category, NULL,
                  time_since_load_started);
    PostHistogram("ClientLoadedTime", client_category, num_clients,
                  time_since_load_started);
    // TODO(aiolos): The above histograms will not take main resource load time
    // into account once PlzNavigate ships. The ResourceScheduler will also
    // load the main resources without a Client with the current logic.
    // Find a way to fix both of these issues.
  }

  void SetPaused() {
    is_paused_ = true;
    UpdateThrottleState();
  }

  void UpdateThrottleState() {
    ClientThrottleState old_throttle_state = throttle_state_;
    if (!scheduler_->should_throttle()) {
      SetThrottleState(UNTHROTTLED);
    } else if (is_active() && !is_loaded_) {
      SetThrottleState(ACTIVE_AND_LOADING);
    } else if (is_active()) {
      SetThrottleState(UNTHROTTLED);
    } else if (is_paused_) {
      SetThrottleState(PAUSED);
    } else if (!scheduler_->active_clients_loaded()) {
      SetThrottleState(THROTTLED);
    } else if (is_loaded_ && scheduler_->should_coalesce()) {
      SetThrottleState(COALESCED);
    } else if (!is_active()) {
      SetThrottleState(UNTHROTTLED);
    }

    if (throttle_state_ == old_throttle_state) {
      return;
    }

    if (throttle_state_ == ACTIVE_AND_LOADING) {
      scheduler_->IncrementActiveClientsLoading();
    } else if (old_throttle_state == ACTIVE_AND_LOADING) {
      scheduler_->DecrementActiveClientsLoading();
    }
    if (throttle_state_ == COALESCED) {
      scheduler_->IncrementCoalescedClients();
    } else if (old_throttle_state == COALESCED) {
      scheduler_->DecrementCoalescedClients();
    }
  }
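
  // Note: the ordering of the else-if chain at the top of UpdateThrottleState()
  // means an active (visible or audible) client can only end up
  // ACTIVE_AND_LOADING or UNTHROTTLED; PAUSED, THROTTLED and COALESCED are
  // only reachable for background clients.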

  void OnNavigate() {
    has_body_ = false;
    is_loaded_ = false;
  }

  void OnWillInsertBody() {
    has_body_ = true;
    LoadAnyStartablePendingRequests();
  }

  void OnReceivedSpdyProxiedHttpResponse() {
    if (!using_spdy_proxy_) {
      using_spdy_proxy_ = true;
      LoadAnyStartablePendingRequests();
    }
  }

  void ReprioritizeRequest(ScheduledResourceRequest* request,
                           RequestPriorityParams old_priority_params,
                           RequestPriorityParams new_priority_params) {
    request->url_request()->SetPriority(new_priority_params.priority);
    request->set_request_priority_params(new_priority_params);
    if (!pending_requests_.IsQueued(request)) {
      DCHECK(ContainsKey(in_flight_requests_, request));
      // The priority of the request and priority support of the server may
      // have changed, so update the delayable count.
      SetRequestClassification(request, ClassifyRequest(request));
      // Request has already started.
      return;
    }

    pending_requests_.Erase(request);
    pending_requests_.Insert(request);

    if (new_priority_params.priority > old_priority_params.priority) {
      // Check if this request is now able to load at its new priority.
      LoadAnyStartablePendingRequests();
    }
  }

  // Called on Client creation, when a Client changes user observability,
  // possibly when all observable Clients have finished loading, and
  // possibly when this Client has finished loading.
  //
  // Client became observable.
  //   any state -> UNTHROTTLED
  // Client is unobservable, but all observable clients finished loading.
  //   THROTTLED -> UNTHROTTLED
  // Non-observable client finished loading.
  //   THROTTLED || UNTHROTTLED -> COALESCED
  // Non-observable client, an observable client starts loading.
  //   COALESCED -> THROTTLED
  // A COALESCED client will transition into UNTHROTTLED when the network is
  // woken up by a heartbeat and then transition back into COALESCED.
  void SetThrottleState(ResourceScheduler::ClientThrottleState throttle_state) {
    if (throttle_state == throttle_state_) {
      return;
    }
    throttle_state_ = throttle_state;
    if (throttle_state_ != PAUSED) {
      is_paused_ = false;
    }
    LoadAnyStartablePendingRequests();
    // TODO(aiolos): Stop any started but not in-flight requests when
    // switching to stricter throttle state?
  }

  ResourceScheduler::ClientThrottleState throttle_state() const {
    return throttle_state_;
  }

  void LoadCoalescedRequests() {
    if (throttle_state_ != COALESCED) {
      return;
    }
    if (scheduler_->active_clients_loaded()) {
      SetThrottleState(UNTHROTTLED);
    } else {
      SetThrottleState(THROTTLED);
    }
    LoadAnyStartablePendingRequests();
    SetThrottleState(COALESCED);
  }

 private:
  enum ShouldStartReqResult {
    DO_NOT_START_REQUEST_AND_STOP_SEARCHING,
    DO_NOT_START_REQUEST_AND_KEEP_SEARCHING,
    START_REQUEST,
  };

  void InsertInFlightRequest(ScheduledResourceRequest* request) {
    in_flight_requests_.insert(request);
    SetRequestClassification(request, ClassifyRequest(request));
  }

  void EraseInFlightRequest(ScheduledResourceRequest* request) {
    size_t erased = in_flight_requests_.erase(request);
    DCHECK_EQ(1u, erased);
    // Clear any special state that we were tracking for this request.
    SetRequestClassification(request, NORMAL_REQUEST);
  }

  void ClearInFlightRequests() {
    in_flight_requests_.clear();
    in_flight_delayable_count_ = 0;
    total_layout_blocking_count_ = 0;
  }

  size_t CountRequestsWithClassification(
      const RequestClassification classification, const bool include_pending) {
    size_t classification_request_count = 0;
    for (RequestSet::const_iterator it = in_flight_requests_.begin();
         it != in_flight_requests_.end(); ++it) {
      if ((*it)->classification() == classification)
        classification_request_count++;
    }
    if (include_pending) {
      for (RequestQueue::NetQueue::const_iterator
           it = pending_requests_.GetNextHighestIterator();
           it != pending_requests_.End(); ++it) {
        if ((*it)->classification() == classification)
          classification_request_count++;
      }
    }
    return classification_request_count;
  }

  void SetRequestClassification(ScheduledResourceRequest* request,
                                RequestClassification classification) {
    RequestClassification old_classification = request->classification();
    if (old_classification == classification)
      return;

    if (old_classification == IN_FLIGHT_DELAYABLE_REQUEST)
      in_flight_delayable_count_--;
    if (old_classification == LAYOUT_BLOCKING_REQUEST)
      total_layout_blocking_count_--;

    if (classification == IN_FLIGHT_DELAYABLE_REQUEST)
      in_flight_delayable_count_++;
    if (classification == LAYOUT_BLOCKING_REQUEST)
      total_layout_blocking_count_++;

    request->set_classification(classification);
    DCHECK_EQ(
        CountRequestsWithClassification(IN_FLIGHT_DELAYABLE_REQUEST, false),
        in_flight_delayable_count_);
    DCHECK_EQ(CountRequestsWithClassification(LAYOUT_BLOCKING_REQUEST, true),
              total_layout_blocking_count_);
  }

  RequestClassification ClassifyRequest(ScheduledResourceRequest* request) {
    // If a request is already marked as layout-blocking make sure to keep the
    // classification across redirects unless the priority was lowered.
    if (request->classification() == LAYOUT_BLOCKING_REQUEST &&
        request->url_request()->priority() > net::LOW) {
      return LAYOUT_BLOCKING_REQUEST;
    }

    if (!has_body_ && request->url_request()->priority() > net::LOW)
      return LAYOUT_BLOCKING_REQUEST;

    if (request->url_request()->priority() < net::LOW) {
      net::HostPortPair host_port_pair =
          net::HostPortPair::FromURL(request->url_request()->url());
      net::HttpServerProperties& http_server_properties =
          *request->url_request()->context()->http_server_properties();
      if (!http_server_properties.SupportsRequestPriority(host_port_pair) &&
          ContainsKey(in_flight_requests_, request)) {
        return IN_FLIGHT_DELAYABLE_REQUEST;
      }
    }
    return NORMAL_REQUEST;
  }

  bool ShouldKeepSearching(
      const net::HostPortPair& active_request_host) const {
    size_t same_host_count = 0;
    for (RequestSet::const_iterator it = in_flight_requests_.begin();
         it != in_flight_requests_.end(); ++it) {
      net::HostPortPair host_port_pair =
          net::HostPortPair::FromURL((*it)->url_request()->url());
      if (active_request_host.Equals(host_port_pair)) {
        ++same_host_count;
        if (same_host_count >= kMaxNumDelayableRequestsPerHost)
          return true;
      }
    }
    return false;
  }

  void StartRequest(ScheduledResourceRequest* request) {
    InsertInFlightRequest(request);
    request->Start();
  }

  // ShouldStartRequest is the main scheduling algorithm.
  //
  // Requests are evaluated on five attributes:
  //
  // 1. Non-delayable requests:
  //   * Synchronous requests.
  //   * Non-HTTP[S] requests.
  //
  // 2. Requests to request-priority-capable origin servers.
  //
  // 3. High-priority requests:
  //   * Higher priority requests (>= net::LOW).
  //
  // 4. Layout-blocking requests:
  //   * High-priority requests (> net::LOW) initiated before the renderer has
  //     a <body>.
  //
  // 5. Low priority requests
  //
  // The following rules are followed:
  //
  // All types of requests:
  //   * If an outstanding request limit is in place, only that number
  //     of requests may be in flight for a single client at the same time.
  //
  // ACTIVE_AND_LOADING and UNTHROTTLED Clients follow these rules:
  //   * Non-delayable, High-priority and request-priority capable requests are
  //     issued immediately.
  //   * Low priority requests are delayable.
  //   * While layout-blocking requests are loading or the body tag has not
  //     yet been parsed, limit the number of delayable requests that may be
  //     in flight (to 1 by default, or to zero if there's an outstanding
  //     request limit in place).
  //   * If no high priority or layout-blocking requests are in flight, start
  //     loading delayable requests.
  //   * Never exceed 10 delayable requests in flight per client.
  //   * Never exceed 6 delayable requests for a given host.
  //
  // THROTTLED Clients follow these rules:
  //   * Non-delayable and request-priority-capable requests are issued
  //     immediately.
  //   * At most one non-request-priority-capable request will be issued per
  //     THROTTLED Client.
  //   * If no high priority requests are in flight, start loading low priority
  //     requests.
  //
  // COALESCED Clients never load requests, with the following exceptions:
  //   * Non-delayable requests are issued immediately.
  //   * On a (currently 5 second) heart beat, they load all requests as an
  //     UNTHROTTLED Client, and then return to the COALESCED state.
  //   * When an active Client makes a request, they are THROTTLED until the
  //     active Client finishes loading.
  ShouldStartReqResult ShouldStartRequest(
      ScheduledResourceRequest* request) const {
    const net::URLRequest& url_request = *request->url_request();
    // Synchronous requests could block the entire renderer, which could impact
    // user-observable Clients.
    if (!ResourceRequestInfo::ForRequest(&url_request)->IsAsync()) {
      return START_REQUEST;
    }

    // TODO(simonjam): This may end up causing disk contention. We should
    // experiment with throttling if that happens.
    // TODO(aiolos): We probably want to Coalesce these as well to avoid
    // waking up the disk.
    if (!url_request.url().SchemeIsHTTPOrHTTPS()) {
      return START_REQUEST;
    }

    if (throttle_state_ == COALESCED) {
      return DO_NOT_START_REQUEST_AND_STOP_SEARCHING;
    }

    if (using_spdy_proxy_ && url_request.url().SchemeIs(url::kHttpScheme)) {
      return START_REQUEST;
    }

    // Implementation of the kRequestLimitFieldTrial.
    if (scheduler_->limit_outstanding_requests() &&
        in_flight_requests_.size() >= scheduler_->outstanding_request_limit()) {
      return DO_NOT_START_REQUEST_AND_STOP_SEARCHING;
    }

    net::HostPortPair host_port_pair =
        net::HostPortPair::FromURL(url_request.url());
    net::HttpServerProperties& http_server_properties =
        *url_request.context()->http_server_properties();

    // TODO(willchan): We should really improve this algorithm as described in
    // crbug.com/164101. Also, theoretically we should not count a
    // request-priority capable request against the delayable requests limit.
    if (http_server_properties.SupportsRequestPriority(host_port_pair)) {
      return START_REQUEST;
    }

    if (throttle_state_ == THROTTLED &&
        in_flight_requests_.size() >= kMaxNumThrottledRequestsPerClient) {
      // There may still be request-priority-capable requests that should be
      // issued.
      return DO_NOT_START_REQUEST_AND_KEEP_SEARCHING;
    }

    // High-priority and layout-blocking requests.
    if (url_request.priority() >= net::LOW) {
      return START_REQUEST;
    }

    if (in_flight_delayable_count_ >= kMaxNumDelayableRequestsPerClient) {
      return DO_NOT_START_REQUEST_AND_STOP_SEARCHING;
    }

    if (ShouldKeepSearching(host_port_pair)) {
      // There may be other requests for other hosts we'd allow,
      // so keep checking.
      return DO_NOT_START_REQUEST_AND_KEEP_SEARCHING;
    }

    bool have_immediate_requests_in_flight =
        in_flight_requests_.size() > in_flight_delayable_count_;
    if (have_immediate_requests_in_flight &&
        (!has_body_ || total_layout_blocking_count_ != 0) &&
        // Do not allow a low priority request through in parallel if
        // we are in a limit field trial.
        (scheduler_->limit_outstanding_requests() ||
         in_flight_delayable_count_ != 0)) {
      return DO_NOT_START_REQUEST_AND_STOP_SEARCHING;
    }

    return START_REQUEST;
  }
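
  // Illustrative walk-through of ShouldStartRequest() (assuming the client is
  // UNTHROTTLED and no outstanding-request limit trial is active): an async
  // IDLE-priority image request to a host without request-priority support,
  // issued while a layout-blocking request and one delayable request are
  // already in flight, falls through every earlier check and hits the final
  // have_immediate_requests_in_flight test, so it returns
  // DO_NOT_START_REQUEST_AND_STOP_SEARCHING.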

  void LoadAnyStartablePendingRequests() {
    // We iterate through all the pending requests, starting with the highest
    // priority one. For each entry, one of three things can happen:
    // 1) We start the request, remove it from the list, and keep checking.
    // 2) We do NOT start the request, but ShouldStartRequest() signals us that
    //    there may be room for other requests, so we keep checking and leave
    //    the previous request still in the list.
    // 3) We do not start the request, same as above, but ShouldStartRequest()
    //    tells us there's no point in checking any further requests.
    RequestQueue::NetQueue::iterator request_iter =
        pending_requests_.GetNextHighestIterator();

    while (request_iter != pending_requests_.End()) {
      ScheduledResourceRequest* request = *request_iter;
      ShouldStartReqResult query_result = ShouldStartRequest(request);

      if (query_result == START_REQUEST) {
        pending_requests_.Erase(request);
        StartRequest(request);

        // StartRequest can modify the pending list, so we (re)start evaluation
        // from the currently highest priority request. Avoid copying a singular
        // iterator, which would trigger undefined behavior.
        if (pending_requests_.GetNextHighestIterator() ==
            pending_requests_.End())
          break;
        request_iter = pending_requests_.GetNextHighestIterator();
      } else if (query_result == DO_NOT_START_REQUEST_AND_KEEP_SEARCHING) {
        ++request_iter;
        continue;
      } else {
        DCHECK(query_result == DO_NOT_START_REQUEST_AND_STOP_SEARCHING);
        break;
      }
    }
  }

  bool is_audible_;
  bool is_visible_;
  bool is_loaded_;
  bool is_paused_;
  bool has_body_;
  bool using_spdy_proxy_;
  RequestQueue pending_requests_;
  RequestSet in_flight_requests_;
  base::TimeTicks load_started_time_;
  // The last time the client switched state between active and background.
  base::TimeTicks last_active_switch_time_;
  ResourceScheduler* scheduler_;
  // The number of delayable in-flight requests.
  size_t in_flight_delayable_count_;
  // The number of layout-blocking in-flight requests.
  size_t total_layout_blocking_count_;
  ResourceScheduler::ClientThrottleState throttle_state_;
};

ResourceScheduler::ResourceScheduler()
    : should_coalesce_(false),
      should_throttle_(false),
      active_clients_loading_(0),
      coalesced_clients_(0),
      limit_outstanding_requests_(false),
      outstanding_request_limit_(0),
      coalescing_timer_(new base::Timer(true /* retain_user_task */,
                                        true /* is_repeating */)) {
  std::string throttling_trial_group =
      base::FieldTrialList::FindFullName(kThrottleCoalesceFieldTrial);
  if (throttling_trial_group == kThrottleCoalesceFieldTrialThrottle) {
    should_throttle_ = true;
  } else if (throttling_trial_group == kThrottleCoalesceFieldTrialCoalesce) {
    should_coalesce_ = true;
    should_throttle_ = true;
  }

  std::string outstanding_limit_trial_group =
      base::FieldTrialList::FindFullName(kRequestLimitFieldTrial);
  std::vector<std::string> split_group(
      base::SplitString(outstanding_limit_trial_group, "=",
                        base::KEEP_WHITESPACE, base::SPLIT_WANT_ALL));
  int outstanding_limit = 0;
  if (split_group.size() == 2 &&
      split_group[0] == kRequestLimitFieldTrialGroupPrefix &&
      base::StringToInt(split_group[1], &outstanding_limit) &&
      outstanding_limit > 0) {
    limit_outstanding_requests_ = true;
    outstanding_request_limit_ = outstanding_limit;
  }
}
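
// The "OutstandingRequestLimiting" trial group name parsed above is expected
// to have the form "Limit=<N>"; for example (hypothetical group name),
// "Limit=10" enables the limit and sets |outstanding_request_limit_| to 10.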

ResourceScheduler::~ResourceScheduler() {
  DCHECK(unowned_requests_.empty());
  DCHECK(client_map_.empty());
}

void ResourceScheduler::SetThrottleOptionsForTesting(bool should_throttle,
                                                     bool should_coalesce) {
  should_coalesce_ = should_coalesce;
  should_throttle_ = should_throttle;
  OnLoadingActiveClientsStateChangedForAllClients();
}

ResourceScheduler::ClientThrottleState
ResourceScheduler::GetClientStateForTesting(int child_id, int route_id) {
  Client* client = GetClient(child_id, route_id);
  DCHECK(client);
  return client->throttle_state();
}

scoped_ptr<ResourceThrottle> ResourceScheduler::ScheduleRequest(
    int child_id,
    int route_id,
    net::URLRequest* url_request) {
  DCHECK(CalledOnValidThread());
  ClientId client_id = MakeClientId(child_id, route_id);
  scoped_ptr<ScheduledResourceRequest> request(new ScheduledResourceRequest(
      client_id,
      url_request,
      this,
      RequestPriorityParams(url_request->priority(), 0)));

  ClientMap::iterator it = client_map_.find(client_id);
  if (it == client_map_.end()) {
    // There are several ways this could happen:
    // 1. <a ping> requests don't have a route_id.
    // 2. Most unittests don't send the IPCs needed to register Clients.
    // 3. The tab is closed while a RequestResource IPC is in flight.
    unowned_requests_.insert(request.get());
    request->Start();
    return request.Pass();
  }

  Client* client = it->second;
  client->ScheduleRequest(url_request, request.get());
  return request.Pass();
}

void ResourceScheduler::RemoveRequest(ScheduledResourceRequest* request) {
  DCHECK(CalledOnValidThread());
  if (ContainsKey(unowned_requests_, request)) {
    unowned_requests_.erase(request);
    return;
  }

  ClientMap::iterator client_it = client_map_.find(request->client_id());
  if (client_it == client_map_.end()) {
    return;
  }

  Client* client = client_it->second;
  client->RemoveRequest(request);
}

void ResourceScheduler::OnClientCreated(int child_id,
                                        int route_id,
                                        bool is_visible,
                                        bool is_audible) {
  DCHECK(CalledOnValidThread());
  ClientId client_id = MakeClientId(child_id, route_id);
  DCHECK(!ContainsKey(client_map_, client_id));

  Client* client = new Client(this, is_visible, is_audible);
  client_map_[client_id] = client;

  client->UpdateThrottleState();
}

void ResourceScheduler::OnClientDeleted(int child_id, int route_id) {
  DCHECK(CalledOnValidThread());
  ClientId client_id = MakeClientId(child_id, route_id);
  DCHECK(ContainsKey(client_map_, client_id));
  ClientMap::iterator it = client_map_.find(client_id);
  if (it == client_map_.end())
    return;

  Client* client = it->second;
  // ResourceDispatcherHost cancels all requests except for cross-renderer
  // navigations, async revalidations and detachable requests after
  // OnClientDeleted() returns.
  RequestSet client_unowned_requests = client->StartAndRemoveAllRequests();
  for (RequestSet::iterator it = client_unowned_requests.begin();
       it != client_unowned_requests.end(); ++it) {
    unowned_requests_.insert(*it);
  }

  delete client;
  client_map_.erase(it);
}

void ResourceScheduler::OnLoadingStateChanged(int child_id,
                                              int route_id,
                                              bool is_loaded) {
  Client* client = GetClient(child_id, route_id);
  DCHECK(client);
  client->OnLoadingStateChanged(is_loaded);
}

void ResourceScheduler::OnVisibilityChanged(int child_id,
                                            int route_id,
                                            bool is_visible) {
  Client* client = GetClient(child_id, route_id);
  DCHECK(client);
  client->OnVisibilityChanged(is_visible);
}

void ResourceScheduler::OnAudibilityChanged(int child_id,
                                            int route_id,
                                            bool is_audible) {
  Client* client = GetClient(child_id, route_id);
  // We might get this call after the client has been deleted.
  if (!client)
    return;
  client->OnAudibilityChanged(is_audible);
}

void ResourceScheduler::OnNavigate(int child_id, int route_id) {
  DCHECK(CalledOnValidThread());
  ClientId client_id = MakeClientId(child_id, route_id);

  ClientMap::iterator it = client_map_.find(client_id);
  if (it == client_map_.end()) {
    // The client was likely deleted shortly before we received this IPC.
    return;
  }

  Client* client = it->second;
  client->OnNavigate();
}

void ResourceScheduler::OnWillInsertBody(int child_id, int route_id) {
  DCHECK(CalledOnValidThread());
  ClientId client_id = MakeClientId(child_id, route_id);

  ClientMap::iterator it = client_map_.find(client_id);
  if (it == client_map_.end()) {
    // The client was likely deleted shortly before we received this IPC.
    return;
  }

  Client* client = it->second;
  client->OnWillInsertBody();
}

void ResourceScheduler::OnReceivedSpdyProxiedHttpResponse(
    int child_id,
    int route_id) {
  DCHECK(CalledOnValidThread());
  ClientId client_id = MakeClientId(child_id, route_id);

  ClientMap::iterator client_it = client_map_.find(client_id);
  if (client_it == client_map_.end()) {
    return;
  }

  Client* client = client_it->second;
  client->OnReceivedSpdyProxiedHttpResponse();
}

bool ResourceScheduler::IsClientVisibleForTesting(int child_id, int route_id) {
  Client* client = GetClient(child_id, route_id);
  DCHECK(client);
  return client->is_visible();
}

bool ResourceScheduler::HasLoadingClients() const {
  for (const auto& client : client_map_) {
    if (!client.second->is_loaded())
      return true;
  }
  return false;
}

ResourceScheduler::Client* ResourceScheduler::GetClient(int child_id,
                                                        int route_id) {
  ClientId client_id = MakeClientId(child_id, route_id);
  ClientMap::iterator client_it = client_map_.find(client_id);
  if (client_it == client_map_.end()) {
    return NULL;
  }
  return client_it->second;
}

void ResourceScheduler::DecrementActiveClientsLoading() {
  DCHECK_NE(0u, active_clients_loading_);
  --active_clients_loading_;
  DCHECK_EQ(active_clients_loading_, CountActiveClientsLoading());
  if (active_clients_loading_ == 0) {
    OnLoadingActiveClientsStateChangedForAllClients();
  }
}

void ResourceScheduler::IncrementActiveClientsLoading() {
  ++active_clients_loading_;
  DCHECK_EQ(active_clients_loading_, CountActiveClientsLoading());
  if (active_clients_loading_ == 1) {
    OnLoadingActiveClientsStateChangedForAllClients();
  }
}

void ResourceScheduler::OnLoadingActiveClientsStateChangedForAllClients() {
  ClientMap::iterator client_it = client_map_.begin();
  while (client_it != client_map_.end()) {
    Client* client = client_it->second;
    client->UpdateThrottleState();
    ++client_it;
  }
}

size_t ResourceScheduler::CountActiveClientsLoading() const {
  size_t active_and_loading = 0;
  ClientMap::const_iterator client_it = client_map_.begin();
  while (client_it != client_map_.end()) {
    Client* client = client_it->second;
    if (client->throttle_state() == ACTIVE_AND_LOADING) {
      ++active_and_loading;
    }
    ++client_it;
  }
  return active_and_loading;
}

void ResourceScheduler::IncrementCoalescedClients() {
  ++coalesced_clients_;
  DCHECK(should_coalesce_);
  DCHECK_EQ(coalesced_clients_, CountCoalescedClients());
  if (coalesced_clients_ == 1) {
    coalescing_timer_->Start(
        FROM_HERE,
        base::TimeDelta::FromMilliseconds(kCoalescedTimerPeriod),
        base::Bind(&ResourceScheduler::LoadCoalescedRequests,
                   base::Unretained(this)));
  }
}
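
// Because |coalescing_timer_| was constructed (in the ResourceScheduler
// constructor) as a repeating timer that retains its task, the Start() call
// above fires LoadCoalescedRequests() every kCoalescedTimerPeriod (5000)
// milliseconds for as long as at least one client is COALESCED;
// DecrementCoalescedClients() below stops it once the count returns to zero.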

void ResourceScheduler::DecrementCoalescedClients() {
  DCHECK(should_coalesce_);
  DCHECK_NE(0U, coalesced_clients_);
  --coalesced_clients_;
  DCHECK_EQ(coalesced_clients_, CountCoalescedClients());
  if (coalesced_clients_ == 0) {
    coalescing_timer_->Stop();
  }
}

size_t ResourceScheduler::CountCoalescedClients() const {
  DCHECK(should_coalesce_);
  size_t coalesced_clients = 0;
  ClientMap::const_iterator client_it = client_map_.begin();
  while (client_it != client_map_.end()) {
    Client* client = client_it->second;
    if (client->throttle_state() == COALESCED) {
      ++coalesced_clients;
    }
    ++client_it;
  }
  // Return the freshly computed count (not the |coalesced_clients_| member)
  // so the DCHECK_EQ callers above actually verify the cached value.
  return coalesced_clients;
}

void ResourceScheduler::LoadCoalescedRequests() {
  DCHECK(should_coalesce_);
  ClientMap::iterator client_it = client_map_.begin();
  while (client_it != client_map_.end()) {
    Client* client = client_it->second;
    client->LoadCoalescedRequests();
    ++client_it;
  }
}

ResourceScheduler::ClientState ResourceScheduler::GetClientState(
    ClientId client_id) const {
  ClientMap::const_iterator client_it = client_map_.find(client_id);
  if (client_it == client_map_.end())
    return UNKNOWN;
  return client_it->second->is_active() ? ACTIVE : BACKGROUND;
}

void ResourceScheduler::ReprioritizeRequest(ScheduledResourceRequest* request,
                                            net::RequestPriority new_priority,
                                            int new_intra_priority_value) {
  if (request->url_request()->load_flags() & net::LOAD_IGNORE_LIMITS) {
    // We should not be re-prioritizing requests with the
    // IGNORE_LIMITS flag.
    NOTREACHED();
    return;
  }
  RequestPriorityParams new_priority_params(new_priority,
                                            new_intra_priority_value);
  RequestPriorityParams old_priority_params =
      request->get_request_priority_params();

  DCHECK(old_priority_params != new_priority_params);

  ClientMap::iterator client_it = client_map_.find(request->client_id());
  if (client_it == client_map_.end()) {
    // The client was likely deleted shortly before we received this IPC.
    request->url_request()->SetPriority(new_priority_params.priority);
    request->set_request_priority_params(new_priority_params);
    return;
  }

  if (old_priority_params == new_priority_params)
    return;

  Client* client = client_it->second;
  client->ReprioritizeRequest(
      request, old_priority_params, new_priority_params);
}

ResourceScheduler::ClientId ResourceScheduler::MakeClientId(
    int child_id, int route_id) {
  return (static_cast<ResourceScheduler::ClientId>(child_id) << 32) | route_id;
}
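
// ClientId packs the renderer's child process ID into the upper 32 bits and
// the route ID into the lower 32 bits; for example, (child_id = 2,
// route_id = 5) yields 0x0000000200000005.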

}  // namespace content