// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "net/socket/client_socket_pool_base.h"

#include "base/bind.h"
#include "base/compiler_specific.h"
#include "base/format_macros.h"
#include "base/logging.h"
#include "base/message_loop/message_loop.h"
#include "base/stl_util.h"
#include "base/strings/string_util.h"
#include "base/time/time.h"
#include "base/values.h"
#include "net/base/net_errors.h"
#include "net/log/net_log.h"

using base::TimeDelta;
namespace net {

namespace {

// Indicate whether we should enable idle socket cleanup timer. When timer is
// disabled, sockets are closed next time a socket request is made.
bool g_cleanup_timer_enabled = true;

// The timeout value, in seconds, used to clean up idle sockets that can't be
// reused.
//
// Note: It's important to close idle sockets that have received data as soon
// as possible because the received data may cause BSOD on Windows XP under
// some conditions. See http://crbug.com/4606.
const int kCleanupInterval = 10;  // DO NOT INCREASE THIS TIMEOUT.

// Indicate whether or not we should establish a new transport layer connection
// after a certain timeout has passed without receiving an ACK.
bool g_connect_backup_jobs_enabled = true;

}  // namespace
ConnectJob::ConnectJob(const std::string& group_name,
                       base::TimeDelta timeout_duration,
                       RequestPriority priority,
                       Delegate* delegate,
                       const BoundNetLog& net_log)
    : group_name_(group_name),
      timeout_duration_(timeout_duration),
      priority_(priority),
      delegate_(delegate),
      net_log_(net_log) {
  DCHECK(!group_name.empty());
  DCHECK(delegate);
  net_log.BeginEvent(NetLog::TYPE_SOCKET_POOL_CONNECT_JOB,
                     NetLog::StringCallback("group_name", &group_name_));
}
ConnectJob::~ConnectJob() {
  net_log().EndEvent(NetLog::TYPE_SOCKET_POOL_CONNECT_JOB);
}
scoped_ptr<StreamSocket> ConnectJob::PassSocket() {
  return socket_.Pass();
}
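
// Starts the connect timer (when a timeout was supplied), then hands off to
// ConnectInternal(). A synchronous result is logged immediately;
// ERR_IO_PENDING means the result will be delivered to the delegate later.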
int ConnectJob::Connect() {
  if (timeout_duration_ != base::TimeDelta())
    timer_.Start(FROM_HERE, timeout_duration_, this, &ConnectJob::OnTimeout);

  LogConnectStart();

  int rv = ConnectInternal();

  if (rv != ERR_IO_PENDING) {
    LogConnectCompletion(rv);
  }

  return rv;
}
void ConnectJob::SetSocket(scoped_ptr<StreamSocket> socket) {
  if (socket) {
    net_log().AddEvent(NetLog::TYPE_CONNECT_JOB_SET_SOCKET,
                       socket->NetLog().source().ToEventParametersCallback());
  }
  socket_ = socket.Pass();
}
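
// Clears |delegate_| before invoking it, since the delegate takes ownership
// of |this| and may delete the job from inside the callback.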
void ConnectJob::NotifyDelegateOfCompletion(int rv) {
  // The delegate will own |this|.
  Delegate* delegate = delegate_;
  delegate_ = NULL;

  LogConnectCompletion(rv);
  delegate->OnConnectJobComplete(rv, this);
}
void ConnectJob::ResetTimer(base::TimeDelta remaining_time) {
  timer_.Stop();
  timer_.Start(FROM_HERE, remaining_time, this, &ConnectJob::OnTimeout);
}
void ConnectJob::LogConnectStart() {
  connect_timing_.connect_start = base::TimeTicks::Now();
  net_log().BeginEvent(NetLog::TYPE_SOCKET_POOL_CONNECT_JOB_CONNECT);
}
void ConnectJob::LogConnectCompletion(int net_error) {
  connect_timing_.connect_end = base::TimeTicks::Now();
  net_log().EndEventWithNetErrorCode(
      NetLog::TYPE_SOCKET_POOL_CONNECT_JOB_CONNECT, net_error);
}
void ConnectJob::OnTimeout() {
  // Make sure the socket is NULL before calling into |delegate|.
  SetSocket(scoped_ptr<StreamSocket>());

  net_log_.AddEvent(NetLog::TYPE_SOCKET_POOL_CONNECT_JOB_TIMED_OUT);

  NotifyDelegateOfCompletion(ERR_TIMED_OUT);
}
namespace internal {

ClientSocketPoolBaseHelper::Request::Request(
    ClientSocketHandle* handle,
    const CompletionCallback& callback,
    RequestPriority priority,
    bool ignore_limits,
    Flags flags,
    const BoundNetLog& net_log)
    : handle_(handle),
      callback_(callback),
      priority_(priority),
      ignore_limits_(ignore_limits),
      flags_(flags),
      net_log_(net_log) {
  if (ignore_limits_)
    DCHECK_EQ(priority_, MAXIMUM_PRIORITY);
}

ClientSocketPoolBaseHelper::Request::~Request() {}
ClientSocketPoolBaseHelper::ClientSocketPoolBaseHelper(
    HigherLayeredPool* pool,
    int max_sockets,
    int max_sockets_per_group,
    base::TimeDelta unused_idle_socket_timeout,
    base::TimeDelta used_idle_socket_timeout,
    ConnectJobFactory* connect_job_factory)
    : idle_socket_count_(0),
      connecting_socket_count_(0),
      handed_out_socket_count_(0),
      max_sockets_(max_sockets),
      max_sockets_per_group_(max_sockets_per_group),
      use_cleanup_timer_(g_cleanup_timer_enabled),
      unused_idle_socket_timeout_(unused_idle_socket_timeout),
      used_idle_socket_timeout_(used_idle_socket_timeout),
      connect_job_factory_(connect_job_factory),
      connect_backup_jobs_enabled_(false),
      pool_generation_number_(0),
      pool_(pool),
      weak_factory_(this) {
  DCHECK_LE(0, max_sockets_per_group);
  DCHECK_LE(max_sockets_per_group, max_sockets);

  NetworkChangeNotifier::AddIPAddressObserver(this);
}
ClientSocketPoolBaseHelper::~ClientSocketPoolBaseHelper() {
  // Clean up any idle sockets and pending connect jobs. Assert that we have no
  // remaining active sockets or pending requests. They should have all been
  // cleaned up prior to |this| being destroyed.
  FlushWithError(ERR_ABORTED);
  DCHECK(group_map_.empty());
  DCHECK(pending_callback_map_.empty());
  DCHECK_EQ(0, connecting_socket_count_);
  CHECK(higher_pools_.empty());

  NetworkChangeNotifier::RemoveIPAddressObserver(this);

  // Remove from lower layer pools.
  for (std::set<LowerLayeredPool*>::iterator it = lower_pools_.begin();
       it != lower_pools_.end();
       ++it) {
    (*it)->RemoveHigherLayeredPool(pool_);
  }
}
ClientSocketPoolBaseHelper::CallbackResultPair::CallbackResultPair()
    : result(OK) {
}

ClientSocketPoolBaseHelper::CallbackResultPair::CallbackResultPair(
    const CompletionCallback& callback_in, int result_in)
    : callback(callback_in),
      result(result_in) {
}

ClientSocketPoolBaseHelper::CallbackResultPair::~CallbackResultPair() {}
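
// The pool is stalled when it is at the global socket limit and at least one
// group (here or in a lower layer pool) has a pending request that cannot be
// serviced because of that global limit.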
bool ClientSocketPoolBaseHelper::IsStalled() const {
  // If a lower layer pool is stalled, consider |this| stalled as well.
  for (std::set<LowerLayeredPool*>::const_iterator it = lower_pools_.begin();
       it != lower_pools_.end();
       ++it) {
    if ((*it)->IsStalled())
      return true;
  }

  // If fewer than |max_sockets_| are in use, then clearly |this| is not
  // stalled.
  if ((handed_out_socket_count_ + connecting_socket_count_) < max_sockets_)
    return false;
  // So in order to be stalled, |this| must be using at least |max_sockets_| AND
  // |this| must have a request that is actually stalled on the global socket
  // limit. To find such a request, look for a group that has more requests
  // than jobs AND where the number of sockets is less than
  // |max_sockets_per_group_|. (If the number of sockets is equal to
  // |max_sockets_per_group_|, then the request is stalled on the group limit,
  // which does not count.)
  for (GroupMap::const_iterator it = group_map_.begin();
       it != group_map_.end(); ++it) {
    if (it->second->CanUseAdditionalSocketSlot(max_sockets_per_group_))
      return true;
  }
  return false;
}
void ClientSocketPoolBaseHelper::AddLowerLayeredPool(
    LowerLayeredPool* lower_pool) {
  DCHECK(pool_);
  CHECK(!ContainsKey(lower_pools_, lower_pool));
  lower_pools_.insert(lower_pool);
  lower_pool->AddHigherLayeredPool(pool_);
}
void ClientSocketPoolBaseHelper::AddHigherLayeredPool(
    HigherLayeredPool* higher_pool) {
  CHECK(higher_pool);
  CHECK(!ContainsKey(higher_pools_, higher_pool));
  higher_pools_.insert(higher_pool);
}
void ClientSocketPoolBaseHelper::RemoveHigherLayeredPool(
    HigherLayeredPool* higher_pool) {
  CHECK(higher_pool);
  CHECK(ContainsKey(higher_pools_, higher_pool));
  higher_pools_.erase(higher_pool);
}
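
// Entry point for a single socket request. Returns a net error code directly
// when the request completes synchronously; otherwise the request is queued
// on its group and the caller is notified later through its callback.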
int ClientSocketPoolBaseHelper::RequestSocket(
    const std::string& group_name,
    scoped_ptr<const Request> request) {
  CHECK(!request->callback().is_null());
  CHECK(request->handle());

  // Cleanup any timed-out idle sockets if no timer is used.
  if (!use_cleanup_timer_)
    CleanupIdleSockets(false);

  request->net_log().BeginEvent(NetLog::TYPE_SOCKET_POOL);
  Group* group = GetOrCreateGroup(group_name);

  int rv = RequestSocketInternal(group_name, *request);
  if (rv != ERR_IO_PENDING) {
    request->net_log().EndEventWithNetErrorCode(NetLog::TYPE_SOCKET_POOL, rv);
    CHECK(!request->handle()->is_initialized());
    request.reset();
  } else {
    group->InsertPendingRequest(request.Pass());
    // Have to do this asynchronously, as closing sockets in higher level pools
    // call back in to |this|, which will cause all sorts of fun and exciting
    // re-entrancy issues if the socket pool is doing something else at the
    // time.
    if (group->CanUseAdditionalSocketSlot(max_sockets_per_group_)) {
      base::MessageLoop::current()->PostTask(
          FROM_HERE,
          base::Bind(
              &ClientSocketPoolBaseHelper::TryToCloseSocketsInLayeredPools,
              weak_factory_.GetWeakPtr()));
    }
  }
  return rv;
}
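
// Preconnect entry point: warms up to |num_sockets| connections for
// |group_name|, clamped to the per-group limit. The request carries no handle
// or callback, so completed sockets are parked on the group's idle list.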
void ClientSocketPoolBaseHelper::RequestSockets(
    const std::string& group_name,
    const Request& request,
    int num_sockets) {
  DCHECK(request.callback().is_null());
  DCHECK(!request.handle());

  // Cleanup any timed out idle sockets if no timer is used.
  if (!use_cleanup_timer_)
    CleanupIdleSockets(false);

  if (num_sockets > max_sockets_per_group_) {
    num_sockets = max_sockets_per_group_;
  }

  request.net_log().BeginEvent(
      NetLog::TYPE_SOCKET_POOL_CONNECTING_N_SOCKETS,
      NetLog::IntegerCallback("num_sockets", num_sockets));

  Group* group = GetOrCreateGroup(group_name);

  // RequestSocketInternal() may delete the group.
  bool deleted_group = false;

  int rv = OK;
  for (int num_iterations_left = num_sockets;
       group->NumActiveSocketSlots() < num_sockets &&
       num_iterations_left > 0; num_iterations_left--) {
    rv = RequestSocketInternal(group_name, request);
    if (rv < 0 && rv != ERR_IO_PENDING) {
      // We're encountering a synchronous error. Give up.
      if (!ContainsKey(group_map_, group_name))
        deleted_group = true;
      break;
    }
    if (!ContainsKey(group_map_, group_name)) {
      // Unexpected. The group should only be getting deleted on synchronous
      // error.
      deleted_group = true;
      break;
    }
  }

  if (!deleted_group && group->IsEmpty())
    RemoveGroup(group_name);

  if (rv == ERR_IO_PENDING)
    rv = OK;
  request.net_log().EndEventWithNetErrorCode(
      NetLog::TYPE_SOCKET_POOL_CONNECTING_N_SOCKETS, rv);
}
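
// Core allocation routine shared by RequestSocket() and RequestSockets():
// tries an idle socket first, then an unassigned ConnectJob, and only then
// spins up a new ConnectJob, honoring the per-group and global limits unless
// the request asks to ignore them.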
int ClientSocketPoolBaseHelper::RequestSocketInternal(
    const std::string& group_name,
    const Request& request) {
  ClientSocketHandle* const handle = request.handle();
  const bool preconnecting = !handle;
  Group* group = GetOrCreateGroup(group_name);

  if (!(request.flags() & NO_IDLE_SOCKETS)) {
    // Try to reuse a socket.
    if (AssignIdleSocketToRequest(request, group))
      return OK;
  }

  // If there are more ConnectJobs than pending requests, don't need to do
  // anything. Can just wait for the extra job to connect, and then assign it
  // to the request.
  if (!preconnecting && group->TryToUseUnassignedConnectJob())
    return ERR_IO_PENDING;

  // Can we make another active socket now?
  if (!group->HasAvailableSocketSlot(max_sockets_per_group_) &&
      !request.ignore_limits()) {
    // TODO(willchan): Consider whether or not we need to close a socket in a
    // higher layered group. I don't think this makes sense since we would just
    // reuse that socket then if we needed one and wouldn't make it down to this
    // layer.
    request.net_log().AddEvent(
        NetLog::TYPE_SOCKET_POOL_STALLED_MAX_SOCKETS_PER_GROUP);
    return ERR_IO_PENDING;
  }

  if (ReachedMaxSocketsLimit() && !request.ignore_limits()) {
    // NOTE(mmenke): Wonder if we really need different code for each case
    // here. Only reason for them now seems to be preconnects.
    if (idle_socket_count() > 0) {
      // There's an idle socket in this pool. Either that's because there's
      // still one in this group, but we got here due to preconnecting bypassing
      // idle sockets, or because there's an idle socket in another group.
      bool closed = CloseOneIdleSocketExceptInGroup(group);
      if (preconnecting && !closed)
        return ERR_PRECONNECT_MAX_SOCKET_LIMIT;
    } else {
      // We could check if we really have a stalled group here, but it requires
      // a scan of all groups, so just flip a flag here, and do the check later.
      request.net_log().AddEvent(NetLog::TYPE_SOCKET_POOL_STALLED_MAX_SOCKETS);
      return ERR_IO_PENDING;
    }
  }

  // We couldn't find a socket to reuse, and there's space to allocate one,
  // so allocate and connect a new one.
  scoped_ptr<ConnectJob> connect_job(
      connect_job_factory_->NewConnectJob(group_name, request, this));

  int rv = connect_job->Connect();
  if (rv == OK) {
    LogBoundConnectJobToRequest(connect_job->net_log().source(), request);
    if (!preconnecting) {
      HandOutSocket(connect_job->PassSocket(), ClientSocketHandle::UNUSED,
                    connect_job->connect_timing(), handle, base::TimeDelta(),
                    group, request.net_log());
    } else {
      AddIdleSocket(connect_job->PassSocket(), group);
    }
  } else if (rv == ERR_IO_PENDING) {
    // If we don't have any sockets in this group, set a timer for potentially
    // creating a new one. If the SYN is lost, this backup socket may complete
    // before the slow socket, improving end user latency.
    if (connect_backup_jobs_enabled_ && group->IsEmpty()) {
      group->StartBackupJobTimer(group_name, this);
    }

    connecting_socket_count_++;

    group->AddJob(connect_job.Pass(), preconnecting);
  } else {
    LogBoundConnectJobToRequest(connect_job->net_log().source(), request);
    scoped_ptr<StreamSocket> error_socket;
    if (!preconnecting) {
      DCHECK(handle);
      connect_job->GetAdditionalErrorState(handle);
      error_socket = connect_job->PassSocket();
    }
    if (error_socket) {
      HandOutSocket(error_socket.Pass(), ClientSocketHandle::UNUSED,
                    connect_job->connect_timing(), handle, base::TimeDelta(),
                    group, request.net_log());
    } else if (group->IsEmpty()) {
      RemoveGroup(group_name);
    }
  }

  return rv;
}
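
// Scans the group's idle sockets, deleting unusable ones, and hands the best
// candidate (newest previously-used socket, else the oldest unused one) to
// |request|. Returns true if a socket was handed out.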
bool ClientSocketPoolBaseHelper::AssignIdleSocketToRequest(
    const Request& request, Group* group) {
  std::list<IdleSocket>* idle_sockets = group->mutable_idle_sockets();
  std::list<IdleSocket>::iterator idle_socket_it = idle_sockets->end();

  // Iterate through the idle sockets forwards (oldest to newest)
  //   * Delete any disconnected ones.
  //   * If we find a used idle socket, assign to |idle_socket|. At the end,
  //   the |idle_socket_it| will be set to the newest used idle socket.
  for (std::list<IdleSocket>::iterator it = idle_sockets->begin();
       it != idle_sockets->end();) {
    if (!it->IsUsable()) {
      DecrementIdleCount();
      delete it->socket;
      it = idle_sockets->erase(it);
      continue;
    }

    if (it->socket->WasEverUsed()) {
      // We found one we can reuse!
      idle_socket_it = it;
    }

    ++it;
  }

  // If we haven't found an idle socket, that means there are no used idle
  // sockets. Pick the oldest (first) idle socket (FIFO).
  if (idle_socket_it == idle_sockets->end() && !idle_sockets->empty())
    idle_socket_it = idle_sockets->begin();

  if (idle_socket_it != idle_sockets->end()) {
    DecrementIdleCount();
    base::TimeDelta idle_time =
        base::TimeTicks::Now() - idle_socket_it->start_time;
    IdleSocket idle_socket = *idle_socket_it;
    idle_sockets->erase(idle_socket_it);
    // TODO(davidben): If |idle_time| is under some low watermark, consider
    // treating as UNUSED rather than UNUSED_IDLE. This will avoid
    // HttpNetworkTransaction retrying on some errors.
    ClientSocketHandle::SocketReuseType reuse_type =
        idle_socket.socket->WasEverUsed() ?
            ClientSocketHandle::REUSED_IDLE :
            ClientSocketHandle::UNUSED_IDLE;
    HandOutSocket(scoped_ptr<StreamSocket>(idle_socket.socket),
                  reuse_type,
                  LoadTimingInfo::ConnectTiming(),
                  request.handle(),
                  idle_time,
                  group,
                  request.net_log());
    return true;
  }

  return false;
}
void ClientSocketPoolBaseHelper::LogBoundConnectJobToRequest(
    const NetLog::Source& connect_job_source, const Request& request) {
  request.net_log().AddEvent(NetLog::TYPE_SOCKET_POOL_BOUND_TO_CONNECT_JOB,
                             connect_job_source.ToEventParametersCallback());
}
void ClientSocketPoolBaseHelper::CancelRequest(
    const std::string& group_name, ClientSocketHandle* handle) {
  PendingCallbackMap::iterator callback_it = pending_callback_map_.find(handle);
  if (callback_it != pending_callback_map_.end()) {
    int result = callback_it->second.result;
    pending_callback_map_.erase(callback_it);
    scoped_ptr<StreamSocket> socket = handle->PassSocket();
    if (socket) {
      if (result != OK)
        socket->Disconnect();
      ReleaseSocket(handle->group_name(), socket.Pass(), handle->id());
    }
    return;
  }

  CHECK(ContainsKey(group_map_, group_name));

  Group* group = GetOrCreateGroup(group_name);

  // Search pending_requests for matching handle.
  scoped_ptr<const Request> request =
      group->FindAndRemovePendingRequest(handle);
  if (request) {
    request->net_log().AddEvent(NetLog::TYPE_CANCELLED);
    request->net_log().EndEvent(NetLog::TYPE_SOCKET_POOL);

    // We let the job run, unless we're at the socket limit and there is
    // not another request waiting on the job.
    if (group->jobs().size() > group->pending_request_count() &&
        ReachedMaxSocketsLimit()) {
      RemoveConnectJob(*group->jobs().begin(), group);
      CheckForStalledSocketGroups();
    }
  }
}
bool ClientSocketPoolBaseHelper::HasGroup(const std::string& group_name) const {
  return ContainsKey(group_map_, group_name);
}
void ClientSocketPoolBaseHelper::CloseIdleSockets() {
  CleanupIdleSockets(true);
  DCHECK_EQ(0, idle_socket_count_);
}
int ClientSocketPoolBaseHelper::IdleSocketCountInGroup(
    const std::string& group_name) const {
  GroupMap::const_iterator i = group_map_.find(group_name);
  CHECK(i != group_map_.end());

  return i->second->idle_sockets().size();
}
LoadState ClientSocketPoolBaseHelper::GetLoadState(
    const std::string& group_name,
    const ClientSocketHandle* handle) const {
  if (ContainsKey(pending_callback_map_, handle))
    return LOAD_STATE_CONNECTING;

  GroupMap::const_iterator group_it = group_map_.find(group_name);
  if (group_it == group_map_.end()) {
    // TODO(mmenke): This is actually reached in the wild, for unknown reasons.
    // Would be great to understand why, and if it's a bug, fix it. If not,
    // should have a test for that case.
    return LOAD_STATE_IDLE;
  }

  const Group& group = *group_it->second;
  if (group.HasConnectJobForHandle(handle)) {
    // Just return the state of the farthest along ConnectJob for the first
    // group.jobs().size() pending requests.
    LoadState max_state = LOAD_STATE_IDLE;
    for (const auto& job : group.jobs()) {
      max_state = std::max(max_state, job->GetLoadState());
    }
    return max_state;
  }

  if (group.CanUseAdditionalSocketSlot(max_sockets_per_group_))
    return LOAD_STATE_WAITING_FOR_STALLED_SOCKET_POOL;
  return LOAD_STATE_WAITING_FOR_AVAILABLE_SOCKET;
}
base::DictionaryValue* ClientSocketPoolBaseHelper::GetInfoAsValue(
    const std::string& name, const std::string& type) const {
  base::DictionaryValue* dict = new base::DictionaryValue();
  dict->SetString("name", name);
  dict->SetString("type", type);
  dict->SetInteger("handed_out_socket_count", handed_out_socket_count_);
  dict->SetInteger("connecting_socket_count", connecting_socket_count_);
  dict->SetInteger("idle_socket_count", idle_socket_count_);
  dict->SetInteger("max_socket_count", max_sockets_);
  dict->SetInteger("max_sockets_per_group", max_sockets_per_group_);
  dict->SetInteger("pool_generation_number", pool_generation_number_);

  if (group_map_.empty())
    return dict;

  base::DictionaryValue* all_groups_dict = new base::DictionaryValue();
  for (GroupMap::const_iterator it = group_map_.begin();
       it != group_map_.end(); it++) {
    const Group* group = it->second;
    base::DictionaryValue* group_dict = new base::DictionaryValue();

    group_dict->SetInteger("pending_request_count",
                           group->pending_request_count());
    if (group->has_pending_requests()) {
      group_dict->SetString(
          "top_pending_priority",
          RequestPriorityToString(group->TopPendingPriority()));
    }

    group_dict->SetInteger("active_socket_count", group->active_socket_count());

    base::ListValue* idle_socket_list = new base::ListValue();
    std::list<IdleSocket>::const_iterator idle_socket;
    for (idle_socket = group->idle_sockets().begin();
         idle_socket != group->idle_sockets().end();
         idle_socket++) {
      int source_id = idle_socket->socket->NetLog().source().id;
      idle_socket_list->Append(new base::FundamentalValue(source_id));
    }
    group_dict->Set("idle_sockets", idle_socket_list);

    base::ListValue* connect_jobs_list = new base::ListValue();
    std::set<ConnectJob*>::const_iterator job;
    for (job = group->jobs().begin(); job != group->jobs().end(); job++) {
      int source_id = (*job)->net_log().source().id;
      connect_jobs_list->Append(new base::FundamentalValue(source_id));
    }
    group_dict->Set("connect_jobs", connect_jobs_list);

    group_dict->SetBoolean("is_stalled", group->CanUseAdditionalSocketSlot(
        max_sockets_per_group_));
    group_dict->SetBoolean("backup_job_timer_is_running",
                           group->BackupJobTimerIsRunning());

    all_groups_dict->SetWithoutPathExpansion(it->first, group_dict);
  }
  dict->Set("groups", all_groups_dict);
  return dict;
}
bool ClientSocketPoolBaseHelper::IdleSocket::IsUsable() const {
  if (socket->WasEverUsed())
    return socket->IsConnectedAndIdle();
  return socket->IsConnected();
}
bool ClientSocketPoolBaseHelper::IdleSocket::ShouldCleanup(
    base::TimeTicks now,
    base::TimeDelta timeout) const {
  bool timed_out = (now - start_time) >= timeout;
  if (timed_out)
    return true;
  return !IsUsable();
}
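
// Walks every group and closes idle sockets that have either timed out or
// become unusable. With |force| set, all idle sockets are closed regardless
// of age; empty groups are removed along the way.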
void ClientSocketPoolBaseHelper::CleanupIdleSockets(bool force) {
  if (idle_socket_count_ == 0)
    return;

  // Current time value. Retrieving it once at the function start rather than
  // inside the inner loop, since it shouldn't change by any meaningful amount.
  base::TimeTicks now = base::TimeTicks::Now();

  GroupMap::iterator i = group_map_.begin();
  while (i != group_map_.end()) {
    Group* group = i->second;

    std::list<IdleSocket>::iterator j = group->mutable_idle_sockets()->begin();
    while (j != group->idle_sockets().end()) {
      base::TimeDelta timeout =
          j->socket->WasEverUsed() ?
          used_idle_socket_timeout_ : unused_idle_socket_timeout_;
      if (force || j->ShouldCleanup(now, timeout)) {
        delete j->socket;
        j = group->mutable_idle_sockets()->erase(j);
        DecrementIdleCount();
      } else {
        ++j;
      }
    }

    // Delete group if no longer needed.
    if (group->IsEmpty()) {
      RemoveGroup(i++);
    } else {
      ++i;
    }
  }
}
ClientSocketPoolBaseHelper::Group* ClientSocketPoolBaseHelper::GetOrCreateGroup(
    const std::string& group_name) {
  GroupMap::iterator it = group_map_.find(group_name);
  if (it != group_map_.end())
    return it->second;
  Group* group = new Group;
  group_map_[group_name] = group;
  return group;
}
void ClientSocketPoolBaseHelper::RemoveGroup(const std::string& group_name) {
  GroupMap::iterator it = group_map_.find(group_name);
  CHECK(it != group_map_.end());

  RemoveGroup(it);
}
void ClientSocketPoolBaseHelper::RemoveGroup(GroupMap::iterator it) {
  delete it->second;
  group_map_.erase(it);
}
bool ClientSocketPoolBaseHelper::connect_backup_jobs_enabled() {
  return g_connect_backup_jobs_enabled;
}
bool ClientSocketPoolBaseHelper::set_connect_backup_jobs_enabled(bool enabled) {
  bool old_value = g_connect_backup_jobs_enabled;
  g_connect_backup_jobs_enabled = enabled;
  return old_value;
}
void ClientSocketPoolBaseHelper::EnableConnectBackupJobs() {
  connect_backup_jobs_enabled_ = g_connect_backup_jobs_enabled;
}
void ClientSocketPoolBaseHelper::IncrementIdleCount() {
  if (++idle_socket_count_ == 1 && use_cleanup_timer_)
    StartIdleSocketTimer();
}
void ClientSocketPoolBaseHelper::DecrementIdleCount() {
  if (--idle_socket_count_ == 0)
    timer_.Stop();
}
bool ClientSocketPoolBaseHelper::cleanup_timer_enabled() {
  return g_cleanup_timer_enabled;
}
bool ClientSocketPoolBaseHelper::set_cleanup_timer_enabled(bool enabled) {
  bool old_value = g_cleanup_timer_enabled;
  g_cleanup_timer_enabled = enabled;
  return old_value;
}
void ClientSocketPoolBaseHelper::StartIdleSocketTimer() {
  timer_.Start(FROM_HERE, TimeDelta::FromSeconds(kCleanupInterval), this,
               &ClientSocketPoolBaseHelper::OnCleanupTimerFired);
}
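
// Returns a handed-out socket to the pool. Connected, idle sockets from the
// current generation go back on the idle list; anything else is destroyed.
// Either way, the freed slot may wake a stalled group.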
void ClientSocketPoolBaseHelper::ReleaseSocket(const std::string& group_name,
                                               scoped_ptr<StreamSocket> socket,
                                               int id) {
  GroupMap::iterator i = group_map_.find(group_name);
  CHECK(i != group_map_.end());

  Group* group = i->second;

  CHECK_GT(handed_out_socket_count_, 0);
  handed_out_socket_count_--;

  CHECK_GT(group->active_socket_count(), 0);
  group->DecrementActiveSocketCount();

  const bool can_reuse = socket->IsConnectedAndIdle() &&
      id == pool_generation_number_;
  if (can_reuse) {
    // Add it to the idle list.
    AddIdleSocket(socket.Pass(), group);
    OnAvailableSocketSlot(group_name, group);
  } else {
    socket.reset();
  }

  CheckForStalledSocketGroups();
}
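
// Called whenever a socket slot is freed up, to see whether a group that is
// blocked on the global socket limit can now make progress.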
void ClientSocketPoolBaseHelper::CheckForStalledSocketGroups() {
  // If we have idle sockets, see if we can give one to the top-stalled group.
  std::string top_group_name;
  Group* top_group = NULL;
  if (!FindTopStalledGroup(&top_group, &top_group_name)) {
    // There may still be a stalled group in a lower level pool.
    for (std::set<LowerLayeredPool*>::iterator it = lower_pools_.begin();
         it != lower_pools_.end();
         ++it) {
      if ((*it)->IsStalled()) {
        CloseOneIdleSocket();
        break;
      }
    }
    return;
  }

  if (ReachedMaxSocketsLimit()) {
    if (idle_socket_count() > 0) {
      CloseOneIdleSocket();
    } else {
      // We can't activate more sockets since we're already at our global
      // limit.
      return;
    }
  }

  // Note: we don't loop on waking stalled groups. If the stalled group is at
  // its limit, we may be left with other stalled groups that could be woken.
  // This isn't optimal, but there is no starvation, so to avoid the looping
  // we leave it at this.
  OnAvailableSocketSlot(top_group_name, top_group);
}
// Search for the highest priority pending request, amongst the groups that
// are not at the |max_sockets_per_group_| limit. Note: for requests with
// the same priority, the winner is based on group hash ordering (and not
// insertion order).
bool ClientSocketPoolBaseHelper::FindTopStalledGroup(
    Group** group,
    std::string* group_name) const {
  CHECK((group && group_name) || (!group && !group_name));
  Group* top_group = NULL;
  const std::string* top_group_name = NULL;
  bool has_stalled_group = false;
  for (GroupMap::const_iterator i = group_map_.begin();
       i != group_map_.end(); ++i) {
    Group* curr_group = i->second;
    if (!curr_group->has_pending_requests())
      continue;
    if (curr_group->CanUseAdditionalSocketSlot(max_sockets_per_group_)) {
      if (!group)
        return true;
      has_stalled_group = true;
      bool has_higher_priority = !top_group ||
          curr_group->TopPendingPriority() > top_group->TopPendingPriority();
      if (has_higher_priority) {
        top_group = curr_group;
        top_group_name = &i->first;
      }
    }
  }

  if (top_group) {
    CHECK(group);
    *group = top_group;
    *group_name = *top_group_name;
  } else {
    CHECK(!has_stalled_group);
  }
  return has_stalled_group;
}
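
// ConnectJob::Delegate implementation. Hands the finished job's socket (or
// error state) to the oldest pending request in the job's group, or parks a
// successful socket as idle when no request is waiting.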
void ClientSocketPoolBaseHelper::OnConnectJobComplete(
    int result, ConnectJob* job) {
  DCHECK_NE(ERR_IO_PENDING, result);
  const std::string group_name = job->group_name();
  GroupMap::iterator group_it = group_map_.find(group_name);
  CHECK(group_it != group_map_.end());
  Group* group = group_it->second;

  scoped_ptr<StreamSocket> socket = job->PassSocket();

  // Copies of these are needed because |job| may be deleted before they are
  // accessed.
  BoundNetLog job_log = job->net_log();
  LoadTimingInfo::ConnectTiming connect_timing = job->connect_timing();

  // RemoveConnectJob(job, _) must be called by all branches below;
  // otherwise, |job| will be leaked.

  if (result == OK) {
    DCHECK(socket.get());
    RemoveConnectJob(job, group);
    scoped_ptr<const Request> request = group->PopNextPendingRequest();
    if (request) {
      LogBoundConnectJobToRequest(job_log.source(), *request);
      HandOutSocket(
          socket.Pass(), ClientSocketHandle::UNUSED, connect_timing,
          request->handle(), base::TimeDelta(), group, request->net_log());
      request->net_log().EndEvent(NetLog::TYPE_SOCKET_POOL);
      InvokeUserCallbackLater(request->handle(), request->callback(), result);
    } else {
      AddIdleSocket(socket.Pass(), group);
      OnAvailableSocketSlot(group_name, group);
      CheckForStalledSocketGroups();
    }
  } else {
    // If we got a socket, it must contain error information so pass that
    // up so that the caller can retrieve it.
    bool handed_out_socket = false;
    scoped_ptr<const Request> request = group->PopNextPendingRequest();
    if (request) {
      LogBoundConnectJobToRequest(job_log.source(), *request);
      job->GetAdditionalErrorState(request->handle());
      RemoveConnectJob(job, group);
      if (socket.get()) {
        handed_out_socket = true;
        HandOutSocket(socket.Pass(), ClientSocketHandle::UNUSED,
                      connect_timing, request->handle(), base::TimeDelta(),
                      group, request->net_log());
      }
      request->net_log().EndEventWithNetErrorCode(
          NetLog::TYPE_SOCKET_POOL, result);
      InvokeUserCallbackLater(request->handle(), request->callback(), result);
    } else {
      RemoveConnectJob(job, group);
    }
    if (!handed_out_socket) {
      OnAvailableSocketSlot(group_name, group);
      CheckForStalledSocketGroups();
    }
  }
}
void ClientSocketPoolBaseHelper::OnIPAddressChanged() {
  FlushWithError(ERR_NETWORK_CHANGED);
}
void ClientSocketPoolBaseHelper::FlushWithError(int error) {
  pool_generation_number_++;
  CancelAllConnectJobs();
  CloseIdleSockets();
  CancelAllRequestsWithError(error);
}
void ClientSocketPoolBaseHelper::RemoveConnectJob(ConnectJob* job,
                                                  Group* group) {
  CHECK_GT(connecting_socket_count_, 0);
  connecting_socket_count_--;

  DCHECK(group);
  group->RemoveJob(job);
}
void ClientSocketPoolBaseHelper::OnAvailableSocketSlot(
    const std::string& group_name, Group* group) {
  DCHECK(ContainsKey(group_map_, group_name));
  if (group->IsEmpty()) {
    RemoveGroup(group_name);
  } else if (group->has_pending_requests()) {
    ProcessPendingRequest(group_name, group);
  }
}
void ClientSocketPoolBaseHelper::ProcessPendingRequest(
    const std::string& group_name, Group* group) {
  const Request* next_request = group->GetNextPendingRequest();
  DCHECK(next_request);

  // If the group has no idle sockets, and can't make use of an additional slot,
  // either because it's at the limit or because it's at the socket per group
  // limit, then there's nothing to do.
  if (group->idle_sockets().empty() &&
      !group->CanUseAdditionalSocketSlot(max_sockets_per_group_)) {
    return;
  }

  int rv = RequestSocketInternal(group_name, *next_request);
  if (rv != ERR_IO_PENDING) {
    scoped_ptr<const Request> request = group->PopNextPendingRequest();
    DCHECK(request);
    if (group->IsEmpty())
      RemoveGroup(group_name);

    request->net_log().EndEventWithNetErrorCode(NetLog::TYPE_SOCKET_POOL, rv);
    InvokeUserCallbackLater(request->handle(), request->callback(), rv);
  }
}
void ClientSocketPoolBaseHelper::HandOutSocket(
    scoped_ptr<StreamSocket> socket,
    ClientSocketHandle::SocketReuseType reuse_type,
    const LoadTimingInfo::ConnectTiming& connect_timing,
    ClientSocketHandle* handle,
    base::TimeDelta idle_time,
    Group* group,
    const BoundNetLog& net_log) {
  DCHECK(socket);
  handle->SetSocket(socket.Pass());
  handle->set_reuse_type(reuse_type);
  handle->set_idle_time(idle_time);
  handle->set_pool_id(pool_generation_number_);
  handle->set_connect_timing(connect_timing);

  if (handle->is_reused()) {
    net_log.AddEvent(
        NetLog::TYPE_SOCKET_POOL_REUSED_AN_EXISTING_SOCKET,
        NetLog::IntegerCallback(
            "idle_ms", static_cast<int>(idle_time.InMilliseconds())));
  }

  net_log.AddEvent(
      NetLog::TYPE_SOCKET_POOL_BOUND_TO_SOCKET,
      handle->socket()->NetLog().source().ToEventParametersCallback());

  handed_out_socket_count_++;
  group->IncrementActiveSocketCount();
}
void ClientSocketPoolBaseHelper::AddIdleSocket(
    scoped_ptr<StreamSocket> socket,
    Group* group) {
  DCHECK(socket);
  IdleSocket idle_socket;
  idle_socket.socket = socket.release();
  idle_socket.start_time = base::TimeTicks::Now();

  group->mutable_idle_sockets()->push_back(idle_socket);
  IncrementIdleCount();
}
void ClientSocketPoolBaseHelper::CancelAllConnectJobs() {
  for (GroupMap::iterator i = group_map_.begin(); i != group_map_.end();) {
    Group* group = i->second;
    connecting_socket_count_ -= group->jobs().size();
    group->RemoveAllJobs();

    // Delete group if no longer needed.
    if (group->IsEmpty()) {
      // RemoveGroup() will call .erase() which will invalidate the iterator,
      // but i will already have been incremented to a valid iterator before
      // RemoveGroup() is called.
      RemoveGroup(i++);
    } else {
      ++i;
    }
  }
  DCHECK_EQ(0, connecting_socket_count_);
}
void ClientSocketPoolBaseHelper::CancelAllRequestsWithError(int error) {
  for (GroupMap::iterator i = group_map_.begin(); i != group_map_.end();) {
    Group* group = i->second;

    while (true) {
      scoped_ptr<const Request> request = group->PopNextPendingRequest();
      if (!request)
        break;
      InvokeUserCallbackLater(request->handle(), request->callback(), error);
    }

    // Delete group if no longer needed.
    if (group->IsEmpty()) {
      // RemoveGroup() will call .erase() which will invalidate the iterator,
      // but i will already have been incremented to a valid iterator before
      // RemoveGroup() is called.
      RemoveGroup(i++);
    } else {
      ++i;
    }
  }
}
bool ClientSocketPoolBaseHelper::ReachedMaxSocketsLimit() const {
  // Each connecting socket will eventually connect and be handed out.
  int total = handed_out_socket_count_ + connecting_socket_count_ +
      idle_socket_count();
  // There can be more sockets than the limit since some requests can ignore
  // the limit.
  if (total < max_sockets_)
    return false;
  return true;
}
bool ClientSocketPoolBaseHelper::CloseOneIdleSocket() {
  if (idle_socket_count() == 0)
    return false;
  return CloseOneIdleSocketExceptInGroup(NULL);
}
bool ClientSocketPoolBaseHelper::CloseOneIdleSocketExceptInGroup(
    const Group* exception_group) {
  CHECK_GT(idle_socket_count(), 0);

  for (GroupMap::iterator i = group_map_.begin(); i != group_map_.end(); ++i) {
    Group* group = i->second;
    if (exception_group == group)
      continue;
    std::list<IdleSocket>* idle_sockets = group->mutable_idle_sockets();

    if (!idle_sockets->empty()) {
      delete idle_sockets->front().socket;
      idle_sockets->pop_front();
      DecrementIdleCount();
      if (group->IsEmpty())
        RemoveGroup(i);

      return true;
    }
  }

  return false;
}
bool ClientSocketPoolBaseHelper::CloseOneIdleConnectionInHigherLayeredPool() {
  // This pool doesn't have any idle sockets. It's possible that a pool at a
  // higher layer is holding one of this pool's sockets active, but it's
  // actually idle. Query the higher layers.
  for (std::set<HigherLayeredPool*>::const_iterator it = higher_pools_.begin();
       it != higher_pools_.end(); ++it) {
    if ((*it)->CloseOneIdleConnection())
      return true;
  }
  return false;
}
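
// Completion callbacks are never run re-entrantly: the result is recorded in
// |pending_callback_map_| and delivered in a separately posted task, which
// also gives CancelRequest() a chance to intercept it first.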
void ClientSocketPoolBaseHelper::InvokeUserCallbackLater(
    ClientSocketHandle* handle, const CompletionCallback& callback, int rv) {
  CHECK(!ContainsKey(pending_callback_map_, handle));
  pending_callback_map_[handle] = CallbackResultPair(callback, rv);
  base::MessageLoop::current()->PostTask(
      FROM_HERE,
      base::Bind(&ClientSocketPoolBaseHelper::InvokeUserCallback,
                 weak_factory_.GetWeakPtr(), handle));
}
void ClientSocketPoolBaseHelper::InvokeUserCallback(
    ClientSocketHandle* handle) {
  PendingCallbackMap::iterator it = pending_callback_map_.find(handle);

  // Exit if the request has already been cancelled.
  if (it == pending_callback_map_.end())
    return;

  CHECK(!handle->is_initialized());
  CompletionCallback callback = it->second.callback;
  int result = it->second.result;
  pending_callback_map_.erase(it);
  callback.Run(result);
}
void ClientSocketPoolBaseHelper::TryToCloseSocketsInLayeredPools() {
  while (IsStalled()) {
    // Closing a socket will result in calling back into |this| to use the freed
    // socket slot, so nothing else is needed.
    if (!CloseOneIdleConnectionInHigherLayeredPool())
      return;
  }
}
ClientSocketPoolBaseHelper::Group::Group()
    : unassigned_job_count_(0),
      pending_requests_(NUM_PRIORITIES),
      active_socket_count_(0) {}
ClientSocketPoolBaseHelper::Group::~Group() {
  DCHECK_EQ(0u, unassigned_job_count_);
}
void ClientSocketPoolBaseHelper::Group::StartBackupJobTimer(
    const std::string& group_name,
    ClientSocketPoolBaseHelper* pool) {
  // Only allow one timer to run at a time.
  if (BackupJobTimerIsRunning())
    return;

  // Unretained here is okay because |backup_job_timer_| is
  // automatically cancelled when it's destroyed.
  backup_job_timer_.Start(
      FROM_HERE, pool->ConnectRetryInterval(),
      base::Bind(&Group::OnBackupJobTimerFired, base::Unretained(this),
                 group_name, pool));
}
bool ClientSocketPoolBaseHelper::Group::BackupJobTimerIsRunning() const {
  return backup_job_timer_.IsRunning();
}
bool ClientSocketPoolBaseHelper::Group::TryToUseUnassignedConnectJob() {
  SanityCheck();

  if (unassigned_job_count_ == 0)
    return false;
  --unassigned_job_count_;
  return true;
}
void ClientSocketPoolBaseHelper::Group::AddJob(scoped_ptr<ConnectJob> job,
                                               bool is_preconnect) {
  SanityCheck();

  if (is_preconnect)
    ++unassigned_job_count_;
  jobs_.insert(job.release());
}
void ClientSocketPoolBaseHelper::Group::RemoveJob(ConnectJob* job) {
  scoped_ptr<ConnectJob> owned_job(job);
  SanityCheck();

  std::set<ConnectJob*>::iterator it = jobs_.find(job);
  if (it != jobs_.end()) {
    jobs_.erase(it);
  }
  size_t job_count = jobs_.size();
  if (job_count < unassigned_job_count_)
    unassigned_job_count_ = job_count;

  // If we've got no more jobs for this group, then we no longer need a
  // backup job either.
  if (jobs_.empty())
    backup_job_timer_.Stop();
}
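
// Fired after the pool's ConnectRetryInterval() has elapsed with the group's
// oldest ConnectJob still outstanding. Starts a second ("backup") job for the
// same group so a lost SYN on the first attempt doesn't stall the request.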
void ClientSocketPoolBaseHelper::Group::OnBackupJobTimerFired(
    std::string group_name,
    ClientSocketPoolBaseHelper* pool) {
  // If there are no more jobs pending, there is no work to do.
  // If we've done our cleanups correctly, this should not happen.
  if (jobs_.empty()) {
    return;
  }

  // If our old job is waiting on DNS, or if we can't create any sockets
  // right now due to limits, just reset the timer.
  if (pool->ReachedMaxSocketsLimit() ||
      !HasAvailableSocketSlot(pool->max_sockets_per_group_) ||
      (*jobs_.begin())->GetLoadState() == LOAD_STATE_RESOLVING_HOST) {
    StartBackupJobTimer(group_name, pool);
    return;
  }

  if (pending_requests_.empty())
    return;

  scoped_ptr<ConnectJob> backup_job =
      pool->connect_job_factory_->NewConnectJob(
          group_name, *pending_requests_.FirstMax().value(), pool);
  backup_job->net_log().AddEvent(NetLog::TYPE_BACKUP_CONNECT_JOB_CREATED);
  int rv = backup_job->Connect();
  pool->connecting_socket_count_++;
  ConnectJob* raw_backup_job = backup_job.get();
  AddJob(backup_job.Pass(), false);
  if (rv != ERR_IO_PENDING)
    pool->OnConnectJobComplete(rv, raw_backup_job);
}
void ClientSocketPoolBaseHelper::Group::SanityCheck() {
  DCHECK_LE(unassigned_job_count_, jobs_.size());
}
void ClientSocketPoolBaseHelper::Group::RemoveAllJobs() {
  SanityCheck();

  // Delete active jobs.
  STLDeleteElements(&jobs_);
  unassigned_job_count_ = 0;

  // Stop backup job timer.
  backup_job_timer_.Stop();
}
const ClientSocketPoolBaseHelper::Request*
ClientSocketPoolBaseHelper::Group::GetNextPendingRequest() const {
  return
      pending_requests_.empty() ? NULL : pending_requests_.FirstMax().value();
}
bool ClientSocketPoolBaseHelper::Group::HasConnectJobForHandle(
    const ClientSocketHandle* handle) const {
  // Search the first |jobs_.size()| pending requests for |handle|.
  // If it's farther back in the deque than that, it doesn't have a
  // corresponding ConnectJob.
  size_t i = 0;
  for (RequestQueue::Pointer pointer = pending_requests_.FirstMax();
       !pointer.is_null() && i < jobs_.size();
       pointer = pending_requests_.GetNextTowardsLastMin(pointer), ++i) {
    if (pointer.value()->handle() == handle)
      return true;
  }
  return false;
}
1289 scoped_ptr
<const Request
> request
) {
1290 // This value must be cached before we release |request|.
1291 RequestPriority priority
= request
->priority();
1292 if (request
->ignore_limits()) {
1293 // Put requests with ignore_limits == true (which should have
1294 // priority == MAXIMUM_PRIORITY) ahead of other requests with
1295 // MAXIMUM_PRIORITY.
1296 DCHECK_EQ(priority
, MAXIMUM_PRIORITY
);
1297 pending_requests_
.InsertAtFront(request
.release(), priority
);
1299 pending_requests_
.Insert(request
.release(), priority
);
scoped_ptr<const ClientSocketPoolBaseHelper::Request>
ClientSocketPoolBaseHelper::Group::PopNextPendingRequest() {
  if (pending_requests_.empty())
    return scoped_ptr<const ClientSocketPoolBaseHelper::Request>();
  return RemovePendingRequest(pending_requests_.FirstMax());
}
scoped_ptr<const ClientSocketPoolBaseHelper::Request>
ClientSocketPoolBaseHelper::Group::FindAndRemovePendingRequest(
    ClientSocketHandle* handle) {
  for (RequestQueue::Pointer pointer = pending_requests_.FirstMax();
       !pointer.is_null();
       pointer = pending_requests_.GetNextTowardsLastMin(pointer)) {
    if (pointer.value()->handle() == handle) {
      scoped_ptr<const Request> request = RemovePendingRequest(pointer);
      return request.Pass();
    }
  }
  return scoped_ptr<const ClientSocketPoolBaseHelper::Request>();
}
scoped_ptr<const ClientSocketPoolBaseHelper::Request>
ClientSocketPoolBaseHelper::Group::RemovePendingRequest(
    const RequestQueue::Pointer& pointer) {
  scoped_ptr<const Request> request(pointer.value());
  pending_requests_.Erase(pointer);
  // If there are no more requests, kill the backup timer.
  if (pending_requests_.empty())
    backup_job_timer_.Stop();
  return request.Pass();
}

}  // namespace internal

}  // namespace net