// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "net/socket/client_socket_pool_base.h"

#include <math.h>
#include "base/compiler_specific.h"
#include "base/format_macros.h"
#include "base/logging.h"
#include "base/message_loop.h"
#include "base/metrics/stats_counters.h"
#include "base/stl_util.h"
#include "base/string_number_conversions.h"
#include "base/string_util.h"
#include "base/time.h"
#include "base/values.h"
#include "net/base/net_log.h"
#include "net/base/net_errors.h"
#include "net/socket/client_socket_handle.h"

using base::TimeDelta;

namespace {

// Indicates whether the idle socket cleanup timer is enabled. When the timer
// is disabled, idle sockets are closed the next time a socket request is
// made.
bool g_cleanup_timer_enabled = true;

// The timeout value, in seconds, used to clean up idle sockets that can't be
// reused.
//
// Note: It's important to close idle sockets that have received data as soon
// as possible because the received data may cause BSOD on Windows XP under
// some conditions. See http://crbug.com/4606.
const int kCleanupInterval = 10;  // DO NOT INCREASE THIS TIMEOUT.

// Indicates whether we should establish a new transport layer connection
// after a certain timeout has passed without receiving an ACK.
bool g_connect_backup_jobs_enabled = true;

double g_socket_reuse_policy_penalty_exponent = -1;
int g_socket_reuse_policy = -1;

}  // namespace

namespace net {

int GetSocketReusePolicy() {
  return g_socket_reuse_policy;
}
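
// Sets the global socket reuse policy (0, 1, or 2) and the corresponding
// penalty exponent applied to a socket's idle time when scoring used idle
// sockets in AssignIdleSocketToRequest().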
void SetSocketReusePolicy(int policy) {
  DCHECK_GE(policy, 0);
  DCHECK_LE(policy, 2);
  if (policy > 2 || policy < 0) {
    LOG(ERROR) << "Invalid socket reuse policy";
    return;
  }

  double exponents[] = { 0, 0.25, -1 };
  g_socket_reuse_policy_penalty_exponent = exponents[policy];
  g_socket_reuse_policy = policy;

  VLOG(1) << "Setting g_socket_reuse_policy_penalty_exponent = "
          << g_socket_reuse_policy_penalty_exponent;
}

ConnectJob::ConnectJob(const std::string& group_name,
                       base::TimeDelta timeout_duration,
                       Delegate* delegate,
                       const BoundNetLog& net_log)
    : group_name_(group_name),
      timeout_duration_(timeout_duration),
      delegate_(delegate),
      net_log_(net_log),
      idle_(true) {
  DCHECK(!group_name.empty());
  DCHECK(delegate);
  net_log.BeginEvent(NetLog::TYPE_SOCKET_POOL_CONNECT_JOB);
}

ConnectJob::~ConnectJob() {
  net_log().EndEvent(NetLog::TYPE_SOCKET_POOL_CONNECT_JOB);
}
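
// Starts the connection and, if a timeout was given, a timer that fails the
// job with ERR_TIMED_OUT when it fires.  Returns a net error code, or
// ERR_IO_PENDING if the connect completes asynchronously, in which case the
// delegate is notified when the job finishes.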
int ConnectJob::Connect() {
  if (timeout_duration_ != base::TimeDelta())
    timer_.Start(FROM_HERE, timeout_duration_, this, &ConnectJob::OnTimeout);

  idle_ = false;

  LogConnectStart();

  int rv = ConnectInternal();

  if (rv != ERR_IO_PENDING) {
    LogConnectCompletion(rv);
    delegate_ = NULL;
  }

  return rv;
}

void ConnectJob::set_socket(StreamSocket* socket) {
  if (socket) {
    net_log().AddEvent(NetLog::TYPE_CONNECT_JOB_SET_SOCKET,
                       socket->NetLog().source().ToEventParametersCallback());
  }
  socket_.reset(socket);
}

void ConnectJob::NotifyDelegateOfCompletion(int rv) {
  // The delegate will delete |this|.
  Delegate* delegate = delegate_;
  delegate_ = NULL;

  LogConnectCompletion(rv);
  delegate->OnConnectJobComplete(rv, this);
}

void ConnectJob::ResetTimer(base::TimeDelta remaining_time) {
  timer_.Stop();
  timer_.Start(FROM_HERE, remaining_time, this, &ConnectJob::OnTimeout);
}

void ConnectJob::LogConnectStart() {
  net_log().BeginEvent(NetLog::TYPE_SOCKET_POOL_CONNECT_JOB_CONNECT,
                       NetLog::StringCallback("group_name", &group_name_));
}

void ConnectJob::LogConnectCompletion(int net_error) {
  net_log().EndEventWithNetErrorCode(
      NetLog::TYPE_SOCKET_POOL_CONNECT_JOB_CONNECT, net_error);
}

void ConnectJob::OnTimeout() {
  // Make sure the socket is NULL before calling into |delegate|.
  set_socket(NULL);

  net_log_.AddEvent(NetLog::TYPE_SOCKET_POOL_CONNECT_JOB_TIMED_OUT);

  NotifyDelegateOfCompletion(ERR_TIMED_OUT);
}

namespace internal {

ClientSocketPoolBaseHelper::Request::Request(
    ClientSocketHandle* handle,
    const CompletionCallback& callback,
    RequestPriority priority,
    bool ignore_limits,
    Flags flags,
    const BoundNetLog& net_log)
    : handle_(handle),
      callback_(callback),
      priority_(priority),
      ignore_limits_(ignore_limits),
      flags_(flags),
      net_log_(net_log) {}

ClientSocketPoolBaseHelper::Request::~Request() {}

ClientSocketPoolBaseHelper::ClientSocketPoolBaseHelper(
    int max_sockets,
    int max_sockets_per_group,
    base::TimeDelta unused_idle_socket_timeout,
    base::TimeDelta used_idle_socket_timeout,
    ConnectJobFactory* connect_job_factory)
    : idle_socket_count_(0),
      connecting_socket_count_(0),
      handed_out_socket_count_(0),
      max_sockets_(max_sockets),
      max_sockets_per_group_(max_sockets_per_group),
      use_cleanup_timer_(g_cleanup_timer_enabled),
      unused_idle_socket_timeout_(unused_idle_socket_timeout),
      used_idle_socket_timeout_(used_idle_socket_timeout),
      connect_job_factory_(connect_job_factory),
      connect_backup_jobs_enabled_(false),
      pool_generation_number_(0),
      ALLOW_THIS_IN_INITIALIZER_LIST(weak_factory_(this)) {
  DCHECK_LE(0, max_sockets_per_group);
  DCHECK_LE(max_sockets_per_group, max_sockets);

  NetworkChangeNotifier::AddIPAddressObserver(this);
}

ClientSocketPoolBaseHelper::~ClientSocketPoolBaseHelper() {
  // Clean up any idle sockets and pending connect jobs.  Assert that we have
  // no remaining active sockets or pending requests.  They should have all
  // been cleaned up prior to |this| being destroyed.
  FlushWithError(ERR_ABORTED);
  DCHECK(group_map_.empty());
  DCHECK(pending_callback_map_.empty());
  DCHECK_EQ(0, connecting_socket_count_);
  CHECK(higher_layer_pools_.empty());

  NetworkChangeNotifier::RemoveIPAddressObserver(this);
}

ClientSocketPoolBaseHelper::CallbackResultPair::CallbackResultPair()
    : result(OK) {
}

ClientSocketPoolBaseHelper::CallbackResultPair::CallbackResultPair(
    const CompletionCallback& callback_in, int result_in)
    : callback(callback_in),
      result(result_in) {
}

ClientSocketPoolBaseHelper::CallbackResultPair::~CallbackResultPair() {}

// InsertRequestIntoQueue inserts the request into the queue based on
// priority.  Highest priorities are closest to the front.  Older requests are
// prioritized over requests of equal priority.
//
// static
void ClientSocketPoolBaseHelper::InsertRequestIntoQueue(
    const Request* r, RequestQueue* pending_requests) {
  RequestQueue::iterator it = pending_requests->begin();
  while (it != pending_requests->end() && r->priority() <= (*it)->priority())
    ++it;
  pending_requests->insert(it, r);
}
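
// Removes the request pointed to by |it| from |group|'s pending request
// queue and returns it; the caller takes ownership.  Cancels the group's
// backup connect job when the queue becomes empty.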
// static
const ClientSocketPoolBaseHelper::Request*
ClientSocketPoolBaseHelper::RemoveRequestFromQueue(
    const RequestQueue::iterator& it, Group* group) {
  const Request* req = *it;
  group->mutable_pending_requests()->erase(it);
  // If there are no more requests, we kill the backup timer.
  if (group->pending_requests().empty())
    group->CleanupBackupJob();
  return req;
}

void ClientSocketPoolBaseHelper::AddLayeredPool(LayeredPool* pool) {
  CHECK(pool);
  CHECK(!ContainsKey(higher_layer_pools_, pool));
  higher_layer_pools_.insert(pool);
}

void ClientSocketPoolBaseHelper::RemoveLayeredPool(LayeredPool* pool) {
  CHECK(pool);
  CHECK(ContainsKey(higher_layer_pools_, pool));
  higher_layer_pools_.erase(pool);
}
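
// Handles a request for a single socket.  On synchronous completion the
// request is deleted and the net error code is returned; otherwise the
// request is queued on its group and ERR_IO_PENDING is returned.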
int ClientSocketPoolBaseHelper::RequestSocket(
    const std::string& group_name,
    const Request* request) {
  CHECK(!request->callback().is_null());
  CHECK(request->handle());

  // Clean up any timed-out idle sockets if no timer is used.
  if (!use_cleanup_timer_)
    CleanupIdleSockets(false);

  request->net_log().BeginEvent(NetLog::TYPE_SOCKET_POOL);
  Group* group = GetOrCreateGroup(group_name);

  int rv = RequestSocketInternal(group_name, request);
  if (rv != ERR_IO_PENDING) {
    request->net_log().EndEventWithNetErrorCode(NetLog::TYPE_SOCKET_POOL, rv);
    CHECK(!request->handle()->is_initialized());
    delete request;
  } else {
    InsertRequestIntoQueue(request, group->mutable_pending_requests());
  }
  return rv;
}

void ClientSocketPoolBaseHelper::RequestSockets(
    const std::string& group_name,
    const Request& request,
    int num_sockets) {
  DCHECK(request.callback().is_null());
  DCHECK(!request.handle());

  // Clean up any timed-out idle sockets if no timer is used.
  if (!use_cleanup_timer_)
    CleanupIdleSockets(false);

  if (num_sockets > max_sockets_per_group_) {
    num_sockets = max_sockets_per_group_;
  }

  request.net_log().BeginEvent(
      NetLog::TYPE_SOCKET_POOL_CONNECTING_N_SOCKETS,
      NetLog::IntegerCallback("num_sockets", num_sockets));

  Group* group = GetOrCreateGroup(group_name);

  // RequestSocketInternal() may delete the group.
  bool deleted_group = false;

  int rv = OK;
  for (int num_iterations_left = num_sockets;
       group->NumActiveSocketSlots() < num_sockets &&
       num_iterations_left > 0; num_iterations_left--) {
    rv = RequestSocketInternal(group_name, &request);
    if (rv < 0 && rv != ERR_IO_PENDING) {
      // We encountered a synchronous error.  Give up.
      if (!ContainsKey(group_map_, group_name))
        deleted_group = true;
      break;
    }
    if (!ContainsKey(group_map_, group_name)) {
      // Unexpected.  The group should only be getting deleted on synchronous
      // error.
      NOTREACHED();
      deleted_group = true;
      break;
    }
  }

  if (!deleted_group && group->IsEmpty())
    RemoveGroup(group_name);

  if (rv == ERR_IO_PENDING)
    rv = OK;
  request.net_log().EndEventWithNetErrorCode(
      NetLog::TYPE_SOCKET_POOL_CONNECTING_N_SOCKETS, rv);
}
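
// Core socket-assignment logic shared by RequestSocket() and
// RequestSockets(): reuse an idle socket if allowed, otherwise wait on an
// unassigned ConnectJob if one exists, otherwise respect the per-group and
// global socket limits, and finally create and start a new ConnectJob.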
int ClientSocketPoolBaseHelper::RequestSocketInternal(
    const std::string& group_name,
    const Request* request) {
  ClientSocketHandle* const handle = request->handle();
  const bool preconnecting = !handle;
  Group* group = GetOrCreateGroup(group_name);

  if (!(request->flags() & NO_IDLE_SOCKETS)) {
    // Try to reuse a socket.
    if (AssignIdleSocketToRequest(request, group))
      return OK;
  }

  // If there are more ConnectJobs than pending requests, we don't need to do
  // anything.  We can just wait for the extra job to connect, and then assign
  // it to the request.
  if (!preconnecting && group->TryToUseUnassignedConnectJob())
    return ERR_IO_PENDING;

  // Can we make another active socket now?
  if (!group->HasAvailableSocketSlot(max_sockets_per_group_) &&
      !request->ignore_limits()) {
    // TODO(willchan): Consider whether or not we need to close a socket in a
    // higher layered group.  I don't think this makes sense since we would
    // just reuse that socket then if we needed one and wouldn't make it down
    // to this layer.
    request->net_log().AddEvent(
        NetLog::TYPE_SOCKET_POOL_STALLED_MAX_SOCKETS_PER_GROUP);
    return ERR_IO_PENDING;
  }

  if (ReachedMaxSocketsLimit() && !request->ignore_limits()) {
    // NOTE(mmenke): I wonder if we really need different code for each case
    // here.  The only reason for them now seems to be preconnects.
    if (idle_socket_count() > 0) {
      // There's an idle socket in this pool.  Either that's because there's
      // still one in this group, but we got here due to preconnecting
      // bypassing idle sockets, or because there's an idle socket in another
      // group.
      bool closed = CloseOneIdleSocketExceptInGroup(group);
      if (preconnecting && !closed)
        return ERR_PRECONNECT_MAX_SOCKET_LIMIT;
    } else {
      // We could check if we really have a stalled group here, but it
      // requires a scan of all groups, so just flip a flag here, and do the
      // check later.
      request->net_log().AddEvent(NetLog::TYPE_SOCKET_POOL_STALLED_MAX_SOCKETS);
      return ERR_IO_PENDING;
    }
  }

  // We couldn't find a socket to reuse, and there's space to allocate one,
  // so allocate and connect a new one.
  scoped_ptr<ConnectJob> connect_job(
      connect_job_factory_->NewConnectJob(group_name, *request, this));

  int rv = connect_job->Connect();
  if (rv == OK) {
    LogBoundConnectJobToRequest(connect_job->net_log().source(), request);
    if (!preconnecting) {
      HandOutSocket(connect_job->ReleaseSocket(), false /* not reused */,
                    handle, base::TimeDelta(), group, request->net_log());
    } else {
      AddIdleSocket(connect_job->ReleaseSocket(), group);
    }
  } else if (rv == ERR_IO_PENDING) {
    // If we don't have any sockets in this group, set a timer for potentially
    // creating a new one.  If the SYN is lost, this backup socket may complete
    // before the slow socket, improving end user latency.
    if (connect_backup_jobs_enabled_ &&
        group->IsEmpty() && !group->HasBackupJob()) {
      group->StartBackupSocketTimer(group_name, this);
    }

    connecting_socket_count_++;

    group->AddJob(connect_job.release(), preconnecting);
  } else {
    LogBoundConnectJobToRequest(connect_job->net_log().source(), request);
    StreamSocket* error_socket = NULL;
    if (!preconnecting) {
      DCHECK(handle);
      connect_job->GetAdditionalErrorState(handle);
      error_socket = connect_job->ReleaseSocket();
    }
    if (error_socket) {
      HandOutSocket(error_socket, false /* not reused */, handle,
                    base::TimeDelta(), group, request->net_log());
    } else if (group->IsEmpty()) {
      RemoveGroup(group_name);
    }
  }

  return rv;
}
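
// Scans |group|'s idle sockets, deleting any that are no longer usable, and
// hands the best candidate out to |request|.  Used sockets are preferred over
// unused ones and are scored using the global socket reuse policy; if no used
// socket exists, the oldest idle socket is taken.  Returns true if a socket
// was handed out.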
bool ClientSocketPoolBaseHelper::AssignIdleSocketToRequest(
    const Request* request, Group* group) {
  std::list<IdleSocket>* idle_sockets = group->mutable_idle_sockets();
  std::list<IdleSocket>::iterator idle_socket_it = idle_sockets->end();
  double max_score = -1;

  // Iterate through the idle sockets forwards (oldest to newest):
  //   * Delete any disconnected ones.
  //   * If we find a used idle socket, assign it to |idle_socket_it|.  At the
  //     end, |idle_socket_it| will point to the chosen used idle socket.
  for (std::list<IdleSocket>::iterator it = idle_sockets->begin();
       it != idle_sockets->end();) {
    if (!it->socket->IsConnectedAndIdle()) {
      DecrementIdleCount();
      delete it->socket;
      it = idle_sockets->erase(it);
      continue;
    }

    if (it->socket->WasEverUsed()) {
      // We found one we can reuse!
      double score = 0;
      int64 bytes_read = it->socket->NumBytesRead();
      double num_kb = static_cast<double>(bytes_read) / 1024.0;
      int idle_time_sec = (base::TimeTicks::Now() - it->start_time).InSeconds();
      idle_time_sec = std::max(1, idle_time_sec);

      if (g_socket_reuse_policy_penalty_exponent >= 0 && num_kb >= 0) {
        score = num_kb / pow(idle_time_sec,
                             g_socket_reuse_policy_penalty_exponent);
      }

      // Use >= so that ties prefer the more recently used connection.
      if (score >= max_score) {
        idle_socket_it = it;
        max_score = score;
      }
    }

    ++it;
  }

  // If we haven't found an idle socket, that means there are no used idle
  // sockets.  Pick the oldest (first) idle socket (FIFO).

  if (idle_socket_it == idle_sockets->end() && !idle_sockets->empty())
    idle_socket_it = idle_sockets->begin();

  if (idle_socket_it != idle_sockets->end()) {
    DecrementIdleCount();
    base::TimeDelta idle_time =
        base::TimeTicks::Now() - idle_socket_it->start_time;
    IdleSocket idle_socket = *idle_socket_it;
    idle_sockets->erase(idle_socket_it);
    HandOutSocket(
        idle_socket.socket,
        idle_socket.socket->WasEverUsed(),
        request->handle(),
        idle_time,
        group,
        request->net_log());
    return true;
  }

  return false;
}

// static
void ClientSocketPoolBaseHelper::LogBoundConnectJobToRequest(
    const NetLog::Source& connect_job_source, const Request* request) {
  request->net_log().AddEvent(NetLog::TYPE_SOCKET_POOL_BOUND_TO_CONNECT_JOB,
                              connect_job_source.ToEventParametersCallback());
}
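
// Cancels the request associated with |handle|.  If the request has already
// completed and only its callback is pending, the socket is reclaimed (and
// disconnected on error); otherwise the request is removed from its group's
// pending queue, and a connect job may be dropped if the pool is at the
// global socket limit.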
void ClientSocketPoolBaseHelper::CancelRequest(
    const std::string& group_name, ClientSocketHandle* handle) {
  PendingCallbackMap::iterator callback_it = pending_callback_map_.find(handle);
  if (callback_it != pending_callback_map_.end()) {
    int result = callback_it->second.result;
    pending_callback_map_.erase(callback_it);
    StreamSocket* socket = handle->release_socket();
    if (socket) {
      if (result != OK)
        socket->Disconnect();
      ReleaseSocket(handle->group_name(), socket, handle->id());
    }
    return;
  }

  CHECK(ContainsKey(group_map_, group_name));

  Group* group = GetOrCreateGroup(group_name);

  // Search pending_requests for the matching handle.
  RequestQueue::iterator it = group->mutable_pending_requests()->begin();
  for (; it != group->pending_requests().end(); ++it) {
    if ((*it)->handle() == handle) {
      scoped_ptr<const Request> req(RemoveRequestFromQueue(it, group));
      req->net_log().AddEvent(NetLog::TYPE_CANCELLED);
      req->net_log().EndEvent(NetLog::TYPE_SOCKET_POOL);

      // We let the job run, unless we're at the socket limit.
      if (group->jobs().size() && ReachedMaxSocketsLimit()) {
        RemoveConnectJob(*group->jobs().begin(), group);
        CheckForStalledSocketGroups();
      }
      break;
    }
  }
}

bool ClientSocketPoolBaseHelper::HasGroup(const std::string& group_name) const {
  return ContainsKey(group_map_, group_name);
}

void ClientSocketPoolBaseHelper::CloseIdleSockets() {
  CleanupIdleSockets(true);
  DCHECK_EQ(0, idle_socket_count_);
}

int ClientSocketPoolBaseHelper::IdleSocketCountInGroup(
    const std::string& group_name) const {
  GroupMap::const_iterator i = group_map_.find(group_name);
  CHECK(i != group_map_.end());

  return i->second->idle_sockets().size();
}

LoadState ClientSocketPoolBaseHelper::GetLoadState(
    const std::string& group_name,
    const ClientSocketHandle* handle) const {
  if (ContainsKey(pending_callback_map_, handle))
    return LOAD_STATE_CONNECTING;

  if (!ContainsKey(group_map_, group_name)) {
    NOTREACHED() << "ClientSocketPool does not contain group: " << group_name
                 << " for handle: " << handle;
    return LOAD_STATE_IDLE;
  }

  // Can't use operator[] since it is non-const.
  const Group& group = *group_map_.find(group_name)->second;

  // Search pending_requests for the matching handle.
  RequestQueue::const_iterator it = group.pending_requests().begin();
  for (size_t i = 0; it != group.pending_requests().end(); ++it, ++i) {
    if ((*it)->handle() == handle) {
      if (i < group.jobs().size()) {
        LoadState max_state = LOAD_STATE_IDLE;
        for (ConnectJobSet::const_iterator job_it = group.jobs().begin();
             job_it != group.jobs().end(); ++job_it) {
          max_state = std::max(max_state, (*job_it)->GetLoadState());
        }
        return max_state;
      } else {
        // TODO(wtc): Add a state for being on the wait list.
        // See http://crbug.com/5077.
        return LOAD_STATE_IDLE;
      }
    }
  }

  NOTREACHED();
  return LOAD_STATE_IDLE;
}
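
// Builds a DictionaryValue snapshot of the pool's state (socket counts,
// limits, and per-group details) for diagnostic output.  The caller takes
// ownership of the returned value.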
DictionaryValue* ClientSocketPoolBaseHelper::GetInfoAsValue(
    const std::string& name, const std::string& type) const {
  DictionaryValue* dict = new DictionaryValue();
  dict->SetString("name", name);
  dict->SetString("type", type);
  dict->SetInteger("handed_out_socket_count", handed_out_socket_count_);
  dict->SetInteger("connecting_socket_count", connecting_socket_count_);
  dict->SetInteger("idle_socket_count", idle_socket_count_);
  dict->SetInteger("max_socket_count", max_sockets_);
  dict->SetInteger("max_sockets_per_group", max_sockets_per_group_);
  dict->SetInteger("pool_generation_number", pool_generation_number_);

  if (group_map_.empty())
    return dict;

  DictionaryValue* all_groups_dict = new DictionaryValue();
  for (GroupMap::const_iterator it = group_map_.begin();
       it != group_map_.end(); it++) {
    const Group* group = it->second;
    DictionaryValue* group_dict = new DictionaryValue();

    group_dict->SetInteger("pending_request_count",
                           group->pending_requests().size());
    if (!group->pending_requests().empty()) {
      group_dict->SetInteger("top_pending_priority",
                             group->TopPendingPriority());
    }

    group_dict->SetInteger("active_socket_count", group->active_socket_count());

    ListValue* idle_socket_list = new ListValue();
    std::list<IdleSocket>::const_iterator idle_socket;
    for (idle_socket = group->idle_sockets().begin();
         idle_socket != group->idle_sockets().end();
         idle_socket++) {
      int source_id = idle_socket->socket->NetLog().source().id;
      idle_socket_list->Append(Value::CreateIntegerValue(source_id));
    }
    group_dict->Set("idle_sockets", idle_socket_list);

    ListValue* connect_jobs_list = new ListValue();
    std::set<ConnectJob*>::const_iterator job = group->jobs().begin();
    for (job = group->jobs().begin(); job != group->jobs().end(); job++) {
      int source_id = (*job)->net_log().source().id;
      connect_jobs_list->Append(Value::CreateIntegerValue(source_id));
    }
    group_dict->Set("connect_jobs", connect_jobs_list);

    group_dict->SetBoolean("is_stalled",
                           group->IsStalledOnPoolMaxSockets(
                               max_sockets_per_group_));
    group_dict->SetBoolean("has_backup_job", group->HasBackupJob());

    all_groups_dict->SetWithoutPathExpansion(it->first, group_dict);
  }
  dict->Set("groups", all_groups_dict);
  return dict;
}

bool ClientSocketPoolBaseHelper::IdleSocket::ShouldCleanup(
    base::TimeTicks now,
    base::TimeDelta timeout) const {
  bool timed_out = (now - start_time) >= timeout;
  if (timed_out)
    return true;
  if (socket->WasEverUsed())
    return !socket->IsConnectedAndIdle();
  return !socket->IsConnected();
}
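
// Closes idle sockets in every group that have timed out or are no longer
// usable.  If |force| is true, all idle sockets are closed regardless of
// their state.  Groups left empty are removed.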
void ClientSocketPoolBaseHelper::CleanupIdleSockets(bool force) {
  if (idle_socket_count_ == 0)
    return;

  // Retrieve the current time once at the start of the function rather than
  // inside the inner loop, since it shouldn't change by any meaningful amount.
  base::TimeTicks now = base::TimeTicks::Now();

  GroupMap::iterator i = group_map_.begin();
  while (i != group_map_.end()) {
    Group* group = i->second;

    std::list<IdleSocket>::iterator j = group->mutable_idle_sockets()->begin();
    while (j != group->idle_sockets().end()) {
      base::TimeDelta timeout =
          j->socket->WasEverUsed() ?
          used_idle_socket_timeout_ : unused_idle_socket_timeout_;
      if (force || j->ShouldCleanup(now, timeout)) {
        delete j->socket;
        j = group->mutable_idle_sockets()->erase(j);
        DecrementIdleCount();
      } else {
        ++j;
      }
    }

    // Delete the group if it is no longer needed.
    if (group->IsEmpty()) {
      RemoveGroup(i++);
    } else {
      ++i;
    }
  }
}

ClientSocketPoolBaseHelper::Group* ClientSocketPoolBaseHelper::GetOrCreateGroup(
    const std::string& group_name) {
  GroupMap::iterator it = group_map_.find(group_name);
  if (it != group_map_.end())
    return it->second;
  Group* group = new Group;
  group_map_[group_name] = group;
  return group;
}

void ClientSocketPoolBaseHelper::RemoveGroup(const std::string& group_name) {
  GroupMap::iterator it = group_map_.find(group_name);
  CHECK(it != group_map_.end());

  RemoveGroup(it);
}

void ClientSocketPoolBaseHelper::RemoveGroup(GroupMap::iterator it) {
  delete it->second;
  group_map_.erase(it);
}

// static
bool ClientSocketPoolBaseHelper::connect_backup_jobs_enabled() {
  return g_connect_backup_jobs_enabled;
}

// static
bool ClientSocketPoolBaseHelper::set_connect_backup_jobs_enabled(bool enabled) {
  bool old_value = g_connect_backup_jobs_enabled;
  g_connect_backup_jobs_enabled = enabled;
  return old_value;
}

void ClientSocketPoolBaseHelper::EnableConnectBackupJobs() {
  connect_backup_jobs_enabled_ = g_connect_backup_jobs_enabled;
}

void ClientSocketPoolBaseHelper::IncrementIdleCount() {
  if (++idle_socket_count_ == 1 && use_cleanup_timer_)
    StartIdleSocketTimer();
}

void ClientSocketPoolBaseHelper::DecrementIdleCount() {
  if (--idle_socket_count_ == 0)
    timer_.Stop();
}

// static
bool ClientSocketPoolBaseHelper::cleanup_timer_enabled() {
  return g_cleanup_timer_enabled;
}

// static
bool ClientSocketPoolBaseHelper::set_cleanup_timer_enabled(bool enabled) {
  bool old_value = g_cleanup_timer_enabled;
  g_cleanup_timer_enabled = enabled;
  return old_value;
}

void ClientSocketPoolBaseHelper::StartIdleSocketTimer() {
  timer_.Start(FROM_HERE, TimeDelta::FromSeconds(kCleanupInterval), this,
               &ClientSocketPoolBaseHelper::OnCleanupTimerFired);
}
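
// Returns a socket that was previously handed out.  If the socket is still
// usable and belongs to the current pool generation, it is added back to the
// group's idle list; otherwise it is deleted.  Either way, stalled groups are
// then given a chance to make progress.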
void ClientSocketPoolBaseHelper::ReleaseSocket(const std::string& group_name,
                                               StreamSocket* socket,
                                               int id) {
  GroupMap::iterator i = group_map_.find(group_name);
  CHECK(i != group_map_.end());

  Group* group = i->second;

  CHECK_GT(handed_out_socket_count_, 0);
  handed_out_socket_count_--;

  CHECK_GT(group->active_socket_count(), 0);
  group->DecrementActiveSocketCount();

  const bool can_reuse = socket->IsConnectedAndIdle() &&
      id == pool_generation_number_;
  if (can_reuse) {
    // Add it to the idle list.
    AddIdleSocket(socket, group);
    OnAvailableSocketSlot(group_name, group);
  } else {
    delete socket;
  }

  CheckForStalledSocketGroups();
}

void ClientSocketPoolBaseHelper::CheckForStalledSocketGroups() {
  // If we have idle sockets, see if we can give one to the top-stalled group.
  std::string top_group_name;
  Group* top_group = NULL;
  if (!FindTopStalledGroup(&top_group, &top_group_name))
    return;

  if (ReachedMaxSocketsLimit()) {
    if (idle_socket_count() > 0) {
      CloseOneIdleSocket();
    } else {
      // We can't activate more sockets since we're already at our global
      // limit.
      return;
    }
  }

  // Note: we don't loop on waking stalled groups.  If the stalled group is at
  // its limit, we may be left with other stalled groups that could be woken.
  // This isn't optimal, but there is no starvation, so to avoid the looping we
  // leave it at this.
  OnAvailableSocketSlot(top_group_name, top_group);
}

// Search for the highest priority pending request, amongst the groups that
// are not at the |max_sockets_per_group_| limit.  Note: for requests with
// the same priority, the winner is based on group hash ordering (and not
// insertion order).
bool ClientSocketPoolBaseHelper::FindTopStalledGroup(
    Group** group,
    std::string* group_name) const {
  CHECK((group && group_name) || (!group && !group_name));
  Group* top_group = NULL;
  const std::string* top_group_name = NULL;
  bool has_stalled_group = false;
  for (GroupMap::const_iterator i = group_map_.begin();
       i != group_map_.end(); ++i) {
    Group* curr_group = i->second;
    const RequestQueue& queue = curr_group->pending_requests();
    if (queue.empty())
      continue;
    if (curr_group->IsStalledOnPoolMaxSockets(max_sockets_per_group_)) {
      if (!group)
        return true;
      has_stalled_group = true;
      bool has_higher_priority = !top_group ||
          curr_group->TopPendingPriority() > top_group->TopPendingPriority();
      if (has_higher_priority) {
        top_group = curr_group;
        top_group_name = &i->first;
      }
    }
  }

  if (top_group) {
    CHECK(group);
    *group = top_group;
    *group_name = *top_group_name;
  } else {
    CHECK(!has_stalled_group);
  }
  return has_stalled_group;
}
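
// ConnectJob::Delegate implementation.  Invoked when |job| finishes with
// |result|.  On success the new socket is handed to the oldest pending
// request in the group, or parked as idle if none is waiting; on failure any
// error state is propagated to that request and the job is discarded.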
void ClientSocketPoolBaseHelper::OnConnectJobComplete(
    int result, ConnectJob* job) {
  DCHECK_NE(ERR_IO_PENDING, result);
  const std::string group_name = job->group_name();
  GroupMap::iterator group_it = group_map_.find(group_name);
  CHECK(group_it != group_map_.end());
  Group* group = group_it->second;

  scoped_ptr<StreamSocket> socket(job->ReleaseSocket());

  BoundNetLog job_log = job->net_log();

  if (result == OK) {
    DCHECK(socket.get());
    RemoveConnectJob(job, group);
    if (!group->pending_requests().empty()) {
      scoped_ptr<const Request> r(RemoveRequestFromQueue(
          group->mutable_pending_requests()->begin(), group));
      LogBoundConnectJobToRequest(job_log.source(), r.get());
      HandOutSocket(
          socket.release(), false /* unused socket */, r->handle(),
          base::TimeDelta(), group, r->net_log());
      r->net_log().EndEvent(NetLog::TYPE_SOCKET_POOL);
      InvokeUserCallbackLater(r->handle(), r->callback(), result);
    } else {
      AddIdleSocket(socket.release(), group);
      OnAvailableSocketSlot(group_name, group);
      CheckForStalledSocketGroups();
    }
  } else {
    // If we got a socket, it must contain error information, so pass that
    // up so that the caller can retrieve it.
    bool handed_out_socket = false;
    if (!group->pending_requests().empty()) {
      scoped_ptr<const Request> r(RemoveRequestFromQueue(
          group->mutable_pending_requests()->begin(), group));
      LogBoundConnectJobToRequest(job_log.source(), r.get());
      job->GetAdditionalErrorState(r->handle());
      RemoveConnectJob(job, group);
      if (socket.get()) {
        handed_out_socket = true;
        HandOutSocket(socket.release(), false /* unused socket */, r->handle(),
                      base::TimeDelta(), group, r->net_log());
      }
      r->net_log().EndEventWithNetErrorCode(NetLog::TYPE_SOCKET_POOL,
                                            result);
      InvokeUserCallbackLater(r->handle(), r->callback(), result);
    } else {
      RemoveConnectJob(job, group);
    }
    if (!handed_out_socket) {
      OnAvailableSocketSlot(group_name, group);
      CheckForStalledSocketGroups();
    }
  }
}

void ClientSocketPoolBaseHelper::OnIPAddressChanged() {
  FlushWithError(ERR_NETWORK_CHANGED);
}

void ClientSocketPoolBaseHelper::FlushWithError(int error) {
  pool_generation_number_++;
  CancelAllConnectJobs();
  CloseIdleSockets();
  CancelAllRequestsWithError(error);
}

bool ClientSocketPoolBaseHelper::IsStalled() const {
  // If we are not using |max_sockets_|, then clearly we are not stalled.
  if ((handed_out_socket_count_ + connecting_socket_count_) < max_sockets_)
    return false;
  // So, in order to be stalled, we need to be using |max_sockets_| AND we need
  // to have a request that is actually stalled on the global socket limit.  To
  // find such a request, we look for a group that has more requests than jobs
  // AND where the number of jobs is less than |max_sockets_per_group_|.  (If
  // the number of jobs is equal to |max_sockets_per_group_|, then the request
  // is stalled on the group limit, which does not count.)
  for (GroupMap::const_iterator it = group_map_.begin();
       it != group_map_.end(); ++it) {
    if (it->second->IsStalledOnPoolMaxSockets(max_sockets_per_group_))
      return true;
  }
  return false;
}
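
// Removes |job| from |group|, deletes it, and decrements the pool's count of
// connecting sockets.  The group's backup job is cancelled once it has no
// remaining connect jobs.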
void ClientSocketPoolBaseHelper::RemoveConnectJob(ConnectJob* job,
                                                  Group* group) {
  CHECK_GT(connecting_socket_count_, 0);
  connecting_socket_count_--;

  DCHECK(group);
  DCHECK(ContainsKey(group->jobs(), job));
  group->RemoveJob(job);

  // If we've got no more jobs for this group, then we no longer need a
  // backup job either.
  if (group->jobs().empty())
    group->CleanupBackupJob();

  DCHECK(job);
  delete job;
}

void ClientSocketPoolBaseHelper::OnAvailableSocketSlot(
    const std::string& group_name, Group* group) {
  DCHECK(ContainsKey(group_map_, group_name));
  if (group->IsEmpty())
    RemoveGroup(group_name);
  else if (!group->pending_requests().empty())
    ProcessPendingRequest(group_name, group);
}

void ClientSocketPoolBaseHelper::ProcessPendingRequest(
    const std::string& group_name, Group* group) {
  int rv = RequestSocketInternal(group_name,
                                 *group->pending_requests().begin());
  if (rv != ERR_IO_PENDING) {
    scoped_ptr<const Request> request(RemoveRequestFromQueue(
        group->mutable_pending_requests()->begin(), group));
    if (group->IsEmpty())
      RemoveGroup(group_name);

    request->net_log().EndEventWithNetErrorCode(NetLog::TYPE_SOCKET_POOL, rv);
    InvokeUserCallbackLater(request->handle(), request->callback(), rv);
  }
}
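
// Binds |socket| to |handle|, records whether it is being reused and for how
// long it sat idle, updates the handed-out socket counts, and logs the
// binding to |net_log|.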
void ClientSocketPoolBaseHelper::HandOutSocket(
    StreamSocket* socket,
    bool reused,
    ClientSocketHandle* handle,
    base::TimeDelta idle_time,
    Group* group,
    const BoundNetLog& net_log) {
  DCHECK(socket);
  handle->set_socket(socket);
  handle->set_is_reused(reused);
  handle->set_idle_time(idle_time);
  handle->set_pool_id(pool_generation_number_);

  if (reused) {
    net_log.AddEvent(
        NetLog::TYPE_SOCKET_POOL_REUSED_AN_EXISTING_SOCKET,
        NetLog::IntegerCallback(
            "idle_ms", static_cast<int>(idle_time.InMilliseconds())));
  }

  net_log.AddEvent(NetLog::TYPE_SOCKET_POOL_BOUND_TO_SOCKET,
                   socket->NetLog().source().ToEventParametersCallback());

  handed_out_socket_count_++;
  group->IncrementActiveSocketCount();
}

void ClientSocketPoolBaseHelper::AddIdleSocket(
    StreamSocket* socket, Group* group) {
  DCHECK(socket);
  IdleSocket idle_socket;
  idle_socket.socket = socket;
  idle_socket.start_time = base::TimeTicks::Now();

  group->mutable_idle_sockets()->push_back(idle_socket);
  IncrementIdleCount();
}

void ClientSocketPoolBaseHelper::CancelAllConnectJobs() {
  for (GroupMap::iterator i = group_map_.begin(); i != group_map_.end();) {
    Group* group = i->second;
    connecting_socket_count_ -= group->jobs().size();
    group->RemoveAllJobs();

    // Delete the group if it is no longer needed.
    if (group->IsEmpty()) {
      // RemoveGroup() will call .erase(), which will invalidate the iterator,
      // but |i| will already have been incremented to a valid iterator before
      // RemoveGroup() is called.
      RemoveGroup(i++);
    } else {
      ++i;
    }
  }
  DCHECK_EQ(0, connecting_socket_count_);
}

void ClientSocketPoolBaseHelper::CancelAllRequestsWithError(int error) {
  for (GroupMap::iterator i = group_map_.begin(); i != group_map_.end();) {
    Group* group = i->second;

    RequestQueue pending_requests;
    pending_requests.swap(*group->mutable_pending_requests());
    for (RequestQueue::iterator it2 = pending_requests.begin();
         it2 != pending_requests.end(); ++it2) {
      scoped_ptr<const Request> request(*it2);
      InvokeUserCallbackLater(
          request->handle(), request->callback(), error);
    }

    // Delete the group if it is no longer needed.
    if (group->IsEmpty()) {
      // RemoveGroup() will call .erase(), which will invalidate the iterator,
      // but |i| will already have been incremented to a valid iterator before
      // RemoveGroup() is called.
      RemoveGroup(i++);
    } else {
      ++i;
    }
  }
}

bool ClientSocketPoolBaseHelper::ReachedMaxSocketsLimit() const {
  // Each connecting socket will eventually connect and be handed out.
  int total = handed_out_socket_count_ + connecting_socket_count_ +
      idle_socket_count();
  // There can be more sockets than the limit since some requests can ignore
  // the limit.
  if (total < max_sockets_)
    return false;
  return true;
}

bool ClientSocketPoolBaseHelper::CloseOneIdleSocket() {
  if (idle_socket_count() == 0)
    return false;
  return CloseOneIdleSocketExceptInGroup(NULL);
}

bool ClientSocketPoolBaseHelper::CloseOneIdleSocketExceptInGroup(
    const Group* exception_group) {
  CHECK_GT(idle_socket_count(), 0);

  for (GroupMap::iterator i = group_map_.begin(); i != group_map_.end(); ++i) {
    Group* group = i->second;
    if (exception_group == group)
      continue;
    std::list<IdleSocket>* idle_sockets = group->mutable_idle_sockets();

    if (!idle_sockets->empty()) {
      delete idle_sockets->front().socket;
      idle_sockets->pop_front();
      DecrementIdleCount();
      if (group->IsEmpty())
        RemoveGroup(i);

      return true;
    }
  }

  return false;
}

bool ClientSocketPoolBaseHelper::CloseOneIdleConnectionInLayeredPool() {
  // This pool doesn't have any idle sockets.  It's possible that a pool at a
  // higher layer is holding one of this pool's sockets active, even though it
  // is actually idle.  Query the higher layers.
  for (std::set<LayeredPool*>::const_iterator it = higher_layer_pools_.begin();
       it != higher_layer_pools_.end(); ++it) {
    if ((*it)->CloseOneIdleConnection())
      return true;
  }
  return false;
}
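
// Queues |callback| to be run with |rv| as a task on the current message
// loop.  Completion callbacks are never invoked synchronously from inside
// pool methods; InvokeUserCallback() runs them later unless the request has
// been cancelled in the meantime.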
void ClientSocketPoolBaseHelper::InvokeUserCallbackLater(
    ClientSocketHandle* handle, const CompletionCallback& callback, int rv) {
  CHECK(!ContainsKey(pending_callback_map_, handle));
  pending_callback_map_[handle] = CallbackResultPair(callback, rv);
  MessageLoop::current()->PostTask(
      FROM_HERE,
      base::Bind(&ClientSocketPoolBaseHelper::InvokeUserCallback,
                 weak_factory_.GetWeakPtr(), handle));
}

void ClientSocketPoolBaseHelper::InvokeUserCallback(
    ClientSocketHandle* handle) {
  PendingCallbackMap::iterator it = pending_callback_map_.find(handle);

  // Exit if the request has already been cancelled.
  if (it == pending_callback_map_.end())
    return;

  CHECK(!handle->is_initialized());
  CompletionCallback callback = it->second.callback;
  int result = it->second.result;
  pending_callback_map_.erase(it);
  callback.Run(result);
}

ClientSocketPoolBaseHelper::Group::Group()
    : unassigned_job_count_(0),
      active_socket_count_(0),
      ALLOW_THIS_IN_INITIALIZER_LIST(weak_factory_(this)) {}

ClientSocketPoolBaseHelper::Group::~Group() {
  CleanupBackupJob();
  DCHECK_EQ(0u, unassigned_job_count_);
}

void ClientSocketPoolBaseHelper::Group::StartBackupSocketTimer(
    const std::string& group_name,
    ClientSocketPoolBaseHelper* pool) {
  // Only allow one timer pending to create a backup socket.
  if (weak_factory_.HasWeakPtrs())
    return;

  MessageLoop::current()->PostDelayedTask(
      FROM_HERE,
      base::Bind(&Group::OnBackupSocketTimerFired, weak_factory_.GetWeakPtr(),
                 group_name, pool),
      pool->ConnectRetryInterval());
}
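
// Claims one unassigned (preconnected) ConnectJob for an incoming request.
// Returns true if such a job existed, false otherwise.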
bool ClientSocketPoolBaseHelper::Group::TryToUseUnassignedConnectJob() {
  SanityCheck();

  if (unassigned_job_count_ == 0)
    return false;
  --unassigned_job_count_;
  return true;
}

void ClientSocketPoolBaseHelper::Group::AddJob(ConnectJob* job,
                                               bool is_preconnect) {
  SanityCheck();

  if (is_preconnect)
    ++unassigned_job_count_;
  jobs_.insert(job);
}

void ClientSocketPoolBaseHelper::Group::RemoveJob(ConnectJob* job) {
  SanityCheck();

  jobs_.erase(job);
  size_t job_count = jobs_.size();
  if (job_count < unassigned_job_count_)
    unassigned_job_count_ = job_count;
}
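
// Called when the backup socket timer fires.  If the original job is still
// waiting on DNS, or socket limits prevent creating another socket, the timer
// is simply restarted; otherwise a second "backup" ConnectJob is started for
// the oldest pending request so that a lost SYN does not stall the request
// for the full connect timeout.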
void ClientSocketPoolBaseHelper::Group::OnBackupSocketTimerFired(
    std::string group_name,
    ClientSocketPoolBaseHelper* pool) {
  // If there are no more jobs pending, there is no work to do.
  // If we've done our cleanups correctly, this should not happen.
  if (jobs_.empty()) {
    NOTREACHED();
    return;
  }

  // If our old job is waiting on DNS, or if we can't create any sockets
  // right now due to limits, just reset the timer.
  if (pool->ReachedMaxSocketsLimit() ||
      !HasAvailableSocketSlot(pool->max_sockets_per_group_) ||
      (*jobs_.begin())->GetLoadState() == LOAD_STATE_RESOLVING_HOST) {
    StartBackupSocketTimer(group_name, pool);
    return;
  }

  if (pending_requests_.empty())
    return;

  ConnectJob* backup_job = pool->connect_job_factory_->NewConnectJob(
      group_name, **pending_requests_.begin(), pool);
  backup_job->net_log().AddEvent(NetLog::TYPE_SOCKET_BACKUP_CREATED);
  SIMPLE_STATS_COUNTER("socket.backup_created");
  int rv = backup_job->Connect();
  pool->connecting_socket_count_++;
  AddJob(backup_job, false);
  if (rv != ERR_IO_PENDING)
    pool->OnConnectJobComplete(rv, backup_job);
}

void ClientSocketPoolBaseHelper::Group::SanityCheck() {
  DCHECK_LE(unassigned_job_count_, jobs_.size());
}

void ClientSocketPoolBaseHelper::Group::RemoveAllJobs() {
  SanityCheck();

  // Delete active jobs.
  STLDeleteElements(&jobs_);
  unassigned_job_count_ = 0;

  // Cancel the pending backup job.
  weak_factory_.InvalidateWeakPtrs();
}

}  // namespace internal

}  // namespace net