Supervised user whitelists: Cleanup
[chromium-blink-merge.git] / net / socket / client_socket_pool_base.cc
blob3d0ff567b3a6f0068a96f9112bcd38ba19c09456
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
5 #include "net/socket/client_socket_pool_base.h"
7 #include <algorithm>
9 #include "base/compiler_specific.h"
10 #include "base/format_macros.h"
11 #include "base/logging.h"
12 #include "base/message_loop/message_loop.h"
13 #include "base/profiler/scoped_tracker.h"
14 #include "base/stl_util.h"
15 #include "base/strings/string_util.h"
16 #include "base/time/time.h"
17 #include "base/values.h"
18 #include "net/base/net_errors.h"
19 #include "net/log/net_log.h"
21 using base::TimeDelta;
23 namespace net {
namespace {

// Indicate whether we should enable idle socket cleanup timer. When timer is
// disabled, sockets are closed next time a socket request is made.
bool g_cleanup_timer_enabled = true;

// The timeout value, in seconds, used to clean up idle sockets that can't be
// reused.
//
// Note: It's important to close idle sockets that have received data as soon
// as possible because the received data may cause BSOD on Windows XP under
// some conditions. See http://crbug.com/4606.
const int kCleanupInterval = 10;  // DO NOT INCREASE THIS TIMEOUT.

// Indicate whether or not we should establish a new transport layer connection
// after a certain timeout has passed without receiving an ACK.
bool g_connect_backup_jobs_enabled = true;

}  // namespace
// Constructs a ConnectJob for |group_name|. The job starts out |idle_|; the
// connect attempt does not begin until Connect() is called. |delegate| is
// notified on asynchronous completion and must outlive the job (or take
// ownership of it — see NotifyDelegateOfCompletion()).
ConnectJob::ConnectJob(const std::string& group_name,
                       base::TimeDelta timeout_duration,
                       RequestPriority priority,
                       Delegate* delegate,
                       const BoundNetLog& net_log)
    : group_name_(group_name),
      timeout_duration_(timeout_duration),
      priority_(priority),
      delegate_(delegate),
      net_log_(net_log),
      idle_(true) {
  DCHECK(!group_name.empty());
  DCHECK(delegate);
  // Event is ended in the destructor, bracketing the job's whole lifetime.
  net_log.BeginEvent(NetLog::TYPE_SOCKET_POOL_CONNECT_JOB,
                     NetLog::StringCallback("group_name", &group_name_));
}
ConnectJob::~ConnectJob() {
  // Closes the net-log event opened in the constructor.
  net_log().EndEvent(NetLog::TYPE_SOCKET_POOL_CONNECT_JOB);
}
// Transfers ownership of the connected (or error-state) socket to the caller;
// |socket_| is NULL afterwards.
scoped_ptr<StreamSocket> ConnectJob::PassSocket() {
  return socket_.Pass();
}
// Starts the connect attempt. Returns a net error code; ERR_IO_PENDING means
// the subclass will complete asynchronously and call back through |delegate_|.
int ConnectJob::Connect() {
  // A zero timeout means "no timeout": only arm the timer when one was given.
  if (timeout_duration_ != base::TimeDelta())
    timer_.Start(FROM_HERE, timeout_duration_, this, &ConnectJob::OnTimeout);

  idle_ = false;

  LogConnectStart();

  int rv = ConnectInternal();

  if (rv != ERR_IO_PENDING) {
    // Completed synchronously: the delegate will never be called for this
    // job, so drop it to make that explicit.
    LogConnectCompletion(rv);
    delegate_ = NULL;
  }

  return rv;
}
// Takes ownership of |socket| (which may be NULL to clear the current one)
// and logs the association between this job and the socket's net-log source.
void ConnectJob::SetSocket(scoped_ptr<StreamSocket> socket) {
  if (socket) {
    net_log().AddEvent(NetLog::TYPE_CONNECT_JOB_SET_SOCKET,
                       socket->NetLog().source().ToEventParametersCallback());
  }
  socket_ = socket.Pass();
}
// Reports asynchronous completion with result |rv| to the delegate.
// WARNING: the delegate takes ownership of |this| and typically deletes it
// inside OnConnectJobComplete(), so no member may be touched afterwards —
// hence |delegate_| is copied to a local and cleared first.
void ConnectJob::NotifyDelegateOfCompletion(int rv) {
  // The delegate will own |this|.
  Delegate* delegate = delegate_;
  delegate_ = NULL;

  LogConnectCompletion(rv);
  delegate->OnConnectJobComplete(rv, this);
}
// Restarts the timeout timer with |remaining_time| left before OnTimeout()
// fires; used by subclasses when a connect phase changes.
void ConnectJob::ResetTimer(base::TimeDelta remaining_time) {
  timer_.Stop();
  timer_.Start(FROM_HERE, remaining_time, this, &ConnectJob::OnTimeout);
}
// Records the connect start time and opens the connect phase net-log event.
void ConnectJob::LogConnectStart() {
  connect_timing_.connect_start = base::TimeTicks::Now();
  net_log().BeginEvent(NetLog::TYPE_SOCKET_POOL_CONNECT_JOB_CONNECT);
}
// Records the connect end time and closes the connect phase net-log event
// with |net_error| as its result.
void ConnectJob::LogConnectCompletion(int net_error) {
  connect_timing_.connect_end = base::TimeTicks::Now();
  net_log().EndEventWithNetErrorCode(
      NetLog::TYPE_SOCKET_POOL_CONNECT_JOB_CONNECT, net_error);
}
// Timer callback: the connect attempt exceeded |timeout_duration_|.
// Discards any partially-set-up socket and fails the job with ERR_TIMED_OUT.
// Note: NotifyDelegateOfCompletion() may delete |this|.
void ConnectJob::OnTimeout() {
  // Make sure the socket is NULL before calling into |delegate|.
  SetSocket(scoped_ptr<StreamSocket>());

  net_log_.AddEvent(NetLog::TYPE_SOCKET_POOL_CONNECT_JOB_TIMED_OUT);

  NotifyDelegateOfCompletion(ERR_TIMED_OUT);
}
130 namespace internal {
// A pending socket request. |handle| is NULL for preconnect requests (see
// RequestSockets()). Requests that ignore the pool limits must run at
// MAXIMUM_PRIORITY so they cannot be starved behind limit-bound requests.
ClientSocketPoolBaseHelper::Request::Request(
    ClientSocketHandle* handle,
    const CompletionCallback& callback,
    RequestPriority priority,
    bool ignore_limits,
    Flags flags,
    const BoundNetLog& net_log)
    : handle_(handle),
      callback_(callback),
      priority_(priority),
      ignore_limits_(ignore_limits),
      flags_(flags),
      net_log_(net_log) {
  if (ignore_limits_)
    DCHECK_EQ(priority_, MAXIMUM_PRIORITY);
}
149 ClientSocketPoolBaseHelper::Request::~Request() {}
// |pool| is the pool that owns this helper; it is handed to lower-layer pools
// as the HigherLayeredPool interface. |connect_job_factory| is owned by this
// helper. Registers for IP-address-change notifications so in-flight and idle
// sockets can be flushed when the network changes.
ClientSocketPoolBaseHelper::ClientSocketPoolBaseHelper(
    HigherLayeredPool* pool,
    int max_sockets,
    int max_sockets_per_group,
    base::TimeDelta unused_idle_socket_timeout,
    base::TimeDelta used_idle_socket_timeout,
    ConnectJobFactory* connect_job_factory)
    : idle_socket_count_(0),
      connecting_socket_count_(0),
      handed_out_socket_count_(0),
      max_sockets_(max_sockets),
      max_sockets_per_group_(max_sockets_per_group),
      use_cleanup_timer_(g_cleanup_timer_enabled),
      unused_idle_socket_timeout_(unused_idle_socket_timeout),
      used_idle_socket_timeout_(used_idle_socket_timeout),
      connect_job_factory_(connect_job_factory),
      connect_backup_jobs_enabled_(false),
      pool_generation_number_(0),
      pool_(pool),
      weak_factory_(this) {
  // The per-group limit can never exceed the global limit.
  DCHECK_LE(0, max_sockets_per_group);
  DCHECK_LE(max_sockets_per_group, max_sockets);

  NetworkChangeNotifier::AddIPAddressObserver(this);
}
ClientSocketPoolBaseHelper::~ClientSocketPoolBaseHelper() {
  // Clean up any idle sockets and pending connect jobs.  Assert that we have no
  // remaining active sockets or pending requests.  They should have all been
  // cleaned up prior to |this| being destroyed.
  FlushWithError(ERR_ABORTED);
  DCHECK(group_map_.empty());
  DCHECK(pending_callback_map_.empty());
  DCHECK_EQ(0, connecting_socket_count_);
  // Higher-layer pools must have unregistered themselves before teardown.
  CHECK(higher_pools_.empty());

  NetworkChangeNotifier::RemoveIPAddressObserver(this);

  // Remove from lower layer pools.
  for (std::set<LowerLayeredPool*>::iterator it = lower_pools_.begin();
       it != lower_pools_.end();
       ++it) {
    (*it)->RemoveHigherLayeredPool(pool_);
  }
}
// Default instance: null callback, result OK. Needed for map storage.
ClientSocketPoolBaseHelper::CallbackResultPair::CallbackResultPair()
    : result(OK) {
}
// Pairs a user completion callback with the result it should be invoked with.
ClientSocketPoolBaseHelper::CallbackResultPair::CallbackResultPair(
    const CompletionCallback& callback_in, int result_in)
    : callback(callback_in),
      result(result_in) {
}
207 ClientSocketPoolBaseHelper::CallbackResultPair::~CallbackResultPair() {}
// Returns true if this pool — or any pool layered below it — has a request
// that is blocked on the global |max_sockets_| limit (as opposed to its
// per-group limit).
bool ClientSocketPoolBaseHelper::IsStalled() const {
  // If a lower layer pool is stalled, consider |this| stalled as well.
  for (std::set<LowerLayeredPool*>::const_iterator it = lower_pools_.begin();
       it != lower_pools_.end();
       ++it) {
    if ((*it)->IsStalled())
      return true;
  }

  // If fewer than |max_sockets_| are in use, then clearly |this| is not
  // stalled.
  if ((handed_out_socket_count_ + connecting_socket_count_) < max_sockets_)
    return false;
  // So in order to be stalled, |this| must be using at least |max_sockets_| AND
  // |this| must have a request that is actually stalled on the global socket
  // limit.  To find such a request, look for a group that has more requests
  // than jobs AND where the number of sockets is less than
  // |max_sockets_per_group_|.  (If the number of sockets is equal to
  // |max_sockets_per_group_|, then the request is stalled on the group limit,
  // which does not count.)
  for (GroupMap::const_iterator it = group_map_.begin();
       it != group_map_.end(); ++it) {
    if (it->second->CanUseAdditionalSocketSlot(max_sockets_per_group_))
      return true;
  }
  return false;
}
// Registers |lower_pool| below this pool, and symmetrically registers this
// pool (via |pool_|) as a higher-layer pool of |lower_pool|.
void ClientSocketPoolBaseHelper::AddLowerLayeredPool(
    LowerLayeredPool* lower_pool) {
  DCHECK(pool_);
  CHECK(!ContainsKey(lower_pools_, lower_pool));
  lower_pools_.insert(lower_pool);
  lower_pool->AddHigherLayeredPool(pool_);
}
// Registers |higher_pool| as a pool layered on top of this one. Must not be
// registered twice.
void ClientSocketPoolBaseHelper::AddHigherLayeredPool(
    HigherLayeredPool* higher_pool) {
  CHECK(higher_pool);
  CHECK(!ContainsKey(higher_pools_, higher_pool));
  higher_pools_.insert(higher_pool);
}
// Unregisters a previously-added higher-layer pool. CHECKs that it was
// actually registered.
void ClientSocketPoolBaseHelper::RemoveHigherLayeredPool(
    HigherLayeredPool* higher_pool) {
  CHECK(higher_pool);
  CHECK(ContainsKey(higher_pools_, higher_pool));
  higher_pools_.erase(higher_pool);
}
// Requests a socket for |group_name|. Returns OK (socket handed out
// synchronously), a net error, or ERR_IO_PENDING in which case ownership of
// |request| moves into the group's pending queue and the callback fires later.
int ClientSocketPoolBaseHelper::RequestSocket(
    const std::string& group_name,
    scoped_ptr<const Request> request) {
  CHECK(!request->callback().is_null());
  CHECK(request->handle());

  // Cleanup any timed-out idle sockets if no timer is used.
  if (!use_cleanup_timer_)
    CleanupIdleSockets(false);

  request->net_log().BeginEvent(NetLog::TYPE_SOCKET_POOL);
  Group* group = GetOrCreateGroup(group_name);

  int rv = RequestSocketInternal(group_name, *request);
  if (rv != ERR_IO_PENDING) {
    request->net_log().EndEventWithNetErrorCode(NetLog::TYPE_SOCKET_POOL, rv);
    CHECK(!request->handle()->is_initialized());
    request.reset();
  } else {
    group->InsertPendingRequest(request.Pass());
    // Have to do this asynchronously, as closing sockets in higher level pools
    // call back in to |this|, which will cause all sorts of fun and exciting
    // re-entrancy issues if the socket pool is doing something else at the
    // time.
    if (group->CanUseAdditionalSocketSlot(max_sockets_per_group_)) {
      base::MessageLoop::current()->PostTask(
          FROM_HERE,
          base::Bind(
              &ClientSocketPoolBaseHelper::TryToCloseSocketsInLayeredPools,
              weak_factory_.GetWeakPtr()));
    }
  }
  return rv;
}
// Preconnects up to |num_sockets| sockets for |group_name| (capped at the
// per-group limit). |request| must be a preconnect request: no callback and
// no handle. Results are logged but not returned — errors simply stop the
// preconnect loop.
void ClientSocketPoolBaseHelper::RequestSockets(
    const std::string& group_name,
    const Request& request,
    int num_sockets) {
  DCHECK(request.callback().is_null());
  DCHECK(!request.handle());

  // Cleanup any timed out idle sockets if no timer is used.
  if (!use_cleanup_timer_)
    CleanupIdleSockets(false);

  if (num_sockets > max_sockets_per_group_) {
    num_sockets = max_sockets_per_group_;
  }

  request.net_log().BeginEvent(
      NetLog::TYPE_SOCKET_POOL_CONNECTING_N_SOCKETS,
      NetLog::IntegerCallback("num_sockets", num_sockets));

  Group* group = GetOrCreateGroup(group_name);

  // RequestSocketsInternal() may delete the group.
  bool deleted_group = false;

  int rv = OK;
  // Stop once the group already has |num_sockets| active slots, or after at
  // most |num_sockets| attempts.
  for (int num_iterations_left = num_sockets;
       group->NumActiveSocketSlots() < num_sockets &&
       num_iterations_left > 0 ; num_iterations_left--) {
    rv = RequestSocketInternal(group_name, request);
    if (rv < 0 && rv != ERR_IO_PENDING) {
      // We're encountering a synchronous error.  Give up.
      if (!ContainsKey(group_map_, group_name))
        deleted_group = true;
      break;
    }
    if (!ContainsKey(group_map_, group_name)) {
      // Unexpected.  The group should only be getting deleted on synchronous
      // error.
      NOTREACHED();
      deleted_group = true;
      break;
    }
  }

  if (!deleted_group && group->IsEmpty())
    RemoveGroup(group_name);

  if (rv == ERR_IO_PENDING)
    rv = OK;
  request.net_log().EndEventWithNetErrorCode(
      NetLog::TYPE_SOCKET_POOL_CONNECTING_N_SOCKETS, rv);
}
// Core request path, shared by RequestSocket() (handle != NULL) and
// RequestSockets() preconnects (handle == NULL). Tries, in order: an idle
// socket, an already-running unassigned ConnectJob, and finally a brand new
// ConnectJob — subject to the per-group and global socket limits.
// Returns OK, ERR_IO_PENDING, or a synchronous error.
int ClientSocketPoolBaseHelper::RequestSocketInternal(
    const std::string& group_name,
    const Request& request) {
  ClientSocketHandle* const handle = request.handle();
  const bool preconnecting = !handle;
  Group* group = GetOrCreateGroup(group_name);

  if (!(request.flags() & NO_IDLE_SOCKETS)) {
    // Try to reuse a socket.
    if (AssignIdleSocketToRequest(request, group))
      return OK;
  }

  // If there are more ConnectJobs than pending requests, don't need to do
  // anything.  Can just wait for the extra job to connect, and then assign it
  // to the request.
  if (!preconnecting && group->TryToUseUnassignedConnectJob())
    return ERR_IO_PENDING;

  // Can we make another active socket now?
  if (!group->HasAvailableSocketSlot(max_sockets_per_group_) &&
      !request.ignore_limits()) {
    // TODO(willchan): Consider whether or not we need to close a socket in a
    // higher layered group. I don't think this makes sense since we would just
    // reuse that socket then if we needed one and wouldn't make it down to this
    // layer.
    request.net_log().AddEvent(
        NetLog::TYPE_SOCKET_POOL_STALLED_MAX_SOCKETS_PER_GROUP);
    return ERR_IO_PENDING;
  }

  if (ReachedMaxSocketsLimit() && !request.ignore_limits()) {
    // NOTE(mmenke):  Wonder if we really need different code for each case
    // here.  Only reason for them now seems to be preconnects.
    if (idle_socket_count() > 0) {
      // There's an idle socket in this pool.  Either that's because there's
      // still one in this group, but we got here due to preconnecting bypassing
      // idle sockets, or because there's an idle socket in another group.
      bool closed = CloseOneIdleSocketExceptInGroup(group);
      if (preconnecting && !closed)
        return ERR_PRECONNECT_MAX_SOCKET_LIMIT;
    } else {
      // We could check if we really have a stalled group here, but it requires
      // a scan of all groups, so just flip a flag here, and do the check later.
      request.net_log().AddEvent(NetLog::TYPE_SOCKET_POOL_STALLED_MAX_SOCKETS);
      return ERR_IO_PENDING;
    }
  }

  // We couldn't find a socket to reuse, and there's space to allocate one,
  // so allocate and connect a new one.
  scoped_ptr<ConnectJob> connect_job(
      connect_job_factory_->NewConnectJob(group_name, request, this));

  int rv = connect_job->Connect();
  if (rv == OK) {
    LogBoundConnectJobToRequest(connect_job->net_log().source(), request);
    if (!preconnecting) {
      HandOutSocket(connect_job->PassSocket(), ClientSocketHandle::UNUSED,
                    connect_job->connect_timing(), handle, base::TimeDelta(),
                    group, request.net_log());
    } else {
      // Preconnects park the fresh socket in the idle list for later reuse.
      AddIdleSocket(connect_job->PassSocket(), group);
    }
  } else if (rv == ERR_IO_PENDING) {
    // If we don't have any sockets in this group, set a timer for potentially
    // creating a new one.  If the SYN is lost, this backup socket may complete
    // before the slow socket, improving end user latency.
    if (connect_backup_jobs_enabled_ && group->IsEmpty()) {
      group->StartBackupJobTimer(group_name, this);
    }

    connecting_socket_count_++;

    group->AddJob(connect_job.Pass(), preconnecting);
  } else {
    // Synchronous failure.
    LogBoundConnectJobToRequest(connect_job->net_log().source(), request);
    scoped_ptr<StreamSocket> error_socket;
    if (!preconnecting) {
      DCHECK(handle);
      connect_job->GetAdditionalErrorState(handle);
      error_socket = connect_job->PassSocket();
    }
    if (error_socket) {
      // Hand out the error socket so the caller can read error details off it.
      HandOutSocket(error_socket.Pass(), ClientSocketHandle::UNUSED,
                    connect_job->connect_timing(), handle, base::TimeDelta(),
                    group, request.net_log());
    } else if (group->IsEmpty()) {
      RemoveGroup(group_name);
    }
  }

  return rv;
}
// Tries to satisfy |request| with an idle socket from |group|, preferring the
// most-recently-used previously-used socket, falling back to the oldest
// never-used one. Prunes unusable idle sockets along the way. Returns true if
// a socket was handed out.
bool ClientSocketPoolBaseHelper::AssignIdleSocketToRequest(
    const Request& request, Group* group) {
  std::list<IdleSocket>* idle_sockets = group->mutable_idle_sockets();
  std::list<IdleSocket>::iterator idle_socket_it = idle_sockets->end();

  // Iterate through the idle sockets forwards (oldest to newest)
  //   * Delete any disconnected ones.
  //   * If we find a used idle socket, assign to |idle_socket|.  At the end,
  //   the |idle_socket_it| will be set to the newest used idle socket.
  for (std::list<IdleSocket>::iterator it = idle_sockets->begin();
       it != idle_sockets->end();) {
    if (!it->IsUsable()) {
      DecrementIdleCount();
      delete it->socket;
      it = idle_sockets->erase(it);
      continue;
    }

    if (it->socket->WasEverUsed()) {
      // We found one we can reuse!
      idle_socket_it = it;
    }

    ++it;
  }

  // If we haven't found an idle socket, that means there are no used idle
  // sockets.  Pick the oldest (first) idle socket (FIFO).

  if (idle_socket_it == idle_sockets->end() && !idle_sockets->empty())
    idle_socket_it = idle_sockets->begin();

  if (idle_socket_it != idle_sockets->end()) {
    DecrementIdleCount();
    base::TimeDelta idle_time =
        base::TimeTicks::Now() - idle_socket_it->start_time;
    IdleSocket idle_socket = *idle_socket_it;
    idle_sockets->erase(idle_socket_it);
    // TODO(davidben): If |idle_time| is under some low watermark, consider
    // treating as UNUSED rather than UNUSED_IDLE. This will avoid
    // HttpNetworkTransaction retrying on some errors.
    ClientSocketHandle::SocketReuseType reuse_type =
        idle_socket.socket->WasEverUsed() ?
            ClientSocketHandle::REUSED_IDLE :
            ClientSocketHandle::UNUSED_IDLE;
    HandOutSocket(
        scoped_ptr<StreamSocket>(idle_socket.socket),
        reuse_type,
        LoadTimingInfo::ConnectTiming(),
        request.handle(),
        idle_time,
        group,
        request.net_log());
    return true;
  }

  return false;
}
// static
// Net-log helper: records on |request|'s log which ConnectJob it was bound to.
void ClientSocketPoolBaseHelper::LogBoundConnectJobToRequest(
    const NetLog::Source& connect_job_source, const Request& request) {
  request.net_log().AddEvent(NetLog::TYPE_SOCKET_POOL_BOUND_TO_CONNECT_JOB,
                             connect_job_source.ToEventParametersCallback());
}
// Cancels the request identified by |handle|. Two cases: (1) the request has
// already completed and its user callback is merely queued — drop the
// callback and recycle/close the socket; (2) the request is still pending in
// the group's queue — remove it, and possibly reap the now-unneeded
// ConnectJob if we're at the global limit.
void ClientSocketPoolBaseHelper::CancelRequest(
    const std::string& group_name, ClientSocketHandle* handle) {
  PendingCallbackMap::iterator callback_it = pending_callback_map_.find(handle);
  if (callback_it != pending_callback_map_.end()) {
    int result = callback_it->second.result;
    pending_callback_map_.erase(callback_it);
    scoped_ptr<StreamSocket> socket = handle->PassSocket();
    if (socket) {
      // Connect failures must not be returned to the idle pool.
      if (result != OK)
        socket->Disconnect();
      ReleaseSocket(handle->group_name(), socket.Pass(), handle->id());
    }
    return;
  }

  CHECK(ContainsKey(group_map_, group_name));

  Group* group = GetOrCreateGroup(group_name);

  // Search pending_requests for matching handle.
  scoped_ptr<const Request> request =
      group->FindAndRemovePendingRequest(handle);
  if (request) {
    request->net_log().AddEvent(NetLog::TYPE_CANCELLED);
    request->net_log().EndEvent(NetLog::TYPE_SOCKET_POOL);

    // We let the job run, unless we're at the socket limit and there is
    // not another request waiting on the job.
    if (group->jobs().size() > group->pending_request_count() &&
        ReachedMaxSocketsLimit()) {
      RemoveConnectJob(*group->jobs().begin(), group);
      CheckForStalledSocketGroups();
    }
  }
}
// Returns whether a group named |group_name| currently exists in the pool.
bool ClientSocketPoolBaseHelper::HasGroup(const std::string& group_name) const {
  return ContainsKey(group_map_, group_name);
}
// Force-closes every idle socket in every group.
void ClientSocketPoolBaseHelper::CloseIdleSockets() {
  CleanupIdleSockets(true);
  DCHECK_EQ(0, idle_socket_count_);
}
// Returns the number of idle sockets in |group_name|. CHECKs that the group
// exists.
int ClientSocketPoolBaseHelper::IdleSocketCountInGroup(
    const std::string& group_name) const {
  GroupMap::const_iterator i = group_map_.find(group_name);
  CHECK(i != group_map_.end());

  return i->second->idle_sockets().size();
}
// Reports the load state of the request bound to |handle|: CONNECTING once
// its result callback is queued, the oldest ConnectJob's state while a job is
// running for it, or one of the two "waiting" states depending on which limit
// is blocking the group.
LoadState ClientSocketPoolBaseHelper::GetLoadState(
    const std::string& group_name,
    const ClientSocketHandle* handle) const {
  if (ContainsKey(pending_callback_map_, handle))
    return LOAD_STATE_CONNECTING;

  GroupMap::const_iterator group_it = group_map_.find(group_name);
  if (group_it == group_map_.end()) {
    // TODO(mmenke):  This is actually reached in the wild, for unknown reasons.
    // Would be great to understand why, and if it's a bug, fix it.  If not,
    // should have a test for that case.
    NOTREACHED();
    return LOAD_STATE_IDLE;
  }

  const Group& group = *group_it->second;
  if (group.HasConnectJobForHandle(handle)) {
    // Just return the state of the oldest ConnectJob.
    return (*group.jobs().begin())->GetLoadState();
  }

  if (group.CanUseAdditionalSocketSlot(max_sockets_per_group_))
    return LOAD_STATE_WAITING_FOR_STALLED_SOCKET_POOL;
  return LOAD_STATE_WAITING_FOR_AVAILABLE_SOCKET;
}
// Builds a diagnostics dictionary (used by net-internals) describing pool
// counters plus, per group: pending requests, active sockets, idle socket and
// connect-job net-log source IDs, stall state, and the backup-job timer.
// Caller takes ownership of the returned value.
base::DictionaryValue* ClientSocketPoolBaseHelper::GetInfoAsValue(
    const std::string& name, const std::string& type) const {
  base::DictionaryValue* dict = new base::DictionaryValue();
  dict->SetString("name", name);
  dict->SetString("type", type);
  dict->SetInteger("handed_out_socket_count", handed_out_socket_count_);
  dict->SetInteger("connecting_socket_count", connecting_socket_count_);
  dict->SetInteger("idle_socket_count", idle_socket_count_);
  dict->SetInteger("max_socket_count", max_sockets_);
  dict->SetInteger("max_sockets_per_group", max_sockets_per_group_);
  dict->SetInteger("pool_generation_number", pool_generation_number_);

  if (group_map_.empty())
    return dict;

  base::DictionaryValue* all_groups_dict = new base::DictionaryValue();
  for (GroupMap::const_iterator it = group_map_.begin();
       it != group_map_.end(); it++) {
    const Group* group = it->second;
    base::DictionaryValue* group_dict = new base::DictionaryValue();

    group_dict->SetInteger("pending_request_count",
                           group->pending_request_count());
    if (group->has_pending_requests()) {
      group_dict->SetString(
          "top_pending_priority",
          RequestPriorityToString(group->TopPendingPriority()));
    }

    group_dict->SetInteger("active_socket_count", group->active_socket_count());

    base::ListValue* idle_socket_list = new base::ListValue();
    std::list<IdleSocket>::const_iterator idle_socket;
    for (idle_socket = group->idle_sockets().begin();
         idle_socket != group->idle_sockets().end();
         idle_socket++) {
      int source_id = idle_socket->socket->NetLog().source().id;
      idle_socket_list->Append(new base::FundamentalValue(source_id));
    }
    group_dict->Set("idle_sockets", idle_socket_list);

    base::ListValue* connect_jobs_list = new base::ListValue();
    std::list<ConnectJob*>::const_iterator job = group->jobs().begin();
    for (job = group->jobs().begin(); job != group->jobs().end(); job++) {
      int source_id = (*job)->net_log().source().id;
      connect_jobs_list->Append(new base::FundamentalValue(source_id));
    }
    group_dict->Set("connect_jobs", connect_jobs_list);

    group_dict->SetBoolean("is_stalled", group->CanUseAdditionalSocketSlot(
        max_sockets_per_group_));
    group_dict->SetBoolean("backup_job_timer_is_running",
                           group->BackupJobTimerIsRunning());

    // Group names may contain '.', so bypass path expansion.
    all_groups_dict->SetWithoutPathExpansion(it->first, group_dict);
  }
  dict->Set("groups", all_groups_dict);
  return dict;
}
647 bool ClientSocketPoolBaseHelper::IdleSocket::IsUsable() const {
648 if (socket->WasEverUsed())
649 return socket->IsConnectedAndIdle();
650 return socket->IsConnected();
653 bool ClientSocketPoolBaseHelper::IdleSocket::ShouldCleanup(
654 base::TimeTicks now,
655 base::TimeDelta timeout) const {
656 bool timed_out = (now - start_time) >= timeout;
657 if (timed_out)
658 return true;
659 return !IsUsable();
// Closes idle sockets that have timed out or become unusable; with |force|,
// closes all idle sockets unconditionally. Empty groups are removed.
// NOTE(review): iteration uses erase-returned/post-increment iterators to
// stay valid across removals — preserve that pattern.
void ClientSocketPoolBaseHelper::CleanupIdleSockets(bool force) {
  if (idle_socket_count_ == 0)
    return;

  // Current time value. Retrieving it once at the function start rather than
  // inside the inner loop, since it shouldn't change by any meaningful amount.
  base::TimeTicks now = base::TimeTicks::Now();

  GroupMap::iterator i = group_map_.begin();
  while (i != group_map_.end()) {
    Group* group = i->second;

    std::list<IdleSocket>::iterator j = group->mutable_idle_sockets()->begin();
    while (j != group->idle_sockets().end()) {
      // Used and unused sockets have distinct timeouts.
      base::TimeDelta timeout =
          j->socket->WasEverUsed() ?
          used_idle_socket_timeout_ : unused_idle_socket_timeout_;
      if (force || j->ShouldCleanup(now, timeout)) {
        delete j->socket;
        j = group->mutable_idle_sockets()->erase(j);
        DecrementIdleCount();
      } else {
        ++j;
      }
    }

    // Delete group if no longer needed.
    if (group->IsEmpty()) {
      // Post-increment keeps |i| valid across RemoveGroup()'s erase.
      RemoveGroup(i++);
    } else {
      ++i;
    }
  }
}
697 ClientSocketPoolBaseHelper::Group* ClientSocketPoolBaseHelper::GetOrCreateGroup(
698 const std::string& group_name) {
699 GroupMap::iterator it = group_map_.find(group_name);
700 if (it != group_map_.end())
701 return it->second;
702 Group* group = new Group;
703 group_map_[group_name] = group;
704 return group;
// Removes (and deletes) the group named |group_name|. CHECKs that it exists.
void ClientSocketPoolBaseHelper::RemoveGroup(const std::string& group_name) {
  GroupMap::iterator it = group_map_.find(group_name);
  CHECK(it != group_map_.end());

  RemoveGroup(it);
}
// Deletes the map-owned Group and erases its entry; |it| is invalid after.
void ClientSocketPoolBaseHelper::RemoveGroup(GroupMap::iterator it) {
  delete it->second;
  group_map_.erase(it);
}
// static
// Returns the process-wide backup-connect-job setting.
bool ClientSocketPoolBaseHelper::connect_backup_jobs_enabled() {
  return g_connect_backup_jobs_enabled;
}
724 // static
725 bool ClientSocketPoolBaseHelper::set_connect_backup_jobs_enabled(bool enabled) {
726 bool old_value = g_connect_backup_jobs_enabled;
727 g_connect_backup_jobs_enabled = enabled;
728 return old_value;
// Latches the current global backup-job setting into this pool instance.
void ClientSocketPoolBaseHelper::EnableConnectBackupJobs() {
  connect_backup_jobs_enabled_ = g_connect_backup_jobs_enabled;
}
// Bumps the idle-socket count; the cleanup timer runs only while there is at
// least one idle socket, so start it on the 0 -> 1 transition.
void ClientSocketPoolBaseHelper::IncrementIdleCount() {
  if (++idle_socket_count_ == 1 && use_cleanup_timer_)
    StartIdleSocketTimer();
}
// Drops the idle-socket count; stop the cleanup timer on the 1 -> 0
// transition since there is nothing left to reap.
void ClientSocketPoolBaseHelper::DecrementIdleCount() {
  if (--idle_socket_count_ == 0)
    timer_.Stop();
}
// static
// Returns the process-wide idle-socket cleanup-timer setting.
bool ClientSocketPoolBaseHelper::cleanup_timer_enabled() {
  return g_cleanup_timer_enabled;
}
750 // static
751 bool ClientSocketPoolBaseHelper::set_cleanup_timer_enabled(bool enabled) {
752 bool old_value = g_cleanup_timer_enabled;
753 g_cleanup_timer_enabled = enabled;
754 return old_value;
// Starts the repeating idle-socket cleanup timer (fires every
// kCleanupInterval seconds).
void ClientSocketPoolBaseHelper::StartIdleSocketTimer() {
  timer_.Start(FROM_HERE, TimeDelta::FromSeconds(kCleanupInterval), this,
               &ClientSocketPoolBaseHelper::OnCleanupTimerFired);
}
// Returns a handed-out socket to the pool. The socket goes back to the idle
// list only if it is still connected-and-idle AND |id| matches the current
// generation number (sockets from before a FlushWithError() are stale and
// are closed instead).
void ClientSocketPoolBaseHelper::ReleaseSocket(const std::string& group_name,
                                               scoped_ptr<StreamSocket> socket,
                                               int id) {
  GroupMap::iterator i = group_map_.find(group_name);
  CHECK(i != group_map_.end());

  Group* group = i->second;

  CHECK_GT(handed_out_socket_count_, 0);
  handed_out_socket_count_--;

  CHECK_GT(group->active_socket_count(), 0);
  group->DecrementActiveSocketCount();

  const bool can_reuse = socket->IsConnectedAndIdle() &&
      id == pool_generation_number_;
  if (can_reuse) {
    // Add it to the idle list.
    AddIdleSocket(socket.Pass(), group);
    OnAvailableSocketSlot(group_name, group);
  } else {
    // Closing the socket frees a global slot either way.
    socket.reset();
  }

  CheckForStalledSocketGroups();
}
// After a slot frees up, finds the highest-priority group stalled on the
// global limit and wakes it; if none here, gives a lower-layer stalled pool a
// chance by closing one idle socket.
void ClientSocketPoolBaseHelper::CheckForStalledSocketGroups() {
  // If we have idle sockets, see if we can give one to the top-stalled group.
  std::string top_group_name;
  Group* top_group = NULL;
  if (!FindTopStalledGroup(&top_group, &top_group_name)) {
    // There may still be a stalled group in a lower level pool.
    for (std::set<LowerLayeredPool*>::iterator it = lower_pools_.begin();
         it != lower_pools_.end();
         ++it) {
      if ((*it)->IsStalled()) {
        CloseOneIdleSocket();
        break;
      }
    }
    return;
  }

  if (ReachedMaxSocketsLimit()) {
    if (idle_socket_count() > 0) {
      // Free a slot for the stalled group by closing an idle socket.
      CloseOneIdleSocket();
    } else {
      // We can't activate more sockets since we're already at our global
      // limit.
      return;
    }
  }

  // Note:  we don't loop on waking stalled groups.  If the stalled group is at
  //        its limit, may be left with other stalled groups that could be
  //        woken.  This isn't optimal, but there is no starvation, so to avoid
  //        the looping we leave it at this.
  OnAvailableSocketSlot(top_group_name, top_group);
}
// Search for the highest priority pending request, amongst the groups that
// are not at the |max_sockets_per_group_| limit. Note: for requests with
// the same priority, the winner is based on group hash ordering (and not
// insertion order).
//
// Both out-params may be NULL (together), in which case this is a pure
// existence check that returns as soon as any stalled group is found.
bool ClientSocketPoolBaseHelper::FindTopStalledGroup(
    Group** group,
    std::string* group_name) const {
  CHECK((group && group_name) || (!group && !group_name));
  Group* top_group = NULL;
  const std::string* top_group_name = NULL;
  bool has_stalled_group = false;
  for (GroupMap::const_iterator i = group_map_.begin();
       i != group_map_.end(); ++i) {
    Group* curr_group = i->second;
    if (!curr_group->has_pending_requests())
      continue;
    if (curr_group->CanUseAdditionalSocketSlot(max_sockets_per_group_)) {
      if (!group)
        return true;
      has_stalled_group = true;
      bool has_higher_priority = !top_group ||
          curr_group->TopPendingPriority() > top_group->TopPendingPriority();
      if (has_higher_priority) {
        top_group = curr_group;
        top_group_name = &i->first;
      }
    }
  }

  if (top_group) {
    CHECK(group);
    *group = top_group;
    *group_name = *top_group_name;
  } else {
    CHECK(!has_stalled_group);
  }
  return has_stalled_group;
}
// ConnectJob::Delegate implementation: a job for |group| finished with
// |result|. On success the socket goes to the oldest pending request (or the
// idle list); on failure the error state/socket is propagated to the oldest
// pending request, if any. |job| is owned here and freed via
// RemoveConnectJob() on every path.
void ClientSocketPoolBaseHelper::OnConnectJobComplete(
    int result, ConnectJob* job) {
  DCHECK_NE(ERR_IO_PENDING, result);
  const std::string group_name = job->group_name();
  GroupMap::iterator group_it = group_map_.find(group_name);
  CHECK(group_it != group_map_.end());
  Group* group = group_it->second;

  scoped_ptr<StreamSocket> socket = job->PassSocket();

  // Copies of these are needed because |job| may be deleted before they are
  // accessed.
  BoundNetLog job_log = job->net_log();
  LoadTimingInfo::ConnectTiming connect_timing = job->connect_timing();

  // RemoveConnectJob(job, _) must be called by all branches below;
  // otherwise, |job| will be leaked.

  if (result == OK) {
    DCHECK(socket.get());
    RemoveConnectJob(job, group);
    scoped_ptr<const Request> request = group->PopNextPendingRequest();
    if (request) {
      LogBoundConnectJobToRequest(job_log.source(), *request);
      HandOutSocket(
          socket.Pass(), ClientSocketHandle::UNUSED, connect_timing,
          request->handle(), base::TimeDelta(), group, request->net_log());
      request->net_log().EndEvent(NetLog::TYPE_SOCKET_POOL);
      InvokeUserCallbackLater(request->handle(), request->callback(), result);
    } else {
      // No request waiting (e.g. a preconnect): park the socket as idle.
      AddIdleSocket(socket.Pass(), group);
      OnAvailableSocketSlot(group_name, group);
      CheckForStalledSocketGroups();
    }
  } else {
    // If we got a socket, it must contain error information so pass that
    // up so that the caller can retrieve it.
    bool handed_out_socket = false;
    scoped_ptr<const Request> request = group->PopNextPendingRequest();
    if (request) {
      LogBoundConnectJobToRequest(job_log.source(), *request);
      // Error state must be extracted from |job| before it is deleted below.
      job->GetAdditionalErrorState(request->handle());
      RemoveConnectJob(job, group);
      if (socket.get()) {
        handed_out_socket = true;
        HandOutSocket(socket.Pass(), ClientSocketHandle::UNUSED,
                      connect_timing, request->handle(), base::TimeDelta(),
                      group, request->net_log());
      }
      request->net_log().EndEventWithNetErrorCode(
          NetLog::TYPE_SOCKET_POOL, result);
      InvokeUserCallbackLater(request->handle(), request->callback(), result);
    } else {
      RemoveConnectJob(job, group);
    }
    if (!handed_out_socket) {
      OnAvailableSocketSlot(group_name, group);
      CheckForStalledSocketGroups();
    }
  }
}
// IPAddressObserver: existing sockets are tied to the old network, so flush
// everything with ERR_NETWORK_CHANGED.
void ClientSocketPoolBaseHelper::OnIPAddressChanged() {
  FlushWithError(ERR_NETWORK_CHANGED);
}
// Aborts all connect jobs, closes all idle sockets, and fails all pending
// requests with |error|. Bumping the generation number makes sockets that
// are currently handed out unreusable when they are later released (see
// ReleaseSocket()).
void ClientSocketPoolBaseHelper::FlushWithError(int error) {
  pool_generation_number_++;
  CancelAllConnectJobs();
  CloseIdleSockets();
  CancelAllRequestsWithError(error);
}
// Accounts for and removes |job| from |group|; Group::RemoveJob() deletes it.
void ClientSocketPoolBaseHelper::RemoveConnectJob(ConnectJob* job,
                                                  Group* group) {
  CHECK_GT(connecting_socket_count_, 0);
  connecting_socket_count_--;

  DCHECK(group);
  group->RemoveJob(job);
}
// Called when a socket slot frees up in |group|: either garbage-collect the
// now-empty group or try to service its next pending request.
void ClientSocketPoolBaseHelper::OnAvailableSocketSlot(
    const std::string& group_name, Group* group) {
  DCHECK(ContainsKey(group_map_, group_name));
  if (group->IsEmpty()) {
    RemoveGroup(group_name);
  } else if (group->has_pending_requests()) {
    ProcessPendingRequest(group_name, group);
  }
}
// Attempts to service |group|'s next pending request. On synchronous
// completion (success or error), pops the request and schedules its user
// callback; on ERR_IO_PENDING the request stays queued.
void ClientSocketPoolBaseHelper::ProcessPendingRequest(
    const std::string& group_name, Group* group) {
  const Request* next_request = group->GetNextPendingRequest();
  DCHECK(next_request);

  // If the group has no idle sockets, and can't make use of an additional slot,
  // either because it's at the limit or because it's at the socket per group
  // limit, then there's nothing to do.
  if (group->idle_sockets().empty() &&
      !group->CanUseAdditionalSocketSlot(max_sockets_per_group_)) {
    return;
  }

  int rv = RequestSocketInternal(group_name, *next_request);
  if (rv != ERR_IO_PENDING) {
    scoped_ptr<const Request> request = group->PopNextPendingRequest();
    DCHECK(request);
    if (group->IsEmpty())
      RemoveGroup(group_name);

    request->net_log().EndEventWithNetErrorCode(NetLog::TYPE_SOCKET_POOL, rv);
    InvokeUserCallbackLater(request->handle(), request->callback(), rv);
  }
}
// Transfers ownership of |socket| into |handle| and updates the pool's
// active-socket bookkeeping, logging the binding to |net_log|.
void ClientSocketPoolBaseHelper::HandOutSocket(
    scoped_ptr<StreamSocket> socket,
    ClientSocketHandle::SocketReuseType reuse_type,
    const LoadTimingInfo::ConnectTiming& connect_timing,
    ClientSocketHandle* handle,
    base::TimeDelta idle_time,
    Group* group,
    const BoundNetLog& net_log) {
  DCHECK(socket);
  handle->SetSocket(socket.Pass());
  handle->set_reuse_type(reuse_type);
  handle->set_idle_time(idle_time);
  // Stamp the handle with the current generation; FlushWithError() bumps
  // |pool_generation_number_|, presumably so stale sockets can be detected
  // when released — TODO confirm against release path.
  handle->set_pool_id(pool_generation_number_);
  handle->set_connect_timing(connect_timing);

  if (handle->is_reused()) {
    net_log.AddEvent(
        NetLog::TYPE_SOCKET_POOL_REUSED_AN_EXISTING_SOCKET,
        NetLog::IntegerCallback(
            "idle_ms", static_cast<int>(idle_time.InMilliseconds())));
  }

  net_log.AddEvent(
      NetLog::TYPE_SOCKET_POOL_BOUND_TO_SOCKET,
      handle->socket()->NetLog().source().ToEventParametersCallback());

  // The socket is now in active use by a caller.
  handed_out_socket_count_++;
  group->IncrementActiveSocketCount();
}
// Parks |socket| on |group|'s idle list, recording when it went idle.
void ClientSocketPoolBaseHelper::AddIdleSocket(
    scoped_ptr<StreamSocket> socket,
    Group* group) {
  DCHECK(socket);
  // IdleSocket holds a raw owning pointer; ownership leaves the scoped_ptr
  // here and is reclaimed when the idle socket is later closed or reused.
  IdleSocket idle_socket;
  idle_socket.socket = socket.release();
  // Used later to compute how long the socket has been idle.
  idle_socket.start_time = base::TimeTicks::Now();

  group->mutable_idle_sockets()->push_back(idle_socket);
  IncrementIdleCount();
}
// Aborts and deletes every in-flight ConnectJob in every group; idle and
// handed-out sockets are left alone.
void ClientSocketPoolBaseHelper::CancelAllConnectJobs() {
  for (GroupMap::iterator i = group_map_.begin(); i != group_map_.end();) {
    Group* group = i->second;
    connecting_socket_count_ -= group->jobs().size();
    group->RemoveAllJobs();

    // Delete group if no longer needed.
    if (group->IsEmpty()) {
      // RemoveGroup() will call .erase() which will invalidate the iterator,
      // but i will already have been incremented to a valid iterator before
      // RemoveGroup() is called.
      RemoveGroup(i++);
    } else {
      ++i;
    }
  }
  // Every connecting socket belonged to some group's job list, so the
  // counter must be back to zero now.
  DCHECK_EQ(0, connecting_socket_count_);
}
// Fails every queued (not yet handed-out) request in every group with
// |error|, delivering results through the asynchronous callback queue.
void ClientSocketPoolBaseHelper::CancelAllRequestsWithError(int error) {
  for (GroupMap::iterator i = group_map_.begin(); i != group_map_.end();) {
    Group* group = i->second;

    // Drain this group's pending-request queue.
    while (true) {
      scoped_ptr<const Request> request = group->PopNextPendingRequest();
      if (!request)
        break;
      InvokeUserCallbackLater(request->handle(), request->callback(), error);
    }

    // Delete group if no longer needed.
    if (group->IsEmpty()) {
      // RemoveGroup() will call .erase() which will invalidate the iterator,
      // but i will already have been incremented to a valid iterator before
      // RemoveGroup() is called.
      RemoveGroup(i++);
    } else {
      ++i;
    }
  }
}
1063 bool ClientSocketPoolBaseHelper::ReachedMaxSocketsLimit() const {
1064 // Each connecting socket will eventually connect and be handed out.
1065 int total = handed_out_socket_count_ + connecting_socket_count_ +
1066 idle_socket_count();
1067 // There can be more sockets than the limit since some requests can ignore
1068 // the limit
1069 if (total < max_sockets_)
1070 return false;
1071 return true;
1074 bool ClientSocketPoolBaseHelper::CloseOneIdleSocket() {
1075 if (idle_socket_count() == 0)
1076 return false;
1077 return CloseOneIdleSocketExceptInGroup(NULL);
// Closes the first idle socket found in any group other than
// |exception_group| (which may be NULL to permit all groups). Returns true
// if a socket was closed.
bool ClientSocketPoolBaseHelper::CloseOneIdleSocketExceptInGroup(
    const Group* exception_group) {
  // Callers must have checked there is something to close.
  CHECK_GT(idle_socket_count(), 0);

  for (GroupMap::iterator i = group_map_.begin(); i != group_map_.end(); ++i) {
    Group* group = i->second;
    if (exception_group == group)
      continue;
    std::list<IdleSocket>* idle_sockets = group->mutable_idle_sockets();

    if (!idle_sockets->empty()) {
      // IdleSocket holds a raw owning pointer, so delete it explicitly.
      delete idle_sockets->front().socket;
      idle_sockets->pop_front();
      DecrementIdleCount();
      // Erasing the group invalidates |i|, which is safe only because we
      // return immediately afterwards.
      if (group->IsEmpty())
        RemoveGroup(i);

      return true;
    }
  }

  return false;
}
1104 bool ClientSocketPoolBaseHelper::CloseOneIdleConnectionInHigherLayeredPool() {
1105 // This pool doesn't have any idle sockets. It's possible that a pool at a
1106 // higher layer is holding one of this sockets active, but it's actually idle.
1107 // Query the higher layers.
1108 for (std::set<HigherLayeredPool*>::const_iterator it = higher_pools_.begin();
1109 it != higher_pools_.end(); ++it) {
1110 if ((*it)->CloseOneIdleConnection())
1111 return true;
1113 return false;
// Schedules |callback| to be run with |rv| on a later message-loop turn,
// recording it in |pending_callback_map_| so it can be found (or cancelled)
// before it fires.
void ClientSocketPoolBaseHelper::InvokeUserCallbackLater(
    ClientSocketHandle* handle, const CompletionCallback& callback, int rv) {
  // A handle may have at most one pending callback at a time.
  CHECK(!ContainsKey(pending_callback_map_, handle));
  pending_callback_map_[handle] = CallbackResultPair(callback, rv);
  // The weak pointer makes the posted task a no-op if this pool is
  // destroyed before it runs.
  base::MessageLoop::current()->PostTask(
      FROM_HERE,
      base::Bind(&ClientSocketPoolBaseHelper::InvokeUserCallback,
                 weak_factory_.GetWeakPtr(), handle));
}
// Runs the callback previously queued for |handle| by
// InvokeUserCallbackLater(), unless it has since been cancelled.
void ClientSocketPoolBaseHelper::InvokeUserCallback(
    ClientSocketHandle* handle) {
  // TODO(pkasting): Remove ScopedTracker below once crbug.com/455884 is fixed.
  tracked_objects::ScopedTracker tracking_profile(
      FROM_HERE_WITH_EXPLICIT_FUNCTION(
          "455884 ClientSocketPoolBaseHelper::InvokeUserCallback"));
  PendingCallbackMap::iterator it = pending_callback_map_.find(handle);

  // Exit if the request has already been cancelled.
  if (it == pending_callback_map_.end())
    return;

  CHECK(!handle->is_initialized());
  // Copy the callback and result out of the map and erase the entry before
  // running the callback, since running it may reenter this pool and mutate
  // |pending_callback_map_|.
  CompletionCallback callback = it->second.callback;
  int result = it->second.result;
  pending_callback_map_.erase(it);
  callback.Run(result);
}
1145 void ClientSocketPoolBaseHelper::TryToCloseSocketsInLayeredPools() {
1146 while (IsStalled()) {
1147 // Closing a socket will result in calling back into |this| to use the freed
1148 // socket slot, so nothing else is needed.
1149 if (!CloseOneIdleConnectionInHigherLayeredPool())
1150 return;
// A new group starts with no jobs, no unassigned (preconnect) jobs, no
// active sockets, and a pending-request queue sized for every priority
// level.
ClientSocketPoolBaseHelper::Group::Group()
    : unassigned_job_count_(0),
      pending_requests_(NUM_PRIORITIES),
      active_socket_count_(0) {}
ClientSocketPoolBaseHelper::Group::~Group() {
  // All unassigned (preconnect) jobs should have been consumed or removed
  // before the group is destroyed.
  DCHECK_EQ(0u, unassigned_job_count_);
}
// Arms the one-shot timer that fires OnBackupJobTimerFired() after the
// pool's connect retry interval, potentially spawning a backup ConnectJob.
void ClientSocketPoolBaseHelper::Group::StartBackupJobTimer(
    const std::string& group_name,
    ClientSocketPoolBaseHelper* pool) {
  // Only allow one timer to run at a time.
  if (BackupJobTimerIsRunning())
    return;

  // Unretained here is okay because |backup_job_timer_| is
  // automatically cancelled when it's destroyed.
  backup_job_timer_.Start(
      FROM_HERE, pool->ConnectRetryInterval(),
      base::Bind(&Group::OnBackupJobTimerFired, base::Unretained(this),
                 group_name, pool));
}
// Returns whether the backup-job timer is currently armed.
bool ClientSocketPoolBaseHelper::Group::BackupJobTimerIsRunning() const {
  return backup_job_timer_.IsRunning();
}
// Claims one previously-unassigned (preconnect) job for a request. Returns
// false if every job is already assigned.
bool ClientSocketPoolBaseHelper::Group::TryToUseUnassignedConnectJob() {
  SanityCheck();

  if (unassigned_job_count_ == 0)
    return false;
  --unassigned_job_count_;
  return true;
}
// Takes ownership of |job| and tracks it in |jobs_|. Preconnect jobs start
// out unassigned to any particular request.
void ClientSocketPoolBaseHelper::Group::AddJob(scoped_ptr<ConnectJob> job,
                                               bool is_preconnect) {
  SanityCheck();

  if (is_preconnect)
    ++unassigned_job_count_;
  // |jobs_| holds raw owning pointers; the job is deleted in RemoveJob() or
  // RemoveAllJobs().
  jobs_.push_back(job.release());
}
1200 void ClientSocketPoolBaseHelper::Group::RemoveJob(ConnectJob* job) {
1201 scoped_ptr<ConnectJob> owned_job(job);
1202 SanityCheck();
1204 // Check that |job| is in the list.
1205 DCHECK_EQ(*std::find(jobs_.begin(), jobs_.end(), job), job);
1206 jobs_.remove(job);
1207 size_t job_count = jobs_.size();
1208 if (job_count < unassigned_job_count_)
1209 unassigned_job_count_ = job_count;
1211 // If we've got no more jobs for this group, then we no longer need a
1212 // backup job either.
1213 if (jobs_.empty())
1214 backup_job_timer_.Stop();
// Timer callback: the primary connect job has taken too long, so consider
// starting a backup ConnectJob for this group's top pending request.
void ClientSocketPoolBaseHelper::Group::OnBackupJobTimerFired(
    std::string group_name,
    ClientSocketPoolBaseHelper* pool) {
  // If there are no more jobs pending, there is no work to do.
  // If we've done our cleanups correctly, this should not happen.
  if (jobs_.empty()) {
    NOTREACHED();
    return;
  }

  // If our old job is waiting on DNS, or if we can't create any sockets
  // right now due to limits, just reset the timer.
  if (pool->ReachedMaxSocketsLimit() ||
      !HasAvailableSocketSlot(pool->max_sockets_per_group_) ||
      (*jobs_.begin())->GetLoadState() == LOAD_STATE_RESOLVING_HOST) {
    StartBackupJobTimer(group_name, pool);
    return;
  }

  // With no request to serve, a backup connection has no purpose.
  if (pending_requests_.empty())
    return;

  // Build and start a second connect job for the highest-priority pending
  // request, in case the original job never completes.
  scoped_ptr<ConnectJob> backup_job =
      pool->connect_job_factory_->NewConnectJob(
          group_name, *pending_requests_.FirstMax().value(), pool);
  backup_job->net_log().AddEvent(NetLog::TYPE_BACKUP_CONNECT_JOB_CREATED);
  int rv = backup_job->Connect();
  pool->connecting_socket_count_++;
  // Keep a raw pointer first: AddJob() takes ownership of |backup_job|.
  ConnectJob* raw_backup_job = backup_job.get();
  AddJob(backup_job.Pass(), false);
  // A synchronous completion must be reported to the pool immediately.
  if (rv != ERR_IO_PENDING)
    pool->OnConnectJobComplete(rv, raw_backup_job);
}
void ClientSocketPoolBaseHelper::Group::SanityCheck() {
  // Invariant: unassigned (preconnect) jobs are a subset of all jobs.
  DCHECK_LE(unassigned_job_count_, jobs_.size());
}
// Deletes every ConnectJob in this group and cancels the backup timer.
void ClientSocketPoolBaseHelper::Group::RemoveAllJobs() {
  SanityCheck();

  // Delete active jobs. |jobs_| holds raw owning pointers.
  STLDeleteElements(&jobs_);
  unassigned_job_count_ = 0;

  // Stop backup job timer.
  backup_job_timer_.Stop();
}
1266 const ClientSocketPoolBaseHelper::Request*
1267 ClientSocketPoolBaseHelper::Group::GetNextPendingRequest() const {
1268 return
1269 pending_requests_.empty() ? NULL : pending_requests_.FirstMax().value();
// Returns whether |handle|'s pending request is near enough to the front of
// the queue to be served by one of this group's ConnectJobs.
bool ClientSocketPoolBaseHelper::Group::HasConnectJobForHandle(
    const ClientSocketHandle* handle) const {
  // Search the first |jobs_.size()| pending requests for |handle|.
  // If it's farther back in the deque than that, it doesn't have a
  // corresponding ConnectJob.
  size_t i = 0;
  for (RequestQueue::Pointer pointer = pending_requests_.FirstMax();
       !pointer.is_null() && i < jobs_.size();
       pointer = pending_requests_.GetNextTowardsLastMin(pointer), ++i) {
    if (pointer.value()->handle() == handle)
      return true;
  }
  return false;
}
// Queues |request| by priority; ownership of the released pointer passes to
// |pending_requests_|.
void ClientSocketPoolBaseHelper::Group::InsertPendingRequest(
    scoped_ptr<const Request> request) {
  // This value must be cached before we release |request|.
  RequestPriority priority = request->priority();
  if (request->ignore_limits()) {
    // Put requests with ignore_limits == true (which should have
    // priority == MAXIMUM_PRIORITY) ahead of other requests with
    // MAXIMUM_PRIORITY.
    DCHECK_EQ(priority, MAXIMUM_PRIORITY);
    pending_requests_.InsertAtFront(request.release(), priority);
  } else {
    pending_requests_.Insert(request.release(), priority);
  }
}
1302 scoped_ptr<const ClientSocketPoolBaseHelper::Request>
1303 ClientSocketPoolBaseHelper::Group::PopNextPendingRequest() {
1304 if (pending_requests_.empty())
1305 return scoped_ptr<const ClientSocketPoolBaseHelper::Request>();
1306 return RemovePendingRequest(pending_requests_.FirstMax());
// Scans the queue from highest to lowest priority for the request bound to
// |handle|, removing and returning it. Returns NULL if no such request is
// queued.
scoped_ptr<const ClientSocketPoolBaseHelper::Request>
ClientSocketPoolBaseHelper::Group::FindAndRemovePendingRequest(
    ClientSocketHandle* handle) {
  for (RequestQueue::Pointer pointer = pending_requests_.FirstMax();
       !pointer.is_null();
       pointer = pending_requests_.GetNextTowardsLastMin(pointer)) {
    if (pointer.value()->handle() == handle) {
      scoped_ptr<const Request> request = RemovePendingRequest(pointer);
      return request.Pass();
    }
  }
  return scoped_ptr<const ClientSocketPoolBaseHelper::Request>();
}
// Detaches the request at |pointer| from the queue and returns ownership of
// it to the caller.
scoped_ptr<const ClientSocketPoolBaseHelper::Request>
ClientSocketPoolBaseHelper::Group::RemovePendingRequest(
    const RequestQueue::Pointer& pointer) {
  // Adopt the raw pointer before erasing its queue entry.
  scoped_ptr<const Request> request(pointer.value());
  pending_requests_.Erase(pointer);
  // If there are no more requests, kill the backup timer.
  if (pending_requests_.empty())
    backup_job_timer_.Stop();
  return request.Pass();
}
1334 } // namespace internal
1336 } // namespace net