content/renderer/scheduler/resource_dispatch_throttler_unittest.cc
// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "content/renderer/scheduler/resource_dispatch_throttler.h"

#include "base/memory/scoped_vector.h"
#include "content/common/resource_messages.h"
#include "content/test/fake_renderer_scheduler.h"
#include "testing/gtest/include/gtest/gtest.h"

namespace content {
namespace {

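// Throttling parameters used throughout: at most kRequestsPerFlush requests
// are sent per flush, with a flush period of 1/60 s (one frame at 60 Hz).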
const uint32 kRequestsPerFlush = 4;
const double kFlushPeriodSeconds = 1.f / 60;
const int kRoutingId = 1;

typedef ScopedVector<IPC::Message> ScopedMessages;

// Extracts the request id from any of the resource messages exercised below.
int GetRequestId(const IPC::Message& msg) {
  int request_id = -1;
  switch (msg.type()) {
    case ResourceHostMsg_RequestResource::ID: {
      base::PickleIterator iter(msg);
      int routing_id = -1;
      if (!iter.ReadInt(&routing_id) || !iter.ReadInt(&request_id))
        NOTREACHED() << "Invalid id for resource request message.";
    } break;

    case ResourceHostMsg_DidChangePriority::ID:
    case ResourceHostMsg_ReleaseDownloadedFile::ID:
    case ResourceHostMsg_CancelRequest::ID:
      if (!base::PickleIterator(msg).ReadInt(&request_id))
        NOTREACHED() << "Invalid id for resource message.";
      break;

    default:
      NOTREACHED() << "Invalid message for resource throttling.";
      break;
  }
  return request_id;
}

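// Fake scheduler that lets each test flip IsHighPriorityWorkAnticipated(),
// the signal used here to turn throttling on and off.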
class RendererSchedulerForTest : public FakeRendererScheduler {
 public:
  RendererSchedulerForTest() : high_priority_work_anticipated_(false) {}
  ~RendererSchedulerForTest() override {}

  // RendererScheduler implementation:
  bool IsHighPriorityWorkAnticipated() override {
    return high_priority_work_anticipated_;
  }

  void set_high_priority_work_anticipated(bool anticipated) {
    high_priority_work_anticipated_ = anticipated;
  }

 private:
  bool high_priority_work_anticipated_;
};

}  // namespace

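// Test throttler that substitutes a virtual clock (moved forward via
// Advance()) and records flush requests instead of scheduling them, so tests
// can run pending flushes explicitly through RunScheduledFlush().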
class ResourceDispatchThrottlerForTest : public ResourceDispatchThrottler {
 public:
  ResourceDispatchThrottlerForTest(IPC::Sender* sender,
                                   scheduler::RendererScheduler* scheduler)
      : ResourceDispatchThrottler(
            sender,
            scheduler,
            base::TimeDelta::FromSecondsD(kFlushPeriodSeconds),
            kRequestsPerFlush),
        flush_scheduled_(false) {}
  ~ResourceDispatchThrottlerForTest() override {}

  void Advance(base::TimeDelta delta) { now_ += delta; }

  bool RunScheduledFlush() {
    if (!flush_scheduled_)
      return false;

    flush_scheduled_ = false;
    Flush();
    return true;
  }

  bool flush_scheduled() const { return flush_scheduled_; }

 private:
  // ResourceDispatchThrottler overrides:
  base::TimeTicks Now() const override { return now_; }
  void ScheduleFlush() override { flush_scheduled_ = true; }

  base::TimeTicks now_;
  bool flush_scheduled_;
};

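// The test fixture doubles as the IPC::Sender handed to the throttler, so
// every message the throttler actually sends is captured in sent_messages_
// for counting and inspection.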
class ResourceDispatchThrottlerTest : public testing::Test, public IPC::Sender {
 public:
  ResourceDispatchThrottlerTest() : last_request_id_(0) {
    throttler_.reset(new ResourceDispatchThrottlerForTest(this, &scheduler_));
  }
  ~ResourceDispatchThrottlerTest() override {}

  // IPC::Sender implementation:
  bool Send(IPC::Message* msg) override {
    sent_messages_.push_back(msg);
    return true;
  }

 protected:
  void SetHighPriorityWorkAnticipated(bool anticipated) {
    scheduler_.set_high_priority_work_anticipated(anticipated);
  }

  void Advance(base::TimeDelta delta) { throttler_->Advance(delta); }

  bool RunScheduledFlush() { return throttler_->RunScheduledFlush(); }

  bool FlushScheduled() { return throttler_->flush_scheduled(); }

  bool RequestResource() {
    ResourceHostMsg_Request request;
    request.download_to_file = true;
    return throttler_->Send(new ResourceHostMsg_RequestResource(
        kRoutingId, ++last_request_id_, request));
  }

  bool RequestResourceSync() {
    SyncLoadResult result;
    return throttler_->Send(new ResourceHostMsg_SyncLoad(
        kRoutingId, ++last_request_id_, ResourceHostMsg_Request(), &result));
  }

  // Issues requests until one is deferred rather than sent. On return,
  // sent_messages_ is empty and the most recent request is pending in the
  // throttler.
  void RequestResourcesUntilThrottled() {
    SetHighPriorityWorkAnticipated(true);
    GetAndResetSentMessageCount();
    RequestResource();
    while (GetAndResetSentMessageCount())
      RequestResource();
  }

  bool UpdateRequestPriority(int request_id, net::RequestPriority priority) {
    return throttler_->Send(
        new ResourceHostMsg_DidChangePriority(request_id, priority, 0));
  }

  bool ReleaseDownloadedFile(int request_id) {
    return throttler_->Send(
        new ResourceHostMsg_ReleaseDownloadedFile(request_id));
  }

  bool CancelRequest(int request_id) {
    return throttler_->Send(new ResourceHostMsg_CancelRequest(request_id));
  }

  size_t GetAndResetSentMessageCount() {
    size_t sent_message_count = sent_messages_.size();
    sent_messages_.clear();
    return sent_message_count;
  }

  const IPC::Message* LastSentMessage() const {
    return sent_messages_.empty() ? nullptr : sent_messages_.back();
  }

  int LastSentRequestId() const {
    const IPC::Message* msg = LastSentMessage();
    if (!msg)
      return -1;

    int routing_id = -1;
    int request_id = -1;
    base::PickleIterator iter(*msg);
    CHECK(IPC::ReadParam(msg, &iter, &routing_id));
    CHECK(IPC::ReadParam(msg, &iter, &request_id));
    return request_id;
  }

  int last_request_id() const { return last_request_id_; }

  ScopedMessages sent_messages_;

 private:
  scoped_ptr<ResourceDispatchThrottlerForTest> throttler_;
  RendererSchedulerForTest scheduler_;
  int last_request_id_;
  bool flush_scheduled_;

  DISALLOW_COPY_AND_ASSIGN(ResourceDispatchThrottlerTest);
};

TEST_F(ResourceDispatchThrottlerTest, NotThrottledByDefault) {
  SetHighPriorityWorkAnticipated(false);
  for (size_t i = 0; i < kRequestsPerFlush * 2; ++i) {
    RequestResource();
    EXPECT_EQ(i + 1, sent_messages_.size());
  }
}

TEST_F(ResourceDispatchThrottlerTest, NotThrottledIfSendLimitNotReached) {
  SetHighPriorityWorkAnticipated(true);
  for (size_t i = 0; i < kRequestsPerFlush; ++i) {
    RequestResource();
    EXPECT_EQ(i + 1, sent_messages_.size());
  }
}

TEST_F(ResourceDispatchThrottlerTest, ThrottledWhenHighPriorityWork) {
  SetHighPriorityWorkAnticipated(true);
  for (size_t i = 0; i < kRequestsPerFlush; ++i)
    RequestResource();
  ASSERT_EQ(kRequestsPerFlush, GetAndResetSentMessageCount());

  RequestResource();
  EXPECT_EQ(0U, sent_messages_.size());

  EXPECT_TRUE(RunScheduledFlush());
  EXPECT_EQ(1U, sent_messages_.size());
}

TEST_F(ResourceDispatchThrottlerTest,
       ThrottledWhenDeferredMessageQueueNonEmpty) {
  SetHighPriorityWorkAnticipated(true);
  for (size_t i = 0; i < kRequestsPerFlush; ++i)
    RequestResource();
  ASSERT_EQ(kRequestsPerFlush, GetAndResetSentMessageCount());

  RequestResource();
  EXPECT_EQ(0U, sent_messages_.size());
  SetHighPriorityWorkAnticipated(false);
  RequestResource();
  EXPECT_EQ(0U, sent_messages_.size());

  EXPECT_TRUE(RunScheduledFlush());
  EXPECT_EQ(2U, sent_messages_.size());
}

TEST_F(ResourceDispatchThrottlerTest, NotThrottledIfSufficientTimePassed) {
  SetHighPriorityWorkAnticipated(true);

  for (size_t i = 0; i < kRequestsPerFlush * 2; ++i) {
    Advance(base::TimeDelta::FromSecondsD(kFlushPeriodSeconds * 2));
    RequestResource();
    EXPECT_EQ(1U, GetAndResetSentMessageCount());
    EXPECT_FALSE(FlushScheduled());
  }
}

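// Synchronous loads cannot be deferred; as asserted below, sending one also
// flushes any requests that were already throttled.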
TEST_F(ResourceDispatchThrottlerTest, NotThrottledIfSyncMessage) {
  SetHighPriorityWorkAnticipated(true);

  RequestResourceSync();
  EXPECT_EQ(1U, GetAndResetSentMessageCount());

  // Saturate the queue.
  for (size_t i = 0; i < kRequestsPerFlush * 2; ++i)
    RequestResource();
  ASSERT_EQ(kRequestsPerFlush, GetAndResetSentMessageCount());

  // Synchronous messages should flush any previously throttled messages.
  RequestResourceSync();
  EXPECT_EQ(1U + kRequestsPerFlush, GetAndResetSentMessageCount());
  RequestResourceSync();
  EXPECT_EQ(1U, GetAndResetSentMessageCount());

  // Previously throttled messages should already have been flushed.
  RunScheduledFlush();
  EXPECT_EQ(0U, GetAndResetSentMessageCount());
}

TEST_F(ResourceDispatchThrottlerTest, MultipleFlushes) {
  SetHighPriorityWorkAnticipated(true);
  for (size_t i = 0; i < kRequestsPerFlush * 4; ++i)
    RequestResource();
  ASSERT_EQ(kRequestsPerFlush, GetAndResetSentMessageCount());

  for (size_t i = 0; i < 3; ++i) {
    SCOPED_TRACE(i);
    EXPECT_TRUE(RunScheduledFlush());
    EXPECT_EQ(kRequestsPerFlush, GetAndResetSentMessageCount());
  }

  EXPECT_FALSE(FlushScheduled());
  EXPECT_EQ(0U, sent_messages_.size());
}

TEST_F(ResourceDispatchThrottlerTest, MultipleFlushesWhileReceiving) {
  SetHighPriorityWorkAnticipated(true);
  for (size_t i = 0; i < kRequestsPerFlush * 4; ++i)
    RequestResource();
  ASSERT_EQ(kRequestsPerFlush, GetAndResetSentMessageCount());

  for (size_t i = 0; i < 3; ++i) {
    SCOPED_TRACE(i);
    EXPECT_TRUE(RunScheduledFlush());
    EXPECT_EQ(kRequestsPerFlush, GetAndResetSentMessageCount());
    for (size_t j = 0; j < kRequestsPerFlush; ++j)
      RequestResource();
    EXPECT_EQ(0U, sent_messages_.size());
  }

  for (size_t i = 0; i < 3; ++i) {
    EXPECT_TRUE(RunScheduledFlush());
    EXPECT_EQ(kRequestsPerFlush, GetAndResetSentMessageCount());
  }

  EXPECT_FALSE(FlushScheduled());
  EXPECT_EQ(0U, sent_messages_.size());
}

TEST_F(ResourceDispatchThrottlerTest, NonRequestsNeverTriggerThrottling) {
  RequestResource();
  ASSERT_EQ(1U, GetAndResetSentMessageCount());

  for (size_t i = 0; i < kRequestsPerFlush * 3; ++i)
    UpdateRequestPriority(last_request_id(), net::HIGHEST);
  EXPECT_EQ(kRequestsPerFlush * 3, sent_messages_.size());

  RequestResource();
  EXPECT_EQ(1U + kRequestsPerFlush * 3, GetAndResetSentMessageCount());
}

TEST_F(ResourceDispatchThrottlerTest, NonRequestsDeferredWhenThrottling) {
  RequestResource();
  ASSERT_EQ(1U, GetAndResetSentMessageCount());

  RequestResourcesUntilThrottled();
  UpdateRequestPriority(last_request_id(), net::HIGHEST);
  ReleaseDownloadedFile(last_request_id());
  CancelRequest(last_request_id());

  EXPECT_TRUE(RunScheduledFlush());
  EXPECT_EQ(4U, GetAndResetSentMessageCount());
  EXPECT_FALSE(FlushScheduled());
}

TEST_F(ResourceDispatchThrottlerTest, MessageOrderingPreservedWhenThrottling) {
  SetHighPriorityWorkAnticipated(true);
  for (size_t i = 0; i < kRequestsPerFlush; ++i)
    RequestResource();
  ASSERT_EQ(kRequestsPerFlush, GetAndResetSentMessageCount());

  for (size_t i = 0; i < kRequestsPerFlush; ++i) {
    RequestResource();
    UpdateRequestPriority(last_request_id(), net::HIGHEST);
    CancelRequest(last_request_id() - 1);
  }
  ASSERT_EQ(0U, sent_messages_.size());

  EXPECT_TRUE(RunScheduledFlush());
  ASSERT_EQ(kRequestsPerFlush * 3, sent_messages_.size());
  for (size_t i = 0; i < sent_messages_.size(); i += 3) {
    SCOPED_TRACE(i);
    const auto& request_msg = *sent_messages_[i];
    const auto& priority_msg = *sent_messages_[i + 1];
    const auto& cancel_msg = *sent_messages_[i + 2];

    EXPECT_EQ(request_msg.type(), ResourceHostMsg_RequestResource::ID);
    EXPECT_EQ(priority_msg.type(), ResourceHostMsg_DidChangePriority::ID);
    EXPECT_EQ(cancel_msg.type(), ResourceHostMsg_CancelRequest::ID);

    EXPECT_EQ(GetRequestId(request_msg), GetRequestId(priority_msg));
    EXPECT_EQ(GetRequestId(request_msg) - 1, GetRequestId(cancel_msg));
  }
  EXPECT_FALSE(FlushScheduled());
}

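// A minimal sketch (not part of the original suite) showing how the otherwise
// unused LastSentRequestId() helper could pair with
// RequestResourcesUntilThrottled(): after the scheduled flush runs, the single
// deferred request should be the most recently issued one. The expected counts
// assume the flush behavior already exercised by NonRequestsDeferredWhenThrottling.
TEST_F(ResourceDispatchThrottlerTest, LastFlushedRequestMatchesLastIssued) {
  RequestResourcesUntilThrottled();
  ASSERT_EQ(0U, sent_messages_.size());

  EXPECT_TRUE(RunScheduledFlush());
  ASSERT_EQ(1U, sent_messages_.size());
  EXPECT_EQ(last_request_id(), LastSentRequestId());
}
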
}  // namespace content