// Extraction residue (gitweb header), kept as a comment for provenance:
// chromium-blink-merge.git / net/url_request/url_request_throttler_simulation_unittest.cc
// blob 808c6b5c168467954d48e0c14a744f33e6336268
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// The tests in this file attempt to verify the following through simulation:
// a) That a server experiencing overload will actually benefit from the
//    anti-DDoS throttling logic, i.e. that its traffic spike will subside
//    and be distributed over a longer period of time;
// b) That "well-behaved" clients of a server under DDoS attack actually
//    benefit from the anti-DDoS throttling logic; and
// c) That the approximate increase in "perceived downtime" introduced by
//    anti-DDoS throttling for various different actual downtimes is what
//    we expect it to be.
#include <cmath>
#include <cstdarg>
#include <cstdio>
#include <limits>
#include <vector>

#include "base/environment.h"
#include "base/memory/scoped_ptr.h"
#include "base/memory/scoped_vector.h"
#include "base/rand_util.h"
#include "base/time/time.h"
#include "net/base/request_priority.h"
#include "net/url_request/url_request.h"
#include "net/url_request/url_request_context.h"
#include "net/url_request/url_request_test_util.h"
#include "net/url_request/url_request_throttler_manager.h"
#include "net/url_request/url_request_throttler_test_support.h"
#include "testing/gtest/include/gtest/gtest.h"
using base::TimeDelta;
using base::TimeTicks;

namespace net {
namespace {

// Set this variable in your environment if you want to see verbose results
// of the simulation tests.
const char kShowSimulationVariableName[] = "SHOW_SIMULATION_RESULTS";

// Prints output only if a given environment variable is set. We use this
// to not print any output for human evaluation when the test is run without
// supervision.
45 void VerboseOut(const char* format, ...) {
46 static bool have_checked_environment = false;
47 static bool should_print = false;
48 if (!have_checked_environment) {
49 have_checked_environment = true;
50 scoped_ptr<base::Environment> env(base::Environment::Create());
51 if (env->HasVar(kShowSimulationVariableName))
52 should_print = true;
55 if (should_print) {
56 va_list arglist;
57 va_start(arglist, format);
58 vprintf(format, arglist);
59 va_end(arglist);
// A simple two-phase discrete time simulation. Actors are added in the order
// they should take action at every tick of the clock. Ticks of the clock
// are two-phase:
// - Phase 1 advances every actor's time to a new absolute time.
// - Phase 2 asks each actor to perform their action.
68 class DiscreteTimeSimulation {
69 public:
70 class Actor {
71 public:
72 virtual ~Actor() {}
73 virtual void AdvanceTime(const TimeTicks& absolute_time) = 0;
74 virtual void PerformAction() = 0;
77 DiscreteTimeSimulation() {}
79 // Adds an |actor| to the simulation. The client of the simulation maintains
80 // ownership of |actor| and must ensure its lifetime exceeds that of the
81 // simulation. Actors should be added in the order you wish for them to
82 // act at each tick of the simulation.
83 void AddActor(Actor* actor) {
84 actors_.push_back(actor);
87 // Runs the simulation for, pretending |time_between_ticks| passes from one
88 // tick to the next. The start time will be the current real time. The
89 // simulation will stop when the simulated duration is equal to or greater
90 // than |maximum_simulated_duration|.
91 void RunSimulation(const TimeDelta& maximum_simulated_duration,
92 const TimeDelta& time_between_ticks) {
93 TimeTicks start_time = TimeTicks();
94 TimeTicks now = start_time;
95 while ((now - start_time) <= maximum_simulated_duration) {
96 for (std::vector<Actor*>::iterator it = actors_.begin();
97 it != actors_.end();
98 ++it) {
99 (*it)->AdvanceTime(now);
102 for (std::vector<Actor*>::iterator it = actors_.begin();
103 it != actors_.end();
104 ++it) {
105 (*it)->PerformAction();
108 now += time_between_ticks;
112 private:
113 std::vector<Actor*> actors_;
115 DISALLOW_COPY_AND_ASSIGN(DiscreteTimeSimulation);
// Represents a web server in a simulation of a server under attack by
// a lot of clients. Must be added to the simulation's list of actors
// after all |Requester| objects.
121 class Server : public DiscreteTimeSimulation::Actor {
122 public:
123 Server(int max_queries_per_tick, double request_drop_ratio)
124 : max_queries_per_tick_(max_queries_per_tick),
125 request_drop_ratio_(request_drop_ratio),
126 num_overloaded_ticks_remaining_(0),
127 num_current_tick_queries_(0),
128 num_overloaded_ticks_(0),
129 max_experienced_queries_per_tick_(0),
130 mock_request_(context_.CreateRequest(GURL(), DEFAULT_PRIORITY, NULL)) {}
132 void SetDowntime(const TimeTicks& start_time, const TimeDelta& duration) {
133 start_downtime_ = start_time;
134 end_downtime_ = start_time + duration;
137 void AdvanceTime(const TimeTicks& absolute_time) override {
138 now_ = absolute_time;
141 void PerformAction() override {
142 // We are inserted at the end of the actor's list, so all Requester
143 // instances have already done their bit.
144 if (num_current_tick_queries_ > max_experienced_queries_per_tick_)
145 max_experienced_queries_per_tick_ = num_current_tick_queries_;
147 if (num_current_tick_queries_ > max_queries_per_tick_) {
148 // We pretend the server fails for the next several ticks after it
149 // gets overloaded.
150 num_overloaded_ticks_remaining_ = 5;
151 ++num_overloaded_ticks_;
152 } else if (num_overloaded_ticks_remaining_ > 0) {
153 --num_overloaded_ticks_remaining_;
156 requests_per_tick_.push_back(num_current_tick_queries_);
157 num_current_tick_queries_ = 0;
160 // This is called by Requester. It returns the response code from
161 // the server.
162 int HandleRequest() {
163 ++num_current_tick_queries_;
164 if (!start_downtime_.is_null() &&
165 start_downtime_ < now_ && now_ < end_downtime_) {
166 // For the simulation measuring the increase in perceived
167 // downtime, it might be interesting to count separately the
168 // queries seen by the server (assuming a front-end reverse proxy
169 // is what actually serves up the 503s in this case) so that we could
170 // visualize the traffic spike seen by the server when it comes up,
171 // which would in many situations be ameliorated by the anti-DDoS
172 // throttling.
173 return 503;
176 if ((num_overloaded_ticks_remaining_ > 0 ||
177 num_current_tick_queries_ > max_queries_per_tick_) &&
178 base::RandDouble() < request_drop_ratio_) {
179 return 503;
182 return 200;
185 int num_overloaded_ticks() const {
186 return num_overloaded_ticks_;
189 int max_experienced_queries_per_tick() const {
190 return max_experienced_queries_per_tick_;
193 const URLRequest& mock_request() const {
194 return *mock_request_.get();
197 std::string VisualizeASCII(int terminal_width) {
198 // Account for | characters we place at left of graph.
199 terminal_width -= 1;
201 VerboseOut("Overloaded for %d of %d ticks.\n",
202 num_overloaded_ticks_, requests_per_tick_.size());
203 VerboseOut("Got maximum of %d requests in a tick.\n\n",
204 max_experienced_queries_per_tick_);
206 VerboseOut("Traffic graph:\n\n");
208 // Printing the graph like this is a bit overkill, but was very useful
209 // while developing the various simulations to see if they were testing
210 // the corner cases we want to simulate.
212 // Find the smallest number of whole ticks we need to group into a
213 // column that will let all ticks fit into the column width we have.
214 int num_ticks = requests_per_tick_.size();
215 double ticks_per_column_exact =
216 static_cast<double>(num_ticks) / static_cast<double>(terminal_width);
217 int ticks_per_column = std::ceil(ticks_per_column_exact);
218 DCHECK_GE(ticks_per_column * terminal_width, num_ticks);
220 // Sum up the column values.
221 int num_columns = num_ticks / ticks_per_column;
222 if (num_ticks % ticks_per_column)
223 ++num_columns;
224 DCHECK_LE(num_columns, terminal_width);
225 scoped_ptr<int[]> columns(new int[num_columns]);
226 for (int tx = 0; tx < num_ticks; ++tx) {
227 int cx = tx / ticks_per_column;
228 if (tx % ticks_per_column == 0)
229 columns[cx] = 0;
230 columns[cx] += requests_per_tick_[tx];
233 // Find the lowest integer divisor that will let the column values
234 // be represented in a graph of maximum height 50.
235 int max_value = 0;
236 for (int cx = 0; cx < num_columns; ++cx)
237 max_value = std::max(max_value, columns[cx]);
238 const int kNumRows = 50;
239 double row_divisor_exact = max_value / static_cast<double>(kNumRows);
240 int row_divisor = std::ceil(row_divisor_exact);
241 DCHECK_GE(row_divisor * kNumRows, max_value);
243 // To show the overload line, we calculate the appropriate value.
244 int overload_value = max_queries_per_tick_ * ticks_per_column;
246 // When num_ticks is not a whole multiple of ticks_per_column, the last
247 // column includes fewer ticks than the others. In this case, don't
248 // print it so that we don't show an inconsistent value.
249 int num_printed_columns = num_columns;
250 if (num_ticks % ticks_per_column)
251 --num_printed_columns;
253 // This is a top-to-bottom traversal of rows, left-to-right per row.
254 std::string output;
255 for (int rx = 0; rx < kNumRows; ++rx) {
256 int range_min = (kNumRows - rx) * row_divisor;
257 int range_max = range_min + row_divisor;
258 if (range_min == 0)
259 range_min = -1; // Make 0 values fit in the bottom range.
260 output.append("|");
261 for (int cx = 0; cx < num_printed_columns; ++cx) {
262 char block = ' ';
263 // Show the overload line.
264 if (range_min < overload_value && overload_value <= range_max)
265 block = '-';
267 // Preferentially, show the graph line.
268 if (range_min < columns[cx] && columns[cx] <= range_max)
269 block = '#';
271 output.append(1, block);
273 output.append("\n");
275 output.append("|");
276 output.append(num_printed_columns, '=');
278 return output;
281 const URLRequestContext& context() const { return context_; }
283 private:
284 TimeTicks now_;
285 TimeTicks start_downtime_; // Can be 0 to say "no downtime".
286 TimeTicks end_downtime_;
287 const int max_queries_per_tick_;
288 const double request_drop_ratio_; // Ratio of requests to 503 when failing.
289 int num_overloaded_ticks_remaining_;
290 int num_current_tick_queries_;
291 int num_overloaded_ticks_;
292 int max_experienced_queries_per_tick_;
293 std::vector<int> requests_per_tick_;
295 TestURLRequestContext context_;
296 scoped_ptr<URLRequest> mock_request_;
298 DISALLOW_COPY_AND_ASSIGN(Server);
// Mock throttler entry used by Requester class.
302 class MockURLRequestThrottlerEntry : public URLRequestThrottlerEntry {
303 public:
304 explicit MockURLRequestThrottlerEntry(URLRequestThrottlerManager* manager)
305 : URLRequestThrottlerEntry(manager, std::string()),
306 backoff_entry_(&backoff_policy_, &fake_clock_) {}
308 const BackoffEntry* GetBackoffEntry() const override {
309 return &backoff_entry_;
312 BackoffEntry* GetBackoffEntry() override { return &backoff_entry_; }
314 TimeTicks ImplGetTimeNow() const override { return fake_clock_.NowTicks(); }
316 void SetFakeNow(const TimeTicks& fake_time) {
317 fake_clock_.set_now(fake_time);
320 protected:
321 ~MockURLRequestThrottlerEntry() override {}
323 private:
324 mutable TestTickClock fake_clock_;
325 BackoffEntry backoff_entry_;
// Registry of results for a class of |Requester| objects (e.g. attackers vs.
// regular clients).
330 class RequesterResults {
331 public:
332 RequesterResults()
333 : num_attempts_(0), num_successful_(0), num_failed_(0), num_blocked_(0) {
336 void AddSuccess() {
337 ++num_attempts_;
338 ++num_successful_;
341 void AddFailure() {
342 ++num_attempts_;
343 ++num_failed_;
346 void AddBlocked() {
347 ++num_attempts_;
348 ++num_blocked_;
351 int num_attempts() const { return num_attempts_; }
352 int num_successful() const { return num_successful_; }
353 int num_failed() const { return num_failed_; }
354 int num_blocked() const { return num_blocked_; }
356 double GetBlockedRatio() {
357 DCHECK(num_attempts_);
358 return static_cast<double>(num_blocked_) /
359 static_cast<double>(num_attempts_);
362 double GetSuccessRatio() {
363 DCHECK(num_attempts_);
364 return static_cast<double>(num_successful_) /
365 static_cast<double>(num_attempts_);
368 void PrintResults(const char* class_description) {
369 if (num_attempts_ == 0) {
370 VerboseOut("No data for %s\n", class_description);
371 return;
374 VerboseOut("Requester results for %s\n", class_description);
375 VerboseOut(" %d attempts\n", num_attempts_);
376 VerboseOut(" %d successes\n", num_successful_);
377 VerboseOut(" %d 5xx responses\n", num_failed_);
378 VerboseOut(" %d requests blocked\n", num_blocked_);
379 VerboseOut(" %.2f success ratio\n", GetSuccessRatio());
380 VerboseOut(" %.2f blocked ratio\n", GetBlockedRatio());
381 VerboseOut("\n");
384 private:
385 int num_attempts_;
386 int num_successful_;
387 int num_failed_;
388 int num_blocked_;
// Represents a Requester in a simulated DDoS situation, that periodically
// requests a specific resource.
393 class Requester : public DiscreteTimeSimulation::Actor {
394 public:
395 Requester(MockURLRequestThrottlerEntry* throttler_entry,
396 const TimeDelta& time_between_requests,
397 Server* server,
398 RequesterResults* results)
399 : throttler_entry_(throttler_entry),
400 time_between_requests_(time_between_requests),
401 last_attempt_was_failure_(false),
402 server_(server),
403 results_(results) {
404 DCHECK(server_);
407 void AdvanceTime(const TimeTicks& absolute_time) override {
408 if (time_of_last_success_.is_null())
409 time_of_last_success_ = absolute_time;
411 throttler_entry_->SetFakeNow(absolute_time);
414 void PerformAction() override {
415 TimeDelta effective_delay = time_between_requests_;
416 TimeDelta current_jitter = TimeDelta::FromMilliseconds(
417 request_jitter_.InMilliseconds() * base::RandDouble());
418 if (base::RandInt(0, 1)) {
419 effective_delay -= current_jitter;
420 } else {
421 effective_delay += current_jitter;
424 if (throttler_entry_->ImplGetTimeNow() - time_of_last_attempt_ >
425 effective_delay) {
426 if (!throttler_entry_->ShouldRejectRequest(server_->mock_request())) {
427 int status_code = server_->HandleRequest();
428 throttler_entry_->UpdateWithResponse(status_code);
430 if (status_code == 200) {
431 if (results_)
432 results_->AddSuccess();
434 if (last_attempt_was_failure_) {
435 last_downtime_duration_ =
436 throttler_entry_->ImplGetTimeNow() - time_of_last_success_;
439 time_of_last_success_ = throttler_entry_->ImplGetTimeNow();
440 last_attempt_was_failure_ = false;
441 } else {
442 if (results_)
443 results_->AddFailure();
444 last_attempt_was_failure_ = true;
446 } else {
447 if (results_)
448 results_->AddBlocked();
449 last_attempt_was_failure_ = true;
452 time_of_last_attempt_ = throttler_entry_->ImplGetTimeNow();
456 // Adds a delay until the first request, equal to a uniformly distributed
457 // value between now and now + max_delay.
458 void SetStartupJitter(const TimeDelta& max_delay) {
459 int delay_ms = base::RandInt(0, max_delay.InMilliseconds());
460 time_of_last_attempt_ = TimeTicks() +
461 TimeDelta::FromMilliseconds(delay_ms) - time_between_requests_;
464 void SetRequestJitter(const TimeDelta& request_jitter) {
465 request_jitter_ = request_jitter;
468 TimeDelta last_downtime_duration() const { return last_downtime_duration_; }
470 private:
471 scoped_refptr<MockURLRequestThrottlerEntry> throttler_entry_;
472 const TimeDelta time_between_requests_;
473 TimeDelta request_jitter_;
474 TimeTicks time_of_last_attempt_;
475 TimeTicks time_of_last_success_;
476 bool last_attempt_was_failure_;
477 TimeDelta last_downtime_duration_;
478 Server* const server_;
479 RequesterResults* const results_; // May be NULL.
481 DISALLOW_COPY_AND_ASSIGN(Requester);
484 void SimulateAttack(Server* server,
485 RequesterResults* attacker_results,
486 RequesterResults* client_results,
487 bool enable_throttling) {
488 const size_t kNumAttackers = 50;
489 const size_t kNumClients = 50;
490 DiscreteTimeSimulation simulation;
491 URLRequestThrottlerManager manager;
492 ScopedVector<Requester> requesters;
493 for (size_t i = 0; i < kNumAttackers; ++i) {
494 // Use a tiny time_between_requests so the attackers will ping the
495 // server at every tick of the simulation.
496 scoped_refptr<MockURLRequestThrottlerEntry> throttler_entry(
497 new MockURLRequestThrottlerEntry(&manager));
498 if (!enable_throttling)
499 throttler_entry->DisableBackoffThrottling();
501 Requester* attacker = new Requester(throttler_entry.get(),
502 TimeDelta::FromMilliseconds(1),
503 server,
504 attacker_results);
505 attacker->SetStartupJitter(TimeDelta::FromSeconds(120));
506 requesters.push_back(attacker);
507 simulation.AddActor(attacker);
509 for (size_t i = 0; i < kNumClients; ++i) {
510 // Normal clients only make requests every 2 minutes, plus/minus 1 minute.
511 scoped_refptr<MockURLRequestThrottlerEntry> throttler_entry(
512 new MockURLRequestThrottlerEntry(&manager));
513 if (!enable_throttling)
514 throttler_entry->DisableBackoffThrottling();
516 Requester* client = new Requester(throttler_entry.get(),
517 TimeDelta::FromMinutes(2),
518 server,
519 client_results);
520 client->SetStartupJitter(TimeDelta::FromSeconds(120));
521 client->SetRequestJitter(TimeDelta::FromMinutes(1));
522 requesters.push_back(client);
523 simulation.AddActor(client);
525 simulation.AddActor(server);
527 simulation.RunSimulation(TimeDelta::FromMinutes(6),
528 TimeDelta::FromSeconds(1));
531 TEST(URLRequestThrottlerSimulation, HelpsInAttack) {
532 Server unprotected_server(30, 1.0);
533 RequesterResults unprotected_attacker_results;
534 RequesterResults unprotected_client_results;
535 Server protected_server(30, 1.0);
536 RequesterResults protected_attacker_results;
537 RequesterResults protected_client_results;
538 SimulateAttack(&unprotected_server,
539 &unprotected_attacker_results,
540 &unprotected_client_results,
541 false);
542 SimulateAttack(&protected_server,
543 &protected_attacker_results,
544 &protected_client_results,
545 true);
547 // These assert that the DDoS protection actually benefits the
548 // server. Manual inspection of the traffic graphs will show this
549 // even more clearly.
550 EXPECT_GT(unprotected_server.num_overloaded_ticks(),
551 protected_server.num_overloaded_ticks());
552 EXPECT_GT(unprotected_server.max_experienced_queries_per_tick(),
553 protected_server.max_experienced_queries_per_tick());
555 // These assert that the DDoS protection actually benefits non-malicious
556 // (and non-degenerate/accidentally DDoSing) users.
557 EXPECT_LT(protected_client_results.GetBlockedRatio(),
558 protected_attacker_results.GetBlockedRatio());
559 EXPECT_GT(protected_client_results.GetSuccessRatio(),
560 unprotected_client_results.GetSuccessRatio());
562 // The rest is just for optional manual evaluation of the results;
563 // in particular the traffic pattern is interesting.
565 VerboseOut("\nUnprotected server's results:\n\n");
566 VerboseOut(unprotected_server.VisualizeASCII(132).c_str());
567 VerboseOut("\n\n");
568 VerboseOut("Protected server's results:\n\n");
569 VerboseOut(protected_server.VisualizeASCII(132).c_str());
570 VerboseOut("\n\n");
572 unprotected_attacker_results.PrintResults(
573 "attackers attacking unprotected server.");
574 unprotected_client_results.PrintResults(
575 "normal clients making requests to unprotected server.");
576 protected_attacker_results.PrintResults(
577 "attackers attacking protected server.");
578 protected_client_results.PrintResults(
579 "normal clients making requests to protected server.");
// Returns the downtime perceived by the client, as a ratio of the
// actual downtime.
584 double SimulateDowntime(const TimeDelta& duration,
585 const TimeDelta& average_client_interval,
586 bool enable_throttling) {
587 TimeDelta time_between_ticks = duration / 200;
588 TimeTicks start_downtime = TimeTicks() + (duration / 2);
590 // A server that never rejects requests, but will go down for maintenance.
591 Server server(std::numeric_limits<int>::max(), 1.0);
592 server.SetDowntime(start_downtime, duration);
594 URLRequestThrottlerManager manager;
595 scoped_refptr<MockURLRequestThrottlerEntry> throttler_entry(
596 new MockURLRequestThrottlerEntry(&manager));
597 if (!enable_throttling)
598 throttler_entry->DisableBackoffThrottling();
600 Requester requester(
601 throttler_entry.get(), average_client_interval, &server, NULL);
602 requester.SetStartupJitter(duration / 3);
603 requester.SetRequestJitter(average_client_interval);
605 DiscreteTimeSimulation simulation;
606 simulation.AddActor(&requester);
607 simulation.AddActor(&server);
609 simulation.RunSimulation(duration * 2, time_between_ticks);
611 return static_cast<double>(
612 requester.last_downtime_duration().InMilliseconds()) /
613 static_cast<double>(duration.InMilliseconds());
616 TEST(URLRequestThrottlerSimulation, PerceivedDowntimeRatio) {
617 struct Stats {
618 // Expected interval that we expect the ratio of downtime when anti-DDoS
619 // is enabled and downtime when anti-DDoS is not enabled to fall within.
621 // The expected interval depends on two things: The exponential back-off
622 // policy encoded in URLRequestThrottlerEntry, and the test or set of
623 // tests that the Stats object is tracking (e.g. a test where the client
624 // retries very rapidly on a very long downtime will tend to increase the
625 // number).
627 // To determine an appropriate new interval when parameters have changed,
628 // run the test a few times (you may have to Ctrl-C out of it after a few
629 // seconds) and choose an interval that the test converges quickly and
630 // reliably to. Then set the new interval, and run the test e.g. 20 times
631 // in succession to make sure it never takes an obscenely long time to
632 // converge to this interval.
633 double expected_min_increase;
634 double expected_max_increase;
636 size_t num_runs;
637 double total_ratio_unprotected;
638 double total_ratio_protected;
640 bool DidConverge(double* increase_ratio_out) {
641 double unprotected_ratio = total_ratio_unprotected / num_runs;
642 double protected_ratio = total_ratio_protected / num_runs;
643 double increase_ratio = protected_ratio / unprotected_ratio;
644 if (increase_ratio_out)
645 *increase_ratio_out = increase_ratio;
646 return expected_min_increase <= increase_ratio &&
647 increase_ratio <= expected_max_increase;
650 void ReportTrialResult(double increase_ratio) {
651 VerboseOut(
652 " Perceived downtime with throttling is %.4f times without.\n",
653 increase_ratio);
654 VerboseOut(" Test result after %d trials.\n", num_runs);
658 Stats global_stats = { 1.08, 1.15 };
660 struct Trial {
661 TimeDelta duration;
662 TimeDelta average_client_interval;
663 Stats stats;
665 void PrintTrialDescription() {
666 double duration_minutes =
667 static_cast<double>(duration.InSeconds()) / 60.0;
668 double interval_minutes =
669 static_cast<double>(average_client_interval.InSeconds()) / 60.0;
670 VerboseOut("Trial with %.2f min downtime, avg. interval %.2f min.\n",
671 duration_minutes, interval_minutes);
675 // We don't set or check expected ratio intervals on individual
676 // experiments as this might make the test too fragile, but we
677 // print them out at the end for manual evaluation (we want to be
678 // able to make claims about the expected ratios depending on the
679 // type of behavior of the client and the downtime, e.g. the difference
680 // in behavior between a client making requests every few minutes vs.
681 // one that makes a request every 15 seconds).
682 Trial trials[] = {
683 { TimeDelta::FromSeconds(10), TimeDelta::FromSeconds(3) },
684 { TimeDelta::FromSeconds(30), TimeDelta::FromSeconds(7) },
685 { TimeDelta::FromMinutes(5), TimeDelta::FromSeconds(30) },
686 { TimeDelta::FromMinutes(10), TimeDelta::FromSeconds(20) },
687 { TimeDelta::FromMinutes(20), TimeDelta::FromSeconds(15) },
688 { TimeDelta::FromMinutes(20), TimeDelta::FromSeconds(50) },
689 { TimeDelta::FromMinutes(30), TimeDelta::FromMinutes(2) },
690 { TimeDelta::FromMinutes(30), TimeDelta::FromMinutes(5) },
691 { TimeDelta::FromMinutes(40), TimeDelta::FromMinutes(7) },
692 { TimeDelta::FromMinutes(40), TimeDelta::FromMinutes(2) },
693 { TimeDelta::FromMinutes(40), TimeDelta::FromSeconds(15) },
694 { TimeDelta::FromMinutes(60), TimeDelta::FromMinutes(7) },
695 { TimeDelta::FromMinutes(60), TimeDelta::FromMinutes(2) },
696 { TimeDelta::FromMinutes(60), TimeDelta::FromSeconds(15) },
697 { TimeDelta::FromMinutes(80), TimeDelta::FromMinutes(20) },
698 { TimeDelta::FromMinutes(80), TimeDelta::FromMinutes(3) },
699 { TimeDelta::FromMinutes(80), TimeDelta::FromSeconds(15) },
701 // Most brutal?
702 { TimeDelta::FromMinutes(45), TimeDelta::FromMilliseconds(500) },
705 // If things don't converge by the time we've done 100K trials, then
706 // clearly one or more of the expected intervals are wrong.
707 while (global_stats.num_runs < 100000) {
708 for (size_t i = 0; i < arraysize(trials); ++i) {
709 ++global_stats.num_runs;
710 ++trials[i].stats.num_runs;
711 double ratio_unprotected = SimulateDowntime(
712 trials[i].duration, trials[i].average_client_interval, false);
713 double ratio_protected = SimulateDowntime(
714 trials[i].duration, trials[i].average_client_interval, true);
715 global_stats.total_ratio_unprotected += ratio_unprotected;
716 global_stats.total_ratio_protected += ratio_protected;
717 trials[i].stats.total_ratio_unprotected += ratio_unprotected;
718 trials[i].stats.total_ratio_protected += ratio_protected;
721 double increase_ratio;
722 if (global_stats.DidConverge(&increase_ratio))
723 break;
725 if (global_stats.num_runs > 200) {
726 VerboseOut("Test has not yet converged on expected interval.\n");
727 global_stats.ReportTrialResult(increase_ratio);
731 double average_increase_ratio;
732 EXPECT_TRUE(global_stats.DidConverge(&average_increase_ratio));
734 // Print individual trial results for optional manual evaluation.
735 double max_increase_ratio = 0.0;
736 for (size_t i = 0; i < arraysize(trials); ++i) {
737 double increase_ratio;
738 trials[i].stats.DidConverge(&increase_ratio);
739 max_increase_ratio = std::max(max_increase_ratio, increase_ratio);
740 trials[i].PrintTrialDescription();
741 trials[i].stats.ReportTrialResult(increase_ratio);
744 VerboseOut("Average increase ratio was %.4f\n", average_increase_ratio);
745 VerboseOut("Maximum increase ratio was %.4f\n", max_increase_ratio);
}  // namespace
}  // namespace net