ipc: Convert k(u)int*max types from basictypes.h to std::numeric_limits variants.
ipc/ipc_perftest_support.cc

// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "ipc/ipc_perftest_support.h"

#include <algorithm>
#include <string>

#include "base/basictypes.h"
#include "base/logging.h"
#include "base/memory/scoped_ptr.h"
#include "base/pickle.h"
#include "base/strings/stringprintf.h"
#include "base/test/perf_time_logger.h"
#include "base/test/test_io_thread.h"
#include "base/threading/thread.h"
#include "base/time/time.h"
#include "build/build_config.h"
#include "ipc/ipc_channel.h"
#include "ipc/ipc_channel_proxy.h"
#include "ipc/ipc_descriptors.h"
#include "ipc/ipc_message_utils.h"
#include "ipc/ipc_sender.h"

namespace IPC {
namespace test {

// Avoid core 0 due to conflicts with Intel's Power Gadget.
// Setting thread affinity will fail harmlessly on single/dual core machines.
const int kSharedCore = 2;
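
// The ping-pong messages exchanged below are hand-pickled with a fixed
// layout: an int64 send timestamp (TimeTicks internal value), an int message
// id, and a string payload. The payload strings "hello" and "quit" act as
// control values: "hello" starts a timing run, "quit" shuts the client down,
// and everything else is echoed back and counted toward the latency stats.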

// This class simply collects stats about abstract "events" (each of which has a
// start time and an end time).
class EventTimeTracker {
 public:
  explicit EventTimeTracker(const char* name)
      : name_(name),
        count_(0) {
  }

  void AddEvent(const base::TimeTicks& start, const base::TimeTicks& end) {
    DCHECK(end >= start);
    count_++;
    base::TimeDelta duration = end - start;
    total_duration_ += duration;
    max_duration_ = std::max(max_duration_, duration);
  }
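
  // ShowResults() reports via VLOG(1), so its output is only visible when
  // verbose logging is enabled (e.g. when the test binary is run with --v=1).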
  void ShowResults() const {
    VLOG(1) << name_ << " count: " << count_;
    VLOG(1) << name_ << " total duration: "
            << total_duration_.InMillisecondsF() << " ms";
    VLOG(1) << name_ << " average duration: "
            << (total_duration_.InMillisecondsF() / static_cast<double>(count_))
            << " ms";
    VLOG(1) << name_ << " maximum duration: "
            << max_duration_.InMillisecondsF() << " ms";
  }

  void Reset() {
    count_ = 0;
    total_duration_ = base::TimeDelta();
    max_duration_ = base::TimeDelta();
  }

 private:
  const std::string name_;

  uint64_t count_;
  base::TimeDelta total_duration_;
  base::TimeDelta max_duration_;

  DISALLOW_COPY_AND_ASSIGN(EventTimeTracker);
};

// This channel listener just replies to all messages with the exact same
// message. It assumes each message carries a timestamp, a message id, and one
// string parameter. When the string "quit" is received, it exits.
class ChannelReflectorListener : public Listener {
 public:
  ChannelReflectorListener()
      : channel_(NULL),
        latency_tracker_("Client messages") {
    VLOG(1) << "Client listener up";
  }

  ~ChannelReflectorListener() override {
    VLOG(1) << "Client listener down";
    latency_tracker_.ShowResults();
  }

  void Init(Channel* channel) {
    DCHECK(!channel_);
    channel_ = channel;
  }

  bool OnMessageReceived(const Message& message) override {
    CHECK(channel_);

    base::PickleIterator iter(message);
    int64_t time_internal;
    EXPECT_TRUE(iter.ReadInt64(&time_internal));
    int msgid;
    EXPECT_TRUE(iter.ReadInt(&msgid));
    base::StringPiece payload;
    EXPECT_TRUE(iter.ReadStringPiece(&payload));

    // Include message deserialization in latency.
    base::TimeTicks now = base::TimeTicks::Now();

    if (payload == "hello") {
      latency_tracker_.Reset();
    } else if (payload == "quit") {
      latency_tracker_.ShowResults();
      base::MessageLoop::current()->QuitWhenIdle();
      return true;
    } else {
      // Don't track hello and quit messages.
      latency_tracker_.AddEvent(
          base::TimeTicks::FromInternalValue(time_internal), now);
    }
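
    // Echo the message back with a fresh send timestamp; routing id 0 and
    // message type 2 mirror the values used for the driver's messages below.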
    Message* msg = new Message(0, 2, Message::PRIORITY_NORMAL);
    msg->WriteInt64(base::TimeTicks::Now().ToInternalValue());
    msg->WriteInt(msgid);
    msg->WriteString(payload);
    channel_->Send(msg);
    return true;
  }

 private:
  Channel* channel_;
  EventTimeTracker latency_tracker_;
};

class PerformanceChannelListener : public Listener {
 public:
  explicit PerformanceChannelListener(const std::string& label)
      : label_(label),
        sender_(NULL),
        msg_count_(0),
        msg_size_(0),
        count_down_(0),
        latency_tracker_("Server messages") {
    VLOG(1) << "Server listener up";
  }

  ~PerformanceChannelListener() override {
    VLOG(1) << "Server listener down";
  }

  void Init(Sender* sender) {
    DCHECK(!sender_);
    sender_ = sender;
  }

  // Call this before running the message loop.
  void SetTestParams(int msg_count, size_t msg_size) {
    DCHECK_EQ(0, count_down_);
    msg_count_ = msg_count;
    msg_size_ = msg_size;
    count_down_ = msg_count_;
    payload_ = std::string(msg_size_, 'a');
  }

  bool OnMessageReceived(const Message& message) override {
    CHECK(sender_);

    base::PickleIterator iter(message);
    int64_t time_internal;
    EXPECT_TRUE(iter.ReadInt64(&time_internal));
    int msgid;
    EXPECT_TRUE(iter.ReadInt(&msgid));
    std::string reflected_payload;
    EXPECT_TRUE(iter.ReadString(&reflected_payload));

    // Include message deserialization in latency.
    base::TimeTicks now = base::TimeTicks::Now();

    if (reflected_payload == "hello") {
      // Start timing on hello.
      latency_tracker_.Reset();
      DCHECK(!perf_logger_.get());
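      // The perf log entry is named after the label, message count, and
      // message size; e.g. the first entry in GetDefaultTestParams() with the
      // "Channel" label produces "IPC_Channel_Perf_50000x_12".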
      std::string test_name =
          base::StringPrintf("IPC_%s_Perf_%dx_%u",
                             label_.c_str(),
                             msg_count_,
                             static_cast<unsigned>(msg_size_));
      perf_logger_.reset(new base::PerfTimeLogger(test_name.c_str()));
    } else {
      DCHECK_EQ(payload_.size(), reflected_payload.size());

      latency_tracker_.AddEvent(
          base::TimeTicks::FromInternalValue(time_internal), now);

      CHECK(count_down_ > 0);
      count_down_--;
      if (count_down_ == 0) {
        perf_logger_.reset();  // Stop the perf timer now.
        latency_tracker_.ShowResults();
        base::MessageLoop::current()->QuitWhenIdle();
        return true;
      }
    }

    Message* msg = new Message(0, 2, Message::PRIORITY_NORMAL);
    msg->WriteInt64(base::TimeTicks::Now().ToInternalValue());
    msg->WriteInt(count_down_);
    msg->WriteString(payload_);
    sender_->Send(msg);
    return true;
  }

 private:
  std::string label_;
  Sender* sender_;
  int msg_count_;
  size_t msg_size_;

  int count_down_;
  std::string payload_;
  EventTimeTracker latency_tracker_;
  scoped_ptr<base::PerfTimeLogger> perf_logger_;
};

std::vector<PingPongTestParams>
IPCChannelPerfTestBase::GetDefaultTestParams() {
  // Test several sizes. We use 12^N for message size, and limit the message
  // count to keep the test duration reasonable.
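  // (The sizes below are 12^1 through 12^5: 12, 144, 1728, 20736, 248832.)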
  std::vector<PingPongTestParams> list;
  list.push_back(PingPongTestParams(12, 50000));
  list.push_back(PingPongTestParams(144, 50000));
  list.push_back(PingPongTestParams(1728, 50000));
  list.push_back(PingPongTestParams(20736, 12000));
  list.push_back(PingPongTestParams(248832, 1000));
  return list;
}

void IPCChannelPerfTestBase::RunTestChannelPingPong(
    const std::vector<PingPongTestParams>& params) {
  Init("PerformanceClient");

  // Set up IPC channel and start client.
  PerformanceChannelListener listener("Channel");
  CreateChannel(&listener);
  listener.Init(channel());
  ASSERT_TRUE(ConnectChannel());
  ASSERT_TRUE(StartClient());

  LockThreadAffinity thread_locker(kSharedCore);
  for (size_t i = 0; i < params.size(); i++) {
    listener.SetTestParams(params[i].message_count(),
                           params[i].message_size());

    // This initial message will kick-start the ping-pong of messages.
    Message* message =
        new Message(0, 2, Message::PRIORITY_NORMAL);
    message->WriteInt64(base::TimeTicks::Now().ToInternalValue());
    message->WriteInt(-1);
    message->WriteString("hello");
    sender()->Send(message);

    // Run message loop.
    base::MessageLoop::current()->Run();
  }

  // Send quit message.
  Message* message = new Message(0, 2, Message::PRIORITY_NORMAL);
  message->WriteInt64(base::TimeTicks::Now().ToInternalValue());
  message->WriteInt(-1);
  message->WriteString("quit");
  sender()->Send(message);

  EXPECT_TRUE(WaitForClientShutdown());
  DestroyChannel();
}

void IPCChannelPerfTestBase::RunTestChannelProxyPingPong(
    const std::vector<PingPongTestParams>& params) {
  InitWithCustomMessageLoop("PerformanceClient",
                            make_scoped_ptr(new base::MessageLoop()));

  base::TestIOThread io_thread(base::TestIOThread::kAutoStart);

  // Set up IPC channel and start client.
  PerformanceChannelListener listener("ChannelProxy");
  CreateChannelProxy(&listener, io_thread.task_runner());
  listener.Init(channel_proxy());
  ASSERT_TRUE(StartClient());

  LockThreadAffinity thread_locker(kSharedCore);
  for (size_t i = 0; i < params.size(); i++) {
    listener.SetTestParams(params[i].message_count(),
                           params[i].message_size());

    // This initial message will kick-start the ping-pong of messages.
    Message* message =
        new Message(0, 2, Message::PRIORITY_NORMAL);
    message->WriteInt64(base::TimeTicks::Now().ToInternalValue());
    message->WriteInt(-1);
    message->WriteString("hello");
    sender()->Send(message);

    // Run message loop.
    base::MessageLoop::current()->Run();
  }

  // Send quit message.
  Message* message = new Message(0, 2, Message::PRIORITY_NORMAL);
  message->WriteInt64(base::TimeTicks::Now().ToInternalValue());
  message->WriteInt(-1);
  message->WriteString("quit");
  sender()->Send(message);

  EXPECT_TRUE(WaitForClientShutdown());
  DestroyChannelProxy();
}
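
// A sketch of how a perf test would drive the helpers above (assumes a
// hypothetical gtest fixture named IPCChannelPerfTest that derives from
// IPCChannelPerfTestBase; not part of this file):
//
//   TEST_F(IPCChannelPerfTest, ChannelPingPong) {
//     RunTestChannelPingPong(GetDefaultTestParams());
//   }
//
//   TEST_F(IPCChannelPerfTest, ChannelProxyPingPong) {
//     RunTestChannelProxyPingPong(GetDefaultTestParams());
//   }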

PingPongTestClient::PingPongTestClient()
    : listener_(new ChannelReflectorListener()) {
}

PingPongTestClient::~PingPongTestClient() {
}

scoped_ptr<Channel> PingPongTestClient::CreateChannel(
    Listener* listener) {
  return Channel::CreateClient(IPCTestBase::GetChannelName("PerformanceClient"),
                               listener, nullptr);
}

int PingPongTestClient::RunMain() {
  LockThreadAffinity thread_locker(kSharedCore);
  scoped_ptr<Channel> channel = CreateChannel(listener_.get());
  listener_->Init(channel.get());
  CHECK(channel->Connect());

  base::MessageLoop::current()->Run();
  return 0;
}

scoped_refptr<base::TaskRunner> PingPongTestClient::task_runner() {
  return main_message_loop_.task_runner();
}

LockThreadAffinity::LockThreadAffinity(int cpu_number)
    : affinity_set_ok_(false) {
#if defined(OS_WIN)
  const DWORD_PTR thread_mask = 1 << cpu_number;
  old_affinity_ = SetThreadAffinityMask(GetCurrentThread(), thread_mask);
  affinity_set_ok_ = old_affinity_ != 0;
#elif defined(OS_LINUX)
  cpu_set_t cpuset;
  CPU_ZERO(&cpuset);
  CPU_SET(cpu_number, &cpuset);
  auto get_result = sched_getaffinity(0, sizeof(old_cpuset_), &old_cpuset_);
  DCHECK_EQ(0, get_result);
  auto set_result = sched_setaffinity(0, sizeof(cpuset), &cpuset);
  // Check for get_result failure, even though it should always succeed.
  affinity_set_ok_ = (set_result == 0) && (get_result == 0);
#endif
  if (!affinity_set_ok_)
    LOG(WARNING) << "Failed to set thread affinity to CPU " << cpu_number;
}

LockThreadAffinity::~LockThreadAffinity() {
  if (!affinity_set_ok_)
    return;
#if defined(OS_WIN)
  auto set_result = SetThreadAffinityMask(GetCurrentThread(), old_affinity_);
  DCHECK_NE(0u, set_result);
#elif defined(OS_LINUX)
  auto set_result = sched_setaffinity(0, sizeof(old_cpuset_), &old_cpuset_);
  DCHECK_EQ(0, set_result);
#endif
}
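
// LockThreadAffinity is used as a scoped guard throughout this file; the
// affinity mask saved in the constructor is restored when the guard goes out
// of scope, e.g.:
//
//   {
//     LockThreadAffinity thread_locker(kSharedCore);
//     // ... run the timed ping-pong loop pinned to kSharedCore ...
//   }  // Previous affinity restored here (if it was successfully changed).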

}  // namespace test
}  // namespace IPC