// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "chrome/nacl/nacl_ipc_adapter.h"

#include <string.h>

#include "base/memory/scoped_ptr.h"
#include "base/message_loop.h"
#include "base/message_loop_proxy.h"
#include "base/threading/platform_thread.h"
#include "base/threading/simple_thread.h"
#include "ipc/ipc_test_sink.h"
#include "native_client/src/trusted/desc/nacl_desc_custom.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace {

class NaClIPCAdapterTest : public testing::Test {
 public:
  NaClIPCAdapterTest() {}

  // testing::Test implementation.
  virtual void SetUp() OVERRIDE {
    sink_ = new IPC::TestSink;

    // Takes ownership of the sink_ pointer. Note we provide the current
    // message loop instead of using a real IO thread. This should work OK
    // since we do not need real IPC for the tests.
    adapter_ = new NaClIPCAdapter(scoped_ptr<IPC::Channel>(sink_),
                                  base::MessageLoopProxy::current());
  }
  virtual void TearDown() OVERRIDE {
    sink_ = NULL;  // This pointer is actually owned by the IPCAdapter.
    adapter_ = NULL;

    // The adapter destructor has to post a task to destroy the Channel on the
    // IO thread. For the purposes of the test, we just need to make sure that
    // task gets run, or it will appear as a leak.
    message_loop_.RunAllPending();
  }

 protected:
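  // Test helpers that wrap the caller's buffer in a single-element iovec and a
  // NaClImcTypedMsgHdr, which is the message shape the adapter's
  // BlockingReceive() and Send() operate on.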
  int BlockingReceive(void* buf, size_t buf_size) {
    NaClImcMsgIoVec iov = {buf, buf_size};
    NaClImcTypedMsgHdr msg = {&iov, 1};
    return adapter_->BlockingReceive(&msg);
  }

  int Send(void* buf, size_t buf_size) {
    NaClImcMsgIoVec iov = {buf, buf_size};
    NaClImcTypedMsgHdr msg = {&iov, 1};
    return adapter_->Send(&msg);
  }
  MessageLoop message_loop_;

  scoped_refptr<NaClIPCAdapter> adapter_;

  // Messages sent from NaCl to the adapter end up here. Note that we create
  // this pointer and pass ownership of it to the IPC adapter, which will keep
  // it alive as long as the adapter is alive. This means that when the
  // adapter goes away, this pointer will become invalid.
  //
  // In real life the adapter needs to take ownership so the channel can be
  // destroyed on the right thread.
  IPC::TestSink* sink_;
};

}  // namespace
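// The tests below exercise the rewriting the adapter performs between
// IPC::Message and the flat wire format handed to NaCl: a
// NaClIPCAdapter::NaClMessageHeader carrying the payload_size, routing, type,
// flags, num_fds and pad fields checked below, followed immediately by the
// raw payload bytes.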
// Tests a simple message sent from native code to NaCl getting rewritten.
TEST_F(NaClIPCAdapterTest, SimpleReceiveRewriting) {
  int routing_id = 0x89898989;
  uint32 type = 0x55555555;
  IPC::Message input(routing_id, type, IPC::Message::PRIORITY_NORMAL);
  uint32 flags = input.flags();

  int value = 0x12345678;
  input.WriteInt(value);
  adapter_->OnMessageReceived(input);

  // The buffer just needs to be big enough for our message with one int.
  const int kBufSize = 64;
  char buf[kBufSize];

  int bytes_read = BlockingReceive(buf, kBufSize);
  EXPECT_EQ(sizeof(NaClIPCAdapter::NaClMessageHeader) + sizeof(int),
            static_cast<size_t>(bytes_read));

  const NaClIPCAdapter::NaClMessageHeader* output_header =
      reinterpret_cast<const NaClIPCAdapter::NaClMessageHeader*>(buf);
  EXPECT_EQ(sizeof(int), output_header->payload_size);
  EXPECT_EQ(routing_id, output_header->routing);
  EXPECT_EQ(type, output_header->type);
  EXPECT_EQ(flags, output_header->flags);
  EXPECT_EQ(0u, output_header->num_fds);
  EXPECT_EQ(0u, output_header->pad);

  // Validate the payload.
  EXPECT_EQ(value,
            *reinterpret_cast<const int*>(
                &buf[sizeof(NaClIPCAdapter::NaClMessageHeader)]));
}
// Tests a simple message sent from NaCl to native code getting rewritten.
TEST_F(NaClIPCAdapterTest, SendRewriting) {
  int routing_id = 0x89898989;
  uint32 type = 0x55555555;
  int value = 0x12345678;

  // Send a message with one int inside it.
  const int buf_size = sizeof(NaClIPCAdapter::NaClMessageHeader) + sizeof(int);
  char buf[buf_size] = {0};

  NaClIPCAdapter::NaClMessageHeader* header =
      reinterpret_cast<NaClIPCAdapter::NaClMessageHeader*>(buf);
  header->payload_size = sizeof(int);
  header->routing = routing_id;
  header->type = type;
  header->flags = 0;
  header->num_fds = 0;
  *reinterpret_cast<int*>(
      &buf[sizeof(NaClIPCAdapter::NaClMessageHeader)]) = value;

  int result = Send(buf, buf_size);
  EXPECT_EQ(buf_size, result);

  // Check that the message came out the other end in the test sink
  // (messages are posted, so we have to pump).
  message_loop_.RunAllPending();
  ASSERT_EQ(1u, sink_->message_count());
  const IPC::Message* msg = sink_->GetMessageAt(0);

  EXPECT_EQ(sizeof(int), msg->payload_size());
  EXPECT_EQ(header->routing, msg->routing_id());
  EXPECT_EQ(header->type, msg->type());

  // Now test the partial send case. We should be able to break the message
  // into several pieces and it should still work.
  sink_->ClearMessages();
  int first_chunk_size = 7;
  result = Send(buf, first_chunk_size);
  EXPECT_EQ(first_chunk_size, result);

  // The first partial send should not have generated any messages.
  message_loop_.RunAllPending();
  ASSERT_EQ(0u, sink_->message_count());

  // Neither should the second partial send.
  int second_chunk_size = 2;
  result = Send(&buf[first_chunk_size], second_chunk_size);
  EXPECT_EQ(second_chunk_size, result);
  message_loop_.RunAllPending();
  ASSERT_EQ(0u, sink_->message_count());

  // Send the rest of the message in a third chunk.
  int third_chunk_size = buf_size - first_chunk_size - second_chunk_size;
  result = Send(&buf[first_chunk_size + second_chunk_size],
                third_chunk_size);
  EXPECT_EQ(third_chunk_size, result);

  // The last send should have generated one message.
  message_loop_.RunAllPending();
  ASSERT_EQ(1u, sink_->message_count());
  msg = sink_->GetMessageAt(0);
  EXPECT_EQ(sizeof(int), msg->payload_size());
  EXPECT_EQ(header->routing, msg->routing_id());
  EXPECT_EQ(header->type, msg->type());
}
// Tests when a buffer is too small to receive the entire message.
TEST_F(NaClIPCAdapterTest, PartialReceive) {
  int routing_id_1 = 0x89898989;
  uint32 type_1 = 0x55555555;
  IPC::Message input_1(routing_id_1, type_1, IPC::Message::PRIORITY_NORMAL);
  int value_1 = 0x12121212;
  input_1.WriteInt(value_1);
  adapter_->OnMessageReceived(input_1);

  int routing_id_2 = 0x90909090;
  uint32 type_2 = 0x66666666;
  IPC::Message input_2(routing_id_2, type_2, IPC::Message::PRIORITY_NORMAL);
  int value_2 = 0x23232323;
  input_2.WriteInt(value_2);
  adapter_->OnMessageReceived(input_2);

  const int kBufSize = 64;
  char buf[kBufSize];

  // Read part of the first message.
  int bytes_requested = 7;
  int bytes_read = BlockingReceive(buf, bytes_requested);
  ASSERT_EQ(bytes_requested, bytes_read);

  // Read the rest. This should give us the remainder of the first message
  // only.
  bytes_read += BlockingReceive(&buf[bytes_requested],
                                kBufSize - bytes_requested);
  EXPECT_EQ(sizeof(NaClIPCAdapter::NaClMessageHeader) + sizeof(int),
            static_cast<size_t>(bytes_read));

  // Make sure we got the right message.
  const NaClIPCAdapter::NaClMessageHeader* output_header =
      reinterpret_cast<const NaClIPCAdapter::NaClMessageHeader*>(buf);
  EXPECT_EQ(sizeof(int), output_header->payload_size);
  EXPECT_EQ(routing_id_1, output_header->routing);
  EXPECT_EQ(type_1, output_header->type);

  // Read the second message to make sure we moved on to it.
  bytes_read = BlockingReceive(buf, kBufSize);
  EXPECT_EQ(sizeof(NaClIPCAdapter::NaClMessageHeader) + sizeof(int),
            static_cast<size_t>(bytes_read));
  output_header =
      reinterpret_cast<const NaClIPCAdapter::NaClMessageHeader*>(buf);
  EXPECT_EQ(sizeof(int), output_header->payload_size);
  EXPECT_EQ(routing_id_2, output_header->routing);
  EXPECT_EQ(type_2, output_header->type);
}
// Tests sending messages that are too large. Sends that are too small are
// tested implicitly here and in the success case, because a too-small send
// succeeds and buffers the data.
TEST_F(NaClIPCAdapterTest, SendOverflow) {
  int routing_id = 0x89898989;
  uint32 type = 0x55555555;
  int value = 0x12345678;

  // Make a message with one int inside it. Reserve some extra space so
  // we can test what happens when we send too much data.
  const int buf_size = sizeof(NaClIPCAdapter::NaClMessageHeader) + sizeof(int);
  const int big_buf_size = buf_size + 4;
  char buf[big_buf_size] = {0};

  NaClIPCAdapter::NaClMessageHeader* header =
      reinterpret_cast<NaClIPCAdapter::NaClMessageHeader*>(buf);
  header->payload_size = sizeof(int);
  header->routing = routing_id;
  header->type = type;
  header->flags = 0;
  header->num_fds = 0;
  *reinterpret_cast<int*>(
      &buf[sizeof(NaClIPCAdapter::NaClMessageHeader)]) = value;

  // Send too much data and make sure that the send fails.
  int result = Send(buf, big_buf_size);
  EXPECT_EQ(-1, result);
  message_loop_.RunAllPending();
  ASSERT_EQ(0u, sink_->message_count());

  // Send too much data in two chunks and make sure that the send fails.
  int first_chunk_size = 7;
  result = Send(buf, first_chunk_size);
  EXPECT_EQ(first_chunk_size, result);

  // The first partial send should not have generated any messages.
  message_loop_.RunAllPending();
  ASSERT_EQ(0u, sink_->message_count());

  int second_chunk_size = big_buf_size - first_chunk_size;
  result = Send(&buf[first_chunk_size], second_chunk_size);
  EXPECT_EQ(-1, result);
  message_loop_.RunAllPending();
  ASSERT_EQ(0u, sink_->message_count());
}
// Tests that when the IPC channel reports an error, waiting reads are
// unblocked and return a -1 error code.
TEST_F(NaClIPCAdapterTest, ReadWithChannelError) {
  // Run a background thread that waits a bit and then calls the channel error
  // handler. This should wake up any waiting threads, which then immediately
  // return -1. There is an inherent race condition in that we can't be sure
  // whether the other thread is actually waiting when this happens. That is
  // OK, since the behavior (which we also explicitly test below) is to return
  // -1 if the channel has already had an error when you start waiting.
  class MyThread : public base::SimpleThread {
   public:
    explicit MyThread(NaClIPCAdapter* adapter)
        : SimpleThread("NaClIPCAdapterThread"),
          adapter_(adapter) {}

    virtual void Run() {
      base::PlatformThread::Sleep(base::TimeDelta::FromSeconds(1));
      adapter_->OnChannelError();
    }

   private:
    scoped_refptr<NaClIPCAdapter> adapter_;
  };
  MyThread thread(adapter_);

  // IMPORTANT: do not return early from here down (including via ASSERT_*),
  // because the thread needs to be joined or it will assert.
  thread.Start();

  // Request data. This will normally (modulo races) block until data is
  // received or there is an error, and the thread above will wake us up
  // after 1s.
  const int kBufSize = 64;
  char buf[kBufSize];
  int result = BlockingReceive(buf, kBufSize);
  EXPECT_EQ(-1, result);

  // Test the "previously had an error" case. BlockingReceive should return
  // immediately if there was an error.
  result = BlockingReceive(buf, kBufSize);
  EXPECT_EQ(-1, result);

  thread.Join();
}