Use Persistent::Reset.
[chromium-blink-merge.git] / ppapi / host / ppapi_host.cc
blob0b24e10d7d3b826934339c9972e6953c4cb94da3
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "ppapi/host/ppapi_host.h"

#include "base/debug/trace_event.h"
#include "base/logging.h"
#include "ppapi/c/pp_errors.h"
#include "ppapi/host/host_factory.h"
#include "ppapi/host/host_message_context.h"
#include "ppapi/host/instance_message_filter.h"
#include "ppapi/host/resource_host.h"
#include "ppapi/proxy/ppapi_messages.h"
#include "ppapi/proxy/resource_message_params.h"
#include "ppapi/shared_impl/host_resource.h"
17 namespace ppapi {
18 namespace host {
20 namespace {
22 // Put a cap on the maximum number of resources so we don't explode if the
23 // renderer starts spamming us.
24 const size_t kMaxResourcesPerPlugin = 1 << 14;
26 } // namespace
28 PpapiHost::PpapiHost(IPC::Sender* sender,
29 const PpapiPermissions& perms)
30 : sender_(sender),
31 permissions_(perms),
32 next_pending_resource_host_id_(1) {
35 PpapiHost::~PpapiHost() {
36 // Delete these explicitly before destruction since then the host is still
37 // technically alive in case one of the filters accesses us from the
38 // destructor.
39 instance_message_filters_.clear();
41 // The resources may also want to use us in their destructors.
42 resources_.clear();
43 pending_resource_hosts_.clear();
46 bool PpapiHost::Send(IPC::Message* msg) {
47 return sender_->Send(msg);
50 bool PpapiHost::OnMessageReceived(const IPC::Message& msg) {
51 bool handled = true;
52 IPC_BEGIN_MESSAGE_MAP(PpapiHost, msg)
53 IPC_MESSAGE_HANDLER(PpapiHostMsg_ResourceCall,
54 OnHostMsgResourceCall)
55 IPC_MESSAGE_HANDLER_DELAY_REPLY(PpapiHostMsg_ResourceSyncCall,
56 OnHostMsgResourceSyncCall)
57 IPC_MESSAGE_HANDLER(PpapiHostMsg_ResourceCreated,
58 OnHostMsgResourceCreated)
59 IPC_MESSAGE_HANDLER(PpapiHostMsg_AttachToPendingHost,
60 OnHostMsgAttachToPendingHost)
61 IPC_MESSAGE_HANDLER(PpapiHostMsg_ResourceDestroyed,
62 OnHostMsgResourceDestroyed)
63 IPC_MESSAGE_UNHANDLED(handled = false)
64 IPC_END_MESSAGE_MAP()
66 if (!handled) {
67 for (size_t i = 0; i < instance_message_filters_.size(); i++) {
68 if (instance_message_filters_[i]->OnInstanceMessageReceived(msg)) {
69 handled = true;
70 break;
75 return handled;
78 void PpapiHost::SendReply(const ReplyMessageContext& context,
79 const IPC::Message& msg) {
80 TRACE_EVENT2("ppapi proxy", "PpapiHost::SendReply",
81 "Class", IPC_MESSAGE_ID_CLASS(msg.type()),
82 "Line", IPC_MESSAGE_ID_LINE(msg.type()));
83 if (context.sync_reply_msg) {
84 PpapiHostMsg_ResourceSyncCall::WriteReplyParams(context.sync_reply_msg,
85 context.params, msg);
86 Send(context.sync_reply_msg);
87 } else {
88 Send(new PpapiPluginMsg_ResourceReply(context.params, msg));
92 void PpapiHost::SendUnsolicitedReply(PP_Resource resource,
93 const IPC::Message& msg) {
94 TRACE_EVENT2("ppapi proxy", "PpapiHost::SendUnsolicitedReply",
95 "Class", IPC_MESSAGE_ID_CLASS(msg.type()),
96 "Line", IPC_MESSAGE_ID_LINE(msg.type()));
97 DCHECK(resource); // If this fails, host is probably pending.
98 proxy::ResourceMessageReplyParams params(resource, 0);
99 Send(new PpapiPluginMsg_ResourceReply(params, msg));
102 int PpapiHost::AddPendingResourceHost(scoped_ptr<ResourceHost> resource_host) {
103 // The resource ID should not be assigned.
104 DCHECK(resource_host->pp_resource() == 0);
106 int pending_id = next_pending_resource_host_id_++;
107 pending_resource_hosts_[pending_id] =
108 linked_ptr<ResourceHost>(resource_host.release());
109 return pending_id;
112 void PpapiHost::AddHostFactoryFilter(scoped_ptr<HostFactory> filter) {
113 host_factory_filters_.push_back(filter.release());
116 void PpapiHost::AddInstanceMessageFilter(
117 scoped_ptr<InstanceMessageFilter> filter) {
118 instance_message_filters_.push_back(filter.release());
121 void PpapiHost::OnHostMsgResourceCall(
122 const proxy::ResourceMessageCallParams& params,
123 const IPC::Message& nested_msg) {
124 TRACE_EVENT2("ppapi proxy", "PpapiHost::OnHostMsgResourceCall",
125 "Class", IPC_MESSAGE_ID_CLASS(nested_msg.type()),
126 "Line", IPC_MESSAGE_ID_LINE(nested_msg.type()));
127 HostMessageContext context(params);
128 HandleResourceCall(params, nested_msg, &context);
131 void PpapiHost::OnHostMsgResourceSyncCall(
132 const proxy::ResourceMessageCallParams& params,
133 const IPC::Message& nested_msg,
134 IPC::Message* reply_msg) {
135 TRACE_EVENT2("ppapi proxy", "PpapiHost::OnHostMsgResourceSyncCall",
136 "Class", IPC_MESSAGE_ID_CLASS(nested_msg.type()),
137 "Line", IPC_MESSAGE_ID_LINE(nested_msg.type()));
138 // Sync messages should always have callback set because they always expect
139 // a reply from the host.
140 DCHECK(params.has_callback());
141 // Stash the |reply_msg| in the context so that it can be used to reply
142 // to the sync message.
143 HostMessageContext context(params, reply_msg);
144 HandleResourceCall(params, nested_msg, &context);
147 void PpapiHost::HandleResourceCall(
148 const proxy::ResourceMessageCallParams& params,
149 const IPC::Message& nested_msg,
150 HostMessageContext* context) {
151 ResourceHost* resource_host = GetResourceHost(params.pp_resource());
152 if (resource_host) {
153 // CAUTION: Handling the message may cause the destruction of this object.
154 resource_host->HandleMessage(nested_msg, context);
155 } else {
156 if (context->params.has_callback()) {
157 ReplyMessageContext reply_context = context->MakeReplyMessageContext();
158 reply_context.params.set_result(PP_ERROR_BADRESOURCE);
159 SendReply(reply_context, context->reply_msg);
164 void PpapiHost::OnHostMsgResourceCreated(
165 const proxy::ResourceMessageCallParams& params,
166 PP_Instance instance,
167 const IPC::Message& nested_msg) {
168 TRACE_EVENT2("ppapi proxy", "PpapiHost::OnHostMsgResourceCreated",
169 "Class", IPC_MESSAGE_ID_CLASS(nested_msg.type()),
170 "Line", IPC_MESSAGE_ID_LINE(nested_msg.type()));
171 if (resources_.size() >= kMaxResourcesPerPlugin)
172 return;
174 // Run through all filters until one grabs this message.
175 scoped_ptr<ResourceHost> resource_host;
176 DCHECK(!host_factory_filters_.empty()); // Caller forgot to add a factory.
177 for (size_t i = 0; i < host_factory_filters_.size(); i++) {
178 resource_host = host_factory_filters_[i]->CreateResourceHost(
179 this, params, instance, nested_msg).Pass();
180 if (resource_host.get())
181 break;
183 if (!resource_host.get()) {
184 NOTREACHED();
185 return;
188 // Resource should have been assigned a nonzero PP_Resource.
189 DCHECK(resource_host->pp_resource());
191 resources_[params.pp_resource()] =
192 linked_ptr<ResourceHost>(resource_host.release());
195 void PpapiHost::OnHostMsgAttachToPendingHost(PP_Resource pp_resource,
196 int pending_host_id) {
197 PendingHostResourceMap::iterator found =
198 pending_resource_hosts_.find(pending_host_id);
199 if (found == pending_resource_hosts_.end()) {
200 // Plugin sent a bad ID.
201 NOTREACHED();
202 return;
204 found->second->SetPPResourceForPendingHost(pp_resource);
205 resources_[pp_resource] = found->second;
206 pending_resource_hosts_.erase(found);
209 void PpapiHost::OnHostMsgResourceDestroyed(PP_Resource resource) {
210 ResourceMap::iterator found = resources_.find(resource);
211 if (found == resources_.end()) {
212 NOTREACHED();
213 return;
215 // Invoking the HostResource destructor might result in looking up the
216 // PP_Resource in resources_. std::map is not well specified as to whether the
217 // element will be there or not. Therefore, we delay destruction of the
218 // HostResource until after we've made sure the map no longer contains
219 // |resource|.
220 linked_ptr<ResourceHost> delete_at_end_of_scope(found->second);
221 resources_.erase(found);
224 ResourceHost* PpapiHost::GetResourceHost(PP_Resource resource) const {
225 ResourceMap::const_iterator found = resources_.find(resource);
226 return found == resources_.end() ? NULL : found->second.get();
229 } // namespace host
230 } // namespace ppapi