arch/powerpc/platforms/powernv/opal-async.c
/*
 * PowerNV OPAL asynchronous completion interfaces
 *
 * Copyright 2013-2017 IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#undef DEBUG

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/semaphore.h>
#include <linux/spinlock.h>
#include <linux/wait.h>
#include <linux/gfp.h>
#include <linux/of.h>
#include <asm/machdep.h>
#include <asm/opal.h>
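
/*
 * Token state machine, as implemented by the functions below:
 *
 *	UNALLOCATED -> ALLOCATED	__opal_async_get_token()
 *	ALLOCATED   -> DISPATCHED	first opal_async_wait_response_interruptible()
 *	ALLOCATED   -> UNALLOCATED	released before the OPAL call was made
 *	DISPATCHED  -> ABANDONED	released while OPAL still owes a response
 *	any state   -> COMPLETED	opal_async_comp_event() on OPAL's reply
 *	COMPLETED   -> UNALLOCATED	opal_async_release_token()
 */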

enum opal_async_token_state {
	ASYNC_TOKEN_UNALLOCATED = 0,
	ASYNC_TOKEN_ALLOCATED,
	ASYNC_TOKEN_DISPATCHED,
	ASYNC_TOKEN_ABANDONED,
	ASYNC_TOKEN_COMPLETED
};

struct opal_async_token {
	enum opal_async_token_state state;
	struct opal_msg response;
};

static DECLARE_WAIT_QUEUE_HEAD(opal_async_wait);
static DEFINE_SPINLOCK(opal_async_comp_lock);
static struct semaphore opal_async_sem;
static unsigned int opal_max_async_tokens;
static struct opal_async_token *opal_async_tokens;

static int __opal_async_get_token(void)
{
	unsigned long flags;
	int i, token = -EBUSY;

	spin_lock_irqsave(&opal_async_comp_lock, flags);

	for (i = 0; i < opal_max_async_tokens; i++) {
		if (opal_async_tokens[i].state == ASYNC_TOKEN_UNALLOCATED) {
			opal_async_tokens[i].state = ASYNC_TOKEN_ALLOCATED;
			token = i;
			break;
		}
	}

	spin_unlock_irqrestore(&opal_async_comp_lock, flags);
	return token;
}

/*
 * Note: If the returned token is used in an opal call and opal returns
 * OPAL_ASYNC_COMPLETION you MUST call one of opal_async_wait_response() or
 * opal_async_wait_response_interruptible() at least once before calling
 * another opal_async_* function.
 */
int opal_async_get_token_interruptible(void)
{
	int token;

	/* Wait until a token is available */
	if (down_interruptible(&opal_async_sem))
		return -ERESTARTSYS;

	token = __opal_async_get_token();
	if (token < 0)
		up(&opal_async_sem);

	return token;
}
EXPORT_SYMBOL_GPL(opal_async_get_token_interruptible);
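
/*
 * Illustrative caller pattern (a sketch, not part of this file):
 * opal_example_call() is a hypothetical OPAL call that takes an async
 * token and may return OPAL_ASYNC_COMPLETION; real users such as
 * opal_sensor_read() follow the same shape.
 *
 *	struct opal_msg msg;
 *	int token, rc;
 *
 *	token = opal_async_get_token_interruptible();
 *	if (token < 0)
 *		return token;
 *
 *	rc = opal_example_call(arg, token);
 *	if (rc == OPAL_ASYNC_COMPLETION) {
 *		rc = opal_async_wait_response(token, &msg);
 *		if (!rc)
 *			rc = opal_get_async_rc(msg);
 *	}
 *
 *	opal_async_release_token(token);
 */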

static int __opal_async_release_token(int token)
{
	unsigned long flags;
	int rc;

	if (token < 0 || token >= opal_max_async_tokens) {
		pr_err("%s: Passed token is out of range, token %d\n",
				__func__, token);
		return -EINVAL;
	}

	spin_lock_irqsave(&opal_async_comp_lock, flags);
	switch (opal_async_tokens[token].state) {
	case ASYNC_TOKEN_COMPLETED:
	case ASYNC_TOKEN_ALLOCATED:
		opal_async_tokens[token].state = ASYNC_TOKEN_UNALLOCATED;
		rc = 0;
		break;
	/*
	 * DISPATCHED and ABANDONED tokens must wait for OPAL to respond.
	 * Mark a DISPATCHED token as ABANDONED so that the response handling
	 * code knows no one cares and that it can free it then.
	 */
	case ASYNC_TOKEN_DISPATCHED:
		opal_async_tokens[token].state = ASYNC_TOKEN_ABANDONED;
		/* Fall through */
	default:
		rc = 1;
	}
	spin_unlock_irqrestore(&opal_async_comp_lock, flags);

	return rc;
}

int opal_async_release_token(int token)
{
	int ret;

	ret = __opal_async_release_token(token);
	if (!ret)
		up(&opal_async_sem);

	return ret;
}
EXPORT_SYMBOL_GPL(opal_async_release_token);

int opal_async_wait_response(uint64_t token, struct opal_msg *msg)
{
	if (token >= opal_max_async_tokens) {
		pr_err("%s: Invalid token passed\n", __func__);
		return -EINVAL;
	}

	if (!msg) {
		pr_err("%s: Invalid message pointer passed\n", __func__);
		return -EINVAL;
	}

	/*
	 * There is no need to mark the token as dispatched, wait_event()
	 * will block until the token completes.
	 *
	 * Wakeup the poller before we wait for events to speed things
	 * up on platforms or simulators where the interrupts aren't
	 * functional.
	 */
	opal_wake_poller();
	wait_event(opal_async_wait, opal_async_tokens[token].state
			== ASYNC_TOKEN_COMPLETED);
	memcpy(msg, &opal_async_tokens[token].response, sizeof(*msg));

	return 0;
}
EXPORT_SYMBOL_GPL(opal_async_wait_response);

int opal_async_wait_response_interruptible(uint64_t token,
					   struct opal_msg *msg)
{
	unsigned long flags;
	int ret;

	if (token >= opal_max_async_tokens) {
		pr_err("%s: Invalid token passed\n", __func__);
		return -EINVAL;
	}

	if (!msg) {
		pr_err("%s: Invalid message pointer passed\n", __func__);
		return -EINVAL;
	}

	/*
	 * The first time this gets called we mark the token as DISPATCHED
	 * so that if wait_event_interruptible() returns non-zero and the
	 * caller frees the token, we know not to actually free the token
	 * until the response comes.
	 *
	 * Only change if the token is ALLOCATED - it may have been
	 * completed even before the caller gets around to calling this
	 * the first time.
	 *
	 * There is also a dirty great comment at the token allocation
	 * function that if the opal call returns OPAL_ASYNC_COMPLETION to
	 * the caller then the caller *must* call this or the not
	 * interruptible version before doing anything else with the
	 * token.
	 */
	if (opal_async_tokens[token].state == ASYNC_TOKEN_ALLOCATED) {
		spin_lock_irqsave(&opal_async_comp_lock, flags);
		if (opal_async_tokens[token].state == ASYNC_TOKEN_ALLOCATED)
			opal_async_tokens[token].state = ASYNC_TOKEN_DISPATCHED;
		spin_unlock_irqrestore(&opal_async_comp_lock, flags);
	}

	/*
	 * Wakeup the poller before we wait for events to speed things
	 * up on platforms or simulators where the interrupts aren't
	 * functional.
	 */
	opal_wake_poller();
	ret = wait_event_interruptible(opal_async_wait,
			opal_async_tokens[token].state ==
			ASYNC_TOKEN_COMPLETED);
	if (!ret)
		memcpy(msg, &opal_async_tokens[token].response, sizeof(*msg));

	return ret;
}
EXPORT_SYMBOL_GPL(opal_async_wait_response_interruptible);
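
/*
 * Illustrative sketch (not part of this file): if the wait above is cut
 * short by a signal, the caller may still release the token right away;
 * __opal_async_release_token() marks the DISPATCHED token ABANDONED and
 * opal_async_comp_event() frees it once OPAL eventually responds.
 *
 *	rc = opal_async_wait_response_interruptible(token, &msg);
 *	if (rc == -ERESTARTSYS) {
 *		opal_async_release_token(token);
 *		return rc;
 *	}
 */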

/* Called from interrupt context */
static int opal_async_comp_event(struct notifier_block *nb,
				 unsigned long msg_type, void *msg)
{
	struct opal_msg *comp_msg = msg;
	enum opal_async_token_state state;
	unsigned long flags;
	uint64_t token;

	if (msg_type != OPAL_MSG_ASYNC_COMP)
		return 0;

	token = be64_to_cpu(comp_msg->params[0]);
	spin_lock_irqsave(&opal_async_comp_lock, flags);
	state = opal_async_tokens[token].state;
	opal_async_tokens[token].state = ASYNC_TOKEN_COMPLETED;
	spin_unlock_irqrestore(&opal_async_comp_lock, flags);

	if (state == ASYNC_TOKEN_ABANDONED) {
		/* Free the token, no one else will */
		opal_async_release_token(token);
		return 0;
	}

	memcpy(&opal_async_tokens[token].response, comp_msg, sizeof(*comp_msg));
	wake_up(&opal_async_wait);

	return 0;
}

static struct notifier_block opal_async_comp_nb = {
	.notifier_call	= opal_async_comp_event,
	.next		= NULL,
	.priority	= 0,
};

int __init opal_async_comp_init(void)
{
	struct device_node *opal_node;
	const __be32 *async;
	int err;

	opal_node = of_find_node_by_path("/ibm,opal");
	if (!opal_node) {
		pr_err("%s: Opal node not found\n", __func__);
		err = -ENOENT;
		goto out;
	}

	async = of_get_property(opal_node, "opal-msg-async-num", NULL);
	if (!async) {
		pr_err("%s: %pOF has no opal-msg-async-num\n",
				__func__, opal_node);
		err = -ENOENT;
		goto out_opal_node;
	}

	opal_max_async_tokens = be32_to_cpup(async);
	opal_async_tokens = kcalloc(opal_max_async_tokens,
			sizeof(*opal_async_tokens), GFP_KERNEL);
	if (!opal_async_tokens) {
		err = -ENOMEM;
		goto out_opal_node;
	}

	err = opal_message_notifier_register(OPAL_MSG_ASYNC_COMP,
					     &opal_async_comp_nb);
	if (err) {
		pr_err("%s: Can't register OPAL event notifier (%d)\n",
				__func__, err);
		kfree(opal_async_tokens);
		goto out_opal_node;
	}

	sema_init(&opal_async_sem, opal_max_async_tokens);

out_opal_node:
	of_node_put(opal_node);
out:
	return err;
}