arch/powerpc/platforms/powernv/opal-async.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * PowerNV OPAL asynchronous completion interfaces
 *
 * Copyright 2013-2017 IBM Corp.
 */
#undef DEBUG

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/semaphore.h>
#include <linux/spinlock.h>
#include <linux/wait.h>
#include <linux/gfp.h>
#include <linux/of.h>
#include <asm/machdep.h>
#include <asm/opal.h>
enum opal_async_token_state {
	ASYNC_TOKEN_UNALLOCATED = 0,
	ASYNC_TOKEN_ALLOCATED,
	ASYNC_TOKEN_DISPATCHED,
	ASYNC_TOKEN_ABANDONED,
	ASYNC_TOKEN_COMPLETED
};

struct opal_async_token {
	enum opal_async_token_state state;
	struct opal_msg response;
};
static DECLARE_WAIT_QUEUE_HEAD(opal_async_wait);
static DEFINE_SPINLOCK(opal_async_comp_lock);
static struct semaphore opal_async_sem;
static unsigned int opal_max_async_tokens;
static struct opal_async_token *opal_async_tokens;
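
/*
 * Concurrency design (summary, inferred from the code below):
 * opal_async_sem counts free tokens so that allocators sleep rather than
 * spin when all tokens are in use; opal_async_comp_lock protects the token
 * state array (it is also taken from interrupt context by the completion
 * notifier); opal_async_wait is woken on each completion.
 */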
static int __opal_async_get_token(void)
{
	unsigned long flags;
	int i, token = -EBUSY;

	spin_lock_irqsave(&opal_async_comp_lock, flags);

	for (i = 0; i < opal_max_async_tokens; i++) {
		if (opal_async_tokens[i].state == ASYNC_TOKEN_UNALLOCATED) {
			opal_async_tokens[i].state = ASYNC_TOKEN_ALLOCATED;
			token = i;
			break;
		}
	}

	spin_unlock_irqrestore(&opal_async_comp_lock, flags);
	return token;
}
/*
 * Note: If the returned token is used in an opal call and opal returns
 * OPAL_ASYNC_COMPLETION you MUST call one of opal_async_wait_response() or
 * opal_async_wait_response_interruptible() at least once before calling another
 * opal_async_* function
 */
int opal_async_get_token_interruptible(void)
{
	int token;

	/* Wait until a token is available */
	if (down_interruptible(&opal_async_sem))
		return -ERESTARTSYS;

	token = __opal_async_get_token();
	if (token < 0)
		up(&opal_async_sem);

	return token;
}
EXPORT_SYMBOL_GPL(opal_async_get_token_interruptible);
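
/*
 * Illustrative caller sketch (not part of this file; opal_example_call()
 * is a hypothetical stand-in for any OPAL call that can return
 * OPAL_ASYNC_COMPLETION, and opal_get_async_rc() is the helper from
 * asm/opal.h that extracts the async return code):
 *
 *	struct opal_msg msg;
 *	int token, rc;
 *
 *	token = opal_async_get_token_interruptible();
 *	if (token < 0)
 *		return token;
 *
 *	rc = opal_example_call(token, ...);
 *	if (rc == OPAL_ASYNC_COMPLETION) {
 *		rc = opal_async_wait_response(token, &msg);
 *		if (!rc)
 *			rc = opal_get_async_rc(msg);
 *	}
 *
 *	opal_async_release_token(token);
 *	return rc;
 */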
static int __opal_async_release_token(int token)
{
	unsigned long flags;
	int rc;

	if (token < 0 || token >= opal_max_async_tokens) {
		pr_err("%s: Passed token is out of range, token %d\n",
		       __func__, token);
		return -EINVAL;
	}

	spin_lock_irqsave(&opal_async_comp_lock, flags);
	switch (opal_async_tokens[token].state) {
	case ASYNC_TOKEN_COMPLETED:
	case ASYNC_TOKEN_ALLOCATED:
		opal_async_tokens[token].state = ASYNC_TOKEN_UNALLOCATED;
		rc = 0;
		break;
	/*
	 * DISPATCHED and ABANDONED tokens must wait for OPAL to respond.
	 * Mark a DISPATCHED token as ABANDONED so that the response handling
	 * code knows no one cares and can free the token once the response
	 * arrives.
	 */
	case ASYNC_TOKEN_DISPATCHED:
		opal_async_tokens[token].state = ASYNC_TOKEN_ABANDONED;
		fallthrough;
	default:
		rc = 1;
	}
	spin_unlock_irqrestore(&opal_async_comp_lock, flags);

	return rc;
}

int opal_async_release_token(int token)
{
	int ret;

	ret = __opal_async_release_token(token);
	if (!ret)
		up(&opal_async_sem);

	return ret;
}
EXPORT_SYMBOL_GPL(opal_async_release_token);
int opal_async_wait_response(uint64_t token, struct opal_msg *msg)
{
	if (token >= opal_max_async_tokens) {
		pr_err("%s: Invalid token passed\n", __func__);
		return -EINVAL;
	}

	if (!msg) {
		pr_err("%s: Invalid message pointer passed\n", __func__);
		return -EINVAL;
	}

	/*
	 * There is no need to mark the token as dispatched, wait_event()
	 * will block until the token completes.
	 *
	 * Wake up the poller before we wait for events to speed things
	 * up on platforms or simulators where the interrupts aren't
	 * functional.
	 */
	opal_wake_poller();
	wait_event(opal_async_wait, opal_async_tokens[token].state
		   == ASYNC_TOKEN_COMPLETED);
	memcpy(msg, &opal_async_tokens[token].response, sizeof(*msg));

	return 0;
}
EXPORT_SYMBOL_GPL(opal_async_wait_response);
int opal_async_wait_response_interruptible(uint64_t token, struct opal_msg *msg)
{
	unsigned long flags;
	int ret;

	if (token >= opal_max_async_tokens) {
		pr_err("%s: Invalid token passed\n", __func__);
		return -EINVAL;
	}

	if (!msg) {
		pr_err("%s: Invalid message pointer passed\n", __func__);
		return -EINVAL;
	}

	/*
	 * The first time this gets called we mark the token as DISPATCHED
	 * so that if wait_event_interruptible() returns non-zero and the
	 * caller frees the token, we know not to actually free the token
	 * until the response comes.
	 *
	 * Only change if the token is ALLOCATED - it may have been
	 * completed even before the caller gets around to calling this
	 * the first time.
	 *
	 * There is also a dirty great comment at the token allocation
	 * function that if the opal call returns OPAL_ASYNC_COMPLETION to
	 * the caller then the caller *must* call this or the
	 * non-interruptible version before doing anything else with the
	 * token.
	 */
	if (opal_async_tokens[token].state == ASYNC_TOKEN_ALLOCATED) {
		spin_lock_irqsave(&opal_async_comp_lock, flags);
		if (opal_async_tokens[token].state == ASYNC_TOKEN_ALLOCATED)
			opal_async_tokens[token].state = ASYNC_TOKEN_DISPATCHED;
		spin_unlock_irqrestore(&opal_async_comp_lock, flags);
	}

	/*
	 * Wake up the poller before we wait for events to speed things
	 * up on platforms or simulators where the interrupts aren't
	 * functional.
	 */
	opal_wake_poller();
	ret = wait_event_interruptible(opal_async_wait,
				       opal_async_tokens[token].state ==
				       ASYNC_TOKEN_COMPLETED);
	if (!ret)
		memcpy(msg, &opal_async_tokens[token].response, sizeof(*msg));

	return ret;
}
EXPORT_SYMBOL_GPL(opal_async_wait_response_interruptible);
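
/*
 * Illustrative sketch of handling an interrupted wait (hypothetical
 * caller code, not part of this file):
 *
 *	ret = opal_async_wait_response_interruptible(token, &msg);
 *	if (ret) {
 *		// A signal arrived: the token is now DISPATCHED, so the
 *		// release below only marks it ABANDONED; the completion
 *		// handler frees it once OPAL eventually responds.
 *		opal_async_release_token(token);
 *		return ret;
 *	}
 */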
/* Called from interrupt context */
static int opal_async_comp_event(struct notifier_block *nb,
				 unsigned long msg_type, void *msg)
{
	struct opal_msg *comp_msg = msg;
	enum opal_async_token_state state;
	unsigned long flags;
	uint64_t token;

	if (msg_type != OPAL_MSG_ASYNC_COMP)
		return 0;

	/* params[0] of an async completion message carries the token */
	token = be64_to_cpu(comp_msg->params[0]);
	spin_lock_irqsave(&opal_async_comp_lock, flags);
	state = opal_async_tokens[token].state;
	opal_async_tokens[token].state = ASYNC_TOKEN_COMPLETED;
	spin_unlock_irqrestore(&opal_async_comp_lock, flags);

	if (state == ASYNC_TOKEN_ABANDONED) {
		/* Free the token, no one else will */
		opal_async_release_token(token);
		return 0;
	}

	memcpy(&opal_async_tokens[token].response, comp_msg, sizeof(*comp_msg));
	wake_up(&opal_async_wait);

	return 0;
}
static struct notifier_block opal_async_comp_nb = {
	.notifier_call	= opal_async_comp_event,
	.next		= NULL,
	.priority	= 0,
};
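
/*
 * One-time setup, run during PowerNV platform boot (from opal_init() at
 * the time of writing): size the token table from the device tree and
 * register for async completion messages.
 */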
int __init opal_async_comp_init(void)
{
	struct device_node *opal_node;
	const __be32 *async;
	int err;

	opal_node = of_find_node_by_path("/ibm,opal");
	if (!opal_node) {
		pr_err("%s: Opal node not found\n", __func__);
		err = -ENOENT;
		goto out;
	}

	async = of_get_property(opal_node, "opal-msg-async-num", NULL);
	if (!async) {
		pr_err("%s: %pOF has no opal-msg-async-num\n",
		       __func__, opal_node);
		err = -ENOENT;
		goto out_opal_node;
	}

	opal_max_async_tokens = be32_to_cpup(async);
	opal_async_tokens = kcalloc(opal_max_async_tokens,
				    sizeof(*opal_async_tokens), GFP_KERNEL);
	if (!opal_async_tokens) {
		err = -ENOMEM;
		goto out_opal_node;
	}

	err = opal_message_notifier_register(OPAL_MSG_ASYNC_COMP,
					     &opal_async_comp_nb);
	if (err) {
		pr_err("%s: Can't register OPAL event notifier (%d)\n",
		       __func__, err);
		kfree(opal_async_tokens);
		goto out_opal_node;
	}

	sema_init(&opal_async_sem, opal_max_async_tokens);

out_opal_node:
	of_node_put(opal_node);
out:
	return err;
}
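
/*
 * For reference, the firmware advertises the token count via the device
 * tree; an illustrative node (the value here is made up) looks like:
 *
 *	ibm,opal {
 *		opal-msg-async-num = <8>;
 *		...
 *	};
 */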