/*
 * PowerNV OPAL asynchronous completion interfaces
 *
 * Copyright 2013-2017 IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/semaphore.h>
#include <linux/spinlock.h>
#include <linux/wait.h>
#include <linux/gfp.h>
#include <linux/of.h>

#include <asm/machdep.h>
#include <asm/opal.h>
/*
 * Lifecycle of an async token. All state transitions are made under
 * opal_async_comp_lock.
 *
 * ASYNC_TOKEN_COMPLETED is required by the completion handler and the
 * wait_event() conditions below; it was missing from the truncated
 * definition.
 */
enum opal_async_token_state {
	ASYNC_TOKEN_UNALLOCATED = 0,
	ASYNC_TOKEN_ALLOCATED,
	ASYNC_TOKEN_DISPATCHED,
	ASYNC_TOKEN_ABANDONED,
	ASYNC_TOKEN_COMPLETED
};
34 struct opal_async_token
{
35 enum opal_async_token_state state
;
36 struct opal_msg response
;
39 static DECLARE_WAIT_QUEUE_HEAD(opal_async_wait
);
40 static DEFINE_SPINLOCK(opal_async_comp_lock
);
41 static struct semaphore opal_async_sem
;
42 static unsigned int opal_max_async_tokens
;
43 static struct opal_async_token
*opal_async_tokens
;
45 static int __opal_async_get_token(void)
48 int i
, token
= -EBUSY
;
50 spin_lock_irqsave(&opal_async_comp_lock
, flags
);
52 for (i
= 0; i
< opal_max_async_tokens
; i
++) {
53 if (opal_async_tokens
[i
].state
== ASYNC_TOKEN_UNALLOCATED
) {
54 opal_async_tokens
[i
].state
= ASYNC_TOKEN_ALLOCATED
;
60 spin_unlock_irqrestore(&opal_async_comp_lock
, flags
);
65 * Note: If the returned token is used in an opal call and opal returns
66 * OPAL_ASYNC_COMPLETION you MUST call one of opal_async_wait_response() or
67 * opal_async_wait_response_interruptible() at least once before calling another
68 * opal_async_* function
70 int opal_async_get_token_interruptible(void)
74 /* Wait until a token is available */
75 if (down_interruptible(&opal_async_sem
))
78 token
= __opal_async_get_token();
84 EXPORT_SYMBOL_GPL(opal_async_get_token_interruptible
);
86 static int __opal_async_release_token(int token
)
91 if (token
< 0 || token
>= opal_max_async_tokens
) {
92 pr_err("%s: Passed token is out of range, token %d\n",
97 spin_lock_irqsave(&opal_async_comp_lock
, flags
);
98 switch (opal_async_tokens
[token
].state
) {
99 case ASYNC_TOKEN_COMPLETED
:
100 case ASYNC_TOKEN_ALLOCATED
:
101 opal_async_tokens
[token
].state
= ASYNC_TOKEN_UNALLOCATED
;
105 * DISPATCHED and ABANDONED tokens must wait for OPAL to respond.
106 * Mark a DISPATCHED token as ABANDONED so that the response handling
107 * code knows no one cares and that it can free it then.
109 case ASYNC_TOKEN_DISPATCHED
:
110 opal_async_tokens
[token
].state
= ASYNC_TOKEN_ABANDONED
;
115 spin_unlock_irqrestore(&opal_async_comp_lock
, flags
);
120 int opal_async_release_token(int token
)
124 ret
= __opal_async_release_token(token
);
130 EXPORT_SYMBOL_GPL(opal_async_release_token
);
132 int opal_async_wait_response(uint64_t token
, struct opal_msg
*msg
)
134 if (token
>= opal_max_async_tokens
) {
135 pr_err("%s: Invalid token passed\n", __func__
);
140 pr_err("%s: Invalid message pointer passed\n", __func__
);
145 * There is no need to mark the token as dispatched, wait_event()
146 * will block until the token completes.
148 * Wakeup the poller before we wait for events to speed things
149 * up on platforms or simulators where the interrupts aren't
153 wait_event(opal_async_wait
, opal_async_tokens
[token
].state
154 == ASYNC_TOKEN_COMPLETED
);
155 memcpy(msg
, &opal_async_tokens
[token
].response
, sizeof(*msg
));
159 EXPORT_SYMBOL_GPL(opal_async_wait_response
);
161 int opal_async_wait_response_interruptible(uint64_t token
, struct opal_msg
*msg
)
166 if (token
>= opal_max_async_tokens
) {
167 pr_err("%s: Invalid token passed\n", __func__
);
172 pr_err("%s: Invalid message pointer passed\n", __func__
);
177 * The first time this gets called we mark the token as DISPATCHED
178 * so that if wait_event_interruptible() returns not zero and the
179 * caller frees the token, we know not to actually free the token
180 * until the response comes.
182 * Only change if the token is ALLOCATED - it may have been
183 * completed even before the caller gets around to calling this
186 * There is also a dirty great comment at the token allocation
187 * function that if the opal call returns OPAL_ASYNC_COMPLETION to
188 * the caller then the caller *must* call this or the not
189 * interruptible version before doing anything else with the
192 if (opal_async_tokens
[token
].state
== ASYNC_TOKEN_ALLOCATED
) {
193 spin_lock_irqsave(&opal_async_comp_lock
, flags
);
194 if (opal_async_tokens
[token
].state
== ASYNC_TOKEN_ALLOCATED
)
195 opal_async_tokens
[token
].state
= ASYNC_TOKEN_DISPATCHED
;
196 spin_unlock_irqrestore(&opal_async_comp_lock
, flags
);
200 * Wakeup the poller before we wait for events to speed things
201 * up on platforms or simulators where the interrupts aren't
205 ret
= wait_event_interruptible(opal_async_wait
,
206 opal_async_tokens
[token
].state
==
207 ASYNC_TOKEN_COMPLETED
);
209 memcpy(msg
, &opal_async_tokens
[token
].response
, sizeof(*msg
));
213 EXPORT_SYMBOL_GPL(opal_async_wait_response_interruptible
);
215 /* Called from interrupt context */
216 static int opal_async_comp_event(struct notifier_block
*nb
,
217 unsigned long msg_type
, void *msg
)
219 struct opal_msg
*comp_msg
= msg
;
220 enum opal_async_token_state state
;
224 if (msg_type
!= OPAL_MSG_ASYNC_COMP
)
227 token
= be64_to_cpu(comp_msg
->params
[0]);
228 spin_lock_irqsave(&opal_async_comp_lock
, flags
);
229 state
= opal_async_tokens
[token
].state
;
230 opal_async_tokens
[token
].state
= ASYNC_TOKEN_COMPLETED
;
231 spin_unlock_irqrestore(&opal_async_comp_lock
, flags
);
233 if (state
== ASYNC_TOKEN_ABANDONED
) {
234 /* Free the token, no one else will */
235 opal_async_release_token(token
);
238 memcpy(&opal_async_tokens
[token
].response
, comp_msg
, sizeof(*comp_msg
));
239 wake_up(&opal_async_wait
);
244 static struct notifier_block opal_async_comp_nb
= {
245 .notifier_call
= opal_async_comp_event
,
250 int __init
opal_async_comp_init(void)
252 struct device_node
*opal_node
;
256 opal_node
= of_find_node_by_path("/ibm,opal");
258 pr_err("%s: Opal node not found\n", __func__
);
263 async
= of_get_property(opal_node
, "opal-msg-async-num", NULL
);
265 pr_err("%s: %pOF has no opal-msg-async-num\n",
266 __func__
, opal_node
);
271 opal_max_async_tokens
= be32_to_cpup(async
);
272 opal_async_tokens
= kcalloc(opal_max_async_tokens
,
273 sizeof(*opal_async_tokens
), GFP_KERNEL
);
274 if (!opal_async_tokens
) {
279 err
= opal_message_notifier_register(OPAL_MSG_ASYNC_COMP
,
280 &opal_async_comp_nb
);
282 pr_err("%s: Can't register OPAL event notifier (%d)\n",
284 kfree(opal_async_tokens
);
288 sema_init(&opal_async_sem
, opal_max_async_tokens
);
291 of_node_put(opal_node
);