1 // SPDX-License-Identifier: GPL-2.0-or-later
/*
 * PowerNV OPAL asynchronous completion interfaces
 *
 * Copyright 2013-2017 IBM Corp.
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/semaphore.h>
#include <linux/spinlock.h>
#include <linux/wait.h>
#include <linux/gfp.h>
#include <linux/of.h>

#include <asm/machdep.h>
#include <asm/opal.h>
/*
 * Lifecycle of an async token. The COMPLETED state is required: the
 * completion handler and both wait functions in this file test for it.
 */
enum opal_async_token_state {
	ASYNC_TOKEN_UNALLOCATED = 0,	/* free for __opal_async_get_token() */
	ASYNC_TOKEN_ALLOCATED,		/* handed out, no OPAL call yet waited on */
	ASYNC_TOKEN_DISPATCHED,		/* a waiter has been (or is) blocked on it */
	ASYNC_TOKEN_ABANDONED,		/* released before OPAL responded */
	ASYNC_TOKEN_COMPLETED		/* OPAL response received */
};
30 struct opal_async_token
{
31 enum opal_async_token_state state
;
32 struct opal_msg response
;
35 static DECLARE_WAIT_QUEUE_HEAD(opal_async_wait
);
36 static DEFINE_SPINLOCK(opal_async_comp_lock
);
37 static struct semaphore opal_async_sem
;
38 static unsigned int opal_max_async_tokens
;
39 static struct opal_async_token
*opal_async_tokens
;
41 static int __opal_async_get_token(void)
44 int i
, token
= -EBUSY
;
46 spin_lock_irqsave(&opal_async_comp_lock
, flags
);
48 for (i
= 0; i
< opal_max_async_tokens
; i
++) {
49 if (opal_async_tokens
[i
].state
== ASYNC_TOKEN_UNALLOCATED
) {
50 opal_async_tokens
[i
].state
= ASYNC_TOKEN_ALLOCATED
;
56 spin_unlock_irqrestore(&opal_async_comp_lock
, flags
);
61 * Note: If the returned token is used in an opal call and opal returns
62 * OPAL_ASYNC_COMPLETION you MUST call one of opal_async_wait_response() or
63 * opal_async_wait_response_interruptible() at least once before calling another
64 * opal_async_* function
66 int opal_async_get_token_interruptible(void)
70 /* Wait until a token is available */
71 if (down_interruptible(&opal_async_sem
))
74 token
= __opal_async_get_token();
80 EXPORT_SYMBOL_GPL(opal_async_get_token_interruptible
);
82 static int __opal_async_release_token(int token
)
87 if (token
< 0 || token
>= opal_max_async_tokens
) {
88 pr_err("%s: Passed token is out of range, token %d\n",
93 spin_lock_irqsave(&opal_async_comp_lock
, flags
);
94 switch (opal_async_tokens
[token
].state
) {
95 case ASYNC_TOKEN_COMPLETED
:
96 case ASYNC_TOKEN_ALLOCATED
:
97 opal_async_tokens
[token
].state
= ASYNC_TOKEN_UNALLOCATED
;
101 * DISPATCHED and ABANDONED tokens must wait for OPAL to respond.
102 * Mark a DISPATCHED token as ABANDONED so that the response handling
103 * code knows no one cares and that it can free it then.
105 case ASYNC_TOKEN_DISPATCHED
:
106 opal_async_tokens
[token
].state
= ASYNC_TOKEN_ABANDONED
;
111 spin_unlock_irqrestore(&opal_async_comp_lock
, flags
);
116 int opal_async_release_token(int token
)
120 ret
= __opal_async_release_token(token
);
126 EXPORT_SYMBOL_GPL(opal_async_release_token
);
128 int opal_async_wait_response(uint64_t token
, struct opal_msg
*msg
)
130 if (token
>= opal_max_async_tokens
) {
131 pr_err("%s: Invalid token passed\n", __func__
);
136 pr_err("%s: Invalid message pointer passed\n", __func__
);
141 * There is no need to mark the token as dispatched, wait_event()
142 * will block until the token completes.
144 * Wakeup the poller before we wait for events to speed things
145 * up on platforms or simulators where the interrupts aren't
149 wait_event(opal_async_wait
, opal_async_tokens
[token
].state
150 == ASYNC_TOKEN_COMPLETED
);
151 memcpy(msg
, &opal_async_tokens
[token
].response
, sizeof(*msg
));
155 EXPORT_SYMBOL_GPL(opal_async_wait_response
);
157 int opal_async_wait_response_interruptible(uint64_t token
, struct opal_msg
*msg
)
162 if (token
>= opal_max_async_tokens
) {
163 pr_err("%s: Invalid token passed\n", __func__
);
168 pr_err("%s: Invalid message pointer passed\n", __func__
);
173 * The first time this gets called we mark the token as DISPATCHED
174 * so that if wait_event_interruptible() returns not zero and the
175 * caller frees the token, we know not to actually free the token
176 * until the response comes.
178 * Only change if the token is ALLOCATED - it may have been
179 * completed even before the caller gets around to calling this
182 * There is also a dirty great comment at the token allocation
183 * function that if the opal call returns OPAL_ASYNC_COMPLETION to
184 * the caller then the caller *must* call this or the not
185 * interruptible version before doing anything else with the
188 if (opal_async_tokens
[token
].state
== ASYNC_TOKEN_ALLOCATED
) {
189 spin_lock_irqsave(&opal_async_comp_lock
, flags
);
190 if (opal_async_tokens
[token
].state
== ASYNC_TOKEN_ALLOCATED
)
191 opal_async_tokens
[token
].state
= ASYNC_TOKEN_DISPATCHED
;
192 spin_unlock_irqrestore(&opal_async_comp_lock
, flags
);
196 * Wakeup the poller before we wait for events to speed things
197 * up on platforms or simulators where the interrupts aren't
201 ret
= wait_event_interruptible(opal_async_wait
,
202 opal_async_tokens
[token
].state
==
203 ASYNC_TOKEN_COMPLETED
);
205 memcpy(msg
, &opal_async_tokens
[token
].response
, sizeof(*msg
));
209 EXPORT_SYMBOL_GPL(opal_async_wait_response_interruptible
);
211 /* Called from interrupt context */
212 static int opal_async_comp_event(struct notifier_block
*nb
,
213 unsigned long msg_type
, void *msg
)
215 struct opal_msg
*comp_msg
= msg
;
216 enum opal_async_token_state state
;
220 if (msg_type
!= OPAL_MSG_ASYNC_COMP
)
223 token
= be64_to_cpu(comp_msg
->params
[0]);
224 spin_lock_irqsave(&opal_async_comp_lock
, flags
);
225 state
= opal_async_tokens
[token
].state
;
226 opal_async_tokens
[token
].state
= ASYNC_TOKEN_COMPLETED
;
227 spin_unlock_irqrestore(&opal_async_comp_lock
, flags
);
229 if (state
== ASYNC_TOKEN_ABANDONED
) {
230 /* Free the token, no one else will */
231 opal_async_release_token(token
);
234 memcpy(&opal_async_tokens
[token
].response
, comp_msg
, sizeof(*comp_msg
));
235 wake_up(&opal_async_wait
);
240 static struct notifier_block opal_async_comp_nb
= {
241 .notifier_call
= opal_async_comp_event
,
246 int __init
opal_async_comp_init(void)
248 struct device_node
*opal_node
;
252 opal_node
= of_find_node_by_path("/ibm,opal");
254 pr_err("%s: Opal node not found\n", __func__
);
259 async
= of_get_property(opal_node
, "opal-msg-async-num", NULL
);
261 pr_err("%s: %pOF has no opal-msg-async-num\n",
262 __func__
, opal_node
);
267 opal_max_async_tokens
= be32_to_cpup(async
);
268 opal_async_tokens
= kcalloc(opal_max_async_tokens
,
269 sizeof(*opal_async_tokens
), GFP_KERNEL
);
270 if (!opal_async_tokens
) {
275 err
= opal_message_notifier_register(OPAL_MSG_ASYNC_COMP
,
276 &opal_async_comp_nb
);
278 pr_err("%s: Can't register OPAL event notifier (%d)\n",
280 kfree(opal_async_tokens
);
284 sema_init(&opal_async_sem
, opal_max_async_tokens
);
287 of_node_put(opal_node
);