// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright IBM Corp. 2016, 2023
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 *
 * Adjunct processor bus, queue related code.
 */
9 #define KMSG_COMPONENT "ap"
10 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
12 #include <linux/init.h>
13 #include <linux/slab.h>
14 #include <asm/facility.h>
19 static void __ap_flush_queue(struct ap_queue
*aq
);
/*
 * some AP queue helper functions
 */
25 static inline bool ap_q_supported_in_se(struct ap_queue
*aq
)
27 return aq
->card
->hwinfo
.ep11
|| aq
->card
->hwinfo
.accel
;
30 static inline bool ap_q_supports_bind(struct ap_queue
*aq
)
32 return aq
->card
->hwinfo
.ep11
|| aq
->card
->hwinfo
.accel
;
35 static inline bool ap_q_supports_assoc(struct ap_queue
*aq
)
37 return aq
->card
->hwinfo
.ep11
;
40 static inline bool ap_q_needs_bind(struct ap_queue
*aq
)
42 return ap_q_supports_bind(aq
) && ap_sb_available();
46 * ap_queue_enable_irq(): Enable interrupt support on this AP queue.
48 * @ind: the notification indicator byte
50 * Enables interruption on AP queue via ap_aqic(). Based on the return
51 * value it waits a while and tests the AP queue if interrupts
52 * have been switched on using ap_test_queue().
54 static int ap_queue_enable_irq(struct ap_queue
*aq
, void *ind
)
56 union ap_qirq_ctrl qirqctrl
= { .value
= 0 };
57 struct ap_queue_status status
;
60 qirqctrl
.isc
= AP_ISC
;
61 status
= ap_aqic(aq
->qid
, qirqctrl
, virt_to_phys(ind
));
64 switch (status
.response_code
) {
65 case AP_RESPONSE_NORMAL
:
66 case AP_RESPONSE_OTHERWISE_CHANGED
:
68 case AP_RESPONSE_Q_NOT_AVAIL
:
69 case AP_RESPONSE_DECONFIGURED
:
70 case AP_RESPONSE_CHECKSTOPPED
:
71 case AP_RESPONSE_INVALID_ADDRESS
:
72 pr_err("Registering adapter interrupts for AP device %02x.%04x failed\n",
74 AP_QID_QUEUE(aq
->qid
));
76 case AP_RESPONSE_RESET_IN_PROGRESS
:
77 case AP_RESPONSE_BUSY
:
84 * __ap_send(): Send message to adjunct processor queue.
85 * @qid: The AP queue number
86 * @psmid: The program supplied message identifier
87 * @msg: The message text
88 * @msglen: The message length
89 * @special: Special Bit
91 * Returns AP queue status structure.
92 * Condition code 1 on NQAP can't happen because the L bit is 1.
93 * Condition code 2 on NQAP also means the send is incomplete,
94 * because a segment boundary was reached. The NQAP is repeated.
96 static inline struct ap_queue_status
97 __ap_send(ap_qid_t qid
, unsigned long psmid
, void *msg
, size_t msglen
,
102 return ap_nqap(qid
, psmid
, msg
, msglen
);
105 /* State machine definitions and helpers */
107 static enum ap_sm_wait
ap_sm_nop(struct ap_queue
*aq
)
109 return AP_SM_WAIT_NONE
;
113 * ap_sm_recv(): Receive pending reply messages from an AP queue but do
114 * not change the state of the device.
115 * @aq: pointer to the AP queue
117 * Returns AP_SM_WAIT_NONE, AP_SM_WAIT_AGAIN, or AP_SM_WAIT_INTERRUPT
119 static struct ap_queue_status
ap_sm_recv(struct ap_queue
*aq
)
121 struct ap_queue_status status
;
122 struct ap_message
*ap_msg
;
125 unsigned long resgr0
= 0;
129 * DQAP loop until response code and resgr0 indicate that
130 * the msg is totally received. As we use the very same buffer
131 * the msg is overwritten with each invocation. That's intended
132 * and the receiver of the msg is informed with a msg rc code
133 * of EMSGSIZE in such a case.
136 status
= ap_dqap(aq
->qid
, &aq
->reply
->psmid
,
137 aq
->reply
->msg
, aq
->reply
->bufsize
,
138 &aq
->reply
->len
, &reslen
, &resgr0
);
140 } while (status
.response_code
== 0xFF && resgr0
!= 0);
142 switch (status
.response_code
) {
143 case AP_RESPONSE_NORMAL
:
144 print_hex_dump_debug("aprpl: ", DUMP_PREFIX_ADDRESS
, 16, 1,
145 aq
->reply
->msg
, aq
->reply
->len
, false);
146 aq
->queue_count
= max_t(int, 0, aq
->queue_count
- 1);
147 if (!status
.queue_empty
&& !aq
->queue_count
)
149 if (aq
->queue_count
> 0)
150 mod_timer(&aq
->timeout
,
151 jiffies
+ aq
->request_timeout
);
152 list_for_each_entry(ap_msg
, &aq
->pendingq
, list
) {
153 if (ap_msg
->psmid
!= aq
->reply
->psmid
)
155 list_del_init(&ap_msg
->list
);
156 aq
->pendingq_count
--;
158 ap_msg
->rc
= -EMSGSIZE
;
159 ap_msg
->receive(aq
, ap_msg
, NULL
);
161 ap_msg
->receive(aq
, ap_msg
, aq
->reply
);
167 AP_DBF_WARN("%s unassociated reply psmid=0x%016lx on 0x%02x.%04x\n",
168 __func__
, aq
->reply
->psmid
,
169 AP_QID_CARD(aq
->qid
), AP_QID_QUEUE(aq
->qid
));
172 case AP_RESPONSE_NO_PENDING_REPLY
:
173 if (!status
.queue_empty
|| aq
->queue_count
<= 0)
175 /* The card shouldn't forget requests but who knows. */
177 list_splice_init(&aq
->pendingq
, &aq
->requestq
);
178 aq
->requestq_count
+= aq
->pendingq_count
;
179 pr_debug("queue 0x%02x.%04x rescheduled %d reqs (new req %d)\n",
180 AP_QID_CARD(aq
->qid
), AP_QID_QUEUE(aq
->qid
),
181 aq
->pendingq_count
, aq
->requestq_count
);
182 aq
->pendingq_count
= 0;
191 * ap_sm_read(): Receive pending reply messages from an AP queue.
192 * @aq: pointer to the AP queue
194 * Returns AP_SM_WAIT_NONE, AP_SM_WAIT_AGAIN, or AP_SM_WAIT_INTERRUPT
196 static enum ap_sm_wait
ap_sm_read(struct ap_queue
*aq
)
198 struct ap_queue_status status
;
201 return AP_SM_WAIT_NONE
;
202 status
= ap_sm_recv(aq
);
204 return AP_SM_WAIT_NONE
;
205 switch (status
.response_code
) {
206 case AP_RESPONSE_NORMAL
:
207 if (aq
->queue_count
> 0) {
208 aq
->sm_state
= AP_SM_STATE_WORKING
;
209 return AP_SM_WAIT_AGAIN
;
211 aq
->sm_state
= AP_SM_STATE_IDLE
;
213 case AP_RESPONSE_NO_PENDING_REPLY
:
214 if (aq
->queue_count
> 0)
215 return status
.irq_enabled
?
216 AP_SM_WAIT_INTERRUPT
: AP_SM_WAIT_HIGH_TIMEOUT
;
217 aq
->sm_state
= AP_SM_STATE_IDLE
;
220 aq
->dev_state
= AP_DEV_STATE_ERROR
;
221 aq
->last_err_rc
= status
.response_code
;
222 AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
223 __func__
, status
.response_code
,
224 AP_QID_CARD(aq
->qid
), AP_QID_QUEUE(aq
->qid
));
225 return AP_SM_WAIT_NONE
;
227 /* Check and maybe enable irq support (again) on this queue */
228 if (!status
.irq_enabled
&& status
.queue_empty
) {
229 void *lsi_ptr
= ap_airq_ptr();
231 if (lsi_ptr
&& ap_queue_enable_irq(aq
, lsi_ptr
) == 0) {
232 aq
->sm_state
= AP_SM_STATE_SETIRQ_WAIT
;
233 return AP_SM_WAIT_AGAIN
;
236 return AP_SM_WAIT_NONE
;
240 * ap_sm_write(): Send messages from the request queue to an AP queue.
241 * @aq: pointer to the AP queue
243 * Returns AP_SM_WAIT_NONE, AP_SM_WAIT_AGAIN, or AP_SM_WAIT_INTERRUPT
245 static enum ap_sm_wait
ap_sm_write(struct ap_queue
*aq
)
247 struct ap_queue_status status
;
248 struct ap_message
*ap_msg
;
249 ap_qid_t qid
= aq
->qid
;
251 if (aq
->requestq_count
<= 0)
252 return AP_SM_WAIT_NONE
;
254 /* Start the next request on the queue. */
255 ap_msg
= list_entry(aq
->requestq
.next
, struct ap_message
, list
);
256 print_hex_dump_debug("apreq: ", DUMP_PREFIX_ADDRESS
, 16, 1,
257 ap_msg
->msg
, ap_msg
->len
, false);
258 status
= __ap_send(qid
, ap_msg
->psmid
,
259 ap_msg
->msg
, ap_msg
->len
,
260 ap_msg
->flags
& AP_MSG_FLAG_SPECIAL
);
262 return AP_SM_WAIT_NONE
;
263 switch (status
.response_code
) {
264 case AP_RESPONSE_NORMAL
:
265 aq
->queue_count
= max_t(int, 1, aq
->queue_count
+ 1);
266 if (aq
->queue_count
== 1)
267 mod_timer(&aq
->timeout
, jiffies
+ aq
->request_timeout
);
268 list_move_tail(&ap_msg
->list
, &aq
->pendingq
);
269 aq
->requestq_count
--;
270 aq
->pendingq_count
++;
271 if (aq
->queue_count
< aq
->card
->hwinfo
.qd
) {
272 aq
->sm_state
= AP_SM_STATE_WORKING
;
273 return AP_SM_WAIT_AGAIN
;
276 case AP_RESPONSE_Q_FULL
:
277 aq
->sm_state
= AP_SM_STATE_QUEUE_FULL
;
278 return status
.irq_enabled
?
279 AP_SM_WAIT_INTERRUPT
: AP_SM_WAIT_HIGH_TIMEOUT
;
280 case AP_RESPONSE_RESET_IN_PROGRESS
:
281 aq
->sm_state
= AP_SM_STATE_RESET_WAIT
;
282 return AP_SM_WAIT_LOW_TIMEOUT
;
283 case AP_RESPONSE_INVALID_DOMAIN
:
284 AP_DBF_WARN("%s RESPONSE_INVALID_DOMAIN on NQAP\n", __func__
);
286 case AP_RESPONSE_MESSAGE_TOO_BIG
:
287 case AP_RESPONSE_REQ_FAC_NOT_INST
:
288 list_del_init(&ap_msg
->list
);
289 aq
->requestq_count
--;
290 ap_msg
->rc
= -EINVAL
;
291 ap_msg
->receive(aq
, ap_msg
, NULL
);
292 return AP_SM_WAIT_AGAIN
;
294 aq
->dev_state
= AP_DEV_STATE_ERROR
;
295 aq
->last_err_rc
= status
.response_code
;
296 AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
297 __func__
, status
.response_code
,
298 AP_QID_CARD(aq
->qid
), AP_QID_QUEUE(aq
->qid
));
299 return AP_SM_WAIT_NONE
;
304 * ap_sm_read_write(): Send and receive messages to/from an AP queue.
305 * @aq: pointer to the AP queue
307 * Returns AP_SM_WAIT_NONE, AP_SM_WAIT_AGAIN, or AP_SM_WAIT_INTERRUPT
309 static enum ap_sm_wait
ap_sm_read_write(struct ap_queue
*aq
)
311 return min(ap_sm_read(aq
), ap_sm_write(aq
));
315 * ap_sm_reset(): Reset an AP queue.
318 * Submit the Reset command to an AP queue.
320 static enum ap_sm_wait
ap_sm_reset(struct ap_queue
*aq
)
322 struct ap_queue_status status
;
324 status
= ap_rapq(aq
->qid
, aq
->rapq_fbit
);
326 return AP_SM_WAIT_NONE
;
327 switch (status
.response_code
) {
328 case AP_RESPONSE_NORMAL
:
329 case AP_RESPONSE_RESET_IN_PROGRESS
:
330 aq
->sm_state
= AP_SM_STATE_RESET_WAIT
;
332 return AP_SM_WAIT_LOW_TIMEOUT
;
334 aq
->dev_state
= AP_DEV_STATE_ERROR
;
335 aq
->last_err_rc
= status
.response_code
;
336 AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
337 __func__
, status
.response_code
,
338 AP_QID_CARD(aq
->qid
), AP_QID_QUEUE(aq
->qid
));
339 return AP_SM_WAIT_NONE
;
344 * ap_sm_reset_wait(): Test queue for completion of the reset operation
345 * @aq: pointer to the AP queue
347 * Returns AP_POLL_IMMEDIATELY, AP_POLL_AFTER_TIMEROUT or 0.
349 static enum ap_sm_wait
ap_sm_reset_wait(struct ap_queue
*aq
)
351 struct ap_queue_status status
;
352 struct ap_tapq_hwinfo hwinfo
;
355 /* Get the status with TAPQ */
356 status
= ap_test_queue(aq
->qid
, 1, &hwinfo
);
358 switch (status
.response_code
) {
359 case AP_RESPONSE_NORMAL
:
360 aq
->se_bstate
= hwinfo
.bs
;
361 lsi_ptr
= ap_airq_ptr();
362 if (lsi_ptr
&& ap_queue_enable_irq(aq
, lsi_ptr
) == 0)
363 aq
->sm_state
= AP_SM_STATE_SETIRQ_WAIT
;
365 aq
->sm_state
= (aq
->queue_count
> 0) ?
366 AP_SM_STATE_WORKING
: AP_SM_STATE_IDLE
;
367 return AP_SM_WAIT_AGAIN
;
368 case AP_RESPONSE_BUSY
:
369 case AP_RESPONSE_RESET_IN_PROGRESS
:
370 return AP_SM_WAIT_LOW_TIMEOUT
;
371 case AP_RESPONSE_Q_NOT_AVAIL
:
372 case AP_RESPONSE_DECONFIGURED
:
373 case AP_RESPONSE_CHECKSTOPPED
:
375 aq
->dev_state
= AP_DEV_STATE_ERROR
;
376 aq
->last_err_rc
= status
.response_code
;
377 AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
378 __func__
, status
.response_code
,
379 AP_QID_CARD(aq
->qid
), AP_QID_QUEUE(aq
->qid
));
380 return AP_SM_WAIT_NONE
;
385 * ap_sm_setirq_wait(): Test queue for completion of the irq enablement
386 * @aq: pointer to the AP queue
388 * Returns AP_POLL_IMMEDIATELY, AP_POLL_AFTER_TIMEROUT or 0.
390 static enum ap_sm_wait
ap_sm_setirq_wait(struct ap_queue
*aq
)
392 struct ap_queue_status status
;
394 if (aq
->queue_count
> 0 && aq
->reply
)
395 /* Try to read a completed message and get the status */
396 status
= ap_sm_recv(aq
);
398 /* Get the status with TAPQ */
399 status
= ap_tapq(aq
->qid
, NULL
);
401 if (status
.irq_enabled
== 1) {
402 /* Irqs are now enabled */
403 aq
->sm_state
= (aq
->queue_count
> 0) ?
404 AP_SM_STATE_WORKING
: AP_SM_STATE_IDLE
;
407 switch (status
.response_code
) {
408 case AP_RESPONSE_NORMAL
:
409 if (aq
->queue_count
> 0)
410 return AP_SM_WAIT_AGAIN
;
412 case AP_RESPONSE_NO_PENDING_REPLY
:
413 return AP_SM_WAIT_LOW_TIMEOUT
;
415 aq
->dev_state
= AP_DEV_STATE_ERROR
;
416 aq
->last_err_rc
= status
.response_code
;
417 AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
418 __func__
, status
.response_code
,
419 AP_QID_CARD(aq
->qid
), AP_QID_QUEUE(aq
->qid
));
420 return AP_SM_WAIT_NONE
;
425 * ap_sm_assoc_wait(): Test queue for completion of a pending
426 * association request.
427 * @aq: pointer to the AP queue
429 static enum ap_sm_wait
ap_sm_assoc_wait(struct ap_queue
*aq
)
431 struct ap_queue_status status
;
432 struct ap_tapq_hwinfo hwinfo
;
434 status
= ap_test_queue(aq
->qid
, 1, &hwinfo
);
435 /* handle asynchronous error on this queue */
436 if (status
.async
&& status
.response_code
) {
437 aq
->dev_state
= AP_DEV_STATE_ERROR
;
438 aq
->last_err_rc
= status
.response_code
;
439 AP_DBF_WARN("%s asynch RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
440 __func__
, status
.response_code
,
441 AP_QID_CARD(aq
->qid
), AP_QID_QUEUE(aq
->qid
));
442 return AP_SM_WAIT_NONE
;
444 if (status
.response_code
> AP_RESPONSE_BUSY
) {
445 aq
->dev_state
= AP_DEV_STATE_ERROR
;
446 aq
->last_err_rc
= status
.response_code
;
447 AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
448 __func__
, status
.response_code
,
449 AP_QID_CARD(aq
->qid
), AP_QID_QUEUE(aq
->qid
));
450 return AP_SM_WAIT_NONE
;
453 /* update queue's SE bind state */
454 aq
->se_bstate
= hwinfo
.bs
;
459 /* association is through */
460 aq
->sm_state
= AP_SM_STATE_IDLE
;
461 pr_debug("queue 0x%02x.%04x associated with %u\n",
462 AP_QID_CARD(aq
->qid
),
463 AP_QID_QUEUE(aq
->qid
), aq
->assoc_idx
);
464 return AP_SM_WAIT_NONE
;
465 case AP_BS_Q_USABLE_NO_SECURE_KEY
:
466 /* association still pending */
467 return AP_SM_WAIT_LOW_TIMEOUT
;
469 /* reset from 'outside' happened or no idea at all */
470 aq
->assoc_idx
= ASSOC_IDX_INVALID
;
471 aq
->dev_state
= AP_DEV_STATE_ERROR
;
472 aq
->last_err_rc
= status
.response_code
;
473 AP_DBF_WARN("%s bs 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
475 AP_QID_CARD(aq
->qid
), AP_QID_QUEUE(aq
->qid
));
476 return AP_SM_WAIT_NONE
;
481 * AP state machine jump table
483 static ap_func_t
*ap_jumptable
[NR_AP_SM_STATES
][NR_AP_SM_EVENTS
] = {
484 [AP_SM_STATE_RESET_START
] = {
485 [AP_SM_EVENT_POLL
] = ap_sm_reset
,
486 [AP_SM_EVENT_TIMEOUT
] = ap_sm_nop
,
488 [AP_SM_STATE_RESET_WAIT
] = {
489 [AP_SM_EVENT_POLL
] = ap_sm_reset_wait
,
490 [AP_SM_EVENT_TIMEOUT
] = ap_sm_nop
,
492 [AP_SM_STATE_SETIRQ_WAIT
] = {
493 [AP_SM_EVENT_POLL
] = ap_sm_setirq_wait
,
494 [AP_SM_EVENT_TIMEOUT
] = ap_sm_nop
,
496 [AP_SM_STATE_IDLE
] = {
497 [AP_SM_EVENT_POLL
] = ap_sm_write
,
498 [AP_SM_EVENT_TIMEOUT
] = ap_sm_nop
,
500 [AP_SM_STATE_WORKING
] = {
501 [AP_SM_EVENT_POLL
] = ap_sm_read_write
,
502 [AP_SM_EVENT_TIMEOUT
] = ap_sm_reset
,
504 [AP_SM_STATE_QUEUE_FULL
] = {
505 [AP_SM_EVENT_POLL
] = ap_sm_read
,
506 [AP_SM_EVENT_TIMEOUT
] = ap_sm_reset
,
508 [AP_SM_STATE_ASSOC_WAIT
] = {
509 [AP_SM_EVENT_POLL
] = ap_sm_assoc_wait
,
510 [AP_SM_EVENT_TIMEOUT
] = ap_sm_reset
,
514 enum ap_sm_wait
ap_sm_event(struct ap_queue
*aq
, enum ap_sm_event event
)
516 if (aq
->config
&& !aq
->chkstop
&&
517 aq
->dev_state
> AP_DEV_STATE_UNINITIATED
)
518 return ap_jumptable
[aq
->sm_state
][event
](aq
);
520 return AP_SM_WAIT_NONE
;
523 enum ap_sm_wait
ap_sm_event_loop(struct ap_queue
*aq
, enum ap_sm_event event
)
525 enum ap_sm_wait wait
;
527 while ((wait
= ap_sm_event(aq
, event
)) == AP_SM_WAIT_AGAIN
)
533 * AP queue related attributes.
535 static ssize_t
request_count_show(struct device
*dev
,
536 struct device_attribute
*attr
,
539 struct ap_queue
*aq
= to_ap_queue(dev
);
543 spin_lock_bh(&aq
->lock
);
544 if (aq
->dev_state
> AP_DEV_STATE_UNINITIATED
) {
545 req_cnt
= aq
->total_request_count
;
548 spin_unlock_bh(&aq
->lock
);
551 return sysfs_emit(buf
, "%llu\n", req_cnt
);
553 return sysfs_emit(buf
, "-\n");
556 static ssize_t
request_count_store(struct device
*dev
,
557 struct device_attribute
*attr
,
558 const char *buf
, size_t count
)
560 struct ap_queue
*aq
= to_ap_queue(dev
);
562 spin_lock_bh(&aq
->lock
);
563 aq
->total_request_count
= 0;
564 spin_unlock_bh(&aq
->lock
);
569 static DEVICE_ATTR_RW(request_count
);
571 static ssize_t
requestq_count_show(struct device
*dev
,
572 struct device_attribute
*attr
, char *buf
)
574 struct ap_queue
*aq
= to_ap_queue(dev
);
575 unsigned int reqq_cnt
= 0;
577 spin_lock_bh(&aq
->lock
);
578 if (aq
->dev_state
> AP_DEV_STATE_UNINITIATED
)
579 reqq_cnt
= aq
->requestq_count
;
580 spin_unlock_bh(&aq
->lock
);
581 return sysfs_emit(buf
, "%d\n", reqq_cnt
);
584 static DEVICE_ATTR_RO(requestq_count
);
586 static ssize_t
pendingq_count_show(struct device
*dev
,
587 struct device_attribute
*attr
, char *buf
)
589 struct ap_queue
*aq
= to_ap_queue(dev
);
590 unsigned int penq_cnt
= 0;
592 spin_lock_bh(&aq
->lock
);
593 if (aq
->dev_state
> AP_DEV_STATE_UNINITIATED
)
594 penq_cnt
= aq
->pendingq_count
;
595 spin_unlock_bh(&aq
->lock
);
596 return sysfs_emit(buf
, "%d\n", penq_cnt
);
599 static DEVICE_ATTR_RO(pendingq_count
);
601 static ssize_t
reset_show(struct device
*dev
,
602 struct device_attribute
*attr
, char *buf
)
604 struct ap_queue
*aq
= to_ap_queue(dev
);
607 spin_lock_bh(&aq
->lock
);
608 switch (aq
->sm_state
) {
609 case AP_SM_STATE_RESET_START
:
610 case AP_SM_STATE_RESET_WAIT
:
611 rc
= sysfs_emit(buf
, "Reset in progress.\n");
613 case AP_SM_STATE_WORKING
:
614 case AP_SM_STATE_QUEUE_FULL
:
615 rc
= sysfs_emit(buf
, "Reset Timer armed.\n");
618 rc
= sysfs_emit(buf
, "No Reset Timer set.\n");
620 spin_unlock_bh(&aq
->lock
);
624 static ssize_t
reset_store(struct device
*dev
,
625 struct device_attribute
*attr
,
626 const char *buf
, size_t count
)
628 struct ap_queue
*aq
= to_ap_queue(dev
);
630 spin_lock_bh(&aq
->lock
);
631 __ap_flush_queue(aq
);
632 aq
->sm_state
= AP_SM_STATE_RESET_START
;
633 ap_wait(ap_sm_event(aq
, AP_SM_EVENT_POLL
));
634 spin_unlock_bh(&aq
->lock
);
636 AP_DBF_INFO("%s reset queue=%02x.%04x triggered by user\n",
637 __func__
, AP_QID_CARD(aq
->qid
), AP_QID_QUEUE(aq
->qid
));
642 static DEVICE_ATTR_RW(reset
);
644 static ssize_t
interrupt_show(struct device
*dev
,
645 struct device_attribute
*attr
, char *buf
)
647 struct ap_queue
*aq
= to_ap_queue(dev
);
648 struct ap_queue_status status
;
651 spin_lock_bh(&aq
->lock
);
652 if (aq
->sm_state
== AP_SM_STATE_SETIRQ_WAIT
) {
653 rc
= sysfs_emit(buf
, "Enable Interrupt pending.\n");
655 status
= ap_tapq(aq
->qid
, NULL
);
656 if (status
.irq_enabled
)
657 rc
= sysfs_emit(buf
, "Interrupts enabled.\n");
659 rc
= sysfs_emit(buf
, "Interrupts disabled.\n");
661 spin_unlock_bh(&aq
->lock
);
666 static DEVICE_ATTR_RO(interrupt
);
668 static ssize_t
config_show(struct device
*dev
,
669 struct device_attribute
*attr
, char *buf
)
671 struct ap_queue
*aq
= to_ap_queue(dev
);
674 spin_lock_bh(&aq
->lock
);
675 rc
= sysfs_emit(buf
, "%d\n", aq
->config
? 1 : 0);
676 spin_unlock_bh(&aq
->lock
);
680 static DEVICE_ATTR_RO(config
);
682 static ssize_t
chkstop_show(struct device
*dev
,
683 struct device_attribute
*attr
, char *buf
)
685 struct ap_queue
*aq
= to_ap_queue(dev
);
688 spin_lock_bh(&aq
->lock
);
689 rc
= sysfs_emit(buf
, "%d\n", aq
->chkstop
? 1 : 0);
690 spin_unlock_bh(&aq
->lock
);
694 static DEVICE_ATTR_RO(chkstop
);
696 static ssize_t
ap_functions_show(struct device
*dev
,
697 struct device_attribute
*attr
, char *buf
)
699 struct ap_queue
*aq
= to_ap_queue(dev
);
700 struct ap_queue_status status
;
701 struct ap_tapq_hwinfo hwinfo
;
703 status
= ap_test_queue(aq
->qid
, 1, &hwinfo
);
704 if (status
.response_code
> AP_RESPONSE_BUSY
) {
705 pr_debug("RC 0x%02x on tapq(0x%02x.%04x)\n",
706 status
.response_code
,
707 AP_QID_CARD(aq
->qid
), AP_QID_QUEUE(aq
->qid
));
711 return sysfs_emit(buf
, "0x%08X\n", hwinfo
.fac
);
714 static DEVICE_ATTR_RO(ap_functions
);
716 #ifdef CONFIG_AP_DEBUG
717 static ssize_t
states_show(struct device
*dev
,
718 struct device_attribute
*attr
, char *buf
)
720 struct ap_queue
*aq
= to_ap_queue(dev
);
723 spin_lock_bh(&aq
->lock
);
724 /* queue device state */
725 switch (aq
->dev_state
) {
726 case AP_DEV_STATE_UNINITIATED
:
727 rc
= sysfs_emit(buf
, "UNINITIATED\n");
729 case AP_DEV_STATE_OPERATING
:
730 rc
= sysfs_emit(buf
, "OPERATING");
732 case AP_DEV_STATE_SHUTDOWN
:
733 rc
= sysfs_emit(buf
, "SHUTDOWN");
735 case AP_DEV_STATE_ERROR
:
736 rc
= sysfs_emit(buf
, "ERROR");
739 rc
= sysfs_emit(buf
, "UNKNOWN");
741 /* state machine state */
743 switch (aq
->sm_state
) {
744 case AP_SM_STATE_RESET_START
:
745 rc
+= sysfs_emit_at(buf
, rc
, " [RESET_START]\n");
747 case AP_SM_STATE_RESET_WAIT
:
748 rc
+= sysfs_emit_at(buf
, rc
, " [RESET_WAIT]\n");
750 case AP_SM_STATE_SETIRQ_WAIT
:
751 rc
+= sysfs_emit_at(buf
, rc
, " [SETIRQ_WAIT]\n");
753 case AP_SM_STATE_IDLE
:
754 rc
+= sysfs_emit_at(buf
, rc
, " [IDLE]\n");
756 case AP_SM_STATE_WORKING
:
757 rc
+= sysfs_emit_at(buf
, rc
, " [WORKING]\n");
759 case AP_SM_STATE_QUEUE_FULL
:
760 rc
+= sysfs_emit_at(buf
, rc
, " [FULL]\n");
762 case AP_SM_STATE_ASSOC_WAIT
:
763 rc
+= sysfs_emit_at(buf
, rc
, " [ASSOC_WAIT]\n");
766 rc
+= sysfs_emit_at(buf
, rc
, " [UNKNOWN]\n");
769 spin_unlock_bh(&aq
->lock
);
773 static DEVICE_ATTR_RO(states
);
775 static ssize_t
last_err_rc_show(struct device
*dev
,
776 struct device_attribute
*attr
, char *buf
)
778 struct ap_queue
*aq
= to_ap_queue(dev
);
781 spin_lock_bh(&aq
->lock
);
782 rc
= aq
->last_err_rc
;
783 spin_unlock_bh(&aq
->lock
);
786 case AP_RESPONSE_NORMAL
:
787 return sysfs_emit(buf
, "NORMAL\n");
788 case AP_RESPONSE_Q_NOT_AVAIL
:
789 return sysfs_emit(buf
, "Q_NOT_AVAIL\n");
790 case AP_RESPONSE_RESET_IN_PROGRESS
:
791 return sysfs_emit(buf
, "RESET_IN_PROGRESS\n");
792 case AP_RESPONSE_DECONFIGURED
:
793 return sysfs_emit(buf
, "DECONFIGURED\n");
794 case AP_RESPONSE_CHECKSTOPPED
:
795 return sysfs_emit(buf
, "CHECKSTOPPED\n");
796 case AP_RESPONSE_BUSY
:
797 return sysfs_emit(buf
, "BUSY\n");
798 case AP_RESPONSE_INVALID_ADDRESS
:
799 return sysfs_emit(buf
, "INVALID_ADDRESS\n");
800 case AP_RESPONSE_OTHERWISE_CHANGED
:
801 return sysfs_emit(buf
, "OTHERWISE_CHANGED\n");
802 case AP_RESPONSE_Q_FULL
:
803 return sysfs_emit(buf
, "Q_FULL/NO_PENDING_REPLY\n");
804 case AP_RESPONSE_INDEX_TOO_BIG
:
805 return sysfs_emit(buf
, "INDEX_TOO_BIG\n");
806 case AP_RESPONSE_NO_FIRST_PART
:
807 return sysfs_emit(buf
, "NO_FIRST_PART\n");
808 case AP_RESPONSE_MESSAGE_TOO_BIG
:
809 return sysfs_emit(buf
, "MESSAGE_TOO_BIG\n");
810 case AP_RESPONSE_REQ_FAC_NOT_INST
:
811 return sysfs_emit(buf
, "REQ_FAC_NOT_INST\n");
813 return sysfs_emit(buf
, "response code %d\n", rc
);
816 static DEVICE_ATTR_RO(last_err_rc
);
819 static struct attribute
*ap_queue_dev_attrs
[] = {
820 &dev_attr_request_count
.attr
,
821 &dev_attr_requestq_count
.attr
,
822 &dev_attr_pendingq_count
.attr
,
823 &dev_attr_reset
.attr
,
824 &dev_attr_interrupt
.attr
,
825 &dev_attr_config
.attr
,
826 &dev_attr_chkstop
.attr
,
827 &dev_attr_ap_functions
.attr
,
828 #ifdef CONFIG_AP_DEBUG
829 &dev_attr_states
.attr
,
830 &dev_attr_last_err_rc
.attr
,
835 static struct attribute_group ap_queue_dev_attr_group
= {
836 .attrs
= ap_queue_dev_attrs
839 static const struct attribute_group
*ap_queue_dev_attr_groups
[] = {
840 &ap_queue_dev_attr_group
,
844 static struct device_type ap_queue_type
= {
846 .groups
= ap_queue_dev_attr_groups
,
849 static ssize_t
se_bind_show(struct device
*dev
,
850 struct device_attribute
*attr
, char *buf
)
852 struct ap_queue
*aq
= to_ap_queue(dev
);
853 struct ap_queue_status status
;
854 struct ap_tapq_hwinfo hwinfo
;
856 if (!ap_q_supports_bind(aq
))
857 return sysfs_emit(buf
, "-\n");
859 status
= ap_test_queue(aq
->qid
, 1, &hwinfo
);
860 if (status
.response_code
> AP_RESPONSE_BUSY
) {
861 pr_debug("RC 0x%02x on tapq(0x%02x.%04x)\n",
862 status
.response_code
,
863 AP_QID_CARD(aq
->qid
), AP_QID_QUEUE(aq
->qid
));
867 /* update queue's SE bind state */
868 spin_lock_bh(&aq
->lock
);
869 aq
->se_bstate
= hwinfo
.bs
;
870 spin_unlock_bh(&aq
->lock
);
874 case AP_BS_Q_USABLE_NO_SECURE_KEY
:
875 return sysfs_emit(buf
, "bound\n");
877 return sysfs_emit(buf
, "unbound\n");
881 static ssize_t
se_bind_store(struct device
*dev
,
882 struct device_attribute
*attr
,
883 const char *buf
, size_t count
)
885 struct ap_queue
*aq
= to_ap_queue(dev
);
886 struct ap_queue_status status
;
887 struct ap_tapq_hwinfo hwinfo
;
891 if (!ap_q_supports_bind(aq
))
894 /* only 0 (unbind) and 1 (bind) allowed */
895 rc
= kstrtobool(buf
, &value
);
900 /* Unbind. Set F bit arg and trigger RAPQ */
901 spin_lock_bh(&aq
->lock
);
902 __ap_flush_queue(aq
);
904 _ap_queue_init_state(aq
);
909 /* Bind. Check current SE bind state */
910 status
= ap_test_queue(aq
->qid
, 1, &hwinfo
);
911 if (status
.response_code
) {
912 AP_DBF_WARN("%s RC 0x%02x on tapq(0x%02x.%04x)\n",
913 __func__
, status
.response_code
,
914 AP_QID_CARD(aq
->qid
), AP_QID_QUEUE(aq
->qid
));
918 /* Update BS state */
919 spin_lock_bh(&aq
->lock
);
920 aq
->se_bstate
= hwinfo
.bs
;
921 if (hwinfo
.bs
!= AP_BS_Q_AVAIL_FOR_BINDING
) {
922 AP_DBF_WARN("%s bind attempt with bs %d on queue 0x%02x.%04x\n",
924 AP_QID_CARD(aq
->qid
), AP_QID_QUEUE(aq
->qid
));
930 if (aq
->sm_state
< AP_SM_STATE_IDLE
) {
936 status
= ap_bapq(aq
->qid
);
937 if (status
.response_code
) {
938 AP_DBF_WARN("%s RC 0x%02x on bapq(0x%02x.%04x)\n",
939 __func__
, status
.response_code
,
940 AP_QID_CARD(aq
->qid
), AP_QID_QUEUE(aq
->qid
));
944 aq
->assoc_idx
= ASSOC_IDX_INVALID
;
946 /* verify SE bind state */
947 status
= ap_test_queue(aq
->qid
, 1, &hwinfo
);
948 if (status
.response_code
) {
949 AP_DBF_WARN("%s RC 0x%02x on tapq(0x%02x.%04x)\n",
950 __func__
, status
.response_code
,
951 AP_QID_CARD(aq
->qid
), AP_QID_QUEUE(aq
->qid
));
955 aq
->se_bstate
= hwinfo
.bs
;
956 if (!(hwinfo
.bs
== AP_BS_Q_USABLE
||
957 hwinfo
.bs
== AP_BS_Q_USABLE_NO_SECURE_KEY
)) {
958 AP_DBF_WARN("%s BAPQ success, but bs shows %d on queue 0x%02x.%04x\n",
960 AP_QID_CARD(aq
->qid
), AP_QID_QUEUE(aq
->qid
));
965 /* SE bind was successful */
966 AP_DBF_INFO("%s bapq(0x%02x.%04x) success\n", __func__
,
967 AP_QID_CARD(aq
->qid
), AP_QID_QUEUE(aq
->qid
));
971 spin_unlock_bh(&aq
->lock
);
975 static DEVICE_ATTR_RW(se_bind
);
977 static ssize_t
se_associate_show(struct device
*dev
,
978 struct device_attribute
*attr
, char *buf
)
980 struct ap_queue
*aq
= to_ap_queue(dev
);
981 struct ap_queue_status status
;
982 struct ap_tapq_hwinfo hwinfo
;
984 if (!ap_q_supports_assoc(aq
))
985 return sysfs_emit(buf
, "-\n");
987 status
= ap_test_queue(aq
->qid
, 1, &hwinfo
);
988 if (status
.response_code
> AP_RESPONSE_BUSY
) {
989 pr_debug("RC 0x%02x on tapq(0x%02x.%04x)\n",
990 status
.response_code
,
991 AP_QID_CARD(aq
->qid
), AP_QID_QUEUE(aq
->qid
));
995 /* update queue's SE bind state */
996 spin_lock_bh(&aq
->lock
);
997 aq
->se_bstate
= hwinfo
.bs
;
998 spin_unlock_bh(&aq
->lock
);
1000 switch (hwinfo
.bs
) {
1001 case AP_BS_Q_USABLE
:
1002 if (aq
->assoc_idx
== ASSOC_IDX_INVALID
) {
1003 AP_DBF_WARN("%s AP_BS_Q_USABLE but invalid assoc_idx\n", __func__
);
1006 return sysfs_emit(buf
, "associated %u\n", aq
->assoc_idx
);
1007 case AP_BS_Q_USABLE_NO_SECURE_KEY
:
1008 if (aq
->assoc_idx
!= ASSOC_IDX_INVALID
)
1009 return sysfs_emit(buf
, "association pending\n");
1012 return sysfs_emit(buf
, "unassociated\n");
1016 static ssize_t
se_associate_store(struct device
*dev
,
1017 struct device_attribute
*attr
,
1018 const char *buf
, size_t count
)
1020 struct ap_queue
*aq
= to_ap_queue(dev
);
1021 struct ap_queue_status status
;
1022 struct ap_tapq_hwinfo hwinfo
;
1026 if (!ap_q_supports_assoc(aq
))
1029 /* association index needs to be >= 0 */
1030 rc
= kstrtouint(buf
, 0, &value
);
1033 if (value
>= ASSOC_IDX_INVALID
)
1036 /* check current SE bind state */
1037 status
= ap_test_queue(aq
->qid
, 1, &hwinfo
);
1038 if (status
.response_code
) {
1039 AP_DBF_WARN("%s RC 0x%02x on tapq(0x%02x.%04x)\n",
1040 __func__
, status
.response_code
,
1041 AP_QID_CARD(aq
->qid
), AP_QID_QUEUE(aq
->qid
));
1044 spin_lock_bh(&aq
->lock
);
1045 aq
->se_bstate
= hwinfo
.bs
;
1046 if (hwinfo
.bs
!= AP_BS_Q_USABLE_NO_SECURE_KEY
) {
1047 AP_DBF_WARN("%s association attempt with bs %d on queue 0x%02x.%04x\n",
1048 __func__
, hwinfo
.bs
,
1049 AP_QID_CARD(aq
->qid
), AP_QID_QUEUE(aq
->qid
));
1054 /* check SM state */
1055 if (aq
->sm_state
!= AP_SM_STATE_IDLE
) {
1060 /* trigger the asynchronous association request */
1061 status
= ap_aapq(aq
->qid
, value
);
1062 switch (status
.response_code
) {
1063 case AP_RESPONSE_NORMAL
:
1064 case AP_RESPONSE_STATE_CHANGE_IN_PROGRESS
:
1065 aq
->sm_state
= AP_SM_STATE_ASSOC_WAIT
;
1066 aq
->assoc_idx
= value
;
1067 ap_wait(ap_sm_event(aq
, AP_SM_EVENT_POLL
));
1070 AP_DBF_WARN("%s RC 0x%02x on aapq(0x%02x.%04x)\n",
1071 __func__
, status
.response_code
,
1072 AP_QID_CARD(aq
->qid
), AP_QID_QUEUE(aq
->qid
));
1080 spin_unlock_bh(&aq
->lock
);
1084 static DEVICE_ATTR_RW(se_associate
);
1086 static struct attribute
*ap_queue_dev_sb_attrs
[] = {
1087 &dev_attr_se_bind
.attr
,
1088 &dev_attr_se_associate
.attr
,
1092 static struct attribute_group ap_queue_dev_sb_attr_group
= {
1093 .attrs
= ap_queue_dev_sb_attrs
1096 static const struct attribute_group
*ap_queue_dev_sb_attr_groups
[] = {
1097 &ap_queue_dev_sb_attr_group
,
1101 static void ap_queue_device_release(struct device
*dev
)
1103 struct ap_queue
*aq
= to_ap_queue(dev
);
1105 spin_lock_bh(&ap_queues_lock
);
1106 hash_del(&aq
->hnode
);
1107 spin_unlock_bh(&ap_queues_lock
);
1112 struct ap_queue
*ap_queue_create(ap_qid_t qid
, struct ap_card
*ac
)
1114 struct ap_queue
*aq
;
1116 aq
= kzalloc(sizeof(*aq
), GFP_KERNEL
);
1120 aq
->ap_dev
.device
.release
= ap_queue_device_release
;
1121 aq
->ap_dev
.device
.type
= &ap_queue_type
;
1122 aq
->ap_dev
.device_type
= ac
->ap_dev
.device_type
;
1123 /* in SE environment add bind/associate attributes group */
1124 if (ap_is_se_guest() && ap_q_supported_in_se(aq
))
1125 aq
->ap_dev
.device
.groups
= ap_queue_dev_sb_attr_groups
;
1127 spin_lock_init(&aq
->lock
);
1128 INIT_LIST_HEAD(&aq
->pendingq
);
1129 INIT_LIST_HEAD(&aq
->requestq
);
1130 timer_setup(&aq
->timeout
, ap_request_timeout
, 0);
1135 void ap_queue_init_reply(struct ap_queue
*aq
, struct ap_message
*reply
)
1139 spin_lock_bh(&aq
->lock
);
1140 ap_wait(ap_sm_event(aq
, AP_SM_EVENT_POLL
));
1141 spin_unlock_bh(&aq
->lock
);
1143 EXPORT_SYMBOL(ap_queue_init_reply
);
1146 * ap_queue_message(): Queue a request to an AP device.
1147 * @aq: The AP device to queue the message to
1148 * @ap_msg: The message that is to be added
1150 int ap_queue_message(struct ap_queue
*aq
, struct ap_message
*ap_msg
)
1154 /* msg needs to have a valid receive-callback */
1155 BUG_ON(!ap_msg
->receive
);
1157 spin_lock_bh(&aq
->lock
);
1159 /* only allow to queue new messages if device state is ok */
1160 if (aq
->dev_state
== AP_DEV_STATE_OPERATING
) {
1161 list_add_tail(&ap_msg
->list
, &aq
->requestq
);
1162 aq
->requestq_count
++;
1163 aq
->total_request_count
++;
1164 atomic64_inc(&aq
->card
->total_request_count
);
1169 /* Send/receive as many request from the queue as possible. */
1170 ap_wait(ap_sm_event_loop(aq
, AP_SM_EVENT_POLL
));
1172 spin_unlock_bh(&aq
->lock
);
1176 EXPORT_SYMBOL(ap_queue_message
);
1179 * ap_queue_usable(): Check if queue is usable just now.
1180 * @aq: The AP queue device to test for usability.
1181 * This function is intended for the scheduler to query if it makes
1182 * sense to enqueue a message into this AP queue device by calling
1183 * ap_queue_message(). The perspective is very short-term as the
1184 * state machine and device state(s) may change at any time.
1186 bool ap_queue_usable(struct ap_queue
*aq
)
1190 spin_lock_bh(&aq
->lock
);
1192 /* check for not configured or checkstopped */
1193 if (!aq
->config
|| aq
->chkstop
) {
1195 goto unlock_and_out
;
1198 /* device state needs to be ok */
1199 if (aq
->dev_state
!= AP_DEV_STATE_OPERATING
) {
1201 goto unlock_and_out
;
1204 /* SE guest's queues additionally need to be bound */
1205 if (ap_is_se_guest()) {
1206 if (!ap_q_supported_in_se(aq
)) {
1208 goto unlock_and_out
;
1210 if (ap_q_needs_bind(aq
) &&
1211 !(aq
->se_bstate
== AP_BS_Q_USABLE
||
1212 aq
->se_bstate
== AP_BS_Q_USABLE_NO_SECURE_KEY
))
1217 spin_unlock_bh(&aq
->lock
);
1220 EXPORT_SYMBOL(ap_queue_usable
);
1223 * ap_cancel_message(): Cancel a crypto request.
1224 * @aq: The AP device that has the message queued
1225 * @ap_msg: The message that is to be removed
1227 * Cancel a crypto request. This is done by removing the request
1228 * from the device pending or request queue. Note that the
1229 * request stays on the AP queue. When it finishes the message
1230 * reply will be discarded because the psmid can't be found.
1232 void ap_cancel_message(struct ap_queue
*aq
, struct ap_message
*ap_msg
)
1234 struct ap_message
*tmp
;
1236 spin_lock_bh(&aq
->lock
);
1237 if (!list_empty(&ap_msg
->list
)) {
1238 list_for_each_entry(tmp
, &aq
->pendingq
, list
)
1239 if (tmp
->psmid
== ap_msg
->psmid
) {
1240 aq
->pendingq_count
--;
1243 aq
->requestq_count
--;
1245 list_del_init(&ap_msg
->list
);
1247 spin_unlock_bh(&aq
->lock
);
1249 EXPORT_SYMBOL(ap_cancel_message
);
1252 * __ap_flush_queue(): Flush requests.
1253 * @aq: Pointer to the AP queue
1255 * Flush all requests from the request/pending queue of an AP device.
1257 static void __ap_flush_queue(struct ap_queue
*aq
)
1259 struct ap_message
*ap_msg
, *next
;
1261 list_for_each_entry_safe(ap_msg
, next
, &aq
->pendingq
, list
) {
1262 list_del_init(&ap_msg
->list
);
1263 aq
->pendingq_count
--;
1264 ap_msg
->rc
= -EAGAIN
;
1265 ap_msg
->receive(aq
, ap_msg
, NULL
);
1267 list_for_each_entry_safe(ap_msg
, next
, &aq
->requestq
, list
) {
1268 list_del_init(&ap_msg
->list
);
1269 aq
->requestq_count
--;
1270 ap_msg
->rc
= -EAGAIN
;
1271 ap_msg
->receive(aq
, ap_msg
, NULL
);
1273 aq
->queue_count
= 0;
1276 void ap_flush_queue(struct ap_queue
*aq
)
1278 spin_lock_bh(&aq
->lock
);
1279 __ap_flush_queue(aq
);
1280 spin_unlock_bh(&aq
->lock
);
1282 EXPORT_SYMBOL(ap_flush_queue
);
1284 void ap_queue_prepare_remove(struct ap_queue
*aq
)
1286 spin_lock_bh(&aq
->lock
);
1288 __ap_flush_queue(aq
);
1289 /* move queue device state to SHUTDOWN in progress */
1290 aq
->dev_state
= AP_DEV_STATE_SHUTDOWN
;
1291 spin_unlock_bh(&aq
->lock
);
1292 del_timer_sync(&aq
->timeout
);
1295 void ap_queue_remove(struct ap_queue
*aq
)
1298 * all messages have been flushed and the device state
1299 * is SHUTDOWN. Now reset with zero which also clears
1300 * the irq registration and move the device state
1301 * to the initial value AP_DEV_STATE_UNINITIATED.
1303 spin_lock_bh(&aq
->lock
);
1304 ap_zapq(aq
->qid
, 0);
1305 aq
->dev_state
= AP_DEV_STATE_UNINITIATED
;
1306 spin_unlock_bh(&aq
->lock
);
1309 void _ap_queue_init_state(struct ap_queue
*aq
)
1311 aq
->dev_state
= AP_DEV_STATE_OPERATING
;
1312 aq
->sm_state
= AP_SM_STATE_RESET_START
;
1313 aq
->last_err_rc
= 0;
1314 aq
->assoc_idx
= ASSOC_IDX_INVALID
;
1315 ap_wait(ap_sm_event(aq
, AP_SM_EVENT_POLL
));
1318 void ap_queue_init_state(struct ap_queue
*aq
)
1320 spin_lock_bh(&aq
->lock
);
1321 _ap_queue_init_state(aq
);
1322 spin_unlock_bh(&aq
->lock
);
1324 EXPORT_SYMBOL(ap_queue_init_state
);