/*
 * linux/drivers/s390/crypto/ap_bus.c
 *
 * Copyright (C) 2006 IBM Corporation
 * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
 *	      Martin Schwidefsky <schwidefsky@de.ibm.com>
 *	      Ralph Wuerthner <rwuerthn@de.ibm.com>
 *
 * Adjunct processor bus.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/mutex.h>
#include <asm/s390_rdev.h>
#include <asm/reset.h>

#include "ap_bus.h"
/* Some prototypes. */
static void ap_scan_bus(struct work_struct *);
static void ap_poll_all(unsigned long);
static void ap_poll_timeout(unsigned long);
static int ap_poll_thread_start(void);
static void ap_poll_thread_stop(void);
static void ap_request_timeout(unsigned long);
MODULE_AUTHOR("IBM Corporation");
MODULE_DESCRIPTION("Adjunct Processor Bus driver, "
		   "Copyright 2006 IBM Corporation");
MODULE_LICENSE("GPL");
int ap_domain_index = -1;	/* Adjunct Processor Domain Index */
module_param_named(domain, ap_domain_index, int, 0000);
MODULE_PARM_DESC(domain, "domain index for ap devices");
EXPORT_SYMBOL(ap_domain_index);

static int ap_thread_flag = 1;
module_param_named(poll_thread, ap_thread_flag, int, 0000);
MODULE_PARM_DESC(poll_thread, "Turn on/off poll thread, default is 1 (on).");
static struct device *ap_root_device = NULL;
static DEFINE_SPINLOCK(ap_device_lock);
static LIST_HEAD(ap_device_list);
/*
 * Workqueue & timer for bus rescan.
 */
static struct workqueue_struct *ap_work_queue;
static struct timer_list ap_config_timer;
static int ap_config_time = AP_CONFIG_TIME;
static DECLARE_WORK(ap_config_work, ap_scan_bus);
/*
 * Tasklet & timer for AP request polling.
 */
static struct timer_list ap_poll_timer = TIMER_INITIALIZER(ap_poll_timeout,0,0);
static DECLARE_TASKLET(ap_tasklet, ap_poll_all, 0);
static atomic_t ap_poll_requests = ATOMIC_INIT(0);
static DECLARE_WAIT_QUEUE_HEAD(ap_poll_wait);
static struct task_struct *ap_poll_kthread = NULL;
static DEFINE_MUTEX(ap_poll_thread_mutex);
/**
 * Test if ap instructions are available.
 *
 * Returns 0 if the ap instructions are installed.
 */
static inline int ap_instructions_available(void)
{
	register unsigned long reg0 asm ("0") = AP_MKQID(0,0);
	register unsigned long reg1 asm ("1") = -ENODEV;
	register unsigned long reg2 asm ("2") = 0UL;

	asm volatile(
		"   .long 0xb2af0000\n"		/* PQAP(TAPQ) */
		"0: la    %1,0\n"
		"1:\n"
		EX_TABLE(0b, 1b)
		: "+d" (reg0), "+d" (reg1), "+d" (reg2) : : "cc" );
	return reg1;
}
/**
 * Test adjunct processor queue.
 * @qid: the ap queue number
 * @queue_depth: pointer to queue depth value
 * @device_type: pointer to device type value
 *
 * Returns ap queue status structure.
 */
static inline struct ap_queue_status
ap_test_queue(ap_qid_t qid, int *queue_depth, int *device_type)
{
	register unsigned long reg0 asm ("0") = qid;
	register struct ap_queue_status reg1 asm ("1");
	register unsigned long reg2 asm ("2") = 0UL;

	asm volatile(".long 0xb2af0000"		/* PQAP(TAPQ) */
		     : "+d" (reg0), "=d" (reg1), "+d" (reg2) : : "cc");
	*device_type = (int) (reg2 >> 24);
	*queue_depth = (int) (reg2 & 0xff);
	return reg1;
}
/**
 * Reset adjunct processor queue.
 * @qid: the ap queue number
 *
 * Returns ap queue status structure.
 */
static inline struct ap_queue_status ap_reset_queue(ap_qid_t qid)
{
	register unsigned long reg0 asm ("0") = qid | 0x01000000UL;
	register struct ap_queue_status reg1 asm ("1");
	register unsigned long reg2 asm ("2") = 0UL;

	asm volatile(
		".long 0xb2af0000"		/* PQAP(RAPQ) */
		: "+d" (reg0), "=d" (reg1), "+d" (reg2) : : "cc");
	return reg1;
}
/**
 * Send message to adjunct processor queue.
 * @qid: the ap queue number
 * @psmid: the program supplied message identifier
 * @msg: the message text
 * @length: the message length
 *
 * Returns ap queue status structure.
 *
 * Condition code 1 on NQAP can't happen because the L bit is 1.
 *
 * Condition code 2 on NQAP also means the send is incomplete,
 * because a segment boundary was reached. The NQAP is repeated.
 */
static inline struct ap_queue_status
__ap_send(ap_qid_t qid, unsigned long long psmid, void *msg, size_t length)
{
	typedef struct { char _[length]; } msgblock;
	register unsigned long reg0 asm ("0") = qid | 0x40000000UL;
	register struct ap_queue_status reg1 asm ("1");
	register unsigned long reg2 asm ("2") = (unsigned long) msg;
	register unsigned long reg3 asm ("3") = (unsigned long) length;
	register unsigned long reg4 asm ("4") = (unsigned int) (psmid >> 32);
	register unsigned long reg5 asm ("5") = (unsigned int) psmid;

	asm volatile (
		"0: .long 0xb2ad0042\n"		/* NQAP */
		"   brc   2,0b"
		: "+d" (reg0), "=d" (reg1), "+d" (reg2), "+d" (reg3)
		: "d" (reg4), "d" (reg5), "m" (*(msgblock *) msg)
		: "cc" );
	return reg1;
}
int ap_send(ap_qid_t qid, unsigned long long psmid, void *msg, size_t length)
{
	struct ap_queue_status status;

	status = __ap_send(qid, psmid, msg, length);
	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
		return 0;
	case AP_RESPONSE_Q_FULL:
	case AP_RESPONSE_RESET_IN_PROGRESS:
		return -EBUSY;
	default:	/* Device is gone. */
		return -ENODEV;
	}
}
EXPORT_SYMBOL(ap_send);
/**
 * Receive message from adjunct processor queue.
 * @qid: the ap queue number
 * @psmid: pointer to program supplied message identifier
 * @msg: the message text
 * @length: the message length
 *
 * Returns ap queue status structure.
 *
 * Condition code 1 on DQAP means the receive has taken place
 * but only partially. The response is incomplete, hence the
 * DQAP is repeated.
 *
 * Condition code 2 on DQAP also means the receive is incomplete,
 * this time because a segment boundary was reached. Again, the
 * DQAP is repeated.
 *
 * Note that gpr2 is used by the DQAP instruction to keep track of
 * any 'residual' length, in case the instruction gets interrupted.
 * Hence it gets zeroed before the instruction.
 */
static inline struct ap_queue_status
__ap_recv(ap_qid_t qid, unsigned long long *psmid, void *msg, size_t length)
{
	typedef struct { char _[length]; } msgblock;
	register unsigned long reg0 asm("0") = qid | 0x80000000UL;
	register struct ap_queue_status reg1 asm ("1");
	register unsigned long reg2 asm("2") = 0UL;
	register unsigned long reg4 asm("4") = (unsigned long) msg;
	register unsigned long reg5 asm("5") = (unsigned long) length;
	register unsigned long reg6 asm("6") = 0UL;
	register unsigned long reg7 asm("7") = 0UL;

	asm volatile(
		"0: .long 0xb2ae0064\n"		/* DQAP */
		"   brc   6,0b\n"
		: "+d" (reg0), "=d" (reg1), "+d" (reg2),
		  "+d" (reg4), "+d" (reg5), "+d" (reg6), "+d" (reg7),
		  "=m" (*(msgblock *) msg) : : "cc" );
	*psmid = (((unsigned long long) reg6) << 32) + reg7;
	return reg1;
}
int ap_recv(ap_qid_t qid, unsigned long long *psmid, void *msg, size_t length)
{
	struct ap_queue_status status;

	status = __ap_recv(qid, psmid, msg, length);
	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
		return 0;
	case AP_RESPONSE_NO_PENDING_REPLY:
		if (status.queue_empty)
			return -ENOENT;
		return -EBUSY;
	case AP_RESPONSE_RESET_IN_PROGRESS:
		return -EBUSY;
	default:
		return -ENODEV;
	}
}
EXPORT_SYMBOL(ap_recv);
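/*
 * Illustrative sketch (not part of the original source): a caller of the
 * exported ap_send()/ap_recv() pair typically submits a request with a
 * program supplied message id and then retries the receive while the queue
 * reports no pending reply (-ENOENT) or is busy (-EBUSY). Queue id, buffer
 * and psmid below are made-up values.
 *
 *	unsigned long long psmid = 0x1234;
 *	void *buf = (void *) __get_free_page(GFP_KERNEL);
 *	int rc;
 *
 *	rc = ap_send(qid, psmid, request, request_len);
 *	if (!rc)
 *		do
 *			rc = ap_recv(qid, &psmid, buf, PAGE_SIZE);
 *		while (rc == -ENOENT || rc == -EBUSY);
 */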
/**
 * Check if an AP queue is available. The test is repeated for
 * AP_MAX_RESET times.
 * @qid: the ap queue number
 * @queue_depth: pointer to queue depth value
 * @device_type: pointer to device type value
 */
static int ap_query_queue(ap_qid_t qid, int *queue_depth, int *device_type)
{
	struct ap_queue_status status;
	int t_depth, t_device_type, rc, i;

	rc = -EBUSY;
	for (i = 0; i < AP_MAX_RESET; i++) {
		status = ap_test_queue(qid, &t_depth, &t_device_type);
		switch (status.response_code) {
		case AP_RESPONSE_NORMAL:
			*queue_depth = t_depth + 1;
			*device_type = t_device_type;
			rc = 0;
			break;
		case AP_RESPONSE_Q_NOT_AVAIL:
			rc = -ENODEV;
			break;
		case AP_RESPONSE_RESET_IN_PROGRESS:
			rc = -EBUSY;
			break;
		case AP_RESPONSE_DECONFIGURED:
			rc = -ENODEV;
			break;
		case AP_RESPONSE_CHECKSTOPPED:
			rc = -ENODEV;
			break;
		case AP_RESPONSE_BUSY:
			rc = -EBUSY;
			break;
		default:
			BUG();
		}
		if (rc != -EBUSY)
			break;
		if (i < AP_MAX_RESET - 1)
			udelay(5);
	}
	return rc;
}
/**
 * Reset an AP queue and wait for it to become available again.
 * @qid: the ap queue number
 */
static int ap_init_queue(ap_qid_t qid)
{
	struct ap_queue_status status;
	int rc, dummy, i;

	rc = -ENODEV;
	status = ap_reset_queue(qid);
	for (i = 0; i < AP_MAX_RESET; i++) {
		switch (status.response_code) {
		case AP_RESPONSE_NORMAL:
			if (status.queue_empty)
				rc = 0;
			break;
		case AP_RESPONSE_Q_NOT_AVAIL:
		case AP_RESPONSE_DECONFIGURED:
		case AP_RESPONSE_CHECKSTOPPED:
			i = AP_MAX_RESET;	/* return with -ENODEV */
			break;
		case AP_RESPONSE_RESET_IN_PROGRESS:
			rc = -EBUSY;
		case AP_RESPONSE_BUSY:
		default:
			break;
		}
		if (rc != -ENODEV && rc != -EBUSY)
			break;
		if (i < AP_MAX_RESET - 1) {
			udelay(5);
			status = ap_test_queue(qid, &dummy, &dummy);
		}
	}
	return rc;
}
/**
 * Arm request timeout if an AP device was idle and a new request is submitted.
 */
static void ap_increase_queue_count(struct ap_device *ap_dev)
{
	int timeout = ap_dev->drv->request_timeout;

	ap_dev->queue_count++;
	if (ap_dev->queue_count == 1) {
		mod_timer(&ap_dev->timeout, jiffies + timeout);
		ap_dev->reset = AP_RESET_ARMED;
	}
}
/**
 * AP device is still alive, re-schedule request timeout if there are still
 * pending requests.
 */
static void ap_decrease_queue_count(struct ap_device *ap_dev)
{
	int timeout = ap_dev->drv->request_timeout;

	ap_dev->queue_count--;
	if (ap_dev->queue_count > 0)
		mod_timer(&ap_dev->timeout, jiffies + timeout);
	else
		/*
		 * The timeout timer should be disabled now - since
		 * del_timer_sync() is very expensive, we just tell via the
		 * reset flag to ignore the pending timeout timer.
		 */
		ap_dev->reset = AP_RESET_IGNORE;
}
/**
 * AP device related attributes.
 */
static ssize_t ap_hwtype_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct ap_device *ap_dev = to_ap_dev(dev);
	return snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->device_type);
}
static DEVICE_ATTR(hwtype, 0444, ap_hwtype_show, NULL);
static ssize_t ap_depth_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct ap_device *ap_dev = to_ap_dev(dev);
	return snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->queue_depth);
}

static DEVICE_ATTR(depth, 0444, ap_depth_show, NULL);
static ssize_t ap_request_count_show(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct ap_device *ap_dev = to_ap_dev(dev);
	int rc;

	spin_lock_bh(&ap_dev->lock);
	rc = snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->total_request_count);
	spin_unlock_bh(&ap_dev->lock);
	return rc;
}

static DEVICE_ATTR(request_count, 0444, ap_request_count_show, NULL);
static ssize_t ap_modalias_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "ap:t%02X", to_ap_dev(dev)->device_type);
}

static DEVICE_ATTR(modalias, 0444, ap_modalias_show, NULL);
static struct attribute *ap_dev_attrs[] = {
	&dev_attr_hwtype.attr,
	&dev_attr_depth.attr,
	&dev_attr_request_count.attr,
	&dev_attr_modalias.attr,
	NULL
};
static struct attribute_group ap_dev_attr_group = {
	.attrs = ap_dev_attrs
};
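/*
 * Illustrative note (not part of the original source): the attribute group
 * above is attached to every registered AP card device, so the values can be
 * read from sysfs, e.g. (the exact card name depends on the hardware found):
 *
 *	cat /sys/bus/ap/devices/card00/hwtype
 *	cat /sys/bus/ap/devices/card00/depth
 *	cat /sys/bus/ap/devices/card00/request_count
 *	cat /sys/bus/ap/devices/card00/modalias
 */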
/**
 * AP bus driver registration/unregistration.
 */
static int ap_bus_match(struct device *dev, struct device_driver *drv)
{
	struct ap_device *ap_dev = to_ap_dev(dev);
	struct ap_driver *ap_drv = to_ap_drv(drv);
	struct ap_device_id *id;

	/*
	 * Compare device type of the device with the list of
	 * supported types of the device_driver.
	 */
	for (id = ap_drv->ids; id->match_flags; id++) {
		if ((id->match_flags & AP_DEVICE_ID_MATCH_DEVICE_TYPE) &&
		    (id->dev_type != ap_dev->device_type))
			continue;
		return 1;
	}
	return 0;
}
/**
 * uevent function for AP devices. It sets up a single environment
 * variable DEV_TYPE which contains the hardware device type.
 */
static int ap_uevent (struct device *dev, struct kobj_uevent_env *env)
{
	struct ap_device *ap_dev = to_ap_dev(dev);
	int retval = 0;

	if (!ap_dev)
		return -ENODEV;

	/* Set up DEV_TYPE environment variable. */
	retval = add_uevent_var(env, "DEV_TYPE=%04X", ap_dev->device_type);
	if (retval)
		return retval;

	/* Add MODALIAS= */
	retval = add_uevent_var(env, "MODALIAS=ap:t%02X", ap_dev->device_type);

	return retval;
}

static struct bus_type ap_bus_type = {
	.name = "ap",
	.match = &ap_bus_match,
	.uevent = &ap_uevent,
};
static int ap_device_probe(struct device *dev)
{
	struct ap_device *ap_dev = to_ap_dev(dev);
	struct ap_driver *ap_drv = to_ap_drv(dev->driver);
	int rc;

	ap_dev->drv = ap_drv;
	spin_lock_bh(&ap_device_lock);
	list_add(&ap_dev->list, &ap_device_list);
	spin_unlock_bh(&ap_device_lock);
	rc = ap_drv->probe ? ap_drv->probe(ap_dev) : -ENODEV;
	return rc;
}
/**
 * Flush all requests from the request/pending queue of an AP device.
 * @ap_dev: pointer to the AP device.
 */
static void __ap_flush_queue(struct ap_device *ap_dev)
{
	struct ap_message *ap_msg, *next;

	list_for_each_entry_safe(ap_msg, next, &ap_dev->pendingq, list) {
		list_del_init(&ap_msg->list);
		ap_dev->pendingq_count--;
		ap_dev->drv->receive(ap_dev, ap_msg, ERR_PTR(-ENODEV));
	}
	list_for_each_entry_safe(ap_msg, next, &ap_dev->requestq, list) {
		list_del_init(&ap_msg->list);
		ap_dev->requestq_count--;
		ap_dev->drv->receive(ap_dev, ap_msg, ERR_PTR(-ENODEV));
	}
}

void ap_flush_queue(struct ap_device *ap_dev)
{
	spin_lock_bh(&ap_dev->lock);
	__ap_flush_queue(ap_dev);
	spin_unlock_bh(&ap_dev->lock);
}
EXPORT_SYMBOL(ap_flush_queue);
static int ap_device_remove(struct device *dev)
{
	struct ap_device *ap_dev = to_ap_dev(dev);
	struct ap_driver *ap_drv = ap_dev->drv;

	ap_flush_queue(ap_dev);
	del_timer_sync(&ap_dev->timeout);
	if (ap_drv->remove)
		ap_drv->remove(ap_dev);
	spin_lock_bh(&ap_device_lock);
	list_del_init(&ap_dev->list);
	spin_unlock_bh(&ap_device_lock);
	spin_lock_bh(&ap_dev->lock);
	atomic_sub(ap_dev->queue_count, &ap_poll_requests);
	spin_unlock_bh(&ap_dev->lock);
	return 0;
}
int ap_driver_register(struct ap_driver *ap_drv, struct module *owner,
		       char *name)
{
	struct device_driver *drv = &ap_drv->driver;

	drv->bus = &ap_bus_type;
	drv->probe = ap_device_probe;
	drv->remove = ap_device_remove;
	drv->owner = owner;
	drv->name = name;
	return driver_register(drv);
}
EXPORT_SYMBOL(ap_driver_register);

void ap_driver_unregister(struct ap_driver *ap_drv)
{
	driver_unregister(&ap_drv->driver);
}
EXPORT_SYMBOL(ap_driver_unregister);
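/*
 * Illustrative sketch (not part of the original source): a card driver built
 * on this bus typically provides an id table plus probe/remove/receive
 * callbacks and a request timeout, and registers them through
 * ap_driver_register(). The example_* names below are made up.
 *
 *	static struct ap_device_id example_ids[] = {
 *		{ .dev_type = AP_DEVICE_TYPE_PCICA,
 *		  .match_flags = AP_DEVICE_ID_MATCH_DEVICE_TYPE },
 *		{ },
 *	};
 *
 *	static struct ap_driver example_driver = {
 *		.probe = example_probe,
 *		.remove = example_remove,
 *		.receive = example_receive,
 *		.ids = example_ids,
 *		.request_timeout = 60 * HZ,
 *	};
 *
 *	rc = ap_driver_register(&example_driver, THIS_MODULE, "example");
 */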
static ssize_t ap_domain_show(struct bus_type *bus, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n", ap_domain_index);
}

static BUS_ATTR(ap_domain, 0444, ap_domain_show, NULL);
static ssize_t ap_config_time_show(struct bus_type *bus, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n", ap_config_time);
}

static ssize_t ap_config_time_store(struct bus_type *bus,
				    const char *buf, size_t count)
{
	int time;

	if (sscanf(buf, "%d\n", &time) != 1 || time < 5 || time > 120)
		return -EINVAL;
	ap_config_time = time;
	if (!timer_pending(&ap_config_timer) ||
	    !mod_timer(&ap_config_timer, jiffies + ap_config_time * HZ)) {
		ap_config_timer.expires = jiffies + ap_config_time * HZ;
		add_timer(&ap_config_timer);
	}
	return count;
}

static BUS_ATTR(config_time, 0644, ap_config_time_show, ap_config_time_store);
static ssize_t ap_poll_thread_show(struct bus_type *bus, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n", ap_poll_kthread ? 1 : 0);
}

static ssize_t ap_poll_thread_store(struct bus_type *bus,
				    const char *buf, size_t count)
{
	int flag, rc;

	if (sscanf(buf, "%d\n", &flag) != 1)
		return -EINVAL;
	if (flag) {
		rc = ap_poll_thread_start();
		if (rc)
			return rc;
	}
	else
		ap_poll_thread_stop();
	return count;
}

static BUS_ATTR(poll_thread, 0644, ap_poll_thread_show, ap_poll_thread_store);

static struct bus_attribute *const ap_bus_attrs[] = {
	&bus_attr_ap_domain,
	&bus_attr_config_time,
	&bus_attr_poll_thread,
	NULL,
};
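/*
 * Illustrative note (not part of the original source): the bus attributes
 * above end up under /sys/bus/ap/, so the selected domain, the rescan
 * interval and the poll thread can be inspected and tuned at runtime, e.g.:
 *
 *	cat /sys/bus/ap/ap_domain
 *	echo 30 > /sys/bus/ap/config_time	(accepted range is 5..120)
 *	echo 0 > /sys/bus/ap/poll_thread
 */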
/**
 * Pick one of the 16 ap domains.
 */
static int ap_select_domain(void)
{
	int queue_depth, device_type, count, max_count, best_domain;
	int rc, i, j;

	/*
	 * We want to use a single domain. Either the one specified with
	 * the "domain=" parameter or the domain with the maximum number
	 * of devices.
	 */
	if (ap_domain_index >= 0 && ap_domain_index < AP_DOMAINS)
		/* Domain has already been selected. */
		return 0;
	best_domain = -1;
	max_count = 0;
	for (i = 0; i < AP_DOMAINS; i++) {
		count = 0;
		for (j = 0; j < AP_DEVICES; j++) {
			ap_qid_t qid = AP_MKQID(j, i);
			rc = ap_query_queue(qid, &queue_depth, &device_type);
			if (rc)
				continue;
			count++;
		}
		if (count > max_count) {
			max_count = count;
			best_domain = i;
		}
	}
	if (best_domain >= 0) {
		ap_domain_index = best_domain;
		return 0;
	}
	return -ENODEV;
}
/**
 * Find the device type if query queue returned a device type of 0.
 * @ap_dev: pointer to the AP device.
 */
static int ap_probe_device_type(struct ap_device *ap_dev)
{
	static unsigned char msg[] = {
		0x00,0x06,0x00,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x58,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
		0x01,0x00,0x43,0x43,0x41,0x2d,0x41,0x50,
		0x50,0x4c,0x20,0x20,0x20,0x01,0x01,0x01,
		0x00,0x00,0x00,0x00,0x50,0x4b,0x00,0x00,
		0x00,0x00,0x01,0x1c,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x05,0xb8,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
		0x70,0x00,0x41,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x54,0x32,0x01,0x00,0xa0,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x00,0xb8,0x05,0x00,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x0a,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x08,0x00,
		0x49,0x43,0x53,0x46,0x20,0x20,0x20,0x20,
		0x50,0x4b,0x0a,0x00,0x50,0x4b,0x43,0x53,
		0x2d,0x31,0x2e,0x32,0x37,0x00,0x11,0x22,
		0x33,0x44,0x55,0x66,0x77,0x88,0x99,0x00,
		0x11,0x22,0x33,0x44,0x55,0x66,0x77,0x88,
		0x99,0x00,0x11,0x22,0x33,0x44,0x55,0x66,
		0x77,0x88,0x99,0x00,0x11,0x22,0x33,0x44,
		0x55,0x66,0x77,0x88,0x99,0x00,0x11,0x22,
		0x33,0x44,0x55,0x66,0x77,0x88,0x99,0x00,
		0x11,0x22,0x33,0x5d,0x00,0x5b,0x00,0x77,
		0x88,0x1e,0x00,0x00,0x57,0x00,0x00,0x00,
		0x00,0x04,0x00,0x00,0x4f,0x00,0x00,0x00,
		0x03,0x02,0x00,0x00,0x40,0x01,0x00,0x01,
		0xce,0x02,0x68,0x2d,0x5f,0xa9,0xde,0x0c,
		0xf6,0xd2,0x7b,0x58,0x4b,0xf9,0x28,0x68,
		0x3d,0xb4,0xf4,0xef,0x78,0xd5,0xbe,0x66,
		0x63,0x42,0xef,0xf8,0xfd,0xa4,0xf8,0xb0,
		0x8e,0x29,0xc2,0xc9,0x2e,0xd8,0x45,0xb8,
		0x53,0x8c,0x6f,0x4e,0x72,0x8f,0x6c,0x04,
		0x9c,0x88,0xfc,0x1e,0xc5,0x83,0x55,0x57,
		0xf7,0xdd,0xfd,0x4f,0x11,0x36,0x95,0x5d,
	};
	struct ap_queue_status status;
	unsigned long long psmid;
	unsigned char *reply;
	int rc, i;

	reply = (void *) get_zeroed_page(GFP_KERNEL);
	if (!reply) {
		rc = -ENOMEM;
		goto out;
	}

	status = __ap_send(ap_dev->qid, 0x0102030405060708ULL,
			   msg, sizeof(msg));
	if (status.response_code != AP_RESPONSE_NORMAL) {
		rc = -ENODEV;
		goto out_free;
	}

	/* Wait for the test message to complete. */
	for (i = 0; i < 6; i++) {
		mdelay(300);
		status = __ap_recv(ap_dev->qid, &psmid, reply, 4096);
		if (status.response_code == AP_RESPONSE_NORMAL &&
		    psmid == 0x0102030405060708ULL)
			break;
	}
	if (i < 6) {
		/* Got an answer. */
		if (reply[0] == 0x00 && reply[1] == 0x86)
			ap_dev->device_type = AP_DEVICE_TYPE_PCICC;
		else
			ap_dev->device_type = AP_DEVICE_TYPE_PCICA;
		rc = 0;
	} else
		rc = -ENODEV;

out_free:
	free_page((unsigned long) reply);
out:
	return rc;
}
/*
 * Scan the ap bus for new devices.
 */
static int __ap_scan_bus(struct device *dev, void *data)
{
	return to_ap_dev(dev)->qid == (ap_qid_t)(unsigned long) data;
}

static void ap_device_release(struct device *dev)
{
	struct ap_device *ap_dev = to_ap_dev(dev);

	kfree(ap_dev);
}
static void ap_scan_bus(struct work_struct *unused)
{
	struct ap_device *ap_dev;
	struct device *dev;
	ap_qid_t qid;
	int queue_depth, device_type;
	int rc, i;

	if (ap_select_domain() != 0)
		return;
	for (i = 0; i < AP_DEVICES; i++) {
		qid = AP_MKQID(i, ap_domain_index);
		dev = bus_find_device(&ap_bus_type, NULL,
				      (void *)(unsigned long)qid,
				      __ap_scan_bus);
		rc = ap_query_queue(qid, &queue_depth, &device_type);
		if (dev) {
			if (rc == -EBUSY) {
				set_current_state(TASK_UNINTERRUPTIBLE);
				schedule_timeout(AP_RESET_TIMEOUT);
				rc = ap_query_queue(qid, &queue_depth,
						    &device_type);
			}
			ap_dev = to_ap_dev(dev);
			spin_lock_bh(&ap_dev->lock);
			if (rc || ap_dev->unregistered) {
				spin_unlock_bh(&ap_dev->lock);
				device_unregister(dev);
				put_device(dev);
				continue;
			}
			spin_unlock_bh(&ap_dev->lock);
			put_device(dev);
			continue;
		}
		if (rc)
			continue;
		rc = ap_init_queue(qid);
		if (rc)
			continue;
		ap_dev = kzalloc(sizeof(*ap_dev), GFP_KERNEL);
		if (!ap_dev)
			break;
		ap_dev->qid = qid;
		ap_dev->queue_depth = queue_depth;
		ap_dev->unregistered = 1;
		spin_lock_init(&ap_dev->lock);
		INIT_LIST_HEAD(&ap_dev->pendingq);
		INIT_LIST_HEAD(&ap_dev->requestq);
		INIT_LIST_HEAD(&ap_dev->list);
		setup_timer(&ap_dev->timeout, ap_request_timeout,
			    (unsigned long) ap_dev);
		if (device_type == 0)
			ap_probe_device_type(ap_dev);
		else
			ap_dev->device_type = device_type;

		ap_dev->device.bus = &ap_bus_type;
		ap_dev->device.parent = ap_root_device;
		snprintf(ap_dev->device.bus_id, BUS_ID_SIZE, "card%02x",
			 AP_QID_DEVICE(ap_dev->qid));
		ap_dev->device.release = ap_device_release;
		rc = device_register(&ap_dev->device);
		if (rc) {
			kfree(ap_dev);
			continue;
		}
		/* Add device attributes. */
		rc = sysfs_create_group(&ap_dev->device.kobj,
					&ap_dev_attr_group);
		if (!rc) {
			spin_lock_bh(&ap_dev->lock);
			ap_dev->unregistered = 0;
			spin_unlock_bh(&ap_dev->lock);
		}
		else
			device_unregister(&ap_dev->device);
	}
}
static void
ap_config_timeout(unsigned long ptr)
{
	queue_work(ap_work_queue, &ap_config_work);
	ap_config_timer.expires = jiffies + ap_config_time * HZ;
	add_timer(&ap_config_timer);
}
/*
 * Set up the timer to run the poll tasklet
 */
static inline void ap_schedule_poll_timer(void)
{
	if (timer_pending(&ap_poll_timer))
		return;
	mod_timer(&ap_poll_timer, jiffies + AP_POLL_TIME);
}
/**
 * Receive pending reply messages from an AP device.
 * @ap_dev: pointer to the AP device
 * @flags: pointer to control flags, bit 2^0 is set if another poll is
 *	   required, bit 2^1 is set if the poll timer needs to get armed
 * Returns 0 if the device is still present, -ENODEV if not.
 */
static int ap_poll_read(struct ap_device *ap_dev, unsigned long *flags)
{
	struct ap_queue_status status;
	struct ap_message *ap_msg;

	if (ap_dev->queue_count <= 0)
		return 0;
	status = __ap_recv(ap_dev->qid, &ap_dev->reply->psmid,
			   ap_dev->reply->message, ap_dev->reply->length);
	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
		atomic_dec(&ap_poll_requests);
		ap_decrease_queue_count(ap_dev);
		list_for_each_entry(ap_msg, &ap_dev->pendingq, list) {
			if (ap_msg->psmid != ap_dev->reply->psmid)
				continue;
			list_del_init(&ap_msg->list);
			ap_dev->pendingq_count--;
			ap_dev->drv->receive(ap_dev, ap_msg, ap_dev->reply);
			break;
		}
		if (ap_dev->queue_count > 0)
			*flags |= 1;
		return 0;
	case AP_RESPONSE_NO_PENDING_REPLY:
		if (status.queue_empty) {
			/* The card shouldn't forget requests but who knows. */
			atomic_sub(ap_dev->queue_count, &ap_poll_requests);
			ap_dev->queue_count = 0;
			list_splice_init(&ap_dev->pendingq, &ap_dev->requestq);
			ap_dev->requestq_count += ap_dev->pendingq_count;
			ap_dev->pendingq_count = 0;
		} else
			*flags |= 2;
		return 0;
	default:
		return -ENODEV;
	}
}
/**
 * Send messages from the request queue to an AP device.
 * @ap_dev: pointer to the AP device
 * @flags: pointer to control flags, bit 2^0 is set if another poll is
 *	   required, bit 2^1 is set if the poll timer needs to get armed
 * Returns 0 if the device is still present, -ENODEV if not.
 */
static int ap_poll_write(struct ap_device *ap_dev, unsigned long *flags)
{
	struct ap_queue_status status;
	struct ap_message *ap_msg;

	if (ap_dev->requestq_count <= 0 ||
	    ap_dev->queue_count >= ap_dev->queue_depth)
		return 0;
	/* Start the next request on the queue. */
	ap_msg = list_entry(ap_dev->requestq.next, struct ap_message, list);
	status = __ap_send(ap_dev->qid, ap_msg->psmid,
			   ap_msg->message, ap_msg->length);
	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
		atomic_inc(&ap_poll_requests);
		ap_increase_queue_count(ap_dev);
		list_move_tail(&ap_msg->list, &ap_dev->pendingq);
		ap_dev->requestq_count--;
		ap_dev->pendingq_count++;
		if (ap_dev->queue_count < ap_dev->queue_depth &&
		    ap_dev->requestq_count > 0)
			*flags |= 1;
		*flags |= 2;
		return 0;
	case AP_RESPONSE_Q_FULL:
	case AP_RESPONSE_RESET_IN_PROGRESS:
		*flags |= 2;
		return 0;
	case AP_RESPONSE_MESSAGE_TOO_BIG:
		return -EINVAL;
	default:
		return -ENODEV;
	}
}
/**
 * Poll AP device for pending replies and send new messages. If either
 * ap_poll_read or ap_poll_write returns -ENODEV unregister the device.
 * @ap_dev: pointer to the bus device
 * @flags: pointer to control flags, bit 2^0 is set if another poll is
 *	   required, bit 2^1 is set if the poll timer needs to get armed
 */
static inline int ap_poll_queue(struct ap_device *ap_dev, unsigned long *flags)
{
	int rc;

	rc = ap_poll_read(ap_dev, flags);
	if (rc)
		return rc;
	return ap_poll_write(ap_dev, flags);
}
/**
 * Queue a message to a device.
 * @ap_dev: pointer to the AP device
 * @ap_msg: the message to be queued
 */
static int __ap_queue_message(struct ap_device *ap_dev, struct ap_message *ap_msg)
{
	struct ap_queue_status status;

	if (list_empty(&ap_dev->requestq) &&
	    ap_dev->queue_count < ap_dev->queue_depth) {
		status = __ap_send(ap_dev->qid, ap_msg->psmid,
				   ap_msg->message, ap_msg->length);
		switch (status.response_code) {
		case AP_RESPONSE_NORMAL:
			list_add_tail(&ap_msg->list, &ap_dev->pendingq);
			atomic_inc(&ap_poll_requests);
			ap_dev->pendingq_count++;
			ap_increase_queue_count(ap_dev);
			ap_dev->total_request_count++;
			break;
		case AP_RESPONSE_Q_FULL:
		case AP_RESPONSE_RESET_IN_PROGRESS:
			list_add_tail(&ap_msg->list, &ap_dev->requestq);
			ap_dev->requestq_count++;
			ap_dev->total_request_count++;
			return -EBUSY;
		case AP_RESPONSE_MESSAGE_TOO_BIG:
			ap_dev->drv->receive(ap_dev, ap_msg, ERR_PTR(-EINVAL));
			return -EINVAL;
		default:	/* Device is gone. */
			ap_dev->drv->receive(ap_dev, ap_msg, ERR_PTR(-ENODEV));
			return -ENODEV;
		}
	} else {
		list_add_tail(&ap_msg->list, &ap_dev->requestq);
		ap_dev->requestq_count++;
		ap_dev->total_request_count++;
		return -EBUSY;
	}
	ap_schedule_poll_timer();
	return 0;
}
void ap_queue_message(struct ap_device *ap_dev, struct ap_message *ap_msg)
{
	unsigned long flags;
	int rc;

	spin_lock_bh(&ap_dev->lock);
	if (!ap_dev->unregistered) {
		/* Make room on the queue by polling for finished requests. */
		rc = ap_poll_queue(ap_dev, &flags);
		if (!rc)
			rc = __ap_queue_message(ap_dev, ap_msg);
		if (!rc)
			wake_up(&ap_poll_wait);
		if (rc == -ENODEV)
			ap_dev->unregistered = 1;
	} else {
		ap_dev->drv->receive(ap_dev, ap_msg, ERR_PTR(-ENODEV));
		rc = -ENODEV;
	}
	spin_unlock_bh(&ap_dev->lock);
	if (rc == -ENODEV)
		device_unregister(&ap_dev->device);
}
EXPORT_SYMBOL(ap_queue_message);
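/*
 * Illustrative sketch (not part of the original source): a driver fills in a
 * struct ap_message and hands it to the bus; the reply is later delivered
 * through the driver's receive() callback with the matching psmid. The
 * buffer names and the psmid value below are made up.
 *
 *	struct ap_message ap_msg;
 *
 *	ap_msg.psmid = 0x0011223344556677ULL;
 *	ap_msg.message = request_buf;
 *	ap_msg.length = request_len;
 *	ap_queue_message(ap_dev, &ap_msg);
 */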
/**
 * Cancel a crypto request. This is done by removing the request
 * from the device pendingq or requestq queue. Note that the
 * request stays on the AP queue. When it finishes the message
 * reply will be discarded because the psmid can't be found.
 * @ap_dev: AP device that has the message queued
 * @ap_msg: the message that is to be removed
 */
void ap_cancel_message(struct ap_device *ap_dev, struct ap_message *ap_msg)
{
	struct ap_message *tmp;

	spin_lock_bh(&ap_dev->lock);
	if (!list_empty(&ap_msg->list)) {
		list_for_each_entry(tmp, &ap_dev->pendingq, list)
			if (tmp->psmid == ap_msg->psmid) {
				ap_dev->pendingq_count--;
				goto found;
			}
		ap_dev->requestq_count--;
found:
		list_del_init(&ap_msg->list);
	}
	spin_unlock_bh(&ap_dev->lock);
}
EXPORT_SYMBOL(ap_cancel_message);
/*
 * AP receive polling for finished AP requests
 */
static void ap_poll_timeout(unsigned long unused)
{
	tasklet_schedule(&ap_tasklet);
}
/*
 * Reset a not responding AP device and move all requests from the
 * pending queue to the request queue.
 */
static void ap_reset(struct ap_device *ap_dev)
{
	int rc;

	ap_dev->reset = AP_RESET_IGNORE;
	atomic_sub(ap_dev->queue_count, &ap_poll_requests);
	ap_dev->queue_count = 0;
	list_splice_init(&ap_dev->pendingq, &ap_dev->requestq);
	ap_dev->requestq_count += ap_dev->pendingq_count;
	ap_dev->pendingq_count = 0;
	rc = ap_init_queue(ap_dev->qid);
	if (rc == -ENODEV)
		ap_dev->unregistered = 1;
}
/*
 * Poll all AP devices on the bus in a round robin fashion. Continue
 * polling until bit 2^0 of the control flags is not set. If bit 2^1
 * of the control flags has been set arm the poll timer.
 */
static int __ap_poll_all(struct ap_device *ap_dev, unsigned long *flags)
{
	spin_lock(&ap_dev->lock);
	if (!ap_dev->unregistered) {
		if (ap_poll_queue(ap_dev, flags))
			ap_dev->unregistered = 1;
		if (ap_dev->reset == AP_RESET_DO)
			ap_reset(ap_dev);
	}
	spin_unlock(&ap_dev->lock);
	return 0;
}
static void ap_poll_all(unsigned long dummy)
{
	unsigned long flags;
	struct ap_device *ap_dev;

	do {
		flags = 0;
		spin_lock(&ap_device_lock);
		list_for_each_entry(ap_dev, &ap_device_list, list) {
			__ap_poll_all(ap_dev, &flags);
		}
		spin_unlock(&ap_device_lock);
	} while (flags & 1);
	if (flags & 2)
		ap_schedule_poll_timer();
}
/*
 * AP bus poll thread. The purpose of this thread is to poll for
 * finished requests in a loop if there is a "free" cpu - that is
 * a cpu that doesn't have anything better to do. The polling stops
 * as soon as there is another task or if all messages have been
 * delivered.
 */
static int ap_poll_thread(void *data)
{
	DECLARE_WAITQUEUE(wait, current);
	unsigned long flags;
	int requests;
	struct ap_device *ap_dev;

	set_user_nice(current, 19);
	while (1) {
		if (need_resched()) {
			schedule();
			continue;
		}
		add_wait_queue(&ap_poll_wait, &wait);
		set_current_state(TASK_INTERRUPTIBLE);
		if (kthread_should_stop())
			break;
		requests = atomic_read(&ap_poll_requests);
		if (requests <= 0)
			schedule();
		set_current_state(TASK_RUNNING);
		remove_wait_queue(&ap_poll_wait, &wait);

		flags = 0;
		spin_lock_bh(&ap_device_lock);
		list_for_each_entry(ap_dev, &ap_device_list, list) {
			__ap_poll_all(ap_dev, &flags);
		}
		spin_unlock_bh(&ap_device_lock);
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(&ap_poll_wait, &wait);
	return 0;
}
static int ap_poll_thread_start(void)
{
	int rc;

	mutex_lock(&ap_poll_thread_mutex);
	if (!ap_poll_kthread) {
		ap_poll_kthread = kthread_run(ap_poll_thread, NULL, "appoll");
		rc = IS_ERR(ap_poll_kthread) ? PTR_ERR(ap_poll_kthread) : 0;
		if (rc)
			ap_poll_kthread = NULL;
	}
	else
		rc = 0;
	mutex_unlock(&ap_poll_thread_mutex);
	return rc;
}
static void ap_poll_thread_stop(void)
{
	mutex_lock(&ap_poll_thread_mutex);
	if (ap_poll_kthread) {
		kthread_stop(ap_poll_kthread);
		ap_poll_kthread = NULL;
	}
	mutex_unlock(&ap_poll_thread_mutex);
}
/*
 * Handling of request timeouts
 */
static void ap_request_timeout(unsigned long data)
{
	struct ap_device *ap_dev = (struct ap_device *) data;

	if (ap_dev->reset == AP_RESET_ARMED)
		ap_dev->reset = AP_RESET_DO;
}
static void ap_reset_domain(void)
{
	int i;

	if (ap_domain_index != -1)
		for (i = 0; i < AP_DEVICES; i++)
			ap_reset_queue(AP_MKQID(i, ap_domain_index));
}
static void ap_reset_all(void)
{
	int i, j;

	for (i = 0; i < AP_DOMAINS; i++)
		for (j = 0; j < AP_DEVICES; j++)
			ap_reset_queue(AP_MKQID(j, i));
}

static struct reset_call ap_reset_call = {
	.fn = ap_reset_all,
};
/*
 * The module initialization code.
 */
int __init ap_module_init(void)
{
	int rc, i;

	if (ap_domain_index < -1 || ap_domain_index >= AP_DOMAINS) {
		printk(KERN_WARNING "Invalid param: domain = %d. "
		       " Not loading.\n", ap_domain_index);
		return -EINVAL;
	}
	if (ap_instructions_available() != 0) {
		printk(KERN_WARNING "AP instructions not installed.\n");
		return -ENODEV;
	}
	register_reset_call(&ap_reset_call);

	/* Create /sys/bus/ap. */
	rc = bus_register(&ap_bus_type);
	if (rc)
		goto out;
	for (i = 0; ap_bus_attrs[i]; i++) {
		rc = bus_create_file(&ap_bus_type, ap_bus_attrs[i]);
		if (rc)
			goto out_bus;
	}

	/* Create /sys/devices/ap. */
	ap_root_device = s390_root_dev_register("ap");
	rc = IS_ERR(ap_root_device) ? PTR_ERR(ap_root_device) : 0;
	if (rc)
		goto out_bus;

	ap_work_queue = create_singlethread_workqueue("kapwork");
	if (!ap_work_queue) {
		rc = -ENOMEM;
		goto out_root;
	}

	if (ap_select_domain() == 0)
		ap_scan_bus(NULL);

	/* Setup the ap bus rescan timer. */
	init_timer(&ap_config_timer);
	ap_config_timer.function = ap_config_timeout;
	ap_config_timer.data = 0;
	ap_config_timer.expires = jiffies + ap_config_time * HZ;
	add_timer(&ap_config_timer);

	/* Start the low priority AP bus poll thread. */
	if (ap_thread_flag) {
		rc = ap_poll_thread_start();
		if (rc)
			goto out_work;
	}

	return 0;

out_work:
	del_timer_sync(&ap_config_timer);
	del_timer_sync(&ap_poll_timer);
	destroy_workqueue(ap_work_queue);
out_root:
	s390_root_dev_unregister(ap_root_device);
out_bus:
	while (i--)
		bus_remove_file(&ap_bus_type, ap_bus_attrs[i]);
	bus_unregister(&ap_bus_type);
out:
	unregister_reset_call(&ap_reset_call);
	return rc;
}
static int __ap_match_all(struct device *dev, void *data)
{
	return 1;
}
/*
 * The module termination code
 */
void ap_module_exit(void)
{
	int i;
	struct device *dev;

	ap_reset_domain();
	ap_poll_thread_stop();
	del_timer_sync(&ap_config_timer);
	del_timer_sync(&ap_poll_timer);
	destroy_workqueue(ap_work_queue);
	tasklet_kill(&ap_tasklet);
	s390_root_dev_unregister(ap_root_device);
	while ((dev = bus_find_device(&ap_bus_type, NULL, NULL,
		    __ap_match_all)))
	{
		device_unregister(dev);
		put_device(dev);
	}
	for (i = 0; ap_bus_attrs[i]; i++)
		bus_remove_file(&ap_bus_type, ap_bus_attrs[i]);
	bus_unregister(&ap_bus_type);
	unregister_reset_call(&ap_reset_call);
}
#ifndef CONFIG_ZCRYPT_MONOLITHIC
module_init(ap_module_init);
module_exit(ap_module_exit);
#endif