// SPDX-License-Identifier: GPL-2.0+
/*
 *  zcrypt 2.1.0
 *
 *  Copyright IBM Corp. 2001, 2012
 *  Author(s): Robert Burroughs
 *	       Eric Rossman (edrossma@us.ibm.com)
 *	       Cornelia Huck <cornelia.huck@de.ibm.com>
 *
 *  Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
 *  Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
 *				  Ralph Wuerthner <rwuerthn@de.ibm.com>
 *  MSGTYPE restruct:		  Holger Dengler <hd@linux.vnet.ibm.com>
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/miscdevice.h>
#include <linux/fs.h>
#include <linux/compat.h>
#include <linux/slab.h>
#include <linux/atomic.h>
#include <linux/uaccess.h>
#include <linux/hw_random.h>
#include <linux/debugfs.h>
#include <asm/debug.h>

#define CREATE_TRACE_POINTS
#include <asm/trace/zcrypt.h>

#include "zcrypt_api.h"
#include "zcrypt_debug.h"

#include "zcrypt_msgtype6.h"
#include "zcrypt_msgtype50.h"
/*
 * Module description.
 */
MODULE_AUTHOR("IBM Corporation");
MODULE_DESCRIPTION("Cryptographic Coprocessor interface, " \
		   "Copyright IBM Corp. 2001, 2012");
MODULE_LICENSE("GPL");
/*
 * zcrypt tracepoint functions
 */
EXPORT_TRACEPOINT_SYMBOL(s390_zcrypt_req);
EXPORT_TRACEPOINT_SYMBOL(s390_zcrypt_rep);
static int zcrypt_hwrng_seed = 1;
module_param_named(hwrng_seed, zcrypt_hwrng_seed, int, 0440);
MODULE_PARM_DESC(hwrng_seed, "Turn on/off hwrng auto seed, default is 1 (on).");
DEFINE_SPINLOCK(zcrypt_list_lock);
LIST_HEAD(zcrypt_card_list);
int zcrypt_device_count;

static atomic_t zcrypt_open_count = ATOMIC_INIT(0);
static atomic_t zcrypt_rescan_count = ATOMIC_INIT(0);

atomic_t zcrypt_rescan_req = ATOMIC_INIT(0);
EXPORT_SYMBOL(zcrypt_rescan_req);

static LIST_HEAD(zcrypt_ops_list);
/* Zcrypt related debug feature stuff. */
debug_info_t *zcrypt_dbf_info;
/**
 * Process a rescan of the transport layer.
 *
 * Returns 1 if the rescan has been processed, otherwise 0.
 */
static inline int zcrypt_process_rescan(void)
{
	if (atomic_read(&zcrypt_rescan_req)) {
		atomic_set(&zcrypt_rescan_req, 0);
		atomic_inc(&zcrypt_rescan_count);
		ap_bus_force_rescan();
		ZCRYPT_DBF(DBF_INFO, "rescan count=%07d\n",
			   atomic_inc_return(&zcrypt_rescan_count));
		return 1;
	}
	return 0;
}
void zcrypt_msgtype_register(struct zcrypt_ops *zops)
{
	list_add_tail(&zops->list, &zcrypt_ops_list);
}

void zcrypt_msgtype_unregister(struct zcrypt_ops *zops)
{
	list_del_init(&zops->list);
}
struct zcrypt_ops *zcrypt_msgtype(unsigned char *name, int variant)
{
	struct zcrypt_ops *zops;

	list_for_each_entry(zops, &zcrypt_ops_list, list)
		if ((zops->variant == variant) &&
		    (!strncmp(zops->name, name, sizeof(zops->name))))
			return zops;
	return NULL;
}
EXPORT_SYMBOL(zcrypt_msgtype);
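
/*
 * Illustrative sketch, not part of this file: a queue driver such as
 * zcrypt_cex4 is expected to resolve its msgtype ops at probe time
 * roughly like this (names taken from zcrypt_msgtype6.h):
 *
 *	zq->ops = zcrypt_msgtype(MSGTYPE06_NAME, MSGTYPE06_VARIANT_DEFAULT);
 *	if (!zq->ops)
 *		return -ENODEV;
 *
 * The ops are put on zcrypt_ops_list by the msgtype modules via
 * zcrypt_msgtype_register() above.
 */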
/**
 * zcrypt_read(): Not supported beyond zcrypt 1.3.1.
 *
 * This function is not supported beyond zcrypt 1.3.1.
 */
static ssize_t zcrypt_read(struct file *filp, char __user *buf,
			   size_t count, loff_t *f_pos)
{
	return -EPERM;
}
/**
 * zcrypt_write(): Not allowed.
 *
 * Write is not allowed.
 */
static ssize_t zcrypt_write(struct file *filp, const char __user *buf,
			    size_t count, loff_t *f_pos)
{
	return -EPERM;
}
/**
 * zcrypt_open(): Count number of users.
 *
 * Device open function to count number of users.
 */
static int zcrypt_open(struct inode *inode, struct file *filp)
{
	atomic_inc(&zcrypt_open_count);
	return nonseekable_open(inode, filp);
}
/**
 * zcrypt_release(): Count number of users.
 *
 * Device close function to count number of users.
 */
static int zcrypt_release(struct inode *inode, struct file *filp)
{
	atomic_dec(&zcrypt_open_count);
	return 0;
}
static inline struct zcrypt_queue *zcrypt_pick_queue(struct zcrypt_card *zc,
						     struct zcrypt_queue *zq,
						     unsigned int weight)
{
	if (!zq || !try_module_get(zq->queue->ap_dev.drv->driver.owner))
		return NULL;
	zcrypt_queue_get(zq);
	get_device(&zq->queue->ap_dev.device);
	atomic_add(weight, &zc->load);
	atomic_add(weight, &zq->load);
	zq->request_count++;
	return zq;
}
static inline void zcrypt_drop_queue(struct zcrypt_card *zc,
				     struct zcrypt_queue *zq,
				     unsigned int weight)
{
	struct module *mod = zq->queue->ap_dev.drv->driver.owner;

	zq->request_count--;
	atomic_sub(weight, &zc->load);
	atomic_sub(weight, &zq->load);
	put_device(&zq->queue->ap_dev.device);
	zcrypt_queue_put(zq);
	module_put(mod);
}
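
/*
 * zcrypt_pick_queue() and zcrypt_drop_queue() are used pairwise with the
 * same weight: pick takes a module, queue and device reference and adds
 * the weight to the card and queue load counters; drop releases the
 * references in reverse order and subtracts the weight again. Both are
 * called under zcrypt_list_lock by the dispatchers below.
 */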
static inline bool zcrypt_card_compare(struct zcrypt_card *zc,
				       struct zcrypt_card *pref_zc,
				       unsigned int weight,
				       unsigned int pref_weight)
{
	if (!pref_zc)
		return false;
	weight += atomic_read(&zc->load);
	pref_weight += atomic_read(&pref_zc->load);
	if (weight == pref_weight)
		return atomic_read(&zc->card->total_request_count) >
			atomic_read(&pref_zc->card->total_request_count);
	return weight > pref_weight;
}
static inline bool zcrypt_queue_compare(struct zcrypt_queue *zq,
					struct zcrypt_queue *pref_zq,
					unsigned int weight,
					unsigned int pref_weight)
{
	if (!pref_zq)
		return false;
	weight += atomic_read(&zq->load);
	pref_weight += atomic_read(&pref_zq->load);
	if (weight == pref_weight)
		return zq->queue->total_request_count >
			pref_zq->queue->total_request_count;
	return weight > pref_weight;
}
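
/*
 * Worked example for the selection heuristic above: with a request
 * weight of 3, a card with load 4 beats a card with load 10, because
 * 3 + 4 < 3 + 10. A candidate is skipped (compare returns true) when
 * its weight plus current load exceeds that of the current favourite;
 * on a tie the device with the higher lifetime request count is
 * skipped, so requests spread evenly over equally loaded devices.
 */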
/*
 * zcrypt ioctls.
 */
static long zcrypt_rsa_modexpo(struct ica_rsa_modexpo *mex)
{
	struct zcrypt_card *zc, *pref_zc;
	struct zcrypt_queue *zq, *pref_zq;
	unsigned int weight, pref_weight;
	unsigned int func_code;
	int qid = 0, rc = -ENODEV;

	trace_s390_zcrypt_req(mex, TP_ICARSAMODEXPO);

	if (mex->outputdatalength < mex->inputdatalength) {
		rc = -EINVAL;
		goto out;
	}

	/*
	 * As long as outputdatalength is big enough, we can set the
	 * outputdatalength equal to the inputdatalength, since that is the
	 * number of bytes we will copy in any case
	 */
	mex->outputdatalength = mex->inputdatalength;

	rc = get_rsa_modex_fc(mex, &func_code);
	if (rc)
		goto out;

	pref_zc = NULL;
	pref_zq = NULL;
	spin_lock(&zcrypt_list_lock);
	for_each_zcrypt_card(zc) {
		/* Check for online accelerator and CCA cards */
		if (!zc->online || !(zc->card->functions & 0x18000000))
			continue;
		/* Check for size limits */
		if (zc->min_mod_size > mex->inputdatalength ||
		    zc->max_mod_size < mex->inputdatalength)
			continue;
		/* get weight index of the card device */
		weight = zc->speed_rating[func_code];
		if (zcrypt_card_compare(zc, pref_zc, weight, pref_weight))
			continue;
		for_each_zcrypt_queue(zq, zc) {
			/* check if device is online and eligible */
			if (!zq->online || !zq->ops->rsa_modexpo)
				continue;
			if (zcrypt_queue_compare(zq, pref_zq,
						 weight, pref_weight))
				continue;
			pref_zc = zc;
			pref_zq = zq;
			pref_weight = weight;
		}
	}
	pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight);
	spin_unlock(&zcrypt_list_lock);

	if (!pref_zq) {
		rc = -ENODEV;
		goto out;
	}

	qid = pref_zq->queue->qid;
	rc = pref_zq->ops->rsa_modexpo(pref_zq, mex);

	spin_lock(&zcrypt_list_lock);
	zcrypt_drop_queue(pref_zc, pref_zq, weight);
	spin_unlock(&zcrypt_list_lock);

out:
	trace_s390_zcrypt_rep(mex, func_code, rc,
			      AP_QID_CARD(qid), AP_QID_QUEUE(qid));
	return rc;
}
static long zcrypt_rsa_crt(struct ica_rsa_modexpo_crt *crt)
{
	struct zcrypt_card *zc, *pref_zc;
	struct zcrypt_queue *zq, *pref_zq;
	unsigned int weight, pref_weight;
	unsigned int func_code;
	int qid = 0, rc = -ENODEV;

	trace_s390_zcrypt_req(crt, TP_ICARSACRT);

	if (crt->outputdatalength < crt->inputdatalength) {
		rc = -EINVAL;
		goto out;
	}

	/*
	 * As long as outputdatalength is big enough, we can set the
	 * outputdatalength equal to the inputdatalength, since that is the
	 * number of bytes we will copy in any case
	 */
	crt->outputdatalength = crt->inputdatalength;

	rc = get_rsa_crt_fc(crt, &func_code);
	if (rc)
		goto out;

	pref_zc = NULL;
	pref_zq = NULL;
	spin_lock(&zcrypt_list_lock);
	for_each_zcrypt_card(zc) {
		/* Check for online accelerator and CCA cards */
		if (!zc->online || !(zc->card->functions & 0x18000000))
			continue;
		/* Check for size limits */
		if (zc->min_mod_size > crt->inputdatalength ||
		    zc->max_mod_size < crt->inputdatalength)
			continue;
		/* get weight index of the card device */
		weight = zc->speed_rating[func_code];
		if (zcrypt_card_compare(zc, pref_zc, weight, pref_weight))
			continue;
		for_each_zcrypt_queue(zq, zc) {
			/* check if device is online and eligible */
			if (!zq->online || !zq->ops->rsa_modexpo_crt)
				continue;
			if (zcrypt_queue_compare(zq, pref_zq,
						 weight, pref_weight))
				continue;
			pref_zc = zc;
			pref_zq = zq;
			pref_weight = weight;
		}
	}
	pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight);
	spin_unlock(&zcrypt_list_lock);

	if (!pref_zq) {
		rc = -ENODEV;
		goto out;
	}

	qid = pref_zq->queue->qid;
	rc = pref_zq->ops->rsa_modexpo_crt(pref_zq, crt);

	spin_lock(&zcrypt_list_lock);
	zcrypt_drop_queue(pref_zc, pref_zq, weight);
	spin_unlock(&zcrypt_list_lock);

out:
	trace_s390_zcrypt_rep(crt, func_code, rc,
			      AP_QID_CARD(qid), AP_QID_QUEUE(qid));
	return rc;
}
long zcrypt_send_cprb(struct ica_xcRB *xcRB)
{
	struct zcrypt_card *zc, *pref_zc;
	struct zcrypt_queue *zq, *pref_zq;
	struct ap_message ap_msg;
	unsigned int weight, pref_weight;
	unsigned int func_code;
	unsigned short *domain;
	int qid = 0, rc = -ENODEV;

	trace_s390_zcrypt_req(xcRB, TB_ZSECSENDCPRB);

	ap_init_message(&ap_msg);
	rc = get_cprb_fc(xcRB, &ap_msg, &func_code, &domain);
	if (rc)
		goto out;

	pref_zc = NULL;
	pref_zq = NULL;
	spin_lock(&zcrypt_list_lock);
	for_each_zcrypt_card(zc) {
		/* Check for online CCA cards */
		if (!zc->online || !(zc->card->functions & 0x10000000))
			continue;
		/* Check for user selected CCA card */
		if (xcRB->user_defined != AUTOSELECT &&
		    xcRB->user_defined != zc->card->id)
			continue;
		/* get weight index of the card device */
		weight = speed_idx_cca(func_code) * zc->speed_rating[SECKEY];
		if (zcrypt_card_compare(zc, pref_zc, weight, pref_weight))
			continue;
		for_each_zcrypt_queue(zq, zc) {
			/* check if device is online and eligible */
			if (!zq->online ||
			    !zq->ops->send_cprb ||
			    ((*domain != (unsigned short) AUTOSELECT) &&
			     (*domain != AP_QID_QUEUE(zq->queue->qid))))
				continue;
			if (zcrypt_queue_compare(zq, pref_zq,
						 weight, pref_weight))
				continue;
			pref_zc = zc;
			pref_zq = zq;
			pref_weight = weight;
		}
	}
	pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight);
	spin_unlock(&zcrypt_list_lock);

	if (!pref_zq) {
		rc = -ENODEV;
		goto out;
	}

	/* in case of auto select, provide the correct domain */
	qid = pref_zq->queue->qid;
	if (*domain == (unsigned short) AUTOSELECT)
		*domain = AP_QID_QUEUE(qid);

	rc = pref_zq->ops->send_cprb(pref_zq, xcRB, &ap_msg);

	spin_lock(&zcrypt_list_lock);
	zcrypt_drop_queue(pref_zc, pref_zq, weight);
	spin_unlock(&zcrypt_list_lock);

out:
	ap_release_message(&ap_msg);
	trace_s390_zcrypt_rep(xcRB, func_code, rc,
			      AP_QID_CARD(qid), AP_QID_QUEUE(qid));
	return rc;
}
EXPORT_SYMBOL(zcrypt_send_cprb);
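
/*
 * Illustrative in-kernel usage sketch, not part of this file: a caller
 * such as the pkey module prepares an ica_xcRB whose request and reply
 * CPRB buffers it owns, then simply hands it in:
 *
 *	struct ica_xcRB xcrb;
 *
 *	... fill in xcrb, e.g. via a caller-side prep helper ...
 *	rc = zcrypt_send_cprb(&xcrb);
 *
 * On -EAGAIN the caller may retry; the ap_message used for transport
 * is allocated and released entirely inside this function.
 */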
static bool is_desired_ep11_card(unsigned int dev_id,
				 unsigned short target_num,
				 struct ep11_target_dev *targets)
{
	while (target_num-- > 0) {
		if (dev_id == targets->ap_id)
			return true;
		targets++;
	}
	return false;
}

static bool is_desired_ep11_queue(unsigned int dev_qid,
				  unsigned short target_num,
				  struct ep11_target_dev *targets)
{
	while (target_num-- > 0) {
		if (AP_MKQID(targets->ap_id, targets->dom_id) == dev_qid)
			return true;
		targets++;
	}
	return false;
}
static long zcrypt_send_ep11_cprb(struct ep11_urb *xcrb)
{
	struct zcrypt_card *zc, *pref_zc;
	struct zcrypt_queue *zq, *pref_zq;
	struct ep11_target_dev *targets;
	unsigned short target_num;
	unsigned int weight, pref_weight;
	unsigned int func_code;
	struct ap_message ap_msg;
	int qid = 0, rc = -ENODEV;

	trace_s390_zcrypt_req(xcrb, TP_ZSENDEP11CPRB);

	ap_init_message(&ap_msg);

	target_num = (unsigned short) xcrb->targets_num;

	/* empty list indicates autoselect (all available targets) */
	targets = NULL;
	if (target_num != 0) {
		struct ep11_target_dev __user *uptr;

		targets = kcalloc(target_num, sizeof(*targets), GFP_KERNEL);
		if (!targets) {
			rc = -ENOMEM;
			goto out;
		}

		uptr = (struct ep11_target_dev __force __user *) xcrb->targets;
		if (copy_from_user(targets, uptr,
				   target_num * sizeof(*targets))) {
			rc = -EFAULT;
			goto out_free;
		}
	}

	rc = get_ep11cprb_fc(xcrb, &ap_msg, &func_code);
	if (rc)
		goto out_free;

	pref_zc = NULL;
	pref_zq = NULL;
	spin_lock(&zcrypt_list_lock);
	for_each_zcrypt_card(zc) {
		/* Check for online EP11 cards */
		if (!zc->online || !(zc->card->functions & 0x04000000))
			continue;
		/* Check for user selected EP11 card */
		if (targets &&
		    !is_desired_ep11_card(zc->card->id, target_num, targets))
			continue;
		/* get weight index of the card device */
		weight = speed_idx_ep11(func_code) * zc->speed_rating[SECKEY];
		if (zcrypt_card_compare(zc, pref_zc, weight, pref_weight))
			continue;
		for_each_zcrypt_queue(zq, zc) {
			/* check if device is online and eligible */
			if (!zq->online ||
			    !zq->ops->send_ep11_cprb ||
			    (targets &&
			     !is_desired_ep11_queue(zq->queue->qid,
						    target_num, targets)))
				continue;
			if (zcrypt_queue_compare(zq, pref_zq,
						 weight, pref_weight))
				continue;
			pref_zc = zc;
			pref_zq = zq;
			pref_weight = weight;
		}
	}
	pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight);
	spin_unlock(&zcrypt_list_lock);

	if (!pref_zq) {
		rc = -ENODEV;
		goto out_free;
	}

	qid = pref_zq->queue->qid;
	rc = pref_zq->ops->send_ep11_cprb(pref_zq, xcrb, &ap_msg);

	spin_lock(&zcrypt_list_lock);
	zcrypt_drop_queue(pref_zc, pref_zq, weight);
	spin_unlock(&zcrypt_list_lock);

out_free:
	kfree(targets);
out:
	ap_release_message(&ap_msg);
	trace_s390_zcrypt_rep(xcrb, func_code, rc,
			      AP_QID_CARD(qid), AP_QID_QUEUE(qid));
	return rc;
}
static long zcrypt_rng(char *buffer)
{
	struct zcrypt_card *zc, *pref_zc;
	struct zcrypt_queue *zq, *pref_zq;
	unsigned int weight, pref_weight;
	unsigned int func_code;
	struct ap_message ap_msg;
	unsigned int domain;
	int qid = 0, rc = -ENODEV;

	trace_s390_zcrypt_req(buffer, TP_HWRNGCPRB);

	ap_init_message(&ap_msg);
	rc = get_rng_fc(&ap_msg, &func_code, &domain);
	if (rc)
		goto out;

	pref_zc = NULL;
	pref_zq = NULL;
	spin_lock(&zcrypt_list_lock);
	for_each_zcrypt_card(zc) {
		/* Check for online CCA cards */
		if (!zc->online || !(zc->card->functions & 0x10000000))
			continue;
		/* get weight index of the card device */
		weight = zc->speed_rating[func_code];
		if (zcrypt_card_compare(zc, pref_zc, weight, pref_weight))
			continue;
		for_each_zcrypt_queue(zq, zc) {
			/* check if device is online and eligible */
			if (!zq->online || !zq->ops->rng)
				continue;
			if (zcrypt_queue_compare(zq, pref_zq,
						 weight, pref_weight))
				continue;
			pref_zc = zc;
			pref_zq = zq;
			pref_weight = weight;
		}
	}
	pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight);
	spin_unlock(&zcrypt_list_lock);

	if (!pref_zq) {
		rc = -ENODEV;
		goto out;
	}

	qid = pref_zq->queue->qid;
	rc = pref_zq->ops->rng(pref_zq, buffer, &ap_msg);

	spin_lock(&zcrypt_list_lock);
	zcrypt_drop_queue(pref_zc, pref_zq, weight);
	spin_unlock(&zcrypt_list_lock);

out:
	ap_release_message(&ap_msg);
	trace_s390_zcrypt_rep(buffer, func_code, rc,
			      AP_QID_CARD(qid), AP_QID_QUEUE(qid));
	return rc;
}
static void zcrypt_device_status_mask(struct zcrypt_device_status *devstatus)
{
	struct zcrypt_card *zc;
	struct zcrypt_queue *zq;
	struct zcrypt_device_status *stat;
	int card, queue;

	memset(devstatus, 0, MAX_ZDEV_ENTRIES
	       * sizeof(struct zcrypt_device_status));

	spin_lock(&zcrypt_list_lock);
	for_each_zcrypt_card(zc) {
		for_each_zcrypt_queue(zq, zc) {
			card = AP_QID_CARD(zq->queue->qid);
			if (card >= MAX_ZDEV_CARDIDS)
				continue;
			queue = AP_QID_QUEUE(zq->queue->qid);
			stat = &devstatus[card * AP_DOMAINS + queue];
			stat->hwtype = zc->card->ap_dev.device_type;
			stat->functions = zc->card->functions >> 26;
			stat->qid = zq->queue->qid;
			stat->online = zq->online ? 0x01 : 0x00;
		}
	}
	spin_unlock(&zcrypt_list_lock);
}
void zcrypt_device_status_mask_ext(struct zcrypt_device_status_ext *devstatus)
{
	struct zcrypt_card *zc;
	struct zcrypt_queue *zq;
	struct zcrypt_device_status_ext *stat;
	int card, queue;

	memset(devstatus, 0, MAX_ZDEV_ENTRIES_EXT
	       * sizeof(struct zcrypt_device_status_ext));

	spin_lock(&zcrypt_list_lock);
	for_each_zcrypt_card(zc) {
		for_each_zcrypt_queue(zq, zc) {
			card = AP_QID_CARD(zq->queue->qid);
			queue = AP_QID_QUEUE(zq->queue->qid);
			stat = &devstatus[card * AP_DOMAINS + queue];
			stat->hwtype = zc->card->ap_dev.device_type;
			stat->functions = zc->card->functions >> 26;
			stat->qid = zq->queue->qid;
			stat->online = zq->online ? 0x01 : 0x00;
		}
	}
	spin_unlock(&zcrypt_list_lock);
}
EXPORT_SYMBOL(zcrypt_device_status_mask_ext);
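
/*
 * The devstatus array is a flattened card/domain matrix: the entry for
 * AP queue XX.YYYY lives at devstatus[XX * AP_DOMAINS + YYYY]. With
 * AP_DOMAINS == 256, for example, card 2 domain 5 maps to index
 * 2 * 256 + 5 = 517.
 */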
static void zcrypt_status_mask(char status[], size_t max_adapters)
{
	struct zcrypt_card *zc;
	struct zcrypt_queue *zq;
	int card;

	memset(status, 0, max_adapters);
	spin_lock(&zcrypt_list_lock);
	for_each_zcrypt_card(zc) {
		for_each_zcrypt_queue(zq, zc) {
			card = AP_QID_CARD(zq->queue->qid);
			if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index
			    || card >= max_adapters)
				continue;
			status[card] = zc->online ? zc->user_space_type : 0x0d;
		}
	}
	spin_unlock(&zcrypt_list_lock);
}
static void zcrypt_qdepth_mask(char qdepth[], size_t max_adapters)
{
	struct zcrypt_card *zc;
	struct zcrypt_queue *zq;
	int card;

	memset(qdepth, 0, max_adapters);
	spin_lock(&zcrypt_list_lock);
	local_bh_disable();
	for_each_zcrypt_card(zc) {
		for_each_zcrypt_queue(zq, zc) {
			card = AP_QID_CARD(zq->queue->qid);
			if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index
			    || card >= max_adapters)
				continue;
			spin_lock(&zq->queue->lock);
			qdepth[card] =
				zq->queue->pendingq_count +
				zq->queue->requestq_count;
			spin_unlock(&zq->queue->lock);
		}
	}
	local_bh_enable();
	spin_unlock(&zcrypt_list_lock);
}
static void zcrypt_perdev_reqcnt(int reqcnt[], size_t max_adapters)
{
	struct zcrypt_card *zc;
	struct zcrypt_queue *zq;
	int card;

	memset(reqcnt, 0, sizeof(int) * max_adapters);
	spin_lock(&zcrypt_list_lock);
	local_bh_disable();
	for_each_zcrypt_card(zc) {
		for_each_zcrypt_queue(zq, zc) {
			card = AP_QID_CARD(zq->queue->qid);
			if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index
			    || card >= max_adapters)
				continue;
			spin_lock(&zq->queue->lock);
			reqcnt[card] = zq->queue->total_request_count;
			spin_unlock(&zq->queue->lock);
		}
	}
	local_bh_enable();
	spin_unlock(&zcrypt_list_lock);
}
static int zcrypt_pendingq_count(void)
{
	struct zcrypt_card *zc;
	struct zcrypt_queue *zq;
	int pendingq_count;

	pendingq_count = 0;
	spin_lock(&zcrypt_list_lock);
	local_bh_disable();
	for_each_zcrypt_card(zc) {
		for_each_zcrypt_queue(zq, zc) {
			if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index)
				continue;
			spin_lock(&zq->queue->lock);
			pendingq_count += zq->queue->pendingq_count;
			spin_unlock(&zq->queue->lock);
		}
	}
	local_bh_enable();
	spin_unlock(&zcrypt_list_lock);
	return pendingq_count;
}
static int zcrypt_requestq_count(void)
{
	struct zcrypt_card *zc;
	struct zcrypt_queue *zq;
	int requestq_count;

	requestq_count = 0;
	spin_lock(&zcrypt_list_lock);
	local_bh_disable();
	for_each_zcrypt_card(zc) {
		for_each_zcrypt_queue(zq, zc) {
			if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index)
				continue;
			spin_lock(&zq->queue->lock);
			requestq_count += zq->queue->requestq_count;
			spin_unlock(&zq->queue->lock);
		}
	}
	local_bh_enable();
	spin_unlock(&zcrypt_list_lock);
	return requestq_count;
}
static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd,
				  unsigned long arg)
{
	int rc = 0;

	switch (cmd) {
	case ICARSAMODEXPO: {
		struct ica_rsa_modexpo __user *umex = (void __user *) arg;
		struct ica_rsa_modexpo mex;

		if (copy_from_user(&mex, umex, sizeof(mex)))
			return -EFAULT;
		do {
			rc = zcrypt_rsa_modexpo(&mex);
		} while (rc == -EAGAIN);
		/* on failure: retry once again after a requested rescan */
		if ((rc == -ENODEV) && (zcrypt_process_rescan()))
			do {
				rc = zcrypt_rsa_modexpo(&mex);
			} while (rc == -EAGAIN);
		if (rc) {
			ZCRYPT_DBF(DBF_DEBUG, "ioctl ICARSAMODEXPO rc=%d\n", rc);
			return rc;
		}
		return put_user(mex.outputdatalength, &umex->outputdatalength);
	}
	case ICARSACRT: {
		struct ica_rsa_modexpo_crt __user *ucrt = (void __user *) arg;
		struct ica_rsa_modexpo_crt crt;

		if (copy_from_user(&crt, ucrt, sizeof(crt)))
			return -EFAULT;
		do {
			rc = zcrypt_rsa_crt(&crt);
		} while (rc == -EAGAIN);
		/* on failure: retry once again after a requested rescan */
		if ((rc == -ENODEV) && (zcrypt_process_rescan()))
			do {
				rc = zcrypt_rsa_crt(&crt);
			} while (rc == -EAGAIN);
		if (rc) {
			ZCRYPT_DBF(DBF_DEBUG, "ioctl ICARSACRT rc=%d\n", rc);
			return rc;
		}
		return put_user(crt.outputdatalength, &ucrt->outputdatalength);
	}
	case ZSECSENDCPRB: {
		struct ica_xcRB __user *uxcRB = (void __user *) arg;
		struct ica_xcRB xcRB;

		if (copy_from_user(&xcRB, uxcRB, sizeof(xcRB)))
			return -EFAULT;
		do {
			rc = zcrypt_send_cprb(&xcRB);
		} while (rc == -EAGAIN);
		/* on failure: retry once again after a requested rescan */
		if ((rc == -ENODEV) && (zcrypt_process_rescan()))
			do {
				rc = zcrypt_send_cprb(&xcRB);
			} while (rc == -EAGAIN);
		if (rc)
			ZCRYPT_DBF(DBF_DEBUG, "ioctl ZSENDCPRB rc=%d\n", rc);
		if (copy_to_user(uxcRB, &xcRB, sizeof(xcRB)))
			return -EFAULT;
		return rc;
	}
	case ZSENDEP11CPRB: {
		struct ep11_urb __user *uxcrb = (void __user *)arg;
		struct ep11_urb xcrb;

		if (copy_from_user(&xcrb, uxcrb, sizeof(xcrb)))
			return -EFAULT;
		do {
			rc = zcrypt_send_ep11_cprb(&xcrb);
		} while (rc == -EAGAIN);
		/* on failure: retry once again after a requested rescan */
		if ((rc == -ENODEV) && (zcrypt_process_rescan()))
			do {
				rc = zcrypt_send_ep11_cprb(&xcrb);
			} while (rc == -EAGAIN);
		if (rc)
			ZCRYPT_DBF(DBF_DEBUG, "ioctl ZSENDEP11CPRB rc=%d\n", rc);
		if (copy_to_user(uxcrb, &xcrb, sizeof(xcrb)))
			return -EFAULT;
		return rc;
	}
	case ZCRYPT_DEVICE_STATUS: {
		struct zcrypt_device_status_ext *device_status;
		size_t total_size = MAX_ZDEV_ENTRIES_EXT
			* sizeof(struct zcrypt_device_status_ext);

		device_status = kzalloc(total_size, GFP_KERNEL);
		if (!device_status)
			return -ENOMEM;
		zcrypt_device_status_mask_ext(device_status);
		if (copy_to_user((char __user *) arg, device_status,
				 total_size))
			rc = -EFAULT;
		kfree(device_status);
		return rc;
	}
	case ZCRYPT_STATUS_MASK: {
		char status[AP_DEVICES];

		zcrypt_status_mask(status, AP_DEVICES);
		if (copy_to_user((char __user *) arg, status, sizeof(status)))
			return -EFAULT;
		return 0;
	}
	case ZCRYPT_QDEPTH_MASK: {
		char qdepth[AP_DEVICES];

		zcrypt_qdepth_mask(qdepth, AP_DEVICES);
		if (copy_to_user((char __user *) arg, qdepth, sizeof(qdepth)))
			return -EFAULT;
		return 0;
	}
	case ZCRYPT_PERDEV_REQCNT: {
		int *reqcnt;

		reqcnt = kcalloc(AP_DEVICES, sizeof(int), GFP_KERNEL);
		if (!reqcnt)
			return -ENOMEM;
		zcrypt_perdev_reqcnt(reqcnt, AP_DEVICES);
		if (copy_to_user((int __user *) arg, reqcnt,
				 sizeof(int) * AP_DEVICES))
			rc = -EFAULT;
		kfree(reqcnt);
		return rc;
	}
	case Z90STAT_REQUESTQ_COUNT:
		return put_user(zcrypt_requestq_count(), (int __user *) arg);
	case Z90STAT_PENDINGQ_COUNT:
		return put_user(zcrypt_pendingq_count(), (int __user *) arg);
	case Z90STAT_TOTALOPEN_COUNT:
		return put_user(atomic_read(&zcrypt_open_count),
				(int __user *) arg);
	case Z90STAT_DOMAIN_INDEX:
		return put_user(ap_domain_index, (int __user *) arg);
	/*
	 * Deprecated ioctls
	 */
	case ZDEVICESTATUS: {
		/* the old ioctl supports only 64 adapters */
		struct zcrypt_device_status *device_status;
		size_t total_size = MAX_ZDEV_ENTRIES
			* sizeof(struct zcrypt_device_status);

		device_status = kzalloc(total_size, GFP_KERNEL);
		if (!device_status)
			return -ENOMEM;
		zcrypt_device_status_mask(device_status);
		if (copy_to_user((char __user *) arg, device_status,
				 total_size))
			rc = -EFAULT;
		kfree(device_status);
		return rc;
	}
	case Z90STAT_STATUS_MASK: {
		/* the old ioctl supports only 64 adapters */
		char status[MAX_ZDEV_CARDIDS];

		zcrypt_status_mask(status, MAX_ZDEV_CARDIDS);
		if (copy_to_user((char __user *) arg, status, sizeof(status)))
			return -EFAULT;
		return 0;
	}
	case Z90STAT_QDEPTH_MASK: {
		/* the old ioctl supports only 64 adapters */
		char qdepth[MAX_ZDEV_CARDIDS];

		zcrypt_qdepth_mask(qdepth, MAX_ZDEV_CARDIDS);
		if (copy_to_user((char __user *) arg, qdepth, sizeof(qdepth)))
			return -EFAULT;
		return 0;
	}
	case Z90STAT_PERDEV_REQCNT: {
		/* the old ioctl supports only 64 adapters */
		int reqcnt[MAX_ZDEV_CARDIDS];

		zcrypt_perdev_reqcnt(reqcnt, MAX_ZDEV_CARDIDS);
		if (copy_to_user((int __user *) arg, reqcnt, sizeof(reqcnt)))
			return -EFAULT;
		return 0;
	}
	/* unknown ioctl number */
	default:
		ZCRYPT_DBF(DBF_DEBUG, "unknown ioctl 0x%08x\n", cmd);
		return -ENOIOCTLCMD;
	}
}
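
/*
 * Illustrative user space sketch, not part of this file: the ioctls
 * above are issued against the z90crypt misc device, for example:
 *
 *	int fd = open("/dev/z90crypt", O_RDWR);
 *	struct ica_rsa_modexpo mex;
 *
 *	... point mex at key parts and input/output buffers ...
 *	if (ioctl(fd, ICARSAMODEXPO, &mex) == 0)
 *		... mex.outputdatalength result bytes are valid ...
 */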
#ifdef CONFIG_COMPAT
/*
 * ioctl32 conversion routines
 */
struct compat_ica_rsa_modexpo {
	compat_uptr_t	inputdata;
	unsigned int	inputdatalength;
	compat_uptr_t	outputdata;
	unsigned int	outputdatalength;
	compat_uptr_t	b_key;
	compat_uptr_t	n_modulus;
};

static long trans_modexpo32(struct file *filp, unsigned int cmd,
			    unsigned long arg)
{
	struct compat_ica_rsa_modexpo __user *umex32 = compat_ptr(arg);
	struct compat_ica_rsa_modexpo mex32;
	struct ica_rsa_modexpo mex64;
	long rc;

	if (copy_from_user(&mex32, umex32, sizeof(mex32)))
		return -EFAULT;
	mex64.inputdata = compat_ptr(mex32.inputdata);
	mex64.inputdatalength = mex32.inputdatalength;
	mex64.outputdata = compat_ptr(mex32.outputdata);
	mex64.outputdatalength = mex32.outputdatalength;
	mex64.b_key = compat_ptr(mex32.b_key);
	mex64.n_modulus = compat_ptr(mex32.n_modulus);
	do {
		rc = zcrypt_rsa_modexpo(&mex64);
	} while (rc == -EAGAIN);
	/* on failure: retry once again after a requested rescan */
	if ((rc == -ENODEV) && (zcrypt_process_rescan()))
		do {
			rc = zcrypt_rsa_modexpo(&mex64);
		} while (rc == -EAGAIN);
	if (rc)
		return rc;
	return put_user(mex64.outputdatalength,
			&umex32->outputdatalength);
}
struct compat_ica_rsa_modexpo_crt {
	compat_uptr_t	inputdata;
	unsigned int	inputdatalength;
	compat_uptr_t	outputdata;
	unsigned int	outputdatalength;
	compat_uptr_t	bp_key;
	compat_uptr_t	bq_key;
	compat_uptr_t	np_prime;
	compat_uptr_t	nq_prime;
	compat_uptr_t	u_mult_inv;
};

static long trans_modexpo_crt32(struct file *filp, unsigned int cmd,
				unsigned long arg)
{
	struct compat_ica_rsa_modexpo_crt __user *ucrt32 = compat_ptr(arg);
	struct compat_ica_rsa_modexpo_crt crt32;
	struct ica_rsa_modexpo_crt crt64;
	long rc;

	if (copy_from_user(&crt32, ucrt32, sizeof(crt32)))
		return -EFAULT;
	crt64.inputdata = compat_ptr(crt32.inputdata);
	crt64.inputdatalength = crt32.inputdatalength;
	crt64.outputdata = compat_ptr(crt32.outputdata);
	crt64.outputdatalength = crt32.outputdatalength;
	crt64.bp_key = compat_ptr(crt32.bp_key);
	crt64.bq_key = compat_ptr(crt32.bq_key);
	crt64.np_prime = compat_ptr(crt32.np_prime);
	crt64.nq_prime = compat_ptr(crt32.nq_prime);
	crt64.u_mult_inv = compat_ptr(crt32.u_mult_inv);
	do {
		rc = zcrypt_rsa_crt(&crt64);
	} while (rc == -EAGAIN);
	/* on failure: retry once again after a requested rescan */
	if ((rc == -ENODEV) && (zcrypt_process_rescan()))
		do {
			rc = zcrypt_rsa_crt(&crt64);
		} while (rc == -EAGAIN);
	if (rc)
		return rc;
	return put_user(crt64.outputdatalength,
			&ucrt32->outputdatalength);
}
struct compat_ica_xcRB {
	unsigned short	agent_ID;
	unsigned int	user_defined;
	unsigned short	request_ID;
	unsigned int	request_control_blk_length;
	unsigned char	padding1[16 - sizeof(compat_uptr_t)];
	compat_uptr_t	request_control_blk_addr;
	unsigned int	request_data_length;
	char		padding2[16 - sizeof(compat_uptr_t)];
	compat_uptr_t	request_data_address;
	unsigned int	reply_control_blk_length;
	char		padding3[16 - sizeof(compat_uptr_t)];
	compat_uptr_t	reply_control_blk_addr;
	unsigned int	reply_data_length;
	char		padding4[16 - sizeof(compat_uptr_t)];
	compat_uptr_t	reply_data_addr;
	unsigned short	priority_window;
	unsigned int	status;
} __packed;

static long trans_xcRB32(struct file *filp, unsigned int cmd,
			 unsigned long arg)
{
	struct compat_ica_xcRB __user *uxcRB32 = compat_ptr(arg);
	struct compat_ica_xcRB xcRB32;
	struct ica_xcRB xcRB64;
	long rc;

	if (copy_from_user(&xcRB32, uxcRB32, sizeof(xcRB32)))
		return -EFAULT;
	xcRB64.agent_ID = xcRB32.agent_ID;
	xcRB64.user_defined = xcRB32.user_defined;
	xcRB64.request_ID = xcRB32.request_ID;
	xcRB64.request_control_blk_length =
		xcRB32.request_control_blk_length;
	xcRB64.request_control_blk_addr =
		compat_ptr(xcRB32.request_control_blk_addr);
	xcRB64.request_data_length =
		xcRB32.request_data_length;
	xcRB64.request_data_address =
		compat_ptr(xcRB32.request_data_address);
	xcRB64.reply_control_blk_length =
		xcRB32.reply_control_blk_length;
	xcRB64.reply_control_blk_addr =
		compat_ptr(xcRB32.reply_control_blk_addr);
	xcRB64.reply_data_length = xcRB32.reply_data_length;
	xcRB64.reply_data_addr =
		compat_ptr(xcRB32.reply_data_addr);
	xcRB64.priority_window = xcRB32.priority_window;
	xcRB64.status = xcRB32.status;
	do {
		rc = zcrypt_send_cprb(&xcRB64);
	} while (rc == -EAGAIN);
	/* on failure: retry once again after a requested rescan */
	if ((rc == -ENODEV) && (zcrypt_process_rescan()))
		do {
			rc = zcrypt_send_cprb(&xcRB64);
		} while (rc == -EAGAIN);
	xcRB32.reply_control_blk_length = xcRB64.reply_control_blk_length;
	xcRB32.reply_data_length = xcRB64.reply_data_length;
	xcRB32.status = xcRB64.status;
	if (copy_to_user(uxcRB32, &xcRB32, sizeof(xcRB32)))
		return -EFAULT;
	return rc;
}
static long zcrypt_compat_ioctl(struct file *filp, unsigned int cmd,
				unsigned long arg)
{
	if (cmd == ICARSAMODEXPO)
		return trans_modexpo32(filp, cmd, arg);
	if (cmd == ICARSACRT)
		return trans_modexpo_crt32(filp, cmd, arg);
	if (cmd == ZSECSENDCPRB)
		return trans_xcRB32(filp, cmd, arg);
	return zcrypt_unlocked_ioctl(filp, cmd, arg);
}
#endif
/*
 * Misc device file operations.
 */
static const struct file_operations zcrypt_fops = {
	.owner		= THIS_MODULE,
	.read		= zcrypt_read,
	.write		= zcrypt_write,
	.unlocked_ioctl	= zcrypt_unlocked_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= zcrypt_compat_ioctl,
#endif
	.open		= zcrypt_open,
	.release	= zcrypt_release,
	.llseek		= no_llseek,
};
/*
 * Misc device.
 */
static struct miscdevice zcrypt_misc_device = {
	.minor	    = MISC_DYNAMIC_MINOR,
	.name	    = "z90crypt",
	.fops	    = &zcrypt_fops,
};
static int zcrypt_rng_device_count;
static u32 *zcrypt_rng_buffer;
static int zcrypt_rng_buffer_index;
static DEFINE_MUTEX(zcrypt_rng_mutex);
static int zcrypt_rng_data_read(struct hwrng *rng, u32 *data)
{
	int rc;

	/*
	 * We don't need locking here because the RNG API guarantees serialized
	 * read method calls.
	 */
	if (zcrypt_rng_buffer_index == 0) {
		rc = zcrypt_rng((char *) zcrypt_rng_buffer);
		/* on failure: retry once again after a requested rescan */
		if ((rc == -ENODEV) && (zcrypt_process_rescan()))
			rc = zcrypt_rng((char *) zcrypt_rng_buffer);
		if (rc < 0)
			return -EIO;
		zcrypt_rng_buffer_index = rc / sizeof(*data);
	}
	*data = zcrypt_rng_buffer[--zcrypt_rng_buffer_index];
	return sizeof(*data);
}
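
/*
 * zcrypt_rng() fills a whole page of random bytes in one go; data_read
 * above hands that page out to the hwrng core one u32 at a time and
 * refills it only when zcrypt_rng_buffer_index drops back to zero.
 */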
static struct hwrng zcrypt_rng_dev = {
	.name		= "zcrypt",
	.data_read	= zcrypt_rng_data_read,
	.quality	= 990,
};
int zcrypt_rng_device_add(void)
{
	int rc = 0;

	mutex_lock(&zcrypt_rng_mutex);
	if (zcrypt_rng_device_count == 0) {
		zcrypt_rng_buffer = (u32 *) get_zeroed_page(GFP_KERNEL);
		if (!zcrypt_rng_buffer) {
			rc = -ENOMEM;
			goto out;
		}
		zcrypt_rng_buffer_index = 0;
		if (!zcrypt_hwrng_seed)
			zcrypt_rng_dev.quality = 0;
		rc = hwrng_register(&zcrypt_rng_dev);
		if (rc)
			goto out_free;
		zcrypt_rng_device_count = 1;
	} else
		zcrypt_rng_device_count++;
	mutex_unlock(&zcrypt_rng_mutex);
	return 0;

out_free:
	free_page((unsigned long) zcrypt_rng_buffer);
out:
	mutex_unlock(&zcrypt_rng_mutex);
	return rc;
}
void zcrypt_rng_device_remove(void)
{
	mutex_lock(&zcrypt_rng_mutex);
	zcrypt_rng_device_count--;
	if (zcrypt_rng_device_count == 0) {
		hwrng_unregister(&zcrypt_rng_dev);
		free_page((unsigned long) zcrypt_rng_buffer);
	}
	mutex_unlock(&zcrypt_rng_mutex);
}
int __init zcrypt_debug_init(void)
{
	zcrypt_dbf_info = debug_register("zcrypt", 1, 1,
					 DBF_MAX_SPRINTF_ARGS * sizeof(long));
	debug_register_view(zcrypt_dbf_info, &debug_sprintf_view);
	debug_set_level(zcrypt_dbf_info, DBF_ERR);

	return 0;
}

void zcrypt_debug_exit(void)
{
	debug_unregister(zcrypt_dbf_info);
}
/**
 * zcrypt_api_init(): Module initialization.
 *
 * The module initialization code.
 */
int __init zcrypt_api_init(void)
{
	int rc;

	rc = zcrypt_debug_init();
	if (rc)
		goto out;

	/* Register the request sprayer. */
	rc = misc_register(&zcrypt_misc_device);
	if (rc < 0)
		goto out;

	zcrypt_msgtype6_init();
	zcrypt_msgtype50_init();
	return 0;

out:
	return rc;
}

/**
 * zcrypt_api_exit(): Module termination.
 *
 * The module termination code.
 */
void __exit zcrypt_api_exit(void)
{
	misc_deregister(&zcrypt_misc_device);
	zcrypt_msgtype6_exit();
	zcrypt_msgtype50_exit();
	zcrypt_debug_exit();
}

module_init(zcrypt_api_init);
module_exit(zcrypt_api_exit);