// SPDX-License-Identifier: GPL-2.0+
/*
 *  Copyright IBM Corp. 2001, 2012
 *  Author(s): Robert Burroughs
 *	       Eric Rossman (edrossma@us.ibm.com)
 *	       Cornelia Huck <cornelia.huck@de.ibm.com>
 *
 *  Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
 *  Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
 *				  Ralph Wuerthner <rwuerthn@de.ibm.com>
 *  MSGTYPE restruct:		  Holger Dengler <hd@linux.vnet.ibm.com>
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/miscdevice.h>
#include <linux/fs.h>
#include <linux/compat.h>
#include <linux/slab.h>
#include <linux/atomic.h>
#include <linux/uaccess.h>
#include <linux/hw_random.h>
#include <linux/debugfs.h>
#include <asm/debug.h>

#define CREATE_TRACE_POINTS
#include <asm/trace/zcrypt.h>

#include "zcrypt_api.h"
#include "zcrypt_debug.h"

#include "zcrypt_msgtype6.h"
#include "zcrypt_msgtype50.h"

MODULE_AUTHOR("IBM Corporation");
MODULE_DESCRIPTION("Cryptographic Coprocessor interface, " \
		   "Copyright IBM Corp. 2001, 2012");
MODULE_LICENSE("GPL");

/*
 * zcrypt tracepoint functions
 */
EXPORT_TRACEPOINT_SYMBOL(s390_zcrypt_req);
EXPORT_TRACEPOINT_SYMBOL(s390_zcrypt_rep);

static int zcrypt_hwrng_seed = 1;
module_param_named(hwrng_seed, zcrypt_hwrng_seed, int, S_IRUSR|S_IRGRP);
MODULE_PARM_DESC(hwrng_seed, "Turn on/off hwrng auto seed, default is 1 (on).");

DEFINE_SPINLOCK(zcrypt_list_lock);
LIST_HEAD(zcrypt_card_list);
int zcrypt_device_count;

static atomic_t zcrypt_open_count = ATOMIC_INIT(0);
static atomic_t zcrypt_rescan_count = ATOMIC_INIT(0);

atomic_t zcrypt_rescan_req = ATOMIC_INIT(0);
EXPORT_SYMBOL(zcrypt_rescan_req);

static LIST_HEAD(zcrypt_ops_list);

/* Zcrypt related debug feature stuff. */
debug_info_t *zcrypt_dbf_info;

/*
 * Process a rescan of the transport layer.
 *
 * Returns 1 if the rescan has been processed, otherwise 0.
 */
static inline int zcrypt_process_rescan(void)
{
	if (atomic_read(&zcrypt_rescan_req)) {
		atomic_set(&zcrypt_rescan_req, 0);
		ap_bus_force_rescan();
		ZCRYPT_DBF(DBF_INFO, "rescan count=%07d\n",
			   atomic_inc_return(&zcrypt_rescan_count));
		return 1;
	}
	return 0;
}

void zcrypt_msgtype_register(struct zcrypt_ops *zops)
{
	list_add_tail(&zops->list, &zcrypt_ops_list);
}

void zcrypt_msgtype_unregister(struct zcrypt_ops *zops)
{
	list_del_init(&zops->list);
}

struct zcrypt_ops *zcrypt_msgtype(unsigned char *name, int variant)
{
	struct zcrypt_ops *zops;

	list_for_each_entry(zops, &zcrypt_ops_list, list)
		if ((zops->variant == variant) &&
		    (!strncmp(zops->name, name, sizeof(zops->name))))
			return zops;
	return NULL;
}
EXPORT_SYMBOL(zcrypt_msgtype);
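
/*
 * Usage sketch (hypothetical caller): a queue driver typically resolves
 * its ops table once at probe time, e.g.
 *	zq->ops = zcrypt_msgtype(MSGTYPE06_NAME, MSGTYPE06_VARIANT_DEFAULT);
 * with the name/variant constants from zcrypt_msgtype6.h or
 * zcrypt_msgtype50.h; a NULL result means no matching handler has been
 * registered.
 */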

/*
 * zcrypt_read(): Not supported beyond zcrypt 1.3.1.
 *
 * This function is not supported beyond zcrypt 1.3.1.
 */
static ssize_t zcrypt_read(struct file *filp, char __user *buf,
			   size_t count, loff_t *f_pos)
{
	return -EPERM;
}

/*
 * zcrypt_write(): Not allowed.
 *
 * Write is not allowed.
 */
static ssize_t zcrypt_write(struct file *filp, const char __user *buf,
			    size_t count, loff_t *f_pos)
{
	return -EPERM;
}

/*
 * zcrypt_open(): Count number of users.
 *
 * Device open function to count number of users.
 */
static int zcrypt_open(struct inode *inode, struct file *filp)
{
	atomic_inc(&zcrypt_open_count);
	return nonseekable_open(inode, filp);
}

/*
 * zcrypt_release(): Count number of users.
 *
 * Device close function to count number of users.
 */
static int zcrypt_release(struct inode *inode, struct file *filp)
{
	atomic_dec(&zcrypt_open_count);
	return 0;
}

static inline struct zcrypt_queue *zcrypt_pick_queue(struct zcrypt_card *zc,
						     struct zcrypt_queue *zq,
						     unsigned int weight)
{
	if (!zq || !try_module_get(zq->queue->ap_dev.drv->driver.owner))
		return NULL;
	zcrypt_queue_get(zq);
	get_device(&zq->queue->ap_dev.device);
	atomic_add(weight, &zc->load);
	atomic_add(weight, &zq->load);
	zq->request_count++;
	return zq;
}

static inline void zcrypt_drop_queue(struct zcrypt_card *zc,
				     struct zcrypt_queue *zq,
				     unsigned int weight)
{
	struct module *mod = zq->queue->ap_dev.drv->driver.owner;

	zq->request_count--;
	atomic_sub(weight, &zc->load);
	atomic_sub(weight, &zq->load);
	put_device(&zq->queue->ap_dev.device);
	zcrypt_queue_put(zq);
	module_put(mod);
}

static inline bool zcrypt_card_compare(struct zcrypt_card *zc,
				       struct zcrypt_card *pref_zc,
				       unsigned int weight,
				       unsigned int pref_weight)
{
	if (!pref_zc)
		return false;
	weight += atomic_read(&zc->load);
	pref_weight += atomic_read(&pref_zc->load);
	if (weight == pref_weight)
		return atomic_read(&zc->card->total_request_count) >
			atomic_read(&pref_zc->card->total_request_count);
	return weight > pref_weight;
}

static inline bool zcrypt_queue_compare(struct zcrypt_queue *zq,
					struct zcrypt_queue *pref_zq,
					unsigned int weight,
					unsigned int pref_weight)
{
	if (!pref_zq)
		return false;
	weight += atomic_read(&zq->load);
	pref_weight += atomic_read(&pref_zq->load);
	if (weight == pref_weight)
		return zq->queue->total_request_count >
			pref_zq->queue->total_request_count;
	return weight > pref_weight;
}
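
/*
 * Worked example of the balancing arithmetic (illustrative numbers
 * only): for two candidate cards A and B with the same function code,
 *	A: speed_rating[fc] = 10, current load = 20  ->  10 + 20 = 30
 *	B: speed_rating[fc] =  8, current load = 25  ->   8 + 25 = 33
 * A is preferred because its combined weight is lower; on a tie the
 * card (or queue) with the smaller total_request_count wins.
 */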

static long zcrypt_rsa_modexpo(struct ica_rsa_modexpo *mex)
{
	struct zcrypt_card *zc, *pref_zc;
	struct zcrypt_queue *zq, *pref_zq;
	unsigned int weight, pref_weight;
	unsigned int func_code;
	int qid = 0, rc = -ENODEV;

	trace_s390_zcrypt_req(mex, TP_ICARSAMODEXPO);

	if (mex->outputdatalength < mex->inputdatalength) {
		rc = -EINVAL;
		goto out;
	}

	/*
	 * As long as outputdatalength is big enough, we can set the
	 * outputdatalength equal to the inputdatalength, since that is the
	 * number of bytes we will copy in any case.
	 */
	mex->outputdatalength = mex->inputdatalength;

	rc = get_rsa_modex_fc(mex, &func_code);
	if (rc)
		goto out;

	pref_zc = NULL;
	pref_zq = NULL;
	spin_lock(&zcrypt_list_lock);
	for_each_zcrypt_card(zc) {
		/* Check for online accelerator and CCA cards */
		if (!zc->online || !(zc->card->functions & 0x18000000))
			continue;
		/* Check for size limits */
		if (zc->min_mod_size > mex->inputdatalength ||
		    zc->max_mod_size < mex->inputdatalength)
			continue;
		/* get weight index of the card device */
		weight = zc->speed_rating[func_code];
		if (zcrypt_card_compare(zc, pref_zc, weight, pref_weight))
			continue;
		for_each_zcrypt_queue(zq, zc) {
			/* check if device is online and eligible */
			if (!zq->online || !zq->ops->rsa_modexpo)
				continue;
			if (zcrypt_queue_compare(zq, pref_zq,
						 weight, pref_weight))
				continue;
			pref_zc = zc;
			pref_zq = zq;
			pref_weight = weight;
		}
	}
	pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight);
	spin_unlock(&zcrypt_list_lock);

	if (!pref_zq) {
		rc = -ENODEV;
		goto out;
	}

	qid = pref_zq->queue->qid;
	rc = pref_zq->ops->rsa_modexpo(pref_zq, mex);

	spin_lock(&zcrypt_list_lock);
	zcrypt_drop_queue(pref_zc, pref_zq, weight);
	spin_unlock(&zcrypt_list_lock);

out:
	trace_s390_zcrypt_rep(mex, func_code, rc,
			      AP_QID_CARD(qid), AP_QID_QUEUE(qid));
	return rc;
}

static long zcrypt_rsa_crt(struct ica_rsa_modexpo_crt *crt)
{
	struct zcrypt_card *zc, *pref_zc;
	struct zcrypt_queue *zq, *pref_zq;
	unsigned int weight, pref_weight;
	unsigned int func_code;
	int qid = 0, rc = -ENODEV;

	trace_s390_zcrypt_req(crt, TP_ICARSACRT);

	if (crt->outputdatalength < crt->inputdatalength) {
		rc = -EINVAL;
		goto out;
	}

	/*
	 * As long as outputdatalength is big enough, we can set the
	 * outputdatalength equal to the inputdatalength, since that is the
	 * number of bytes we will copy in any case.
	 */
	crt->outputdatalength = crt->inputdatalength;

	rc = get_rsa_crt_fc(crt, &func_code);
	if (rc)
		goto out;

	pref_zc = NULL;
	pref_zq = NULL;
	spin_lock(&zcrypt_list_lock);
	for_each_zcrypt_card(zc) {
		/* Check for online accelerator and CCA cards */
		if (!zc->online || !(zc->card->functions & 0x18000000))
			continue;
		/* Check for size limits */
		if (zc->min_mod_size > crt->inputdatalength ||
		    zc->max_mod_size < crt->inputdatalength)
			continue;
		/* get weight index of the card device */
		weight = zc->speed_rating[func_code];
		if (zcrypt_card_compare(zc, pref_zc, weight, pref_weight))
			continue;
		for_each_zcrypt_queue(zq, zc) {
			/* check if device is online and eligible */
			if (!zq->online || !zq->ops->rsa_modexpo_crt)
				continue;
			if (zcrypt_queue_compare(zq, pref_zq,
						 weight, pref_weight))
				continue;
			pref_zc = zc;
			pref_zq = zq;
			pref_weight = weight;
		}
	}
	pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight);
	spin_unlock(&zcrypt_list_lock);

	if (!pref_zq) {
		rc = -ENODEV;
		goto out;
	}

	qid = pref_zq->queue->qid;
	rc = pref_zq->ops->rsa_modexpo_crt(pref_zq, crt);

	spin_lock(&zcrypt_list_lock);
	zcrypt_drop_queue(pref_zc, pref_zq, weight);
	spin_unlock(&zcrypt_list_lock);

out:
	trace_s390_zcrypt_rep(crt, func_code, rc,
			      AP_QID_CARD(qid), AP_QID_QUEUE(qid));
	return rc;
}

long zcrypt_send_cprb(struct ica_xcRB *xcRB)
{
	struct zcrypt_card *zc, *pref_zc;
	struct zcrypt_queue *zq, *pref_zq;
	struct ap_message ap_msg;
	unsigned int weight, pref_weight;
	unsigned int func_code;
	unsigned short *domain;
	int qid = 0, rc = -ENODEV;

	trace_s390_zcrypt_req(xcRB, TB_ZSECSENDCPRB);

	ap_init_message(&ap_msg);
	rc = get_cprb_fc(xcRB, &ap_msg, &func_code, &domain);
	if (rc)
		goto out;

	pref_zc = NULL;
	pref_zq = NULL;
	spin_lock(&zcrypt_list_lock);
	for_each_zcrypt_card(zc) {
		/* Check for online CCA cards */
		if (!zc->online || !(zc->card->functions & 0x10000000))
			continue;
		/* Check for user selected CCA card */
		if (xcRB->user_defined != AUTOSELECT &&
		    xcRB->user_defined != zc->card->id)
			continue;
		/* get weight index of the card device */
		weight = speed_idx_cca(func_code) * zc->speed_rating[SECKEY];
		if (zcrypt_card_compare(zc, pref_zc, weight, pref_weight))
			continue;
		for_each_zcrypt_queue(zq, zc) {
			/* check if device is online and eligible */
			if (!zq->online ||
			    !zq->ops->send_cprb ||
			    ((*domain != (unsigned short) AUTOSELECT) &&
			     (*domain != AP_QID_QUEUE(zq->queue->qid))))
				continue;
			if (zcrypt_queue_compare(zq, pref_zq,
						 weight, pref_weight))
				continue;
			pref_zc = zc;
			pref_zq = zq;
			pref_weight = weight;
		}
	}
	pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight);
	spin_unlock(&zcrypt_list_lock);

	if (!pref_zq) {
		rc = -ENODEV;
		goto out;
	}

	/* in case of auto select, provide the correct domain */
	qid = pref_zq->queue->qid;
	if (*domain == (unsigned short) AUTOSELECT)
		*domain = AP_QID_QUEUE(qid);

	rc = pref_zq->ops->send_cprb(pref_zq, xcRB, &ap_msg);

	spin_lock(&zcrypt_list_lock);
	zcrypt_drop_queue(pref_zc, pref_zq, weight);
	spin_unlock(&zcrypt_list_lock);

out:
	ap_release_message(&ap_msg);
	trace_s390_zcrypt_rep(xcRB, func_code, rc,
			      AP_QID_CARD(qid), AP_QID_QUEUE(qid));
	return rc;
}
EXPORT_SYMBOL(zcrypt_send_cprb);
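
/*
 * Usage sketch: zcrypt_send_cprb() is exported for in-kernel users
 * (e.g. the pkey code in this directory); user space reaches the same
 * path through the ZSECSENDCPRB ioctl on /dev/z90crypt, roughly
 * (hypothetical fragment, error handling omitted):
 *	int fd = open("/dev/z90crypt", O_RDWR);
 *	struct ica_xcRB xcrb = { ... };
 *	rc = ioctl(fd, ZSECSENDCPRB, &xcrb);
 */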

static bool is_desired_ep11_card(unsigned int dev_id,
				 unsigned short target_num,
				 struct ep11_target_dev *targets)
{
	while (target_num-- > 0) {
		if (dev_id == targets->ap_id)
			return true;
		targets++;
	}
	return false;
}

static bool is_desired_ep11_queue(unsigned int dev_qid,
				  unsigned short target_num,
				  struct ep11_target_dev *targets)
{
	while (target_num-- > 0) {
		if (AP_MKQID(targets->ap_id, targets->dom_id) == dev_qid)
			return true;
		targets++;
	}
	return false;
}

static long zcrypt_send_ep11_cprb(struct ep11_urb *xcrb)
{
	struct zcrypt_card *zc, *pref_zc;
	struct zcrypt_queue *zq, *pref_zq;
	struct ep11_target_dev *targets;
	unsigned short target_num;
	unsigned int weight, pref_weight;
	unsigned int func_code;
	struct ap_message ap_msg;
	int qid = 0, rc = -ENODEV;

	trace_s390_zcrypt_req(xcrb, TP_ZSENDEP11CPRB);

	ap_init_message(&ap_msg);

	target_num = (unsigned short) xcrb->targets_num;

	/* empty list indicates autoselect (all available targets) */
	targets = NULL;
	if (target_num != 0) {
		struct ep11_target_dev __user *uptr;

		targets = kcalloc(target_num, sizeof(*targets), GFP_KERNEL);
		if (!targets) {
			rc = -ENOMEM;
			goto out;
		}

		uptr = (struct ep11_target_dev __force __user *) xcrb->targets;
		if (copy_from_user(targets, uptr,
				   target_num * sizeof(*targets))) {
			rc = -EFAULT;
			goto out_free;
		}
	}

	rc = get_ep11cprb_fc(xcrb, &ap_msg, &func_code);
	if (rc)
		goto out_free;

	pref_zc = NULL;
	pref_zq = NULL;
	spin_lock(&zcrypt_list_lock);
	for_each_zcrypt_card(zc) {
		/* Check for online EP11 cards */
		if (!zc->online || !(zc->card->functions & 0x04000000))
			continue;
		/* Check for user selected EP11 card */
		if (targets &&
		    !is_desired_ep11_card(zc->card->id, target_num, targets))
			continue;
		/* get weight index of the card device */
		weight = speed_idx_ep11(func_code) * zc->speed_rating[SECKEY];
		if (zcrypt_card_compare(zc, pref_zc, weight, pref_weight))
			continue;
		for_each_zcrypt_queue(zq, zc) {
			/* check if device is online and eligible */
			if (!zq->online ||
			    !zq->ops->send_ep11_cprb ||
			    (targets &&
			     !is_desired_ep11_queue(zq->queue->qid,
						    target_num, targets)))
				continue;
			if (zcrypt_queue_compare(zq, pref_zq,
						 weight, pref_weight))
				continue;
			pref_zc = zc;
			pref_zq = zq;
			pref_weight = weight;
		}
	}
	pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight);
	spin_unlock(&zcrypt_list_lock);

	if (!pref_zq) {
		rc = -ENODEV;
		goto out_free;
	}

	qid = pref_zq->queue->qid;
	rc = pref_zq->ops->send_ep11_cprb(pref_zq, xcrb, &ap_msg);

	spin_lock(&zcrypt_list_lock);
	zcrypt_drop_queue(pref_zc, pref_zq, weight);
	spin_unlock(&zcrypt_list_lock);

out_free:
	kfree(targets);
out:
	ap_release_message(&ap_msg);
	trace_s390_zcrypt_rep(xcrb, func_code, rc,
			      AP_QID_CARD(qid), AP_QID_QUEUE(qid));
	return rc;
}

static long zcrypt_rng(char *buffer)
{
	struct zcrypt_card *zc, *pref_zc;
	struct zcrypt_queue *zq, *pref_zq;
	unsigned int weight, pref_weight;
	unsigned int func_code;
	struct ap_message ap_msg;
	unsigned int domain;
	int qid = 0, rc = -ENODEV;

	trace_s390_zcrypt_req(buffer, TP_HWRNGCPRB);

	ap_init_message(&ap_msg);
	rc = get_rng_fc(&ap_msg, &func_code, &domain);
	if (rc)
		goto out;

	pref_zc = NULL;
	pref_zq = NULL;
	spin_lock(&zcrypt_list_lock);
	for_each_zcrypt_card(zc) {
		/* Check for online CCA cards */
		if (!zc->online || !(zc->card->functions & 0x10000000))
			continue;
		/* get weight index of the card device */
		weight = zc->speed_rating[func_code];
		if (zcrypt_card_compare(zc, pref_zc, weight, pref_weight))
			continue;
		for_each_zcrypt_queue(zq, zc) {
			/* check if device is online and eligible */
			if (!zq->online || !zq->ops->rng)
				continue;
			if (zcrypt_queue_compare(zq, pref_zq,
						 weight, pref_weight))
				continue;
			pref_zc = zc;
			pref_zq = zq;
			pref_weight = weight;
		}
	}
	pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight);
	spin_unlock(&zcrypt_list_lock);

	if (!pref_zq) {
		rc = -ENODEV;
		goto out;
	}

	qid = pref_zq->queue->qid;
	rc = pref_zq->ops->rng(pref_zq, buffer, &ap_msg);

	spin_lock(&zcrypt_list_lock);
	zcrypt_drop_queue(pref_zc, pref_zq, weight);
	spin_unlock(&zcrypt_list_lock);

out:
	ap_release_message(&ap_msg);
	trace_s390_zcrypt_rep(buffer, func_code, rc,
			      AP_QID_CARD(qid), AP_QID_QUEUE(qid));
	return rc;
}

static void zcrypt_device_status_mask(struct zcrypt_device_status *devstatus)
{
	struct zcrypt_card *zc;
	struct zcrypt_queue *zq;
	struct zcrypt_device_status *stat;
	int card, queue;

	memset(devstatus, 0, MAX_ZDEV_ENTRIES
	       * sizeof(struct zcrypt_device_status));

	spin_lock(&zcrypt_list_lock);
	for_each_zcrypt_card(zc) {
		for_each_zcrypt_queue(zq, zc) {
			card = AP_QID_CARD(zq->queue->qid);
			if (card >= MAX_ZDEV_CARDIDS)
				continue;
			queue = AP_QID_QUEUE(zq->queue->qid);
			stat = &devstatus[card * AP_DOMAINS + queue];
			stat->hwtype = zc->card->ap_dev.device_type;
			stat->functions = zc->card->functions >> 26;
			stat->qid = zq->queue->qid;
			stat->online = zq->online ? 0x01 : 0x00;
		}
	}
	spin_unlock(&zcrypt_list_lock);
}

void zcrypt_device_status_mask_ext(struct zcrypt_device_status_ext *devstatus)
{
	struct zcrypt_card *zc;
	struct zcrypt_queue *zq;
	struct zcrypt_device_status_ext *stat;
	int card, queue;

	memset(devstatus, 0, MAX_ZDEV_ENTRIES_EXT
	       * sizeof(struct zcrypt_device_status_ext));

	spin_lock(&zcrypt_list_lock);
	for_each_zcrypt_card(zc) {
		for_each_zcrypt_queue(zq, zc) {
			card = AP_QID_CARD(zq->queue->qid);
			queue = AP_QID_QUEUE(zq->queue->qid);
			stat = &devstatus[card * AP_DOMAINS + queue];
			stat->hwtype = zc->card->ap_dev.device_type;
			stat->functions = zc->card->functions >> 26;
			stat->qid = zq->queue->qid;
			stat->online = zq->online ? 0x01 : 0x00;
		}
	}
	spin_unlock(&zcrypt_list_lock);
}
EXPORT_SYMBOL(zcrypt_device_status_mask_ext);
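
/*
 * The status array is a card x domain matrix indexed as
 *	devstatus[card * AP_DOMAINS + queue]
 * so a caller interested in card 2, domain 7 reads the entry at
 * index 2 * AP_DOMAINS + 7 (illustrative indices).
 */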

static void zcrypt_status_mask(char status[], size_t max_adapters)
{
	struct zcrypt_card *zc;
	struct zcrypt_queue *zq;
	int card;

	memset(status, 0, max_adapters);
	spin_lock(&zcrypt_list_lock);
	for_each_zcrypt_card(zc) {
		for_each_zcrypt_queue(zq, zc) {
			card = AP_QID_CARD(zq->queue->qid);
			if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index
			    || card >= max_adapters)
				continue;
			status[card] = zc->online ? zc->user_space_type : 0x0d;
		}
	}
	spin_unlock(&zcrypt_list_lock);
}

static void zcrypt_qdepth_mask(char qdepth[], size_t max_adapters)
{
	struct zcrypt_card *zc;
	struct zcrypt_queue *zq;
	int card;

	memset(qdepth, 0, max_adapters);
	spin_lock(&zcrypt_list_lock);
	local_bh_disable();
	for_each_zcrypt_card(zc) {
		for_each_zcrypt_queue(zq, zc) {
			card = AP_QID_CARD(zq->queue->qid);
			if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index
			    || card >= max_adapters)
				continue;
			spin_lock(&zq->queue->lock);
			qdepth[card] =
				zq->queue->pendingq_count +
				zq->queue->requestq_count;
			spin_unlock(&zq->queue->lock);
		}
	}
	local_bh_enable();
	spin_unlock(&zcrypt_list_lock);
}

static void zcrypt_perdev_reqcnt(int reqcnt[], size_t max_adapters)
{
	struct zcrypt_card *zc;
	struct zcrypt_queue *zq;
	int card;

	memset(reqcnt, 0, sizeof(int) * max_adapters);
	spin_lock(&zcrypt_list_lock);
	local_bh_disable();
	for_each_zcrypt_card(zc) {
		for_each_zcrypt_queue(zq, zc) {
			card = AP_QID_CARD(zq->queue->qid);
			if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index
			    || card >= max_adapters)
				continue;
			spin_lock(&zq->queue->lock);
			reqcnt[card] = zq->queue->total_request_count;
			spin_unlock(&zq->queue->lock);
		}
	}
	local_bh_enable();
	spin_unlock(&zcrypt_list_lock);
}

static int zcrypt_pendingq_count(void)
{
	struct zcrypt_card *zc;
	struct zcrypt_queue *zq;
	int pendingq_count;

	pendingq_count = 0;
	spin_lock(&zcrypt_list_lock);
	local_bh_disable();
	for_each_zcrypt_card(zc) {
		for_each_zcrypt_queue(zq, zc) {
			if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index)
				continue;
			spin_lock(&zq->queue->lock);
			pendingq_count += zq->queue->pendingq_count;
			spin_unlock(&zq->queue->lock);
		}
	}
	local_bh_enable();
	spin_unlock(&zcrypt_list_lock);
	return pendingq_count;
}

static int zcrypt_requestq_count(void)
{
	struct zcrypt_card *zc;
	struct zcrypt_queue *zq;
	int requestq_count;

	requestq_count = 0;
	spin_lock(&zcrypt_list_lock);
	local_bh_disable();
	for_each_zcrypt_card(zc) {
		for_each_zcrypt_queue(zq, zc) {
			if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index)
				continue;
			spin_lock(&zq->queue->lock);
			requestq_count += zq->queue->requestq_count;
			spin_unlock(&zq->queue->lock);
		}
	}
	local_bh_enable();
	spin_unlock(&zcrypt_list_lock);
	return requestq_count;
}

static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd,
				  unsigned long arg)
{
	int rc = 0;

	switch (cmd) {
	case ICARSAMODEXPO: {
		struct ica_rsa_modexpo __user *umex = (void __user *) arg;
		struct ica_rsa_modexpo mex;

		if (copy_from_user(&mex, umex, sizeof(mex)))
			return -EFAULT;
		do {
			rc = zcrypt_rsa_modexpo(&mex);
		} while (rc == -EAGAIN);
		/* on failure: retry once again after a requested rescan */
		if ((rc == -ENODEV) && (zcrypt_process_rescan()))
			do {
				rc = zcrypt_rsa_modexpo(&mex);
			} while (rc == -EAGAIN);
		if (rc) {
			ZCRYPT_DBF(DBF_DEBUG, "ioctl ICARSAMODEXPO rc=%d\n", rc);
			return rc;
		}
		return put_user(mex.outputdatalength, &umex->outputdatalength);
	}
	case ICARSACRT: {
		struct ica_rsa_modexpo_crt __user *ucrt = (void __user *) arg;
		struct ica_rsa_modexpo_crt crt;

		if (copy_from_user(&crt, ucrt, sizeof(crt)))
			return -EFAULT;
		do {
			rc = zcrypt_rsa_crt(&crt);
		} while (rc == -EAGAIN);
		/* on failure: retry once again after a requested rescan */
		if ((rc == -ENODEV) && (zcrypt_process_rescan()))
			do {
				rc = zcrypt_rsa_crt(&crt);
			} while (rc == -EAGAIN);
		if (rc) {
			ZCRYPT_DBF(DBF_DEBUG, "ioctl ICARSACRT rc=%d\n", rc);
			return rc;
		}
		return put_user(crt.outputdatalength, &ucrt->outputdatalength);
	}
	case ZSECSENDCPRB: {
		struct ica_xcRB __user *uxcRB = (void __user *) arg;
		struct ica_xcRB xcRB;

		if (copy_from_user(&xcRB, uxcRB, sizeof(xcRB)))
			return -EFAULT;
		do {
			rc = zcrypt_send_cprb(&xcRB);
		} while (rc == -EAGAIN);
		/* on failure: retry once again after a requested rescan */
		if ((rc == -ENODEV) && (zcrypt_process_rescan()))
			do {
				rc = zcrypt_send_cprb(&xcRB);
			} while (rc == -EAGAIN);
		if (rc)
			ZCRYPT_DBF(DBF_DEBUG, "ioctl ZSENDCPRB rc=%d\n", rc);
		if (copy_to_user(uxcRB, &xcRB, sizeof(xcRB)))
			return -EFAULT;
		return rc;
	}
	case ZSENDEP11CPRB: {
		struct ep11_urb __user *uxcrb = (void __user *)arg;
		struct ep11_urb xcrb;

		if (copy_from_user(&xcrb, uxcrb, sizeof(xcrb)))
			return -EFAULT;
		do {
			rc = zcrypt_send_ep11_cprb(&xcrb);
		} while (rc == -EAGAIN);
		/* on failure: retry once again after a requested rescan */
		if ((rc == -ENODEV) && (zcrypt_process_rescan()))
			do {
				rc = zcrypt_send_ep11_cprb(&xcrb);
			} while (rc == -EAGAIN);
		if (rc)
			ZCRYPT_DBF(DBF_DEBUG, "ioctl ZSENDEP11CPRB rc=%d\n", rc);
		if (copy_to_user(uxcrb, &xcrb, sizeof(xcrb)))
			return -EFAULT;
		return rc;
	}
	case ZCRYPT_DEVICE_STATUS: {
		struct zcrypt_device_status_ext *device_status;
		size_t total_size = MAX_ZDEV_ENTRIES_EXT
			* sizeof(struct zcrypt_device_status_ext);

		device_status = kzalloc(total_size, GFP_KERNEL);
		if (!device_status)
			return -ENOMEM;
		zcrypt_device_status_mask_ext(device_status);
		if (copy_to_user((char __user *) arg, device_status,
				 total_size))
			rc = -EFAULT;
		kfree(device_status);
		return rc;
	}
	case ZCRYPT_STATUS_MASK: {
		char status[AP_DEVICES];

		zcrypt_status_mask(status, AP_DEVICES);
		if (copy_to_user((char __user *) arg, status, sizeof(status)))
			return -EFAULT;
		return 0;
	}
	case ZCRYPT_QDEPTH_MASK: {
		char qdepth[AP_DEVICES];

		zcrypt_qdepth_mask(qdepth, AP_DEVICES);
		if (copy_to_user((char __user *) arg, qdepth, sizeof(qdepth)))
			return -EFAULT;
		return 0;
	}
	case ZCRYPT_PERDEV_REQCNT: {
		int *reqcnt;

		reqcnt = kcalloc(AP_DEVICES, sizeof(int), GFP_KERNEL);
		if (!reqcnt)
			return -ENOMEM;
		zcrypt_perdev_reqcnt(reqcnt, AP_DEVICES);
		if (copy_to_user((int __user *) arg, reqcnt,
				 sizeof(int) * AP_DEVICES))
			rc = -EFAULT;
		kfree(reqcnt);
		return rc;
	}
	case Z90STAT_REQUESTQ_COUNT:
		return put_user(zcrypt_requestq_count(), (int __user *) arg);
	case Z90STAT_PENDINGQ_COUNT:
		return put_user(zcrypt_pendingq_count(), (int __user *) arg);
	case Z90STAT_TOTALOPEN_COUNT:
		return put_user(atomic_read(&zcrypt_open_count),
				(int __user *) arg);
	case Z90STAT_DOMAIN_INDEX:
		return put_user(ap_domain_index, (int __user *) arg);
	/*
	 * Deprecated ioctls
	 */
	case ZDEVICESTATUS: {
		/* the old ioctl supports only 64 adapters */
		struct zcrypt_device_status *device_status;
		size_t total_size = MAX_ZDEV_ENTRIES
			* sizeof(struct zcrypt_device_status);

		device_status = kzalloc(total_size, GFP_KERNEL);
		if (!device_status)
			return -ENOMEM;
		zcrypt_device_status_mask(device_status);
		if (copy_to_user((char __user *) arg, device_status,
				 total_size))
			rc = -EFAULT;
		kfree(device_status);
		return rc;
	}
	case Z90STAT_STATUS_MASK: {
		/* the old ioctl supports only 64 adapters */
		char status[MAX_ZDEV_CARDIDS];

		zcrypt_status_mask(status, MAX_ZDEV_CARDIDS);
		if (copy_to_user((char __user *) arg, status, sizeof(status)))
			return -EFAULT;
		return 0;
	}
	case Z90STAT_QDEPTH_MASK: {
		/* the old ioctl supports only 64 adapters */
		char qdepth[MAX_ZDEV_CARDIDS];

		zcrypt_qdepth_mask(qdepth, MAX_ZDEV_CARDIDS);
		if (copy_to_user((char __user *) arg, qdepth, sizeof(qdepth)))
			return -EFAULT;
		return 0;
	}
	case Z90STAT_PERDEV_REQCNT: {
		/* the old ioctl supports only 64 adapters */
		int reqcnt[MAX_ZDEV_CARDIDS];

		zcrypt_perdev_reqcnt(reqcnt, MAX_ZDEV_CARDIDS);
		if (copy_to_user((int __user *) arg, reqcnt, sizeof(reqcnt)))
			return -EFAULT;
		return 0;
	}
	default:
		/* unknown ioctl number */
		ZCRYPT_DBF(DBF_DEBUG, "unknown ioctl 0x%08x\n", cmd);
		return -ENOIOCTLCMD;
	}
}
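
/*
 * User-space usage sketch for the RSA ioctls (hypothetical values,
 * error handling omitted):
 *	struct ica_rsa_modexpo mex = {
 *		.inputdata  = in,  .inputdatalength  = 256,
 *		.outputdata = out, .outputdatalength = 256,
 *		.b_key	    = exp, .n_modulus	     = mod,
 *	};
 *	rc = ioctl(fd, ICARSAMODEXPO, &mex);
 * On success the kernel writes the result through mex.outputdata and
 * updates outputdatalength as seen above.
 */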

/*
 * ioctl32 conversion routines
 */
struct compat_ica_rsa_modexpo {
	compat_uptr_t	inputdata;
	unsigned int	inputdatalength;
	compat_uptr_t	outputdata;
	unsigned int	outputdatalength;
	compat_uptr_t	b_key;
	compat_uptr_t	n_modulus;
};

static long trans_modexpo32(struct file *filp, unsigned int cmd,
			    unsigned long arg)
{
	struct compat_ica_rsa_modexpo __user *umex32 = compat_ptr(arg);
	struct compat_ica_rsa_modexpo mex32;
	struct ica_rsa_modexpo mex64;
	long rc;

	if (copy_from_user(&mex32, umex32, sizeof(mex32)))
		return -EFAULT;
	mex64.inputdata = compat_ptr(mex32.inputdata);
	mex64.inputdatalength = mex32.inputdatalength;
	mex64.outputdata = compat_ptr(mex32.outputdata);
	mex64.outputdatalength = mex32.outputdatalength;
	mex64.b_key = compat_ptr(mex32.b_key);
	mex64.n_modulus = compat_ptr(mex32.n_modulus);
	do {
		rc = zcrypt_rsa_modexpo(&mex64);
	} while (rc == -EAGAIN);
	/* on failure: retry once again after a requested rescan */
	if ((rc == -ENODEV) && (zcrypt_process_rescan()))
		do {
			rc = zcrypt_rsa_modexpo(&mex64);
		} while (rc == -EAGAIN);
	if (rc)
		return rc;
	return put_user(mex64.outputdatalength,
			&umex32->outputdatalength);
}

struct compat_ica_rsa_modexpo_crt {
	compat_uptr_t	inputdata;
	unsigned int	inputdatalength;
	compat_uptr_t	outputdata;
	unsigned int	outputdatalength;
	compat_uptr_t	bp_key;
	compat_uptr_t	bq_key;
	compat_uptr_t	np_prime;
	compat_uptr_t	nq_prime;
	compat_uptr_t	u_mult_inv;
};

static long trans_modexpo_crt32(struct file *filp, unsigned int cmd,
				unsigned long arg)
{
	struct compat_ica_rsa_modexpo_crt __user *ucrt32 = compat_ptr(arg);
	struct compat_ica_rsa_modexpo_crt crt32;
	struct ica_rsa_modexpo_crt crt64;
	long rc;

	if (copy_from_user(&crt32, ucrt32, sizeof(crt32)))
		return -EFAULT;
	crt64.inputdata = compat_ptr(crt32.inputdata);
	crt64.inputdatalength = crt32.inputdatalength;
	crt64.outputdata = compat_ptr(crt32.outputdata);
	crt64.outputdatalength = crt32.outputdatalength;
	crt64.bp_key = compat_ptr(crt32.bp_key);
	crt64.bq_key = compat_ptr(crt32.bq_key);
	crt64.np_prime = compat_ptr(crt32.np_prime);
	crt64.nq_prime = compat_ptr(crt32.nq_prime);
	crt64.u_mult_inv = compat_ptr(crt32.u_mult_inv);
	do {
		rc = zcrypt_rsa_crt(&crt64);
	} while (rc == -EAGAIN);
	/* on failure: retry once again after a requested rescan */
	if ((rc == -ENODEV) && (zcrypt_process_rescan()))
		do {
			rc = zcrypt_rsa_crt(&crt64);
		} while (rc == -EAGAIN);
	if (rc)
		return rc;
	return put_user(crt64.outputdatalength,
			&ucrt32->outputdatalength);
}

struct compat_ica_xcRB {
	unsigned short	agent_ID;
	unsigned int	user_defined;
	unsigned short	request_ID;
	unsigned int	request_control_blk_length;
	unsigned char	padding1[16 - sizeof(compat_uptr_t)];
	compat_uptr_t	request_control_blk_addr;
	unsigned int	request_data_length;
	char		padding2[16 - sizeof(compat_uptr_t)];
	compat_uptr_t	request_data_address;
	unsigned int	reply_control_blk_length;
	char		padding3[16 - sizeof(compat_uptr_t)];
	compat_uptr_t	reply_control_blk_addr;
	unsigned int	reply_data_length;
	char		padding4[16 - sizeof(compat_uptr_t)];
	compat_uptr_t	reply_data_addr;
	unsigned short	priority_window;
	unsigned int	status;
} __attribute__((packed));
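
/*
 * Note: compat_uptr_t keeps the user pointers 32 bits wide, and the
 * explicit padding preserves the field offsets of the 64-bit ica_xcRB
 * layout, so trans_xcRB32() below can convert the structure field by
 * field with compat_ptr().
 */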

static long trans_xcRB32(struct file *filp, unsigned int cmd,
			 unsigned long arg)
{
	struct compat_ica_xcRB __user *uxcRB32 = compat_ptr(arg);
	struct compat_ica_xcRB xcRB32;
	struct ica_xcRB xcRB64;
	long rc;

	if (copy_from_user(&xcRB32, uxcRB32, sizeof(xcRB32)))
		return -EFAULT;
	xcRB64.agent_ID = xcRB32.agent_ID;
	xcRB64.user_defined = xcRB32.user_defined;
	xcRB64.request_ID = xcRB32.request_ID;
	xcRB64.request_control_blk_length =
		xcRB32.request_control_blk_length;
	xcRB64.request_control_blk_addr =
		compat_ptr(xcRB32.request_control_blk_addr);
	xcRB64.request_data_length =
		xcRB32.request_data_length;
	xcRB64.request_data_address =
		compat_ptr(xcRB32.request_data_address);
	xcRB64.reply_control_blk_length =
		xcRB32.reply_control_blk_length;
	xcRB64.reply_control_blk_addr =
		compat_ptr(xcRB32.reply_control_blk_addr);
	xcRB64.reply_data_length = xcRB32.reply_data_length;
	xcRB64.reply_data_addr =
		compat_ptr(xcRB32.reply_data_addr);
	xcRB64.priority_window = xcRB32.priority_window;
	xcRB64.status = xcRB32.status;
	do {
		rc = zcrypt_send_cprb(&xcRB64);
	} while (rc == -EAGAIN);
	/* on failure: retry once again after a requested rescan */
	if ((rc == -ENODEV) && (zcrypt_process_rescan()))
		do {
			rc = zcrypt_send_cprb(&xcRB64);
		} while (rc == -EAGAIN);
	xcRB32.reply_control_blk_length = xcRB64.reply_control_blk_length;
	xcRB32.reply_data_length = xcRB64.reply_data_length;
	xcRB32.status = xcRB64.status;
	if (copy_to_user(uxcRB32, &xcRB32, sizeof(xcRB32)))
		return -EFAULT;
	return rc;
}

static long zcrypt_compat_ioctl(struct file *filp, unsigned int cmd,
				unsigned long arg)
{
	if (cmd == ICARSAMODEXPO)
		return trans_modexpo32(filp, cmd, arg);
	if (cmd == ICARSACRT)
		return trans_modexpo_crt32(filp, cmd, arg);
	if (cmd == ZSECSENDCPRB)
		return trans_xcRB32(filp, cmd, arg);
	return zcrypt_unlocked_ioctl(filp, cmd, arg);
}

/*
 * Misc device file operations.
 */
static const struct file_operations zcrypt_fops = {
	.owner		= THIS_MODULE,
	.read		= zcrypt_read,
	.write		= zcrypt_write,
	.unlocked_ioctl	= zcrypt_unlocked_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= zcrypt_compat_ioctl,
#endif
	.open		= zcrypt_open,
	.release	= zcrypt_release,
	.llseek		= no_llseek,
};

static struct miscdevice zcrypt_misc_device = {
	.minor	= MISC_DYNAMIC_MINOR,
	.name	= "z90crypt",
	.fops	= &zcrypt_fops,
};
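
/*
 * Registering the misc device creates the /dev/z90crypt node (via
 * udev) with a dynamically assigned minor; all user-space API traffic
 * from the ioctl examples above goes through this single node.
 */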

static int zcrypt_rng_device_count;
static u32 *zcrypt_rng_buffer;
static int zcrypt_rng_buffer_index;
static DEFINE_MUTEX(zcrypt_rng_mutex);

static int zcrypt_rng_data_read(struct hwrng *rng, u32 *data)
{
	int rc;

	/*
	 * We don't need locking here because the RNG API guarantees serialized
	 * read method calls.
	 */
	if (zcrypt_rng_buffer_index == 0) {
		rc = zcrypt_rng((char *) zcrypt_rng_buffer);
		/* on failure: retry once again after a requested rescan */
		if ((rc == -ENODEV) && (zcrypt_process_rescan()))
			rc = zcrypt_rng((char *) zcrypt_rng_buffer);
		if (rc < 0)
			return -EIO;
		zcrypt_rng_buffer_index = rc / sizeof(*data);
	}
	*data = zcrypt_rng_buffer[--zcrypt_rng_buffer_index];
	return sizeof(*data);
}
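
/*
 * Example of the refill arithmetic: the buffer is one zeroed page, so
 * e.g. a successful zcrypt_rng() call returning 4096 bytes yields an
 * index of 1024 u32 words; each read then hands out one word, counting
 * the index down until 0 triggers the next hardware request.
 */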

static struct hwrng zcrypt_rng_dev = {
	.name		= "zcrypt",
	.data_read	= zcrypt_rng_data_read,
	.quality	= 990,
};

int zcrypt_rng_device_add(void)
{
	int rc = 0;

	mutex_lock(&zcrypt_rng_mutex);
	if (zcrypt_rng_device_count == 0) {
		zcrypt_rng_buffer = (u32 *) get_zeroed_page(GFP_KERNEL);
		if (!zcrypt_rng_buffer) {
			rc = -ENOMEM;
			goto out;
		}
		zcrypt_rng_buffer_index = 0;
		if (!zcrypt_hwrng_seed)
			zcrypt_rng_dev.quality = 0;
		rc = hwrng_register(&zcrypt_rng_dev);
		if (rc)
			goto out_free;
		zcrypt_rng_device_count = 1;
	} else
		zcrypt_rng_device_count++;
	mutex_unlock(&zcrypt_rng_mutex);
	return 0;

out_free:
	free_page((unsigned long) zcrypt_rng_buffer);
out:
	mutex_unlock(&zcrypt_rng_mutex);
	return rc;
}

void zcrypt_rng_device_remove(void)
{
	mutex_lock(&zcrypt_rng_mutex);
	zcrypt_rng_device_count--;
	if (zcrypt_rng_device_count == 0) {
		hwrng_unregister(&zcrypt_rng_dev);
		free_page((unsigned long) zcrypt_rng_buffer);
	}
	mutex_unlock(&zcrypt_rng_mutex);
}

int __init zcrypt_debug_init(void)
{
	zcrypt_dbf_info = debug_register("zcrypt", 1, 1,
					 DBF_MAX_SPRINTF_ARGS * sizeof(long));
	debug_register_view(zcrypt_dbf_info, &debug_sprintf_view);
	debug_set_level(zcrypt_dbf_info, DBF_ERR);

	return 0;
}

void zcrypt_debug_exit(void)
{
	debug_unregister(zcrypt_dbf_info);
}

/*
 * zcrypt_api_init(): Module initialization.
 *
 * The module initialization code.
 */
int __init zcrypt_api_init(void)
{
	int rc;

	rc = zcrypt_debug_init();
	if (rc)
		goto out;

	/* Register the request sprayer. */
	rc = misc_register(&zcrypt_misc_device);
	if (rc < 0)
		goto out;

	zcrypt_msgtype6_init();
	zcrypt_msgtype50_init();
	return 0;

out:
	return rc;
}

/*
 * zcrypt_api_exit(): Module termination.
 *
 * The module termination code.
 */
void __exit zcrypt_api_exit(void)
{
	misc_deregister(&zcrypt_misc_device);
	zcrypt_msgtype6_exit();
	zcrypt_msgtype50_exit();
	zcrypt_debug_exit();
}

module_init(zcrypt_api_init);
module_exit(zcrypt_api_exit);