1 /* SPDX-License-Identifier: GPL-2.0+ */
/*
 * Copyright IBM Corp. 2001, 2019
 * Author(s): Robert Burroughs
 *	      Eric Rossman (edrossma@us.ibm.com)
 *	      Cornelia Huck <cornelia.huck@de.ibm.com>
 *
 * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
 * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
 *				 Ralph Wuerthner <rwuerthn@de.ibm.com>
 * MSGTYPE restruct:		 Holger Dengler <hd@linux.vnet.ibm.com>
 */
14 #ifndef _ZCRYPT_API_H_
15 #define _ZCRYPT_API_H_
17 #include <linux/atomic.h>
18 #include <asm/debug.h>
19 #include <asm/zcrypt.h>
/*
 * Supported device types
 */
/* User-space visible device type identifiers for the supported CEX cards. */
#define ZCRYPT_CEX2C		 5
#define ZCRYPT_CEX2A		 6
#define ZCRYPT_CEX3C		 7
#define ZCRYPT_CEX3A		 8
#define ZCRYPT_CEX4		10
#define ZCRYPT_CEX5		11
#define ZCRYPT_CEX6		12
#define ZCRYPT_CEX7		13
/*
 * Large random numbers are pulled in 4096 byte chunks from the crypto cards
 * and stored in a page. Be careful when increasing this buffer due to size
 * limitations for AP requests.
 */
/* Size of one RNG pull from a crypto card; fits in one page. */
#define ZCRYPT_RNG_BUFFER_SIZE	4096
/*
 * The zcrypt_wait_api_operational() function waits this
 * amount in milliseconds for ap_wait_apqn_bindings_complete().
 * Also on a cprb send failure with ENODEV the send functions
 * trigger an ap bus rescan and wait this time in milliseconds
 * for ap_wait_apqn_bindings_complete() before resending.
 */
/* Max wait (ms) for AP queue/driver bindings to complete — see comment above. */
#define ZCRYPT_WAIT_BINDINGS_COMPLETE_MS	30000
/*
 * Identifier for Crypto Request Performance Index
 */
/* struct to hold tracking information for a userspace request/response */
struct zcrypt_track {
	int again_counter;		/* retry attempts counter */
	int last_qid;			/* last qid used */
	int last_rc;			/* last return code */
};
/* defines related to message tracking */
#define TRACK_AGAIN_MAX				10
#define TRACK_AGAIN_CARD_WEIGHT_PENALTY		1000
#define TRACK_AGAIN_QUEUE_WEIGHT_PENALTY	10000
80 long (*rsa_modexpo
)(struct zcrypt_queue
*, struct ica_rsa_modexpo
*,
82 long (*rsa_modexpo_crt
)(struct zcrypt_queue
*,
83 struct ica_rsa_modexpo_crt
*,
85 long (*send_cprb
)(bool userspace
, struct zcrypt_queue
*, struct ica_xcRB
*,
87 long (*send_ep11_cprb
)(bool userspace
, struct zcrypt_queue
*, struct ep11_urb
*,
89 long (*rng
)(struct zcrypt_queue
*, char *, struct ap_message
*);
90 struct list_head list
; /* zcrypt ops list. */
97 struct list_head list
; /* Device list. */
98 struct list_head zqueues
; /* List of zcrypt queues */
99 struct kref refcount
; /* device refcounting */
100 struct ap_card
*card
; /* The "real" ap card device. */
101 int online
; /* User online/offline */
103 int user_space_type
; /* User space device id. */
104 char *type_string
; /* User space device name. */
105 int min_mod_size
; /* Min number of bits. */
106 int max_mod_size
; /* Max number of bits. */
107 int max_exp_bit_length
;
108 const int *speed_rating
; /* Speed idx of crypto ops. */
109 atomic_t load
; /* Utilization of the crypto device */
111 int request_count
; /* # current requests. */
114 struct zcrypt_queue
{
115 struct list_head list
; /* Device list. */
116 struct kref refcount
; /* device refcounting */
117 struct zcrypt_card
*zcard
;
118 struct zcrypt_ops
*ops
; /* Crypto operations. */
119 struct ap_queue
*queue
; /* The "real" ap queue device. */
120 int online
; /* User online/offline */
122 atomic_t load
; /* Utilization of the crypto device */
124 int request_count
; /* # current requests. */
126 struct ap_message reply
; /* Per-device reply structure. */
129 /* transport layer rescanning */
130 extern atomic_t zcrypt_rescan_req
;
132 extern spinlock_t zcrypt_list_lock
;
133 extern struct list_head zcrypt_card_list
;
135 #define for_each_zcrypt_card(_zc) \
136 list_for_each_entry(_zc, &zcrypt_card_list, list)
138 #define for_each_zcrypt_queue(_zq, _zc) \
139 list_for_each_entry(_zq, &(_zc)->zqueues, list)
141 struct zcrypt_card
*zcrypt_card_alloc(void);
142 void zcrypt_card_free(struct zcrypt_card
*);
143 void zcrypt_card_get(struct zcrypt_card
*);
144 int zcrypt_card_put(struct zcrypt_card
*);
145 int zcrypt_card_register(struct zcrypt_card
*);
146 void zcrypt_card_unregister(struct zcrypt_card
*);
148 struct zcrypt_queue
*zcrypt_queue_alloc(size_t);
149 void zcrypt_queue_free(struct zcrypt_queue
*);
150 void zcrypt_queue_get(struct zcrypt_queue
*);
151 int zcrypt_queue_put(struct zcrypt_queue
*);
152 int zcrypt_queue_register(struct zcrypt_queue
*);
153 void zcrypt_queue_unregister(struct zcrypt_queue
*);
154 bool zcrypt_queue_force_online(struct zcrypt_queue
*zq
, int online
);
156 int zcrypt_rng_device_add(void);
157 void zcrypt_rng_device_remove(void);
159 void zcrypt_msgtype_register(struct zcrypt_ops
*);
160 void zcrypt_msgtype_unregister(struct zcrypt_ops
*);
161 struct zcrypt_ops
*zcrypt_msgtype(unsigned char *, int);
162 int zcrypt_api_init(void);
163 void zcrypt_api_exit(void);
164 long zcrypt_send_cprb(struct ica_xcRB
*xcRB
);
165 long zcrypt_send_ep11_cprb(struct ep11_urb
*urb
);
166 void zcrypt_device_status_mask_ext(struct zcrypt_device_status_ext
*devstatus
);
167 int zcrypt_device_status_ext(int card
, int queue
,
168 struct zcrypt_device_status_ext
*devstatus
);
170 int zcrypt_wait_api_operational(void);
172 static inline unsigned long z_copy_from_user(bool userspace
,
174 const void __user
*from
,
177 if (likely(userspace
))
178 return copy_from_user(to
, from
, n
);
179 memcpy(to
, (void __force
*)from
, n
);
183 static inline unsigned long z_copy_to_user(bool userspace
,
188 if (likely(userspace
))
189 return copy_to_user(to
, from
, n
);
190 memcpy((void __force
*)to
, from
, n
);
194 #endif /* _ZCRYPT_API_H_ */