/* SPDX-License-Identifier: GPL-2.0-or-later */
/* Common header for Virtio crypto device.
 *
 * Copyright 2016 HUAWEI TECHNOLOGIES CO., LTD.
 */
#ifndef _VIRTIO_CRYPTO_COMMON_H
#define _VIRTIO_CRYPTO_COMMON_H
#include <linux/virtio.h>
#include <linux/crypto.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <crypto/aead.h>
#include <crypto/aes.h>
#include <crypto/engine.h>
#include <uapi/linux/virtio_crypto.h>
/* Internal representation of a data virtqueue */
struct data_queue {
	/* Virtqueue associated with this send queue */
	struct virtqueue *vq;

	/* To protect the vq operations for the dataq */
	spinlock_t lock;

	/* Name of the tx queue: dataq.$index */
	char name[32];

	struct crypto_engine *engine;
	struct tasklet_struct done_task;
};
struct virtio_crypto {
	struct virtio_device *vdev;
	struct virtqueue *ctrl_vq;
	struct data_queue *data_vq;

	/* Work struct for config space updates */
	struct work_struct config_work;

	/* To protect the vq operations for the controlq */
	spinlock_t ctrl_lock;

	/* Maximum number of data queues supported by the device */
	u32 max_data_queues;

	/* Number of queues currently used by the driver */
	u32 curr_queue;

	/*
	 * Specifies the services mask which the device supports,
	 * see VIRTIO_CRYPTO_SERVICE_*
	 */
	u32 crypto_services;

	/* Detailed algorithms mask */
	u32 cipher_algo_l;
	u32 cipher_algo_h;
	u32 hash_algo;
	u32 mac_algo_l;
	u32 mac_algo_h;
	u32 aead_algo;
	u32 akcipher_algo;

	/* Maximum length of cipher key */
	u32 max_cipher_key_len;
	/* Maximum length of authenticated key */
	u32 max_auth_key_len;
	/* Maximum size per request */
	u64 max_size;

	unsigned long status;
	atomic_t ref_count;
	struct list_head list;
	struct module *owner;
	uint8_t dev_id;

	/* Is the affinity hint set for the virtqueues? */
	bool affinity_hint_set;
};
struct virtio_crypto_sym_session_info {
	/* Backend session id, which comes from the host side */
	__u64 session_id;
};

/*
 * Note: there are padding fields in the requests; clear them to zero before
 * sending them to the host to avoid divulging any information, e.g.
 * virtio_crypto_ctrl_request::ctrl::u::destroy_session::padding[48].
 */
struct virtio_crypto_ctrl_request {
	struct virtio_crypto_op_ctrl_req ctrl;
	struct virtio_crypto_session_input input;
	struct virtio_crypto_inhdr ctrl_status;
	struct completion compl;
};
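
/*
 * Minimal usage sketch for the note above (illustrative only, not a
 * definition in this header): allocating the control request zeroed
 * guarantees that padding bytes such as destroy_session::padding[48]
 * never carry stale kernel memory to the host.
 *
 *	struct virtio_crypto_ctrl_request *vc_ctrl_req;
 *
 *	vc_ctrl_req = kzalloc(sizeof(*vc_ctrl_req), GFP_KERNEL);
 *	if (!vc_ctrl_req)
 *		return -ENOMEM;
 *	// fill vc_ctrl_req->ctrl, then submit it on the control virtqueue
 */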
struct virtio_crypto_request;
typedef void (*virtio_crypto_data_callback)
		(struct virtio_crypto_request *vc_req, int len);
struct virtio_crypto_request {
	uint8_t status;
	struct virtio_crypto_op_data_req *req_data;
	struct scatterlist **sgs;
	struct data_queue *dataq;
	virtio_crypto_data_callback alg_cb;
};
int virtcrypto_devmgr_add_dev(struct virtio_crypto *vcrypto_dev);
struct list_head *virtcrypto_devmgr_get_head(void);
void virtcrypto_devmgr_rm_dev(struct virtio_crypto *vcrypto_dev);
struct virtio_crypto *virtcrypto_devmgr_get_first(void);
int virtcrypto_dev_in_use(struct virtio_crypto *vcrypto_dev);
int virtcrypto_dev_get(struct virtio_crypto *vcrypto_dev);
void virtcrypto_dev_put(struct virtio_crypto *vcrypto_dev);
int virtcrypto_dev_started(struct virtio_crypto *vcrypto_dev);
bool virtcrypto_algo_is_supported(struct virtio_crypto *vcrypto_dev,
				  uint32_t service,
				  uint32_t algo);
struct virtio_crypto *virtcrypto_get_dev_node(int node,
					      uint32_t service,
					      uint32_t algo);
int virtcrypto_dev_start(struct virtio_crypto *vcrypto);
void virtcrypto_dev_stop(struct virtio_crypto *vcrypto);
int virtio_crypto_skcipher_crypt_req(
	struct crypto_engine *engine, void *vreq);
void
virtcrypto_clear_request(struct virtio_crypto_request *vc_req);
/* Pick a node hint based on the physical package of the current CPU */
static inline int virtio_crypto_get_current_node(void)
{
	int cpu, node;

	cpu = get_cpu();
	node = topology_physical_package_id(cpu);
	put_cpu();

	return node;
}
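
/*
 * Illustrative pairing only (assumes the CIPHER/AES-CBC constants from
 * <uapi/linux/virtio_crypto.h>): callers typically feed the current node
 * into virtcrypto_get_dev_node() to pick a nearby device, e.g.
 *
 *	vcrypto = virtcrypto_get_dev_node(virtio_crypto_get_current_node(),
 *					  VIRTIO_CRYPTO_SERVICE_CIPHER,
 *					  VIRTIO_CRYPTO_CIPHER_AES_CBC);
 */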
int virtio_crypto_skcipher_algs_register(struct virtio_crypto *vcrypto);
void virtio_crypto_skcipher_algs_unregister(struct virtio_crypto *vcrypto);
int virtio_crypto_akcipher_algs_register(struct virtio_crypto *vcrypto);
void virtio_crypto_akcipher_algs_unregister(struct virtio_crypto *vcrypto);
int virtio_crypto_ctrl_vq_request(struct virtio_crypto *vcrypto, struct scatterlist *sgs[],
				  unsigned int out_sgs, unsigned int in_sgs,
				  struct virtio_crypto_ctrl_request *vc_ctrl_req);
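
/*
 * Usage sketch only (local sg names are illustrative): the caller builds a
 * small sgs[] array with the out_sgs entries followed by the in_sgs entries
 * and hands it in together with the control request, e.g.
 *
 *	struct scatterlist outhdr_sg, inhdr_sg, *sgs[2];
 *	unsigned int num_out = 0, num_in = 0;
 *
 *	sg_init_one(&outhdr_sg, &vc_ctrl_req->ctrl, sizeof(vc_ctrl_req->ctrl));
 *	sgs[num_out++] = &outhdr_sg;
 *	sg_init_one(&inhdr_sg, &vc_ctrl_req->input, sizeof(vc_ctrl_req->input));
 *	sgs[num_out + num_in++] = &inhdr_sg;
 *
 *	err = virtio_crypto_ctrl_vq_request(vcrypto, sgs, num_out, num_in,
 *					    vc_ctrl_req);
 */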
#endif /* _VIRTIO_CRYPTO_COMMON_H */