1 /* SPDX-License-Identifier: GPL-2.0 */
2 /* Copyright (c) 2016-2017 Hisilicon Limited. */
7 #include <crypto/algapi.h>
8 #include <linux/kfifo.h>
/* Maximum number of scatter gather entries in a single hardware SGL */
#define SEC_MAX_SGE_NUM		64
/* Each queue is backed by three hardware rings (cmd, outorder, debug) */
#define SEC_HW_RING_NUM		3

/* Indices of the per-queue hardware rings */
#define SEC_CMD_RING		0
#define SEC_OUTORDER_RING	1
#define SEC_DBG_RING		2

/* A reasonable length to balance memory use against flexibility */
#define SEC_QUEUE_LEN		512
/*
 * Fields of word 0 of the SEC hardware block descriptor (BD).
 * *_M macros are GENMASK() field masks; the matching *_S macros give the
 * shift of the field's least significant bit.
 */
#define SEC_BD_W0_T_LEN_M	GENMASK(4, 0)
#define SEC_BD_W0_T_LEN_S	0

/* Cipher operating width - legal values depend on the selected cipher */
#define SEC_BD_W0_C_WIDTH_M	GENMASK(6, 5)
#define SEC_BD_W0_C_WIDTH_S	5
#define SEC_C_WIDTH_AES_128BIT	0
#define SEC_C_WIDTH_AES_8BIT	1
#define SEC_C_WIDTH_AES_1BIT	2
#define SEC_C_WIDTH_DES_64BIT	0
#define SEC_C_WIDTH_DES_8BIT	1
#define SEC_C_WIDTH_DES_1BIT	2

/* Cipher chaining mode */
#define SEC_BD_W0_C_MODE_M	GENMASK(9, 7)
#define SEC_BD_W0_C_MODE_S	7
#define SEC_C_MODE_ECB		0
#define SEC_C_MODE_CBC		1
#define SEC_C_MODE_CTR		4
#define SEC_C_MODE_CCM		5
#define SEC_C_MODE_GCM		6
#define SEC_C_MODE_XTS		7

#define SEC_BD_W0_SEQ		BIT(10)
#define SEC_BD_W0_DE		BIT(11)
#define SEC_BD_W0_DAT_SKIP_M	GENMASK(13, 12)
#define SEC_BD_W0_DAT_SKIP_S	12
#define SEC_BD_W0_C_GRAN_SIZE_19_16_M	GENMASK(17, 14)
#define SEC_BD_W0_C_GRAN_SIZE_19_16_S	14

/* Cipher direction */
#define SEC_BD_W0_CIPHER_M	GENMASK(19, 18)
#define SEC_BD_W0_CIPHER_S	18
#define SEC_CIPHER_NULL		0
#define SEC_CIPHER_ENCRYPT	1
#define SEC_CIPHER_DECRYPT	2

/* Authentication operation type */
#define SEC_BD_W0_AUTH_M	GENMASK(21, 20)
#define SEC_BD_W0_AUTH_S	20
#define SEC_AUTH_NULL		0
#define SEC_AUTH_MAC		1
#define SEC_AUTH_VERIF		2

#define SEC_BD_W0_AI_GEN	BIT(22)
#define SEC_BD_W0_CI_GEN	BIT(23)
#define SEC_BD_W0_NO_HPAD	BIT(24)
#define SEC_BD_W0_HM_M		GENMASK(26, 25)
#define SEC_BD_W0_HM_S		25
#define SEC_BD_W0_ICV_OR_SKEY_EN_M	GENMASK(28, 27)
#define SEC_BD_W0_ICV_OR_SKEY_EN_S	27

/*
 * Multi purpose field - gran size bits for send, flag for recv.
 * The two _M/_S pairs deliberately alias the same bits (30:29).
 */
#define SEC_BD_W0_FLAG_M		GENMASK(30, 29)
#define SEC_BD_W0_C_GRAN_SIZE_21_20_M	GENMASK(30, 29)
#define SEC_BD_W0_FLAG_S		29
#define SEC_BD_W0_C_GRAN_SIZE_21_20_S	29

#define SEC_BD_W0_DONE		BIT(31)
/* Fields of word 1 of the SEC hardware block descriptor (BD) */
#define SEC_BD_W1_AUTH_GRAN_SIZE_M	GENMASK(21, 0)
#define SEC_BD_W1_AUTH_GRAN_SIZE_S	0
#define SEC_BD_W1_M_KEY_EN		BIT(22)
#define SEC_BD_W1_BD_INVALID		BIT(23)
#define SEC_BD_W1_ADDR_TYPE		BIT(24)

/* Authentication algorithm selection */
#define SEC_BD_W1_A_ALG_M	GENMASK(28, 25)
#define SEC_BD_W1_A_ALG_S	25
#define SEC_A_ALG_SHA1		0
#define SEC_A_ALG_SHA256	1
#define SEC_A_ALG_MD5		2
#define SEC_A_ALG_SHA224	3
#define SEC_A_ALG_HMAC_SHA1	8
#define SEC_A_ALG_HMAC_SHA224	10
#define SEC_A_ALG_HMAC_SHA256	11
#define SEC_A_ALG_HMAC_MD5	12
#define SEC_A_ALG_AES_XCBC	13
#define SEC_A_ALG_AES_CMAC	14

/* Cipher algorithm selection */
#define SEC_BD_W1_C_ALG_M	GENMASK(31, 29)
#define SEC_BD_W1_C_ALG_S	29
#define SEC_C_ALG_DES		0
#define SEC_C_ALG_3DES		1
#define SEC_C_ALG_AES		2
/* Fields of word 2 of the SEC hardware block descriptor (BD) */
#define SEC_BD_W2_C_GRAN_SIZE_15_0_M	GENMASK(15, 0)
#define SEC_BD_W2_C_GRAN_SIZE_15_0_S	0
#define SEC_BD_W2_GRAN_NUM_M		GENMASK(31, 16)
#define SEC_BD_W2_GRAN_NUM_S		16

/* Fields of word 3 of the SEC hardware block descriptor (BD) */
#define SEC_BD_W3_AUTH_LEN_OFFSET_M	GENMASK(9, 0)
#define SEC_BD_W3_AUTH_LEN_OFFSET_S	0
#define SEC_BD_W3_CIPHER_LEN_OFFSET_M	GENMASK(19, 10)
#define SEC_BD_W3_CIPHER_LEN_OFFSET_S	10
#define SEC_BD_W3_MAC_LEN_M		GENMASK(24, 20)
#define SEC_BD_W3_MAC_LEN_S		20
#define SEC_BD_W3_A_KEY_LEN_M		GENMASK(29, 25)
#define SEC_BD_W3_A_KEY_LEN_S		25
#define SEC_BD_W3_C_KEY_LEN_M		GENMASK(31, 30)
#define SEC_BD_W3_C_KEY_LEN_S		30

/* Cipher key length encodings - interpreted per selected cipher algorithm */
#define SEC_KEY_LEN_AES_128	0
#define SEC_KEY_LEN_AES_192	1
#define SEC_KEY_LEN_AES_256	2
#define SEC_KEY_LEN_DES		1
#define SEC_KEY_LEN_3DES_3_KEY	1
#define SEC_KEY_LEN_3DES_2_KEY	3
142 u32 cipher_key_addr_lo
;
143 u32 cipher_key_addr_hi
;
146 u32 cipher_iv_addr_lo
;
147 u32 cipher_iv_addr_hi
;
158 u32 cipher_destin_addr_lo
;
159 u32 cipher_destin_addr_hi
;
162 enum sec_mem_region
{
/* Buffer length for the human readable queue name (also used as irq name) */
#define SEC_NAME_SIZE		64
173 * struct sec_queue_ring_cmd - store information about a SEC HW cmd ring
174 * @used: Local counter used to cheaply establish if the ring is empty.
175 * @lock: Protect against simultaneous adjusting of the read and write pointers.
176 * @vaddr: Virtual address for the ram pages used for the ring.
177 * @paddr: Physical address of the dma mapped region of ram used for the ring.
178 * @callback: Callback function called on a ring element completing.
180 struct sec_queue_ring_cmd
{
183 struct sec_bd_info
*vaddr
;
185 void (*callback
)(struct sec_bd_info
*resp
, void *ctx
);
188 struct sec_debug_bd_info
;
189 struct sec_queue_ring_db
{
190 struct sec_debug_bd_info
*vaddr
;
194 struct sec_out_bd_info
;
195 struct sec_queue_ring_cq
{
196 struct sec_out_bd_info
*vaddr
;
202 enum sec_cipher_alg
{
206 SEC_C_3DES_ECB_192_3KEY
,
207 SEC_C_3DES_ECB_192_2KEY
,
209 SEC_C_3DES_CBC_192_3KEY
,
210 SEC_C_3DES_CBC_192_2KEY
,
231 * struct sec_alg_tfm_ctx - hardware specific transformation context
232 * @cipher_alg: Cipher algorithm enabled include encryption mode.
233 * @key: Key storage if required.
234 * @pkey: DMA address for the key storage.
235 * @req_template: Request template to save time on setup.
236 * @queue: The hardware queue associated with this tfm context.
237 * @lock: Protect key and pkey to ensure they are consistent
238 * @auth_buf: Current context buffer for auth operations.
239 * @backlog: The backlog queue used for cases where our buffers aren't
242 struct sec_alg_tfm_ctx
{
243 enum sec_cipher_alg cipher_alg
;
246 struct sec_bd_info req_template
;
247 struct sec_queue
*queue
;
250 struct list_head backlog
;
254 * struct sec_request - data associate with a single crypto request
255 * @elements: List of subparts of this request (hardware size restriction)
256 * @num_elements: The number of subparts (used as an optimization)
257 * @lock: Protect elements of this structure against concurrent change.
258 * @tfm_ctx: hardware specific context.
259 * @len_in: length of in sgl from upper layers
260 * @len_out: length of out sgl from upper layers
261 * @dma_iv: initialization vector - physical address
262 * @err: store used to track errors across subelements of this request.
263 * @req_base: pointer to base element of associate crypto context.
264 * This is needed to allow shared handling skcipher, ahash etc.
265 * @cb: completion callback.
266 * @backlog_head: list head to allow backlog maintenance.
268 * The hardware is limited in the maximum size of data that it can
269 * process from a single BD. Typically this is fairly large (32MB)
270 * but still requires the complexity of splitting the incoming
271 * skreq up into a number of elements complete with appropriate
275 struct list_head elements
;
278 struct sec_alg_tfm_ctx
*tfm_ctx
;
283 struct crypto_async_request
*req_base
;
284 void (*cb
)(struct sec_bd_info
*resp
, struct crypto_async_request
*req
);
285 struct list_head backlog_head
;
289 * struct sec_request_el - A subpart of a request.
290 * @head: allow us to attach this to the list in the sec_request
291 * @req: hardware block descriptor corresponding to this request subpart
292 * @in: hardware sgl for input - virtual address
293 * @dma_in: hardware sgl for input - physical address
294 * @sgl_in: scatterlist for this request subpart
295 * @out: hardware sgl for output - virtual address
296 * @dma_out: hardware sgl for output - physical address
297 * @sgl_out: scatterlist for this request subpart
298 * @sec_req: The request which this subpart forms a part of
299 * @el_length: Number of bytes in this subpart. Needed to locate
300 * last ivsize chunk for iv chaining.
302 struct sec_request_el
{
303 struct list_head head
;
304 struct sec_bd_info req
;
305 struct sec_hw_sgl
*in
;
307 struct scatterlist
*sgl_in
;
308 struct sec_hw_sgl
*out
;
310 struct scatterlist
*sgl_out
;
311 struct sec_request
*sec_req
;
316 * struct sec_queue - All the information about a HW queue
317 * @dev_info: The parent SEC device to which this queue belongs.
318 * @task_irq: Completion interrupt for the queue.
319 * @name: Human readable queue description also used as irq name.
320 * @ring: The several HW rings associated with one queue.
321 * @regs: The iomapped device registers
322 * @queue_id: Index of the queue used for naming and resource selection.
323 * @in_use: Flag to say if the queue is in use.
324 * @expected: The next expected element to finish assuming we were in order.
325 * @unprocessed: A bitmap to track which OoO elements are done but not handled.
326 * @softqueue: A software queue used when chaining requirements prevent direct
327 * use of the hardware queues.
328 * @havesoftqueue: A flag to say we have a queue - as we may need one for the
330 * @queuelock: Protect the soft queue from concurrent changes to avoid some
331 * potential loss of data races.
332 * @shadow: Pointers back to the shadow copy of the hardware ring element
333 * need because we can't store any context reference in the bd element.
336 struct sec_dev_info
*dev_info
;
338 char name
[SEC_NAME_SIZE
];
339 struct sec_queue_ring_cmd ring_cmd
;
340 struct sec_queue_ring_cq ring_cq
;
341 struct sec_queue_ring_db ring_db
;
347 DECLARE_BITMAP(unprocessed
, SEC_QUEUE_LEN
);
348 DECLARE_KFIFO_PTR(softqueue
, typeof(struct sec_request_el
*));
350 struct mutex queuelock
;
351 void *shadow
[SEC_QUEUE_LEN
];
355 * struct sec_hw_sge: Track each of the 64 element SEC HW SGL entries
356 * @buf: The IOV dma address for this entry.
357 * @len: Length of this IOV.
358 * @pad: Reserved space.
367 * struct sec_hw_sgl: One hardware SGL entry.
368 * @next_sgl: The next entry if we need to chain dma address. Null if last.
369 * @entry_sum_in_chain: The full count of SGEs - only matters for first SGL.
370 * @entry_sum_in_sgl: The number of SGEs in this SGL element.
371 * @flag: Unused in skciphers.
372 * @serial_num: Unused in skciphers.
373 * @cpuid: Currently unused.
374 * @data_bytes_in_sgl: Count of bytes from all SGEs in this SGL.
375 * @next: Virtual address used to stash the next sgl - useful in completion.
376 * @reserved: A reserved field not currently used.
377 * @sge_entries: The (up to) 64 Scatter Gather Entries, representing IOVs.
378 * @node: Currently unused.
382 u16 entry_sum_in_chain
;
383 u16 entry_sum_in_sgl
;
387 u32 data_bytes_in_sgl
;
388 struct sec_hw_sgl
*next
;
390 struct sec_hw_sge sge_entries
[SEC_MAX_SGE_NUM
];
397 * struct sec_dev_info: The full SEC unit comprising queues and processors.
398 * @sec_id: Index used to track which SEC this is when more than one is present.
399 * @num_saas: The number of backed processors enabled.
400 * @regs: iomapped register regions shared by whole SEC unit.
401 * @dev_lock: Protects concurrent queue allocation / freeing for the SEC.
402 * @queues: The 16 queues that this SEC instance provides.
403 * @dev: Device pointer.
404 * @hw_sgl_pool: DMA pool used to minimise mapping for the scatter gather lists.
406 struct sec_dev_info
{
409 void __iomem
*regs
[SEC_NUM_ADDR_REGIONS
];
410 struct mutex dev_lock
;
412 struct sec_queue queues
[SEC_Q_NUM
];
414 struct dma_pool
*hw_sgl_pool
;
/* Queue management interface from sec_drv.c */

/*
 * Submit the block descriptor @msg on @queue; @ctx is the opaque context
 * handed back through the queue's completion callback.
 */
int sec_queue_send(struct sec_queue *queue, struct sec_bd_info *msg, void *ctx);
/* Check whether @num further elements can be enqueued on @queue */
bool sec_queue_can_enqueue(struct sec_queue *queue, int num);
/* Stop @queue and release it back to the device */
int sec_queue_stop_release(struct sec_queue *queue);
/*
 * Allocate and start a hardware queue.
 * NOTE(review): error reporting convention (NULL vs ERR_PTR) is not visible
 * here - confirm against the definition in sec_drv.c.
 */
struct sec_queue *sec_queue_alloc_start_safe(void);
/* True if @queue currently has no outstanding elements */
bool sec_queue_empty(struct sec_queue *queue);

/* Algorithm specific elements from sec_algs.c */
void sec_alg_callback(struct sec_bd_info *resp, void *ctx);
int sec_algs_register(void);
void sec_algs_unregister(void);
428 #endif /* _SEC_DRV_H_ */