// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */

#include <crypto/ctr.h>
#include "cc_driver.h"
#include "cc_ivgen.h"
#include "cc_request_mgr.h"
#include "cc_sram_mgr.h"
#include "cc_buffer_mgr.h"
/* The max. size of pool *MUST* be <= SRAM total size */
#define CC_IVPOOL_SIZE 1024
/* The first 32B fraction of pool are dedicated to the
 * next encryption "key" & "IV" for pool regeneration
 */
#define CC_IVPOOL_META_SIZE (CC_AES_IV_SIZE + AES_KEYSIZE_128)
#define CC_IVPOOL_GEN_SEQ_LEN 4
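
/*
 * Rough sketch of the resulting SRAM pool layout, as implied by the
 * constants above and by cc_init_iv_sram() below (illustrative only):
 *
 *   bytes  0..15               AES-128 key used to regenerate the pool (ctr_key)
 *   bytes 16..31               CTR IV used to regenerate the pool (ctr_iv)
 *   bytes 32..CC_IVPOOL_SIZE-1 pre-generated random IVs, handed out by
 *                              cc_get_iv() in iv_out_size sized chunks
 */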

/**
 * struct cc_ivgen_ctx - IV pool generation context
 * @pool: the start address of the iv-pool resides in internal RAM
 * @ctr_key: address of pool's encryption key material in internal RAM
 * @ctr_iv: address of pool's counter iv in internal RAM
 * @next_iv_ofs: the offset to the next available IV in pool
 * @pool_meta: virt. address of the initial enc. key/IV
 * @pool_meta_dma: phys. address of the initial enc. key/IV
 */
struct cc_ivgen_ctx {
        cc_sram_addr_t pool;
        cc_sram_addr_t ctr_key;
        cc_sram_addr_t ctr_iv;
        u32 next_iv_ofs;
        u8 *pool_meta;
        dma_addr_t pool_meta_dma;
};

/*!
 * Generates CC_IVPOOL_SIZE of random bytes by
 * encrypting 0's using AES128-CTR.
 *
 * \param ivgen_ctx iv-pool context
 * \param iv_seq IN/OUT array to the descriptors sequence
 * \param iv_seq_len IN/OUT pointer to the sequence length
 */
static int cc_gen_iv_pool(struct cc_ivgen_ctx *ivgen_ctx,
                          struct cc_hw_desc iv_seq[], unsigned int *iv_seq_len)
{
        unsigned int idx = *iv_seq_len;

        if ((*iv_seq_len + CC_IVPOOL_GEN_SEQ_LEN) > CC_IVPOOL_SEQ_LEN) {
                /* The sequence will be longer than allowed */
                return -EINVAL;
        }

        /* Setup key */
        hw_desc_init(&iv_seq[idx]);
        set_din_sram(&iv_seq[idx], ivgen_ctx->ctr_key, AES_KEYSIZE_128);
        set_setup_mode(&iv_seq[idx], SETUP_LOAD_KEY0);
        set_cipher_config0(&iv_seq[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
        set_flow_mode(&iv_seq[idx], S_DIN_to_AES);
        set_key_size_aes(&iv_seq[idx], CC_AES_128_BIT_KEY_SIZE);
        set_cipher_mode(&iv_seq[idx], DRV_CIPHER_CTR);
        idx++;

        /* Setup cipher state */
        hw_desc_init(&iv_seq[idx]);
        set_din_sram(&iv_seq[idx], ivgen_ctx->ctr_iv, CC_AES_IV_SIZE);
        set_cipher_config0(&iv_seq[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
        set_flow_mode(&iv_seq[idx], S_DIN_to_AES);
        set_setup_mode(&iv_seq[idx], SETUP_LOAD_STATE1);
        set_key_size_aes(&iv_seq[idx], CC_AES_128_BIT_KEY_SIZE);
        set_cipher_mode(&iv_seq[idx], DRV_CIPHER_CTR);
        idx++;

        /* Perform dummy encrypt to skip first block */
        hw_desc_init(&iv_seq[idx]);
        set_din_const(&iv_seq[idx], 0, CC_AES_IV_SIZE);
        set_dout_sram(&iv_seq[idx], ivgen_ctx->pool, CC_AES_IV_SIZE);
        set_flow_mode(&iv_seq[idx], DIN_AES_DOUT);
        idx++;

        /* Generate IV pool */
        hw_desc_init(&iv_seq[idx]);
        set_din_const(&iv_seq[idx], 0, CC_IVPOOL_SIZE);
        set_dout_sram(&iv_seq[idx], ivgen_ctx->pool, CC_IVPOOL_SIZE);
        set_flow_mode(&iv_seq[idx], DIN_AES_DOUT);
        idx++;

        *iv_seq_len = idx; /* Update sequence length */

        /* queue ordering assures pool readiness */
        ivgen_ctx->next_iv_ofs = CC_IVPOOL_META_SIZE;

        return 0;
}
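
/*
 * Note: cc_gen_iv_pool() appends exactly four descriptors (key load, CTR
 * state load, one dummy block, pool fill), which is what the
 * CC_IVPOOL_GEN_SEQ_LEN bound checked at its entry accounts for.
 */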

/*!
 * Generates the initial pool in SRAM.
 * This function should be invoked when resuming driver.
 *
 * \param drvdata Driver's private context
 *
 * \return int Zero for success, negative value otherwise.
 */
int cc_init_iv_sram(struct cc_drvdata *drvdata)
{
        struct cc_ivgen_ctx *ivgen_ctx = drvdata->ivgen_handle;
        struct cc_hw_desc iv_seq[CC_IVPOOL_SEQ_LEN];
        unsigned int iv_seq_len = 0;
        int rc;

        /* Generate initial enc. key/iv */
        get_random_bytes(ivgen_ctx->pool_meta, CC_IVPOOL_META_SIZE);

        /* The first 32B reserved for the enc. Key/IV */
        ivgen_ctx->ctr_key = ivgen_ctx->pool;
        ivgen_ctx->ctr_iv = ivgen_ctx->pool + AES_KEYSIZE_128;

        /* Copy initial enc. key and IV to SRAM at a single descriptor */
        hw_desc_init(&iv_seq[iv_seq_len]);
        set_din_type(&iv_seq[iv_seq_len], DMA_DLLI, ivgen_ctx->pool_meta_dma,
                     CC_IVPOOL_META_SIZE, NS_BIT);
        set_dout_sram(&iv_seq[iv_seq_len], ivgen_ctx->pool,
                      CC_IVPOOL_META_SIZE);
        set_flow_mode(&iv_seq[iv_seq_len], BYPASS);
        iv_seq_len++;

        /* Generate initial pool */
        rc = cc_gen_iv_pool(ivgen_ctx, iv_seq, &iv_seq_len);
        if (rc)
                return rc;

        /* Fire-and-forget */
        return send_request_init(drvdata, iv_seq, iv_seq_len);
}
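
/*
 * Minimal usage sketch (assumption, not taken from this file): the driver's
 * power-management resume path is expected to call cc_init_iv_sram() so the
 * pool is regenerated after the SRAM contents are lost, e.g.:
 *
 *      rc = cc_init_iv_sram(drvdata);
 *      if (rc)
 *              return rc;
 */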

/*!
 * Free iv-pool and ivgen context.
 *
 * \param drvdata Driver's private context
 */
void cc_ivgen_fini(struct cc_drvdata *drvdata)
{
        struct cc_ivgen_ctx *ivgen_ctx = drvdata->ivgen_handle;
        struct device *device = &drvdata->plat_dev->dev;

        if (!ivgen_ctx)
                return;

        if (ivgen_ctx->pool_meta) {
                memset(ivgen_ctx->pool_meta, 0, CC_IVPOOL_META_SIZE);
                dma_free_coherent(device, CC_IVPOOL_META_SIZE,
                                  ivgen_ctx->pool_meta,
                                  ivgen_ctx->pool_meta_dma);
        }

        ivgen_ctx->pool = NULL_SRAM_ADDR;

        /* release "this" context */
        kfree(ivgen_ctx);
        drvdata->ivgen_handle = NULL;
}

/*!
 * Allocates iv-pool and maps resources.
 * This function generates the first IV pool.
 *
 * \param drvdata Driver's private context
 *
 * \return int Zero for success, negative value otherwise.
 */
int cc_ivgen_init(struct cc_drvdata *drvdata)
{
        struct cc_ivgen_ctx *ivgen_ctx;
        struct device *device = &drvdata->plat_dev->dev;
        int rc;
176 /* Allocate "this" context */
177 ivgen_ctx
= kzalloc(sizeof(*ivgen_ctx
), GFP_KERNEL
);

        /* Allocate pool's header for initial enc. key/IV */
        ivgen_ctx->pool_meta = dma_alloc_coherent(device, CC_IVPOOL_META_SIZE,
                                                  &ivgen_ctx->pool_meta_dma,
                                                  GFP_KERNEL);
        if (!ivgen_ctx->pool_meta) {
                dev_err(device, "Not enough memory to allocate DMA of pool_meta (%u B)\n",
                        CC_IVPOOL_META_SIZE);
                rc = -ENOMEM;
                goto out;
        }

        /* Allocate IV pool in SRAM */
        ivgen_ctx->pool = cc_sram_alloc(drvdata, CC_IVPOOL_SIZE);
        if (ivgen_ctx->pool == NULL_SRAM_ADDR) {
                dev_err(device, "SRAM pool exhausted\n");
                rc = -ENOMEM;
                goto out;
        }

        return cc_init_iv_sram(drvdata);

out:
        cc_ivgen_fini(drvdata);
        return rc;
}
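
/*
 * Design note: both error branches in cc_ivgen_init() funnel through
 * cc_ivgen_fini(), which checks the handle and pool_meta pointers before
 * releasing them, so a partially initialized context is unwound without
 * per-allocation error labels.
 */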

/*!
 * Acquires 16 Bytes IV from the iv-pool
 *
 * \param drvdata Driver private context
 * \param iv_out_dma Array of physical IV out addresses
 * \param iv_out_dma_len Length of iv_out_dma array (additional elements
 *                       of iv_out_dma array are ignored)
 * \param iv_out_size May be 8 or 16 bytes long
 * \param iv_seq IN/OUT array to the descriptors sequence
 * \param iv_seq_len IN/OUT pointer to the sequence length
 *
 * \return int Zero for success, negative value otherwise.
 */
int cc_get_iv(struct cc_drvdata *drvdata, dma_addr_t iv_out_dma[],
              unsigned int iv_out_dma_len, unsigned int iv_out_size,
              struct cc_hw_desc iv_seq[], unsigned int *iv_seq_len)
{
        struct cc_ivgen_ctx *ivgen_ctx = drvdata->ivgen_handle;
        unsigned int idx = *iv_seq_len;
        struct device *dev = drvdata_to_dev(drvdata);
        unsigned int t;

        if (iv_out_size != CC_AES_IV_SIZE &&
            iv_out_size != CTR_RFC3686_IV_SIZE) {
                return -EINVAL;
        }

        if ((iv_out_dma_len + 1) > CC_IVPOOL_SEQ_LEN) {
                /* The sequence will be longer than allowed */
                return -EINVAL;
        }

        /* Check that the number of IVs to generate does not exceed the
         * number of DMA output addresses we can handle.
         */
        if (iv_out_dma_len > CC_MAX_IVGEN_DMA_ADDRESSES) {
                /* The sequence will be longer than allowed */
                return -EINVAL;
        }

        for (t = 0; t < iv_out_dma_len; t++) {
                /* Acquire IV from pool */
                hw_desc_init(&iv_seq[idx]);
                set_din_sram(&iv_seq[idx], (ivgen_ctx->pool +
                                            ivgen_ctx->next_iv_ofs),
                             iv_out_size);
                set_dout_dlli(&iv_seq[idx], iv_out_dma[t], iv_out_size,
                              NS_BIT, 0);
                set_flow_mode(&iv_seq[idx], BYPASS);
                idx++;
        }

        /* The bypass operation is followed by the crypto sequence, hence we
         * must assure the bypass-write transaction has completed by means of
         * a memory-barrier descriptor.
         */
        hw_desc_init(&iv_seq[idx]);
        set_din_no_dma(&iv_seq[idx], 0, 0xfffff0);
        set_dout_no_dma(&iv_seq[idx], 0, 0, 1);
        idx++;

        *iv_seq_len = idx; /* update seq length */

        /* Update iv index */
        ivgen_ctx->next_iv_ofs += iv_out_size;

        if ((CC_IVPOOL_SIZE - ivgen_ctx->next_iv_ofs) < CC_AES_IV_SIZE) {
                dev_dbg(dev, "Pool exhausted, regenerating iv-pool\n");
                /* pool is drained - regenerate it! */
                return cc_gen_iv_pool(ivgen_ctx, iv_seq, iv_seq_len);
        }

        return 0;
}
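
/*
 * Minimal caller sketch (illustrative only; the request variables below are
 * assumptions, not part of this file): a request-building path reserves room
 * in its descriptor sequence and lets cc_get_iv() append the pool-read
 * descriptors ahead of the cipher descriptors, e.g.:
 *
 *      struct cc_hw_desc desc[CC_IVPOOL_SEQ_LEN];
 *      unsigned int len = 0;
 *      dma_addr_t iv_dma[1] = { req_iv_dma_addr };
 *      int rc;
 *
 *      rc = cc_get_iv(drvdata, iv_dma, 1, CC_AES_IV_SIZE, desc, &len);
 *      if (rc)
 *              return rc;
 *      (desc[0..len-1] now copies a fresh IV into req_iv_dma_addr)
 */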