// SPDX-License-Identifier: GPL-2.0
/*
 * Driver for the HiSilicon SEC units found on Hip06 and Hip07
 *
 * Copyright (c) 2016-2017 Hisilicon Limited.
 */
#include <linux/acpi.h>
#include <linux/atomic.h>
#include <linux/delay.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqreturn.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include "sec_drv.h"
#define SEC_QUEUE_AR_FROCE_ALLOC	0
#define SEC_QUEUE_AR_FROCE_NOALLOC	1
#define SEC_QUEUE_AR_FROCE_DIS		2

#define SEC_QUEUE_AW_FROCE_ALLOC	0
#define SEC_QUEUE_AW_FROCE_NOALLOC	1
#define SEC_QUEUE_AW_FROCE_DIS		2

/* SEC_ALGSUB registers */
#define SEC_ALGSUB_CLK_EN_REG		0x03b8
#define SEC_ALGSUB_CLK_DIS_REG		0x03bc
#define SEC_ALGSUB_CLK_ST_REG		0x535c
#define SEC_ALGSUB_RST_REQ_REG		0x0aa8
#define SEC_ALGSUB_RST_DREQ_REG		0x0aac
#define SEC_ALGSUB_RST_ST_REG		0x5a54
#define SEC_ALGSUB_RST_ST_IS_RST	BIT(0)

#define SEC_ALGSUB_BUILD_RST_REQ_REG	0x0ab8
#define SEC_ALGSUB_BUILD_RST_DREQ_REG	0x0abc
#define SEC_ALGSUB_BUILD_RST_ST_REG	0x5a5c
#define SEC_ALGSUB_BUILD_RST_ST_IS_RST	BIT(0)

#define SEC_SAA_BASE			0x00001000UL

/* SEC_SAA registers */
#define SEC_SAA_CTRL_REG(x)		((x) * SEC_SAA_ADDR_SIZE)
#define SEC_SAA_CTRL_GET_QM_EN		BIT(0)

#define SEC_ST_INTMSK1_REG		0x0200
#define SEC_ST_RINT1_REG		0x0400
#define SEC_ST_INTSTS1_REG		0x0600
#define SEC_BD_MNG_STAT_REG		0x0800
#define SEC_PARSING_STAT_REG		0x0804
#define SEC_LOAD_TIME_OUT_CNT_REG	0x0808
#define SEC_CORE_WORK_TIME_OUT_CNT_REG	0x080c
#define SEC_BACK_TIME_OUT_CNT_REG	0x0810
#define SEC_BD1_PARSING_RD_TIME_OUT_CNT_REG	0x0814
#define SEC_BD1_PARSING_WR_TIME_OUT_CNT_REG	0x0818
#define SEC_BD2_PARSING_RD_TIME_OUT_CNT_REG	0x081c
#define SEC_BD2_PARSING_WR_TIME_OUT_CNT_REG	0x0820
#define SEC_SAA_ACC_REG			0x083c
#define SEC_BD_NUM_CNT_IN_SEC_REG	0x0858
#define SEC_LOAD_WORK_TIME_CNT_REG	0x0860
#define SEC_CORE_WORK_WORK_TIME_CNT_REG	0x0864
#define SEC_BACK_WORK_TIME_CNT_REG	0x0868
#define SEC_SAA_IDLE_TIME_CNT_REG	0x086c
#define SEC_SAA_CLK_CNT_REG		0x0870

/* SEC_COMMON registers */
#define SEC_CLK_EN_REG			0x0000
#define SEC_CTRL_REG			0x0004

#define SEC_COMMON_CNT_CLR_CE_REG	0x0008
#define SEC_COMMON_CNT_CLR_CE_CLEAR	BIT(0)
#define SEC_COMMON_CNT_CLR_CE_SNAP_EN	BIT(1)

#define SEC_SECURE_CTRL_REG		0x000c
#define SEC_AXI_CACHE_CFG_REG		0x0010
#define SEC_AXI_QOS_CFG_REG		0x0014
#define SEC_IPV4_MASK_TABLE_REG		0x0020
#define SEC_IPV6_MASK_TABLE_X_REG(x)	(0x0024 + (x) * 4)
#define SEC_FSM_MAX_CNT_REG		0x0064

#define SEC_CTRL2_REG			0x0068
#define SEC_CTRL2_DATA_AXI_RD_OTSD_CFG_M	GENMASK(3, 0)
#define SEC_CTRL2_DATA_AXI_RD_OTSD_CFG_S	0
#define SEC_CTRL2_DATA_AXI_WR_OTSD_CFG_M	GENMASK(6, 4)
#define SEC_CTRL2_DATA_AXI_WR_OTSD_CFG_S	4
#define SEC_CTRL2_CLK_GATE_EN		BIT(7)
#define SEC_CTRL2_ENDIAN_BD		BIT(8)
#define SEC_CTRL2_ENDIAN_BD_TYPE	BIT(9)

#define SEC_CNT_PRECISION_CFG_REG	0x006c
#define SEC_DEBUG_BD_CFG_REG		0x0070
#define SEC_DEBUG_BD_CFG_WB_NORMAL	BIT(0)
#define SEC_DEBUG_BD_CFG_WB_EN		BIT(1)

#define SEC_Q_SIGHT_SEL			0x0074
#define SEC_Q_SIGHT_HIS_CLR		0x0078
#define SEC_Q_VMID_CFG_REG(q)		(0x0100 + (q) * 4)
#define SEC_Q_WEIGHT_CFG_REG(q)		(0x200 + (q) * 4)
#define SEC_STAT_CLR_REG		0x0a00
#define SEC_SAA_IDLE_CNT_CLR_REG	0x0a04
#define SEC_QM_CPL_Q_IDBUF_DFX_CFG_REG	0x0b00
#define SEC_QM_CPL_Q_IDBUF_DFX_RESULT_REG	0x0b04
#define SEC_QM_BD_DFX_CFG_REG		0x0b08
#define SEC_QM_BD_DFX_RESULT_REG	0x0b0c
#define SEC_QM_BDID_DFX_RESULT_REG	0x0b10
#define SEC_QM_BD_DFIFO_STATUS_REG	0x0b14
#define SEC_QM_BD_DFX_CFG2_REG		0x0b1c
#define SEC_QM_BD_DFX_RESULT2_REG	0x0b20
#define SEC_QM_BD_IDFIFO_STATUS_REG	0x0b18
#define SEC_QM_BD_DFIFO_STATUS2_REG	0x0b28
#define SEC_QM_BD_IDFIFO_STATUS2_REG	0x0b2c

#define SEC_HASH_IPV4_MASK		0xfff00000
#define SEC_MAX_SAA_NUM			0xa
#define SEC_SAA_ADDR_SIZE		0x1000

#define SEC_Q_INIT_REG			0x0
#define SEC_Q_INIT_WO_STAT_CLEAR	0x2
#define SEC_Q_INIT_AND_STAT_CLEAR	0x3

#define SEC_Q_CFG_REG			0x8
#define SEC_Q_CFG_REORDER		BIT(0)

#define SEC_Q_PROC_NUM_CFG_REG		0x10
#define SEC_QUEUE_ENB_REG		0x18

#define SEC_Q_DEPTH_CFG_REG		0x50
#define SEC_Q_DEPTH_CFG_DEPTH_M		GENMASK(11, 0)
#define SEC_Q_DEPTH_CFG_DEPTH_S		0

#define SEC_Q_BASE_HADDR_REG		0x54
#define SEC_Q_BASE_LADDR_REG		0x58
#define SEC_Q_WR_PTR_REG		0x5c
#define SEC_Q_OUTORDER_BASE_HADDR_REG	0x60
#define SEC_Q_OUTORDER_BASE_LADDR_REG	0x64
#define SEC_Q_OUTORDER_RD_PTR_REG	0x68
#define SEC_Q_OT_TH_REG			0x6c

#define SEC_Q_ARUSER_CFG_REG		0x70
#define SEC_Q_ARUSER_CFG_FA		BIT(0)
#define SEC_Q_ARUSER_CFG_FNA		BIT(1)
#define SEC_Q_ARUSER_CFG_RINVLD		BIT(2)
#define SEC_Q_ARUSER_CFG_PKG		BIT(3)

#define SEC_Q_AWUSER_CFG_REG		0x74
#define SEC_Q_AWUSER_CFG_FA		BIT(0)
#define SEC_Q_AWUSER_CFG_FNA		BIT(1)
#define SEC_Q_AWUSER_CFG_PKG		BIT(2)

#define SEC_Q_ERR_BASE_HADDR_REG	0x7c
#define SEC_Q_ERR_BASE_LADDR_REG	0x80
#define SEC_Q_CFG_VF_NUM_REG		0x84
#define SEC_Q_SOFT_PROC_PTR_REG		0x88
#define SEC_Q_FAIL_INT_MSK_REG		0x300
#define SEC_Q_FLOW_INT_MKS_REG		0x304
#define SEC_Q_FAIL_RINT_REG		0x400
#define SEC_Q_FLOW_RINT_REG		0x404
#define SEC_Q_FAIL_INT_STATUS_REG	0x500
#define SEC_Q_FLOW_INT_STATUS_REG	0x504
#define SEC_Q_STATUS_REG		0x600
#define SEC_Q_RD_PTR_REG		0x604
#define SEC_Q_PRO_PTR_REG		0x608
#define SEC_Q_OUTORDER_WR_PTR_REG	0x60c
#define SEC_Q_OT_CNT_STATUS_REG		0x610
#define SEC_Q_INORDER_BD_NUM_ST_REG	0x650
#define SEC_Q_INORDER_GET_FLAG_ST_REG	0x654
#define SEC_Q_INORDER_ADD_FLAG_ST_REG	0x658
#define SEC_Q_INORDER_TASK_INT_NUM_LEFT_ST_REG	0x65c
#define SEC_Q_RD_DONE_PTR_REG		0x660
#define SEC_Q_CPL_Q_BD_NUM_ST_REG	0x700
#define SEC_Q_CPL_Q_PTR_ST_REG		0x704
#define SEC_Q_CPL_Q_H_ADDR_ST_REG	0x708
#define SEC_Q_CPL_Q_L_ADDR_ST_REG	0x70c
#define SEC_Q_CPL_TASK_INT_NUM_LEFT_ST_REG	0x710
#define SEC_Q_WRR_ID_CHECK_REG		0x714
#define SEC_Q_CPLQ_FULL_CHECK_REG	0x718
#define SEC_Q_SUCCESS_BD_CNT_REG	0x800
#define SEC_Q_FAIL_BD_CNT_REG		0x804
#define SEC_Q_GET_BD_CNT_REG		0x808
#define SEC_Q_IVLD_CNT_REG		0x80c
#define SEC_Q_BD_PROC_GET_CNT_REG	0x810
#define SEC_Q_BD_PROC_DONE_CNT_REG	0x814
#define SEC_Q_LAT_CLR_REG		0x850
#define SEC_Q_PKT_LAT_MAX_REG		0x854
#define SEC_Q_PKT_LAT_AVG_REG		0x858
#define SEC_Q_PKT_LAT_MIN_REG		0x85c
#define SEC_Q_ID_CLR_CFG_REG		0x900
#define SEC_Q_1ST_BD_ERR_ID_REG		0x904
#define SEC_Q_1ST_AUTH_FAIL_ID_REG	0x908
#define SEC_Q_1ST_RD_ERR_ID_REG		0x90c
#define SEC_Q_1ST_ECC2_ERR_ID_REG	0x910
#define SEC_Q_1ST_IVLD_ID_REG		0x914
#define SEC_Q_1ST_BD_WR_ERR_ID_REG	0x918
#define SEC_Q_1ST_ERR_BD_WR_ERR_ID_REG	0x91c
#define SEC_Q_1ST_BD_MAC_WR_ERR_ID_REG	0x920
struct sec_debug_bd_info {
#define SEC_DEBUG_BD_INFO_SOFT_ERR_CHECK_M	GENMASK(22, 0)
	u32 soft_err_check;
#define SEC_DEBUG_BD_INFO_HARD_ERR_CHECK_M	GENMASK(9, 0)
	u32 hard_err_check;
	u32 lat_after_ooo;
#define SEC_DEBUG_BD_INFO_GET_ID_M		GENMASK(19, 0)
	u32 get_id;
	u32 bd_vld;
	u32 reserv[32];
};

struct sec_out_bd_info {
#define SEC_OUT_BD_INFO_Q_ID_M			GENMASK(11, 0)
#define SEC_OUT_BD_INFO_ECC_2BIT_ERR		BIT(14)
	u16 data;
};
#define SEC_MAX_DEVICES				8
static struct sec_dev_info *sec_devices[SEC_MAX_DEVICES];
static DEFINE_MUTEX(sec_id_lock);
static int sec_queue_map_io(struct sec_queue *queue)
{
	struct device *dev = queue->dev_info->dev;
	struct resource *res;

	res = platform_get_resource(to_platform_device(dev),
				    IORESOURCE_MEM,
				    2 + queue->queue_id);
	if (!res) {
		dev_err(dev, "Failed to get queue %d memory resource\n",
			queue->queue_id);
		return -ENOMEM;
	}
	queue->regs = ioremap(res->start, resource_size(res));
	if (!queue->regs)
		return -ENOMEM;

	return 0;
}

static void sec_queue_unmap_io(struct sec_queue *queue)
{
	iounmap(queue->regs);
}
static int sec_queue_ar_pkgattr(struct sec_queue *queue, u32 ar_pkg)
{
	void __iomem *addr = queue->regs + SEC_Q_ARUSER_CFG_REG;
	u32 regval;

	regval = readl_relaxed(addr);
	if (ar_pkg)
		regval |= SEC_Q_ARUSER_CFG_PKG;
	else
		regval &= ~SEC_Q_ARUSER_CFG_PKG;
	writel_relaxed(regval, addr);

	return 0;
}
static int sec_queue_aw_pkgattr(struct sec_queue *queue, u32 aw_pkg)
{
	void __iomem *addr = queue->regs + SEC_Q_AWUSER_CFG_REG;
	u32 regval;

	regval = readl_relaxed(addr);
	regval |= SEC_Q_AWUSER_CFG_PKG;
	writel_relaxed(regval, addr);

	return 0;
}
static int sec_clk_en(struct sec_dev_info *info)
{
	void __iomem *base = info->regs[SEC_COMMON];
	u32 i = 0;

	writel_relaxed(0x7, base + SEC_ALGSUB_CLK_EN_REG);
	do {
		usleep_range(1000, 10000);
		if ((readl_relaxed(base + SEC_ALGSUB_CLK_ST_REG) & 0x7) == 0x7)
			return 0;
		i++;
	} while (i < 10);
	dev_err(info->dev, "sec clock enable fail!\n");

	return -EIO;
}
static int sec_clk_dis(struct sec_dev_info *info)
{
	void __iomem *base = info->regs[SEC_COMMON];
	u32 i = 0;

	writel_relaxed(0x7, base + SEC_ALGSUB_CLK_DIS_REG);
	do {
		usleep_range(1000, 10000);
		if ((readl_relaxed(base + SEC_ALGSUB_CLK_ST_REG) & 0x7) == 0)
			return 0;
		i++;
	} while (i < 10);
	dev_err(info->dev, "sec clock disable fail!\n");

	return -EIO;
}
static int sec_reset_whole_module(struct sec_dev_info *info)
{
	void __iomem *base = info->regs[SEC_COMMON];
	bool is_reset, b_is_reset;
	u32 i = 0;

	/* Assert both resets */
	writel_relaxed(1, base + SEC_ALGSUB_RST_REQ_REG);
	writel_relaxed(1, base + SEC_ALGSUB_BUILD_RST_REQ_REG);
	while (1) {
		usleep_range(1000, 10000);
		is_reset = readl_relaxed(base + SEC_ALGSUB_RST_ST_REG) &
			SEC_ALGSUB_RST_ST_IS_RST;
		b_is_reset = readl_relaxed(base + SEC_ALGSUB_BUILD_RST_ST_REG) &
			SEC_ALGSUB_BUILD_RST_ST_IS_RST;
		if (is_reset && b_is_reset)
			break;
		i++;
		if (i > 10) {
			dev_err(info->dev, "Reset req failed\n");
			return -EIO;
		}
	}

	/* Deassert both resets */
	i = 0;
	writel_relaxed(1, base + SEC_ALGSUB_RST_DREQ_REG);
	writel_relaxed(1, base + SEC_ALGSUB_BUILD_RST_DREQ_REG);
	while (1) {
		usleep_range(1000, 10000);
		is_reset = readl_relaxed(base + SEC_ALGSUB_RST_ST_REG) &
			SEC_ALGSUB_RST_ST_IS_RST;
		b_is_reset = readl_relaxed(base + SEC_ALGSUB_BUILD_RST_ST_REG) &
			SEC_ALGSUB_BUILD_RST_ST_IS_RST;
		if (!is_reset && !b_is_reset)
			break;

		i++;
		if (i > 10) {
			dev_err(info->dev, "Reset dreq failed\n");
			return -EIO;
		}
	}

	return 0;
}
static void sec_bd_endian_little(struct sec_dev_info *info)
{
	void __iomem *addr = info->regs[SEC_SAA] + SEC_CTRL2_REG;
	u32 regval;

	regval = readl_relaxed(addr);
	regval &= ~(SEC_CTRL2_ENDIAN_BD | SEC_CTRL2_ENDIAN_BD_TYPE);
	writel_relaxed(regval, addr);
}
/*
 * sec_cache_config - configure optimum cache placement
 */
static void sec_cache_config(struct sec_dev_info *info)
{
	struct iommu_domain *domain;
	void __iomem *addr = info->regs[SEC_SAA] + SEC_CTRL_REG;

	domain = iommu_get_domain_for_dev(info->dev);

	/* Check that translation is occurring */
	if (domain && (domain->type & __IOMMU_DOMAIN_PAGING))
		writel_relaxed(0x44cf9e, addr);
	else
		writel_relaxed(0x4cfd9, addr);
}
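/*
 * The two magic values written by sec_cache_config() appear to be
 * precomputed AXI cache attribute settings; their bit encoding is not
 * decoded here.  The driver simply selects between the two, based on
 * whether an IOMMU is actively translating for the device.
 */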
static void sec_data_axiwr_otsd_cfg(struct sec_dev_info *info, u32 cfg)
{
	void __iomem *addr = info->regs[SEC_SAA] + SEC_CTRL2_REG;
	u32 regval;

	regval = readl_relaxed(addr);
	regval &= ~SEC_CTRL2_DATA_AXI_WR_OTSD_CFG_M;
	regval |= (cfg << SEC_CTRL2_DATA_AXI_WR_OTSD_CFG_S) &
		SEC_CTRL2_DATA_AXI_WR_OTSD_CFG_M;
	writel_relaxed(regval, addr);
}
static void sec_data_axird_otsd_cfg(struct sec_dev_info *info, u32 cfg)
{
	void __iomem *addr = info->regs[SEC_SAA] + SEC_CTRL2_REG;
	u32 regval;

	regval = readl_relaxed(addr);
	regval &= ~SEC_CTRL2_DATA_AXI_RD_OTSD_CFG_M;
	regval |= (cfg << SEC_CTRL2_DATA_AXI_RD_OTSD_CFG_S) &
		SEC_CTRL2_DATA_AXI_RD_OTSD_CFG_M;
	writel_relaxed(regval, addr);
}
static void sec_clk_gate_en(struct sec_dev_info *info, bool clkgate)
{
	void __iomem *addr = info->regs[SEC_SAA] + SEC_CTRL2_REG;
	u32 regval;

	regval = readl_relaxed(addr);
	if (clkgate)
		regval |= SEC_CTRL2_CLK_GATE_EN;
	else
		regval &= ~SEC_CTRL2_CLK_GATE_EN;
	writel_relaxed(regval, addr);
}
static void sec_comm_cnt_cfg(struct sec_dev_info *info, bool clr_ce)
{
	void __iomem *addr = info->regs[SEC_SAA] + SEC_COMMON_CNT_CLR_CE_REG;
	u32 regval;

	regval = readl_relaxed(addr);
	if (clr_ce)
		regval |= SEC_COMMON_CNT_CLR_CE_CLEAR;
	else
		regval &= ~SEC_COMMON_CNT_CLR_CE_CLEAR;
	writel_relaxed(regval, addr);
}
static void sec_commsnap_en(struct sec_dev_info *info, bool snap_en)
{
	void __iomem *addr = info->regs[SEC_SAA] + SEC_COMMON_CNT_CLR_CE_REG;
	u32 regval;

	regval = readl_relaxed(addr);
	if (snap_en)
		regval |= SEC_COMMON_CNT_CLR_CE_SNAP_EN;
	else
		regval &= ~SEC_COMMON_CNT_CLR_CE_SNAP_EN;
	writel_relaxed(regval, addr);
}
static void sec_ipv6_hashmask(struct sec_dev_info *info, u32 hash_mask[])
{
	void __iomem *base = info->regs[SEC_SAA];
	int i;

	for (i = 0; i < 10; i++)
		writel_relaxed(hash_mask[0],
			       base + SEC_IPV6_MASK_TABLE_X_REG(i));
}
static int sec_ipv4_hashmask(struct sec_dev_info *info, u32 hash_mask)
{
	if (hash_mask & SEC_HASH_IPV4_MASK) {
		dev_err(info->dev, "Sec Ipv4 Hash Mask Input Error!\n");
		return -EINVAL;
	}

	writel_relaxed(hash_mask,
		       info->regs[SEC_SAA] + SEC_IPV4_MASK_TABLE_REG);

	return 0;
}
static void sec_set_dbg_bd_cfg(struct sec_dev_info *info, u32 cfg)
{
	void __iomem *addr = info->regs[SEC_SAA] + SEC_DEBUG_BD_CFG_REG;
	u32 regval;

	regval = readl_relaxed(addr);
	/* Always disable write back of normal bd */
	regval &= ~SEC_DEBUG_BD_CFG_WB_NORMAL;

	if (!cfg)
		regval &= ~SEC_DEBUG_BD_CFG_WB_EN;
	else
		regval |= SEC_DEBUG_BD_CFG_WB_EN;

	writel_relaxed(regval, addr);
}
static void sec_saa_getqm_en(struct sec_dev_info *info, u32 saa_indx, u32 en)
{
	void __iomem *addr = info->regs[SEC_SAA] + SEC_SAA_BASE +
		SEC_SAA_CTRL_REG(saa_indx);
	u32 regval;

	regval = readl_relaxed(addr);
	if (en)
		regval |= SEC_SAA_CTRL_GET_QM_EN;
	else
		regval &= ~SEC_SAA_CTRL_GET_QM_EN;
	writel_relaxed(regval, addr);
}
static void sec_saa_int_mask(struct sec_dev_info *info, u32 saa_indx,
			     u32 saa_int_mask)
{
	writel_relaxed(saa_int_mask,
		       info->regs[SEC_SAA] + SEC_SAA_BASE + SEC_ST_INTMSK1_REG +
		       saa_indx * SEC_SAA_ADDR_SIZE);
}
static void sec_streamid(struct sec_dev_info *info, int i)
{
	#define SEC_SID 0x600
	#define SEC_VMID 0

	writel_relaxed((SEC_VMID | ((SEC_SID & 0xffff) << 8)),
		       info->regs[SEC_SAA] + SEC_Q_VMID_CFG_REG(i));
}
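/*
 * sec_streamid() writes a fixed VMID in the low byte and the stream ID
 * shifted into the next sixteen bits for the given queue.  It is only called
 * when an IOMMU is translating for the device (see sec_hw_init() below); the
 * values are constants for this platform rather than discovered at runtime.
 */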
static void sec_queue_ar_alloc(struct sec_queue *queue, u32 alloc)
{
	void __iomem *addr = queue->regs + SEC_Q_ARUSER_CFG_REG;
	u32 regval;

	regval = readl_relaxed(addr);
	if (alloc == SEC_QUEUE_AR_FROCE_ALLOC) {
		regval |= SEC_Q_ARUSER_CFG_FA;
		regval &= ~SEC_Q_ARUSER_CFG_FNA;
	} else {
		regval &= ~SEC_Q_ARUSER_CFG_FA;
		regval |= SEC_Q_ARUSER_CFG_FNA;
	}

	writel_relaxed(regval, addr);
}
static void sec_queue_aw_alloc(struct sec_queue *queue, u32 alloc)
{
	void __iomem *addr = queue->regs + SEC_Q_AWUSER_CFG_REG;
	u32 regval;

	regval = readl_relaxed(addr);
	if (alloc == SEC_QUEUE_AW_FROCE_ALLOC) {
		regval |= SEC_Q_AWUSER_CFG_FA;
		regval &= ~SEC_Q_AWUSER_CFG_FNA;
	} else {
		regval &= ~SEC_Q_AWUSER_CFG_FA;
		regval |= SEC_Q_AWUSER_CFG_FNA;
	}

	writel_relaxed(regval, addr);
}
static void sec_queue_reorder(struct sec_queue *queue, bool reorder)
{
	void __iomem *base = queue->regs;
	u32 regval;

	regval = readl_relaxed(base + SEC_Q_CFG_REG);
	if (reorder)
		regval |= SEC_Q_CFG_REORDER;
	else
		regval &= ~SEC_Q_CFG_REORDER;
	writel_relaxed(regval, base + SEC_Q_CFG_REG);
}
static void sec_queue_depth(struct sec_queue *queue, u32 depth)
{
	void __iomem *addr = queue->regs + SEC_Q_DEPTH_CFG_REG;
	u32 regval;

	regval = readl_relaxed(addr);
	regval &= ~SEC_Q_DEPTH_CFG_DEPTH_M;
	regval |= (depth << SEC_Q_DEPTH_CFG_DEPTH_S) & SEC_Q_DEPTH_CFG_DEPTH_M;

	writel_relaxed(regval, addr);
}
static void sec_queue_cmdbase_addr(struct sec_queue *queue, u64 addr)
{
	writel_relaxed(upper_32_bits(addr), queue->regs + SEC_Q_BASE_HADDR_REG);
	writel_relaxed(lower_32_bits(addr), queue->regs + SEC_Q_BASE_LADDR_REG);
}
static void sec_queue_outorder_addr(struct sec_queue *queue, u64 addr)
{
	writel_relaxed(upper_32_bits(addr),
		       queue->regs + SEC_Q_OUTORDER_BASE_HADDR_REG);
	writel_relaxed(lower_32_bits(addr),
		       queue->regs + SEC_Q_OUTORDER_BASE_LADDR_REG);
}
static void sec_queue_errbase_addr(struct sec_queue *queue, u64 addr)
{
	writel_relaxed(upper_32_bits(addr),
		       queue->regs + SEC_Q_ERR_BASE_HADDR_REG);
	writel_relaxed(lower_32_bits(addr),
		       queue->regs + SEC_Q_ERR_BASE_LADDR_REG);
}
static void sec_queue_irq_disable(struct sec_queue *queue)
{
	writel_relaxed((u32)~0, queue->regs + SEC_Q_FLOW_INT_MKS_REG);
}

static void sec_queue_irq_enable(struct sec_queue *queue)
{
	writel_relaxed(0, queue->regs + SEC_Q_FLOW_INT_MKS_REG);
}

static void sec_queue_abn_irq_disable(struct sec_queue *queue)
{
	writel_relaxed((u32)~0, queue->regs + SEC_Q_FAIL_INT_MSK_REG);
}
static void sec_queue_stop(struct sec_queue *queue)
{
	disable_irq(queue->task_irq);
	sec_queue_irq_disable(queue);
	writel_relaxed(0x0, queue->regs + SEC_QUEUE_ENB_REG);
}
static void sec_queue_start(struct sec_queue *queue)
{
	sec_queue_irq_enable(queue);
	enable_irq(queue->task_irq);
	queue->expected = 0;
	writel_relaxed(SEC_Q_INIT_AND_STAT_CLEAR, queue->regs + SEC_Q_INIT_REG);
	writel_relaxed(0x1, queue->regs + SEC_QUEUE_ENB_REG);
}
static struct sec_queue *sec_alloc_queue(struct sec_dev_info *info)
{
	int i;

	mutex_lock(&info->dev_lock);

	/* Get the first idle queue in SEC device */
	for (i = 0; i < SEC_Q_NUM; i++)
		if (!info->queues[i].in_use) {
			info->queues[i].in_use = true;
			info->queues_in_use++;
			mutex_unlock(&info->dev_lock);

			return &info->queues[i];
		}
	mutex_unlock(&info->dev_lock);

	return ERR_PTR(-ENODEV);
}
static int sec_queue_free(struct sec_queue *queue)
{
	struct sec_dev_info *info = queue->dev_info;

	if (queue->queue_id >= SEC_Q_NUM) {
		dev_err(info->dev, "No queue %d\n", queue->queue_id);
		return -ENODEV;
	}

	if (!queue->in_use) {
		dev_err(info->dev, "Queue %d is idle\n", queue->queue_id);
		return -ENODEV;
	}

	mutex_lock(&info->dev_lock);
	queue->in_use = false;
	info->queues_in_use--;
	mutex_unlock(&info->dev_lock);

	return 0;
}
static irqreturn_t sec_isr_handle_th(int irq, void *q)
{
	sec_queue_irq_disable(q);
	return IRQ_WAKE_THREAD;
}
static irqreturn_t sec_isr_handle(int irq, void *q)
{
	struct sec_queue *queue = q;
	struct sec_queue_ring_cmd *msg_ring = &queue->ring_cmd;
	struct sec_queue_ring_cq *cq_ring = &queue->ring_cq;
	struct sec_out_bd_info *outorder_msg;
	struct sec_bd_info *msg;
	u32 ooo_read, ooo_write;
	void __iomem *base = queue->regs;
	int q_id;

	ooo_read = readl(base + SEC_Q_OUTORDER_RD_PTR_REG);
	ooo_write = readl(base + SEC_Q_OUTORDER_WR_PTR_REG);
	outorder_msg = cq_ring->vaddr + ooo_read;
	q_id = outorder_msg->data & SEC_OUT_BD_INFO_Q_ID_M;
	msg = msg_ring->vaddr + q_id;

	while ((ooo_write != ooo_read) && msg->w0 & SEC_BD_W0_DONE) {
		/*
		 * Must be before callback otherwise blocks adding other chained
		 * elements
		 */
		set_bit(q_id, queue->unprocessed);
		if (q_id == queue->expected)
			while (test_bit(queue->expected, queue->unprocessed)) {
				clear_bit(queue->expected, queue->unprocessed);
				msg = msg_ring->vaddr + queue->expected;
				msg->w0 &= ~SEC_BD_W0_DONE;
				msg_ring->callback(msg,
						queue->shadow[queue->expected]);
				queue->shadow[queue->expected] = NULL;
				queue->expected = (queue->expected + 1) %
					SEC_QUEUE_LEN;
				atomic_dec(&msg_ring->used);
			}

		ooo_read = (ooo_read + 1) % SEC_QUEUE_LEN;
		writel(ooo_read, base + SEC_Q_OUTORDER_RD_PTR_REG);
		ooo_write = readl(base + SEC_Q_OUTORDER_WR_PTR_REG);
		outorder_msg = cq_ring->vaddr + ooo_read;
		q_id = outorder_msg->data & SEC_OUT_BD_INFO_Q_ID_M;
		msg = msg_ring->vaddr + q_id;
	}

	sec_queue_irq_enable(queue);

	return IRQ_HANDLED;
}
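/*
 * Completion handling note: the hardware reports completions through the
 * out of order (cq) ring, so a later command element may finish before an
 * earlier one.  sec_isr_handle() records each finished element in the
 * queue->unprocessed bitmap and only runs callbacks once every element up to
 * queue->expected is done, which keeps callbacks strictly in submission
 * order and lets the shadow context array be consumed linearly.
 */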
static int sec_queue_irq_init(struct sec_queue *queue)
{
	struct sec_dev_info *info = queue->dev_info;
	int irq = queue->task_irq;
	int ret;

	ret = request_threaded_irq(irq, sec_isr_handle_th, sec_isr_handle,
				   IRQF_TRIGGER_RISING, queue->name, queue);
	if (ret) {
		dev_err(info->dev, "request irq(%d) failed %d\n", irq, ret);
		return ret;
	}
	disable_irq(irq);

	return 0;
}

static int sec_queue_irq_uninit(struct sec_queue *queue)
{
	free_irq(queue->task_irq, queue);

	return 0;
}
static struct sec_dev_info *sec_device_get(void)
{
	struct sec_dev_info *sec_dev = NULL;
	struct sec_dev_info *this_sec_dev;
	int least_busy_n = SEC_Q_NUM + 1;
	int i;

	/* Find which one is least busy and use that first */
	for (i = 0; i < SEC_MAX_DEVICES; i++) {
		this_sec_dev = sec_devices[i];
		if (this_sec_dev &&
		    this_sec_dev->queues_in_use < least_busy_n) {
			least_busy_n = this_sec_dev->queues_in_use;
			sec_dev = this_sec_dev;
		}
	}

	return sec_dev;
}
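/*
 * sec_device_get() does no locking of its own: callers must hold sec_id_lock
 * so the sec_devices[] table and the queues_in_use counts cannot change
 * under the scan.  sec_queue_alloc_start_safe() below is the locked wrapper
 * most users should go through.
 */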
static struct sec_queue *sec_queue_alloc_start(struct sec_dev_info *info)
{
	struct sec_queue *queue;

	queue = sec_alloc_queue(info);
	if (IS_ERR(queue)) {
		dev_err(info->dev, "alloc sec queue failed! %ld\n",
			PTR_ERR(queue));
		return queue;
	}

	sec_queue_start(queue);

	return queue;
}
/**
 * sec_queue_alloc_start_safe - get a hw queue from appropriate instance
 *
 * This function does extremely simplistic load balancing. It does not take
 * into account NUMA locality of the accelerator, or which cpu has requested
 * the queue.  Future work may focus on optimizing this in order to improve
 * full machine throughput.
 */
struct sec_queue *sec_queue_alloc_start_safe(void)
{
	struct sec_dev_info *info;
	struct sec_queue *queue = ERR_PTR(-ENODEV);

	mutex_lock(&sec_id_lock);
	info = sec_device_get();
	if (!info)
		goto unlock;

	queue = sec_queue_alloc_start(info);

unlock:
	mutex_unlock(&sec_id_lock);

	return queue;
}
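/*
 * Illustrative queue lifetime from a consumer such as sec_algs.c (a minimal
 * sketch only, not a verbatim copy of the real caller):
 *
 *	struct sec_queue *q = sec_queue_alloc_start_safe();
 *
 *	if (IS_ERR(q))
 *		return PTR_ERR(q);
 *
 *	... submit BDs with sec_queue_send() ...
 *
 *	sec_queue_stop_release(q);
 */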
/**
 * sec_queue_stop_release() - free up a hw queue for reuse
 * @queue: The queue we are done with.
 *
 * This will stop the current queue, terminating any transactions
 * that are inflight and return it to the pool of available hw queues.
 */
int sec_queue_stop_release(struct sec_queue *queue)
{
	struct device *dev = queue->dev_info->dev;
	int ret;

	sec_queue_stop(queue);

	ret = sec_queue_free(queue);
	if (ret)
		dev_err(dev, "Releasing queue failed %d\n", ret);

	return ret;
}
/**
 * sec_queue_empty() - Is this hardware queue currently empty.
 * @queue: The queue to test
 *
 * We need to know if we have an empty queue for some of the chaining modes
 * as if it is not empty we may need to hold the message in a software queue
 * until the hw queue is drained.
 */
bool sec_queue_empty(struct sec_queue *queue)
{
	struct sec_queue_ring_cmd *msg_ring = &queue->ring_cmd;

	return !atomic_read(&msg_ring->used);
}
/**
 * sec_queue_send() - queue up a single operation in the hw queue
 * @queue: The queue in which to put the message
 * @msg: The message
 * @ctx: Context to be put in the shadow array and passed back to cb on result.
 *
 * This function will return -EAGAIN if the queue is currently full.
 */
int sec_queue_send(struct sec_queue *queue, struct sec_bd_info *msg, void *ctx)
{
	struct sec_queue_ring_cmd *msg_ring = &queue->ring_cmd;
	void __iomem *base = queue->regs;
	u32 write, read;

	mutex_lock(&msg_ring->lock);
	read = readl(base + SEC_Q_RD_PTR_REG);
	write = readl(base + SEC_Q_WR_PTR_REG);
	if (write == read && atomic_read(&msg_ring->used) == SEC_QUEUE_LEN) {
		mutex_unlock(&msg_ring->lock);
		return -EAGAIN;
	}
	memcpy(msg_ring->vaddr + write, msg, sizeof(*msg));
	queue->shadow[write] = ctx;
	write = (write + 1) % SEC_QUEUE_LEN;

	/* Ensure content updated before queue advance */
	wmb();
	writel(write, base + SEC_Q_WR_PTR_REG);

	atomic_inc(&msg_ring->used);
	mutex_unlock(&msg_ring->lock);

	return 0;
}
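/*
 * Typical submission pattern (a sketch only; the real callers in the
 * algorithm layer also maintain a software backlog):
 *
 *	if (sec_queue_can_enqueue(queue, 1)) {
 *		ret = sec_queue_send(queue, &bd, ctx);
 *		if (ret == -EAGAIN)
 *			... raced with another sender, back off and retry ...
 *	}
 *
 * The callback registered in sec_queue_res_cfg() is later invoked from the
 * threaded interrupt handler with the same ctx pointer.
 */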
bool sec_queue_can_enqueue(struct sec_queue *queue, int num)
{
	struct sec_queue_ring_cmd *msg_ring = &queue->ring_cmd;

	return SEC_QUEUE_LEN - atomic_read(&msg_ring->used) >= num;
}
static void sec_queue_hw_init(struct sec_queue *queue)
{
	sec_queue_ar_alloc(queue, SEC_QUEUE_AR_FROCE_NOALLOC);
	sec_queue_aw_alloc(queue, SEC_QUEUE_AW_FROCE_NOALLOC);
	sec_queue_ar_pkgattr(queue, 1);
	sec_queue_aw_pkgattr(queue, 1);

	/* Enable out of order queue */
	sec_queue_reorder(queue, true);

	/* Interrupt after a single complete element */
	writel_relaxed(1, queue->regs + SEC_Q_PROC_NUM_CFG_REG);

	sec_queue_depth(queue, SEC_QUEUE_LEN - 1);

	sec_queue_cmdbase_addr(queue, queue->ring_cmd.paddr);

	sec_queue_outorder_addr(queue, queue->ring_cq.paddr);

	sec_queue_errbase_addr(queue, queue->ring_db.paddr);

	writel_relaxed(0x100, queue->regs + SEC_Q_OT_TH_REG);

	sec_queue_abn_irq_disable(queue);
	sec_queue_irq_disable(queue);
	writel_relaxed(SEC_Q_INIT_AND_STAT_CLEAR, queue->regs + SEC_Q_INIT_REG);
}
static int sec_hw_init(struct sec_dev_info *info)
{
	struct iommu_domain *domain;
	u32 sec_ipv4_mask = 0;
	u32 sec_ipv6_mask[10] = {};
	u32 i, ret;

	domain = iommu_get_domain_for_dev(info->dev);

	/*
	 * Enable all available processing unit clocks.
	 * Only the first cluster is usable with translations.
	 */
	if (domain && (domain->type & __IOMMU_DOMAIN_PAGING))
		info->num_saas = 5;
	else
		info->num_saas = 10;

	writel_relaxed(GENMASK(info->num_saas - 1, 0),
		       info->regs[SEC_SAA] + SEC_CLK_EN_REG);

	/* 32 bit little endian */
	sec_bd_endian_little(info);

	sec_cache_config(info);

	/* Data axi port write and read outstanding config as per datasheet */
	sec_data_axiwr_otsd_cfg(info, 0x7);
	sec_data_axird_otsd_cfg(info, 0x7);

	/* Enable clock gating */
	sec_clk_gate_en(info, true);

	/* Set CNT_CYC register not read clear */
	sec_comm_cnt_cfg(info, false);

	sec_commsnap_en(info, false);

	writel_relaxed((u32)~0, info->regs[SEC_SAA] + SEC_FSM_MAX_CNT_REG);

	ret = sec_ipv4_hashmask(info, sec_ipv4_mask);
	if (ret) {
		dev_err(info->dev, "Failed to set ipv4 hashmask %d\n", ret);
		return -EIO;
	}

	sec_ipv6_hashmask(info, sec_ipv6_mask);

	/* do not use debug bd */
	sec_set_dbg_bd_cfg(info, 0);

	if (domain && (domain->type & __IOMMU_DOMAIN_PAGING)) {
		for (i = 0; i < SEC_Q_NUM; i++) {
			sec_streamid(info, i);
			/* Same QoS for all queues */
			writel_relaxed(0x3f,
				       info->regs[SEC_SAA] +
				       SEC_Q_WEIGHT_CFG_REG(i));
		}
	}

	for (i = 0; i < info->num_saas; i++) {
		sec_saa_getqm_en(info, i, 1);
		sec_saa_int_mask(info, i, 0);
	}

	return 0;
}
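/*
 * Summary of the global bring-up done by sec_hw_init(): clock the available
 * SAA processing units, select little endian BDs, set the AXI cache and
 * outstanding-transaction parameters, configure the statistics counters,
 * clear the flow hash masks, leave debug BDs disabled and, when an IOMMU is
 * translating, assign stream IDs and equal weights to every queue before
 * enabling and unmasking each SAA.
 */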
static void sec_hw_exit(struct sec_dev_info *info)
{
	int i;

	for (i = 0; i < SEC_MAX_SAA_NUM; i++) {
		sec_saa_int_mask(info, i, (u32)~0);
		sec_saa_getqm_en(info, i, 0);
	}
}
static void sec_queue_base_init(struct sec_dev_info *info,
				struct sec_queue *queue, int queue_id)
{
	queue->dev_info = info;
	queue->queue_id = queue_id;
	snprintf(queue->name, sizeof(queue->name),
		 "%s_%d", dev_name(info->dev), queue->queue_id);
}
static int sec_map_io(struct sec_dev_info *info, struct platform_device *pdev)
{
	struct resource *res;
	int i;

	for (i = 0; i < SEC_NUM_ADDR_REGIONS; i++) {
		res = platform_get_resource(pdev, IORESOURCE_MEM, i);

		if (!res) {
			dev_err(info->dev, "Memory resource %d not found\n", i);
			return -EINVAL;
		}

		info->regs[i] = devm_ioremap(info->dev, res->start,
					     resource_size(res));
		if (!info->regs[i]) {
			dev_err(info->dev,
				"Memory resource %d could not be remapped\n",
				i);
			return -EINVAL;
		}
	}

	return 0;
}
static int sec_base_init(struct sec_dev_info *info,
			 struct platform_device *pdev)
{
	int ret;

	ret = sec_map_io(info, pdev);
	if (ret)
		return ret;

	ret = sec_clk_en(info);
	if (ret)
		return ret;

	ret = sec_reset_whole_module(info);
	if (ret)
		goto sec_clk_disable;

	ret = sec_hw_init(info);
	if (ret)
		goto sec_clk_disable;

	return 0;

sec_clk_disable:
	sec_clk_dis(info);

	return ret;
}

static void sec_base_exit(struct sec_dev_info *info)
{
	sec_hw_exit(info);
	sec_clk_dis(info);
}
#define SEC_Q_CMD_SIZE \
	round_up(SEC_QUEUE_LEN * sizeof(struct sec_bd_info), PAGE_SIZE)
#define SEC_Q_CQ_SIZE \
	round_up(SEC_QUEUE_LEN * sizeof(struct sec_out_bd_info), PAGE_SIZE)
#define SEC_Q_DB_SIZE \
	round_up(SEC_QUEUE_LEN * sizeof(struct sec_debug_bd_info), PAGE_SIZE)
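/*
 * Each hardware queue owns three DMA-coherent rings, each SEC_QUEUE_LEN
 * entries long and rounded up to whole pages: the command ring carries the
 * BDs written by sec_queue_send(), the completion (out of order) ring is
 * filled in by the hardware as elements finish, and the third ring provides
 * the error base address the hardware expects; debug BD write-back stays
 * disabled (see sec_set_dbg_bd_cfg()).
 */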
static int sec_queue_res_cfg(struct sec_queue *queue)
{
	struct device *dev = queue->dev_info->dev;
	struct sec_queue_ring_cmd *ring_cmd = &queue->ring_cmd;
	struct sec_queue_ring_cq *ring_cq = &queue->ring_cq;
	struct sec_queue_ring_db *ring_db = &queue->ring_db;
	int ret;

	ring_cmd->vaddr = dma_alloc_coherent(dev, SEC_Q_CMD_SIZE,
					     &ring_cmd->paddr, GFP_KERNEL);
	if (!ring_cmd->vaddr)
		return -ENOMEM;

	atomic_set(&ring_cmd->used, 0);
	mutex_init(&ring_cmd->lock);
	ring_cmd->callback = sec_alg_callback;

	ring_cq->vaddr = dma_alloc_coherent(dev, SEC_Q_CQ_SIZE,
					    &ring_cq->paddr, GFP_KERNEL);
	if (!ring_cq->vaddr) {
		ret = -ENOMEM;
		goto err_free_ring_cmd;
	}

	ring_db->vaddr = dma_alloc_coherent(dev, SEC_Q_DB_SIZE,
					    &ring_db->paddr, GFP_KERNEL);
	if (!ring_db->vaddr) {
		ret = -ENOMEM;
		goto err_free_ring_cq;
	}
	queue->task_irq = platform_get_irq(to_platform_device(dev),
					   queue->queue_id * 2 + 1);
	if (queue->task_irq <= 0) {
		ret = -EINVAL;
		goto err_free_ring_db;
	}

	return 0;

err_free_ring_db:
	dma_free_coherent(dev, SEC_Q_DB_SIZE, queue->ring_db.vaddr,
			  queue->ring_db.paddr);
err_free_ring_cq:
	dma_free_coherent(dev, SEC_Q_CQ_SIZE, queue->ring_cq.vaddr,
			  queue->ring_cq.paddr);
err_free_ring_cmd:
	dma_free_coherent(dev, SEC_Q_CMD_SIZE, queue->ring_cmd.vaddr,
			  queue->ring_cmd.paddr);

	return ret;
}
static void sec_queue_free_ring_pages(struct sec_queue *queue)
{
	struct device *dev = queue->dev_info->dev;

	dma_free_coherent(dev, SEC_Q_DB_SIZE, queue->ring_db.vaddr,
			  queue->ring_db.paddr);
	dma_free_coherent(dev, SEC_Q_CQ_SIZE, queue->ring_cq.vaddr,
			  queue->ring_cq.paddr);
	dma_free_coherent(dev, SEC_Q_CMD_SIZE, queue->ring_cmd.vaddr,
			  queue->ring_cmd.paddr);
}
static int sec_queue_config(struct sec_dev_info *info, struct sec_queue *queue,
			    int queue_id)
{
	int ret;

	sec_queue_base_init(info, queue, queue_id);

	ret = sec_queue_res_cfg(queue);
	if (ret)
		return ret;

	ret = sec_queue_map_io(queue);
	if (ret) {
		dev_err(info->dev, "Queue map failed %d\n", ret);
		sec_queue_free_ring_pages(queue);
		return ret;
	}

	sec_queue_hw_init(queue);

	return 0;
}

static void sec_queue_unconfig(struct sec_dev_info *info,
			       struct sec_queue *queue)
{
	sec_queue_unmap_io(queue);
	sec_queue_free_ring_pages(queue);
}
static int sec_id_alloc(struct sec_dev_info *info)
{
	int ret = 0;
	int i;

	mutex_lock(&sec_id_lock);

	for (i = 0; i < SEC_MAX_DEVICES; i++)
		if (!sec_devices[i])
			break;

	if (i == SEC_MAX_DEVICES) {
		ret = -ENOMEM;
		goto unlock;
	}
	info->sec_id = i;
	sec_devices[info->sec_id] = info;

unlock:
	mutex_unlock(&sec_id_lock);

	return ret;
}

static void sec_id_free(struct sec_dev_info *info)
{
	mutex_lock(&sec_id_lock);
	sec_devices[info->sec_id] = NULL;
	mutex_unlock(&sec_id_lock);
}
static int sec_probe(struct platform_device *pdev)
{
	struct sec_dev_info *info;
	struct device *dev = &pdev->dev;
	int i, j;
	int ret;

	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
	if (ret) {
		dev_err(dev, "Failed to set 64 bit dma mask %d", ret);
		return -ENODEV;
	}

	info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	info->dev = dev;
	mutex_init(&info->dev_lock);

	info->hw_sgl_pool = dmam_pool_create("sgl", dev,
					     sizeof(struct sec_hw_sgl), 64, 0);
	if (!info->hw_sgl_pool) {
		dev_err(dev, "Failed to create sec sgl dma pool\n");
		return -ENOMEM;
	}

	ret = sec_base_init(info, pdev);
	if (ret) {
		dev_err(dev, "Base initialization fail! %d\n", ret);
		return ret;
	}

	for (i = 0; i < SEC_Q_NUM; i++) {
		ret = sec_queue_config(info, &info->queues[i], i);
		if (ret)
			goto queues_unconfig;

		ret = sec_queue_irq_init(&info->queues[i]);
		if (ret) {
			sec_queue_unconfig(info, &info->queues[i]);
			goto queues_unconfig;
		}
	}

	ret = sec_algs_register();
	if (ret) {
		dev_err(dev, "Failed to register algorithms with crypto %d\n",
			ret);
		goto queues_unconfig;
	}

	platform_set_drvdata(pdev, info);

	ret = sec_id_alloc(info);
	if (ret)
		goto algs_unregister;

	return 0;

algs_unregister:
	sec_algs_unregister();
queues_unconfig:
	for (j = i - 1; j >= 0; j--) {
		sec_queue_irq_uninit(&info->queues[j]);
		sec_queue_unconfig(info, &info->queues[j]);
	}
	sec_base_exit(info);

	return ret;
}
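/*
 * Probe error handling note: queues are configured one at a time, so on
 * failure only queues [0, i) have both their resources and their interrupt
 * set up; the unwind loop therefore walks j from i - 1 down to 0.  The
 * algorithms are unregistered before the queues are torn down so no new work
 * can be submitted against a queue that is being unconfigured.
 */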
static int sec_remove(struct platform_device *pdev)
{
	struct sec_dev_info *info = platform_get_drvdata(pdev);
	int i;

	/* Unexpose as soon as possible, reuse during remove is fine */
	sec_id_free(info);

	sec_algs_unregister();

	for (i = 0; i < SEC_Q_NUM; i++) {
		sec_queue_irq_uninit(&info->queues[i]);
		sec_queue_unconfig(info, &info->queues[i]);
	}

	sec_base_exit(info);

	return 0;
}
static const __maybe_unused struct of_device_id sec_match[] = {
	{ .compatible = "hisilicon,hip06-sec" },
	{ .compatible = "hisilicon,hip07-sec" },
	{}
};
MODULE_DEVICE_TABLE(of, sec_match);
static const __maybe_unused struct acpi_device_id sec_acpi_match[] = {
	{ "HISI02C1", 0 },
	{ }
};
MODULE_DEVICE_TABLE(acpi, sec_acpi_match);
static struct platform_driver sec_driver = {
	.probe = sec_probe,
	.remove = sec_remove,
	.driver = {
		.name = "hisi_sec_platform_driver",
		.of_match_table = sec_match,
		.acpi_match_table = ACPI_PTR(sec_acpi_match),
	},
};
module_platform_driver(sec_driver);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("HiSilicon Security Accelerators");
MODULE_AUTHOR("Zaibo Xu <xuzaibo@huawei.com>");
MODULE_AUTHOR("Jonathan Cameron <jonathan.cameron@huawei.com>");