// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2019 Xilinx, Inc.
 *
 * This driver is developed for SDFEC16 (Soft Decision FEC 16nm)
 * IP. It exposes a char device which supports file operations
 * like open(), close() and ioctl().
 */

#include <linux/miscdevice.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/compat.h>
#include <linux/highmem.h>

#include <uapi/misc/xilinx_sdfec.h>

#define DEV_NAME_LEN 12

static DEFINE_IDA(dev_nrs);

/* Xilinx SDFEC Register Map */
/* CODE_WRI_PROTECT Register */
#define XSDFEC_CODE_WR_PROTECT_ADDR (0x4)

/* ACTIVE Register */
#define XSDFEC_ACTIVE_ADDR (0x8)
#define XSDFEC_IS_ACTIVITY_SET (0x1)

/* AXIS_WIDTH Register */
#define XSDFEC_AXIS_WIDTH_ADDR (0xC)
#define XSDFEC_AXIS_DOUT_WORDS_LSB (5)
#define XSDFEC_AXIS_DOUT_WIDTH_LSB (3)
#define XSDFEC_AXIS_DIN_WORDS_LSB (2)
#define XSDFEC_AXIS_DIN_WIDTH_LSB (0)

/* AXIS_ENABLE Register */
#define XSDFEC_AXIS_ENABLE_ADDR (0x10)
#define XSDFEC_AXIS_OUT_ENABLE_MASK (0x38)
#define XSDFEC_AXIS_IN_ENABLE_MASK (0x7)
#define XSDFEC_AXIS_ENABLE_MASK \
	(XSDFEC_AXIS_OUT_ENABLE_MASK | XSDFEC_AXIS_IN_ENABLE_MASK)

/* FEC_CODE Register */
#define XSDFEC_FEC_CODE_ADDR (0x14)

/* ORDER Register Map */
#define XSDFEC_ORDER_ADDR (0x18)

/* Interrupt Status Register */
#define XSDFEC_ISR_ADDR (0x1C)
/* Interrupt Status Register Bit Mask */
#define XSDFEC_ISR_MASK (0x3F)

/* Write Only - Interrupt Enable Register */
#define XSDFEC_IER_ADDR (0x20)
/* Write Only - Interrupt Disable Register */
#define XSDFEC_IDR_ADDR (0x24)
/* Read Only - Interrupt Mask Register */
#define XSDFEC_IMR_ADDR (0x28)

/* ECC Interrupt Status Register */
#define XSDFEC_ECC_ISR_ADDR (0x2C)
/* Single Bit Errors */
#define XSDFEC_ECC_ISR_SBE_MASK (0x7FF)
/* PL Initialize Single Bit Errors */
#define XSDFEC_PL_INIT_ECC_ISR_SBE_MASK (0x3C00000)
/* Multi Bit Errors */
#define XSDFEC_ECC_ISR_MBE_MASK (0x3FF800)
/* PL Initialize Multi Bit Errors */
#define XSDFEC_PL_INIT_ECC_ISR_MBE_MASK (0x3C000000)
/* Multi Bit Error to Event Shift */
#define XSDFEC_ECC_ISR_MBE_TO_EVENT_SHIFT (11)
/* PL Initialize Multi Bit Error to Event Shift */
#define XSDFEC_PL_INIT_ECC_ISR_MBE_TO_EVENT_SHIFT (4)
/* ECC Interrupt Status Bit Mask */
#define XSDFEC_ECC_ISR_MASK (XSDFEC_ECC_ISR_SBE_MASK | XSDFEC_ECC_ISR_MBE_MASK)
/* ECC Interrupt Status PL Initialize Bit Mask */
#define XSDFEC_PL_INIT_ECC_ISR_MASK \
	(XSDFEC_PL_INIT_ECC_ISR_SBE_MASK | XSDFEC_PL_INIT_ECC_ISR_MBE_MASK)
/* ECC Interrupt Status All Bit Mask */
#define XSDFEC_ALL_ECC_ISR_MASK \
	(XSDFEC_ECC_ISR_MASK | XSDFEC_PL_INIT_ECC_ISR_MASK)
/* ECC Interrupt Status Single Bit Errors Mask */
#define XSDFEC_ALL_ECC_ISR_SBE_MASK \
	(XSDFEC_ECC_ISR_SBE_MASK | XSDFEC_PL_INIT_ECC_ISR_SBE_MASK)
/* ECC Interrupt Status Multi Bit Errors Mask */
#define XSDFEC_ALL_ECC_ISR_MBE_MASK \
	(XSDFEC_ECC_ISR_MBE_MASK | XSDFEC_PL_INIT_ECC_ISR_MBE_MASK)

/* Write Only - ECC Interrupt Enable Register */
#define XSDFEC_ECC_IER_ADDR (0x30)
/* Write Only - ECC Interrupt Disable Register */
#define XSDFEC_ECC_IDR_ADDR (0x34)
/* Read Only - ECC Interrupt Mask Register */
#define XSDFEC_ECC_IMR_ADDR (0x38)

/* BYPASS Register */
#define XSDFEC_BYPASS_ADDR (0x3C)

/* Turbo Code Register */
#define XSDFEC_TURBO_ADDR (0x100)
#define XSDFEC_TURBO_SCALE_MASK (0xFFF)
#define XSDFEC_TURBO_SCALE_BIT_POS (8)
#define XSDFEC_TURBO_SCALE_MAX (15)

/* LDPC Code REG0 Register */
#define XSDFEC_LDPC_CODE_REG0_ADDR_BASE (0x2000)
#define XSDFEC_LDPC_CODE_REG0_ADDR_HIGH (0x27F0)
#define XSDFEC_REG0_N_MIN (4)
#define XSDFEC_REG0_N_MAX (32768)
#define XSDFEC_REG0_N_MUL_P (256)
#define XSDFEC_REG0_N_LSB (0)
#define XSDFEC_REG0_K_MIN (2)
#define XSDFEC_REG0_K_MAX (32766)
#define XSDFEC_REG0_K_MUL_P (256)
#define XSDFEC_REG0_K_LSB (16)

/* LDPC Code REG1 Register */
#define XSDFEC_LDPC_CODE_REG1_ADDR_BASE (0x2004)
#define XSDFEC_LDPC_CODE_REG1_ADDR_HIGH (0x27F4)
#define XSDFEC_REG1_PSIZE_MIN (2)
#define XSDFEC_REG1_PSIZE_MAX (512)
#define XSDFEC_REG1_NO_PACKING_MASK (0x400)
#define XSDFEC_REG1_NO_PACKING_LSB (10)
#define XSDFEC_REG1_NM_MASK (0xFF800)
#define XSDFEC_REG1_NM_LSB (11)
#define XSDFEC_REG1_BYPASS_MASK (0x100000)

/* LDPC Code REG2 Register */
#define XSDFEC_LDPC_CODE_REG2_ADDR_BASE (0x2008)
#define XSDFEC_LDPC_CODE_REG2_ADDR_HIGH (0x27F8)
#define XSDFEC_REG2_NLAYERS_MIN (1)
#define XSDFEC_REG2_NLAYERS_MAX (256)
#define XSDFEC_REG2_NMQC_MASK (0xFFE00)
#define XSDFEC_REG2_NMQC_LSB (9)
#define XSDFEC_REG2_NORM_TYPE_MASK (0x100000)
#define XSDFEC_REG2_NORM_TYPE_LSB (20)
#define XSDFEC_REG2_SPECIAL_QC_MASK (0x200000)
#define XSDFEC_REG2_SPECIAL_QC_LSB (21)
#define XSDFEC_REG2_NO_FINAL_PARITY_MASK (0x400000)
#define XSDFEC_REG2_NO_FINAL_PARITY_LSB (22)
#define XSDFEC_REG2_MAX_SCHEDULE_MASK (0x1800000)
#define XSDFEC_REG2_MAX_SCHEDULE_LSB (23)

/* LDPC Code REG3 Register */
#define XSDFEC_LDPC_CODE_REG3_ADDR_BASE (0x200C)
#define XSDFEC_LDPC_CODE_REG3_ADDR_HIGH (0x27FC)
#define XSDFEC_REG3_LA_OFF_LSB (8)
#define XSDFEC_REG3_QC_OFF_LSB (16)

#define XSDFEC_LDPC_REG_JUMP (0x10)
#define XSDFEC_REG_WIDTH_JUMP (4)

/* The maximum number of pinned pages */
#define MAX_NUM_PAGES ((XSDFEC_QC_TABLE_DEPTH / PAGE_SIZE) + 1)

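/*
 * Worked example of the MAX_NUM_PAGES bound (illustrative only; the real
 * numbers come from XSDFEC_QC_TABLE_DEPTH in the UAPI header and from
 * PAGE_SIZE): if the QC table depth were 0x8000 bytes and the page size
 * 4 KiB, MAX_NUM_PAGES would evaluate to (0x8000 / 0x1000) + 1 = 9, i.e.
 * enough page pointers to cover the largest table one xsdfec_table_write()
 * call may have to pin, plus one extra page for a non-page-aligned start.
 */
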
/**
 * struct xsdfec_clks - For managing SD-FEC clocks
 * @core_clk: Main processing clock for core
 * @axi_clk: AXI4-Lite memory-mapped clock
 * @din_words_clk: DIN Words AXI4-Stream Slave clock
 * @din_clk: DIN AXI4-Stream Slave clock
 * @dout_clk: DOUT AXI4-Stream Slave clock
 * @dout_words_clk: DOUT Words AXI4-Stream Slave clock
 * @ctrl_clk: Control AXI4-Stream Slave clock
 * @status_clk: Status AXI4-Stream Slave clock
 */
struct xsdfec_clks {
	struct clk *core_clk;
	struct clk *axi_clk;
	struct clk *din_words_clk;
	struct clk *din_clk;
	struct clk *dout_clk;
	struct clk *dout_words_clk;
	struct clk *ctrl_clk;
	struct clk *status_clk;
};

/**
 * struct xsdfec_dev - Driver data for SDFEC
 * @miscdev: Misc device handle
 * @clks: Clocks managed by the SDFEC driver
 * @waitq: Driver wait queue
 * @config: Configuration of the SDFEC device
 * @dev_name: Device name
 * @flags: spinlock flags
 * @regs: device physical base address
 * @dev: pointer to device struct
 * @state: State of the SDFEC device
 * @error_data_lock: Error counter and states spinlock
 * @dev_id: Device ID
 * @isr_err_count: Count of ISR errors
 * @cecc_count: Count of Correctable ECC errors (SBE)
 * @uecc_count: Count of Uncorrectable ECC errors (MBE)
 * @irq: IRQ number
 * @state_updated: indicates State updated by interrupt handler
 * @stats_updated: indicates Stats updated by interrupt handler
 * @intr_enabled: indicates IRQ enabled
 *
 * This structure contains necessary state for SDFEC driver to operate
 */
struct xsdfec_dev {
	struct miscdevice miscdev;
	struct xsdfec_clks clks;
	wait_queue_head_t waitq;
	struct xsdfec_config config;
	char dev_name[DEV_NAME_LEN];
	unsigned long flags;
	void __iomem *regs;
	struct device *dev;
	enum xsdfec_state state;
	/* Spinlock to protect state_updated and stats_updated */
	spinlock_t error_data_lock;
	int dev_id;
	u32 isr_err_count;
	u32 cecc_count;
	u32 uecc_count;
	int irq;
	bool state_updated;
	bool stats_updated;
	bool intr_enabled;
};

static inline void xsdfec_regwrite(struct xsdfec_dev *xsdfec, u32 addr,
				   u32 value)
{
	dev_dbg(xsdfec->dev, "Writing 0x%x to offset 0x%x", value, addr);
	iowrite32(value, xsdfec->regs + addr);
}

static inline u32 xsdfec_regread(struct xsdfec_dev *xsdfec, u32 addr)
{
	u32 rval;

	rval = ioread32(xsdfec->regs + addr);
	dev_dbg(xsdfec->dev, "Read value = 0x%x from offset 0x%x", rval, addr);
	return rval;
}

static void update_bool_config_from_reg(struct xsdfec_dev *xsdfec,
					u32 reg_offset, u32 bit_num,
					char *config_value)
{
	u32 reg_val;
	u32 bit_mask = 1 << bit_num;

	reg_val = xsdfec_regread(xsdfec, reg_offset);
	*config_value = (reg_val & bit_mask) > 0;
}

static void update_config_from_hw(struct xsdfec_dev *xsdfec)
{
	u32 reg_value;
	bool sdfec_started;

	/* Update the Order */
	reg_value = xsdfec_regread(xsdfec, XSDFEC_ORDER_ADDR);
	xsdfec->config.order = reg_value;

	update_bool_config_from_reg(xsdfec, XSDFEC_BYPASS_ADDR,
				    0, /* Bit Number, maybe change to mask */
				    &xsdfec->config.bypass);

	update_bool_config_from_reg(xsdfec, XSDFEC_CODE_WR_PROTECT_ADDR,
				    0, /* Bit Number, maybe change to mask */
				    &xsdfec->config.code_wr_protect);

	reg_value = xsdfec_regread(xsdfec, XSDFEC_IMR_ADDR);
	xsdfec->config.irq.enable_isr = (reg_value & XSDFEC_ISR_MASK) > 0;

	reg_value = xsdfec_regread(xsdfec, XSDFEC_ECC_IMR_ADDR);
	xsdfec->config.irq.enable_ecc_isr =
		(reg_value & XSDFEC_ECC_ISR_MASK) > 0;

	reg_value = xsdfec_regread(xsdfec, XSDFEC_AXIS_ENABLE_ADDR);
	sdfec_started = (reg_value & XSDFEC_AXIS_IN_ENABLE_MASK) > 0;
	if (sdfec_started)
		xsdfec->state = XSDFEC_STARTED;
	else
		xsdfec->state = XSDFEC_STOPPED;
}

static int xsdfec_get_status(struct xsdfec_dev *xsdfec, void __user *arg)
{
	struct xsdfec_status status;
	int err;

	memset(&status, 0, sizeof(status));
	spin_lock_irqsave(&xsdfec->error_data_lock, xsdfec->flags);
	status.state = xsdfec->state;
	xsdfec->state_updated = false;
	spin_unlock_irqrestore(&xsdfec->error_data_lock, xsdfec->flags);
	status.activity = (xsdfec_regread(xsdfec, XSDFEC_ACTIVE_ADDR) &
			   XSDFEC_IS_ACTIVITY_SET);

	err = copy_to_user(arg, &status, sizeof(status));
	if (err)
		err = -EFAULT;

	return err;
}

static int xsdfec_get_config(struct xsdfec_dev *xsdfec, void __user *arg)
{
	int err;

	err = copy_to_user(arg, &xsdfec->config, sizeof(xsdfec->config));
	if (err)
		err = -EFAULT;

	return err;
}

static int xsdfec_isr_enable(struct xsdfec_dev *xsdfec, bool enable)
{
	u32 mask_read;

	if (enable) {
		/* Enable */
		xsdfec_regwrite(xsdfec, XSDFEC_IER_ADDR, XSDFEC_ISR_MASK);
		mask_read = xsdfec_regread(xsdfec, XSDFEC_IMR_ADDR);
		if (mask_read & XSDFEC_ISR_MASK) {
			dev_dbg(xsdfec->dev,
				"SDFEC enabling irq with IER failed");
			return -EIO;
		}
	} else {
		/* Disable */
		xsdfec_regwrite(xsdfec, XSDFEC_IDR_ADDR, XSDFEC_ISR_MASK);
		mask_read = xsdfec_regread(xsdfec, XSDFEC_IMR_ADDR);
		if ((mask_read & XSDFEC_ISR_MASK) != XSDFEC_ISR_MASK) {
			dev_dbg(xsdfec->dev,
				"SDFEC disabling irq with IDR failed");
			return -EIO;
		}
	}
	return 0;
}

static int xsdfec_ecc_isr_enable(struct xsdfec_dev *xsdfec, bool enable)
{
	u32 mask_read;

	if (enable) {
		/* Enable */
		xsdfec_regwrite(xsdfec, XSDFEC_ECC_IER_ADDR,
				XSDFEC_ALL_ECC_ISR_MASK);
		mask_read = xsdfec_regread(xsdfec, XSDFEC_ECC_IMR_ADDR);
		if (mask_read & XSDFEC_ALL_ECC_ISR_MASK) {
			dev_dbg(xsdfec->dev,
				"SDFEC enabling ECC irq with ECC IER failed");
			return -EIO;
		}
	} else {
		/* Disable */
		xsdfec_regwrite(xsdfec, XSDFEC_ECC_IDR_ADDR,
				XSDFEC_ALL_ECC_ISR_MASK);
		mask_read = xsdfec_regread(xsdfec, XSDFEC_ECC_IMR_ADDR);
		if (!(((mask_read & XSDFEC_ALL_ECC_ISR_MASK) ==
		       XSDFEC_ECC_ISR_MASK) ||
		      ((mask_read & XSDFEC_ALL_ECC_ISR_MASK) ==
		       XSDFEC_PL_INIT_ECC_ISR_MASK))) {
			dev_dbg(xsdfec->dev,
				"SDFEC disable ECC irq with ECC IDR failed");
			return -EIO;
		}
	}
	return 0;
}

static int xsdfec_set_irq(struct xsdfec_dev *xsdfec, void __user *arg)
{
	struct xsdfec_irq irq;
	int err;
	int isr_err;
	int ecc_err;

	err = copy_from_user(&irq, arg, sizeof(irq));
	if (err)
		return -EFAULT;

	/* Setup tlast related IRQ */
	isr_err = xsdfec_isr_enable(xsdfec, irq.enable_isr);
	if (!isr_err)
		xsdfec->config.irq.enable_isr = irq.enable_isr;

	/* Setup ECC related IRQ */
	ecc_err = xsdfec_ecc_isr_enable(xsdfec, irq.enable_ecc_isr);
	if (!ecc_err)
		xsdfec->config.irq.enable_ecc_isr = irq.enable_ecc_isr;

	if (isr_err < 0 || ecc_err < 0)
		err = -EIO;

	return err;
}

static int xsdfec_set_turbo(struct xsdfec_dev *xsdfec, void __user *arg)
{
	struct xsdfec_turbo turbo;
	int err;
	u32 turbo_write;

	err = copy_from_user(&turbo, arg, sizeof(turbo));
	if (err)
		return -EFAULT;

	if (turbo.alg >= XSDFEC_TURBO_ALG_MAX)
		return -EINVAL;

	if (turbo.scale > XSDFEC_TURBO_SCALE_MAX)
		return -EINVAL;

	/* Check to see what device tree says about the FEC codes */
	if (xsdfec->config.code == XSDFEC_LDPC_CODE)
		return -EIO;

	turbo_write = ((turbo.scale & XSDFEC_TURBO_SCALE_MASK)
		       << XSDFEC_TURBO_SCALE_BIT_POS) |
		      turbo.alg;
	xsdfec_regwrite(xsdfec, XSDFEC_TURBO_ADDR, turbo_write);
	return err;
}

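/*
 * Worked example of the TURBO register packing above, using hypothetical
 * user values turbo.scale = 12 and turbo.alg = 0:
 *
 *	turbo_write = ((12 & XSDFEC_TURBO_SCALE_MASK)
 *		       << XSDFEC_TURBO_SCALE_BIT_POS) | 0;
 *	// = (12 << 8) | 0 = 0xC00
 *
 * i.e. the scale factor occupies bits [19:8] of XSDFEC_TURBO_ADDR and the
 * algorithm select sits in the low bits.
 */
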
static int xsdfec_get_turbo(struct xsdfec_dev *xsdfec, void __user *arg)
{
	u32 reg_value;
	struct xsdfec_turbo turbo_params;
	int err;

	if (xsdfec->config.code == XSDFEC_LDPC_CODE)
		return -EIO;

	memset(&turbo_params, 0, sizeof(turbo_params));
	reg_value = xsdfec_regread(xsdfec, XSDFEC_TURBO_ADDR);

	turbo_params.scale = (reg_value & XSDFEC_TURBO_SCALE_MASK) >>
			     XSDFEC_TURBO_SCALE_BIT_POS;
	turbo_params.alg = reg_value & 0x1;

	err = copy_to_user(arg, &turbo_params, sizeof(turbo_params));
	if (err)
		err = -EFAULT;

	return err;
}

static int xsdfec_reg0_write(struct xsdfec_dev *xsdfec, u32 n, u32 k, u32 psize,
			     u32 offset)
{
	u32 wdata;

	if (n < XSDFEC_REG0_N_MIN || n > XSDFEC_REG0_N_MAX || psize == 0 ||
	    (n > XSDFEC_REG0_N_MUL_P * psize) || n <= k || ((n % psize) != 0)) {
		dev_dbg(xsdfec->dev, "N value is not in range");
		return -EINVAL;
	}
	n <<= XSDFEC_REG0_N_LSB;

	if (k < XSDFEC_REG0_K_MIN || k > XSDFEC_REG0_K_MAX ||
	    (k > XSDFEC_REG0_K_MUL_P * psize) || ((k % psize) != 0)) {
		dev_dbg(xsdfec->dev, "K value is not in range");
		return -EINVAL;
	}
	k = k << XSDFEC_REG0_K_LSB;
	wdata = k | n;

	if (XSDFEC_LDPC_CODE_REG0_ADDR_BASE + (offset * XSDFEC_LDPC_REG_JUMP) >
	    XSDFEC_LDPC_CODE_REG0_ADDR_HIGH) {
		dev_dbg(xsdfec->dev, "Writing outside of LDPC reg0 space 0x%x",
			XSDFEC_LDPC_CODE_REG0_ADDR_BASE +
				(offset * XSDFEC_LDPC_REG_JUMP));
		return -EINVAL;
	}
	xsdfec_regwrite(xsdfec,
			XSDFEC_LDPC_CODE_REG0_ADDR_BASE +
				(offset * XSDFEC_LDPC_REG_JUMP),
			wdata);
	return 0;
}

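/*
 * Worked example of the REG0 packing above, using hypothetical code
 * dimensions n = 5120, k = 4352 and psize = 256 (values chosen to satisfy
 * the range, multiple-of-psize and n > k checks):
 *
 *	n <<= XSDFEC_REG0_N_LSB;	// 5120 << 0  = 0x1400
 *	k = k << XSDFEC_REG0_K_LSB;	// 4352 << 16 = 0x11000000
 *	wdata = k | n;			// 0x11001400
 *
 * The packed word lands in REG0 of the LDPC code slot selected by "offset",
 * with slots spaced XSDFEC_LDPC_REG_JUMP bytes apart.
 */
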
static int xsdfec_reg1_write(struct xsdfec_dev *xsdfec, u32 psize,
			     u32 no_packing, u32 nm, u32 offset)
{
	u32 wdata;

	if (psize < XSDFEC_REG1_PSIZE_MIN || psize > XSDFEC_REG1_PSIZE_MAX) {
		dev_dbg(xsdfec->dev, "Psize is not in range");
		return -EINVAL;
	}

	if (no_packing != 0 && no_packing != 1)
		dev_dbg(xsdfec->dev, "No-packing bit register invalid");
	no_packing = ((no_packing << XSDFEC_REG1_NO_PACKING_LSB) &
		      XSDFEC_REG1_NO_PACKING_MASK);

	if (nm & ~(XSDFEC_REG1_NM_MASK >> XSDFEC_REG1_NM_LSB))
		dev_dbg(xsdfec->dev, "NM is beyond 10 bits");
	nm = (nm << XSDFEC_REG1_NM_LSB) & XSDFEC_REG1_NM_MASK;

	wdata = nm | no_packing | psize;
	if (XSDFEC_LDPC_CODE_REG1_ADDR_BASE + (offset * XSDFEC_LDPC_REG_JUMP) >
	    XSDFEC_LDPC_CODE_REG1_ADDR_HIGH) {
		dev_dbg(xsdfec->dev, "Writing outside of LDPC reg1 space 0x%x",
			XSDFEC_LDPC_CODE_REG1_ADDR_BASE +
				(offset * XSDFEC_LDPC_REG_JUMP));
		return -EINVAL;
	}
	xsdfec_regwrite(xsdfec,
			XSDFEC_LDPC_CODE_REG1_ADDR_BASE +
				(offset * XSDFEC_LDPC_REG_JUMP),
			wdata);
	return 0;
}

static int xsdfec_reg2_write(struct xsdfec_dev *xsdfec, u32 nlayers, u32 nmqc,
			     u32 norm_type, u32 special_qc, u32 no_final_parity,
			     u32 max_schedule, u32 offset)
{
	u32 wdata;

	if (nlayers < XSDFEC_REG2_NLAYERS_MIN ||
	    nlayers > XSDFEC_REG2_NLAYERS_MAX) {
		dev_dbg(xsdfec->dev, "Nlayers is not in range");
		return -EINVAL;
	}

	if (nmqc & ~(XSDFEC_REG2_NMQC_MASK >> XSDFEC_REG2_NMQC_LSB))
		dev_dbg(xsdfec->dev, "NMQC exceeds 11 bits");
	nmqc = (nmqc << XSDFEC_REG2_NMQC_LSB) & XSDFEC_REG2_NMQC_MASK;

	if (norm_type > 1)
		dev_dbg(xsdfec->dev, "Norm type is invalid");
	norm_type = ((norm_type << XSDFEC_REG2_NORM_TYPE_LSB) &
		     XSDFEC_REG2_NORM_TYPE_MASK);
	if (special_qc > 1)
		dev_dbg(xsdfec->dev, "Special QC is invalid");
	special_qc = ((special_qc << XSDFEC_REG2_SPECIAL_QC_LSB) &
		      XSDFEC_REG2_SPECIAL_QC_MASK);

	if (no_final_parity > 1)
		dev_dbg(xsdfec->dev, "No final parity check invalid");
	no_final_parity =
		((no_final_parity << XSDFEC_REG2_NO_FINAL_PARITY_LSB) &
		 XSDFEC_REG2_NO_FINAL_PARITY_MASK);
	if (max_schedule &
	    ~(XSDFEC_REG2_MAX_SCHEDULE_MASK >> XSDFEC_REG2_MAX_SCHEDULE_LSB))
		dev_dbg(xsdfec->dev, "Max Schedule exceeds 2 bits");
	max_schedule = ((max_schedule << XSDFEC_REG2_MAX_SCHEDULE_LSB) &
			XSDFEC_REG2_MAX_SCHEDULE_MASK);

	wdata = (max_schedule | no_final_parity | special_qc | norm_type |
		 nmqc | nlayers);

	if (XSDFEC_LDPC_CODE_REG2_ADDR_BASE + (offset * XSDFEC_LDPC_REG_JUMP) >
	    XSDFEC_LDPC_CODE_REG2_ADDR_HIGH) {
		dev_dbg(xsdfec->dev, "Writing outside of LDPC reg2 space 0x%x",
			XSDFEC_LDPC_CODE_REG2_ADDR_BASE +
				(offset * XSDFEC_LDPC_REG_JUMP));
		return -EINVAL;
	}
	xsdfec_regwrite(xsdfec,
			XSDFEC_LDPC_CODE_REG2_ADDR_BASE +
				(offset * XSDFEC_LDPC_REG_JUMP),
			wdata);
	return 0;
}

static int xsdfec_reg3_write(struct xsdfec_dev *xsdfec, u8 sc_off, u8 la_off,
			     u16 qc_off, u32 offset)
{
	u32 wdata;

	wdata = ((qc_off << XSDFEC_REG3_QC_OFF_LSB) |
		 (la_off << XSDFEC_REG3_LA_OFF_LSB) | sc_off);
	if (XSDFEC_LDPC_CODE_REG3_ADDR_BASE + (offset * XSDFEC_LDPC_REG_JUMP) >
	    XSDFEC_LDPC_CODE_REG3_ADDR_HIGH) {
		dev_dbg(xsdfec->dev, "Writing outside of LDPC reg3 space 0x%x",
			XSDFEC_LDPC_CODE_REG3_ADDR_BASE +
				(offset * XSDFEC_LDPC_REG_JUMP));
		return -EINVAL;
	}
	xsdfec_regwrite(xsdfec,
			XSDFEC_LDPC_CODE_REG3_ADDR_BASE +
				(offset * XSDFEC_LDPC_REG_JUMP),
			wdata);
	return 0;
}

static int xsdfec_table_write(struct xsdfec_dev *xsdfec, u32 offset,
			      u32 *src_ptr, u32 len, const u32 base_addr,
			      const u32 depth)
{
	u32 reg = 0;
	u32 n;
	u32 *addr = NULL;
	int res, i, nr_pages;
	struct page *pages[MAX_NUM_PAGES];

	/*
	 * Writes that go beyond the length of
	 * Shared Scale(SC) table should fail
	 */
	if (offset > depth / XSDFEC_REG_WIDTH_JUMP ||
	    len > depth / XSDFEC_REG_WIDTH_JUMP ||
	    offset + len > depth / XSDFEC_REG_WIDTH_JUMP) {
		dev_dbg(xsdfec->dev, "Write exceeds SC table length");
		return -EINVAL;
	}

	n = (len * XSDFEC_REG_WIDTH_JUMP) / PAGE_SIZE;
	if ((len * XSDFEC_REG_WIDTH_JUMP) % PAGE_SIZE)
		n += 1;

	if (WARN_ON_ONCE(n > INT_MAX))
		return -EINVAL;

	nr_pages = n;

	res = get_user_pages_fast((unsigned long)src_ptr, nr_pages, 0, pages);
	if (res < nr_pages) {
		if (res > 0) {
			for (i = 0; i < res; i++)
				put_page(pages[i]);
		}
		return -EINVAL;
	}

	for (i = 0; i < nr_pages; i++) {
		addr = kmap(pages[i]);
		do {
			xsdfec_regwrite(xsdfec,
					base_addr + ((offset + reg) *
						     XSDFEC_REG_WIDTH_JUMP),
					addr[reg]);
			reg++;
		} while ((reg < len) &&
			 ((reg * XSDFEC_REG_WIDTH_JUMP) % PAGE_SIZE));
		put_page(pages[i]);
	}
	return 0;
}

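/*
 * The loop above walks the pinned pages one PAGE_SIZE chunk at a time,
 * writing one 32-bit table word per XSDFEC_REG_WIDTH_JUMP of register
 * space. A worked example with hypothetical numbers: for len = 2000 words
 * and a 4 KiB page, len * XSDFEC_REG_WIDTH_JUMP = 8000 bytes, so n = 1 with
 * a remainder, giving nr_pages = 2 pages for get_user_pages_fast() to pin.
 */
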
static int xsdfec_add_ldpc(struct xsdfec_dev *xsdfec, void __user *arg)
{
	struct xsdfec_ldpc_params *ldpc;
	int ret, n;

	ldpc = kzalloc(sizeof(*ldpc), GFP_KERNEL);
	if (!ldpc)
		return -ENOMEM;

	if (copy_from_user(ldpc, arg, sizeof(*ldpc))) {
		ret = -EFAULT;
		goto err_out;
	}

	if (xsdfec->config.code == XSDFEC_TURBO_CODE) {
		ret = -EIO;
		goto err_out;
	}

	/* Verify Device has not started */
	if (xsdfec->state == XSDFEC_STARTED) {
		ret = -EIO;
		goto err_out;
	}

	if (xsdfec->config.code_wr_protect) {
		ret = -EIO;
		goto err_out;
	}

	/* Write Reg 0 */
	ret = xsdfec_reg0_write(xsdfec, ldpc->n, ldpc->k, ldpc->psize,
				ldpc->code_id);
	if (ret)
		goto err_out;

	/* Write Reg 1 */
	ret = xsdfec_reg1_write(xsdfec, ldpc->psize, ldpc->no_packing, ldpc->nm,
				ldpc->code_id);
	if (ret)
		goto err_out;

	/* Write Reg 2 */
	ret = xsdfec_reg2_write(xsdfec, ldpc->nlayers, ldpc->nmqc,
				ldpc->norm_type, ldpc->special_qc,
				ldpc->no_final_parity, ldpc->max_schedule,
				ldpc->code_id);
	if (ret)
		goto err_out;

	/* Write Reg 3 */
	ret = xsdfec_reg3_write(xsdfec, ldpc->sc_off, ldpc->la_off,
				ldpc->qc_off, ldpc->code_id);
	if (ret)
		goto err_out;

	/* Write Shared Codes */
	n = ldpc->nlayers / 4;
	if (ldpc->nlayers % 4)
		n++;

	ret = xsdfec_table_write(xsdfec, ldpc->sc_off, ldpc->sc_table, n,
				 XSDFEC_LDPC_SC_TABLE_ADDR_BASE,
				 XSDFEC_SC_TABLE_DEPTH);
	if (ret < 0)
		goto err_out;

	ret = xsdfec_table_write(xsdfec, 4 * ldpc->la_off, ldpc->la_table,
				 ldpc->nlayers, XSDFEC_LDPC_LA_TABLE_ADDR_BASE,
				 XSDFEC_LA_TABLE_DEPTH);
	if (ret < 0)
		goto err_out;

	ret = xsdfec_table_write(xsdfec, 4 * ldpc->qc_off, ldpc->qc_table,
				 ldpc->nqc, XSDFEC_LDPC_QC_TABLE_ADDR_BASE,
				 XSDFEC_QC_TABLE_DEPTH);

err_out:
	kfree(ldpc);
	return ret;
}

static int xsdfec_set_order(struct xsdfec_dev *xsdfec, void __user *arg)
{
	bool order_invalid;
	enum xsdfec_order order;
	int err;

	err = get_user(order, (enum xsdfec_order __user *)arg);
	if (err)
		return -EFAULT;

	order_invalid = (order != XSDFEC_MAINTAIN_ORDER) &&
			(order != XSDFEC_OUT_OF_ORDER);
	if (order_invalid)
		return -EINVAL;

	/* Verify Device has not started */
	if (xsdfec->state == XSDFEC_STARTED)
		return -EIO;

	xsdfec_regwrite(xsdfec, XSDFEC_ORDER_ADDR, order);

	xsdfec->config.order = order;

	return 0;
}

static int xsdfec_set_bypass(struct xsdfec_dev *xsdfec, bool __user *arg)
{
	bool bypass;
	int err;

	err = get_user(bypass, arg);
	if (err)
		return -EFAULT;

	/* Verify Device has not started */
	if (xsdfec->state == XSDFEC_STARTED)
		return -EIO;

	if (bypass)
		xsdfec_regwrite(xsdfec, XSDFEC_BYPASS_ADDR, 1);
	else
		xsdfec_regwrite(xsdfec, XSDFEC_BYPASS_ADDR, 0);

	xsdfec->config.bypass = bypass;

	return 0;
}

static int xsdfec_is_active(struct xsdfec_dev *xsdfec, bool __user *arg)
{
	u32 reg_value;
	bool is_active;
	int err;

	reg_value = xsdfec_regread(xsdfec, XSDFEC_ACTIVE_ADDR);
	/* using a double ! operator instead of casting */
	is_active = !!(reg_value & XSDFEC_IS_ACTIVITY_SET);
	err = put_user(is_active, arg);
	if (err)
		return -EFAULT;

	return err;
}

static u32
xsdfec_translate_axis_width_cfg_val(enum xsdfec_axis_width axis_width_cfg)
{
	u32 axis_width_field = 0;

	switch (axis_width_cfg) {
	case XSDFEC_1x128b:
		axis_width_field = 0;
		break;
	case XSDFEC_2x128b:
		axis_width_field = 1;
		break;
	case XSDFEC_4x128b:
		axis_width_field = 2;
		break;
	}

	return axis_width_field;
}

static u32 xsdfec_translate_axis_words_cfg_val(enum xsdfec_axis_word_include
	axis_word_inc_cfg)
{
	u32 axis_words_field = 0;

	if (axis_word_inc_cfg == XSDFEC_FIXED_VALUE ||
	    axis_word_inc_cfg == XSDFEC_IN_BLOCK)
		axis_words_field = 0;
	else if (axis_word_inc_cfg == XSDFEC_PER_AXI_TRANSACTION)
		axis_words_field = 1;

	return axis_words_field;
}

static int xsdfec_cfg_axi_streams(struct xsdfec_dev *xsdfec)
{
	u32 reg_value;
	u32 dout_words_field;
	u32 dout_width_field;
	u32 din_words_field;
	u32 din_width_field;
	struct xsdfec_config *config = &xsdfec->config;

	/* translate config info to register values */
	dout_words_field =
		xsdfec_translate_axis_words_cfg_val(config->dout_word_include);
	dout_width_field =
		xsdfec_translate_axis_width_cfg_val(config->dout_width);
	din_words_field =
		xsdfec_translate_axis_words_cfg_val(config->din_word_include);
	din_width_field =
		xsdfec_translate_axis_width_cfg_val(config->din_width);

	reg_value = dout_words_field << XSDFEC_AXIS_DOUT_WORDS_LSB;
	reg_value |= dout_width_field << XSDFEC_AXIS_DOUT_WIDTH_LSB;
	reg_value |= din_words_field << XSDFEC_AXIS_DIN_WORDS_LSB;
	reg_value |= din_width_field << XSDFEC_AXIS_DIN_WIDTH_LSB;

	xsdfec_regwrite(xsdfec, XSDFEC_AXIS_WIDTH_ADDR, reg_value);

	return 0;
}

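/*
 * Worked example of the AXIS_WIDTH packing above (hypothetical
 * configuration): with both words fields translated to 1 and both width
 * fields translated to 2, the register value becomes
 *
 *	(1 << XSDFEC_AXIS_DOUT_WORDS_LSB) | (2 << XSDFEC_AXIS_DOUT_WIDTH_LSB) |
 *	(1 << XSDFEC_AXIS_DIN_WORDS_LSB) | (2 << XSDFEC_AXIS_DIN_WIDTH_LSB)
 *	= 0x20 | 0x10 | 0x4 | 0x2 = 0x36
 */
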
static int xsdfec_dev_open(struct inode *iptr, struct file *fptr)
{
	return 0;
}

static int xsdfec_dev_release(struct inode *iptr, struct file *fptr)
{
	return 0;
}

static int xsdfec_start(struct xsdfec_dev *xsdfec)
{
	u32 regread;

	regread = xsdfec_regread(xsdfec, XSDFEC_FEC_CODE_ADDR);
	regread &= 0x1;
	if (regread != xsdfec->config.code) {
		dev_dbg(xsdfec->dev,
			"%s SDFEC HW code does not match driver code, reg %d, code %d",
			__func__, regread, xsdfec->config.code);
		return -EINVAL;
	}

	/* Set AXIS enable */
	xsdfec_regwrite(xsdfec, XSDFEC_AXIS_ENABLE_ADDR,
			XSDFEC_AXIS_ENABLE_MASK);
	/* Done */
	xsdfec->state = XSDFEC_STARTED;
	return 0;
}

static int xsdfec_stop(struct xsdfec_dev *xsdfec)
{
	u32 regread;

	if (xsdfec->state != XSDFEC_STARTED)
		dev_dbg(xsdfec->dev, "Device not started correctly");
	/* Disable AXIS_ENABLE Input interfaces only */
	regread = xsdfec_regread(xsdfec, XSDFEC_AXIS_ENABLE_ADDR);
	regread &= (~XSDFEC_AXIS_IN_ENABLE_MASK);
	xsdfec_regwrite(xsdfec, XSDFEC_AXIS_ENABLE_ADDR, regread);
	/* Stop */
	xsdfec->state = XSDFEC_STOPPED;
	return 0;
}

static int xsdfec_clear_stats(struct xsdfec_dev *xsdfec)
{
	spin_lock_irqsave(&xsdfec->error_data_lock, xsdfec->flags);
	xsdfec->isr_err_count = 0;
	xsdfec->uecc_count = 0;
	xsdfec->cecc_count = 0;
	spin_unlock_irqrestore(&xsdfec->error_data_lock, xsdfec->flags);

	return 0;
}

static int xsdfec_get_stats(struct xsdfec_dev *xsdfec, void __user *arg)
{
	int err;
	struct xsdfec_stats user_stats;

	spin_lock_irqsave(&xsdfec->error_data_lock, xsdfec->flags);
	user_stats.isr_err_count = xsdfec->isr_err_count;
	user_stats.cecc_count = xsdfec->cecc_count;
	user_stats.uecc_count = xsdfec->uecc_count;
	xsdfec->stats_updated = false;
	spin_unlock_irqrestore(&xsdfec->error_data_lock, xsdfec->flags);

	err = copy_to_user(arg, &user_stats, sizeof(user_stats));
	if (err)
		err = -EFAULT;

	return err;
}

static int xsdfec_set_default_config(struct xsdfec_dev *xsdfec)
{
	/* Ensure registers are aligned with core configuration */
	xsdfec_regwrite(xsdfec, XSDFEC_FEC_CODE_ADDR, xsdfec->config.code);
	xsdfec_cfg_axi_streams(xsdfec);
	update_config_from_hw(xsdfec);

	return 0;
}

static long xsdfec_dev_ioctl(struct file *fptr, unsigned int cmd,
			     unsigned long data)
{
	struct xsdfec_dev *xsdfec;
	void __user *arg = NULL;
	int rval = -EINVAL;

	xsdfec = container_of(fptr->private_data, struct xsdfec_dev, miscdev);

	/* In failed state allow only reset and get status IOCTLs */
	if (xsdfec->state == XSDFEC_NEEDS_RESET &&
	    (cmd != XSDFEC_SET_DEFAULT_CONFIG && cmd != XSDFEC_GET_STATUS &&
	     cmd != XSDFEC_GET_STATS && cmd != XSDFEC_CLEAR_STATS)) {
		return -EPERM;
	}

	if (_IOC_TYPE(cmd) != XSDFEC_MAGIC)
		return -ENOTTY;

	/* check if ioctl argument is present and valid */
	if (_IOC_DIR(cmd) != _IOC_NONE) {
		arg = (void __user *)data;
		if (!arg)
			return rval;
	}

	switch (cmd) {
	case XSDFEC_START_DEV:
		rval = xsdfec_start(xsdfec);
		break;
	case XSDFEC_STOP_DEV:
		rval = xsdfec_stop(xsdfec);
		break;
	case XSDFEC_CLEAR_STATS:
		rval = xsdfec_clear_stats(xsdfec);
		break;
	case XSDFEC_GET_STATS:
		rval = xsdfec_get_stats(xsdfec, arg);
		break;
	case XSDFEC_GET_STATUS:
		rval = xsdfec_get_status(xsdfec, arg);
		break;
	case XSDFEC_GET_CONFIG:
		rval = xsdfec_get_config(xsdfec, arg);
		break;
	case XSDFEC_SET_DEFAULT_CONFIG:
		rval = xsdfec_set_default_config(xsdfec);
		break;
	case XSDFEC_SET_IRQ:
		rval = xsdfec_set_irq(xsdfec, arg);
		break;
	case XSDFEC_SET_TURBO:
		rval = xsdfec_set_turbo(xsdfec, arg);
		break;
	case XSDFEC_GET_TURBO:
		rval = xsdfec_get_turbo(xsdfec, arg);
		break;
	case XSDFEC_ADD_LDPC_CODE_PARAMS:
		rval = xsdfec_add_ldpc(xsdfec, arg);
		break;
	case XSDFEC_SET_ORDER:
		rval = xsdfec_set_order(xsdfec, arg);
		break;
	case XSDFEC_SET_BYPASS:
		rval = xsdfec_set_bypass(xsdfec, arg);
		break;
	case XSDFEC_IS_ACTIVE:
		rval = xsdfec_is_active(xsdfec, (bool __user *)arg);
		break;
	default:
		/* Should not get here */
		break;
	}
	return rval;
}

#ifdef CONFIG_COMPAT
static long xsdfec_dev_compat_ioctl(struct file *file, unsigned int cmd,
				    unsigned long data)
{
	return xsdfec_dev_ioctl(file, cmd, (unsigned long)compat_ptr(data));
}
#endif

static __poll_t xsdfec_poll(struct file *file, poll_table *wait)
{
	__poll_t mask = 0;
	struct xsdfec_dev *xsdfec;

	xsdfec = container_of(file->private_data, struct xsdfec_dev, miscdev);

	if (!xsdfec)
		return EPOLLNVAL | EPOLLHUP;

	poll_wait(file, &xsdfec->waitq, wait);

	/* XSDFEC ISR detected an error */
	spin_lock_irqsave(&xsdfec->error_data_lock, xsdfec->flags);
	if (xsdfec->state_updated)
		mask |= EPOLLIN | EPOLLPRI;

	if (xsdfec->stats_updated)
		mask |= EPOLLIN | EPOLLRDNORM;
	spin_unlock_irqrestore(&xsdfec->error_data_lock, xsdfec->flags);

	return mask;
}

static const struct file_operations xsdfec_fops = {
	.owner = THIS_MODULE,
	.open = xsdfec_dev_open,
	.release = xsdfec_dev_release,
	.unlocked_ioctl = xsdfec_dev_ioctl,
	.poll = xsdfec_poll,
#ifdef CONFIG_COMPAT
	.compat_ioctl = xsdfec_dev_compat_ioctl,
#endif
};

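/*
 * Minimal user-space usage sketch for the char device exposed above
 * (illustrative only; the device node name and the lack of error handling
 * are assumptions):
 *
 *	int fd = open("/dev/xsdfec0", O_RDWR);
 *	enum xsdfec_order order = XSDFEC_MAINTAIN_ORDER;
 *	struct xsdfec_status st;
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN | POLLPRI };
 *
 *	ioctl(fd, XSDFEC_SET_ORDER, &order);
 *	ioctl(fd, XSDFEC_START_DEV);
 *	// block until the ISR reports a state or stats update
 *	if (poll(&pfd, 1, -1) > 0)
 *		ioctl(fd, XSDFEC_GET_STATUS, &st);
 */
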
static int xsdfec_parse_of(struct xsdfec_dev *xsdfec)
{
	struct device *dev = xsdfec->dev;
	struct device_node *node = dev->of_node;
	int rval;
	const char *fec_code;
	u32 din_width;
	u32 din_word_include;
	u32 dout_width;
	u32 dout_word_include;

	rval = of_property_read_string(node, "xlnx,sdfec-code", &fec_code);
	if (rval < 0)
		return rval;

	if (!strcasecmp(fec_code, "ldpc"))
		xsdfec->config.code = XSDFEC_LDPC_CODE;
	else if (!strcasecmp(fec_code, "turbo"))
		xsdfec->config.code = XSDFEC_TURBO_CODE;
	else
		return -EINVAL;

	rval = of_property_read_u32(node, "xlnx,sdfec-din-words",
				    &din_word_include);
	if (rval < 0)
		return rval;

	if (din_word_include < XSDFEC_AXIS_WORDS_INCLUDE_MAX)
		xsdfec->config.din_word_include = din_word_include;
	else
		return -EINVAL;

	rval = of_property_read_u32(node, "xlnx,sdfec-din-width", &din_width);
	if (rval < 0)
		return rval;

	switch (din_width) {
	/* Fall through and set for valid values */
	case XSDFEC_1x128b:
	case XSDFEC_2x128b:
	case XSDFEC_4x128b:
		xsdfec->config.din_width = din_width;
		break;
	default:
		return -EINVAL;
	}

	rval = of_property_read_u32(node, "xlnx,sdfec-dout-words",
				    &dout_word_include);
	if (rval < 0)
		return rval;

	if (dout_word_include < XSDFEC_AXIS_WORDS_INCLUDE_MAX)
		xsdfec->config.dout_word_include = dout_word_include;
	else
		return -EINVAL;

	rval = of_property_read_u32(node, "xlnx,sdfec-dout-width", &dout_width);
	if (rval < 0)
		return rval;

	switch (dout_width) {
	/* Fall through and set for valid values */
	case XSDFEC_1x128b:
	case XSDFEC_2x128b:
	case XSDFEC_4x128b:
		xsdfec->config.dout_width = dout_width;
		break;
	default:
		return -EINVAL;
	}

	/* Write LDPC to CODE Register */
	xsdfec_regwrite(xsdfec, XSDFEC_FEC_CODE_ADDR, xsdfec->config.code);

	xsdfec_cfg_axi_streams(xsdfec);

	return 0;
}

static irqreturn_t xsdfec_irq_thread(int irq, void *dev_id)
{
	struct xsdfec_dev *xsdfec = dev_id;
	irqreturn_t ret = IRQ_HANDLED;
	u32 ecc_err;
	u32 isr_err;
	u32 uecc_count;
	u32 cecc_count;
	u32 isr_err_count;
	u32 aecc_count;
	u32 tmp;

	WARN_ON(xsdfec->irq != irq);

	/* Mask Interrupts */
	xsdfec_isr_enable(xsdfec, false);
	xsdfec_ecc_isr_enable(xsdfec, false);
	/* Read interrupt status */
	ecc_err = xsdfec_regread(xsdfec, XSDFEC_ECC_ISR_ADDR);
	isr_err = xsdfec_regread(xsdfec, XSDFEC_ISR_ADDR);
	/* Clear the interrupts */
	xsdfec_regwrite(xsdfec, XSDFEC_ECC_ISR_ADDR, ecc_err);
	xsdfec_regwrite(xsdfec, XSDFEC_ISR_ADDR, isr_err);

	tmp = ecc_err & XSDFEC_ALL_ECC_ISR_MBE_MASK;
	/* Count uncorrectable 2-bit errors */
	uecc_count = hweight32(tmp);
	/* Count all ECC errors */
	aecc_count = hweight32(ecc_err);
	/* Number of correctable 1-bit ECC error */
	cecc_count = aecc_count - 2 * uecc_count;
	/* Count ISR errors */
	isr_err_count = hweight32(isr_err);
	dev_dbg(xsdfec->dev, "tmp=%x, uecc=%x, aecc=%x, cecc=%x, isr=%x", tmp,
		uecc_count, aecc_count, cecc_count, isr_err_count);
	dev_dbg(xsdfec->dev, "uecc=%x, cecc=%x, isr=%x", xsdfec->uecc_count,
		xsdfec->cecc_count, xsdfec->isr_err_count);

	spin_lock_irqsave(&xsdfec->error_data_lock, xsdfec->flags);
	/* Add new errors to a 2-bits counter */
	if (uecc_count)
		xsdfec->uecc_count += uecc_count;
	/* Add new errors to a 1-bits counter */
	if (cecc_count)
		xsdfec->cecc_count += cecc_count;
	/* Add new errors to a ISR counter */
	if (isr_err_count)
		xsdfec->isr_err_count += isr_err_count;

	/* Update state/stats flag */
	if (uecc_count) {
		if (ecc_err & XSDFEC_ECC_ISR_MBE_MASK)
			xsdfec->state = XSDFEC_NEEDS_RESET;
		else if (ecc_err & XSDFEC_PL_INIT_ECC_ISR_MBE_MASK)
			xsdfec->state = XSDFEC_PL_RECONFIGURE;
		xsdfec->stats_updated = true;
		xsdfec->state_updated = true;
	}

	if (cecc_count)
		xsdfec->stats_updated = true;

	if (isr_err_count) {
		xsdfec->state = XSDFEC_NEEDS_RESET;
		xsdfec->stats_updated = true;
		xsdfec->state_updated = true;
	}

	spin_unlock_irqrestore(&xsdfec->error_data_lock, xsdfec->flags);
	dev_dbg(xsdfec->dev, "state=%x, stats=%x", xsdfec->state_updated,
		xsdfec->stats_updated);

	/* Enable another polling */
	if (xsdfec->state_updated || xsdfec->stats_updated)
		wake_up_interruptible(&xsdfec->waitq);
	else
		ret = IRQ_NONE;

	/* Unmask Interrupts */
	xsdfec_isr_enable(xsdfec, true);
	xsdfec_ecc_isr_enable(xsdfec, true);

	return ret;
}

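/*
 * Worked example of the ECC accounting above, with a hypothetical ISR
 * value: if ecc_err has one bit set inside XSDFEC_ALL_ECC_ISR_MBE_MASK and
 * one bit set inside XSDFEC_ALL_ECC_ISR_SBE_MASK, then
 *
 *	uecc_count = hweight32(tmp)     = 1;	// multi-bit (uncorrectable)
 *	aecc_count = hweight32(ecc_err) = 2;	// every ECC status bit
 *	cecc_count = aecc_count - 2 * uecc_count = 0;
 *
 * The "aecc - 2 * uecc" formula treats each multi-bit error as also raising
 * its paired single-bit status bit; only the remainder is counted as
 * correctable single-bit errors.
 */
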
static int xsdfec_clk_init(struct platform_device *pdev,
			   struct xsdfec_clks *clks)
{
	int err;

	clks->core_clk = devm_clk_get(&pdev->dev, "core_clk");
	if (IS_ERR(clks->core_clk)) {
		dev_err(&pdev->dev, "failed to get core_clk");
		return PTR_ERR(clks->core_clk);
	}

	clks->axi_clk = devm_clk_get(&pdev->dev, "s_axi_aclk");
	if (IS_ERR(clks->axi_clk)) {
		dev_err(&pdev->dev, "failed to get axi_clk");
		return PTR_ERR(clks->axi_clk);
	}

	clks->din_words_clk = devm_clk_get(&pdev->dev, "s_axis_din_words_aclk");
	if (IS_ERR(clks->din_words_clk)) {
		if (PTR_ERR(clks->din_words_clk) != -ENOENT) {
			err = PTR_ERR(clks->din_words_clk);
			return err;
		}
		clks->din_words_clk = NULL;
	}

	clks->din_clk = devm_clk_get(&pdev->dev, "s_axis_din_aclk");
	if (IS_ERR(clks->din_clk)) {
		if (PTR_ERR(clks->din_clk) != -ENOENT) {
			err = PTR_ERR(clks->din_clk);
			return err;
		}
		clks->din_clk = NULL;
	}

	clks->dout_clk = devm_clk_get(&pdev->dev, "m_axis_dout_aclk");
	if (IS_ERR(clks->dout_clk)) {
		if (PTR_ERR(clks->dout_clk) != -ENOENT) {
			err = PTR_ERR(clks->dout_clk);
			return err;
		}
		clks->dout_clk = NULL;
	}

	clks->dout_words_clk =
		devm_clk_get(&pdev->dev, "s_axis_dout_words_aclk");
	if (IS_ERR(clks->dout_words_clk)) {
		if (PTR_ERR(clks->dout_words_clk) != -ENOENT) {
			err = PTR_ERR(clks->dout_words_clk);
			return err;
		}
		clks->dout_words_clk = NULL;
	}

	clks->ctrl_clk = devm_clk_get(&pdev->dev, "s_axis_ctrl_aclk");
	if (IS_ERR(clks->ctrl_clk)) {
		if (PTR_ERR(clks->ctrl_clk) != -ENOENT) {
			err = PTR_ERR(clks->ctrl_clk);
			return err;
		}
		clks->ctrl_clk = NULL;
	}

	clks->status_clk = devm_clk_get(&pdev->dev, "m_axis_status_aclk");
	if (IS_ERR(clks->status_clk)) {
		if (PTR_ERR(clks->status_clk) != -ENOENT) {
			err = PTR_ERR(clks->status_clk);
			return err;
		}
		clks->status_clk = NULL;
	}

	err = clk_prepare_enable(clks->core_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable core_clk (%d)", err);
		return err;
	}

	err = clk_prepare_enable(clks->axi_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable axi_clk (%d)", err);
		goto err_disable_core_clk;
	}

	err = clk_prepare_enable(clks->din_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable din_clk (%d)", err);
		goto err_disable_axi_clk;
	}

	err = clk_prepare_enable(clks->din_words_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable din_words_clk (%d)", err);
		goto err_disable_din_clk;
	}

	err = clk_prepare_enable(clks->dout_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable dout_clk (%d)", err);
		goto err_disable_din_words_clk;
	}

	err = clk_prepare_enable(clks->dout_words_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable dout_words_clk (%d)",
			err);
		goto err_disable_dout_clk;
	}

	err = clk_prepare_enable(clks->ctrl_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable ctrl_clk (%d)", err);
		goto err_disable_dout_words_clk;
	}

	err = clk_prepare_enable(clks->status_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable status_clk (%d)\n", err);
		goto err_disable_ctrl_clk;
	}

	return err;

err_disable_ctrl_clk:
	clk_disable_unprepare(clks->ctrl_clk);
err_disable_dout_words_clk:
	clk_disable_unprepare(clks->dout_words_clk);
err_disable_dout_clk:
	clk_disable_unprepare(clks->dout_clk);
err_disable_din_words_clk:
	clk_disable_unprepare(clks->din_words_clk);
err_disable_din_clk:
	clk_disable_unprepare(clks->din_clk);
err_disable_axi_clk:
	clk_disable_unprepare(clks->axi_clk);
err_disable_core_clk:
	clk_disable_unprepare(clks->core_clk);

	return err;
}

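/*
 * The AXI4-Stream clocks above are treated as optional: a devm_clk_get()
 * return of -ENOENT simply leaves the clock pointer NULL, and
 * clk_prepare_enable(NULL)/clk_disable_unprepare(NULL) are no-ops, so only
 * the interfaces actually configured in the IP need clock entries in the
 * device tree. core_clk and s_axi_aclk are the only mandatory clocks.
 */
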
static void xsdfec_disable_all_clks(struct xsdfec_clks *clks)
{
	clk_disable_unprepare(clks->status_clk);
	clk_disable_unprepare(clks->ctrl_clk);
	clk_disable_unprepare(clks->dout_words_clk);
	clk_disable_unprepare(clks->dout_clk);
	clk_disable_unprepare(clks->din_words_clk);
	clk_disable_unprepare(clks->din_clk);
	clk_disable_unprepare(clks->core_clk);
	clk_disable_unprepare(clks->axi_clk);
}

static int xsdfec_probe(struct platform_device *pdev)
{
	struct xsdfec_dev *xsdfec;
	struct device *dev;
	struct resource *res;
	int err;
	bool irq_enabled = true;

	xsdfec = devm_kzalloc(&pdev->dev, sizeof(*xsdfec), GFP_KERNEL);
	if (!xsdfec)
		return -ENOMEM;

	xsdfec->dev = &pdev->dev;
	spin_lock_init(&xsdfec->error_data_lock);

	err = xsdfec_clk_init(pdev, &xsdfec->clks);
	if (err)
		return err;

	dev = xsdfec->dev;
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	xsdfec->regs = devm_ioremap_resource(dev, res);
	if (IS_ERR(xsdfec->regs)) {
		err = PTR_ERR(xsdfec->regs);
		goto err_xsdfec_dev;
	}

	xsdfec->irq = platform_get_irq(pdev, 0);
	if (xsdfec->irq < 0) {
		dev_dbg(dev, "platform_get_irq failed");
		irq_enabled = false;
	}

	err = xsdfec_parse_of(xsdfec);
	if (err < 0)
		goto err_xsdfec_dev;

	update_config_from_hw(xsdfec);

	/* Save driver private data */
	platform_set_drvdata(pdev, xsdfec);

	if (irq_enabled) {
		init_waitqueue_head(&xsdfec->waitq);
		/* Register IRQ thread */
		err = devm_request_threaded_irq(dev, xsdfec->irq, NULL,
						xsdfec_irq_thread, IRQF_ONESHOT,
						"xilinx-sdfec16", xsdfec);
		if (err < 0) {
			dev_err(dev, "unable to request IRQ%d", xsdfec->irq);
			goto err_xsdfec_dev;
		}
	}

	err = ida_alloc(&dev_nrs, GFP_KERNEL);
	if (err < 0)
		goto err_xsdfec_dev;
	xsdfec->dev_id = err;

	snprintf(xsdfec->dev_name, DEV_NAME_LEN, "xsdfec%d", xsdfec->dev_id);
	xsdfec->miscdev.minor = MISC_DYNAMIC_MINOR;
	xsdfec->miscdev.name = xsdfec->dev_name;
	xsdfec->miscdev.fops = &xsdfec_fops;
	xsdfec->miscdev.parent = dev;
	err = misc_register(&xsdfec->miscdev);
	if (err) {
		dev_err(dev, "error:%d. Unable to register device", err);
		goto err_xsdfec_ida;
	}
	return 0;

err_xsdfec_ida:
	ida_free(&dev_nrs, xsdfec->dev_id);
err_xsdfec_dev:
	xsdfec_disable_all_clks(&xsdfec->clks);
	return err;
}

static int xsdfec_remove(struct platform_device *pdev)
{
	struct xsdfec_dev *xsdfec;

	xsdfec = platform_get_drvdata(pdev);
	misc_deregister(&xsdfec->miscdev);
	ida_free(&dev_nrs, xsdfec->dev_id);
	xsdfec_disable_all_clks(&xsdfec->clks);
	return 0;
}

static const struct of_device_id xsdfec_of_match[] = {
	{
		.compatible = "xlnx,sd-fec-1.1",
	},
	{ /* end of table */ }
};
MODULE_DEVICE_TABLE(of, xsdfec_of_match);

static struct platform_driver xsdfec_driver = {
	.driver = {
		   .name = "xilinx-sdfec",
		   .of_match_table = xsdfec_of_match,
		   },
	.probe = xsdfec_probe,
	.remove = xsdfec_remove,
};

static int __init xsdfec_init(void)
{
	int err;

	err = platform_driver_register(&xsdfec_driver);
	if (err < 0) {
		pr_err("%s Unable to register SDFEC driver", __func__);
		return err;
	}
	return 0;
}

static void __exit xsdfec_exit(void)
{
	platform_driver_unregister(&xsdfec_driver);
}

module_init(xsdfec_init);
module_exit(xsdfec_exit);

MODULE_AUTHOR("Xilinx, Inc");
MODULE_DESCRIPTION("Xilinx SD-FEC16 Driver");
MODULE_LICENSE("GPL");