// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2019 Xilinx, Inc.
 *
 * This driver is developed for SDFEC16 (Soft Decision FEC 16nm)
 * IP. It exposes a char device which supports file operations
 * like open(), close() and ioctl().
 */

#include <linux/miscdevice.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/compat.h>
#include <linux/highmem.h>

#include <uapi/misc/xilinx_sdfec.h>

#define DEV_NAME_LEN 12

static DEFINE_IDA(dev_nrs);

/* Xilinx SDFEC Register Map */
/* CODE_WRI_PROTECT Register */
#define XSDFEC_CODE_WR_PROTECT_ADDR (0x4)

#define XSDFEC_ACTIVE_ADDR (0x8)
#define XSDFEC_IS_ACTIVITY_SET (0x1)

/* AXIS_WIDTH Register */
#define XSDFEC_AXIS_WIDTH_ADDR (0xC)
#define XSDFEC_AXIS_DOUT_WORDS_LSB (5)
#define XSDFEC_AXIS_DOUT_WIDTH_LSB (3)
#define XSDFEC_AXIS_DIN_WORDS_LSB (2)
#define XSDFEC_AXIS_DIN_WIDTH_LSB (0)

/* AXIS_ENABLE Register */
#define XSDFEC_AXIS_ENABLE_ADDR (0x10)
#define XSDFEC_AXIS_OUT_ENABLE_MASK (0x38)
#define XSDFEC_AXIS_IN_ENABLE_MASK (0x7)
#define XSDFEC_AXIS_ENABLE_MASK \
	(XSDFEC_AXIS_OUT_ENABLE_MASK | XSDFEC_AXIS_IN_ENABLE_MASK)

/* FEC_CODE Register */
#define XSDFEC_FEC_CODE_ADDR (0x14)

/* ORDER Register Map */
#define XSDFEC_ORDER_ADDR (0x18)

/* Interrupt Status Register */
#define XSDFEC_ISR_ADDR (0x1C)
/* Interrupt Status Register Bit Mask */
#define XSDFEC_ISR_MASK (0x3F)

/* Write Only - Interrupt Enable Register */
#define XSDFEC_IER_ADDR (0x20)
/* Write Only - Interrupt Disable Register */
#define XSDFEC_IDR_ADDR (0x24)
/* Read Only - Interrupt Mask Register */
#define XSDFEC_IMR_ADDR (0x28)

/* ECC Interrupt Status Register */
#define XSDFEC_ECC_ISR_ADDR (0x2C)
/* Single Bit Errors */
#define XSDFEC_ECC_ISR_SBE_MASK (0x7FF)
/* PL Initialize Single Bit Errors */
#define XSDFEC_PL_INIT_ECC_ISR_SBE_MASK (0x3C00000)
/* Multi Bit Errors */
#define XSDFEC_ECC_ISR_MBE_MASK (0x3FF800)
/* PL Initialize Multi Bit Errors */
#define XSDFEC_PL_INIT_ECC_ISR_MBE_MASK (0x3C000000)
/* Multi Bit Error to Event Shift */
#define XSDFEC_ECC_ISR_MBE_TO_EVENT_SHIFT (11)
/* PL Initialize Multi Bit Error to Event Shift */
#define XSDFEC_PL_INIT_ECC_ISR_MBE_TO_EVENT_SHIFT (4)
/* ECC Interrupt Status Bit Mask */
#define XSDFEC_ECC_ISR_MASK (XSDFEC_ECC_ISR_SBE_MASK | XSDFEC_ECC_ISR_MBE_MASK)
/* ECC Interrupt Status PL Initialize Bit Mask */
#define XSDFEC_PL_INIT_ECC_ISR_MASK \
	(XSDFEC_PL_INIT_ECC_ISR_SBE_MASK | XSDFEC_PL_INIT_ECC_ISR_MBE_MASK)
/* ECC Interrupt Status All Bit Mask */
#define XSDFEC_ALL_ECC_ISR_MASK \
	(XSDFEC_ECC_ISR_MASK | XSDFEC_PL_INIT_ECC_ISR_MASK)
/* ECC Interrupt Status Single Bit Errors Mask */
#define XSDFEC_ALL_ECC_ISR_SBE_MASK \
	(XSDFEC_ECC_ISR_SBE_MASK | XSDFEC_PL_INIT_ECC_ISR_SBE_MASK)
/* ECC Interrupt Status Multi Bit Errors Mask */
#define XSDFEC_ALL_ECC_ISR_MBE_MASK \
	(XSDFEC_ECC_ISR_MBE_MASK | XSDFEC_PL_INIT_ECC_ISR_MBE_MASK)

/* Write Only - ECC Interrupt Enable Register */
#define XSDFEC_ECC_IER_ADDR (0x30)
/* Write Only - ECC Interrupt Disable Register */
#define XSDFEC_ECC_IDR_ADDR (0x34)
/* Read Only - ECC Interrupt Mask Register */
#define XSDFEC_ECC_IMR_ADDR (0x38)

/* BYPASS Register */
#define XSDFEC_BYPASS_ADDR (0x3C)

/* Turbo Code Register */
#define XSDFEC_TURBO_ADDR (0x100)
#define XSDFEC_TURBO_SCALE_MASK (0xFFF)
#define XSDFEC_TURBO_SCALE_BIT_POS (8)
#define XSDFEC_TURBO_SCALE_MAX (15)

#define XSDFEC_LDPC_CODE_REG0_ADDR_BASE (0x2000)
#define XSDFEC_LDPC_CODE_REG0_ADDR_HIGH (0x27F0)
#define XSDFEC_REG0_N_MIN (4)
#define XSDFEC_REG0_N_MAX (32768)
#define XSDFEC_REG0_N_MUL_P (256)
#define XSDFEC_REG0_N_LSB (0)
#define XSDFEC_REG0_K_MIN (2)
#define XSDFEC_REG0_K_MAX (32766)
#define XSDFEC_REG0_K_MUL_P (256)
#define XSDFEC_REG0_K_LSB (16)

#define XSDFEC_LDPC_CODE_REG1_ADDR_BASE (0x2004)
#define XSDFEC_LDPC_CODE_REG1_ADDR_HIGH (0x27f4)
#define XSDFEC_REG1_PSIZE_MIN (2)
#define XSDFEC_REG1_PSIZE_MAX (512)
#define XSDFEC_REG1_NO_PACKING_MASK (0x400)
#define XSDFEC_REG1_NO_PACKING_LSB (10)
#define XSDFEC_REG1_NM_MASK (0xFF800)
#define XSDFEC_REG1_NM_LSB (11)
#define XSDFEC_REG1_BYPASS_MASK (0x100000)

#define XSDFEC_LDPC_CODE_REG2_ADDR_BASE (0x2008)
#define XSDFEC_LDPC_CODE_REG2_ADDR_HIGH (0x27f8)
#define XSDFEC_REG2_NLAYERS_MIN (1)
#define XSDFEC_REG2_NLAYERS_MAX (256)
#define XSDFEC_REG2_NNMQC_MASK (0xFFE00)
#define XSDFEC_REG2_NMQC_LSB (9)
#define XSDFEC_REG2_NORM_TYPE_MASK (0x100000)
#define XSDFEC_REG2_NORM_TYPE_LSB (20)
#define XSDFEC_REG2_SPECIAL_QC_MASK (0x200000)
#define XSDFEC_REG2_SPEICAL_QC_LSB (21)
#define XSDFEC_REG2_NO_FINAL_PARITY_MASK (0x400000)
#define XSDFEC_REG2_NO_FINAL_PARITY_LSB (22)
#define XSDFEC_REG2_MAX_SCHEDULE_MASK (0x1800000)
#define XSDFEC_REG2_MAX_SCHEDULE_LSB (23)

#define XSDFEC_LDPC_CODE_REG3_ADDR_BASE (0x200C)
#define XSDFEC_LDPC_CODE_REG3_ADDR_HIGH (0x27FC)
#define XSDFEC_REG3_LA_OFF_LSB (8)
#define XSDFEC_REG3_QC_OFF_LSB (16)

#define XSDFEC_LDPC_REG_JUMP (0x10)
#define XSDFEC_REG_WIDTH_JUMP (4)
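
/*
 * Note (derived from the register writers below): each LDPC code occupies
 * one XSDFEC_LDPC_REG_JUMP (0x10) stride of the REG0..REG3 windows, so the
 * REGn address used for a given code_id is
 * XSDFEC_LDPC_CODE_REGn_ADDR_BASE + (code_id * XSDFEC_LDPC_REG_JUMP);
 * for example, REG0 of code_id 2 lands at 0x2000 + 0x20 = 0x2020.
 */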
/* The maximum number of pinned pages */
#define MAX_NUM_PAGES ((XSDFEC_QC_TABLE_DEPTH / PAGE_SIZE) + 1)

/**
 * struct xsdfec_clks - For managing SD-FEC clocks
 * @core_clk: Main processing clock for core
 * @axi_clk: AXI4-Lite memory-mapped clock
 * @din_words_clk: DIN Words AXI4-Stream Slave clock
 * @din_clk: DIN AXI4-Stream Slave clock
 * @dout_clk: DOUT AXI4-Stream Slave clock
 * @dout_words_clk: DOUT Words AXI4-Stream Slave clock
 * @ctrl_clk: Control AXI4-Stream Slave clock
 * @status_clk: Status AXI4-Stream Slave clock
 */
struct xsdfec_clks {
	struct clk *core_clk;
	struct clk *axi_clk;
	struct clk *din_words_clk;
	struct clk *din_clk;
	struct clk *dout_clk;
	struct clk *dout_words_clk;
	struct clk *ctrl_clk;
	struct clk *status_clk;
};

/**
 * struct xsdfec_dev - Driver data for SDFEC
 * @miscdev: Misc device handle
 * @clks: Clocks managed by the SDFEC driver
 * @waitq: Driver wait queue
 * @config: Configuration of the SDFEC device
 * @dev_name: Device name
 * @flags: spinlock flags
 * @regs: device physical base address
 * @dev: pointer to device struct
 * @state: State of the SDFEC device
 * @error_data_lock: Error counter and states spinlock
 * @dev_id: Device ID
 * @isr_err_count: Count of ISR errors
 * @cecc_count: Count of Correctable ECC errors (SBE)
 * @uecc_count: Count of Uncorrectable ECC errors (MBE)
 * @irq: IRQ number
 * @state_updated: indicates State updated by interrupt handler
 * @stats_updated: indicates Stats updated by interrupt handler
 * @intr_enabled: indicates IRQ enabled
 *
 * This structure contains necessary state for SDFEC driver to operate
 */
struct xsdfec_dev {
	struct miscdevice miscdev;
	struct xsdfec_clks clks;
	wait_queue_head_t waitq;
	struct xsdfec_config config;
	char dev_name[DEV_NAME_LEN];
	unsigned long flags;
	void __iomem *regs;
	struct device *dev;
	enum xsdfec_state state;
	/* Spinlock to protect state_updated and stats_updated */
	spinlock_t error_data_lock;
	int dev_id;
	u32 isr_err_count;
	u32 cecc_count;
	u32 uecc_count;
	int irq;
	bool state_updated;
	bool stats_updated;
	bool intr_enabled;
};

static inline void xsdfec_regwrite(struct xsdfec_dev *xsdfec, u32 addr,
				   u32 value)
{
	dev_dbg(xsdfec->dev, "Writing 0x%x to offset 0x%x", value, addr);
	iowrite32(value, xsdfec->regs + addr);
}

static inline u32 xsdfec_regread(struct xsdfec_dev *xsdfec, u32 addr)
{
	u32 rval;

	rval = ioread32(xsdfec->regs + addr);
	dev_dbg(xsdfec->dev, "Read value = 0x%x from offset 0x%x", rval, addr);
	return rval;
}

static void update_bool_config_from_reg(struct xsdfec_dev *xsdfec,
					u32 reg_offset, u32 bit_num,
					char *config_value)
{
	u32 reg_val;
	u32 bit_mask = 1 << bit_num;

	reg_val = xsdfec_regread(xsdfec, reg_offset);
	*config_value = (reg_val & bit_mask) > 0;
}

static void update_config_from_hw(struct xsdfec_dev *xsdfec)
{
	u32 reg_value;
	bool sdfec_started;

	/* Update the Order */
	reg_value = xsdfec_regread(xsdfec, XSDFEC_ORDER_ADDR);
	xsdfec->config.order = reg_value;

	update_bool_config_from_reg(xsdfec, XSDFEC_BYPASS_ADDR,
				    0, /* Bit Number, maybe change to mask */
				    &xsdfec->config.bypass);

	update_bool_config_from_reg(xsdfec, XSDFEC_CODE_WR_PROTECT_ADDR,
				    0, /* Bit Number */
				    &xsdfec->config.code_wr_protect);

	reg_value = xsdfec_regread(xsdfec, XSDFEC_IMR_ADDR);
	xsdfec->config.irq.enable_isr = (reg_value & XSDFEC_ISR_MASK) > 0;

	reg_value = xsdfec_regread(xsdfec, XSDFEC_ECC_IMR_ADDR);
	xsdfec->config.irq.enable_ecc_isr =
		(reg_value & XSDFEC_ECC_ISR_MASK) > 0;

	reg_value = xsdfec_regread(xsdfec, XSDFEC_AXIS_ENABLE_ADDR);
	sdfec_started = (reg_value & XSDFEC_AXIS_IN_ENABLE_MASK) > 0;
	if (sdfec_started)
		xsdfec->state = XSDFEC_STARTED;
	else
		xsdfec->state = XSDFEC_STOPPED;
}

static int xsdfec_get_status(struct xsdfec_dev *xsdfec, void __user *arg)
{
	struct xsdfec_status status;
	int err;

	memset(&status, 0, sizeof(status));
	spin_lock_irqsave(&xsdfec->error_data_lock, xsdfec->flags);
	status.state = xsdfec->state;
	xsdfec->state_updated = false;
	spin_unlock_irqrestore(&xsdfec->error_data_lock, xsdfec->flags);
	status.activity = (xsdfec_regread(xsdfec, XSDFEC_ACTIVE_ADDR) &
			   XSDFEC_IS_ACTIVITY_SET);

	err = copy_to_user(arg, &status, sizeof(status));
	if (err)
		err = -EFAULT;

	return err;
}

static int xsdfec_get_config(struct xsdfec_dev *xsdfec, void __user *arg)
{
	int err;

	err = copy_to_user(arg, &xsdfec->config, sizeof(xsdfec->config));
	if (err)
		err = -EFAULT;

	return err;
}

static int xsdfec_isr_enable(struct xsdfec_dev *xsdfec, bool enable)
{
	u32 mask_read;

	if (enable) {
		/* Enable */
		xsdfec_regwrite(xsdfec, XSDFEC_IER_ADDR, XSDFEC_ISR_MASK);
		mask_read = xsdfec_regread(xsdfec, XSDFEC_IMR_ADDR);
		if (mask_read & XSDFEC_ISR_MASK) {
			dev_dbg(xsdfec->dev,
				"SDFEC enabling irq with IER failed");
			return -EIO;
		}
	} else {
		/* Disable */
		xsdfec_regwrite(xsdfec, XSDFEC_IDR_ADDR, XSDFEC_ISR_MASK);
		mask_read = xsdfec_regread(xsdfec, XSDFEC_IMR_ADDR);
		if ((mask_read & XSDFEC_ISR_MASK) != XSDFEC_ISR_MASK) {
			dev_dbg(xsdfec->dev,
				"SDFEC disabling irq with IDR failed");
			return -EIO;
		}
	}
	return 0;
}

static int xsdfec_ecc_isr_enable(struct xsdfec_dev *xsdfec, bool enable)
{
	u32 mask_read;

	if (enable) {
		/* Enable */
		xsdfec_regwrite(xsdfec, XSDFEC_ECC_IER_ADDR,
				XSDFEC_ALL_ECC_ISR_MASK);
		mask_read = xsdfec_regread(xsdfec, XSDFEC_ECC_IMR_ADDR);
		if (mask_read & XSDFEC_ALL_ECC_ISR_MASK) {
			dev_dbg(xsdfec->dev,
				"SDFEC enabling ECC irq with ECC IER failed");
			return -EIO;
		}
	} else {
		/* Disable */
		xsdfec_regwrite(xsdfec, XSDFEC_ECC_IDR_ADDR,
				XSDFEC_ALL_ECC_ISR_MASK);
		mask_read = xsdfec_regread(xsdfec, XSDFEC_ECC_IMR_ADDR);
		if (!(((mask_read & XSDFEC_ALL_ECC_ISR_MASK) ==
		       XSDFEC_ECC_ISR_MASK) ||
		      ((mask_read & XSDFEC_ALL_ECC_ISR_MASK) ==
		       XSDFEC_PL_INIT_ECC_ISR_MASK))) {
			dev_dbg(xsdfec->dev,
				"SDFEC disable ECC irq with ECC IDR failed");
			return -EIO;
		}
	}
	return 0;
}

static int xsdfec_set_irq(struct xsdfec_dev *xsdfec, void __user *arg)
{
	struct xsdfec_irq irq;
	int err;
	int isr_err;
	int ecc_err;

	err = copy_from_user(&irq, arg, sizeof(irq));
	if (err)
		return -EFAULT;

	/* Setup tlast related IRQ */
	isr_err = xsdfec_isr_enable(xsdfec, irq.enable_isr);
	if (!isr_err)
		xsdfec->config.irq.enable_isr = irq.enable_isr;

	/* Setup ECC related IRQ */
	ecc_err = xsdfec_ecc_isr_enable(xsdfec, irq.enable_ecc_isr);
	if (!ecc_err)
		xsdfec->config.irq.enable_ecc_isr = irq.enable_ecc_isr;

	if (isr_err < 0 || ecc_err < 0)
		err = -EIO;

	return err;
}

static int xsdfec_set_turbo(struct xsdfec_dev *xsdfec, void __user *arg)
{
	struct xsdfec_turbo turbo;
	int err;
	u32 turbo_write;

	err = copy_from_user(&turbo, arg, sizeof(turbo));
	if (err)
		return -EFAULT;

	if (turbo.alg >= XSDFEC_TURBO_ALG_MAX)
		return -EINVAL;

	if (turbo.scale > XSDFEC_TURBO_SCALE_MAX)
		return -EINVAL;

	/* Check to see what device tree says about the FEC codes */
	if (xsdfec->config.code == XSDFEC_LDPC_CODE)
		return -EIO;

	turbo_write = ((turbo.scale & XSDFEC_TURBO_SCALE_MASK)
		       << XSDFEC_TURBO_SCALE_BIT_POS) |
		      turbo.alg;
	xsdfec_regwrite(xsdfec, XSDFEC_TURBO_ADDR, turbo_write);
	return err;
}

static int xsdfec_get_turbo(struct xsdfec_dev *xsdfec, void __user *arg)
{
	u32 reg_value;
	struct xsdfec_turbo turbo_params;
	int err;

	if (xsdfec->config.code == XSDFEC_LDPC_CODE)
		return -EIO;

	memset(&turbo_params, 0, sizeof(turbo_params));
	reg_value = xsdfec_regread(xsdfec, XSDFEC_TURBO_ADDR);

	turbo_params.scale = (reg_value & XSDFEC_TURBO_SCALE_MASK) >>
			     XSDFEC_TURBO_SCALE_BIT_POS;
	turbo_params.alg = reg_value & 0x1;

	err = copy_to_user(arg, &turbo_params, sizeof(turbo_params));
	if (err)
		err = -EFAULT;

	return err;
}

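/*
 * The LDPC REG0 word written below packs N into the low half
 * (XSDFEC_REG0_N_LSB = 0) and K into the upper half (XSDFEC_REG0_K_LSB = 16);
 * both values are range-checked against multiples of the sub-matrix size
 * (psize) before being combined.
 */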
static int xsdfec_reg0_write(struct xsdfec_dev *xsdfec, u32 n, u32 k, u32 psize,
			     u32 offset)
{
	u32 wdata;

	if (n < XSDFEC_REG0_N_MIN || n > XSDFEC_REG0_N_MAX || psize == 0 ||
	    (n > XSDFEC_REG0_N_MUL_P * psize) || n <= k || ((n % psize) != 0)) {
		dev_dbg(xsdfec->dev, "N value is not in range");
		return -EINVAL;
	}
	n <<= XSDFEC_REG0_N_LSB;

	if (k < XSDFEC_REG0_K_MIN || k > XSDFEC_REG0_K_MAX ||
	    (k > XSDFEC_REG0_K_MUL_P * psize) || ((k % psize) != 0)) {
		dev_dbg(xsdfec->dev, "K value is not in range");
		return -EINVAL;
	}
	k = k << XSDFEC_REG0_K_LSB;
	wdata = k | n;

	if (XSDFEC_LDPC_CODE_REG0_ADDR_BASE + (offset * XSDFEC_LDPC_REG_JUMP) >
	    XSDFEC_LDPC_CODE_REG0_ADDR_HIGH) {
		dev_dbg(xsdfec->dev, "Writing outside of LDPC reg0 space 0x%x",
			XSDFEC_LDPC_CODE_REG0_ADDR_BASE +
				(offset * XSDFEC_LDPC_REG_JUMP));
		return -EINVAL;
	}
	xsdfec_regwrite(xsdfec,
			XSDFEC_LDPC_CODE_REG0_ADDR_BASE +
				(offset * XSDFEC_LDPC_REG_JUMP),
			wdata);
	return 0;
}

static int xsdfec_reg1_write(struct xsdfec_dev *xsdfec, u32 psize,
			     u32 no_packing, u32 nm, u32 offset)
{
	u32 wdata;

	if (psize < XSDFEC_REG1_PSIZE_MIN || psize > XSDFEC_REG1_PSIZE_MAX) {
		dev_dbg(xsdfec->dev, "Psize is not in range");
		return -EINVAL;
	}

	if (no_packing != 0 && no_packing != 1)
		dev_dbg(xsdfec->dev, "No-packing bit register invalid");
	no_packing = ((no_packing << XSDFEC_REG1_NO_PACKING_LSB) &
		      XSDFEC_REG1_NO_PACKING_MASK);

	if (nm & ~(XSDFEC_REG1_NM_MASK >> XSDFEC_REG1_NM_LSB))
		dev_dbg(xsdfec->dev, "NM is beyond 10 bits");
	nm = (nm << XSDFEC_REG1_NM_LSB) & XSDFEC_REG1_NM_MASK;

	wdata = nm | no_packing | psize;
	if (XSDFEC_LDPC_CODE_REG1_ADDR_BASE + (offset * XSDFEC_LDPC_REG_JUMP) >
	    XSDFEC_LDPC_CODE_REG1_ADDR_HIGH) {
		dev_dbg(xsdfec->dev, "Writing outside of LDPC reg1 space 0x%x",
			XSDFEC_LDPC_CODE_REG1_ADDR_BASE +
				(offset * XSDFEC_LDPC_REG_JUMP));
		return -EINVAL;
	}
	xsdfec_regwrite(xsdfec,
			XSDFEC_LDPC_CODE_REG1_ADDR_BASE +
				(offset * XSDFEC_LDPC_REG_JUMP),
			wdata);
	return 0;
}

static int xsdfec_reg2_write(struct xsdfec_dev *xsdfec, u32 nlayers, u32 nmqc,
			     u32 norm_type, u32 special_qc, u32 no_final_parity,
			     u32 max_schedule, u32 offset)
{
	u32 wdata;

	if (nlayers < XSDFEC_REG2_NLAYERS_MIN ||
	    nlayers > XSDFEC_REG2_NLAYERS_MAX) {
		dev_dbg(xsdfec->dev, "Nlayers is not in range");
		return -EINVAL;
	}

	if (nmqc & ~(XSDFEC_REG2_NNMQC_MASK >> XSDFEC_REG2_NMQC_LSB))
		dev_dbg(xsdfec->dev, "NMQC exceeds 11 bits");
	nmqc = (nmqc << XSDFEC_REG2_NMQC_LSB) & XSDFEC_REG2_NNMQC_MASK;

	if (norm_type > 1)
		dev_dbg(xsdfec->dev, "Norm type is invalid");
	norm_type = ((norm_type << XSDFEC_REG2_NORM_TYPE_LSB) &
		     XSDFEC_REG2_NORM_TYPE_MASK);
	if (special_qc > 1)
		dev_dbg(xsdfec->dev, "Special QC is invalid");
	special_qc = ((special_qc << XSDFEC_REG2_SPEICAL_QC_LSB) &
		      XSDFEC_REG2_SPECIAL_QC_MASK);

	if (no_final_parity > 1)
		dev_dbg(xsdfec->dev, "No final parity check invalid");
	no_final_parity =
		((no_final_parity << XSDFEC_REG2_NO_FINAL_PARITY_LSB) &
		 XSDFEC_REG2_NO_FINAL_PARITY_MASK);
	if (max_schedule &
	    ~(XSDFEC_REG2_MAX_SCHEDULE_MASK >> XSDFEC_REG2_MAX_SCHEDULE_LSB))
		dev_dbg(xsdfec->dev, "Max Schedule exceeds 2 bits");
	max_schedule = ((max_schedule << XSDFEC_REG2_MAX_SCHEDULE_LSB) &
			XSDFEC_REG2_MAX_SCHEDULE_MASK);

	wdata = (max_schedule | no_final_parity | special_qc | norm_type |
		 nmqc | nlayers);

	if (XSDFEC_LDPC_CODE_REG2_ADDR_BASE + (offset * XSDFEC_LDPC_REG_JUMP) >
	    XSDFEC_LDPC_CODE_REG2_ADDR_HIGH) {
		dev_dbg(xsdfec->dev, "Writing outside of LDPC reg2 space 0x%x",
			XSDFEC_LDPC_CODE_REG2_ADDR_BASE +
				(offset * XSDFEC_LDPC_REG_JUMP));
		return -EINVAL;
	}
	xsdfec_regwrite(xsdfec,
			XSDFEC_LDPC_CODE_REG2_ADDR_BASE +
				(offset * XSDFEC_LDPC_REG_JUMP),
			wdata);
	return 0;
}

static int xsdfec_reg3_write(struct xsdfec_dev *xsdfec, u8 sc_off, u8 la_off,
			     u16 qc_off, u32 offset)
{
	u32 wdata;

	wdata = ((qc_off << XSDFEC_REG3_QC_OFF_LSB) |
		 (la_off << XSDFEC_REG3_LA_OFF_LSB) | sc_off);
	if (XSDFEC_LDPC_CODE_REG3_ADDR_BASE + (offset * XSDFEC_LDPC_REG_JUMP) >
	    XSDFEC_LDPC_CODE_REG3_ADDR_HIGH) {
		dev_dbg(xsdfec->dev, "Writing outside of LDPC reg3 space 0x%x",
			XSDFEC_LDPC_CODE_REG3_ADDR_BASE +
				(offset * XSDFEC_LDPC_REG_JUMP));
		return -EINVAL;
	}
	xsdfec_regwrite(xsdfec,
			XSDFEC_LDPC_CODE_REG3_ADDR_BASE +
				(offset * XSDFEC_LDPC_REG_JUMP),
			wdata);
	return 0;
}

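/*
 * xsdfec_table_write() copies an LDPC table from user space into one of the
 * shared SC/LA/QC table windows: it pins the user buffer with
 * get_user_pages_fast(), kmap()s each page and writes the 32-bit entries one
 * register at a time, returning the number of words written on success.
 */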
static int xsdfec_table_write(struct xsdfec_dev *xsdfec, u32 offset,
			      u32 *src_ptr, u32 len, const u32 base_addr,
			      const u32 depth)
{
	u32 reg = 0;
	u32 res;
	u32 n, i;
	u32 *addr = NULL;
	struct page *page[MAX_NUM_PAGES];

	/*
	 * Writes that go beyond the length of
	 * Shared Scale(SC) table should fail
	 */
	if (offset > depth / XSDFEC_REG_WIDTH_JUMP ||
	    len > depth / XSDFEC_REG_WIDTH_JUMP ||
	    offset + len > depth / XSDFEC_REG_WIDTH_JUMP) {
		dev_dbg(xsdfec->dev, "Write exceeds SC table length");
		return -EINVAL;
	}

	n = (len * XSDFEC_REG_WIDTH_JUMP) / PAGE_SIZE;
	if ((len * XSDFEC_REG_WIDTH_JUMP) % PAGE_SIZE)
		n += 1;

	res = get_user_pages_fast((unsigned long)src_ptr, n, 0, page);
	if (res < n) {
		for (i = 0; i < res; i++)
			put_page(page[i]);
		return -EINVAL;
	}

	for (i = 0; i < n; i++) {
		addr = kmap(page[i]);
		do {
			xsdfec_regwrite(xsdfec,
					base_addr + ((offset + reg) *
						     XSDFEC_REG_WIDTH_JUMP),
					addr[reg]);
			reg++;
		} while ((reg < len) &&
			 ((reg * XSDFEC_REG_WIDTH_JUMP) % PAGE_SIZE));
		put_page(page[i]);
	}
	return reg;
}

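/*
 * xsdfec_add_ldpc() validates the state of the core (LDPC code selected, not
 * started, not write protected), then programs REG0..REG3 for the requested
 * code_id and loads the caller-supplied SC, LA and QC tables through
 * xsdfec_table_write().
 */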
static int xsdfec_add_ldpc(struct xsdfec_dev *xsdfec, void __user *arg)
{
	struct xsdfec_ldpc_params *ldpc;
	int ret, n;

	ldpc = kzalloc(sizeof(*ldpc), GFP_KERNEL);
	if (!ldpc)
		return -ENOMEM;

	if (copy_from_user(ldpc, arg, sizeof(*ldpc))) {
		ret = -EFAULT;
		goto err_out;
	}

	if (xsdfec->config.code == XSDFEC_TURBO_CODE) {
		ret = -EIO;
		goto err_out;
	}

	/* Verify Device has not started */
	if (xsdfec->state == XSDFEC_STARTED) {
		ret = -EIO;
		goto err_out;
	}

	if (xsdfec->config.code_wr_protect) {
		ret = -EIO;
		goto err_out;
	}

	ret = xsdfec_reg0_write(xsdfec, ldpc->n, ldpc->k, ldpc->psize,
				ldpc->code_id);
	if (ret)
		goto err_out;

	ret = xsdfec_reg1_write(xsdfec, ldpc->psize, ldpc->no_packing, ldpc->nm,
				ldpc->code_id);
	if (ret)
		goto err_out;

	ret = xsdfec_reg2_write(xsdfec, ldpc->nlayers, ldpc->nmqc,
				ldpc->norm_type, ldpc->special_qc,
				ldpc->no_final_parity, ldpc->max_schedule,
				ldpc->code_id);
	if (ret)
		goto err_out;

	ret = xsdfec_reg3_write(xsdfec, ldpc->sc_off, ldpc->la_off,
				ldpc->qc_off, ldpc->code_id);
	if (ret)
		goto err_out;

	/* Write Shared Codes */
	n = ldpc->nlayers / 4;
	if (ldpc->nlayers % 4)
		n++;

	ret = xsdfec_table_write(xsdfec, ldpc->sc_off, ldpc->sc_table, n,
				 XSDFEC_LDPC_SC_TABLE_ADDR_BASE,
				 XSDFEC_SC_TABLE_DEPTH);
	if (ret < 0)
		goto err_out;

	ret = xsdfec_table_write(xsdfec, 4 * ldpc->la_off, ldpc->la_table,
				 ldpc->nlayers, XSDFEC_LDPC_LA_TABLE_ADDR_BASE,
				 XSDFEC_LA_TABLE_DEPTH);
	if (ret < 0)
		goto err_out;

	ret = xsdfec_table_write(xsdfec, 4 * ldpc->qc_off, ldpc->qc_table,
				 ldpc->nqc, XSDFEC_LDPC_QC_TABLE_ADDR_BASE,
				 XSDFEC_QC_TABLE_DEPTH);
	if (ret > 0)
		ret = 0;
err_out:
	kfree(ldpc);
	return ret;
}

static int xsdfec_set_order(struct xsdfec_dev *xsdfec, void __user *arg)
{
	bool order_invalid;
	enum xsdfec_order order;
	int err;

	err = get_user(order, (enum xsdfec_order __user *)arg);
	if (err)
		return -EFAULT;

	order_invalid = (order != XSDFEC_MAINTAIN_ORDER) &&
			(order != XSDFEC_OUT_OF_ORDER);
	if (order_invalid)
		return -EINVAL;

	/* Verify Device has not started */
	if (xsdfec->state == XSDFEC_STARTED)
		return -EIO;

	xsdfec_regwrite(xsdfec, XSDFEC_ORDER_ADDR, order);

	xsdfec->config.order = order;

	return 0;
}

static int xsdfec_set_bypass(struct xsdfec_dev *xsdfec, bool __user *arg)
{
	bool bypass;
	int err;

	err = get_user(bypass, arg);
	if (err)
		return -EFAULT;

	/* Verify Device has not started */
	if (xsdfec->state == XSDFEC_STARTED)
		return -EIO;

	if (bypass)
		xsdfec_regwrite(xsdfec, XSDFEC_BYPASS_ADDR, 1);
	else
		xsdfec_regwrite(xsdfec, XSDFEC_BYPASS_ADDR, 0);

	xsdfec->config.bypass = bypass;

	return 0;
}

static int xsdfec_is_active(struct xsdfec_dev *xsdfec, bool __user *arg)
{
	u32 reg_value;
	bool is_active;
	int err;

	reg_value = xsdfec_regread(xsdfec, XSDFEC_ACTIVE_ADDR);
	/* using a double ! operator instead of casting */
	is_active = !!(reg_value & XSDFEC_IS_ACTIVITY_SET);
	err = put_user(is_active, arg);
	if (err)
		return -EFAULT;

	return err;
}

static u32
xsdfec_translate_axis_width_cfg_val(enum xsdfec_axis_width axis_width_cfg)
{
	u32 axis_width_field = 0;

	switch (axis_width_cfg) {
	case XSDFEC_1x128b:
		axis_width_field = 0;
		break;
	case XSDFEC_2x128b:
		axis_width_field = 1;
		break;
	case XSDFEC_4x128b:
		axis_width_field = 2;
		break;
	}

	return axis_width_field;
}

static u32
xsdfec_translate_axis_words_cfg_val(enum xsdfec_axis_word_include
				    axis_word_inc_cfg)
{
	u32 axis_words_field = 0;

	if (axis_word_inc_cfg == XSDFEC_FIXED_VALUE ||
	    axis_word_inc_cfg == XSDFEC_IN_BLOCK)
		axis_words_field = 0;
	else if (axis_word_inc_cfg == XSDFEC_PER_AXI_TRANSACTION)
		axis_words_field = 1;

	return axis_words_field;
}

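/*
 * xsdfec_cfg_axi_streams() packs the four translated fields into the
 * AXIS_WIDTH register: DIN width at bit 0, DIN words at bit 2, DOUT width at
 * bit 3 and DOUT words at bit 5, matching the XSDFEC_AXIS_*_LSB definitions
 * near the top of this file.
 */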
static int xsdfec_cfg_axi_streams(struct xsdfec_dev *xsdfec)
{
	u32 reg_value;
	u32 dout_words_field;
	u32 dout_width_field;
	u32 din_words_field;
	u32 din_width_field;
	struct xsdfec_config *config = &xsdfec->config;

	/* translate config info to register values */
	dout_words_field =
		xsdfec_translate_axis_words_cfg_val(config->dout_word_include);
	dout_width_field =
		xsdfec_translate_axis_width_cfg_val(config->dout_width);
	din_words_field =
		xsdfec_translate_axis_words_cfg_val(config->din_word_include);
	din_width_field =
		xsdfec_translate_axis_width_cfg_val(config->din_width);

	reg_value = dout_words_field << XSDFEC_AXIS_DOUT_WORDS_LSB;
	reg_value |= dout_width_field << XSDFEC_AXIS_DOUT_WIDTH_LSB;
	reg_value |= din_words_field << XSDFEC_AXIS_DIN_WORDS_LSB;
	reg_value |= din_width_field << XSDFEC_AXIS_DIN_WIDTH_LSB;

	xsdfec_regwrite(xsdfec, XSDFEC_AXIS_WIDTH_ADDR, reg_value);

	return 0;
}

static int xsdfec_dev_open(struct inode *iptr, struct file *fptr)
{
	return 0;
}

static int xsdfec_dev_release(struct inode *iptr, struct file *fptr)
{
	return 0;
}

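/*
 * xsdfec_start() first checks that the code programmed in the FEC_CODE
 * register matches the code the driver was configured with, then sets all
 * AXIS enable bits; xsdfec_stop() below clears only the input-side enables
 * and leaves the output enables untouched.
 */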
static int xsdfec_start(struct xsdfec_dev *xsdfec)
{
	u32 regread;

	regread = xsdfec_regread(xsdfec, XSDFEC_FEC_CODE_ADDR);
	regread &= 0x1;
	if (regread != xsdfec->config.code) {
		dev_dbg(xsdfec->dev,
			"%s SDFEC HW code does not match driver code, reg %d, code %d",
			__func__, regread, xsdfec->config.code);
		return -EINVAL;
	}

	/* Set AXIS enable */
	xsdfec_regwrite(xsdfec, XSDFEC_AXIS_ENABLE_ADDR,
			XSDFEC_AXIS_ENABLE_MASK);
	/* Done */
	xsdfec->state = XSDFEC_STARTED;
	return 0;
}

static int xsdfec_stop(struct xsdfec_dev *xsdfec)
{
	u32 regread;

	if (xsdfec->state != XSDFEC_STARTED)
		dev_dbg(xsdfec->dev, "Device not started correctly");
	/* Disable AXIS_ENABLE Input interfaces only */
	regread = xsdfec_regread(xsdfec, XSDFEC_AXIS_ENABLE_ADDR);
	regread &= (~XSDFEC_AXIS_IN_ENABLE_MASK);
	xsdfec_regwrite(xsdfec, XSDFEC_AXIS_ENABLE_ADDR, regread);
	/* Stop */
	xsdfec->state = XSDFEC_STOPPED;
	return 0;
}

static int xsdfec_clear_stats(struct xsdfec_dev *xsdfec)
{
	spin_lock_irqsave(&xsdfec->error_data_lock, xsdfec->flags);
	xsdfec->isr_err_count = 0;
	xsdfec->uecc_count = 0;
	xsdfec->cecc_count = 0;
	spin_unlock_irqrestore(&xsdfec->error_data_lock, xsdfec->flags);

	return 0;
}

static int xsdfec_get_stats(struct xsdfec_dev *xsdfec, void __user *arg)
{
	int err;
	struct xsdfec_stats user_stats;

	spin_lock_irqsave(&xsdfec->error_data_lock, xsdfec->flags);
	user_stats.isr_err_count = xsdfec->isr_err_count;
	user_stats.cecc_count = xsdfec->cecc_count;
	user_stats.uecc_count = xsdfec->uecc_count;
	xsdfec->stats_updated = false;
	spin_unlock_irqrestore(&xsdfec->error_data_lock, xsdfec->flags);

	err = copy_to_user(arg, &user_stats, sizeof(user_stats));
	if (err)
		err = -EFAULT;

	return err;
}

static int xsdfec_set_default_config(struct xsdfec_dev *xsdfec)
{
	/* Ensure registers are aligned with core configuration */
	xsdfec_regwrite(xsdfec, XSDFEC_FEC_CODE_ADDR, xsdfec->config.code);
	xsdfec_cfg_axi_streams(xsdfec);
	update_config_from_hw(xsdfec);

	return 0;
}

static long xsdfec_dev_ioctl(struct file *fptr, unsigned int cmd,
			     unsigned long data)
{
	struct xsdfec_dev *xsdfec;
	void __user *arg = NULL;
	int rval = -EINVAL;

	xsdfec = container_of(fptr->private_data, struct xsdfec_dev, miscdev);

	/* In failed state allow only reset and get status IOCTLs */
	if (xsdfec->state == XSDFEC_NEEDS_RESET &&
	    (cmd != XSDFEC_SET_DEFAULT_CONFIG && cmd != XSDFEC_GET_STATUS &&
	     cmd != XSDFEC_GET_STATS && cmd != XSDFEC_CLEAR_STATS)) {
		return -EPERM;
	}

	if (_IOC_TYPE(cmd) != XSDFEC_MAGIC)
		return -ENOTTY;

	/* check if ioctl argument is present and valid */
	if (_IOC_DIR(cmd) != _IOC_NONE) {
		arg = (void __user *)data;
		if (!arg)
			return rval;
	}

	switch (cmd) {
	case XSDFEC_START_DEV:
		rval = xsdfec_start(xsdfec);
		break;
	case XSDFEC_STOP_DEV:
		rval = xsdfec_stop(xsdfec);
		break;
	case XSDFEC_CLEAR_STATS:
		rval = xsdfec_clear_stats(xsdfec);
		break;
	case XSDFEC_GET_STATS:
		rval = xsdfec_get_stats(xsdfec, arg);
		break;
	case XSDFEC_GET_STATUS:
		rval = xsdfec_get_status(xsdfec, arg);
		break;
	case XSDFEC_GET_CONFIG:
		rval = xsdfec_get_config(xsdfec, arg);
		break;
	case XSDFEC_SET_DEFAULT_CONFIG:
		rval = xsdfec_set_default_config(xsdfec);
		break;
	case XSDFEC_SET_IRQ:
		rval = xsdfec_set_irq(xsdfec, arg);
		break;
	case XSDFEC_SET_TURBO:
		rval = xsdfec_set_turbo(xsdfec, arg);
		break;
	case XSDFEC_GET_TURBO:
		rval = xsdfec_get_turbo(xsdfec, arg);
		break;
	case XSDFEC_ADD_LDPC_CODE_PARAMS:
		rval = xsdfec_add_ldpc(xsdfec, arg);
		break;
	case XSDFEC_SET_ORDER:
		rval = xsdfec_set_order(xsdfec, arg);
		break;
	case XSDFEC_SET_BYPASS:
		rval = xsdfec_set_bypass(xsdfec, (bool __user *)arg);
		break;
	case XSDFEC_IS_ACTIVE:
		rval = xsdfec_is_active(xsdfec, (bool __user *)arg);
		break;
	default:
		/* Should not get here */
		break;
	}
	return rval;
}

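/*
 * Illustrative sketch (not part of the driver): a user-space caller is
 * expected to drive this interface roughly as below, using the request
 * codes and structures from <uapi/misc/xilinx_sdfec.h>. The device path is
 * an assumption here, following from the "xsdfec%d" name registered in
 * probe.
 *
 *	int fd = open("/dev/xsdfec0", O_RDWR);
 *	struct xsdfec_status status;
 *
 *	if (fd >= 0 && !ioctl(fd, XSDFEC_GET_STATUS, &status))
 *		printf("state %d, activity %d\n", status.state,
 *		       status.activity);
 *	close(fd);
 */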
#ifdef CONFIG_COMPAT
static long xsdfec_dev_compat_ioctl(struct file *file, unsigned int cmd,
				    unsigned long data)
{
	return xsdfec_dev_ioctl(file, cmd, (unsigned long)compat_ptr(data));
}
#endif

static __poll_t xsdfec_poll(struct file *file, poll_table *wait)
{
	__poll_t mask = 0;
	struct xsdfec_dev *xsdfec;

	xsdfec = container_of(file->private_data, struct xsdfec_dev, miscdev);

	if (!xsdfec)
		return EPOLLNVAL | EPOLLHUP;

	poll_wait(file, &xsdfec->waitq, wait);

	/* XSDFEC ISR detected an error */
	spin_lock_irqsave(&xsdfec->error_data_lock, xsdfec->flags);
	if (xsdfec->state_updated)
		mask |= EPOLLIN | EPOLLPRI;

	if (xsdfec->stats_updated)
		mask |= EPOLLIN | EPOLLRDNORM;
	spin_unlock_irqrestore(&xsdfec->error_data_lock, xsdfec->flags);

	return mask;
}

static const struct file_operations xsdfec_fops = {
	.owner = THIS_MODULE,
	.open = xsdfec_dev_open,
	.release = xsdfec_dev_release,
	.unlocked_ioctl = xsdfec_dev_ioctl,
	.poll = xsdfec_poll,
#ifdef CONFIG_COMPAT
	.compat_ioctl = xsdfec_dev_compat_ioctl,
#endif
};

static int xsdfec_parse_of(struct xsdfec_dev *xsdfec)
{
	struct device *dev = xsdfec->dev;
	struct device_node *node = dev->of_node;
	int rval;
	const char *fec_code;
	u32 din_width;
	u32 din_word_include;
	u32 dout_width;
	u32 dout_word_include;

	rval = of_property_read_string(node, "xlnx,sdfec-code", &fec_code);
	if (rval < 0)
		return rval;

	if (!strcasecmp(fec_code, "ldpc"))
		xsdfec->config.code = XSDFEC_LDPC_CODE;
	else if (!strcasecmp(fec_code, "turbo"))
		xsdfec->config.code = XSDFEC_TURBO_CODE;
	else
		return -EINVAL;

	rval = of_property_read_u32(node, "xlnx,sdfec-din-words",
				    &din_word_include);
	if (rval < 0)
		return rval;

	if (din_word_include < XSDFEC_AXIS_WORDS_INCLUDE_MAX)
		xsdfec->config.din_word_include = din_word_include;
	else
		return -EINVAL;

	rval = of_property_read_u32(node, "xlnx,sdfec-din-width", &din_width);
	if (rval < 0)
		return rval;

	switch (din_width) {
	/* Fall through and set for valid values */
	case XSDFEC_1x128b:
	case XSDFEC_2x128b:
	case XSDFEC_4x128b:
		xsdfec->config.din_width = din_width;
		break;
	default:
		return -EINVAL;
	}

	rval = of_property_read_u32(node, "xlnx,sdfec-dout-words",
				    &dout_word_include);
	if (rval < 0)
		return rval;

	if (dout_word_include < XSDFEC_AXIS_WORDS_INCLUDE_MAX)
		xsdfec->config.dout_word_include = dout_word_include;
	else
		return -EINVAL;

	rval = of_property_read_u32(node, "xlnx,sdfec-dout-width", &dout_width);
	if (rval < 0)
		return rval;

	switch (dout_width) {
	/* Fall through and set for valid values */
	case XSDFEC_1x128b:
	case XSDFEC_2x128b:
	case XSDFEC_4x128b:
		xsdfec->config.dout_width = dout_width;
		break;
	default:
		return -EINVAL;
	}

	/* Write LDPC to CODE Register */
	xsdfec_regwrite(xsdfec, XSDFEC_FEC_CODE_ADDR, xsdfec->config.code);

	xsdfec_cfg_axi_streams(xsdfec);

	return 0;
}

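/*
 * Threaded IRQ handler: interrupts are masked while the ISR and ECC ISR
 * registers are read and cleared, the set bits are turned into ISR,
 * correctable and uncorrectable ECC error counts with hweight32(), the shared
 * counters and device state are updated under error_data_lock, pollers are
 * woken if anything changed, and the interrupts are then unmasked again.
 */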
static irqreturn_t xsdfec_irq_thread(int irq, void *dev_id)
{
	struct xsdfec_dev *xsdfec = dev_id;
	irqreturn_t ret = IRQ_HANDLED;
	u32 ecc_err;
	u32 isr_err;
	u32 uecc_count;
	u32 cecc_count;
	u32 isr_err_count;
	u32 aecc_count;
	u32 tmp;

	WARN_ON(xsdfec->irq != irq);

	/* Mask Interrupts */
	xsdfec_isr_enable(xsdfec, false);
	xsdfec_ecc_isr_enable(xsdfec, false);
	/* Read the interrupt status registers */
	ecc_err = xsdfec_regread(xsdfec, XSDFEC_ECC_ISR_ADDR);
	isr_err = xsdfec_regread(xsdfec, XSDFEC_ISR_ADDR);
	/* Clear the interrupts */
	xsdfec_regwrite(xsdfec, XSDFEC_ECC_ISR_ADDR, ecc_err);
	xsdfec_regwrite(xsdfec, XSDFEC_ISR_ADDR, isr_err);

	tmp = ecc_err & XSDFEC_ALL_ECC_ISR_MBE_MASK;
	/* Count uncorrectable 2-bit errors */
	uecc_count = hweight32(tmp);
	/* Count all ECC errors */
	aecc_count = hweight32(ecc_err);
	/* Number of correctable 1-bit ECC error */
	cecc_count = aecc_count - 2 * uecc_count;
	/* Count ISR errors */
	isr_err_count = hweight32(isr_err);
	dev_dbg(xsdfec->dev, "tmp=%x, uecc=%x, aecc=%x, cecc=%x, isr=%x", tmp,
		uecc_count, aecc_count, cecc_count, isr_err_count);
	dev_dbg(xsdfec->dev, "uecc=%x, cecc=%x, isr=%x", xsdfec->uecc_count,
		xsdfec->cecc_count, xsdfec->isr_err_count);

	spin_lock_irqsave(&xsdfec->error_data_lock, xsdfec->flags);
	/* Add new errors to a 2-bits counter */
	if (uecc_count)
		xsdfec->uecc_count += uecc_count;
	/* Add new errors to a 1-bits counter */
	if (cecc_count)
		xsdfec->cecc_count += cecc_count;
	/* Add new errors to a ISR counter */
	if (isr_err_count)
		xsdfec->isr_err_count += isr_err_count;

	/* Update state/stats flag */
	if (uecc_count) {
		if (ecc_err & XSDFEC_ECC_ISR_MBE_MASK)
			xsdfec->state = XSDFEC_NEEDS_RESET;
		else if (ecc_err & XSDFEC_PL_INIT_ECC_ISR_MBE_MASK)
			xsdfec->state = XSDFEC_PL_RECONFIGURE;
		xsdfec->stats_updated = true;
		xsdfec->state_updated = true;
	}

	if (cecc_count)
		xsdfec->stats_updated = true;

	if (isr_err_count) {
		xsdfec->state = XSDFEC_NEEDS_RESET;
		xsdfec->stats_updated = true;
		xsdfec->state_updated = true;
	}

	spin_unlock_irqrestore(&xsdfec->error_data_lock, xsdfec->flags);
	dev_dbg(xsdfec->dev, "state=%x, stats=%x", xsdfec->state_updated,
		xsdfec->stats_updated);

	/* Enable another polling */
	if (xsdfec->state_updated || xsdfec->stats_updated)
		wake_up_interruptible(&xsdfec->waitq);
	else
		ret = IRQ_NONE;

	/* Unmask Interrupts */
	xsdfec_isr_enable(xsdfec, true);
	xsdfec_ecc_isr_enable(xsdfec, true);

	return ret;
}

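/*
 * Clock setup: core_clk and s_axi_aclk are mandatory, while the per-stream
 * clocks are optional and left NULL when the lookup returns -ENOENT (the
 * clk_prepare_enable()/clk_disable_unprepare() calls below accept a NULL
 * clock), so only the stream interfaces present in the hardware need clocks
 * wired up in the device tree.
 */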
static int xsdfec_clk_init(struct platform_device *pdev,
			   struct xsdfec_clks *clks)
{
	int err;

	clks->core_clk = devm_clk_get(&pdev->dev, "core_clk");
	if (IS_ERR(clks->core_clk)) {
		dev_err(&pdev->dev, "failed to get core_clk");
		return PTR_ERR(clks->core_clk);
	}

	clks->axi_clk = devm_clk_get(&pdev->dev, "s_axi_aclk");
	if (IS_ERR(clks->axi_clk)) {
		dev_err(&pdev->dev, "failed to get axi_clk");
		return PTR_ERR(clks->axi_clk);
	}

	clks->din_words_clk = devm_clk_get(&pdev->dev, "s_axis_din_words_aclk");
	if (IS_ERR(clks->din_words_clk)) {
		if (PTR_ERR(clks->din_words_clk) != -ENOENT) {
			err = PTR_ERR(clks->din_words_clk);
			return err;
		}
		clks->din_words_clk = NULL;
	}

	clks->din_clk = devm_clk_get(&pdev->dev, "s_axis_din_aclk");
	if (IS_ERR(clks->din_clk)) {
		if (PTR_ERR(clks->din_clk) != -ENOENT) {
			err = PTR_ERR(clks->din_clk);
			return err;
		}
		clks->din_clk = NULL;
	}

	clks->dout_clk = devm_clk_get(&pdev->dev, "m_axis_dout_aclk");
	if (IS_ERR(clks->dout_clk)) {
		if (PTR_ERR(clks->dout_clk) != -ENOENT) {
			err = PTR_ERR(clks->dout_clk);
			return err;
		}
		clks->dout_clk = NULL;
	}

	clks->dout_words_clk =
		devm_clk_get(&pdev->dev, "s_axis_dout_words_aclk");
	if (IS_ERR(clks->dout_words_clk)) {
		if (PTR_ERR(clks->dout_words_clk) != -ENOENT) {
			err = PTR_ERR(clks->dout_words_clk);
			return err;
		}
		clks->dout_words_clk = NULL;
	}

	clks->ctrl_clk = devm_clk_get(&pdev->dev, "s_axis_ctrl_aclk");
	if (IS_ERR(clks->ctrl_clk)) {
		if (PTR_ERR(clks->ctrl_clk) != -ENOENT) {
			err = PTR_ERR(clks->ctrl_clk);
			return err;
		}
		clks->ctrl_clk = NULL;
	}

	clks->status_clk = devm_clk_get(&pdev->dev, "m_axis_status_aclk");
	if (IS_ERR(clks->status_clk)) {
		if (PTR_ERR(clks->status_clk) != -ENOENT) {
			err = PTR_ERR(clks->status_clk);
			return err;
		}
		clks->status_clk = NULL;
	}

	err = clk_prepare_enable(clks->core_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable core_clk (%d)", err);
		return err;
	}

	err = clk_prepare_enable(clks->axi_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable axi_clk (%d)", err);
		goto err_disable_core_clk;
	}

	err = clk_prepare_enable(clks->din_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable din_clk (%d)", err);
		goto err_disable_axi_clk;
	}

	err = clk_prepare_enable(clks->din_words_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable din_words_clk (%d)", err);
		goto err_disable_din_clk;
	}

	err = clk_prepare_enable(clks->dout_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable dout_clk (%d)", err);
		goto err_disable_din_words_clk;
	}

	err = clk_prepare_enable(clks->dout_words_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable dout_words_clk (%d)",
			err);
		goto err_disable_dout_clk;
	}

	err = clk_prepare_enable(clks->ctrl_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable ctrl_clk (%d)", err);
		goto err_disable_dout_words_clk;
	}

	err = clk_prepare_enable(clks->status_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable status_clk (%d)\n", err);
		goto err_disable_ctrl_clk;
	}

	return err;

err_disable_ctrl_clk:
	clk_disable_unprepare(clks->ctrl_clk);
err_disable_dout_words_clk:
	clk_disable_unprepare(clks->dout_words_clk);
err_disable_dout_clk:
	clk_disable_unprepare(clks->dout_clk);
err_disable_din_words_clk:
	clk_disable_unprepare(clks->din_words_clk);
err_disable_din_clk:
	clk_disable_unprepare(clks->din_clk);
err_disable_axi_clk:
	clk_disable_unprepare(clks->axi_clk);
err_disable_core_clk:
	clk_disable_unprepare(clks->core_clk);

	return err;
}

static void xsdfec_disable_all_clks(struct xsdfec_clks *clks)
{
	clk_disable_unprepare(clks->status_clk);
	clk_disable_unprepare(clks->ctrl_clk);
	clk_disable_unprepare(clks->dout_words_clk);
	clk_disable_unprepare(clks->dout_clk);
	clk_disable_unprepare(clks->din_words_clk);
	clk_disable_unprepare(clks->din_clk);
	clk_disable_unprepare(clks->core_clk);
	clk_disable_unprepare(clks->axi_clk);
}

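/*
 * Probe order: enable clocks, map the register space, pick up the (optional)
 * IRQ, parse the device tree configuration, sync the driver config from the
 * hardware, then register the "xsdfec%d" misc device that exposes the ioctl
 * interface above.
 */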
static int xsdfec_probe(struct platform_device *pdev)
{
	struct xsdfec_dev *xsdfec;
	struct device *dev;
	struct resource *res;
	int err;
	bool irq_enabled = true;

	xsdfec = devm_kzalloc(&pdev->dev, sizeof(*xsdfec), GFP_KERNEL);
	if (!xsdfec)
		return -ENOMEM;

	xsdfec->dev = &pdev->dev;
	spin_lock_init(&xsdfec->error_data_lock);

	err = xsdfec_clk_init(pdev, &xsdfec->clks);
	if (err)
		return err;

	dev = xsdfec->dev;
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	xsdfec->regs = devm_ioremap_resource(dev, res);
	if (IS_ERR(xsdfec->regs)) {
		err = PTR_ERR(xsdfec->regs);
		goto err_xsdfec_dev;
	}

	xsdfec->irq = platform_get_irq(pdev, 0);
	if (xsdfec->irq < 0) {
		dev_dbg(dev, "platform_get_irq failed");
		irq_enabled = false;
	}

	err = xsdfec_parse_of(xsdfec);
	if (err < 0)
		goto err_xsdfec_dev;

	update_config_from_hw(xsdfec);

	/* Save driver private data */
	platform_set_drvdata(pdev, xsdfec);

	if (irq_enabled) {
		init_waitqueue_head(&xsdfec->waitq);
		/* Register IRQ thread */
		err = devm_request_threaded_irq(dev, xsdfec->irq, NULL,
						xsdfec_irq_thread, IRQF_ONESHOT,
						"xilinx-sdfec16", xsdfec);
		if (err < 0) {
			dev_err(dev, "unable to request IRQ%d", xsdfec->irq);
			goto err_xsdfec_dev;
		}
	}

	err = ida_alloc(&dev_nrs, GFP_KERNEL);
	if (err < 0)
		goto err_xsdfec_dev;
	xsdfec->dev_id = err;

	snprintf(xsdfec->dev_name, DEV_NAME_LEN, "xsdfec%d", xsdfec->dev_id);
	xsdfec->miscdev.minor = MISC_DYNAMIC_MINOR;
	xsdfec->miscdev.name = xsdfec->dev_name;
	xsdfec->miscdev.fops = &xsdfec_fops;
	xsdfec->miscdev.parent = dev;
	err = misc_register(&xsdfec->miscdev);
	if (err) {
		dev_err(dev, "error:%d. Unable to register device", err);
		goto err_xsdfec_ida;
	}
	return 0;

err_xsdfec_ida:
	ida_free(&dev_nrs, xsdfec->dev_id);
err_xsdfec_dev:
	xsdfec_disable_all_clks(&xsdfec->clks);
	return err;
}

static int xsdfec_remove(struct platform_device *pdev)
{
	struct xsdfec_dev *xsdfec;

	xsdfec = platform_get_drvdata(pdev);
	misc_deregister(&xsdfec->miscdev);
	ida_free(&dev_nrs, xsdfec->dev_id);
	xsdfec_disable_all_clks(&xsdfec->clks);
	return 0;
}

static const struct of_device_id xsdfec_of_match[] = {
	{
		.compatible = "xlnx,sd-fec-1.1",
	},
	{ /* end of table */ }
};
MODULE_DEVICE_TABLE(of, xsdfec_of_match);

static struct platform_driver xsdfec_driver = {
	.driver = {
		.name = "xilinx-sdfec",
		.of_match_table = xsdfec_of_match,
	},
	.probe = xsdfec_probe,
	.remove = xsdfec_remove,
};

static int __init xsdfec_init(void)
{
	int err;

	err = platform_driver_register(&xsdfec_driver);
	if (err < 0) {
		pr_err("%s Unable to register SDFEC driver", __func__);
		return err;
	}
	return 0;
}

static void __exit xsdfec_exit(void)
{
	platform_driver_unregister(&xsdfec_driver);
}

module_init(xsdfec_init);
module_exit(xsdfec_exit);

MODULE_AUTHOR("Xilinx, Inc");
MODULE_DESCRIPTION("Xilinx SD-FEC16 Driver");
MODULE_LICENSE("GPL");