// SPDX-License-Identifier: GPL-2.0
/*
 * Xilinx SDFEC
 *
 * Copyright (C) 2019 Xilinx, Inc.
 *
 * Description:
 * This driver is developed for SDFEC16 (Soft Decision FEC 16nm)
 * IP. It exposes a char device which supports file operations
 * like open(), close() and ioctl().
 */
#include <linux/miscdevice.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/compat.h>
#include <linux/highmem.h>

#include <uapi/misc/xilinx_sdfec.h>
#define DEV_NAME_LEN 12

/* IDA that hands out unique SD-FEC device instance numbers */
static DEFINE_IDA(dev_nrs);
31 /* Xilinx SDFEC Register Map */
32 /* CODE_WRI_PROTECT Register */
33 #define XSDFEC_CODE_WR_PROTECT_ADDR (0x4)
36 #define XSDFEC_ACTIVE_ADDR (0x8)
37 #define XSDFEC_IS_ACTIVITY_SET (0x1)
39 /* AXIS_WIDTH Register */
40 #define XSDFEC_AXIS_WIDTH_ADDR (0xC)
41 #define XSDFEC_AXIS_DOUT_WORDS_LSB (5)
42 #define XSDFEC_AXIS_DOUT_WIDTH_LSB (3)
43 #define XSDFEC_AXIS_DIN_WORDS_LSB (2)
44 #define XSDFEC_AXIS_DIN_WIDTH_LSB (0)
46 /* AXIS_ENABLE Register */
47 #define XSDFEC_AXIS_ENABLE_ADDR (0x10)
48 #define XSDFEC_AXIS_OUT_ENABLE_MASK (0x38)
49 #define XSDFEC_AXIS_IN_ENABLE_MASK (0x7)
50 #define XSDFEC_AXIS_ENABLE_MASK \
51 (XSDFEC_AXIS_OUT_ENABLE_MASK | XSDFEC_AXIS_IN_ENABLE_MASK)
53 /* FEC_CODE Register */
54 #define XSDFEC_FEC_CODE_ADDR (0x14)
56 /* ORDER Register Map */
57 #define XSDFEC_ORDER_ADDR (0x18)
59 /* Interrupt Status Register */
60 #define XSDFEC_ISR_ADDR (0x1C)
61 /* Interrupt Status Register Bit Mask */
62 #define XSDFEC_ISR_MASK (0x3F)
64 /* Write Only - Interrupt Enable Register */
65 #define XSDFEC_IER_ADDR (0x20)
66 /* Write Only - Interrupt Disable Register */
67 #define XSDFEC_IDR_ADDR (0x24)
68 /* Read Only - Interrupt Mask Register */
69 #define XSDFEC_IMR_ADDR (0x28)
71 /* ECC Interrupt Status Register */
72 #define XSDFEC_ECC_ISR_ADDR (0x2C)
73 /* Single Bit Errors */
74 #define XSDFEC_ECC_ISR_SBE_MASK (0x7FF)
75 /* PL Initialize Single Bit Errors */
76 #define XSDFEC_PL_INIT_ECC_ISR_SBE_MASK (0x3C00000)
77 /* Multi Bit Errors */
78 #define XSDFEC_ECC_ISR_MBE_MASK (0x3FF800)
79 /* PL Initialize Multi Bit Errors */
80 #define XSDFEC_PL_INIT_ECC_ISR_MBE_MASK (0x3C000000)
81 /* Multi Bit Error to Event Shift */
82 #define XSDFEC_ECC_ISR_MBE_TO_EVENT_SHIFT (11)
83 /* PL Initialize Multi Bit Error to Event Shift */
84 #define XSDFEC_PL_INIT_ECC_ISR_MBE_TO_EVENT_SHIFT (4)
85 /* ECC Interrupt Status Bit Mask */
86 #define XSDFEC_ECC_ISR_MASK (XSDFEC_ECC_ISR_SBE_MASK | XSDFEC_ECC_ISR_MBE_MASK)
87 /* ECC Interrupt Status PL Initialize Bit Mask */
88 #define XSDFEC_PL_INIT_ECC_ISR_MASK \
89 (XSDFEC_PL_INIT_ECC_ISR_SBE_MASK | XSDFEC_PL_INIT_ECC_ISR_MBE_MASK)
90 /* ECC Interrupt Status All Bit Mask */
91 #define XSDFEC_ALL_ECC_ISR_MASK \
92 (XSDFEC_ECC_ISR_MASK | XSDFEC_PL_INIT_ECC_ISR_MASK)
93 /* ECC Interrupt Status Single Bit Errors Mask */
94 #define XSDFEC_ALL_ECC_ISR_SBE_MASK \
95 (XSDFEC_ECC_ISR_SBE_MASK | XSDFEC_PL_INIT_ECC_ISR_SBE_MASK)
96 /* ECC Interrupt Status Multi Bit Errors Mask */
97 #define XSDFEC_ALL_ECC_ISR_MBE_MASK \
98 (XSDFEC_ECC_ISR_MBE_MASK | XSDFEC_PL_INIT_ECC_ISR_MBE_MASK)
100 /* Write Only - ECC Interrupt Enable Register */
101 #define XSDFEC_ECC_IER_ADDR (0x30)
102 /* Write Only - ECC Interrupt Disable Register */
103 #define XSDFEC_ECC_IDR_ADDR (0x34)
104 /* Read Only - ECC Interrupt Mask Register */
105 #define XSDFEC_ECC_IMR_ADDR (0x38)
107 /* BYPASS Register */
108 #define XSDFEC_BYPASS_ADDR (0x3C)
110 /* Turbo Code Register */
111 #define XSDFEC_TURBO_ADDR (0x100)
112 #define XSDFEC_TURBO_SCALE_MASK (0xFFF)
113 #define XSDFEC_TURBO_SCALE_BIT_POS (8)
114 #define XSDFEC_TURBO_SCALE_MAX (15)
117 #define XSDFEC_LDPC_CODE_REG0_ADDR_BASE (0x2000)
118 #define XSDFEC_LDPC_CODE_REG0_ADDR_HIGH (0x27F0)
119 #define XSDFEC_REG0_N_MIN (4)
120 #define XSDFEC_REG0_N_MAX (32768)
121 #define XSDFEC_REG0_N_MUL_P (256)
122 #define XSDFEC_REG0_N_LSB (0)
123 #define XSDFEC_REG0_K_MIN (2)
124 #define XSDFEC_REG0_K_MAX (32766)
125 #define XSDFEC_REG0_K_MUL_P (256)
126 #define XSDFEC_REG0_K_LSB (16)
129 #define XSDFEC_LDPC_CODE_REG1_ADDR_BASE (0x2004)
130 #define XSDFEC_LDPC_CODE_REG1_ADDR_HIGH (0x27f4)
131 #define XSDFEC_REG1_PSIZE_MIN (2)
132 #define XSDFEC_REG1_PSIZE_MAX (512)
133 #define XSDFEC_REG1_NO_PACKING_MASK (0x400)
134 #define XSDFEC_REG1_NO_PACKING_LSB (10)
135 #define XSDFEC_REG1_NM_MASK (0xFF800)
136 #define XSDFEC_REG1_NM_LSB (11)
137 #define XSDFEC_REG1_BYPASS_MASK (0x100000)
140 #define XSDFEC_LDPC_CODE_REG2_ADDR_BASE (0x2008)
141 #define XSDFEC_LDPC_CODE_REG2_ADDR_HIGH (0x27f8)
142 #define XSDFEC_REG2_NLAYERS_MIN (1)
143 #define XSDFEC_REG2_NLAYERS_MAX (256)
144 #define XSDFEC_REG2_NNMQC_MASK (0xFFE00)
145 #define XSDFEC_REG2_NMQC_LSB (9)
146 #define XSDFEC_REG2_NORM_TYPE_MASK (0x100000)
147 #define XSDFEC_REG2_NORM_TYPE_LSB (20)
148 #define XSDFEC_REG2_SPECIAL_QC_MASK (0x200000)
149 #define XSDFEC_REG2_SPEICAL_QC_LSB (21)
150 #define XSDFEC_REG2_NO_FINAL_PARITY_MASK (0x400000)
151 #define XSDFEC_REG2_NO_FINAL_PARITY_LSB (22)
152 #define XSDFEC_REG2_MAX_SCHEDULE_MASK (0x1800000)
153 #define XSDFEC_REG2_MAX_SCHEDULE_LSB (23)
156 #define XSDFEC_LDPC_CODE_REG3_ADDR_BASE (0x200C)
157 #define XSDFEC_LDPC_CODE_REG3_ADDR_HIGH (0x27FC)
158 #define XSDFEC_REG3_LA_OFF_LSB (8)
159 #define XSDFEC_REG3_QC_OFF_LSB (16)
161 #define XSDFEC_LDPC_REG_JUMP (0x10)
162 #define XSDFEC_REG_WIDTH_JUMP (4)
164 /* The maximum number of pinned pages */
165 #define MAX_NUM_PAGES ((XSDFEC_QC_TABLE_DEPTH / PAGE_SIZE) + 1)
/**
 * struct xsdfec_clks - For managing SD-FEC clocks
 * @core_clk: Main processing clock for core
 * @axi_clk: AXI4-Lite memory-mapped clock
 * @din_words_clk: DIN Words AXI4-Stream Slave clock
 * @din_clk: DIN AXI4-Stream Slave clock
 * @dout_clk: DOUT Words AXI4-Stream Slave clock
 * @dout_words_clk: DOUT AXI4-Stream Slave clock
 * @ctrl_clk: Control AXI4-Stream Slave clock
 * @status_clk: Status AXI4-Stream Slave clock
 */
struct xsdfec_clks {
	struct clk *core_clk;
	struct clk *axi_clk;
	struct clk *din_words_clk;
	struct clk *din_clk;
	struct clk *dout_clk;
	struct clk *dout_words_clk;
	struct clk *ctrl_clk;
	struct clk *status_clk;
};
190 * struct xsdfec_dev - Driver data for SDFEC
191 * @miscdev: Misc device handle
192 * @clks: Clocks managed by the SDFEC driver
193 * @waitq: Driver wait queue
194 * @config: Configuration of the SDFEC device
195 * @dev_name: Device name
196 * @flags: spinlock flags
197 * @regs: device physical base address
198 * @dev: pointer to device struct
199 * @state: State of the SDFEC device
200 * @error_data_lock: Error counter and states spinlock
202 * @isr_err_count: Count of ISR errors
203 * @cecc_count: Count of Correctable ECC errors (SBE)
204 * @uecc_count: Count of Uncorrectable ECC errors (MBE)
206 * @state_updated: indicates State updated by interrupt handler
207 * @stats_updated: indicates Stats updated by interrupt handler
208 * @intr_enabled: indicates IRQ enabled
210 * This structure contains necessary state for SDFEC driver to operate
213 struct miscdevice miscdev
;
214 struct xsdfec_clks clks
;
215 wait_queue_head_t waitq
;
216 struct xsdfec_config config
;
217 char dev_name
[DEV_NAME_LEN
];
221 enum xsdfec_state state
;
222 /* Spinlock to protect state_updated and stats_updated */
223 spinlock_t error_data_lock
;
234 static inline void xsdfec_regwrite(struct xsdfec_dev
*xsdfec
, u32 addr
,
237 dev_dbg(xsdfec
->dev
, "Writing 0x%x to offset 0x%x", value
, addr
);
238 iowrite32(value
, xsdfec
->regs
+ addr
);
241 static inline u32
xsdfec_regread(struct xsdfec_dev
*xsdfec
, u32 addr
)
245 rval
= ioread32(xsdfec
->regs
+ addr
);
246 dev_dbg(xsdfec
->dev
, "Read value = 0x%x from offset 0x%x", rval
, addr
);
250 static void update_bool_config_from_reg(struct xsdfec_dev
*xsdfec
,
251 u32 reg_offset
, u32 bit_num
,
255 u32 bit_mask
= 1 << bit_num
;
257 reg_val
= xsdfec_regread(xsdfec
, reg_offset
);
258 *config_value
= (reg_val
& bit_mask
) > 0;
261 static void update_config_from_hw(struct xsdfec_dev
*xsdfec
)
266 /* Update the Order */
267 reg_value
= xsdfec_regread(xsdfec
, XSDFEC_ORDER_ADDR
);
268 xsdfec
->config
.order
= reg_value
;
270 update_bool_config_from_reg(xsdfec
, XSDFEC_BYPASS_ADDR
,
271 0, /* Bit Number, maybe change to mask */
272 &xsdfec
->config
.bypass
);
274 update_bool_config_from_reg(xsdfec
, XSDFEC_CODE_WR_PROTECT_ADDR
,
276 &xsdfec
->config
.code_wr_protect
);
278 reg_value
= xsdfec_regread(xsdfec
, XSDFEC_IMR_ADDR
);
279 xsdfec
->config
.irq
.enable_isr
= (reg_value
& XSDFEC_ISR_MASK
) > 0;
281 reg_value
= xsdfec_regread(xsdfec
, XSDFEC_ECC_IMR_ADDR
);
282 xsdfec
->config
.irq
.enable_ecc_isr
=
283 (reg_value
& XSDFEC_ECC_ISR_MASK
) > 0;
285 reg_value
= xsdfec_regread(xsdfec
, XSDFEC_AXIS_ENABLE_ADDR
);
286 sdfec_started
= (reg_value
& XSDFEC_AXIS_IN_ENABLE_MASK
) > 0;
288 xsdfec
->state
= XSDFEC_STARTED
;
290 xsdfec
->state
= XSDFEC_STOPPED
;
293 static int xsdfec_get_status(struct xsdfec_dev
*xsdfec
, void __user
*arg
)
295 struct xsdfec_status status
;
298 memset(&status
, 0, sizeof(status
));
299 spin_lock_irqsave(&xsdfec
->error_data_lock
, xsdfec
->flags
);
300 status
.state
= xsdfec
->state
;
301 xsdfec
->state_updated
= false;
302 spin_unlock_irqrestore(&xsdfec
->error_data_lock
, xsdfec
->flags
);
303 status
.activity
= (xsdfec_regread(xsdfec
, XSDFEC_ACTIVE_ADDR
) &
304 XSDFEC_IS_ACTIVITY_SET
);
306 err
= copy_to_user(arg
, &status
, sizeof(status
));
313 static int xsdfec_get_config(struct xsdfec_dev
*xsdfec
, void __user
*arg
)
317 err
= copy_to_user(arg
, &xsdfec
->config
, sizeof(xsdfec
->config
));
324 static int xsdfec_isr_enable(struct xsdfec_dev
*xsdfec
, bool enable
)
330 xsdfec_regwrite(xsdfec
, XSDFEC_IER_ADDR
, XSDFEC_ISR_MASK
);
331 mask_read
= xsdfec_regread(xsdfec
, XSDFEC_IMR_ADDR
);
332 if (mask_read
& XSDFEC_ISR_MASK
) {
334 "SDFEC enabling irq with IER failed");
339 xsdfec_regwrite(xsdfec
, XSDFEC_IDR_ADDR
, XSDFEC_ISR_MASK
);
340 mask_read
= xsdfec_regread(xsdfec
, XSDFEC_IMR_ADDR
);
341 if ((mask_read
& XSDFEC_ISR_MASK
) != XSDFEC_ISR_MASK
) {
343 "SDFEC disabling irq with IDR failed");
350 static int xsdfec_ecc_isr_enable(struct xsdfec_dev
*xsdfec
, bool enable
)
356 xsdfec_regwrite(xsdfec
, XSDFEC_ECC_IER_ADDR
,
357 XSDFEC_ALL_ECC_ISR_MASK
);
358 mask_read
= xsdfec_regread(xsdfec
, XSDFEC_ECC_IMR_ADDR
);
359 if (mask_read
& XSDFEC_ALL_ECC_ISR_MASK
) {
361 "SDFEC enabling ECC irq with ECC IER failed");
366 xsdfec_regwrite(xsdfec
, XSDFEC_ECC_IDR_ADDR
,
367 XSDFEC_ALL_ECC_ISR_MASK
);
368 mask_read
= xsdfec_regread(xsdfec
, XSDFEC_ECC_IMR_ADDR
);
369 if (!(((mask_read
& XSDFEC_ALL_ECC_ISR_MASK
) ==
370 XSDFEC_ECC_ISR_MASK
) ||
371 ((mask_read
& XSDFEC_ALL_ECC_ISR_MASK
) ==
372 XSDFEC_PL_INIT_ECC_ISR_MASK
))) {
374 "SDFEC disable ECC irq with ECC IDR failed");
381 static int xsdfec_set_irq(struct xsdfec_dev
*xsdfec
, void __user
*arg
)
383 struct xsdfec_irq irq
;
388 err
= copy_from_user(&irq
, arg
, sizeof(irq
));
392 /* Setup tlast related IRQ */
393 isr_err
= xsdfec_isr_enable(xsdfec
, irq
.enable_isr
);
395 xsdfec
->config
.irq
.enable_isr
= irq
.enable_isr
;
397 /* Setup ECC related IRQ */
398 ecc_err
= xsdfec_ecc_isr_enable(xsdfec
, irq
.enable_ecc_isr
);
400 xsdfec
->config
.irq
.enable_ecc_isr
= irq
.enable_ecc_isr
;
402 if (isr_err
< 0 || ecc_err
< 0)
408 static int xsdfec_set_turbo(struct xsdfec_dev
*xsdfec
, void __user
*arg
)
410 struct xsdfec_turbo turbo
;
414 err
= copy_from_user(&turbo
, arg
, sizeof(turbo
));
418 if (turbo
.alg
>= XSDFEC_TURBO_ALG_MAX
)
421 if (turbo
.scale
> XSDFEC_TURBO_SCALE_MAX
)
424 /* Check to see what device tree says about the FEC codes */
425 if (xsdfec
->config
.code
== XSDFEC_LDPC_CODE
)
428 turbo_write
= ((turbo
.scale
& XSDFEC_TURBO_SCALE_MASK
)
429 << XSDFEC_TURBO_SCALE_BIT_POS
) |
431 xsdfec_regwrite(xsdfec
, XSDFEC_TURBO_ADDR
, turbo_write
);
435 static int xsdfec_get_turbo(struct xsdfec_dev
*xsdfec
, void __user
*arg
)
438 struct xsdfec_turbo turbo_params
;
441 if (xsdfec
->config
.code
== XSDFEC_LDPC_CODE
)
444 memset(&turbo_params
, 0, sizeof(turbo_params
));
445 reg_value
= xsdfec_regread(xsdfec
, XSDFEC_TURBO_ADDR
);
447 turbo_params
.scale
= (reg_value
& XSDFEC_TURBO_SCALE_MASK
) >>
448 XSDFEC_TURBO_SCALE_BIT_POS
;
449 turbo_params
.alg
= reg_value
& 0x1;
451 err
= copy_to_user(arg
, &turbo_params
, sizeof(turbo_params
));
458 static int xsdfec_reg0_write(struct xsdfec_dev
*xsdfec
, u32 n
, u32 k
, u32 psize
,
463 if (n
< XSDFEC_REG0_N_MIN
|| n
> XSDFEC_REG0_N_MAX
|| psize
== 0 ||
464 (n
> XSDFEC_REG0_N_MUL_P
* psize
) || n
<= k
|| ((n
% psize
) != 0)) {
465 dev_dbg(xsdfec
->dev
, "N value is not in range");
468 n
<<= XSDFEC_REG0_N_LSB
;
470 if (k
< XSDFEC_REG0_K_MIN
|| k
> XSDFEC_REG0_K_MAX
||
471 (k
> XSDFEC_REG0_K_MUL_P
* psize
) || ((k
% psize
) != 0)) {
472 dev_dbg(xsdfec
->dev
, "K value is not in range");
475 k
= k
<< XSDFEC_REG0_K_LSB
;
478 if (XSDFEC_LDPC_CODE_REG0_ADDR_BASE
+ (offset
* XSDFEC_LDPC_REG_JUMP
) >
479 XSDFEC_LDPC_CODE_REG0_ADDR_HIGH
) {
480 dev_dbg(xsdfec
->dev
, "Writing outside of LDPC reg0 space 0x%x",
481 XSDFEC_LDPC_CODE_REG0_ADDR_BASE
+
482 (offset
* XSDFEC_LDPC_REG_JUMP
));
485 xsdfec_regwrite(xsdfec
,
486 XSDFEC_LDPC_CODE_REG0_ADDR_BASE
+
487 (offset
* XSDFEC_LDPC_REG_JUMP
),
492 static int xsdfec_reg1_write(struct xsdfec_dev
*xsdfec
, u32 psize
,
493 u32 no_packing
, u32 nm
, u32 offset
)
497 if (psize
< XSDFEC_REG1_PSIZE_MIN
|| psize
> XSDFEC_REG1_PSIZE_MAX
) {
498 dev_dbg(xsdfec
->dev
, "Psize is not in range");
502 if (no_packing
!= 0 && no_packing
!= 1)
503 dev_dbg(xsdfec
->dev
, "No-packing bit register invalid");
504 no_packing
= ((no_packing
<< XSDFEC_REG1_NO_PACKING_LSB
) &
505 XSDFEC_REG1_NO_PACKING_MASK
);
507 if (nm
& ~(XSDFEC_REG1_NM_MASK
>> XSDFEC_REG1_NM_LSB
))
508 dev_dbg(xsdfec
->dev
, "NM is beyond 10 bits");
509 nm
= (nm
<< XSDFEC_REG1_NM_LSB
) & XSDFEC_REG1_NM_MASK
;
511 wdata
= nm
| no_packing
| psize
;
512 if (XSDFEC_LDPC_CODE_REG1_ADDR_BASE
+ (offset
* XSDFEC_LDPC_REG_JUMP
) >
513 XSDFEC_LDPC_CODE_REG1_ADDR_HIGH
) {
514 dev_dbg(xsdfec
->dev
, "Writing outside of LDPC reg1 space 0x%x",
515 XSDFEC_LDPC_CODE_REG1_ADDR_BASE
+
516 (offset
* XSDFEC_LDPC_REG_JUMP
));
519 xsdfec_regwrite(xsdfec
,
520 XSDFEC_LDPC_CODE_REG1_ADDR_BASE
+
521 (offset
* XSDFEC_LDPC_REG_JUMP
),
526 static int xsdfec_reg2_write(struct xsdfec_dev
*xsdfec
, u32 nlayers
, u32 nmqc
,
527 u32 norm_type
, u32 special_qc
, u32 no_final_parity
,
528 u32 max_schedule
, u32 offset
)
532 if (nlayers
< XSDFEC_REG2_NLAYERS_MIN
||
533 nlayers
> XSDFEC_REG2_NLAYERS_MAX
) {
534 dev_dbg(xsdfec
->dev
, "Nlayers is not in range");
538 if (nmqc
& ~(XSDFEC_REG2_NNMQC_MASK
>> XSDFEC_REG2_NMQC_LSB
))
539 dev_dbg(xsdfec
->dev
, "NMQC exceeds 11 bits");
540 nmqc
= (nmqc
<< XSDFEC_REG2_NMQC_LSB
) & XSDFEC_REG2_NNMQC_MASK
;
543 dev_dbg(xsdfec
->dev
, "Norm type is invalid");
544 norm_type
= ((norm_type
<< XSDFEC_REG2_NORM_TYPE_LSB
) &
545 XSDFEC_REG2_NORM_TYPE_MASK
);
547 dev_dbg(xsdfec
->dev
, "Special QC in invalid");
548 special_qc
= ((special_qc
<< XSDFEC_REG2_SPEICAL_QC_LSB
) &
549 XSDFEC_REG2_SPECIAL_QC_MASK
);
551 if (no_final_parity
> 1)
552 dev_dbg(xsdfec
->dev
, "No final parity check invalid");
554 ((no_final_parity
<< XSDFEC_REG2_NO_FINAL_PARITY_LSB
) &
555 XSDFEC_REG2_NO_FINAL_PARITY_MASK
);
557 ~(XSDFEC_REG2_MAX_SCHEDULE_MASK
>> XSDFEC_REG2_MAX_SCHEDULE_LSB
))
558 dev_dbg(xsdfec
->dev
, "Max Schedule exceeds 2 bits");
559 max_schedule
= ((max_schedule
<< XSDFEC_REG2_MAX_SCHEDULE_LSB
) &
560 XSDFEC_REG2_MAX_SCHEDULE_MASK
);
562 wdata
= (max_schedule
| no_final_parity
| special_qc
| norm_type
|
565 if (XSDFEC_LDPC_CODE_REG2_ADDR_BASE
+ (offset
* XSDFEC_LDPC_REG_JUMP
) >
566 XSDFEC_LDPC_CODE_REG2_ADDR_HIGH
) {
567 dev_dbg(xsdfec
->dev
, "Writing outside of LDPC reg2 space 0x%x",
568 XSDFEC_LDPC_CODE_REG2_ADDR_BASE
+
569 (offset
* XSDFEC_LDPC_REG_JUMP
));
572 xsdfec_regwrite(xsdfec
,
573 XSDFEC_LDPC_CODE_REG2_ADDR_BASE
+
574 (offset
* XSDFEC_LDPC_REG_JUMP
),
579 static int xsdfec_reg3_write(struct xsdfec_dev
*xsdfec
, u8 sc_off
, u8 la_off
,
580 u16 qc_off
, u32 offset
)
584 wdata
= ((qc_off
<< XSDFEC_REG3_QC_OFF_LSB
) |
585 (la_off
<< XSDFEC_REG3_LA_OFF_LSB
) | sc_off
);
586 if (XSDFEC_LDPC_CODE_REG3_ADDR_BASE
+ (offset
* XSDFEC_LDPC_REG_JUMP
) >
587 XSDFEC_LDPC_CODE_REG3_ADDR_HIGH
) {
588 dev_dbg(xsdfec
->dev
, "Writing outside of LDPC reg3 space 0x%x",
589 XSDFEC_LDPC_CODE_REG3_ADDR_BASE
+
590 (offset
* XSDFEC_LDPC_REG_JUMP
));
593 xsdfec_regwrite(xsdfec
,
594 XSDFEC_LDPC_CODE_REG3_ADDR_BASE
+
595 (offset
* XSDFEC_LDPC_REG_JUMP
),
600 static int xsdfec_table_write(struct xsdfec_dev
*xsdfec
, u32 offset
,
601 u32
*src_ptr
, u32 len
, const u32 base_addr
,
605 int res
, i
, nr_pages
;
608 struct page
*pages
[MAX_NUM_PAGES
];
611 * Writes that go beyond the length of
612 * Shared Scale(SC) table should fail
614 if (offset
> depth
/ XSDFEC_REG_WIDTH_JUMP
||
615 len
> depth
/ XSDFEC_REG_WIDTH_JUMP
||
616 offset
+ len
> depth
/ XSDFEC_REG_WIDTH_JUMP
) {
617 dev_dbg(xsdfec
->dev
, "Write exceeds SC table length");
621 n
= (len
* XSDFEC_REG_WIDTH_JUMP
) / PAGE_SIZE
;
622 if ((len
* XSDFEC_REG_WIDTH_JUMP
) % PAGE_SIZE
)
625 if (WARN_ON_ONCE(n
> INT_MAX
))
630 res
= pin_user_pages_fast((unsigned long)src_ptr
, nr_pages
, 0, pages
);
631 if (res
< nr_pages
) {
633 unpin_user_pages(pages
, res
);
638 for (i
= 0; i
< nr_pages
; i
++) {
639 addr
= kmap(pages
[i
]);
641 xsdfec_regwrite(xsdfec
,
642 base_addr
+ ((offset
+ reg
) *
643 XSDFEC_REG_WIDTH_JUMP
),
646 } while ((reg
< len
) &&
647 ((reg
* XSDFEC_REG_WIDTH_JUMP
) % PAGE_SIZE
));
648 unpin_user_page(pages
[i
]);
653 static int xsdfec_add_ldpc(struct xsdfec_dev
*xsdfec
, void __user
*arg
)
655 struct xsdfec_ldpc_params
*ldpc
;
658 ldpc
= memdup_user(arg
, sizeof(*ldpc
));
660 return PTR_ERR(ldpc
);
662 if (xsdfec
->config
.code
== XSDFEC_TURBO_CODE
) {
667 /* Verify Device has not started */
668 if (xsdfec
->state
== XSDFEC_STARTED
) {
673 if (xsdfec
->config
.code_wr_protect
) {
679 ret
= xsdfec_reg0_write(xsdfec
, ldpc
->n
, ldpc
->k
, ldpc
->psize
,
685 ret
= xsdfec_reg1_write(xsdfec
, ldpc
->psize
, ldpc
->no_packing
, ldpc
->nm
,
691 ret
= xsdfec_reg2_write(xsdfec
, ldpc
->nlayers
, ldpc
->nmqc
,
692 ldpc
->norm_type
, ldpc
->special_qc
,
693 ldpc
->no_final_parity
, ldpc
->max_schedule
,
699 ret
= xsdfec_reg3_write(xsdfec
, ldpc
->sc_off
, ldpc
->la_off
,
700 ldpc
->qc_off
, ldpc
->code_id
);
704 /* Write Shared Codes */
705 n
= ldpc
->nlayers
/ 4;
706 if (ldpc
->nlayers
% 4)
709 ret
= xsdfec_table_write(xsdfec
, ldpc
->sc_off
, ldpc
->sc_table
, n
,
710 XSDFEC_LDPC_SC_TABLE_ADDR_BASE
,
711 XSDFEC_SC_TABLE_DEPTH
);
715 ret
= xsdfec_table_write(xsdfec
, 4 * ldpc
->la_off
, ldpc
->la_table
,
716 ldpc
->nlayers
, XSDFEC_LDPC_LA_TABLE_ADDR_BASE
,
717 XSDFEC_LA_TABLE_DEPTH
);
721 ret
= xsdfec_table_write(xsdfec
, 4 * ldpc
->qc_off
, ldpc
->qc_table
,
722 ldpc
->nqc
, XSDFEC_LDPC_QC_TABLE_ADDR_BASE
,
723 XSDFEC_QC_TABLE_DEPTH
);
729 static int xsdfec_set_order(struct xsdfec_dev
*xsdfec
, void __user
*arg
)
732 enum xsdfec_order order
;
735 err
= get_user(order
, (enum xsdfec_order __user
*)arg
);
739 order_invalid
= (order
!= XSDFEC_MAINTAIN_ORDER
) &&
740 (order
!= XSDFEC_OUT_OF_ORDER
);
744 /* Verify Device has not started */
745 if (xsdfec
->state
== XSDFEC_STARTED
)
748 xsdfec_regwrite(xsdfec
, XSDFEC_ORDER_ADDR
, order
);
750 xsdfec
->config
.order
= order
;
755 static int xsdfec_set_bypass(struct xsdfec_dev
*xsdfec
, bool __user
*arg
)
760 err
= get_user(bypass
, arg
);
764 /* Verify Device has not started */
765 if (xsdfec
->state
== XSDFEC_STARTED
)
769 xsdfec_regwrite(xsdfec
, XSDFEC_BYPASS_ADDR
, 1);
771 xsdfec_regwrite(xsdfec
, XSDFEC_BYPASS_ADDR
, 0);
773 xsdfec
->config
.bypass
= bypass
;
778 static int xsdfec_is_active(struct xsdfec_dev
*xsdfec
, bool __user
*arg
)
784 reg_value
= xsdfec_regread(xsdfec
, XSDFEC_ACTIVE_ADDR
);
785 /* using a double ! operator instead of casting */
786 is_active
= !!(reg_value
& XSDFEC_IS_ACTIVITY_SET
);
787 err
= put_user(is_active
, arg
);
795 xsdfec_translate_axis_width_cfg_val(enum xsdfec_axis_width axis_width_cfg
)
797 u32 axis_width_field
= 0;
799 switch (axis_width_cfg
) {
801 axis_width_field
= 0;
804 axis_width_field
= 1;
807 axis_width_field
= 2;
811 return axis_width_field
;
814 static u32
xsdfec_translate_axis_words_cfg_val(enum xsdfec_axis_word_include
817 u32 axis_words_field
= 0;
819 if (axis_word_inc_cfg
== XSDFEC_FIXED_VALUE
||
820 axis_word_inc_cfg
== XSDFEC_IN_BLOCK
)
821 axis_words_field
= 0;
822 else if (axis_word_inc_cfg
== XSDFEC_PER_AXI_TRANSACTION
)
823 axis_words_field
= 1;
825 return axis_words_field
;
828 static int xsdfec_cfg_axi_streams(struct xsdfec_dev
*xsdfec
)
831 u32 dout_words_field
;
832 u32 dout_width_field
;
835 struct xsdfec_config
*config
= &xsdfec
->config
;
837 /* translate config info to register values */
839 xsdfec_translate_axis_words_cfg_val(config
->dout_word_include
);
841 xsdfec_translate_axis_width_cfg_val(config
->dout_width
);
843 xsdfec_translate_axis_words_cfg_val(config
->din_word_include
);
845 xsdfec_translate_axis_width_cfg_val(config
->din_width
);
847 reg_value
= dout_words_field
<< XSDFEC_AXIS_DOUT_WORDS_LSB
;
848 reg_value
|= dout_width_field
<< XSDFEC_AXIS_DOUT_WIDTH_LSB
;
849 reg_value
|= din_words_field
<< XSDFEC_AXIS_DIN_WORDS_LSB
;
850 reg_value
|= din_width_field
<< XSDFEC_AXIS_DIN_WIDTH_LSB
;
852 xsdfec_regwrite(xsdfec
, XSDFEC_AXIS_WIDTH_ADDR
, reg_value
);
/* open() on the char device: no per-open state is needed. */
static int xsdfec_dev_open(struct inode *iptr, struct file *fptr)
{
	return 0;
}
/* release() on the char device: nothing to tear down. */
static int xsdfec_dev_release(struct inode *iptr, struct file *fptr)
{
	return 0;
}
867 static int xsdfec_start(struct xsdfec_dev
*xsdfec
)
871 regread
= xsdfec_regread(xsdfec
, XSDFEC_FEC_CODE_ADDR
);
873 if (regread
!= xsdfec
->config
.code
) {
875 "%s SDFEC HW code does not match driver code, reg %d, code %d",
876 __func__
, regread
, xsdfec
->config
.code
);
880 /* Set AXIS enable */
881 xsdfec_regwrite(xsdfec
, XSDFEC_AXIS_ENABLE_ADDR
,
882 XSDFEC_AXIS_ENABLE_MASK
);
884 xsdfec
->state
= XSDFEC_STARTED
;
888 static int xsdfec_stop(struct xsdfec_dev
*xsdfec
)
892 if (xsdfec
->state
!= XSDFEC_STARTED
)
893 dev_dbg(xsdfec
->dev
, "Device not started correctly");
894 /* Disable AXIS_ENABLE Input interfaces only */
895 regread
= xsdfec_regread(xsdfec
, XSDFEC_AXIS_ENABLE_ADDR
);
896 regread
&= (~XSDFEC_AXIS_IN_ENABLE_MASK
);
897 xsdfec_regwrite(xsdfec
, XSDFEC_AXIS_ENABLE_ADDR
, regread
);
899 xsdfec
->state
= XSDFEC_STOPPED
;
903 static int xsdfec_clear_stats(struct xsdfec_dev
*xsdfec
)
905 spin_lock_irqsave(&xsdfec
->error_data_lock
, xsdfec
->flags
);
906 xsdfec
->isr_err_count
= 0;
907 xsdfec
->uecc_count
= 0;
908 xsdfec
->cecc_count
= 0;
909 spin_unlock_irqrestore(&xsdfec
->error_data_lock
, xsdfec
->flags
);
914 static int xsdfec_get_stats(struct xsdfec_dev
*xsdfec
, void __user
*arg
)
917 struct xsdfec_stats user_stats
;
919 spin_lock_irqsave(&xsdfec
->error_data_lock
, xsdfec
->flags
);
920 user_stats
.isr_err_count
= xsdfec
->isr_err_count
;
921 user_stats
.cecc_count
= xsdfec
->cecc_count
;
922 user_stats
.uecc_count
= xsdfec
->uecc_count
;
923 xsdfec
->stats_updated
= false;
924 spin_unlock_irqrestore(&xsdfec
->error_data_lock
, xsdfec
->flags
);
926 err
= copy_to_user(arg
, &user_stats
, sizeof(user_stats
));
933 static int xsdfec_set_default_config(struct xsdfec_dev
*xsdfec
)
935 /* Ensure registers are aligned with core configuration */
936 xsdfec_regwrite(xsdfec
, XSDFEC_FEC_CODE_ADDR
, xsdfec
->config
.code
);
937 xsdfec_cfg_axi_streams(xsdfec
);
938 update_config_from_hw(xsdfec
);
943 static long xsdfec_dev_ioctl(struct file
*fptr
, unsigned int cmd
,
946 struct xsdfec_dev
*xsdfec
;
947 void __user
*arg
= (void __user
*)data
;
950 xsdfec
= container_of(fptr
->private_data
, struct xsdfec_dev
, miscdev
);
952 /* In failed state allow only reset and get status IOCTLs */
953 if (xsdfec
->state
== XSDFEC_NEEDS_RESET
&&
954 (cmd
!= XSDFEC_SET_DEFAULT_CONFIG
&& cmd
!= XSDFEC_GET_STATUS
&&
955 cmd
!= XSDFEC_GET_STATS
&& cmd
!= XSDFEC_CLEAR_STATS
)) {
960 case XSDFEC_START_DEV
:
961 rval
= xsdfec_start(xsdfec
);
963 case XSDFEC_STOP_DEV
:
964 rval
= xsdfec_stop(xsdfec
);
966 case XSDFEC_CLEAR_STATS
:
967 rval
= xsdfec_clear_stats(xsdfec
);
969 case XSDFEC_GET_STATS
:
970 rval
= xsdfec_get_stats(xsdfec
, arg
);
972 case XSDFEC_GET_STATUS
:
973 rval
= xsdfec_get_status(xsdfec
, arg
);
975 case XSDFEC_GET_CONFIG
:
976 rval
= xsdfec_get_config(xsdfec
, arg
);
978 case XSDFEC_SET_DEFAULT_CONFIG
:
979 rval
= xsdfec_set_default_config(xsdfec
);
982 rval
= xsdfec_set_irq(xsdfec
, arg
);
984 case XSDFEC_SET_TURBO
:
985 rval
= xsdfec_set_turbo(xsdfec
, arg
);
987 case XSDFEC_GET_TURBO
:
988 rval
= xsdfec_get_turbo(xsdfec
, arg
);
990 case XSDFEC_ADD_LDPC_CODE_PARAMS
:
991 rval
= xsdfec_add_ldpc(xsdfec
, arg
);
993 case XSDFEC_SET_ORDER
:
994 rval
= xsdfec_set_order(xsdfec
, arg
);
996 case XSDFEC_SET_BYPASS
:
997 rval
= xsdfec_set_bypass(xsdfec
, arg
);
999 case XSDFEC_IS_ACTIVE
:
1000 rval
= xsdfec_is_active(xsdfec
, (bool __user
*)arg
);
1009 static __poll_t
xsdfec_poll(struct file
*file
, poll_table
*wait
)
1012 struct xsdfec_dev
*xsdfec
;
1014 xsdfec
= container_of(file
->private_data
, struct xsdfec_dev
, miscdev
);
1017 return EPOLLNVAL
| EPOLLHUP
;
1019 poll_wait(file
, &xsdfec
->waitq
, wait
);
1021 /* XSDFEC ISR detected an error */
1022 spin_lock_irqsave(&xsdfec
->error_data_lock
, xsdfec
->flags
);
1023 if (xsdfec
->state_updated
)
1024 mask
|= EPOLLIN
| EPOLLPRI
;
1026 if (xsdfec
->stats_updated
)
1027 mask
|= EPOLLIN
| EPOLLRDNORM
;
1028 spin_unlock_irqrestore(&xsdfec
->error_data_lock
, xsdfec
->flags
);
1033 static const struct file_operations xsdfec_fops
= {
1034 .owner
= THIS_MODULE
,
1035 .open
= xsdfec_dev_open
,
1036 .release
= xsdfec_dev_release
,
1037 .unlocked_ioctl
= xsdfec_dev_ioctl
,
1038 .poll
= xsdfec_poll
,
1039 .compat_ioctl
= compat_ptr_ioctl
,
1042 static int xsdfec_parse_of(struct xsdfec_dev
*xsdfec
)
1044 struct device
*dev
= xsdfec
->dev
;
1045 struct device_node
*node
= dev
->of_node
;
1047 const char *fec_code
;
1049 u32 din_word_include
;
1051 u32 dout_word_include
;
1053 rval
= of_property_read_string(node
, "xlnx,sdfec-code", &fec_code
);
1057 if (!strcasecmp(fec_code
, "ldpc"))
1058 xsdfec
->config
.code
= XSDFEC_LDPC_CODE
;
1059 else if (!strcasecmp(fec_code
, "turbo"))
1060 xsdfec
->config
.code
= XSDFEC_TURBO_CODE
;
1064 rval
= of_property_read_u32(node
, "xlnx,sdfec-din-words",
1069 if (din_word_include
< XSDFEC_AXIS_WORDS_INCLUDE_MAX
)
1070 xsdfec
->config
.din_word_include
= din_word_include
;
1074 rval
= of_property_read_u32(node
, "xlnx,sdfec-din-width", &din_width
);
1078 switch (din_width
) {
1079 /* Fall through and set for valid values */
1083 xsdfec
->config
.din_width
= din_width
;
1089 rval
= of_property_read_u32(node
, "xlnx,sdfec-dout-words",
1090 &dout_word_include
);
1094 if (dout_word_include
< XSDFEC_AXIS_WORDS_INCLUDE_MAX
)
1095 xsdfec
->config
.dout_word_include
= dout_word_include
;
1099 rval
= of_property_read_u32(node
, "xlnx,sdfec-dout-width", &dout_width
);
1103 switch (dout_width
) {
1104 /* Fall through and set for valid values */
1108 xsdfec
->config
.dout_width
= dout_width
;
1114 /* Write LDPC to CODE Register */
1115 xsdfec_regwrite(xsdfec
, XSDFEC_FEC_CODE_ADDR
, xsdfec
->config
.code
);
1117 xsdfec_cfg_axi_streams(xsdfec
);
1122 static irqreturn_t
xsdfec_irq_thread(int irq
, void *dev_id
)
1124 struct xsdfec_dev
*xsdfec
= dev_id
;
1125 irqreturn_t ret
= IRQ_HANDLED
;
1134 WARN_ON(xsdfec
->irq
!= irq
);
1136 /* Mask Interrupts */
1137 xsdfec_isr_enable(xsdfec
, false);
1138 xsdfec_ecc_isr_enable(xsdfec
, false);
1140 ecc_err
= xsdfec_regread(xsdfec
, XSDFEC_ECC_ISR_ADDR
);
1141 isr_err
= xsdfec_regread(xsdfec
, XSDFEC_ISR_ADDR
);
1142 /* Clear the interrupts */
1143 xsdfec_regwrite(xsdfec
, XSDFEC_ECC_ISR_ADDR
, ecc_err
);
1144 xsdfec_regwrite(xsdfec
, XSDFEC_ISR_ADDR
, isr_err
);
1146 tmp
= ecc_err
& XSDFEC_ALL_ECC_ISR_MBE_MASK
;
1147 /* Count uncorrectable 2-bit errors */
1148 uecc_count
= hweight32(tmp
);
1149 /* Count all ECC errors */
1150 aecc_count
= hweight32(ecc_err
);
1151 /* Number of correctable 1-bit ECC error */
1152 cecc_count
= aecc_count
- 2 * uecc_count
;
1153 /* Count ISR errors */
1154 isr_err_count
= hweight32(isr_err
);
1155 dev_dbg(xsdfec
->dev
, "tmp=%x, uecc=%x, aecc=%x, cecc=%x, isr=%x", tmp
,
1156 uecc_count
, aecc_count
, cecc_count
, isr_err_count
);
1157 dev_dbg(xsdfec
->dev
, "uecc=%x, cecc=%x, isr=%x", xsdfec
->uecc_count
,
1158 xsdfec
->cecc_count
, xsdfec
->isr_err_count
);
1160 spin_lock_irqsave(&xsdfec
->error_data_lock
, xsdfec
->flags
);
1161 /* Add new errors to a 2-bits counter */
1163 xsdfec
->uecc_count
+= uecc_count
;
1164 /* Add new errors to a 1-bits counter */
1166 xsdfec
->cecc_count
+= cecc_count
;
1167 /* Add new errors to a ISR counter */
1169 xsdfec
->isr_err_count
+= isr_err_count
;
1171 /* Update state/stats flag */
1173 if (ecc_err
& XSDFEC_ECC_ISR_MBE_MASK
)
1174 xsdfec
->state
= XSDFEC_NEEDS_RESET
;
1175 else if (ecc_err
& XSDFEC_PL_INIT_ECC_ISR_MBE_MASK
)
1176 xsdfec
->state
= XSDFEC_PL_RECONFIGURE
;
1177 xsdfec
->stats_updated
= true;
1178 xsdfec
->state_updated
= true;
1182 xsdfec
->stats_updated
= true;
1184 if (isr_err_count
) {
1185 xsdfec
->state
= XSDFEC_NEEDS_RESET
;
1186 xsdfec
->stats_updated
= true;
1187 xsdfec
->state_updated
= true;
1190 spin_unlock_irqrestore(&xsdfec
->error_data_lock
, xsdfec
->flags
);
1191 dev_dbg(xsdfec
->dev
, "state=%x, stats=%x", xsdfec
->state_updated
,
1192 xsdfec
->stats_updated
);
1194 /* Enable another polling */
1195 if (xsdfec
->state_updated
|| xsdfec
->stats_updated
)
1196 wake_up_interruptible(&xsdfec
->waitq
);
1200 /* Unmask Interrupts */
1201 xsdfec_isr_enable(xsdfec
, true);
1202 xsdfec_ecc_isr_enable(xsdfec
, true);
1207 static int xsdfec_clk_init(struct platform_device
*pdev
,
1208 struct xsdfec_clks
*clks
)
1212 clks
->core_clk
= devm_clk_get(&pdev
->dev
, "core_clk");
1213 if (IS_ERR(clks
->core_clk
)) {
1214 dev_err(&pdev
->dev
, "failed to get core_clk");
1215 return PTR_ERR(clks
->core_clk
);
1218 clks
->axi_clk
= devm_clk_get(&pdev
->dev
, "s_axi_aclk");
1219 if (IS_ERR(clks
->axi_clk
)) {
1220 dev_err(&pdev
->dev
, "failed to get axi_clk");
1221 return PTR_ERR(clks
->axi_clk
);
1224 clks
->din_words_clk
= devm_clk_get(&pdev
->dev
, "s_axis_din_words_aclk");
1225 if (IS_ERR(clks
->din_words_clk
)) {
1226 if (PTR_ERR(clks
->din_words_clk
) != -ENOENT
) {
1227 err
= PTR_ERR(clks
->din_words_clk
);
1230 clks
->din_words_clk
= NULL
;
1233 clks
->din_clk
= devm_clk_get(&pdev
->dev
, "s_axis_din_aclk");
1234 if (IS_ERR(clks
->din_clk
)) {
1235 if (PTR_ERR(clks
->din_clk
) != -ENOENT
) {
1236 err
= PTR_ERR(clks
->din_clk
);
1239 clks
->din_clk
= NULL
;
1242 clks
->dout_clk
= devm_clk_get(&pdev
->dev
, "m_axis_dout_aclk");
1243 if (IS_ERR(clks
->dout_clk
)) {
1244 if (PTR_ERR(clks
->dout_clk
) != -ENOENT
) {
1245 err
= PTR_ERR(clks
->dout_clk
);
1248 clks
->dout_clk
= NULL
;
1251 clks
->dout_words_clk
=
1252 devm_clk_get(&pdev
->dev
, "s_axis_dout_words_aclk");
1253 if (IS_ERR(clks
->dout_words_clk
)) {
1254 if (PTR_ERR(clks
->dout_words_clk
) != -ENOENT
) {
1255 err
= PTR_ERR(clks
->dout_words_clk
);
1258 clks
->dout_words_clk
= NULL
;
1261 clks
->ctrl_clk
= devm_clk_get(&pdev
->dev
, "s_axis_ctrl_aclk");
1262 if (IS_ERR(clks
->ctrl_clk
)) {
1263 if (PTR_ERR(clks
->ctrl_clk
) != -ENOENT
) {
1264 err
= PTR_ERR(clks
->ctrl_clk
);
1267 clks
->ctrl_clk
= NULL
;
1270 clks
->status_clk
= devm_clk_get(&pdev
->dev
, "m_axis_status_aclk");
1271 if (IS_ERR(clks
->status_clk
)) {
1272 if (PTR_ERR(clks
->status_clk
) != -ENOENT
) {
1273 err
= PTR_ERR(clks
->status_clk
);
1276 clks
->status_clk
= NULL
;
1279 err
= clk_prepare_enable(clks
->core_clk
);
1281 dev_err(&pdev
->dev
, "failed to enable core_clk (%d)", err
);
1285 err
= clk_prepare_enable(clks
->axi_clk
);
1287 dev_err(&pdev
->dev
, "failed to enable axi_clk (%d)", err
);
1288 goto err_disable_core_clk
;
1291 err
= clk_prepare_enable(clks
->din_clk
);
1293 dev_err(&pdev
->dev
, "failed to enable din_clk (%d)", err
);
1294 goto err_disable_axi_clk
;
1297 err
= clk_prepare_enable(clks
->din_words_clk
);
1299 dev_err(&pdev
->dev
, "failed to enable din_words_clk (%d)", err
);
1300 goto err_disable_din_clk
;
1303 err
= clk_prepare_enable(clks
->dout_clk
);
1305 dev_err(&pdev
->dev
, "failed to enable dout_clk (%d)", err
);
1306 goto err_disable_din_words_clk
;
1309 err
= clk_prepare_enable(clks
->dout_words_clk
);
1311 dev_err(&pdev
->dev
, "failed to enable dout_words_clk (%d)",
1313 goto err_disable_dout_clk
;
1316 err
= clk_prepare_enable(clks
->ctrl_clk
);
1318 dev_err(&pdev
->dev
, "failed to enable ctrl_clk (%d)", err
);
1319 goto err_disable_dout_words_clk
;
1322 err
= clk_prepare_enable(clks
->status_clk
);
1324 dev_err(&pdev
->dev
, "failed to enable status_clk (%d)\n", err
);
1325 goto err_disable_ctrl_clk
;
1330 err_disable_ctrl_clk
:
1331 clk_disable_unprepare(clks
->ctrl_clk
);
1332 err_disable_dout_words_clk
:
1333 clk_disable_unprepare(clks
->dout_words_clk
);
1334 err_disable_dout_clk
:
1335 clk_disable_unprepare(clks
->dout_clk
);
1336 err_disable_din_words_clk
:
1337 clk_disable_unprepare(clks
->din_words_clk
);
1338 err_disable_din_clk
:
1339 clk_disable_unprepare(clks
->din_clk
);
1340 err_disable_axi_clk
:
1341 clk_disable_unprepare(clks
->axi_clk
);
1342 err_disable_core_clk
:
1343 clk_disable_unprepare(clks
->core_clk
);
/*
 * Release every clock acquired by the clock-init path.
 *
 * The optional clocks are left as NULL pointers when absent (devm_clk_get
 * returned -ENOENT), and clk_disable_unprepare() is a no-op for a NULL
 * clock, so no per-clock checks are needed here.
 */
static void xsdfec_disable_all_clks(struct xsdfec_clks *clks)
{
	clk_disable_unprepare(clks->status_clk);
	clk_disable_unprepare(clks->ctrl_clk);
	clk_disable_unprepare(clks->dout_words_clk);
	clk_disable_unprepare(clks->dout_clk);
	clk_disable_unprepare(clks->din_words_clk);
	clk_disable_unprepare(clks->din_clk);
	clk_disable_unprepare(clks->core_clk);
	clk_disable_unprepare(clks->axi_clk);
}
1360 static int xsdfec_probe(struct platform_device
*pdev
)
1362 struct xsdfec_dev
*xsdfec
;
1364 struct resource
*res
;
1366 bool irq_enabled
= true;
1368 xsdfec
= devm_kzalloc(&pdev
->dev
, sizeof(*xsdfec
), GFP_KERNEL
);
1372 xsdfec
->dev
= &pdev
->dev
;
1373 spin_lock_init(&xsdfec
->error_data_lock
);
1375 err
= xsdfec_clk_init(pdev
, &xsdfec
->clks
);
1380 res
= platform_get_resource(pdev
, IORESOURCE_MEM
, 0);
1381 xsdfec
->regs
= devm_ioremap_resource(dev
, res
);
1382 if (IS_ERR(xsdfec
->regs
)) {
1383 err
= PTR_ERR(xsdfec
->regs
);
1384 goto err_xsdfec_dev
;
1387 xsdfec
->irq
= platform_get_irq(pdev
, 0);
1388 if (xsdfec
->irq
< 0) {
1389 dev_dbg(dev
, "platform_get_irq failed");
1390 irq_enabled
= false;
1393 err
= xsdfec_parse_of(xsdfec
);
1395 goto err_xsdfec_dev
;
1397 update_config_from_hw(xsdfec
);
1399 /* Save driver private data */
1400 platform_set_drvdata(pdev
, xsdfec
);
1403 init_waitqueue_head(&xsdfec
->waitq
);
1404 /* Register IRQ thread */
1405 err
= devm_request_threaded_irq(dev
, xsdfec
->irq
, NULL
,
1406 xsdfec_irq_thread
, IRQF_ONESHOT
,
1407 "xilinx-sdfec16", xsdfec
);
1409 dev_err(dev
, "unable to request IRQ%d", xsdfec
->irq
);
1410 goto err_xsdfec_dev
;
1414 err
= ida_alloc(&dev_nrs
, GFP_KERNEL
);
1416 goto err_xsdfec_dev
;
1417 xsdfec
->dev_id
= err
;
1419 snprintf(xsdfec
->dev_name
, DEV_NAME_LEN
, "xsdfec%d", xsdfec
->dev_id
);
1420 xsdfec
->miscdev
.minor
= MISC_DYNAMIC_MINOR
;
1421 xsdfec
->miscdev
.name
= xsdfec
->dev_name
;
1422 xsdfec
->miscdev
.fops
= &xsdfec_fops
;
1423 xsdfec
->miscdev
.parent
= dev
;
1424 err
= misc_register(&xsdfec
->miscdev
);
1426 dev_err(dev
, "error:%d. Unable to register device", err
);
1427 goto err_xsdfec_ida
;
1432 ida_free(&dev_nrs
, xsdfec
->dev_id
);
1434 xsdfec_disable_all_clks(&xsdfec
->clks
);
1438 static int xsdfec_remove(struct platform_device
*pdev
)
1440 struct xsdfec_dev
*xsdfec
;
1442 xsdfec
= platform_get_drvdata(pdev
);
1443 misc_deregister(&xsdfec
->miscdev
);
1444 ida_free(&dev_nrs
, xsdfec
->dev_id
);
1445 xsdfec_disable_all_clks(&xsdfec
->clks
);
/* Device-tree match table: binds this driver to SD-FEC 1.1 IP instances */
static const struct of_device_id xsdfec_of_match[] = {
	{
		.compatible = "xlnx,sd-fec-1.1",
	},
	{ /* end of table */ }
};
MODULE_DEVICE_TABLE(of, xsdfec_of_match);
1457 static struct platform_driver xsdfec_driver
= {
1459 .name
= "xilinx-sdfec",
1460 .of_match_table
= xsdfec_of_match
,
1462 .probe
= xsdfec_probe
,
1463 .remove
= xsdfec_remove
,
1466 module_platform_driver(xsdfec_driver
);
/* Module metadata */
MODULE_AUTHOR("Xilinx, Inc");
MODULE_DESCRIPTION("Xilinx SD-FEC16 Driver");
MODULE_LICENSE("GPL");