// SPDX-License-Identifier: GPL-2.0
/* Texas Instruments ICSSG Industrial Ethernet Peripheral (IEP) Driver
 *
 * Copyright (C) 2023 Texas Instruments Incorporated - https://www.ti.com
 */
#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/timekeeping.h>
#include <linux/workqueue.h>

#include "icss_iep.h"
/* Limits imposed by the IEP register fields */
#define IEP_MAX_DEF_INC		0xf
#define IEP_MAX_COMPEN_INC	0xfff
#define IEP_MAX_COMPEN_COUNT	0xffffff

#define IEP_GLOBAL_CFG_CNT_ENABLE	BIT(0)
#define IEP_GLOBAL_CFG_DEFAULT_INC_MASK		GENMASK(7, 4)
#define IEP_GLOBAL_CFG_DEFAULT_INC_SHIFT	4
#define IEP_GLOBAL_CFG_COMPEN_INC_MASK		GENMASK(19, 8)
#define IEP_GLOBAL_CFG_COMPEN_INC_SHIFT		8

#define IEP_GLOBAL_STATUS_CNT_OVF	BIT(0)

#define IEP_CMP_CFG_SHADOW_EN		BIT(17)
#define IEP_CMP_CFG_CMP0_RST_CNT_EN	BIT(0)
#define IEP_CMP_CFG_CMP_EN(cmp)		(GENMASK(16, 1) & (1 << ((cmp) + 1)))

#define IEP_CMP_STATUS(cmp)		(1 << (cmp))

#define IEP_SYNC_CTRL_SYNC_EN		BIT(0)
#define IEP_SYNC_CTRL_SYNC_N_EN(n)	(GENMASK(2, 1) & (BIT(1) << (n)))

/* CMP index range used when clearing compare status */
#define IEP_MIN_CMP	0
#define IEP_MAX_CMP	15

/* plat_data->flags capability bits */
#define ICSS_IEP_64BIT_COUNTER_SUPPORT		BIT(0)
#define ICSS_IEP_SLOW_COMPEN_REG_SUPPORT	BIT(1)
#define ICSS_IEP_SHADOW_MODE_SUPPORT		BIT(2)

/* EXTTS index 'n' maps to hardware capture channel 6 + n */
#define LATCH_INDEX(ts_index)			((ts_index) + 6)
#define IEP_CAP_CFG_CAPNR_1ST_EVENT_EN(n)	BIT(LATCH_INDEX(n))
#define IEP_CAP_CFG_CAP_ASYNC_EN(n)		BIT(LATCH_INDEX(n) + 10)
57 * icss_iep_get_count_hi() - Get the upper 32 bit IEP counter
58 * @iep: Pointer to structure representing IEP.
60 * Return: upper 32 bit IEP counter
62 int icss_iep_get_count_hi(struct icss_iep
*iep
)
66 if (iep
&& (iep
->plat_data
->flags
& ICSS_IEP_64BIT_COUNTER_SUPPORT
))
67 val
= readl(iep
->base
+ iep
->plat_data
->reg_offs
[ICSS_IEP_COUNT_REG1
]);
71 EXPORT_SYMBOL_GPL(icss_iep_get_count_hi
);
74 * icss_iep_get_count_low() - Get the lower 32 bit IEP counter
75 * @iep: Pointer to structure representing IEP.
77 * Return: lower 32 bit IEP counter
79 int icss_iep_get_count_low(struct icss_iep
*iep
)
84 val
= readl(iep
->base
+ iep
->plat_data
->reg_offs
[ICSS_IEP_COUNT_REG0
]);
88 EXPORT_SYMBOL_GPL(icss_iep_get_count_low
);
91 * icss_iep_get_ptp_clock_idx() - Get PTP clock index using IEP driver
92 * @iep: Pointer to structure representing IEP.
94 * Return: PTP clock index, -1 if not registered
96 int icss_iep_get_ptp_clock_idx(struct icss_iep
*iep
)
98 if (!iep
|| !iep
->ptp_clock
)
100 return ptp_clock_index(iep
->ptp_clock
);
102 EXPORT_SYMBOL_GPL(icss_iep_get_ptp_clock_idx
);
104 static void icss_iep_set_counter(struct icss_iep
*iep
, u64 ns
)
106 if (iep
->plat_data
->flags
& ICSS_IEP_64BIT_COUNTER_SUPPORT
)
107 writel(upper_32_bits(ns
), iep
->base
+
108 iep
->plat_data
->reg_offs
[ICSS_IEP_COUNT_REG1
]);
109 writel(lower_32_bits(ns
), iep
->base
+ iep
->plat_data
->reg_offs
[ICSS_IEP_COUNT_REG0
]);
112 static void icss_iep_update_to_next_boundary(struct icss_iep
*iep
, u64 start_ns
);
115 * icss_iep_settime() - Set time of the PTP clock using IEP driver
116 * @iep: Pointer to structure representing IEP.
117 * @ns: Time to be set in nanoseconds
119 * This API uses writel() instead of regmap_write() for write operations as
120 * regmap_write() is too slow and this API is time sensitive.
122 static void icss_iep_settime(struct icss_iep
*iep
, u64 ns
)
124 if (iep
->ops
&& iep
->ops
->settime
) {
125 iep
->ops
->settime(iep
->clockops_data
, ns
);
129 if (iep
->pps_enabled
|| iep
->perout_enabled
)
130 writel(0, iep
->base
+ iep
->plat_data
->reg_offs
[ICSS_IEP_SYNC_CTRL_REG
]);
132 icss_iep_set_counter(iep
, ns
);
134 if (iep
->pps_enabled
|| iep
->perout_enabled
) {
135 icss_iep_update_to_next_boundary(iep
, ns
);
136 writel(IEP_SYNC_CTRL_SYNC_N_EN(0) | IEP_SYNC_CTRL_SYNC_EN
,
137 iep
->base
+ iep
->plat_data
->reg_offs
[ICSS_IEP_SYNC_CTRL_REG
]);
142 * icss_iep_gettime() - Get time of the PTP clock using IEP driver
143 * @iep: Pointer to structure representing IEP.
144 * @sts: Pointer to structure representing PTP system timestamp.
146 * This API uses readl() instead of regmap_read() for read operations as
147 * regmap_read() is too slow and this API is time sensitive.
149 * Return: The current timestamp of the PTP clock using IEP driver
151 static u64
icss_iep_gettime(struct icss_iep
*iep
,
152 struct ptp_system_timestamp
*sts
)
154 u32 ts_hi
= 0, ts_lo
;
157 if (iep
->ops
&& iep
->ops
->gettime
)
158 return iep
->ops
->gettime(iep
->clockops_data
, sts
);
160 /* use local_irq_x() to make it work for both RT/non-RT */
161 local_irq_save(flags
);
163 /* no need to play with hi-lo, hi is latched when lo is read */
164 ptp_read_system_prets(sts
);
165 ts_lo
= readl(iep
->base
+ iep
->plat_data
->reg_offs
[ICSS_IEP_COUNT_REG0
]);
166 ptp_read_system_postts(sts
);
167 if (iep
->plat_data
->flags
& ICSS_IEP_64BIT_COUNTER_SUPPORT
)
168 ts_hi
= readl(iep
->base
+ iep
->plat_data
->reg_offs
[ICSS_IEP_COUNT_REG1
]);
170 local_irq_restore(flags
);
172 return (u64
)ts_lo
| (u64
)ts_hi
<< 32;
175 static void icss_iep_enable(struct icss_iep
*iep
)
177 regmap_update_bits(iep
->map
, ICSS_IEP_GLOBAL_CFG_REG
,
178 IEP_GLOBAL_CFG_CNT_ENABLE
,
179 IEP_GLOBAL_CFG_CNT_ENABLE
);
182 static void icss_iep_disable(struct icss_iep
*iep
)
184 regmap_update_bits(iep
->map
, ICSS_IEP_GLOBAL_CFG_REG
,
185 IEP_GLOBAL_CFG_CNT_ENABLE
,
189 static void icss_iep_enable_shadow_mode(struct icss_iep
*iep
)
194 cycle_time
= iep
->cycle_time_ns
- iep
->def_inc
;
196 icss_iep_disable(iep
);
198 /* disable shadow mode */
199 regmap_update_bits(iep
->map
, ICSS_IEP_CMP_CFG_REG
,
200 IEP_CMP_CFG_SHADOW_EN
, 0);
202 /* enable shadow mode */
203 regmap_update_bits(iep
->map
, ICSS_IEP_CMP_CFG_REG
,
204 IEP_CMP_CFG_SHADOW_EN
, IEP_CMP_CFG_SHADOW_EN
);
207 icss_iep_set_counter(iep
, 0);
209 /* clear overflow status */
210 regmap_update_bits(iep
->map
, ICSS_IEP_GLOBAL_STATUS_REG
,
211 IEP_GLOBAL_STATUS_CNT_OVF
,
212 IEP_GLOBAL_STATUS_CNT_OVF
);
214 /* clear compare status */
215 for (cmp
= IEP_MIN_CMP
; cmp
< IEP_MAX_CMP
; cmp
++) {
216 regmap_update_bits(iep
->map
, ICSS_IEP_CMP_STAT_REG
,
217 IEP_CMP_STATUS(cmp
), IEP_CMP_STATUS(cmp
));
220 /* enable reset counter on CMP0 event */
221 regmap_update_bits(iep
->map
, ICSS_IEP_CMP_CFG_REG
,
222 IEP_CMP_CFG_CMP0_RST_CNT_EN
,
223 IEP_CMP_CFG_CMP0_RST_CNT_EN
);
225 regmap_update_bits(iep
->map
, ICSS_IEP_CMP_CFG_REG
,
226 IEP_CMP_CFG_CMP_EN(0),
227 IEP_CMP_CFG_CMP_EN(0));
229 /* set CMP0 value to cycle time */
230 regmap_write(iep
->map
, ICSS_IEP_CMP0_REG0
, cycle_time
);
231 if (iep
->plat_data
->flags
& ICSS_IEP_64BIT_COUNTER_SUPPORT
)
232 regmap_write(iep
->map
, ICSS_IEP_CMP0_REG1
, cycle_time
);
234 icss_iep_set_counter(iep
, 0);
235 icss_iep_enable(iep
);
238 static void icss_iep_set_default_inc(struct icss_iep
*iep
, u8 def_inc
)
240 regmap_update_bits(iep
->map
, ICSS_IEP_GLOBAL_CFG_REG
,
241 IEP_GLOBAL_CFG_DEFAULT_INC_MASK
,
242 def_inc
<< IEP_GLOBAL_CFG_DEFAULT_INC_SHIFT
);
245 static void icss_iep_set_compensation_inc(struct icss_iep
*iep
, u16 compen_inc
)
247 struct device
*dev
= regmap_get_device(iep
->map
);
249 if (compen_inc
> IEP_MAX_COMPEN_INC
) {
250 dev_err(dev
, "%s: too high compensation inc %d\n",
251 __func__
, compen_inc
);
252 compen_inc
= IEP_MAX_COMPEN_INC
;
255 regmap_update_bits(iep
->map
, ICSS_IEP_GLOBAL_CFG_REG
,
256 IEP_GLOBAL_CFG_COMPEN_INC_MASK
,
257 compen_inc
<< IEP_GLOBAL_CFG_COMPEN_INC_SHIFT
);
260 static void icss_iep_set_compensation_count(struct icss_iep
*iep
,
263 struct device
*dev
= regmap_get_device(iep
->map
);
265 if (compen_count
> IEP_MAX_COMPEN_COUNT
) {
266 dev_err(dev
, "%s: too high compensation count %d\n",
267 __func__
, compen_count
);
268 compen_count
= IEP_MAX_COMPEN_COUNT
;
271 regmap_write(iep
->map
, ICSS_IEP_COMPEN_REG
, compen_count
);
274 static void icss_iep_set_slow_compensation_count(struct icss_iep
*iep
,
277 regmap_write(iep
->map
, ICSS_IEP_SLOW_COMPEN_REG
, compen_count
);
280 /* PTP PHC operations */
281 static int icss_iep_ptp_adjfine(struct ptp_clock_info
*ptp
, long scaled_ppm
)
283 struct icss_iep
*iep
= container_of(ptp
, struct icss_iep
, ptp_info
);
284 s32 ppb
= scaled_ppm_to_ppb(scaled_ppm
);
288 mutex_lock(&iep
->ptp_clk_mutex
);
290 /* ppb is amount of frequency we want to adjust in 1GHz (billion)
291 * e.g. 100ppb means we need to speed up clock by 100Hz
292 * i.e. at end of 1 second (1 billion ns) clock time, we should be
293 * counting 100 more ns.
294 * We use IEP slow compensation to achieve continuous freq. adjustment.
295 * There are 2 parts. Cycle time and adjustment per cycle.
296 * Simplest case would be 1 sec Cycle time. Then adjustment
297 * pre cycle would be (def_inc + ppb) value.
298 * Cycle time will have to be chosen based on how worse the ppb is.
299 * e.g. smaller the ppb, cycle time has to be large.
300 * The minimum adjustment we can do is +-1ns per cycle so let's
301 * reduce the cycle time to get 1ns per cycle adjustment.
302 * 1ppb = 1sec cycle time & 1ns adjust
303 * 1000ppb = 1/1000 cycle time & 1ns adjust per cycle
306 if (iep
->cycle_time_ns
)
307 iep
->slow_cmp_inc
= iep
->clk_tick_time
; /* 4ns adj per cycle */
309 iep
->slow_cmp_inc
= 1; /* 1ns adjust per cycle */
312 iep
->slow_cmp_inc
= -iep
->slow_cmp_inc
;
316 cyc_count
= NSEC_PER_SEC
; /* 1s cycle time @1GHz */
317 cyc_count
/= ppb
; /* cycle time per ppb */
319 /* slow_cmp_count is decremented every clock cycle, e.g. @250MHz */
320 if (!iep
->cycle_time_ns
)
321 cyc_count
/= iep
->clk_tick_time
;
322 iep
->slow_cmp_count
= cyc_count
;
324 /* iep->clk_tick_time is def_inc */
325 cmp_inc
= iep
->clk_tick_time
+ iep
->slow_cmp_inc
;
326 icss_iep_set_compensation_inc(iep
, cmp_inc
);
327 icss_iep_set_slow_compensation_count(iep
, iep
->slow_cmp_count
);
329 mutex_unlock(&iep
->ptp_clk_mutex
);
334 static int icss_iep_ptp_adjtime(struct ptp_clock_info
*ptp
, s64 delta
)
336 struct icss_iep
*iep
= container_of(ptp
, struct icss_iep
, ptp_info
);
339 mutex_lock(&iep
->ptp_clk_mutex
);
340 if (iep
->ops
&& iep
->ops
->adjtime
) {
341 iep
->ops
->adjtime(iep
->clockops_data
, delta
);
343 ns
= icss_iep_gettime(iep
, NULL
);
345 icss_iep_settime(iep
, ns
);
347 mutex_unlock(&iep
->ptp_clk_mutex
);
352 static int icss_iep_ptp_gettimeex(struct ptp_clock_info
*ptp
,
353 struct timespec64
*ts
,
354 struct ptp_system_timestamp
*sts
)
356 struct icss_iep
*iep
= container_of(ptp
, struct icss_iep
, ptp_info
);
359 mutex_lock(&iep
->ptp_clk_mutex
);
360 ns
= icss_iep_gettime(iep
, sts
);
361 *ts
= ns_to_timespec64(ns
);
362 mutex_unlock(&iep
->ptp_clk_mutex
);
367 static int icss_iep_ptp_settime(struct ptp_clock_info
*ptp
,
368 const struct timespec64
*ts
)
370 struct icss_iep
*iep
= container_of(ptp
, struct icss_iep
, ptp_info
);
373 mutex_lock(&iep
->ptp_clk_mutex
);
374 ns
= timespec64_to_ns(ts
);
375 icss_iep_settime(iep
, ns
);
376 mutex_unlock(&iep
->ptp_clk_mutex
);
381 static void icss_iep_update_to_next_boundary(struct icss_iep
*iep
, u64 start_ns
)
386 ns
= icss_iep_gettime(iep
, NULL
);
390 /* Round up to next period boundary */
391 start_ns
+= p_ns
- 1;
392 offset
= do_div(start_ns
, p_ns
);
393 start_ns
= start_ns
* p_ns
;
394 /* If it is too close to update, shift to next boundary */
395 if (p_ns
- offset
< 10)
398 regmap_write(iep
->map
, ICSS_IEP_CMP1_REG0
, lower_32_bits(start_ns
));
399 if (iep
->plat_data
->flags
& ICSS_IEP_64BIT_COUNTER_SUPPORT
)
400 regmap_write(iep
->map
, ICSS_IEP_CMP1_REG1
, upper_32_bits(start_ns
));
403 static int icss_iep_perout_enable_hw(struct icss_iep
*iep
,
404 struct ptp_perout_request
*req
, int on
)
409 if (iep
->ops
&& iep
->ops
->perout_enable
) {
410 ret
= iep
->ops
->perout_enable(iep
->clockops_data
, req
, on
, &cmp
);
416 regmap_write(iep
->map
, ICSS_IEP_CMP1_REG0
, lower_32_bits(cmp
));
417 if (iep
->plat_data
->flags
& ICSS_IEP_64BIT_COUNTER_SUPPORT
)
418 regmap_write(iep
->map
, ICSS_IEP_CMP1_REG1
, upper_32_bits(cmp
));
419 /* Configure SYNC, 1ms pulse width */
420 regmap_write(iep
->map
, ICSS_IEP_SYNC_PWIDTH_REG
, 1000000);
421 regmap_write(iep
->map
, ICSS_IEP_SYNC0_PERIOD_REG
, 0);
422 regmap_write(iep
->map
, ICSS_IEP_SYNC_START_REG
, 0);
423 regmap_write(iep
->map
, ICSS_IEP_SYNC_CTRL_REG
, 0); /* one-shot mode */
425 regmap_update_bits(iep
->map
, ICSS_IEP_CMP_CFG_REG
,
426 IEP_CMP_CFG_CMP_EN(1), IEP_CMP_CFG_CMP_EN(1));
429 regmap_update_bits(iep
->map
, ICSS_IEP_CMP_CFG_REG
,
430 IEP_CMP_CFG_CMP_EN(1), 0);
433 regmap_write(iep
->map
, ICSS_IEP_CMP1_REG0
, 0);
434 if (iep
->plat_data
->flags
& ICSS_IEP_64BIT_COUNTER_SUPPORT
)
435 regmap_write(iep
->map
, ICSS_IEP_CMP1_REG1
, 0);
441 iep
->period
= ((u64
)req
->period
.sec
* NSEC_PER_SEC
) +
443 start_ns
= ((u64
)req
->period
.sec
* NSEC_PER_SEC
)
445 icss_iep_update_to_next_boundary(iep
, start_ns
);
447 /* Enable Sync in single shot mode */
448 regmap_write(iep
->map
, ICSS_IEP_SYNC_CTRL_REG
,
449 IEP_SYNC_CTRL_SYNC_N_EN(0) | IEP_SYNC_CTRL_SYNC_EN
);
451 regmap_update_bits(iep
->map
, ICSS_IEP_CMP_CFG_REG
,
452 IEP_CMP_CFG_CMP_EN(1), IEP_CMP_CFG_CMP_EN(1));
455 regmap_update_bits(iep
->map
, ICSS_IEP_CMP_CFG_REG
,
456 IEP_CMP_CFG_CMP_EN(1), 0);
459 regmap_write(iep
->map
, ICSS_IEP_CMP1_REG0
, 0);
460 if (iep
->plat_data
->flags
& ICSS_IEP_64BIT_COUNTER_SUPPORT
)
461 regmap_write(iep
->map
, ICSS_IEP_CMP1_REG1
, 0);
464 regmap_write(iep
->map
, ICSS_IEP_SYNC_CTRL_REG
, 0);
471 static int icss_iep_perout_enable(struct icss_iep
*iep
,
472 struct ptp_perout_request
*req
, int on
)
476 mutex_lock(&iep
->ptp_clk_mutex
);
478 if (iep
->pps_enabled
) {
483 if (iep
->perout_enabled
== !!on
)
486 ret
= icss_iep_perout_enable_hw(iep
, req
, on
);
488 iep
->perout_enabled
= !!on
;
491 mutex_unlock(&iep
->ptp_clk_mutex
);
496 static void icss_iep_cap_cmp_work(struct work_struct
*work
)
498 struct icss_iep
*iep
= container_of(work
, struct icss_iep
, work
);
499 const u32
*reg_offs
= iep
->plat_data
->reg_offs
;
500 struct ptp_clock_event pevent
;
504 mutex_lock(&iep
->ptp_clk_mutex
);
506 ns
= readl(iep
->base
+ reg_offs
[ICSS_IEP_CMP1_REG0
]);
507 if (iep
->plat_data
->flags
& ICSS_IEP_64BIT_COUNTER_SUPPORT
) {
508 val
= readl(iep
->base
+ reg_offs
[ICSS_IEP_CMP1_REG1
]);
509 ns
|= (u64
)val
<< 32;
512 ns_next
= ns
+ iep
->period
;
513 writel(lower_32_bits(ns_next
),
514 iep
->base
+ reg_offs
[ICSS_IEP_CMP1_REG0
]);
515 if (iep
->plat_data
->flags
& ICSS_IEP_64BIT_COUNTER_SUPPORT
)
516 writel(upper_32_bits(ns_next
),
517 iep
->base
+ reg_offs
[ICSS_IEP_CMP1_REG1
]);
519 pevent
.pps_times
.ts_real
= ns_to_timespec64(ns
);
520 pevent
.type
= PTP_CLOCK_PPSUSR
;
522 ptp_clock_event(iep
->ptp_clock
, &pevent
);
523 dev_dbg(iep
->dev
, "IEP:pps ts: %llu next:%llu:\n", ns
, ns_next
);
525 mutex_unlock(&iep
->ptp_clk_mutex
);
528 static irqreturn_t
icss_iep_cap_cmp_irq(int irq
, void *dev_id
)
530 struct icss_iep
*iep
= (struct icss_iep
*)dev_id
;
531 const u32
*reg_offs
= iep
->plat_data
->reg_offs
;
534 val
= readl(iep
->base
+ reg_offs
[ICSS_IEP_CMP_STAT_REG
]);
535 /* The driver only enables CMP1 */
537 /* Clear the event */
538 writel(BIT(1), iep
->base
+ reg_offs
[ICSS_IEP_CMP_STAT_REG
]);
539 if (iep
->pps_enabled
|| iep
->perout_enabled
)
540 schedule_work(&iep
->work
);
547 static int icss_iep_pps_enable(struct icss_iep
*iep
, int on
)
549 struct ptp_clock_request rq
;
550 struct timespec64 ts
;
554 mutex_lock(&iep
->ptp_clk_mutex
);
556 if (iep
->perout_enabled
) {
561 if (iep
->pps_enabled
== !!on
)
566 ns
= icss_iep_gettime(iep
, NULL
);
567 ts
= ns_to_timespec64(ns
);
568 rq
.perout
.period
.sec
= 1;
569 rq
.perout
.period
.nsec
= 0;
570 rq
.perout
.start
.sec
= ts
.tv_sec
+ 2;
571 rq
.perout
.start
.nsec
= 0;
572 ret
= icss_iep_perout_enable_hw(iep
, &rq
.perout
, on
);
574 ret
= icss_iep_perout_enable_hw(iep
, &rq
.perout
, on
);
575 if (iep
->cap_cmp_irq
)
576 cancel_work_sync(&iep
->work
);
580 iep
->pps_enabled
= !!on
;
583 mutex_unlock(&iep
->ptp_clk_mutex
);
588 static int icss_iep_extts_enable(struct icss_iep
*iep
, u32 index
, int on
)
590 u32 val
, cap
, ret
= 0;
592 mutex_lock(&iep
->ptp_clk_mutex
);
594 if (iep
->ops
&& iep
->ops
->extts_enable
) {
595 ret
= iep
->ops
->extts_enable(iep
->clockops_data
, index
, on
);
599 if (((iep
->latch_enable
& BIT(index
)) >> index
) == on
)
602 regmap_read(iep
->map
, ICSS_IEP_CAPTURE_CFG_REG
, &val
);
603 cap
= IEP_CAP_CFG_CAP_ASYNC_EN(index
) | IEP_CAP_CFG_CAPNR_1ST_EVENT_EN(index
);
606 iep
->latch_enable
|= BIT(index
);
609 iep
->latch_enable
&= ~BIT(index
);
611 regmap_write(iep
->map
, ICSS_IEP_CAPTURE_CFG_REG
, val
);
614 mutex_unlock(&iep
->ptp_clk_mutex
);
619 static int icss_iep_ptp_enable(struct ptp_clock_info
*ptp
,
620 struct ptp_clock_request
*rq
, int on
)
622 struct icss_iep
*iep
= container_of(ptp
, struct icss_iep
, ptp_info
);
625 case PTP_CLK_REQ_PEROUT
:
626 return icss_iep_perout_enable(iep
, &rq
->perout
, on
);
627 case PTP_CLK_REQ_PPS
:
628 return icss_iep_pps_enable(iep
, on
);
629 case PTP_CLK_REQ_EXTTS
:
630 return icss_iep_extts_enable(iep
, rq
->extts
.index
, on
);
638 static struct ptp_clock_info icss_iep_ptp_info
= {
639 .owner
= THIS_MODULE
,
640 .name
= "ICSS IEP timer",
642 .adjfine
= icss_iep_ptp_adjfine
,
643 .adjtime
= icss_iep_ptp_adjtime
,
644 .gettimex64
= icss_iep_ptp_gettimeex
,
645 .settime64
= icss_iep_ptp_settime
,
646 .enable
= icss_iep_ptp_enable
,
649 struct icss_iep
*icss_iep_get_idx(struct device_node
*np
, int idx
)
651 struct platform_device
*pdev
;
652 struct device_node
*iep_np
;
653 struct icss_iep
*iep
;
655 iep_np
= of_parse_phandle(np
, "ti,iep", idx
);
656 if (!iep_np
|| !of_device_is_available(iep_np
))
657 return ERR_PTR(-ENODEV
);
659 pdev
= of_find_device_by_node(iep_np
);
663 /* probably IEP not yet probed */
664 return ERR_PTR(-EPROBE_DEFER
);
666 iep
= platform_get_drvdata(pdev
);
668 return ERR_PTR(-EPROBE_DEFER
);
670 device_lock(iep
->dev
);
671 if (iep
->client_np
) {
672 device_unlock(iep
->dev
);
673 dev_err(iep
->dev
, "IEP is already acquired by %s",
674 iep
->client_np
->name
);
675 return ERR_PTR(-EBUSY
);
678 device_unlock(iep
->dev
);
679 get_device(iep
->dev
);
683 EXPORT_SYMBOL_GPL(icss_iep_get_idx
);
/* Convenience wrapper: acquire the first (index 0) IEP */
struct icss_iep *icss_iep_get(struct device_node *np)
{
	return icss_iep_get_idx(np, 0);
}
EXPORT_SYMBOL_GPL(icss_iep_get);
691 void icss_iep_put(struct icss_iep
*iep
)
693 device_lock(iep
->dev
);
694 iep
->client_np
= NULL
;
695 device_unlock(iep
->dev
);
696 put_device(iep
->dev
);
698 EXPORT_SYMBOL_GPL(icss_iep_put
);
700 void icss_iep_init_fw(struct icss_iep
*iep
)
702 /* start IEP for FW use in raw 64bit mode, no PTP support */
703 iep
->clk_tick_time
= iep
->def_inc
;
704 iep
->cycle_time_ns
= 0;
706 iep
->clockops_data
= NULL
;
707 icss_iep_set_default_inc(iep
, iep
->def_inc
);
708 icss_iep_set_compensation_inc(iep
, iep
->def_inc
);
709 icss_iep_set_compensation_count(iep
, 0);
710 regmap_write(iep
->map
, ICSS_IEP_SYNC_PWIDTH_REG
, iep
->refclk_freq
/ 10); /* 100 ms pulse */
711 regmap_write(iep
->map
, ICSS_IEP_SYNC0_PERIOD_REG
, 0);
712 if (iep
->plat_data
->flags
& ICSS_IEP_SLOW_COMPEN_REG_SUPPORT
)
713 icss_iep_set_slow_compensation_count(iep
, 0);
715 icss_iep_enable(iep
);
716 icss_iep_settime(iep
, 0);
718 EXPORT_SYMBOL_GPL(icss_iep_init_fw
);
/* Stop the counter when firmware use of the IEP ends */
void icss_iep_exit_fw(struct icss_iep *iep)
{
	icss_iep_disable(iep);
}
EXPORT_SYMBOL_GPL(icss_iep_exit_fw);
726 int icss_iep_init(struct icss_iep
*iep
, const struct icss_iep_clockops
*clkops
,
727 void *clockops_data
, u32 cycle_time_ns
)
731 iep
->cycle_time_ns
= cycle_time_ns
;
732 iep
->clk_tick_time
= iep
->def_inc
;
734 iep
->clockops_data
= clockops_data
;
735 icss_iep_set_default_inc(iep
, iep
->def_inc
);
736 icss_iep_set_compensation_inc(iep
, iep
->def_inc
);
737 icss_iep_set_compensation_count(iep
, 0);
738 regmap_write(iep
->map
, ICSS_IEP_SYNC_PWIDTH_REG
, iep
->refclk_freq
/ 10); /* 100 ms pulse */
739 regmap_write(iep
->map
, ICSS_IEP_SYNC0_PERIOD_REG
, 0);
740 if (iep
->plat_data
->flags
& ICSS_IEP_SLOW_COMPEN_REG_SUPPORT
)
741 icss_iep_set_slow_compensation_count(iep
, 0);
743 if (!(iep
->plat_data
->flags
& ICSS_IEP_64BIT_COUNTER_SUPPORT
) ||
744 !(iep
->plat_data
->flags
& ICSS_IEP_SLOW_COMPEN_REG_SUPPORT
))
747 if (iep
->ops
&& iep
->ops
->perout_enable
) {
748 iep
->ptp_info
.n_per_out
= 1;
749 iep
->ptp_info
.pps
= 1;
750 } else if (iep
->cap_cmp_irq
) {
751 iep
->ptp_info
.pps
= 1;
754 if (iep
->ops
&& iep
->ops
->extts_enable
)
755 iep
->ptp_info
.n_ext_ts
= 2;
759 icss_iep_enable_shadow_mode(iep
);
761 icss_iep_enable(iep
);
762 icss_iep_settime(iep
, ktime_get_real_ns());
764 iep
->ptp_clock
= ptp_clock_register(&iep
->ptp_info
, iep
->dev
);
765 if (IS_ERR(iep
->ptp_clock
)) {
766 ret
= PTR_ERR(iep
->ptp_clock
);
767 iep
->ptp_clock
= NULL
;
768 dev_err(iep
->dev
, "Failed to register ptp clk %d\n", ret
);
773 EXPORT_SYMBOL_GPL(icss_iep_init
);
775 int icss_iep_exit(struct icss_iep
*iep
)
777 if (iep
->ptp_clock
) {
778 ptp_clock_unregister(iep
->ptp_clock
);
779 iep
->ptp_clock
= NULL
;
781 icss_iep_disable(iep
);
785 EXPORT_SYMBOL_GPL(icss_iep_exit
);
787 static int icss_iep_probe(struct platform_device
*pdev
)
789 struct device
*dev
= &pdev
->dev
;
790 struct icss_iep
*iep
;
794 iep
= devm_kzalloc(dev
, sizeof(*iep
), GFP_KERNEL
);
799 iep
->base
= devm_platform_ioremap_resource(pdev
, 0);
800 if (IS_ERR(iep
->base
))
803 irq
= platform_get_irq_byname_optional(pdev
, "iep_cap_cmp");
804 if (irq
== -EPROBE_DEFER
)
808 ret
= devm_request_irq(dev
, irq
, icss_iep_cap_cmp_irq
,
809 IRQF_TRIGGER_HIGH
, "iep_cap_cmp", iep
);
811 dev_info(iep
->dev
, "cap_cmp irq request failed: %x\n",
814 iep
->cap_cmp_irq
= irq
;
815 INIT_WORK(&iep
->work
, icss_iep_cap_cmp_work
);
819 iep_clk
= devm_clk_get(dev
, NULL
);
821 return PTR_ERR(iep_clk
);
823 iep
->refclk_freq
= clk_get_rate(iep_clk
);
825 iep
->def_inc
= NSEC_PER_SEC
/ iep
->refclk_freq
; /* ns per clock tick */
826 if (iep
->def_inc
> IEP_MAX_DEF_INC
) {
827 dev_err(dev
, "Failed to set def_inc %d. IEP_clock is too slow to be supported\n",
832 iep
->plat_data
= device_get_match_data(dev
);
836 iep
->map
= devm_regmap_init(dev
, NULL
, iep
, iep
->plat_data
->config
);
837 if (IS_ERR(iep
->map
)) {
838 dev_err(dev
, "Failed to create regmap for IEP %ld\n",
840 return PTR_ERR(iep
->map
);
843 iep
->ptp_info
= icss_iep_ptp_info
;
844 mutex_init(&iep
->ptp_clk_mutex
);
845 dev_set_drvdata(dev
, iep
);
846 icss_iep_disable(iep
);
851 static bool am654_icss_iep_valid_reg(struct device
*dev
, unsigned int reg
)
854 case ICSS_IEP_GLOBAL_CFG_REG
... ICSS_IEP_SYNC_START_REG
:
863 static int icss_iep_regmap_write(void *context
, unsigned int reg
,
866 struct icss_iep
*iep
= context
;
868 writel(val
, iep
->base
+ iep
->plat_data
->reg_offs
[reg
]);
873 static int icss_iep_regmap_read(void *context
, unsigned int reg
,
876 struct icss_iep
*iep
= context
;
878 *val
= readl(iep
->base
+ iep
->plat_data
->reg_offs
[reg
]);
883 static const struct regmap_config am654_icss_iep_regmap_config
= {
886 .reg_write
= icss_iep_regmap_write
,
887 .reg_read
= icss_iep_regmap_read
,
888 .writeable_reg
= am654_icss_iep_valid_reg
,
889 .readable_reg
= am654_icss_iep_valid_reg
,
893 static const struct icss_iep_plat_data am654_icss_iep_plat_data
= {
894 .flags
= ICSS_IEP_64BIT_COUNTER_SUPPORT
|
895 ICSS_IEP_SLOW_COMPEN_REG_SUPPORT
|
896 ICSS_IEP_SHADOW_MODE_SUPPORT
,
898 [ICSS_IEP_GLOBAL_CFG_REG
] = 0x00,
899 [ICSS_IEP_COMPEN_REG
] = 0x08,
900 [ICSS_IEP_SLOW_COMPEN_REG
] = 0x0C,
901 [ICSS_IEP_COUNT_REG0
] = 0x10,
902 [ICSS_IEP_COUNT_REG1
] = 0x14,
903 [ICSS_IEP_CAPTURE_CFG_REG
] = 0x18,
904 [ICSS_IEP_CAPTURE_STAT_REG
] = 0x1c,
906 [ICSS_IEP_CAP6_RISE_REG0
] = 0x50,
907 [ICSS_IEP_CAP6_RISE_REG1
] = 0x54,
909 [ICSS_IEP_CAP7_RISE_REG0
] = 0x60,
910 [ICSS_IEP_CAP7_RISE_REG1
] = 0x64,
912 [ICSS_IEP_CMP_CFG_REG
] = 0x70,
913 [ICSS_IEP_CMP_STAT_REG
] = 0x74,
914 [ICSS_IEP_CMP0_REG0
] = 0x78,
915 [ICSS_IEP_CMP0_REG1
] = 0x7c,
916 [ICSS_IEP_CMP1_REG0
] = 0x80,
917 [ICSS_IEP_CMP1_REG1
] = 0x84,
919 [ICSS_IEP_CMP8_REG0
] = 0xc0,
920 [ICSS_IEP_CMP8_REG1
] = 0xc4,
921 [ICSS_IEP_SYNC_CTRL_REG
] = 0x180,
922 [ICSS_IEP_SYNC0_STAT_REG
] = 0x188,
923 [ICSS_IEP_SYNC1_STAT_REG
] = 0x18c,
924 [ICSS_IEP_SYNC_PWIDTH_REG
] = 0x190,
925 [ICSS_IEP_SYNC0_PERIOD_REG
] = 0x194,
926 [ICSS_IEP_SYNC1_DELAY_REG
] = 0x198,
927 [ICSS_IEP_SYNC_START_REG
] = 0x19c,
929 .config
= &am654_icss_iep_regmap_config
,
932 static const struct of_device_id icss_iep_of_match
[] = {
934 .compatible
= "ti,am654-icss-iep",
935 .data
= &am654_icss_iep_plat_data
,
939 MODULE_DEVICE_TABLE(of
, icss_iep_of_match
);
941 static struct platform_driver icss_iep_driver
= {
944 .of_match_table
= icss_iep_of_match
,
946 .probe
= icss_iep_probe
,
948 module_platform_driver(icss_iep_driver
);
950 MODULE_LICENSE("GPL");
951 MODULE_DESCRIPTION("TI ICSS IEP driver");
952 MODULE_AUTHOR("Roger Quadros <rogerq@ti.com>");
953 MODULE_AUTHOR("Md Danish Anwar <danishanwar@ti.com>");