// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * PTP 1588 clock support - character device implementation.
 *
 * Copyright (C) 2010 OMICRON electronics GmbH
 */
#include <linux/module.h>
#include <linux/posix-clock.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/timekeeping.h>
#include <linux/debugfs.h>

#include <linux/nospec.h>

#include "ptp_private.h"

static int ptp_disable_pinfunc(struct ptp_clock_info *ops,
			       enum ptp_pin_function func, unsigned int chan)
{
	struct ptp_clock_request rq;
	int err = 0;

	memset(&rq, 0, sizeof(rq));

	switch (func) {
	case PTP_PF_NONE:
		break;
	case PTP_PF_EXTTS:
		rq.type = PTP_CLK_REQ_EXTTS;
		rq.extts.index = chan;
		err = ops->enable(ops, &rq, 0);
		break;
	case PTP_PF_PEROUT:
		rq.type = PTP_CLK_REQ_PEROUT;
		rq.perout.index = chan;
		err = ops->enable(ops, &rq, 0);
		break;
	case PTP_PF_PHYSYNC:
		break;
	default:
		return -EINVAL;
	}

	return err;
}

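/*
 * ptp_set_pinfunc - reprogram an auxiliary pin of a PTP hardware clock.
 *
 * The driver's ->verify() callback is consulted first; any pin that
 * previously carried the requested function/channel pair is then released
 * before the new assignment is recorded in pin_config[].  Called with
 * ptp->pincfg_mux held (see the PTP_PIN_SETFUNC ioctl below).
 */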
int ptp_set_pinfunc(struct ptp_clock *ptp, unsigned int pin,
		    enum ptp_pin_function func, unsigned int chan)
{
	struct ptp_clock_info *info = ptp->info;
	struct ptp_pin_desc *pin1 = NULL, *pin2 = &info->pin_config[pin];
	unsigned int i;

	/* Check to see if any other pin previously had this function. */
	for (i = 0; i < info->n_pins; i++) {
		if (info->pin_config[i].func == func &&
		    info->pin_config[i].chan == chan) {
			pin1 = &info->pin_config[i];
			break;
		}
	}

	/* Check if the desired function is already assigned to this pin. */
	if (pin1 && i == pin)
		return 0;

	/* Check the desired function and channel. */
	switch (func) {
	case PTP_PF_NONE:
		break;
	case PTP_PF_EXTTS:
		if (chan >= info->n_ext_ts)
			return -EINVAL;
		break;
	case PTP_PF_PEROUT:
		if (chan >= info->n_per_out)
			return -EINVAL;
		break;
	case PTP_PF_PHYSYNC:
		if (chan != 0)
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	/* Ask the driver whether this pin can take the function. */
	if (info->verify(info, pin, func, chan)) {
		pr_err("driver cannot use function %u and channel %u on pin %u\n",
		       func, chan, pin);
		return -EOPNOTSUPP;
	}

	/* Disable whatever function was previously assigned. */
	if (pin1) {
		ptp_disable_pinfunc(info, func, chan);
		pin1->func = PTP_PF_NONE;
		pin1->chan = 0;
	}
	ptp_disable_pinfunc(info, pin2->func, pin2->chan);
	pin2->func = func;
	pin2->chan = chan;

	return 0;
}

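/*
 * ptp_open - allocate a per-reader timestamp event queue.
 *
 * Every opener of the character device gets its own timestamp_event_queue,
 * linked onto ptp->tsevqs and exposed through debugfs so its channel mask
 * can be inspected.  The queue is stashed in pccontext->private_clkdata for
 * later use by read(), poll() and the event-mask ioctls.
 */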
int ptp_open(struct posix_clock_context *pccontext, fmode_t fmode)
{
	struct ptp_clock *ptp =
		container_of(pccontext->clk, struct ptp_clock, clock);
	struct timestamp_event_queue *queue;
	char debugfsname[32];
	unsigned long flags;

	queue = kzalloc(sizeof(*queue), GFP_KERNEL);
	if (!queue)
		return -EINVAL;
	queue->mask = bitmap_alloc(PTP_MAX_CHANNELS, GFP_KERNEL);
	if (!queue->mask) {
		kfree(queue);
		return -EINVAL;
	}
	bitmap_set(queue->mask, 0, PTP_MAX_CHANNELS);
	spin_lock_init(&queue->lock);
	spin_lock_irqsave(&ptp->tsevqs_lock, flags);
	list_add_tail(&queue->qlist, &ptp->tsevqs);
	spin_unlock_irqrestore(&ptp->tsevqs_lock, flags);
	pccontext->private_clkdata = queue;

	/* Debugfs contents */
	sprintf(debugfsname, "0x%p", queue);
	queue->debugfs_instance =
		debugfs_create_dir(debugfsname, ptp->debugfs_root);
	queue->dfs_bitmap.array = (u32 *)queue->mask;
	queue->dfs_bitmap.n_elements =
		DIV_ROUND_UP(PTP_MAX_CHANNELS, BITS_PER_BYTE * sizeof(u32));
	debugfs_create_u32_array("mask", 0444, queue->debugfs_instance,
				 &queue->dfs_bitmap);

	return 0;
}

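/*
 * ptp_release - undo ptp_open(): unlink and free this reader's event queue
 * and its debugfs directory.
 */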
int ptp_release(struct posix_clock_context *pccontext)
{
	struct timestamp_event_queue *queue = pccontext->private_clkdata;
	unsigned long flags;
	struct ptp_clock *ptp =
		container_of(pccontext->clk, struct ptp_clock, clock);

	debugfs_remove(queue->debugfs_instance);
	pccontext->private_clkdata = NULL;
	spin_lock_irqsave(&ptp->tsevqs_lock, flags);
	list_del(&queue->qlist);
	spin_unlock_irqrestore(&ptp->tsevqs_lock, flags);
	bitmap_free(queue->mask);
	kfree(queue);
	return 0;
}

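/*
 * ptp_ioctl - dispatch the PTP_* character device commands.
 *
 * For the request-style commands, the "2" variants validate flag and
 * reserved fields strictly and fail with -EINVAL on anything unexpected,
 * while the original v1 commands mask off unknown bits for backward
 * compatibility.
 */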
long ptp_ioctl(struct posix_clock_context *pccontext, unsigned int cmd,
	       unsigned long arg)
{
	struct ptp_clock *ptp =
		container_of(pccontext->clk, struct ptp_clock, clock);
	struct ptp_sys_offset_extended *extoff = NULL;
	struct ptp_sys_offset_precise precise_offset;
	struct system_device_crosststamp xtstamp;
	struct ptp_clock_info *ops = ptp->info;
	struct ptp_sys_offset *sysoff = NULL;
	struct timestamp_event_queue *tsevq;
	struct ptp_system_timestamp sts;
	struct ptp_clock_request req;
	struct ptp_clock_caps caps;
	struct ptp_clock_time *pct;
	unsigned int i, pin_index;
	struct ptp_pin_desc pd;
	struct timespec64 ts;
	int enable, err = 0;

	tsevq = pccontext->private_clkdata;

	switch (cmd) {

	case PTP_CLOCK_GETCAPS:
	case PTP_CLOCK_GETCAPS2:
		memset(&caps, 0, sizeof(caps));

		caps.max_adj = ptp->info->max_adj;
		caps.n_alarm = ptp->info->n_alarm;
		caps.n_ext_ts = ptp->info->n_ext_ts;
		caps.n_per_out = ptp->info->n_per_out;
		caps.pps = ptp->info->pps;
		caps.n_pins = ptp->info->n_pins;
		caps.cross_timestamping = ptp->info->getcrosststamp != NULL;
		caps.adjust_phase = ptp->info->adjphase != NULL &&
				    ptp->info->getmaxphase != NULL;
		if (caps.adjust_phase)
			caps.max_phase_adj = ptp->info->getmaxphase(ptp->info);
		if (copy_to_user((void __user *)arg, &caps, sizeof(caps)))
			err = -EFAULT;
		break;

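	/*
	 * Illustrative user space sketch (not part of this driver); the
	 * device path /dev/ptp0 is an assumption for the example:
	 *
	 *	struct ptp_clock_caps caps;
	 *	int fd = open("/dev/ptp0", O_RDWR);
	 *
	 *	if (fd >= 0 && ioctl(fd, PTP_CLOCK_GETCAPS, &caps) == 0)
	 *		printf("%d programmable pins\n", caps.n_pins);
	 */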
	case PTP_EXTTS_REQUEST:
	case PTP_EXTTS_REQUEST2:
		memset(&req, 0, sizeof(req));

		if (copy_from_user(&req.extts, (void __user *)arg,
				   sizeof(req.extts))) {
			err = -EFAULT;
			break;
		}
		if (cmd == PTP_EXTTS_REQUEST2) {
			/* Tell the drivers to check the flags carefully. */
			req.extts.flags |= PTP_STRICT_FLAGS;
			/* Make sure no reserved bit is set. */
			if ((req.extts.flags & ~PTP_EXTTS_VALID_FLAGS) ||
			    req.extts.rsv[0] || req.extts.rsv[1]) {
				err = -EINVAL;
				break;
			}
			/* Ensure one of the rising/falling edge bits is set. */
			if ((req.extts.flags & PTP_ENABLE_FEATURE) &&
			    (req.extts.flags & PTP_EXTTS_EDGES) == 0) {
				err = -EINVAL;
				break;
			}
		} else if (cmd == PTP_EXTTS_REQUEST) {
			req.extts.flags &= PTP_EXTTS_V1_VALID_FLAGS;
			req.extts.rsv[0] = 0;
			req.extts.rsv[1] = 0;
		}
		if (req.extts.index >= ops->n_ext_ts) {
			err = -EINVAL;
			break;
		}
		req.type = PTP_CLK_REQ_EXTTS;
		enable = req.extts.flags & PTP_ENABLE_FEATURE ? 1 : 0;
		if (mutex_lock_interruptible(&ptp->pincfg_mux))
			return -ERESTARTSYS;
		err = ops->enable(ops, &req, enable);
		mutex_unlock(&ptp->pincfg_mux);
		break;

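	/*
	 * Illustrative user space sketch (not part of this driver): arming
	 * external timestamps on channel 0 and reading back events; the
	 * device path and channel number are assumptions for the example:
	 *
	 *	struct ptp_extts_request req = {
	 *		.index = 0,
	 *		.flags = PTP_ENABLE_FEATURE | PTP_RISING_EDGE,
	 *	};
	 *	struct ptp_extts_event ev;
	 *	int fd = open("/dev/ptp0", O_RDWR);
	 *
	 *	ioctl(fd, PTP_EXTTS_REQUEST, &req);
	 *	read(fd, &ev, sizeof(ev));
	 */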
	case PTP_PEROUT_REQUEST:
	case PTP_PEROUT_REQUEST2:
		memset(&req, 0, sizeof(req));

		if (copy_from_user(&req.perout, (void __user *)arg,
				   sizeof(req.perout))) {
			err = -EFAULT;
			break;
		}
		if (cmd == PTP_PEROUT_REQUEST2) {
			struct ptp_perout_request *perout = &req.perout;

			if (perout->flags & ~PTP_PEROUT_VALID_FLAGS) {
				err = -EINVAL;
				break;
			}
			/*
			 * The "on" field has undefined meaning if
			 * PTP_PEROUT_DUTY_CYCLE isn't set, so it must still
			 * be treated as reserved and set to zero.
			 */
			if (!(perout->flags & PTP_PEROUT_DUTY_CYCLE) &&
			    (perout->rsv[0] || perout->rsv[1] ||
			     perout->rsv[2] || perout->rsv[3])) {
				err = -EINVAL;
				break;
			}
			if (perout->flags & PTP_PEROUT_DUTY_CYCLE) {
				/* The on time must not exceed the period. */
				if (perout->on.sec > perout->period.sec ||
				    (perout->on.sec == perout->period.sec &&
				     perout->on.nsec > perout->period.nsec)) {
					err = -ERANGE;
					break;
				}
			}
			if (perout->flags & PTP_PEROUT_PHASE) {
				/*
				 * The phase should be specified modulo the
				 * period, therefore anything equal to or
				 * larger than one period is invalid.
				 */
				if (perout->phase.sec > perout->period.sec ||
				    (perout->phase.sec == perout->period.sec &&
				     perout->phase.nsec >= perout->period.nsec)) {
					err = -ERANGE;
					break;
				}
			}
		} else if (cmd == PTP_PEROUT_REQUEST) {
			req.perout.flags &= PTP_PEROUT_V1_VALID_FLAGS;
			req.perout.rsv[0] = 0;
			req.perout.rsv[1] = 0;
			req.perout.rsv[2] = 0;
			req.perout.rsv[3] = 0;
		}
		if (req.perout.index >= ops->n_per_out) {
			err = -EINVAL;
			break;
		}
		req.type = PTP_CLK_REQ_PEROUT;
		enable = req.perout.period.sec || req.perout.period.nsec;
		if (mutex_lock_interruptible(&ptp->pincfg_mux))
			return -ERESTARTSYS;
		err = ops->enable(ops, &req, enable);
		mutex_unlock(&ptp->pincfg_mux);
		break;

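	/*
	 * Illustrative user space sketch (not part of this driver): a 1 Hz
	 * periodic output on channel 0 using the strict "2" command; the
	 * channel number is an assumption, and .start is left at zero here
	 * although real callers usually set it to a time on the PHC:
	 *
	 *	struct ptp_perout_request req = {
	 *		.index = 0,
	 *		.period = { .sec = 1, .nsec = 0 },
	 *	};
	 *
	 *	ioctl(fd, PTP_PEROUT_REQUEST2, &req);
	 */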
	case PTP_ENABLE_PPS:
	case PTP_ENABLE_PPS2:
		memset(&req, 0, sizeof(req));

		if (!capable(CAP_SYS_TIME))
			return -EPERM;
		req.type = PTP_CLK_REQ_PPS;
		enable = arg ? 1 : 0;
		if (mutex_lock_interruptible(&ptp->pincfg_mux))
			return -ERESTARTSYS;
		err = ops->enable(ops, &req, enable);
		mutex_unlock(&ptp->pincfg_mux);
		break;

	case PTP_SYS_OFFSET_PRECISE:
	case PTP_SYS_OFFSET_PRECISE2:
		if (!ptp->info->getcrosststamp) {
			err = -EOPNOTSUPP;
			break;
		}
		err = ptp->info->getcrosststamp(ptp->info, &xtstamp);
		if (err)
			break;

		memset(&precise_offset, 0, sizeof(precise_offset));
		ts = ktime_to_timespec64(xtstamp.device);
		precise_offset.device.sec = ts.tv_sec;
		precise_offset.device.nsec = ts.tv_nsec;
		ts = ktime_to_timespec64(xtstamp.sys_realtime);
		precise_offset.sys_realtime.sec = ts.tv_sec;
		precise_offset.sys_realtime.nsec = ts.tv_nsec;
		ts = ktime_to_timespec64(xtstamp.sys_monoraw);
		precise_offset.sys_monoraw.sec = ts.tv_sec;
		precise_offset.sys_monoraw.nsec = ts.tv_nsec;
		if (copy_to_user((void __user *)arg, &precise_offset,
				 sizeof(precise_offset)))
			err = -EFAULT;
		break;

	case PTP_SYS_OFFSET_EXTENDED:
	case PTP_SYS_OFFSET_EXTENDED2:
		if (!ptp->info->gettimex64) {
			err = -EOPNOTSUPP;
			break;
		}
		extoff = memdup_user((void __user *)arg, sizeof(*extoff));
		if (IS_ERR(extoff)) {
			err = PTR_ERR(extoff);
			extoff = NULL;
			break;
		}
		if (extoff->n_samples > PTP_MAX_SAMPLES ||
		    extoff->rsv[0] || extoff->rsv[1] ||
		    (extoff->clockid != CLOCK_REALTIME &&
		     extoff->clockid != CLOCK_MONOTONIC &&
		     extoff->clockid != CLOCK_MONOTONIC_RAW)) {
			err = -EINVAL;
			break;
		}
		sts.clockid = extoff->clockid;
		for (i = 0; i < extoff->n_samples; i++) {
			err = ptp->info->gettimex64(ptp->info, &ts, &sts);
			if (err)
				goto out;
			extoff->ts[i][0].sec = sts.pre_ts.tv_sec;
			extoff->ts[i][0].nsec = sts.pre_ts.tv_nsec;
			extoff->ts[i][1].sec = ts.tv_sec;
			extoff->ts[i][1].nsec = ts.tv_nsec;
			extoff->ts[i][2].sec = sts.post_ts.tv_sec;
			extoff->ts[i][2].nsec = sts.post_ts.tv_nsec;
		}
		if (copy_to_user((void __user *)arg, extoff, sizeof(*extoff)))
			err = -EFAULT;
		break;

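	/*
	 * Each sample filled in above is a (system pre, PHC, system post)
	 * triple in extoff->ts[i][0..2].  User space typically estimates the
	 * clock offset as phc - (pre + post) / 2, favouring the sample with
	 * the smallest post - pre window.
	 */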
	case PTP_SYS_OFFSET:
	case PTP_SYS_OFFSET2:
		sysoff = memdup_user((void __user *)arg, sizeof(*sysoff));
		if (IS_ERR(sysoff)) {
			err = PTR_ERR(sysoff);
			sysoff = NULL;
			break;
		}
		if (sysoff->n_samples > PTP_MAX_SAMPLES) {
			err = -EINVAL;
			break;
		}
		pct = &sysoff->ts[0];
		for (i = 0; i < sysoff->n_samples; i++) {
			ktime_get_real_ts64(&ts);
			pct->sec = ts.tv_sec;
			pct->nsec = ts.tv_nsec;
			pct++;
			if (ops->gettimex64)
				err = ops->gettimex64(ops, &ts, NULL);
			else
				err = ops->gettime64(ops, &ts);
			if (err)
				goto out;
			pct->sec = ts.tv_sec;
			pct->nsec = ts.tv_nsec;
			pct++;
		}
		ktime_get_real_ts64(&ts);
		pct->sec = ts.tv_sec;
		pct->nsec = ts.tv_nsec;
		if (copy_to_user((void __user *)arg, sysoff, sizeof(*sysoff)))
			err = -EFAULT;
		break;

	case PTP_PIN_GETFUNC:
	case PTP_PIN_GETFUNC2:
		if (copy_from_user(&pd, (void __user *)arg, sizeof(pd))) {
			err = -EFAULT;
			break;
		}
		if ((pd.rsv[0] || pd.rsv[1] || pd.rsv[2] ||
		     pd.rsv[3] || pd.rsv[4]) &&
		    cmd == PTP_PIN_GETFUNC2) {
			err = -EINVAL;
			break;
		} else if (cmd == PTP_PIN_GETFUNC) {
			pd.rsv[0] = 0;
			pd.rsv[1] = 0;
			pd.rsv[2] = 0;
			pd.rsv[3] = 0;
			pd.rsv[4] = 0;
		}
		pin_index = pd.index;
		if (pin_index >= ops->n_pins) {
			err = -EINVAL;
			break;
		}
		pin_index = array_index_nospec(pin_index, ops->n_pins);
		if (mutex_lock_interruptible(&ptp->pincfg_mux))
			return -ERESTARTSYS;
		pd = ops->pin_config[pin_index];
		mutex_unlock(&ptp->pincfg_mux);
		if (!err && copy_to_user((void __user *)arg, &pd, sizeof(pd)))
			err = -EFAULT;
		break;

	case PTP_PIN_SETFUNC:
	case PTP_PIN_SETFUNC2:
		if (copy_from_user(&pd, (void __user *)arg, sizeof(pd))) {
			err = -EFAULT;
			break;
		}
		if ((pd.rsv[0] || pd.rsv[1] || pd.rsv[2] ||
		     pd.rsv[3] || pd.rsv[4]) &&
		    cmd == PTP_PIN_SETFUNC2) {
			err = -EINVAL;
			break;
		} else if (cmd == PTP_PIN_SETFUNC) {
			pd.rsv[0] = 0;
			pd.rsv[1] = 0;
			pd.rsv[2] = 0;
			pd.rsv[3] = 0;
			pd.rsv[4] = 0;
		}
		pin_index = pd.index;
		if (pin_index >= ops->n_pins) {
			err = -EINVAL;
			break;
		}
		pin_index = array_index_nospec(pin_index, ops->n_pins);
		if (mutex_lock_interruptible(&ptp->pincfg_mux))
			return -ERESTARTSYS;
		err = ptp_set_pinfunc(ptp, pin_index, pd.func, pd.chan);
		mutex_unlock(&ptp->pincfg_mux);
		break;

	case PTP_MASK_CLEAR_ALL:
		bitmap_clear(tsevq->mask, 0, PTP_MAX_CHANNELS);
		break;

	case PTP_MASK_EN_SINGLE:
		if (copy_from_user(&i, (void __user *)arg, sizeof(i))) {
			err = -EFAULT;
			break;
		}
		if (i >= PTP_MAX_CHANNELS) {
			err = -EFAULT;
			break;
		}
		set_bit(i, tsevq->mask);
		break;

	default:
		err = -ENOTTY;
		break;
	}

out:
	kfree(extoff);
	kfree(sysoff);
	return err;
}

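/*
 * ptp_poll - poll support for the per-reader event queue; the file becomes
 * readable once at least one external timestamp event is queued.
 */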
__poll_t ptp_poll(struct posix_clock_context *pccontext, struct file *fp,
		  poll_table *wait)
{
	struct ptp_clock *ptp =
		container_of(pccontext->clk, struct ptp_clock, clock);
	struct timestamp_event_queue *queue;

	queue = pccontext->private_clkdata;
	if (!queue)
		return EPOLLERR;

	poll_wait(fp, &ptp->tsev_wq, wait);

	return queue_cnt(queue) ? EPOLLIN : 0;
}

#define EXTTS_BUFSIZE (PTP_BUF_TIMESTAMPS * sizeof(struct ptp_extts_event))

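/*
 * ptp_read - copy queued external timestamp events to user space.
 *
 * The requested size must be a multiple of sizeof(struct ptp_extts_event)
 * and is capped at EXTTS_BUFSIZE; the call blocks until at least one event
 * is queued or the clock becomes defunct.
 */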
ssize_t ptp_read(struct posix_clock_context *pccontext, uint rdflags,
		 char __user *buf, size_t cnt)
{
	struct ptp_clock *ptp =
		container_of(pccontext->clk, struct ptp_clock, clock);
	struct timestamp_event_queue *queue;
	struct ptp_extts_event *event;
	unsigned long flags;
	size_t qcnt, i;
	int result;

	queue = pccontext->private_clkdata;
	if (!queue) {
		result = -EINVAL;
		goto exit;
	}

	if (cnt % sizeof(struct ptp_extts_event) != 0) {
		result = -EINVAL;
		goto exit;
	}

	if (cnt > EXTTS_BUFSIZE)
		cnt = EXTTS_BUFSIZE;

	cnt = cnt / sizeof(struct ptp_extts_event);

	if (wait_event_interruptible(ptp->tsev_wq,
				     ptp->defunct || queue_cnt(queue))) {
		return -ERESTARTSYS;
	}

	if (ptp->defunct) {
		result = -ENODEV;
		goto exit;
	}

	event = kmalloc(EXTTS_BUFSIZE, GFP_KERNEL);
	if (!event) {
		result = -ENOMEM;
		goto exit;
	}

	spin_lock_irqsave(&queue->lock, flags);

	qcnt = queue_cnt(queue);

	if (cnt > qcnt)
		cnt = qcnt;

	for (i = 0; i < cnt; i++) {
		event[i] = queue->buf[queue->head];
		/* Paired with READ_ONCE() in queue_cnt() */
		WRITE_ONCE(queue->head, (queue->head + 1) % PTP_MAX_TIMESTAMPS);
	}

	spin_unlock_irqrestore(&queue->lock, flags);

	cnt = cnt * sizeof(struct ptp_extts_event);

	result = cnt;
	if (copy_to_user(buf, event, cnt)) {