1 // SPDX-License-Identifier: GPL-2.0-or-later
/*
 * PTP 1588 clock support - character device implementation.
 *
 * Copyright (C) 2010 OMICRON electronics GmbH
 */
7 #include <linux/module.h>
8 #include <linux/posix-clock.h>
9 #include <linux/poll.h>
10 #include <linux/sched.h>
11 #include <linux/slab.h>
12 #include <linux/timekeeping.h>
14 #include <linux/nospec.h>
16 #include "ptp_private.h"
18 static int ptp_disable_pinfunc(struct ptp_clock_info
*ops
,
19 enum ptp_pin_function func
, unsigned int chan
)
21 struct ptp_clock_request rq
;
24 memset(&rq
, 0, sizeof(rq
));
30 rq
.type
= PTP_CLK_REQ_EXTTS
;
31 rq
.extts
.index
= chan
;
32 err
= ops
->enable(ops
, &rq
, 0);
35 rq
.type
= PTP_CLK_REQ_PEROUT
;
36 rq
.perout
.index
= chan
;
37 err
= ops
->enable(ops
, &rq
, 0);
48 int ptp_set_pinfunc(struct ptp_clock
*ptp
, unsigned int pin
,
49 enum ptp_pin_function func
, unsigned int chan
)
51 struct ptp_clock_info
*info
= ptp
->info
;
52 struct ptp_pin_desc
*pin1
= NULL
, *pin2
= &info
->pin_config
[pin
];
55 /* Check to see if any other pin previously had this function. */
56 for (i
= 0; i
< info
->n_pins
; i
++) {
57 if (info
->pin_config
[i
].func
== func
&&
58 info
->pin_config
[i
].chan
== chan
) {
59 pin1
= &info
->pin_config
[i
];
66 /* Check the desired function and channel. */
71 if (chan
>= info
->n_ext_ts
)
75 if (chan
>= info
->n_per_out
)
86 if (info
->verify(info
, pin
, func
, chan
)) {
87 pr_err("driver cannot use function %u on pin %u\n", func
, chan
);
91 /* Disable whatever function was previously assigned. */
93 ptp_disable_pinfunc(info
, func
, chan
);
94 pin1
->func
= PTP_PF_NONE
;
97 ptp_disable_pinfunc(info
, pin2
->func
, pin2
->chan
);
104 int ptp_open(struct posix_clock
*pc
, fmode_t fmode
)
109 long ptp_ioctl(struct posix_clock
*pc
, unsigned int cmd
, unsigned long arg
)
111 struct ptp_clock
*ptp
= container_of(pc
, struct ptp_clock
, clock
);
112 struct ptp_sys_offset_extended
*extoff
= NULL
;
113 struct ptp_sys_offset_precise precise_offset
;
114 struct system_device_crosststamp xtstamp
;
115 struct ptp_clock_info
*ops
= ptp
->info
;
116 struct ptp_sys_offset
*sysoff
= NULL
;
117 struct ptp_system_timestamp sts
;
118 struct ptp_clock_request req
;
119 struct ptp_clock_caps caps
;
120 struct ptp_clock_time
*pct
;
121 unsigned int i
, pin_index
;
122 struct ptp_pin_desc pd
;
123 struct timespec64 ts
;
128 case PTP_CLOCK_GETCAPS
:
129 case PTP_CLOCK_GETCAPS2
:
130 memset(&caps
, 0, sizeof(caps
));
132 caps
.max_adj
= ptp
->info
->max_adj
;
133 caps
.n_alarm
= ptp
->info
->n_alarm
;
134 caps
.n_ext_ts
= ptp
->info
->n_ext_ts
;
135 caps
.n_per_out
= ptp
->info
->n_per_out
;
136 caps
.pps
= ptp
->info
->pps
;
137 caps
.n_pins
= ptp
->info
->n_pins
;
138 caps
.cross_timestamping
= ptp
->info
->getcrosststamp
!= NULL
;
139 caps
.adjust_phase
= ptp
->info
->adjphase
!= NULL
;
140 if (copy_to_user((void __user
*)arg
, &caps
, sizeof(caps
)))
144 case PTP_EXTTS_REQUEST
:
145 case PTP_EXTTS_REQUEST2
:
146 memset(&req
, 0, sizeof(req
));
148 if (copy_from_user(&req
.extts
, (void __user
*)arg
,
149 sizeof(req
.extts
))) {
153 if (cmd
== PTP_EXTTS_REQUEST2
) {
154 /* Tell the drivers to check the flags carefully. */
155 req
.extts
.flags
|= PTP_STRICT_FLAGS
;
156 /* Make sure no reserved bit is set. */
157 if ((req
.extts
.flags
& ~PTP_EXTTS_VALID_FLAGS
) ||
158 req
.extts
.rsv
[0] || req
.extts
.rsv
[1]) {
162 /* Ensure one of the rising/falling edge bits is set. */
163 if ((req
.extts
.flags
& PTP_ENABLE_FEATURE
) &&
164 (req
.extts
.flags
& PTP_EXTTS_EDGES
) == 0) {
168 } else if (cmd
== PTP_EXTTS_REQUEST
) {
169 req
.extts
.flags
&= PTP_EXTTS_V1_VALID_FLAGS
;
170 req
.extts
.rsv
[0] = 0;
171 req
.extts
.rsv
[1] = 0;
173 if (req
.extts
.index
>= ops
->n_ext_ts
) {
177 req
.type
= PTP_CLK_REQ_EXTTS
;
178 enable
= req
.extts
.flags
& PTP_ENABLE_FEATURE
? 1 : 0;
179 if (mutex_lock_interruptible(&ptp
->pincfg_mux
))
181 err
= ops
->enable(ops
, &req
, enable
);
182 mutex_unlock(&ptp
->pincfg_mux
);
185 case PTP_PEROUT_REQUEST
:
186 case PTP_PEROUT_REQUEST2
:
187 memset(&req
, 0, sizeof(req
));
189 if (copy_from_user(&req
.perout
, (void __user
*)arg
,
190 sizeof(req
.perout
))) {
194 if (cmd
== PTP_PEROUT_REQUEST2
) {
195 struct ptp_perout_request
*perout
= &req
.perout
;
197 if (perout
->flags
& ~PTP_PEROUT_VALID_FLAGS
) {
202 * The "on" field has undefined meaning if
203 * PTP_PEROUT_DUTY_CYCLE isn't set, we must still treat
204 * it as reserved, which must be set to zero.
206 if (!(perout
->flags
& PTP_PEROUT_DUTY_CYCLE
) &&
207 (perout
->rsv
[0] || perout
->rsv
[1] ||
208 perout
->rsv
[2] || perout
->rsv
[3])) {
212 if (perout
->flags
& PTP_PEROUT_DUTY_CYCLE
) {
213 /* The duty cycle must be subunitary. */
214 if (perout
->on
.sec
> perout
->period
.sec
||
215 (perout
->on
.sec
== perout
->period
.sec
&&
216 perout
->on
.nsec
> perout
->period
.nsec
)) {
221 if (perout
->flags
& PTP_PEROUT_PHASE
) {
223 * The phase should be specified modulo the
224 * period, therefore anything equal or larger
225 * than 1 period is invalid.
227 if (perout
->phase
.sec
> perout
->period
.sec
||
228 (perout
->phase
.sec
== perout
->period
.sec
&&
229 perout
->phase
.nsec
>= perout
->period
.nsec
)) {
234 } else if (cmd
== PTP_PEROUT_REQUEST
) {
235 req
.perout
.flags
&= PTP_PEROUT_V1_VALID_FLAGS
;
236 req
.perout
.rsv
[0] = 0;
237 req
.perout
.rsv
[1] = 0;
238 req
.perout
.rsv
[2] = 0;
239 req
.perout
.rsv
[3] = 0;
241 if (req
.perout
.index
>= ops
->n_per_out
) {
245 req
.type
= PTP_CLK_REQ_PEROUT
;
246 enable
= req
.perout
.period
.sec
|| req
.perout
.period
.nsec
;
247 if (mutex_lock_interruptible(&ptp
->pincfg_mux
))
249 err
= ops
->enable(ops
, &req
, enable
);
250 mutex_unlock(&ptp
->pincfg_mux
);
254 case PTP_ENABLE_PPS2
:
255 memset(&req
, 0, sizeof(req
));
257 if (!capable(CAP_SYS_TIME
))
259 req
.type
= PTP_CLK_REQ_PPS
;
260 enable
= arg
? 1 : 0;
261 if (mutex_lock_interruptible(&ptp
->pincfg_mux
))
263 err
= ops
->enable(ops
, &req
, enable
);
264 mutex_unlock(&ptp
->pincfg_mux
);
267 case PTP_SYS_OFFSET_PRECISE
:
268 case PTP_SYS_OFFSET_PRECISE2
:
269 if (!ptp
->info
->getcrosststamp
) {
273 err
= ptp
->info
->getcrosststamp(ptp
->info
, &xtstamp
);
277 memset(&precise_offset
, 0, sizeof(precise_offset
));
278 ts
= ktime_to_timespec64(xtstamp
.device
);
279 precise_offset
.device
.sec
= ts
.tv_sec
;
280 precise_offset
.device
.nsec
= ts
.tv_nsec
;
281 ts
= ktime_to_timespec64(xtstamp
.sys_realtime
);
282 precise_offset
.sys_realtime
.sec
= ts
.tv_sec
;
283 precise_offset
.sys_realtime
.nsec
= ts
.tv_nsec
;
284 ts
= ktime_to_timespec64(xtstamp
.sys_monoraw
);
285 precise_offset
.sys_monoraw
.sec
= ts
.tv_sec
;
286 precise_offset
.sys_monoraw
.nsec
= ts
.tv_nsec
;
287 if (copy_to_user((void __user
*)arg
, &precise_offset
,
288 sizeof(precise_offset
)))
292 case PTP_SYS_OFFSET_EXTENDED
:
293 case PTP_SYS_OFFSET_EXTENDED2
:
294 if (!ptp
->info
->gettimex64
) {
298 extoff
= memdup_user((void __user
*)arg
, sizeof(*extoff
));
299 if (IS_ERR(extoff
)) {
300 err
= PTR_ERR(extoff
);
304 if (extoff
->n_samples
> PTP_MAX_SAMPLES
305 || extoff
->rsv
[0] || extoff
->rsv
[1] || extoff
->rsv
[2]) {
309 for (i
= 0; i
< extoff
->n_samples
; i
++) {
310 err
= ptp
->info
->gettimex64(ptp
->info
, &ts
, &sts
);
313 extoff
->ts
[i
][0].sec
= sts
.pre_ts
.tv_sec
;
314 extoff
->ts
[i
][0].nsec
= sts
.pre_ts
.tv_nsec
;
315 extoff
->ts
[i
][1].sec
= ts
.tv_sec
;
316 extoff
->ts
[i
][1].nsec
= ts
.tv_nsec
;
317 extoff
->ts
[i
][2].sec
= sts
.post_ts
.tv_sec
;
318 extoff
->ts
[i
][2].nsec
= sts
.post_ts
.tv_nsec
;
320 if (copy_to_user((void __user
*)arg
, extoff
, sizeof(*extoff
)))
325 case PTP_SYS_OFFSET2
:
326 sysoff
= memdup_user((void __user
*)arg
, sizeof(*sysoff
));
327 if (IS_ERR(sysoff
)) {
328 err
= PTR_ERR(sysoff
);
332 if (sysoff
->n_samples
> PTP_MAX_SAMPLES
) {
336 pct
= &sysoff
->ts
[0];
337 for (i
= 0; i
< sysoff
->n_samples
; i
++) {
338 ktime_get_real_ts64(&ts
);
339 pct
->sec
= ts
.tv_sec
;
340 pct
->nsec
= ts
.tv_nsec
;
343 err
= ops
->gettimex64(ops
, &ts
, NULL
);
345 err
= ops
->gettime64(ops
, &ts
);
348 pct
->sec
= ts
.tv_sec
;
349 pct
->nsec
= ts
.tv_nsec
;
352 ktime_get_real_ts64(&ts
);
353 pct
->sec
= ts
.tv_sec
;
354 pct
->nsec
= ts
.tv_nsec
;
355 if (copy_to_user((void __user
*)arg
, sysoff
, sizeof(*sysoff
)))
359 case PTP_PIN_GETFUNC
:
360 case PTP_PIN_GETFUNC2
:
361 if (copy_from_user(&pd
, (void __user
*)arg
, sizeof(pd
))) {
365 if ((pd
.rsv
[0] || pd
.rsv
[1] || pd
.rsv
[2]
366 || pd
.rsv
[3] || pd
.rsv
[4])
367 && cmd
== PTP_PIN_GETFUNC2
) {
370 } else if (cmd
== PTP_PIN_GETFUNC
) {
377 pin_index
= pd
.index
;
378 if (pin_index
>= ops
->n_pins
) {
382 pin_index
= array_index_nospec(pin_index
, ops
->n_pins
);
383 if (mutex_lock_interruptible(&ptp
->pincfg_mux
))
385 pd
= ops
->pin_config
[pin_index
];
386 mutex_unlock(&ptp
->pincfg_mux
);
387 if (!err
&& copy_to_user((void __user
*)arg
, &pd
, sizeof(pd
)))
391 case PTP_PIN_SETFUNC
:
392 case PTP_PIN_SETFUNC2
:
393 if (copy_from_user(&pd
, (void __user
*)arg
, sizeof(pd
))) {
397 if ((pd
.rsv
[0] || pd
.rsv
[1] || pd
.rsv
[2]
398 || pd
.rsv
[3] || pd
.rsv
[4])
399 && cmd
== PTP_PIN_SETFUNC2
) {
402 } else if (cmd
== PTP_PIN_SETFUNC
) {
409 pin_index
= pd
.index
;
410 if (pin_index
>= ops
->n_pins
) {
414 pin_index
= array_index_nospec(pin_index
, ops
->n_pins
);
415 if (mutex_lock_interruptible(&ptp
->pincfg_mux
))
417 err
= ptp_set_pinfunc(ptp
, pin_index
, pd
.func
, pd
.chan
);
418 mutex_unlock(&ptp
->pincfg_mux
);
432 __poll_t
ptp_poll(struct posix_clock
*pc
, struct file
*fp
, poll_table
*wait
)
434 struct ptp_clock
*ptp
= container_of(pc
, struct ptp_clock
, clock
);
436 poll_wait(fp
, &ptp
->tsev_wq
, wait
);
438 return queue_cnt(&ptp
->tsevq
) ? EPOLLIN
: 0;
441 #define EXTTS_BUFSIZE (PTP_BUF_TIMESTAMPS * sizeof(struct ptp_extts_event))
443 ssize_t
ptp_read(struct posix_clock
*pc
,
444 uint rdflags
, char __user
*buf
, size_t cnt
)
446 struct ptp_clock
*ptp
= container_of(pc
, struct ptp_clock
, clock
);
447 struct timestamp_event_queue
*queue
= &ptp
->tsevq
;
448 struct ptp_extts_event
*event
;
453 if (cnt
% sizeof(struct ptp_extts_event
) != 0)
456 if (cnt
> EXTTS_BUFSIZE
)
459 cnt
= cnt
/ sizeof(struct ptp_extts_event
);
461 if (mutex_lock_interruptible(&ptp
->tsevq_mux
))
464 if (wait_event_interruptible(ptp
->tsev_wq
,
465 ptp
->defunct
|| queue_cnt(queue
))) {
466 mutex_unlock(&ptp
->tsevq_mux
);
471 mutex_unlock(&ptp
->tsevq_mux
);
475 event
= kmalloc(EXTTS_BUFSIZE
, GFP_KERNEL
);
477 mutex_unlock(&ptp
->tsevq_mux
);
481 spin_lock_irqsave(&queue
->lock
, flags
);
483 qcnt
= queue_cnt(queue
);
488 for (i
= 0; i
< cnt
; i
++) {
489 event
[i
] = queue
->buf
[queue
->head
];
490 queue
->head
= (queue
->head
+ 1) % PTP_MAX_TIMESTAMPS
;
493 spin_unlock_irqrestore(&queue
->lock
, flags
);
495 cnt
= cnt
* sizeof(struct ptp_extts_event
);
497 mutex_unlock(&ptp
->tsevq_mux
);
500 if (copy_to_user(buf
, event
, cnt
))