// SPDX-License-Identifier: GPL-2.0-only
/* Aquantia Corporation Network Driver
 * Copyright (C) 2014-2019 Aquantia Corporation. All rights reserved
 */

/* File aq_ptp.c:
 * Definition of functions for Linux PTP support.
 */

#include <linux/ptp_clock_kernel.h>
#include <linux/ptp_classify.h>
#include <linux/interrupt.h>
#include <linux/clocksource.h>

#include "aq_nic.h"
#include "aq_ptp.h"
#include "aq_ring.h"
#include "aq_phy.h"
#include "aq_filters.h"

#define AQ_PTP_TX_TIMEOUT (HZ * 10)

#define POLL_SYNC_TIMER_MS 15

enum ptp_speed_offsets {
	ptp_offset_idx_10 = 0,
	ptp_offset_idx_100,
	ptp_offset_idx_1000,
	ptp_offset_idx_2500,
	ptp_offset_idx_5000,
	ptp_offset_idx_10000,
};

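/* Fixed-size circular buffer of skb pointers: outgoing PTP packets are
 * parked here by aq_ptp_xmit() until hardware reports their egress
 * timestamp, at which point aq_ptp_tx_hwtstamp() pops them again and
 * completes the timestamp request.
 */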
struct ptp_skb_ring {
	struct sk_buff **buff;
	spinlock_t lock;
	unsigned int size;
	unsigned int head;
	unsigned int tail;
};

struct ptp_tx_timeout {
	spinlock_t lock;
	bool active;
	unsigned long tx_start;
};

struct aq_ptp_s {
	struct aq_nic_s *aq_nic;
	struct hwtstamp_config hwtstamp_config;
	spinlock_t ptp_lock;
	spinlock_t ptp_ring_lock;
	struct ptp_clock *ptp_clock;
	struct ptp_clock_info ptp_info;

	atomic_t offset_egress;
	atomic_t offset_ingress;

	struct aq_ring_param_s ptp_ring_param;

	struct ptp_tx_timeout ptp_tx_timeout;

	unsigned int idx_vector;
	struct napi_struct napi;

	struct aq_ring_s ptp_tx;
	struct aq_ring_s ptp_rx;
	struct aq_ring_s hwts_rx;

	struct ptp_skb_ring skb_ring;

	struct aq_rx_filter_l3l4 udp_filter;
	struct aq_rx_filter_l2 eth_type_filter;

	struct delayed_work poll_sync;
	u32 poll_timeout_ms;

	bool extts_pin_enabled;
	u64 last_sync1588_ts;
};

struct ptp_tm_offset {
	unsigned int mbps;
	int egress;
	int ingress;
};

static struct ptp_tm_offset ptp_offset[6];

void aq_ptp_tm_offset_set(struct aq_nic_s *aq_nic, unsigned int mbps)
{
	struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
	int i, egress, ingress;

	if (!aq_ptp)
		return;

	egress = 0;
	ingress = 0;

	for (i = 0; i < ARRAY_SIZE(ptp_offset); i++) {
		if (mbps == ptp_offset[i].mbps) {
			egress = ptp_offset[i].egress;
			ingress = ptp_offset[i].ingress;
			break;
		}
	}

	atomic_set(&aq_ptp->offset_egress, egress);
	atomic_set(&aq_ptp->offset_ingress, ingress);
}

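/* The skb ring helpers below use the usual "one slot left empty"
 * convention: the ring counts as full when advancing head would meet
 * tail, so at most size - 1 skbs are queued at a time. A reference is
 * taken via skb_get() on put and dropped when the skb is consumed.
 */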
static int __aq_ptp_skb_put(struct ptp_skb_ring *ring, struct sk_buff *skb)
{
	unsigned int next_head = (ring->head + 1) % ring->size;

	if (next_head == ring->tail)
		return -ENOMEM;

	ring->buff[ring->head] = skb_get(skb);
	ring->head = next_head;

	return 0;
}

static int aq_ptp_skb_put(struct ptp_skb_ring *ring, struct sk_buff *skb)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&ring->lock, flags);
	ret = __aq_ptp_skb_put(ring, skb);
	spin_unlock_irqrestore(&ring->lock, flags);

	return ret;
}

static struct sk_buff *__aq_ptp_skb_get(struct ptp_skb_ring *ring)
{
	struct sk_buff *skb;

	if (ring->tail == ring->head)
		return NULL;

	skb = ring->buff[ring->tail];
	ring->tail = (ring->tail + 1) % ring->size;

	return skb;
}

static struct sk_buff *aq_ptp_skb_get(struct ptp_skb_ring *ring)
{
	unsigned long flags;
	struct sk_buff *skb;

	spin_lock_irqsave(&ring->lock, flags);
	skb = __aq_ptp_skb_get(ring);
	spin_unlock_irqrestore(&ring->lock, flags);

	return skb;
}

static unsigned int aq_ptp_skb_buf_len(struct ptp_skb_ring *ring)
{
	unsigned long flags;
	unsigned int len;

	spin_lock_irqsave(&ring->lock, flags);
	len = (ring->head >= ring->tail) ?
	       ring->head - ring->tail :
	       ring->size - ring->tail + ring->head;
	spin_unlock_irqrestore(&ring->lock, flags);

	return len;
}

static int aq_ptp_skb_ring_init(struct ptp_skb_ring *ring, unsigned int size)
{
	struct sk_buff **buff = kmalloc(sizeof(*buff) * size, GFP_KERNEL);

	if (!buff)
		return -ENOMEM;

	spin_lock_init(&ring->lock);

	ring->buff = buff;
	ring->size = size;
	ring->head = 0;
	ring->tail = 0;

	return 0;
}

static void aq_ptp_skb_ring_clean(struct ptp_skb_ring *ring)
{
	struct sk_buff *skb;

	while ((skb = aq_ptp_skb_get(ring)) != NULL)
		dev_kfree_skb_any(skb);
}

static void aq_ptp_skb_ring_release(struct ptp_skb_ring *ring)
{
	if (ring->buff) {
		aq_ptp_skb_ring_clean(ring);
		kfree(ring->buff);
		ring->buff = NULL;
	}
}

static void aq_ptp_tx_timeout_init(struct ptp_tx_timeout *timeout)
{
	spin_lock_init(&timeout->lock);
	timeout->active = false;
}

static void aq_ptp_tx_timeout_start(struct aq_ptp_s *aq_ptp)
{
	struct ptp_tx_timeout *timeout = &aq_ptp->ptp_tx_timeout;
	unsigned long flags;

	spin_lock_irqsave(&timeout->lock, flags);
	timeout->active = true;
	timeout->tx_start = jiffies;
	spin_unlock_irqrestore(&timeout->lock, flags);
}

static void aq_ptp_tx_timeout_update(struct aq_ptp_s *aq_ptp)
{
	if (!aq_ptp_skb_buf_len(&aq_ptp->skb_ring)) {
		struct ptp_tx_timeout *timeout = &aq_ptp->ptp_tx_timeout;
		unsigned long flags;

		spin_lock_irqsave(&timeout->lock, flags);
		timeout->active = false;
		spin_unlock_irqrestore(&timeout->lock, flags);
	}
}

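/* Watchdog for pending Tx timestamp requests: if hardware has not
 * reported an egress timestamp within AQ_PTP_TX_TIMEOUT jiffies of the
 * last start, any skbs still parked in the skb ring are dropped so they
 * do not leak. Called periodically from aq_ptp_service_task().
 */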
static void aq_ptp_tx_timeout_check(struct aq_ptp_s *aq_ptp)
{
	struct ptp_tx_timeout *timeout = &aq_ptp->ptp_tx_timeout;
	unsigned long flags;
	bool timeout_flag;

	timeout_flag = false;

	spin_lock_irqsave(&timeout->lock, flags);
	if (timeout->active) {
		timeout_flag = time_is_before_jiffies(timeout->tx_start +
						      AQ_PTP_TX_TIMEOUT);
		/* reset active flag if timeout detected */
		if (timeout_flag)
			timeout->active = false;
	}
	spin_unlock_irqrestore(&timeout->lock, flags);

	if (timeout_flag) {
		aq_ptp_skb_ring_clean(&aq_ptp->skb_ring);
		netdev_err(aq_ptp->aq_nic->ndev,
			   "PTP Timeout. Clearing Tx Timestamp SKBs\n");
	}
}

/* aq_ptp_adjfine
 * @ptp: the ptp clock structure
 * @scaled_ppm: scaled parts per million adjustment from base
 *
 * adjust the frequency of the ptp cycle counter by the
 * indicated amount from the base frequency.
 */
static int aq_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
{
	struct aq_ptp_s *aq_ptp = container_of(ptp, struct aq_ptp_s, ptp_info);
	struct aq_nic_s *aq_nic = aq_ptp->aq_nic;

	mutex_lock(&aq_nic->fwreq_mutex);
	aq_nic->aq_hw_ops->hw_adj_clock_freq(aq_nic->aq_hw,
					     scaled_ppm_to_ppb(scaled_ppm));
	mutex_unlock(&aq_nic->fwreq_mutex);

	return 0;
}

/* aq_ptp_adjtime
 * @ptp: the ptp clock structure
 * @delta: offset to adjust the cycle counter by
 *
 * adjust the timer by resetting the timecounter structure.
 */
static int aq_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
	struct aq_ptp_s *aq_ptp = container_of(ptp, struct aq_ptp_s, ptp_info);
	struct aq_nic_s *aq_nic = aq_ptp->aq_nic;
	unsigned long flags;

	spin_lock_irqsave(&aq_ptp->ptp_lock, flags);
	aq_nic->aq_hw_ops->hw_adj_sys_clock(aq_nic->aq_hw, delta);
	spin_unlock_irqrestore(&aq_ptp->ptp_lock, flags);

	return 0;
}

/* aq_ptp_gettime
 * @ptp: the ptp clock structure
 * @ts: timespec structure to hold the current time value
 *
 * read the timecounter and return the correct value in ns,
 * after converting it into a struct timespec.
 */
static int aq_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
{
	struct aq_ptp_s *aq_ptp = container_of(ptp, struct aq_ptp_s, ptp_info);
	struct aq_nic_s *aq_nic = aq_ptp->aq_nic;
	unsigned long flags;
	u64 ns;

	spin_lock_irqsave(&aq_ptp->ptp_lock, flags);
	aq_nic->aq_hw_ops->hw_get_ptp_ts(aq_nic->aq_hw, &ns);
	spin_unlock_irqrestore(&aq_ptp->ptp_lock, flags);

	*ts = ns_to_timespec64(ns);

	return 0;
}

/* aq_ptp_settime
 * @ptp: the ptp clock structure
 * @ts: the timespec containing the new time for the cycle counter
 *
 * reset the timecounter to use a new base value instead of the kernel
 * wall timer value.
 */
static int aq_ptp_settime(struct ptp_clock_info *ptp,
			  const struct timespec64 *ts)
{
	struct aq_ptp_s *aq_ptp = container_of(ptp, struct aq_ptp_s, ptp_info);
	struct aq_nic_s *aq_nic = aq_ptp->aq_nic;
	unsigned long flags;
	u64 ns = timespec64_to_ns(ts);
	u64 now;

	spin_lock_irqsave(&aq_ptp->ptp_lock, flags);
	aq_nic->aq_hw_ops->hw_get_ptp_ts(aq_nic->aq_hw, &now);
	aq_nic->aq_hw_ops->hw_adj_sys_clock(aq_nic->aq_hw, (s64)ns - (s64)now);
	spin_unlock_irqrestore(&aq_ptp->ptp_lock, flags);

	return 0;
}

static void aq_ptp_convert_to_hwtstamp(struct aq_ptp_s *aq_ptp,
				       struct skb_shared_hwtstamps *hwtstamp,
				       u64 timestamp)
{
	memset(hwtstamp, 0, sizeof(*hwtstamp));
	hwtstamp->hwtstamp = ns_to_ktime(timestamp);
}

static int aq_ptp_hw_pin_conf(struct aq_nic_s *aq_nic, u32 pin_index, u64 start,
			      u64 period)
{
	if (period)
		netdev_dbg(aq_nic->ndev,
			   "Enable GPIO %d pulsing, start time %llu, period %u\n",
			   pin_index, start, (u32)period);
	else
		netdev_dbg(aq_nic->ndev,
			   "Disable GPIO %d pulsing, start time %llu, period %u\n",
			   pin_index, start, (u32)period);

	/* Notify hardware of the request to begin sending pulses.
	 * If period is ZERO then pulsing is disabled.
	 */
	mutex_lock(&aq_nic->fwreq_mutex);
	aq_nic->aq_hw_ops->hw_gpio_pulse(aq_nic->aq_hw, pin_index,
					 start, (u32)period);
	mutex_unlock(&aq_nic->fwreq_mutex);

	return 0;
}

static int aq_ptp_perout_pin_configure(struct ptp_clock_info *ptp,
				       struct ptp_clock_request *rq, int on)
{
	struct aq_ptp_s *aq_ptp = container_of(ptp, struct aq_ptp_s, ptp_info);
	struct ptp_clock_time *t = &rq->perout.period;
	struct ptp_clock_time *s = &rq->perout.start;
	struct aq_nic_s *aq_nic = aq_ptp->aq_nic;
	u64 start, period;
	u32 pin_index = rq->perout.index;

	/* verify the request channel is there */
	if (pin_index >= ptp->n_per_out)
		return -EINVAL;

	/* we cannot support periods greater
	 * than 4 seconds due to reg limit
	 */
	if (t->sec > 4 || t->sec < 0)
		return -ERANGE;

	/* convert to unsigned 64b ns,
	 * verify we can put it in a 32b register
	 */
	period = on ? t->sec * NSEC_PER_SEC + t->nsec : 0;

	/* verify the value is in range supported by hardware */
	if (period > U32_MAX)
		return -ERANGE;
	/* convert to unsigned 64b ns */
	/* TODO convert to AQ time */
	start = on ? s->sec * NSEC_PER_SEC + s->nsec : 0;

	aq_ptp_hw_pin_conf(aq_nic, pin_index, start, period);

	return 0;
}

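/* PPS output: the period is fixed to one second and the start time is
 * aligned to the next full second of the PTP clock (or the one after
 * that when less than ~10 ms of the current second remain, presumably so
 * the hardware has time to arm before the edge).
 */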
static int aq_ptp_pps_pin_configure(struct ptp_clock_info *ptp,
				    struct ptp_clock_request *rq, int on)
{
	struct aq_ptp_s *aq_ptp = container_of(ptp, struct aq_ptp_s, ptp_info);
	struct aq_nic_s *aq_nic = aq_ptp->aq_nic;
	u64 start, period;
	u32 pin_index = 0;
	u32 rest = 0;

	/* verify the request channel is there */
	if (pin_index >= ptp->n_per_out)
		return -EINVAL;

	aq_nic->aq_hw_ops->hw_get_ptp_ts(aq_nic->aq_hw, &start);
	div_u64_rem(start, NSEC_PER_SEC, &rest);
	period = on ? NSEC_PER_SEC : 0; /* PPS - pulse per second */
	start = on ? start - rest + NSEC_PER_SEC *
		(rest > 990000000LL ? 2 : 1) : 0;

	aq_ptp_hw_pin_conf(aq_nic, pin_index, start, period);

	return 0;
}

static void aq_ptp_extts_pin_ctrl(struct aq_ptp_s *aq_ptp)
{
	struct aq_nic_s *aq_nic = aq_ptp->aq_nic;
	u32 enable = aq_ptp->extts_pin_enabled;

	if (aq_nic->aq_hw_ops->hw_extts_gpio_enable)
		aq_nic->aq_hw_ops->hw_extts_gpio_enable(aq_nic->aq_hw, 0,
							enable);
}

static int aq_ptp_extts_pin_configure(struct ptp_clock_info *ptp,
				      struct ptp_clock_request *rq, int on)
{
	struct aq_ptp_s *aq_ptp = container_of(ptp, struct aq_ptp_s, ptp_info);

	u32 pin_index = rq->extts.index;

	if (pin_index >= ptp->n_ext_ts)
		return -EINVAL;

	aq_ptp->extts_pin_enabled = !!on;
	if (on) {
		aq_ptp->poll_timeout_ms = POLL_SYNC_TIMER_MS;
		cancel_delayed_work_sync(&aq_ptp->poll_sync);
		schedule_delayed_work(&aq_ptp->poll_sync,
				      msecs_to_jiffies(aq_ptp->poll_timeout_ms));
	}

	aq_ptp_extts_pin_ctrl(aq_ptp);
	return 0;
}

/* aq_ptp_gpio_feature_enable
 * @ptp: the ptp clock structure
 * @rq: the requested feature to change
 * @on: whether to enable or disable the feature
 */
static int aq_ptp_gpio_feature_enable(struct ptp_clock_info *ptp,
				      struct ptp_clock_request *rq, int on)
{
	switch (rq->type) {
	case PTP_CLK_REQ_EXTTS:
		return aq_ptp_extts_pin_configure(ptp, rq, on);
	case PTP_CLK_REQ_PEROUT:
		return aq_ptp_perout_pin_configure(ptp, rq, on);
	case PTP_CLK_REQ_PPS:
		return aq_ptp_pps_pin_configure(ptp, rq, on);
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

/* aq_ptp_verify
 * @ptp: the ptp clock structure
 * @pin: index of the pin in question
 * @func: the desired function to use
 * @chan: the function channel index to use
 */
static int aq_ptp_verify(struct ptp_clock_info *ptp, unsigned int pin,
			 enum ptp_pin_function func, unsigned int chan)
{
	/* verify the requested pin is there */
	if (!ptp->pin_config || pin >= ptp->n_pins)
		return -EINVAL;

	/* enforce locked channels, no changing them */
	if (chan != ptp->pin_config[pin].chan)
		return -EINVAL;

	/* we want to keep the functions locked as well */
	if (func != ptp->pin_config[pin].func)
		return -EINVAL;

	return 0;
}

/* aq_ptp_tx_hwtstamp - utility function which checks for TX time stamp
 * @adapter: the private adapter struct
 *
 * if the timestamp is valid, we convert it into the timecounter ns
 * value, then store that result into the hwtstamps structure which
 * is passed up the network stack
 */
void aq_ptp_tx_hwtstamp(struct aq_nic_s *aq_nic, u64 timestamp)
{
	struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
	struct sk_buff *skb = aq_ptp_skb_get(&aq_ptp->skb_ring);
	struct skb_shared_hwtstamps hwtstamp;

	if (!skb) {
		netdev_err(aq_nic->ndev, "have timestamp but tx_queues empty\n");
		return;
	}

	timestamp += atomic_read(&aq_ptp->offset_egress);
	aq_ptp_convert_to_hwtstamp(aq_ptp, &hwtstamp, timestamp);
	skb_tstamp_tx(skb, &hwtstamp);
	dev_kfree_skb_any(skb);

	aq_ptp_tx_timeout_update(aq_ptp);
}

/* aq_ptp_rx_hwtstamp - utility function which checks for RX time stamp
 * @adapter: pointer to adapter struct
 * @skb: particular skb to send timestamp with
 *
 * if the timestamp is valid, we convert it into the timecounter ns
 * value, then store that result into the hwtstamps structure which
 * is passed up the network stack
 */
static void aq_ptp_rx_hwtstamp(struct aq_ptp_s *aq_ptp, struct sk_buff *skb,
			       u64 timestamp)
{
	timestamp -= atomic_read(&aq_ptp->offset_ingress);
	aq_ptp_convert_to_hwtstamp(aq_ptp, skb_hwtstamps(skb), timestamp);
}

void aq_ptp_hwtstamp_config_get(struct aq_ptp_s *aq_ptp,
				struct hwtstamp_config *config)
{
	*config = aq_ptp->hwtstamp_config;
}

static void aq_ptp_prepare_filters(struct aq_ptp_s *aq_ptp)
{
	aq_ptp->udp_filter.cmd = HW_ATL_RX_ENABLE_FLTR_L3L4 |
				 HW_ATL_RX_ENABLE_CMP_PROT_L4 |
				 HW_ATL_RX_UDP |
				 HW_ATL_RX_ENABLE_CMP_DEST_PORT_L4 |
				 HW_ATL_RX_HOST << HW_ATL_RX_ACTION_FL3F4_SHIFT |
				 HW_ATL_RX_ENABLE_QUEUE_L3L4 |
				 aq_ptp->ptp_rx.idx << HW_ATL_RX_QUEUE_FL3L4_SHIFT;
	aq_ptp->udp_filter.p_dst = PTP_EV_PORT;

	aq_ptp->eth_type_filter.ethertype = ETH_P_1588;
	aq_ptp->eth_type_filter.queue = aq_ptp->ptp_rx.idx;
}

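/* Enabling timestamping installs two RX filters that steer PTP traffic
 * to the dedicated PTP ring: an L3/L4 filter matching UDP destination
 * port PTP_EV_PORT (319) and an L2 filter matching EtherType ETH_P_1588.
 * Disabling timestamping reprograms/clears those filters again.
 */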
int aq_ptp_hwtstamp_config_set(struct aq_ptp_s *aq_ptp,
			       struct hwtstamp_config *config)
{
	struct aq_nic_s *aq_nic = aq_ptp->aq_nic;
	const struct aq_hw_ops *hw_ops;
	int err = 0;

	hw_ops = aq_nic->aq_hw_ops;
	if (config->tx_type == HWTSTAMP_TX_ON ||
	    config->rx_filter == HWTSTAMP_FILTER_PTP_V2_EVENT) {
		aq_ptp_prepare_filters(aq_ptp);
		if (hw_ops->hw_filter_l3l4_set) {
			err = hw_ops->hw_filter_l3l4_set(aq_nic->aq_hw,
							 &aq_ptp->udp_filter);
		}
		if (!err && hw_ops->hw_filter_l2_set) {
			err = hw_ops->hw_filter_l2_set(aq_nic->aq_hw,
						       &aq_ptp->eth_type_filter);
		}
		aq_utils_obj_set(&aq_nic->flags, AQ_NIC_PTP_DPATH_UP);
	} else {
		aq_ptp->udp_filter.cmd &= ~HW_ATL_RX_ENABLE_FLTR_L3L4;
		if (hw_ops->hw_filter_l3l4_set) {
			err = hw_ops->hw_filter_l3l4_set(aq_nic->aq_hw,
							 &aq_ptp->udp_filter);
		}
		if (!err && hw_ops->hw_filter_l2_clear) {
			err = hw_ops->hw_filter_l2_clear(aq_nic->aq_hw,
							 &aq_ptp->eth_type_filter);
		}
		aq_utils_obj_clear(&aq_nic->flags, AQ_NIC_PTP_DPATH_UP);
	}

	if (err)
		return -EREMOTEIO;

	aq_ptp->hwtstamp_config = *config;

	return 0;
}

bool aq_ptp_ring(struct aq_nic_s *aq_nic, struct aq_ring_s *ring)
{
	struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;

	if (!aq_ptp)
		return false;

	return &aq_ptp->ptp_tx == ring ||
	       &aq_ptp->ptp_rx == ring || &aq_ptp->hwts_rx == ring;
}

u16 aq_ptp_extract_ts(struct aq_nic_s *aq_nic, struct sk_buff *skb, u8 *p,
		      unsigned int len)
{
	struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
	u64 timestamp = 0;
	u16 ret = aq_nic->aq_hw_ops->rx_extract_ts(aq_nic->aq_hw,
						   p, len, &timestamp);

	if (ret > 0)
		aq_ptp_rx_hwtstamp(aq_ptp, skb, timestamp);

	return ret;
}

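/* NAPI poll for the PTP vector. Three rings are serviced in turn: Tx
 * completions on ptp_tx, hardware timestamp descriptors on hwts_rx and
 * regular PTP frames on ptp_rx; the interrupt is re-enabled once the
 * budget is not exhausted.
 */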
static int aq_ptp_poll(struct napi_struct *napi, int budget)
{
	struct aq_ptp_s *aq_ptp = container_of(napi, struct aq_ptp_s, napi);
	struct aq_nic_s *aq_nic = aq_ptp->aq_nic;
	bool was_cleaned = false;
	int work_done = 0;
	int err;

	/* Processing PTP TX traffic */
	err = aq_nic->aq_hw_ops->hw_ring_tx_head_update(aq_nic->aq_hw,
							&aq_ptp->ptp_tx);
	if (err < 0)
		goto err_exit;

	if (aq_ptp->ptp_tx.sw_head != aq_ptp->ptp_tx.hw_head) {
		aq_ring_tx_clean(&aq_ptp->ptp_tx);

		was_cleaned = true;
	}

	/* Processing HW_TIMESTAMP RX traffic */
	err = aq_nic->aq_hw_ops->hw_ring_hwts_rx_receive(aq_nic->aq_hw,
							 &aq_ptp->hwts_rx);
	if (err < 0)
		goto err_exit;

	if (aq_ptp->hwts_rx.sw_head != aq_ptp->hwts_rx.hw_head) {
		aq_ring_hwts_rx_clean(&aq_ptp->hwts_rx, aq_nic);

		err = aq_nic->aq_hw_ops->hw_ring_hwts_rx_fill(aq_nic->aq_hw,
							      &aq_ptp->hwts_rx);
		if (err < 0)
			goto err_exit;

		was_cleaned = true;
	}

	/* Processing PTP RX traffic */
	err = aq_nic->aq_hw_ops->hw_ring_rx_receive(aq_nic->aq_hw,
						    &aq_ptp->ptp_rx);
	if (err < 0)
		goto err_exit;

	if (aq_ptp->ptp_rx.sw_head != aq_ptp->ptp_rx.hw_head) {
		unsigned int sw_tail_old;

		err = aq_ring_rx_clean(&aq_ptp->ptp_rx, napi, &work_done, budget);
		if (err < 0)
			goto err_exit;

		sw_tail_old = aq_ptp->ptp_rx.sw_tail;
		err = aq_ring_rx_fill(&aq_ptp->ptp_rx);
		if (err < 0)
			goto err_exit;

		err = aq_nic->aq_hw_ops->hw_ring_rx_fill(aq_nic->aq_hw,
							 &aq_ptp->ptp_rx,
							 sw_tail_old);
		if (err < 0)
			goto err_exit;
	}

	if (was_cleaned)
		work_done = budget;

	if (work_done < budget) {
		napi_complete_done(napi, work_done);
		aq_nic->aq_hw_ops->hw_irq_enable(aq_nic->aq_hw,
					BIT_ULL(aq_ptp->ptp_ring_param.vec_idx));
	}

err_exit:
	return work_done;
}

static irqreturn_t aq_ptp_isr(int irq, void *private)
{
	struct aq_ptp_s *aq_ptp = private;
	int err = 0;

	if (!aq_ptp) {
		err = -EINVAL;
		goto err_exit;
	}
	napi_schedule(&aq_ptp->napi);

err_exit:
	return err >= 0 ? IRQ_HANDLED : IRQ_NONE;
}

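/* Transmit path for PTP frames. The skb is referenced into skb_ring
 * before it is mapped onto the ptp_tx descriptor ring, so the egress
 * timestamp reported later by hardware can be matched back to it in
 * aq_ptp_tx_hwtstamp().
 */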
int aq_ptp_xmit(struct aq_nic_s *aq_nic, struct sk_buff *skb)
{
	struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
	struct aq_ring_s *ring = &aq_ptp->ptp_tx;
	unsigned long irq_flags;
	int err = NETDEV_TX_OK;
	unsigned int frags;

	if (skb->len <= 0) {
		dev_kfree_skb_any(skb);
		goto err_exit;
	}

	frags = skb_shinfo(skb)->nr_frags + 1;
	/* Frags cannot be bigger than 16KB
	 * because PTP usually works
	 * without jumbo frames even in the background
	 */
	if (frags > AQ_CFG_SKB_FRAGS_MAX || frags > aq_ring_avail_dx(ring)) {
		/* Drop packet because it doesn't make sense to delay it */
		dev_kfree_skb_any(skb);
		goto err_exit;
	}

	err = aq_ptp_skb_put(&aq_ptp->skb_ring, skb);
	if (err) {
		netdev_err(aq_nic->ndev, "SKB Ring overflow (%u)!\n",
			   ring->size);
		return NETDEV_TX_BUSY;
	}
	skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
	aq_ptp_tx_timeout_start(aq_ptp);
	skb_tx_timestamp(skb);

	spin_lock_irqsave(&aq_nic->aq_ptp->ptp_ring_lock, irq_flags);
	frags = aq_nic_map_skb(aq_nic, skb, ring);

	if (likely(frags)) {
		err = aq_nic->aq_hw_ops->hw_ring_tx_xmit(aq_nic->aq_hw,
							 ring, frags);
		if (err >= 0) {
			++ring->stats.tx.packets;
			ring->stats.tx.bytes += skb->len;
		}
	} else {
		err = NETDEV_TX_BUSY;
	}
	spin_unlock_irqrestore(&aq_nic->aq_ptp->ptp_ring_lock, irq_flags);

err_exit:
	return err;
}

void aq_ptp_service_task(struct aq_nic_s *aq_nic)
{
	struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;

	if (!aq_ptp)
		return;

	aq_ptp_tx_timeout_check(aq_ptp);
}

int aq_ptp_irq_alloc(struct aq_nic_s *aq_nic)
{
	struct pci_dev *pdev = aq_nic->pdev;
	struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
	int err = 0;

	if (!aq_ptp)
		return 0;

	if (pdev->msix_enabled || pdev->msi_enabled) {
		err = request_irq(pci_irq_vector(pdev, aq_ptp->idx_vector),
				  aq_ptp_isr, 0, aq_nic->ndev->name, aq_ptp);
	} else {
		err = -EINVAL;
		goto err_exit;
	}

err_exit:
	return err;
}

void aq_ptp_irq_free(struct aq_nic_s *aq_nic)
{
	struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
	struct pci_dev *pdev = aq_nic->pdev;

	if (!aq_ptp)
		return;

	free_irq(pci_irq_vector(pdev, aq_ptp->idx_vector), aq_ptp);
}

int aq_ptp_ring_init(struct aq_nic_s *aq_nic)
{
	struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
	int err = 0;

	if (!aq_ptp)
		return 0;

	err = aq_ring_init(&aq_ptp->ptp_tx);
	if (err < 0)
		goto err_exit;
	err = aq_nic->aq_hw_ops->hw_ring_tx_init(aq_nic->aq_hw,
						 &aq_ptp->ptp_tx,
						 &aq_ptp->ptp_ring_param);
	if (err < 0)
		goto err_exit;

	err = aq_ring_init(&aq_ptp->ptp_rx);
	if (err < 0)
		goto err_exit;
	err = aq_nic->aq_hw_ops->hw_ring_rx_init(aq_nic->aq_hw,
						 &aq_ptp->ptp_rx,
						 &aq_ptp->ptp_ring_param);
	if (err < 0)
		goto err_exit;

	err = aq_ring_rx_fill(&aq_ptp->ptp_rx);
	if (err < 0)
		goto err_rx_free;
	err = aq_nic->aq_hw_ops->hw_ring_rx_fill(aq_nic->aq_hw,
						 &aq_ptp->ptp_rx,
						 0U);
	if (err < 0)
		goto err_rx_free;

	err = aq_ring_init(&aq_ptp->hwts_rx);
	if (err < 0)
		goto err_rx_free;
	err = aq_nic->aq_hw_ops->hw_ring_rx_init(aq_nic->aq_hw,
						 &aq_ptp->hwts_rx,
						 &aq_ptp->ptp_ring_param);
	if (err < 0)
		goto err_exit;
	err = aq_nic->aq_hw_ops->hw_ring_hwts_rx_fill(aq_nic->aq_hw,
						      &aq_ptp->hwts_rx);
	if (err < 0)
		goto err_exit;

	return err;

err_rx_free:
	aq_ring_rx_deinit(&aq_ptp->ptp_rx);
err_exit:
	return err;
}

int aq_ptp_ring_start(struct aq_nic_s *aq_nic)
{
	struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
	int err = 0;

	if (!aq_ptp)
		return 0;

	err = aq_nic->aq_hw_ops->hw_ring_tx_start(aq_nic->aq_hw, &aq_ptp->ptp_tx);
	if (err < 0)
		goto err_exit;

	err = aq_nic->aq_hw_ops->hw_ring_rx_start(aq_nic->aq_hw, &aq_ptp->ptp_rx);
	if (err < 0)
		goto err_exit;

	err = aq_nic->aq_hw_ops->hw_ring_rx_start(aq_nic->aq_hw,
						  &aq_ptp->hwts_rx);
	if (err < 0)
		goto err_exit;

	napi_enable(&aq_ptp->napi);

err_exit:
	return err;
}

void aq_ptp_ring_stop(struct aq_nic_s *aq_nic)
{
	struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;

	if (!aq_ptp)
		return;

	aq_nic->aq_hw_ops->hw_ring_tx_stop(aq_nic->aq_hw, &aq_ptp->ptp_tx);
	aq_nic->aq_hw_ops->hw_ring_rx_stop(aq_nic->aq_hw, &aq_ptp->ptp_rx);

	aq_nic->aq_hw_ops->hw_ring_rx_stop(aq_nic->aq_hw, &aq_ptp->hwts_rx);

	napi_disable(&aq_ptp->napi);
}

void aq_ptp_ring_deinit(struct aq_nic_s *aq_nic)
{
	struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;

	if (!aq_ptp || !aq_ptp->ptp_tx.aq_nic || !aq_ptp->ptp_rx.aq_nic)
		return;

	aq_ring_tx_clean(&aq_ptp->ptp_tx);
	aq_ring_rx_deinit(&aq_ptp->ptp_rx);
}

#define PTP_8TC_RING_IDX             8
#define PTP_4TC_RING_IDX            16
#define PTP_HWST_RING_IDX           31

int aq_ptp_ring_alloc(struct aq_nic_s *aq_nic)
{
	struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
	unsigned int tx_ring_idx, rx_ring_idx;
	struct aq_ring_s *hwts;
	u32 tx_tc_mode, rx_tc_mode;
	struct aq_ring_s *ring;
	int err;

	if (!aq_ptp)
		return 0;

	/* Index must be 8 (8 TCs) or 16 (4 TCs).
	 * It depends on the Traffic Class mode.
	 */
	aq_nic->aq_hw_ops->hw_tx_tc_mode_get(aq_nic->aq_hw, &tx_tc_mode);
	if (tx_tc_mode == 0)
		tx_ring_idx = PTP_8TC_RING_IDX;
	else
		tx_ring_idx = PTP_4TC_RING_IDX;

	ring = aq_ring_tx_alloc(&aq_ptp->ptp_tx, aq_nic,
				tx_ring_idx, &aq_nic->aq_nic_cfg);
	if (!ring) {
		err = -ENOMEM;
		goto err_exit;
	}

	aq_nic->aq_hw_ops->hw_rx_tc_mode_get(aq_nic->aq_hw, &rx_tc_mode);
	if (rx_tc_mode == 0)
		rx_ring_idx = PTP_8TC_RING_IDX;
	else
		rx_ring_idx = PTP_4TC_RING_IDX;

	ring = aq_ring_rx_alloc(&aq_ptp->ptp_rx, aq_nic,
				rx_ring_idx, &aq_nic->aq_nic_cfg);
	if (!ring) {
		err = -ENOMEM;
		goto err_exit_ptp_tx;
	}

	hwts = aq_ring_hwts_rx_alloc(&aq_ptp->hwts_rx, aq_nic, PTP_HWST_RING_IDX,
				     aq_nic->aq_nic_cfg.rxds,
				     aq_nic->aq_nic_cfg.aq_hw_caps->rxd_size);
	if (!hwts) {
		err = -ENOMEM;
		goto err_exit_ptp_rx;
	}

	err = aq_ptp_skb_ring_init(&aq_ptp->skb_ring, aq_nic->aq_nic_cfg.rxds);
	if (err != 0) {
		err = -ENOMEM;
		goto err_exit_hwts_rx;
	}

	aq_ptp->ptp_ring_param.vec_idx = aq_ptp->idx_vector;
	aq_ptp->ptp_ring_param.cpu = aq_ptp->ptp_ring_param.vec_idx +
			aq_nic_get_cfg(aq_nic)->aq_rss.base_cpu_number;
	cpumask_set_cpu(aq_ptp->ptp_ring_param.cpu,
			&aq_ptp->ptp_ring_param.affinity_mask);

	return 0;

err_exit_hwts_rx:
	aq_ring_free(&aq_ptp->hwts_rx);
err_exit_ptp_rx:
	aq_ring_free(&aq_ptp->ptp_rx);
err_exit_ptp_tx:
	aq_ring_free(&aq_ptp->ptp_tx);
err_exit:
	return err;
}

void aq_ptp_ring_free(struct aq_nic_s *aq_nic)
{
	struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;

	if (!aq_ptp)
		return;

	aq_ring_free(&aq_ptp->ptp_tx);
	aq_ring_free(&aq_ptp->ptp_rx);
	aq_ring_free(&aq_ptp->hwts_rx);

	aq_ptp_skb_ring_release(&aq_ptp->skb_ring);
}

#define MAX_PTP_GPIO_COUNT 4

static struct ptp_clock_info aq_ptp_clock = {
	.owner = THIS_MODULE,
	.name = "atlantic ptp",
	.max_adj = 999999999,
	.n_ext_ts = 0,
	.pps = 0,
	.adjfine = aq_ptp_adjfine,
	.adjtime = aq_ptp_adjtime,
	.gettime64 = aq_ptp_gettime,
	.settime64 = aq_ptp_settime,
	.n_per_out = 0,
	.enable = aq_ptp_gpio_feature_enable,
	.n_pins = 0,
	.verify = aq_ptp_verify,
	.pin_config = NULL,
};

#define ptp_offset_init(__idx, __mbps, __egress, __ingress) do { \
		ptp_offset[__idx].mbps = (__mbps); \
		ptp_offset[__idx].egress = (__egress); \
		ptp_offset[__idx].ingress = (__ingress); } \
		while (0)

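/* Per-link-speed egress/ingress timestamp corrections reported by
 * firmware. aq_ptp_tm_offset_set() selects the entry that matches the
 * current link speed; aq_ptp_tx_hwtstamp()/aq_ptp_rx_hwtstamp() then add
 * or subtract it from the raw hardware timestamps.
 */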
static void aq_ptp_offset_init_from_fw(const struct hw_atl_ptp_offset *offsets)
{
	int i;

	/* Load offsets for PTP */
	for (i = 0; i < ARRAY_SIZE(ptp_offset); i++) {
		switch (i) {
		/* 100M */
		case ptp_offset_idx_100:
			ptp_offset_init(i, 100,
					offsets->egress_100,
					offsets->ingress_100);
			break;
		/* 1G */
		case ptp_offset_idx_1000:
			ptp_offset_init(i, 1000,
					offsets->egress_1000,
					offsets->ingress_1000);
			break;
		/* 2.5G */
		case ptp_offset_idx_2500:
			ptp_offset_init(i, 2500,
					offsets->egress_2500,
					offsets->ingress_2500);
			break;
		/* 5G */
		case ptp_offset_idx_5000:
			ptp_offset_init(i, 5000,
					offsets->egress_5000,
					offsets->ingress_5000);
			break;
		/* 10G */
		case ptp_offset_idx_10000:
			ptp_offset_init(i, 10000,
					offsets->egress_10000,
					offsets->ingress_10000);
			break;
		}
	}
}

static void aq_ptp_offset_init(const struct hw_atl_ptp_offset *offsets)
{
	memset(ptp_offset, 0, sizeof(ptp_offset));

	aq_ptp_offset_init_from_fw(offsets);
}

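/* Build the ptp_pin_desc table advertised to the PTP core: up to three
 * GPIOs that firmware reports as PTP outputs become periodic-output
 * pins, and one external-timestamp pin is added when the PHY exposes a
 * timestamping pin (CAPS_EX_PHY_CTRL_TS_PIN).
 */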
static void aq_ptp_gpio_init(struct ptp_clock_info *info,
			     struct hw_atl_info *hw_info)
{
	struct ptp_pin_desc pin_desc[MAX_PTP_GPIO_COUNT];
	u32 extts_pin_cnt = 0;
	u32 out_pin_cnt = 0;
	u32 i;

	memset(pin_desc, 0, sizeof(pin_desc));

	for (i = 0; i < MAX_PTP_GPIO_COUNT - 1; i++) {
		if (hw_info->gpio_pin[i] ==
		    (GPIO_PIN_FUNCTION_PTP0 + out_pin_cnt)) {
			snprintf(pin_desc[out_pin_cnt].name,
				 sizeof(pin_desc[out_pin_cnt].name),
				 "AQ_GPIO%d", i);
			pin_desc[out_pin_cnt].index = out_pin_cnt;
			pin_desc[out_pin_cnt].chan = out_pin_cnt;
			pin_desc[out_pin_cnt++].func = PTP_PF_PEROUT;
		}
	}

	info->n_per_out = out_pin_cnt;

	if (hw_info->caps_ex & BIT(CAPS_EX_PHY_CTRL_TS_PIN)) {
		extts_pin_cnt += 1;

		snprintf(pin_desc[out_pin_cnt].name,
			 sizeof(pin_desc[out_pin_cnt].name),
			 "AQ_GPIO%d", out_pin_cnt);
		pin_desc[out_pin_cnt].index = out_pin_cnt;
		pin_desc[out_pin_cnt].chan = 0;
		pin_desc[out_pin_cnt].func = PTP_PF_EXTTS;
	}

	info->n_pins = out_pin_cnt + extts_pin_cnt;
	info->n_ext_ts = extts_pin_cnt;

	if (!info->n_pins)
		return;

	info->pin_config = kcalloc(info->n_pins, sizeof(struct ptp_pin_desc),
				   GFP_KERNEL);

	if (!info->pin_config)
		return;

	memcpy(info->pin_config, &pin_desc,
	       sizeof(struct ptp_pin_desc) * info->n_pins);
}

void aq_ptp_clock_init(struct aq_nic_s *aq_nic)
{
	struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
	struct timespec64 ts;

	ktime_get_real_ts64(&ts);
	aq_ptp_settime(&aq_ptp->ptp_info, &ts);
}

static void aq_ptp_poll_sync_work_cb(struct work_struct *w);

int aq_ptp_init(struct aq_nic_s *aq_nic, unsigned int idx_vec)
{
	struct hw_atl_utils_mbox mbox;
	struct ptp_clock *clock;
	struct aq_ptp_s *aq_ptp;
	int err = 0;

	if (!aq_nic->aq_hw_ops->hw_get_ptp_ts) {
		aq_nic->aq_ptp = NULL;
		return 0;
	}

	if (!aq_nic->aq_fw_ops->enable_ptp) {
		aq_nic->aq_ptp = NULL;
		return 0;
	}

	hw_atl_utils_mpi_read_stats(aq_nic->aq_hw, &mbox);

	if (!(mbox.info.caps_ex & BIT(CAPS_EX_PHY_PTP_EN))) {
		aq_nic->aq_ptp = NULL;
		return 0;
	}

	aq_ptp_offset_init(&mbox.info.ptp_offset);

	aq_ptp = kzalloc(sizeof(*aq_ptp), GFP_KERNEL);
	if (!aq_ptp) {
		err = -ENOMEM;
		goto err_exit;
	}

	aq_ptp->aq_nic = aq_nic;

	spin_lock_init(&aq_ptp->ptp_lock);
	spin_lock_init(&aq_ptp->ptp_ring_lock);

	aq_ptp->ptp_info = aq_ptp_clock;
	aq_ptp_gpio_init(&aq_ptp->ptp_info, &mbox.info);
	clock = ptp_clock_register(&aq_ptp->ptp_info, &aq_nic->ndev->dev);
	if (IS_ERR(clock)) {
		netdev_err(aq_nic->ndev, "ptp_clock_register failed\n");
		err = PTR_ERR(clock);
		goto err_exit;
	}
	aq_ptp->ptp_clock = clock;
	aq_ptp_tx_timeout_init(&aq_ptp->ptp_tx_timeout);

	atomic_set(&aq_ptp->offset_egress, 0);
	atomic_set(&aq_ptp->offset_ingress, 0);

	netif_napi_add(aq_nic_get_ndev(aq_nic), &aq_ptp->napi,
		       aq_ptp_poll, AQ_CFG_NAPI_WEIGHT);

	aq_ptp->idx_vector = idx_vec;

	aq_nic->aq_ptp = aq_ptp;

	/* enable ptp counter */
	aq_utils_obj_set(&aq_nic->aq_hw->flags, AQ_HW_PTP_AVAILABLE);
	mutex_lock(&aq_nic->fwreq_mutex);
	aq_nic->aq_fw_ops->enable_ptp(aq_nic->aq_hw, 1);
	aq_ptp_clock_init(aq_nic);
	mutex_unlock(&aq_nic->fwreq_mutex);

	INIT_DELAYED_WORK(&aq_ptp->poll_sync, &aq_ptp_poll_sync_work_cb);
	aq_ptp->eth_type_filter.location =
			aq_nic_reserve_filter(aq_nic, aq_rx_filter_ethertype);
	aq_ptp->udp_filter.location =
			aq_nic_reserve_filter(aq_nic, aq_rx_filter_l3l4);

	return 0;

err_exit:
	if (aq_ptp)
		kfree(aq_ptp->ptp_info.pin_config);
	kfree(aq_ptp);
	aq_nic->aq_ptp = NULL;
	return err;
}

void aq_ptp_unregister(struct aq_nic_s *aq_nic)
{
	struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;

	if (!aq_ptp)
		return;

	ptp_clock_unregister(aq_ptp->ptp_clock);
}

void aq_ptp_free(struct aq_nic_s *aq_nic)
{
	struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;

	if (!aq_ptp)
		return;

	aq_nic_release_filter(aq_nic, aq_rx_filter_ethertype,
			      aq_ptp->eth_type_filter.location);
	aq_nic_release_filter(aq_nic, aq_rx_filter_l3l4,
			      aq_ptp->udp_filter.location);
	cancel_delayed_work_sync(&aq_ptp->poll_sync);
	/* disable ptp */
	mutex_lock(&aq_nic->fwreq_mutex);
	aq_nic->aq_fw_ops->enable_ptp(aq_nic->aq_hw, 0);
	mutex_unlock(&aq_nic->fwreq_mutex);

	kfree(aq_ptp->ptp_info.pin_config);

	netif_napi_del(&aq_ptp->napi);
	kfree(aq_ptp);
	aq_nic->aq_ptp = NULL;
}

struct ptp_clock *aq_ptp_get_ptp_clock(struct aq_ptp_s *aq_ptp)
{
	return aq_ptp->ptp_clock;
}

/* PTP external GPIO nanoseconds count */
static uint64_t aq_ptp_get_sync1588_ts(struct aq_nic_s *aq_nic)
{
	u64 ts = 0;

	if (aq_nic->aq_hw_ops->hw_get_sync_ts)
		aq_nic->aq_hw_ops->hw_get_sync_ts(aq_nic->aq_hw, &ts);

	return ts;
}

static void aq_ptp_start_work(struct aq_ptp_s *aq_ptp)
{
	if (aq_ptp->extts_pin_enabled) {
		aq_ptp->poll_timeout_ms = POLL_SYNC_TIMER_MS;
		aq_ptp->last_sync1588_ts =
			aq_ptp_get_sync1588_ts(aq_ptp->aq_nic);
		schedule_delayed_work(&aq_ptp->poll_sync,
				      msecs_to_jiffies(aq_ptp->poll_timeout_ms));
	}
}

int aq_ptp_link_change(struct aq_nic_s *aq_nic)
{
	struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;

	if (!aq_ptp)
		return 0;

	if (aq_nic->aq_hw->aq_link_status.mbps)
		aq_ptp_start_work(aq_ptp);
	else
		cancel_delayed_work_sync(&aq_ptp->poll_sync);

	return 0;
}

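/* The SYNC1588 GPIO timestamp is latched by hardware and read back via
 * hw_get_sync_ts(); read it up to three times and only accept a value
 * that is stable across two consecutive reads, so a half-updated counter
 * is never used.
 */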
static bool aq_ptp_sync_ts_updated(struct aq_ptp_s *aq_ptp, u64 *new_ts)
{
	struct aq_nic_s *aq_nic = aq_ptp->aq_nic;
	u64 sync_ts2;
	u64 sync_ts;

	sync_ts = aq_ptp_get_sync1588_ts(aq_nic);

	if (sync_ts != aq_ptp->last_sync1588_ts) {
		sync_ts2 = aq_ptp_get_sync1588_ts(aq_nic);
		if (sync_ts != sync_ts2) {
			sync_ts = sync_ts2;
			sync_ts2 = aq_ptp_get_sync1588_ts(aq_nic);
			if (sync_ts != sync_ts2) {
				netdev_err(aq_nic->ndev,
					   "%s: Unable to get correct GPIO TS",
					   __func__);
				sync_ts = 0;
			}
		}

		*new_ts = sync_ts;
		return true;
	}

	return false;
}

static int aq_ptp_check_sync1588(struct aq_ptp_s *aq_ptp)
{
	struct aq_nic_s *aq_nic = aq_ptp->aq_nic;
	u64 sync_ts;

	/* Sync1588 pin was triggered */
	if (aq_ptp_sync_ts_updated(aq_ptp, &sync_ts)) {
		if (aq_ptp->extts_pin_enabled) {
			struct ptp_clock_event ptp_event;
			u64 time = 0;

			aq_nic->aq_hw_ops->hw_ts_to_sys_clock(aq_nic->aq_hw,
							      sync_ts, &time);
			ptp_event.index = aq_ptp->ptp_info.n_pins - 1;
			ptp_event.timestamp = time;

			ptp_event.type = PTP_CLOCK_EXTTS;
			ptp_clock_event(aq_ptp->ptp_clock, &ptp_event);
		}

		aq_ptp->last_sync1588_ts = sync_ts;
	}

	return 0;
}

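/* Delayed work that polls for SYNC1588 pin events while the EXTTS pin is
 * enabled; the driver polls rather than relying on an interrupt, so it
 * re-arms itself every poll_timeout_ms milliseconds.
 */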
static void aq_ptp_poll_sync_work_cb(struct work_struct *w)
{
	struct delayed_work *dw = to_delayed_work(w);
	struct aq_ptp_s *aq_ptp = container_of(dw, struct aq_ptp_s, poll_sync);

	aq_ptp_check_sync1588(aq_ptp);

	if (aq_ptp->extts_pin_enabled) {
		unsigned long timeout = msecs_to_jiffies(aq_ptp->poll_timeout_ms);

		schedule_delayed_work(&aq_ptp->poll_sync, timeout);
	}
}