/* QLogic qede NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 *  - Redistributions of source code must retain the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer.
 *
 *  - Redistributions in binary form must reproduce the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer in the documentation and/or other materials
 *    provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "qede_ptp.h"

struct qede_ptp {
        const struct qed_eth_ptp_ops *ops;
        struct ptp_clock_info clock_info;
        struct cyclecounter cc;
        struct timecounter tc;
        struct ptp_clock *clock;
        struct work_struct work;
        struct qede_dev *edev;
        struct sk_buff *tx_skb;

        /* ptp spinlock is used for protecting the cycle/time counter fields
         * and, also for serializing the qed PTP API invocations.
         */
        spinlock_t lock;
        bool hw_ts_ioctl_called;
        u16 tx_type;
        u16 rx_filter;
};

/**
 * qede_ptp_adjfreq
 * @ptp: the ptp clock structure
 * @ppb: parts per billion adjustment from base
 *
 * Adjust the frequency of the ptp cycle counter by the
 * indicated ppb from the base frequency.
 */
static int qede_ptp_adjfreq(struct ptp_clock_info *info, s32 ppb)
{
        struct qede_ptp *ptp = container_of(info, struct qede_ptp, clock_info);
        struct qede_dev *edev = ptp->edev;
        int rc;

        __qede_lock(edev);
        if (edev->state == QEDE_STATE_OPEN) {
                spin_lock_bh(&ptp->lock);
                rc = ptp->ops->adjfreq(edev->cdev, ppb);
                spin_unlock_bh(&ptp->lock);
        } else {
                DP_ERR(edev, "PTP adjfreq called while interface is down\n");
                rc = -EFAULT;
        }
        __qede_unlock(edev);

        return rc;
}
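
/* Shift the PHC-derived software clock by @delta nanoseconds. Only the
 * timecounter is adjusted; the hardware cycle counter is left untouched.
 */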
static int qede_ptp_adjtime(struct ptp_clock_info *info, s64 delta)
{
        struct qede_dev *edev;
        struct qede_ptp *ptp;

        ptp = container_of(info, struct qede_ptp, clock_info);
        edev = ptp->edev;

        DP_VERBOSE(edev, QED_MSG_DEBUG, "PTP adjtime called, delta = %llx\n",
                   delta);

        spin_lock_bh(&ptp->lock);
        timecounter_adjtime(&ptp->tc, delta);
        spin_unlock_bh(&ptp->lock);

        return 0;
}
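
/* Report the current time of the PHC-backed clock by reading the
 * timecounter under the ptp lock.
 */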
static int qede_ptp_gettime(struct ptp_clock_info *info, struct timespec64 *ts)
{
        struct qede_dev *edev;
        struct qede_ptp *ptp;
        u64 ns;

        ptp = container_of(info, struct qede_ptp, clock_info);
        edev = ptp->edev;

        spin_lock_bh(&ptp->lock);
        ns = timecounter_read(&ptp->tc);
        spin_unlock_bh(&ptp->lock);

        DP_VERBOSE(edev, QED_MSG_DEBUG, "PTP gettime called, ns = %llu\n", ns);

        *ts = ns_to_timespec64(ns);

        return 0;
}
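
/* Set the clock to the requested time by re-initializing the timecounter
 * with the new nanosecond base; subsequent reads are measured relative to
 * the PHC cycle count sampled here.
 */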
static int qede_ptp_settime(struct ptp_clock_info *info,
                            const struct timespec64 *ts)
{
        struct qede_dev *edev;
        struct qede_ptp *ptp;
        u64 ns;

        ptp = container_of(info, struct qede_ptp, clock_info);
        edev = ptp->edev;

        ns = timespec64_to_ns(ts);

        DP_VERBOSE(edev, QED_MSG_DEBUG, "PTP settime called, ns = %llu\n", ns);

        /* Re-init the timecounter */
        spin_lock_bh(&ptp->lock);
        timecounter_init(&ptp->tc, &ptp->cc, ns);
        spin_unlock_bh(&ptp->lock);

        return 0;
}

/* Enable (or disable) ancillary features of the phc subsystem */
static int qede_ptp_ancillary_feature_enable(struct ptp_clock_info *info,
                                             struct ptp_clock_request *rq,
                                             int on)
{
        struct qede_dev *edev;
        struct qede_ptp *ptp;

        ptp = container_of(info, struct qede_ptp, clock_info);
        edev = ptp->edev;

        DP_ERR(edev, "PHC ancillary features are not supported\n");

        return -ENOTSUPP;
}
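
/* Deferred work for Tx timestamping: poll the device for a completed Tx
 * timestamp, rescheduling while none is available; once a value is read,
 * convert it to nanoseconds, deliver it via skb_tstamp_tx() and release
 * the pending skb.
 */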
static void qede_ptp_task(struct work_struct *work)
{
        struct skb_shared_hwtstamps shhwtstamps;
        struct qede_dev *edev;
        struct qede_ptp *ptp;
        u64 timestamp, ns;
        int rc;

        ptp = container_of(work, struct qede_ptp, work);
        edev = ptp->edev;

        /* Read Tx timestamp registers */
        spin_lock_bh(&ptp->lock);
        rc = ptp->ops->read_tx_ts(edev->cdev, &timestamp);
        spin_unlock_bh(&ptp->lock);
        if (rc) {
                /* Reschedule to keep checking for a valid timestamp value */
                schedule_work(&ptp->work);
                return;
        }

        ns = timecounter_cyc2time(&ptp->tc, timestamp);
        memset(&shhwtstamps, 0, sizeof(shhwtstamps));
        shhwtstamps.hwtstamp = ns_to_ktime(ns);
        skb_tstamp_tx(ptp->tx_skb, &shhwtstamps);
        dev_kfree_skb_any(ptp->tx_skb);
        ptp->tx_skb = NULL;
        clear_bit_unlock(QEDE_FLAGS_PTP_TX_IN_PRORGESS, &edev->flags);

        DP_VERBOSE(edev, QED_MSG_DEBUG,
                   "Tx timestamp, timestamp cycles = %llu, ns = %llu\n",
                   timestamp, ns);
}

/* Read the PHC. This API is invoked with ptp_lock held. */
static u64 qede_ptp_read_cc(const struct cyclecounter *cc)
{
        struct qede_dev *edev;
        struct qede_ptp *ptp;
        u64 phc_cycles;
        int rc;

        ptp = container_of(cc, struct qede_ptp, cc);
        edev = ptp->edev;
        rc = ptp->ops->read_cc(edev->cdev, &phc_cycles);
        if (rc)
                WARN_ONCE(1, "PHC read err %d\n", rc);

        DP_VERBOSE(edev, QED_MSG_DEBUG, "PHC read cycles = %llu\n", phc_cycles);

        return phc_cycles;
}
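
/* Translate the hwtstamp_config values cached from the HWTSTAMP ioctl into
 * the qed tx_type/rx_filter enums and program them into the device.
 */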
static int qede_ptp_cfg_filters(struct qede_dev *edev)
{
        enum qed_ptp_hwtstamp_tx_type tx_type = QED_PTP_HWTSTAMP_TX_ON;
        enum qed_ptp_filter_type rx_filter = QED_PTP_FILTER_NONE;
        struct qede_ptp *ptp = edev->ptp;

        if (!ptp)
                return -EIO;

        if (!ptp->hw_ts_ioctl_called) {
                DP_INFO(edev, "TS IOCTL not called\n");
                return 0;
        }

        switch (ptp->tx_type) {
        case HWTSTAMP_TX_ON:
                edev->flags |= QEDE_TX_TIMESTAMPING_EN;
                tx_type = QED_PTP_HWTSTAMP_TX_ON;
                break;

        case HWTSTAMP_TX_OFF:
                edev->flags &= ~QEDE_TX_TIMESTAMPING_EN;
                tx_type = QED_PTP_HWTSTAMP_TX_OFF;
                break;

        case HWTSTAMP_TX_ONESTEP_SYNC:
                DP_ERR(edev, "One-step timestamping is not supported\n");
                return -ERANGE;
        }

        spin_lock_bh(&ptp->lock);
        switch (ptp->rx_filter) {
        case HWTSTAMP_FILTER_NONE:
                rx_filter = QED_PTP_FILTER_NONE;
                break;
        case HWTSTAMP_FILTER_ALL:
        case HWTSTAMP_FILTER_SOME:
        case HWTSTAMP_FILTER_NTP_ALL:
                ptp->rx_filter = HWTSTAMP_FILTER_NONE;
                rx_filter = QED_PTP_FILTER_ALL;
                break;
        case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
                ptp->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
                rx_filter = QED_PTP_FILTER_V1_L4_EVENT;
                break;
        case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
        case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
                ptp->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
                /* Initialize PTP detection for UDP/IPv4 events */
                rx_filter = QED_PTP_FILTER_V1_L4_GEN;
                break;
        case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
                ptp->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
                rx_filter = QED_PTP_FILTER_V2_L4_EVENT;
                break;
        case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
        case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
                ptp->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
                /* Initialize PTP detection for UDP/IPv4 or UDP/IPv6 events */
                rx_filter = QED_PTP_FILTER_V2_L4_GEN;
                break;
        case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
                ptp->rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
                rx_filter = QED_PTP_FILTER_V2_L2_EVENT;
                break;
        case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
        case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
                ptp->rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
                /* Initialize PTP detection L2 events */
                rx_filter = QED_PTP_FILTER_V2_L2_GEN;
                break;
        case HWTSTAMP_FILTER_PTP_V2_EVENT:
                ptp->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
                rx_filter = QED_PTP_FILTER_V2_EVENT;
                break;
        case HWTSTAMP_FILTER_PTP_V2_SYNC:
        case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
                ptp->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
                /* Initialize PTP detection L2, UDP/IPv4 or UDP/IPv6 events */
                rx_filter = QED_PTP_FILTER_V2_GEN;
                break;
        }

        ptp->ops->cfg_filters(edev->cdev, rx_filter, tx_type);

        spin_unlock_bh(&ptp->lock);

        return 0;
}
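
/* Handle the SIOCSHWTSTAMP ioctl: copy the hwtstamp_config from user space,
 * cache the requested tx_type/rx_filter, program the filters and report the
 * (possibly downgraded) rx_filter back to the caller.
 *
 * Illustrative sketch of how user space typically reaches this path (the
 * interface name and socket fd below are examples only):
 *
 *      struct hwtstamp_config cfg = {
 *              .tx_type = HWTSTAMP_TX_ON,
 *              .rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
 *      };
 *      struct ifreq ifr = { };
 *
 *      strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *      ifr.ifr_data = (void *)&cfg;
 *      ioctl(fd, SIOCSHWTSTAMP, &ifr);
 */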
int qede_ptp_hw_ts(struct qede_dev *edev, struct ifreq *ifr)
{
        struct hwtstamp_config config;
        struct qede_ptp *ptp;
        int rc;

        ptp = edev->ptp;
        if (!ptp)
                return -EIO;

        if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
                return -EFAULT;

        DP_VERBOSE(edev, QED_MSG_DEBUG,
                   "HWTSTAMP IOCTL: Requested tx_type = %d, requested rx_filters = %d\n",
                   config.tx_type, config.rx_filter);

        if (config.flags) {
                DP_ERR(edev, "config.flags is reserved for future use\n");
                return -EINVAL;
        }

        ptp->hw_ts_ioctl_called = 1;
        ptp->tx_type = config.tx_type;
        ptp->rx_filter = config.rx_filter;

        rc = qede_ptp_cfg_filters(edev);
        if (rc)
                return rc;

        config.rx_filter = ptp->rx_filter;

        return copy_to_user(ifr->ifr_data, &config,
                            sizeof(config)) ? -EFAULT : 0;
}
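
/* ethtool get_ts_info(): advertise the supported timestamping modes, Rx
 * filters and the PHC index (or -1 when no PHC is registered).
 */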
int qede_ptp_get_ts_info(struct qede_dev *edev, struct ethtool_ts_info *info)
{
        struct qede_ptp *ptp = edev->ptp;

        if (!ptp)
                return -EIO;

        info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
                                SOF_TIMESTAMPING_RX_SOFTWARE |
                                SOF_TIMESTAMPING_SOFTWARE |
                                SOF_TIMESTAMPING_TX_HARDWARE |
                                SOF_TIMESTAMPING_RX_HARDWARE |
                                SOF_TIMESTAMPING_RAW_HARDWARE;

        if (ptp->clock)
                info->phc_index = ptp_clock_index(ptp->clock);
        else
                info->phc_index = -1;

        info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
                           BIT(HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
                           BIT(HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
                           BIT(HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
                           BIT(HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
                           BIT(HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
                           BIT(HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) |
                           BIT(HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
                           BIT(HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
                           BIT(HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
                           BIT(HWTSTAMP_FILTER_PTP_V2_EVENT) |
                           BIT(HWTSTAMP_FILTER_PTP_V2_SYNC) |
                           BIT(HWTSTAMP_FILTER_PTP_V2_DELAY_REQ);

        info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON);

        return 0;
}
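
/* Tear down PTP support: unregister the PHC, cancel the Tx-timestamp work
 * item, drop any pending skb and disable timestamping in the hardware.
 */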
void qede_ptp_disable(struct qede_dev *edev)
{
        struct qede_ptp *ptp;

        ptp = edev->ptp;
        if (!ptp)
                return;

        if (ptp->clock) {
                ptp_clock_unregister(ptp->clock);
                ptp->clock = NULL;
        }

        /* Cancel PTP work queue. Should be done after the Tx queues are
         * drained to prevent additional scheduling.
         */
        cancel_work_sync(&ptp->work);
        if (ptp->tx_skb) {
                dev_kfree_skb_any(ptp->tx_skb);
                ptp->tx_skb = NULL;
        }

        /* Disable PTP in HW */
        spin_lock_bh(&ptp->lock);
        ptp->ops->disable(edev->cdev);
        spin_unlock_bh(&ptp->lock);

        kfree(ptp);
        edev->ptp = NULL;
}
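
/* Enable PTP in the hardware and set up the Tx-timestamp work item and, on
 * the first load only, the cyclecounter/timecounter pair.
 */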
static int qede_ptp_init(struct qede_dev *edev, bool init_tc)
{
        struct qede_ptp *ptp;
        int rc;

        ptp = edev->ptp;
        if (!ptp)
                return -EINVAL;

        spin_lock_init(&ptp->lock);

        /* Configure PTP in HW */
        rc = ptp->ops->enable(edev->cdev);
        if (rc) {
                DP_INFO(edev, "PTP HW enable failed\n");
                return rc;
        }

        /* Init work queue for Tx timestamping */
        INIT_WORK(&ptp->work, qede_ptp_task);

        /* Init cyclecounter and timecounter. This is done only in the first
         * load. If done in every load, PTP application will fail when doing
         * unload / load (e.g. MTU change) while it is running.
         */
        if (init_tc) {
                memset(&ptp->cc, 0, sizeof(ptp->cc));
                ptp->cc.read = qede_ptp_read_cc;
                ptp->cc.mask = CYCLECOUNTER_MASK(64);
                ptp->cc.shift = 0;
                ptp->cc.mult = 1;

                timecounter_init(&ptp->tc, &ptp->cc,
                                 ktime_to_ns(ktime_get_real()));
        }

        return rc;
}
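
/* Allocate and initialize the per-device PTP context, program the current
 * filters and register the PHC with the PTP clock subsystem.
 */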
int qede_ptp_enable(struct qede_dev *edev, bool init_tc)
{
        struct qede_ptp *ptp;
        int rc;

        ptp = kzalloc(sizeof(*ptp), GFP_KERNEL);
        if (!ptp) {
                DP_INFO(edev, "Failed to allocate struct for PTP\n");
                return -ENOMEM;
        }

        ptp->edev = edev;
        ptp->ops = edev->ops->ptp;
        if (!ptp->ops) {
                DP_INFO(edev, "PTP enable failed\n");
                rc = -EIO;
                goto err1;
        }

        edev->ptp = ptp;

        rc = qede_ptp_init(edev, init_tc);
        if (rc)
                goto err1;

        qede_ptp_cfg_filters(edev);

        /* Fill the ptp_clock_info struct and register PTP clock */
        ptp->clock_info.owner = THIS_MODULE;
        snprintf(ptp->clock_info.name, 16, "%s", edev->ndev->name);
        ptp->clock_info.max_adj = QED_MAX_PHC_DRIFT_PPB;
        ptp->clock_info.n_alarm = 0;
        ptp->clock_info.n_ext_ts = 0;
        ptp->clock_info.n_per_out = 0;
        ptp->clock_info.pps = 0;
        ptp->clock_info.adjfreq = qede_ptp_adjfreq;
        ptp->clock_info.adjtime = qede_ptp_adjtime;
        ptp->clock_info.gettime64 = qede_ptp_gettime;
        ptp->clock_info.settime64 = qede_ptp_settime;
        ptp->clock_info.enable = qede_ptp_ancillary_feature_enable;

        ptp->clock = ptp_clock_register(&ptp->clock_info, &edev->pdev->dev);
        if (IS_ERR(ptp->clock)) {
                rc = -EINVAL;
                DP_ERR(edev, "PTP clock registration failed\n");
                goto err2;
        }

        return 0;

err2:
        qede_ptp_disable(edev);
        ptp->clock = NULL;
err1:
        kfree(ptp);
        edev->ptp = NULL;

        return rc;
}
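
/* Called from the Tx path for packets requesting a hardware timestamp:
 * mark the skb as in progress, hold a reference to it and kick the work
 * item that polls for the Tx timestamp. Only a single packet may be
 * outstanding at a time.
 */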
void qede_ptp_tx_ts(struct qede_dev *edev, struct sk_buff *skb)
{
        struct qede_ptp *ptp;

        ptp = edev->ptp;
        if (!ptp)
                return;

        if (test_and_set_bit_lock(QEDE_FLAGS_PTP_TX_IN_PRORGESS, &edev->flags))
                return;

        if (unlikely(!(edev->flags & QEDE_TX_TIMESTAMPING_EN))) {
                DP_NOTICE(edev,
                          "Tx timestamping was not enabled, this packet will not be timestamped\n");
        } else if (unlikely(ptp->tx_skb)) {
                DP_NOTICE(edev,
                          "The device supports only a single outstanding packet to timestamp, this packet will not be timestamped\n");
        } else {
                skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
                /* schedule check for Tx timestamp */
                ptp->tx_skb = skb_get(skb);
                schedule_work(&ptp->work);
        }
}
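
/* Called from the Rx path: read the Rx timestamp from the device, convert
 * it to nanoseconds and attach it to the skb's hardware timestamp.
 */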
void qede_ptp_rx_ts(struct qede_dev *edev, struct sk_buff *skb)
{
        struct qede_ptp *ptp;
        u64 timestamp, ns;
        int rc;

        ptp = edev->ptp;
        if (!ptp)
                return;

        spin_lock_bh(&ptp->lock);
        rc = ptp->ops->read_rx_ts(edev->cdev, &timestamp);
        if (rc) {
                spin_unlock_bh(&ptp->lock);
                DP_INFO(edev, "Invalid Rx timestamp\n");
                return;
        }

        ns = timecounter_cyc2time(&ptp->tc, timestamp);
        spin_unlock_bh(&ptp->lock);
        skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(ns);
        DP_VERBOSE(edev, QED_MSG_DEBUG,
                   "Rx timestamp, timestamp cycles = %llu, ns = %llu\n",
                   timestamp, ns);
}