Linux 4.16.11
[linux/fpc-iii.git] drivers/net/ethernet/sfc/ef10.c
1 /****************************************************************************
2 * Driver for Solarflare network controllers and boards
3 * Copyright 2012-2013 Solarflare Communications Inc.
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published
7 * by the Free Software Foundation, incorporated herein by reference.
8 */
10 #include "net_driver.h"
11 #include "ef10_regs.h"
12 #include "io.h"
13 #include "mcdi.h"
14 #include "mcdi_pcol.h"
15 #include "nic.h"
16 #include "workarounds.h"
17 #include "selftest.h"
18 #include "ef10_sriov.h"
19 #include <linux/in.h>
20 #include <linux/jhash.h>
21 #include <linux/wait.h>
22 #include <linux/workqueue.h>
24 /* Hardware control for EF10 architecture including 'Huntington'. */
26 #define EFX_EF10_DRVGEN_EV 7
27 enum {
28 EFX_EF10_TEST = 1,
29 EFX_EF10_REFILL,
30 };
32 /* The reserved RSS context value */
33 #define EFX_EF10_RSS_CONTEXT_INVALID 0xffffffff
34 /* The maximum size of a shared RSS context */
35 /* TODO: this should really be from the mcdi protocol export */
36 #define EFX_EF10_MAX_SHARED_RSS_CONTEXT_SIZE 64UL
38 /* The filter table(s) are managed by firmware and we have write-only
39 * access. When removing filters we must identify them to the
40 * firmware by a 64-bit handle, but this is too wide for Linux kernel
41 * interfaces (32-bit for RX NFC, 16-bit for RFS). Also, we need to
42 * be able to tell in advance whether a requested insertion will
43 * replace an existing filter. Therefore we maintain a software hash
44 * table, which should be at least as large as the hardware hash
45 * table.
47 * Huntington has a single 8K filter table shared between all filter
48 * types and both ports.
49 */
50 #define HUNT_FILTER_TBL_ROWS 8192
52 #define EFX_EF10_FILTER_ID_INVALID 0xffff
54 #define EFX_EF10_FILTER_DEV_UC_MAX 32
55 #define EFX_EF10_FILTER_DEV_MC_MAX 256
57 /* VLAN list entry */
58 struct efx_ef10_vlan {
59 struct list_head list;
60 u16 vid;
61 };
63 enum efx_ef10_default_filters {
64 EFX_EF10_BCAST,
65 EFX_EF10_UCDEF,
66 EFX_EF10_MCDEF,
67 EFX_EF10_VXLAN4_UCDEF,
68 EFX_EF10_VXLAN4_MCDEF,
69 EFX_EF10_VXLAN6_UCDEF,
70 EFX_EF10_VXLAN6_MCDEF,
71 EFX_EF10_NVGRE4_UCDEF,
72 EFX_EF10_NVGRE4_MCDEF,
73 EFX_EF10_NVGRE6_UCDEF,
74 EFX_EF10_NVGRE6_MCDEF,
75 EFX_EF10_GENEVE4_UCDEF,
76 EFX_EF10_GENEVE4_MCDEF,
77 EFX_EF10_GENEVE6_UCDEF,
78 EFX_EF10_GENEVE6_MCDEF,
80 EFX_EF10_NUM_DEFAULT_FILTERS
81 };
83 /* Per-VLAN filters information */
84 struct efx_ef10_filter_vlan {
85 struct list_head list;
86 u16 vid;
87 u16 uc[EFX_EF10_FILTER_DEV_UC_MAX];
88 u16 mc[EFX_EF10_FILTER_DEV_MC_MAX];
89 u16 default_filters[EFX_EF10_NUM_DEFAULT_FILTERS];
90 };
92 struct efx_ef10_dev_addr {
93 u8 addr[ETH_ALEN];
94 };
96 struct efx_ef10_filter_table {
97 /* The MCDI match masks supported by this fw & hw, in order of priority */
98 u32 rx_match_mcdi_flags[
99 MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_MAXNUM * 2];
100 unsigned int rx_match_count;
102 struct {
103 unsigned long spec; /* pointer to spec plus flag bits */
104 /* BUSY flag indicates that an update is in progress. AUTO_OLD is
105 * used to mark and sweep MAC filters for the device address lists.
106 */
107 #define EFX_EF10_FILTER_FLAG_BUSY 1UL
108 #define EFX_EF10_FILTER_FLAG_AUTO_OLD 2UL
109 #define EFX_EF10_FILTER_FLAGS 3UL
110 u64 handle; /* firmware handle */
111 } *entry;
112 wait_queue_head_t waitq;
113 /* Shadow of net_device address lists, guarded by mac_lock */
114 struct efx_ef10_dev_addr dev_uc_list[EFX_EF10_FILTER_DEV_UC_MAX];
115 struct efx_ef10_dev_addr dev_mc_list[EFX_EF10_FILTER_DEV_MC_MAX];
116 int dev_uc_count;
117 int dev_mc_count;
118 bool uc_promisc;
119 bool mc_promisc;
120 /* Whether in multicast promiscuous mode when last changed */
121 bool mc_promisc_last;
122 bool mc_overflow; /* Too many MC addrs; should always imply mc_promisc */
123 bool vlan_filter;
124 struct list_head vlan_list;
125 };
127 /* An arbitrary search limit for the software hash table */
128 #define EFX_EF10_FILTER_SEARCH_LIMIT 200
130 static void efx_ef10_rx_free_indir_table(struct efx_nic *efx);
131 static void efx_ef10_filter_table_remove(struct efx_nic *efx);
132 static int efx_ef10_filter_add_vlan(struct efx_nic *efx, u16 vid);
133 static void efx_ef10_filter_del_vlan_internal(struct efx_nic *efx,
134 struct efx_ef10_filter_vlan *vlan);
135 static void efx_ef10_filter_del_vlan(struct efx_nic *efx, u16 vid);
136 static int efx_ef10_set_udp_tnl_ports(struct efx_nic *efx, bool unloading);
138 static u32 efx_ef10_filter_get_unsafe_id(u32 filter_id)
140 WARN_ON_ONCE(filter_id == EFX_EF10_FILTER_ID_INVALID);
141 return filter_id & (HUNT_FILTER_TBL_ROWS - 1);
144 static unsigned int efx_ef10_filter_get_unsafe_pri(u32 filter_id)
146 return filter_id / (HUNT_FILTER_TBL_ROWS * 2);
149 static u32 efx_ef10_make_filter_id(unsigned int pri, u16 idx)
151 return pri * HUNT_FILTER_TBL_ROWS * 2 + idx;
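/* Worked example (added commentary, values illustrative): with
 * HUNT_FILTER_TBL_ROWS = 8192, efx_ef10_make_filter_id(1, 5) yields
 * 1 * 8192 * 2 + 5 = 16389.  Decoding that ID gives
 * efx_ef10_filter_get_unsafe_id(16389) = 16389 & 8191 = 5 (the table row)
 * and efx_ef10_filter_get_unsafe_pri(16389) = 16389 / 16384 = 1 (the
 * priority band), so both fields round-trip through the 32-bit ID.
 */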
154 static int efx_ef10_get_warm_boot_count(struct efx_nic *efx)
156 efx_dword_t reg;
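	/* Added note: the check below assumes the MC writes the magic value
	 * 0xb007 ("boot") into word 1 of this register once it has finished
	 * booting; only then is word 0 treated as a valid warm boot count,
	 * otherwise -EIO is returned.
	 */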
158 efx_readd(efx, &reg, ER_DZ_BIU_MC_SFT_STATUS);
159 return EFX_DWORD_FIELD(reg, EFX_WORD_1) == 0xb007 ?
160 EFX_DWORD_FIELD(reg, EFX_WORD_0) : -EIO;
163 /* On all EF10s up to and including SFC9220 (Medford1), all PFs use BAR 0 for
164 * I/O space and BAR 2(&3) for memory. On SFC9250 (Medford2), there is no I/O
165 * bar; PFs use BAR 0/1 for memory.
167 static unsigned int efx_ef10_pf_mem_bar(struct efx_nic *efx)
169 switch (efx->pci_dev->device) {
170 case 0x0b03: /* SFC9250 PF */
171 return 0;
172 default:
173 return 2;
177 /* All VFs use BAR 0/1 for memory */
178 static unsigned int efx_ef10_vf_mem_bar(struct efx_nic *efx)
180 return 0;
183 static unsigned int efx_ef10_mem_map_size(struct efx_nic *efx)
185 int bar;
187 bar = efx->type->mem_bar(efx);
188 return resource_size(&efx->pci_dev->resource[bar]);
191 static bool efx_ef10_is_vf(struct efx_nic *efx)
193 return efx->type->is_vf;
196 static int efx_ef10_get_pf_index(struct efx_nic *efx)
198 MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_FUNCTION_INFO_OUT_LEN);
199 struct efx_ef10_nic_data *nic_data = efx->nic_data;
200 size_t outlen;
201 int rc;
203 rc = efx_mcdi_rpc(efx, MC_CMD_GET_FUNCTION_INFO, NULL, 0, outbuf,
204 sizeof(outbuf), &outlen);
205 if (rc)
206 return rc;
207 if (outlen < sizeof(outbuf))
208 return -EIO;
210 nic_data->pf_index = MCDI_DWORD(outbuf, GET_FUNCTION_INFO_OUT_PF);
211 return 0;
214 #ifdef CONFIG_SFC_SRIOV
215 static int efx_ef10_get_vf_index(struct efx_nic *efx)
217 MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_FUNCTION_INFO_OUT_LEN);
218 struct efx_ef10_nic_data *nic_data = efx->nic_data;
219 size_t outlen;
220 int rc;
222 rc = efx_mcdi_rpc(efx, MC_CMD_GET_FUNCTION_INFO, NULL, 0, outbuf,
223 sizeof(outbuf), &outlen);
224 if (rc)
225 return rc;
226 if (outlen < sizeof(outbuf))
227 return -EIO;
229 nic_data->vf_index = MCDI_DWORD(outbuf, GET_FUNCTION_INFO_OUT_VF);
230 return 0;
232 #endif
234 static int efx_ef10_init_datapath_caps(struct efx_nic *efx)
236 MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_CAPABILITIES_V4_OUT_LEN);
237 struct efx_ef10_nic_data *nic_data = efx->nic_data;
238 size_t outlen;
239 int rc;
241 BUILD_BUG_ON(MC_CMD_GET_CAPABILITIES_IN_LEN != 0);
243 rc = efx_mcdi_rpc(efx, MC_CMD_GET_CAPABILITIES, NULL, 0,
244 outbuf, sizeof(outbuf), &outlen);
245 if (rc)
246 return rc;
247 if (outlen < MC_CMD_GET_CAPABILITIES_OUT_LEN) {
248 netif_err(efx, drv, efx->net_dev,
249 "unable to read datapath firmware capabilities\n");
250 return -EIO;
253 nic_data->datapath_caps =
254 MCDI_DWORD(outbuf, GET_CAPABILITIES_OUT_FLAGS1);
256 if (outlen >= MC_CMD_GET_CAPABILITIES_V2_OUT_LEN) {
257 nic_data->datapath_caps2 = MCDI_DWORD(outbuf,
258 GET_CAPABILITIES_V2_OUT_FLAGS2);
259 nic_data->piobuf_size = MCDI_WORD(outbuf,
260 GET_CAPABILITIES_V2_OUT_SIZE_PIO_BUFF);
261 } else {
262 nic_data->datapath_caps2 = 0;
263 nic_data->piobuf_size = ER_DZ_TX_PIOBUF_SIZE;
266 /* record the DPCPU firmware IDs to determine VEB vswitching support.
268 nic_data->rx_dpcpu_fw_id =
269 MCDI_WORD(outbuf, GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID);
270 nic_data->tx_dpcpu_fw_id =
271 MCDI_WORD(outbuf, GET_CAPABILITIES_OUT_TX_DPCPU_FW_ID);
273 if (!(nic_data->datapath_caps &
274 (1 << MC_CMD_GET_CAPABILITIES_OUT_RX_PREFIX_LEN_14_LBN))) {
275 netif_err(efx, probe, efx->net_dev,
276 "current firmware does not support an RX prefix\n");
277 return -ENODEV;
280 if (outlen >= MC_CMD_GET_CAPABILITIES_V3_OUT_LEN) {
281 u8 vi_window_mode = MCDI_BYTE(outbuf,
282 GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE);
284 switch (vi_window_mode) {
285 case MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_8K:
286 efx->vi_stride = 8192;
287 break;
288 case MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_16K:
289 efx->vi_stride = 16384;
290 break;
291 case MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_64K:
292 efx->vi_stride = 65536;
293 break;
294 default:
295 netif_err(efx, probe, efx->net_dev,
296 "Unrecognised VI window mode %d\n",
297 vi_window_mode);
298 return -EIO;
300 netif_dbg(efx, probe, efx->net_dev, "vi_stride = %u\n",
301 efx->vi_stride);
302 } else {
303 /* keep default VI stride */
304 netif_dbg(efx, probe, efx->net_dev,
305 "firmware did not report VI window mode, assuming vi_stride = %u\n",
306 efx->vi_stride);
309 if (outlen >= MC_CMD_GET_CAPABILITIES_V4_OUT_LEN) {
310 efx->num_mac_stats = MCDI_WORD(outbuf,
311 GET_CAPABILITIES_V4_OUT_MAC_STATS_NUM_STATS);
312 netif_dbg(efx, probe, efx->net_dev,
313 "firmware reports num_mac_stats = %u\n",
314 efx->num_mac_stats);
315 } else {
316 /* leave num_mac_stats as the default value, MC_CMD_MAC_NSTATS */
317 netif_dbg(efx, probe, efx->net_dev,
318 "firmware did not report num_mac_stats, assuming %u\n",
319 efx->num_mac_stats);
322 return 0;
325 static void efx_ef10_read_licensed_features(struct efx_nic *efx)
327 MCDI_DECLARE_BUF(inbuf, MC_CMD_LICENSING_V3_IN_LEN);
328 MCDI_DECLARE_BUF(outbuf, MC_CMD_LICENSING_V3_OUT_LEN);
329 struct efx_ef10_nic_data *nic_data = efx->nic_data;
330 size_t outlen;
331 int rc;
333 MCDI_SET_DWORD(inbuf, LICENSING_V3_IN_OP,
334 MC_CMD_LICENSING_V3_IN_OP_REPORT_LICENSE);
335 rc = efx_mcdi_rpc_quiet(efx, MC_CMD_LICENSING_V3, inbuf, sizeof(inbuf),
336 outbuf, sizeof(outbuf), &outlen);
337 if (rc || (outlen < MC_CMD_LICENSING_V3_OUT_LEN))
338 return;
340 nic_data->licensed_features = MCDI_QWORD(outbuf,
341 LICENSING_V3_OUT_LICENSED_FEATURES);
344 static int efx_ef10_get_sysclk_freq(struct efx_nic *efx)
346 MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_CLOCK_OUT_LEN);
347 int rc;
349 rc = efx_mcdi_rpc(efx, MC_CMD_GET_CLOCK, NULL, 0,
350 outbuf, sizeof(outbuf), NULL);
351 if (rc)
352 return rc;
353 rc = MCDI_DWORD(outbuf, GET_CLOCK_OUT_SYS_FREQ);
354 return rc > 0 ? rc : -ERANGE;
357 static int efx_ef10_get_timer_workarounds(struct efx_nic *efx)
359 struct efx_ef10_nic_data *nic_data = efx->nic_data;
360 unsigned int implemented;
361 unsigned int enabled;
362 int rc;
364 nic_data->workaround_35388 = false;
365 nic_data->workaround_61265 = false;
367 rc = efx_mcdi_get_workarounds(efx, &implemented, &enabled);
369 if (rc == -ENOSYS) {
370 /* Firmware without GET_WORKAROUNDS - not a problem. */
371 rc = 0;
372 } else if (rc == 0) {
373 /* Bug61265 workaround is always enabled if implemented. */
374 if (enabled & MC_CMD_GET_WORKAROUNDS_OUT_BUG61265)
375 nic_data->workaround_61265 = true;
377 if (enabled & MC_CMD_GET_WORKAROUNDS_OUT_BUG35388) {
378 nic_data->workaround_35388 = true;
379 } else if (implemented & MC_CMD_GET_WORKAROUNDS_OUT_BUG35388) {
380 /* Workaround is implemented but not enabled.
381 * Try to enable it.
383 rc = efx_mcdi_set_workaround(efx,
384 MC_CMD_WORKAROUND_BUG35388,
385 true, NULL);
386 if (rc == 0)
387 nic_data->workaround_35388 = true;
388 /* If we failed to set the workaround just carry on. */
389 rc = 0;
393 netif_dbg(efx, probe, efx->net_dev,
394 "workaround for bug 35388 is %sabled\n",
395 nic_data->workaround_35388 ? "en" : "dis");
396 netif_dbg(efx, probe, efx->net_dev,
397 "workaround for bug 61265 is %sabled\n",
398 nic_data->workaround_61265 ? "en" : "dis");
400 return rc;
403 static void efx_ef10_process_timer_config(struct efx_nic *efx,
404 const efx_dword_t *data)
406 unsigned int max_count;
408 if (EFX_EF10_WORKAROUND_61265(efx)) {
409 efx->timer_quantum_ns = MCDI_DWORD(data,
410 GET_EVQ_TMR_PROPERTIES_OUT_MCDI_TMR_STEP_NS);
411 efx->timer_max_ns = MCDI_DWORD(data,
412 GET_EVQ_TMR_PROPERTIES_OUT_MCDI_TMR_MAX_NS);
413 } else if (EFX_EF10_WORKAROUND_35388(efx)) {
414 efx->timer_quantum_ns = MCDI_DWORD(data,
415 GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_NS_PER_COUNT);
416 max_count = MCDI_DWORD(data,
417 GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_MAX_COUNT);
418 efx->timer_max_ns = max_count * efx->timer_quantum_ns;
419 } else {
420 efx->timer_quantum_ns = MCDI_DWORD(data,
421 GET_EVQ_TMR_PROPERTIES_OUT_TMR_REG_NS_PER_COUNT);
422 max_count = MCDI_DWORD(data,
423 GET_EVQ_TMR_PROPERTIES_OUT_TMR_REG_MAX_COUNT);
424 efx->timer_max_ns = max_count * efx->timer_quantum_ns;
427 netif_dbg(efx, probe, efx->net_dev,
428 "got timer properties from MC: quantum %u ns; max %u ns\n",
429 efx->timer_quantum_ns, efx->timer_max_ns);
432 static int efx_ef10_get_timer_config(struct efx_nic *efx)
434 MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_LEN);
435 int rc;
437 rc = efx_ef10_get_timer_workarounds(efx);
438 if (rc)
439 return rc;
441 rc = efx_mcdi_rpc_quiet(efx, MC_CMD_GET_EVQ_TMR_PROPERTIES, NULL, 0,
442 outbuf, sizeof(outbuf), NULL);
444 if (rc == 0) {
445 efx_ef10_process_timer_config(efx, outbuf);
446 } else if (rc == -ENOSYS || rc == -EPERM) {
447 /* Not available - fall back to Huntington defaults. */
448 unsigned int quantum;
450 rc = efx_ef10_get_sysclk_freq(efx);
451 if (rc < 0)
452 return rc;
454 quantum = 1536000 / rc; /* 1536 cycles */
455 efx->timer_quantum_ns = quantum;
456 efx->timer_max_ns = efx->type->timer_period_max * quantum;
457 rc = 0;
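		/* Worked example (hypothetical clock value): if MC_CMD_GET_CLOCK
		 * reported a 200 MHz system clock, rc = 200 and the quantum above
		 * would be 1536000 / 200 = 7680 ns, giving
		 * timer_max_ns = timer_period_max * 7680 ns.
		 */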
458 } else {
459 efx_mcdi_display_error(efx, MC_CMD_GET_EVQ_TMR_PROPERTIES,
460 MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_LEN,
461 NULL, 0, rc);
464 return rc;
467 static int efx_ef10_get_mac_address_pf(struct efx_nic *efx, u8 *mac_address)
469 MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_MAC_ADDRESSES_OUT_LEN);
470 size_t outlen;
471 int rc;
473 BUILD_BUG_ON(MC_CMD_GET_MAC_ADDRESSES_IN_LEN != 0);
475 rc = efx_mcdi_rpc(efx, MC_CMD_GET_MAC_ADDRESSES, NULL, 0,
476 outbuf, sizeof(outbuf), &outlen);
477 if (rc)
478 return rc;
479 if (outlen < MC_CMD_GET_MAC_ADDRESSES_OUT_LEN)
480 return -EIO;
482 ether_addr_copy(mac_address,
483 MCDI_PTR(outbuf, GET_MAC_ADDRESSES_OUT_MAC_ADDR_BASE));
484 return 0;
487 static int efx_ef10_get_mac_address_vf(struct efx_nic *efx, u8 *mac_address)
489 MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_LEN);
490 MCDI_DECLARE_BUF(outbuf, MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMAX);
491 size_t outlen;
492 int num_addrs, rc;
494 MCDI_SET_DWORD(inbuf, VPORT_GET_MAC_ADDRESSES_IN_VPORT_ID,
495 EVB_PORT_ID_ASSIGNED);
496 rc = efx_mcdi_rpc(efx, MC_CMD_VPORT_GET_MAC_ADDRESSES, inbuf,
497 sizeof(inbuf), outbuf, sizeof(outbuf), &outlen);
499 if (rc)
500 return rc;
501 if (outlen < MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMIN)
502 return -EIO;
504 num_addrs = MCDI_DWORD(outbuf,
505 VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_COUNT);
507 WARN_ON(num_addrs != 1);
509 ether_addr_copy(mac_address,
510 MCDI_PTR(outbuf, VPORT_GET_MAC_ADDRESSES_OUT_MACADDR));
512 return 0;
515 static ssize_t efx_ef10_show_link_control_flag(struct device *dev,
516 struct device_attribute *attr,
517 char *buf)
519 struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
521 return sprintf(buf, "%d\n",
522 ((efx->mcdi->fn_flags) &
523 (1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL))
524 ? 1 : 0);
527 static ssize_t efx_ef10_show_primary_flag(struct device *dev,
528 struct device_attribute *attr,
529 char *buf)
531 struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
533 return sprintf(buf, "%d\n",
534 ((efx->mcdi->fn_flags) &
535 (1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_PRIMARY))
536 ? 1 : 0);
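/* Usage note (added commentary): these attributes are registered against the
 * PCI device in efx_ef10_probe(), so they should appear as
 * /sys/bus/pci/devices/<bdf>/link_control_flag and .../primary_flag, each
 * reading back "1" or "0" according to the MCDI function flags tested above.
 */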
539 static struct efx_ef10_vlan *efx_ef10_find_vlan(struct efx_nic *efx, u16 vid)
541 struct efx_ef10_nic_data *nic_data = efx->nic_data;
542 struct efx_ef10_vlan *vlan;
544 WARN_ON(!mutex_is_locked(&nic_data->vlan_lock));
546 list_for_each_entry(vlan, &nic_data->vlan_list, list) {
547 if (vlan->vid == vid)
548 return vlan;
551 return NULL;
554 static int efx_ef10_add_vlan(struct efx_nic *efx, u16 vid)
556 struct efx_ef10_nic_data *nic_data = efx->nic_data;
557 struct efx_ef10_vlan *vlan;
558 int rc;
560 mutex_lock(&nic_data->vlan_lock);
562 vlan = efx_ef10_find_vlan(efx, vid);
563 if (vlan) {
564 /* We add VID 0 on init. 8021q adds it on module init
565 * for all interfaces with the VLAN filtering feature.
566 */
567 if (vid == 0)
568 goto done_unlock;
569 netif_warn(efx, drv, efx->net_dev,
570 "VLAN %u already added\n", vid);
571 rc = -EALREADY;
572 goto fail_exist;
575 rc = -ENOMEM;
576 vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
577 if (!vlan)
578 goto fail_alloc;
580 vlan->vid = vid;
582 list_add_tail(&vlan->list, &nic_data->vlan_list);
584 if (efx->filter_state) {
585 mutex_lock(&efx->mac_lock);
586 down_write(&efx->filter_sem);
587 rc = efx_ef10_filter_add_vlan(efx, vlan->vid);
588 up_write(&efx->filter_sem);
589 mutex_unlock(&efx->mac_lock);
590 if (rc)
591 goto fail_filter_add_vlan;
594 done_unlock:
595 mutex_unlock(&nic_data->vlan_lock);
596 return 0;
598 fail_filter_add_vlan:
599 list_del(&vlan->list);
600 kfree(vlan);
601 fail_alloc:
602 fail_exist:
603 mutex_unlock(&nic_data->vlan_lock);
604 return rc;
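/* Locking note (added commentary): on the add path above the nesting order is
 * vlan_lock -> mac_lock -> filter_sem; other paths that take these locks
 * presumably need to respect the same ordering to avoid deadlock.
 */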
607 static void efx_ef10_del_vlan_internal(struct efx_nic *efx,
608 struct efx_ef10_vlan *vlan)
610 struct efx_ef10_nic_data *nic_data = efx->nic_data;
612 WARN_ON(!mutex_is_locked(&nic_data->vlan_lock));
614 if (efx->filter_state) {
615 down_write(&efx->filter_sem);
616 efx_ef10_filter_del_vlan(efx, vlan->vid);
617 up_write(&efx->filter_sem);
620 list_del(&vlan->list);
621 kfree(vlan);
624 static int efx_ef10_del_vlan(struct efx_nic *efx, u16 vid)
626 struct efx_ef10_nic_data *nic_data = efx->nic_data;
627 struct efx_ef10_vlan *vlan;
628 int rc = 0;
630 /* 8021q removes VID 0 on module unload for all interfaces
631 * with the VLAN filtering feature. We need to keep it to receive
632 * untagged traffic.
633 */
634 if (vid == 0)
635 return 0;
637 mutex_lock(&nic_data->vlan_lock);
639 vlan = efx_ef10_find_vlan(efx, vid);
640 if (!vlan) {
641 netif_err(efx, drv, efx->net_dev,
642 "VLAN %u to be deleted not found\n", vid);
643 rc = -ENOENT;
644 } else {
645 efx_ef10_del_vlan_internal(efx, vlan);
648 mutex_unlock(&nic_data->vlan_lock);
650 return rc;
653 static void efx_ef10_cleanup_vlans(struct efx_nic *efx)
655 struct efx_ef10_nic_data *nic_data = efx->nic_data;
656 struct efx_ef10_vlan *vlan, *next_vlan;
658 mutex_lock(&nic_data->vlan_lock);
659 list_for_each_entry_safe(vlan, next_vlan, &nic_data->vlan_list, list)
660 efx_ef10_del_vlan_internal(efx, vlan);
661 mutex_unlock(&nic_data->vlan_lock);
664 static DEVICE_ATTR(link_control_flag, 0444, efx_ef10_show_link_control_flag,
665 NULL);
666 static DEVICE_ATTR(primary_flag, 0444, efx_ef10_show_primary_flag, NULL);
668 static int efx_ef10_probe(struct efx_nic *efx)
670 struct efx_ef10_nic_data *nic_data;
671 int i, rc;
673 nic_data = kzalloc(sizeof(*nic_data), GFP_KERNEL);
674 if (!nic_data)
675 return -ENOMEM;
676 efx->nic_data = nic_data;
678 /* we assume later that we can copy from this buffer in dwords */
679 BUILD_BUG_ON(MCDI_CTL_SDU_LEN_MAX_V2 % 4);
681 rc = efx_nic_alloc_buffer(efx, &nic_data->mcdi_buf,
682 8 + MCDI_CTL_SDU_LEN_MAX_V2, GFP_KERNEL);
683 if (rc)
684 goto fail1;
686 /* Get the MC's warm boot count. In case it's rebooting right
687 * now, be prepared to retry.
689 i = 0;
690 for (;;) {
691 rc = efx_ef10_get_warm_boot_count(efx);
692 if (rc >= 0)
693 break;
694 if (++i == 5)
695 goto fail2;
696 ssleep(1);
698 nic_data->warm_boot_count = rc;
700 nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID;
702 nic_data->vport_id = EVB_PORT_ID_ASSIGNED;
704 /* In case we're recovering from a crash (kexec), we want to
705 * cancel any outstanding request by the previous user of this
706 * function. We send a special message using the least
707 * significant bits of the 'high' (doorbell) register.
709 _efx_writed(efx, cpu_to_le32(1), ER_DZ_MC_DB_HWRD);
711 rc = efx_mcdi_init(efx);
712 if (rc)
713 goto fail2;
715 mutex_init(&nic_data->udp_tunnels_lock);
717 /* Reset (most) configuration for this function */
718 rc = efx_mcdi_reset(efx, RESET_TYPE_ALL);
719 if (rc)
720 goto fail3;
722 /* Enable event logging */
723 rc = efx_mcdi_log_ctrl(efx, true, false, 0);
724 if (rc)
725 goto fail3;
727 rc = device_create_file(&efx->pci_dev->dev,
728 &dev_attr_link_control_flag);
729 if (rc)
730 goto fail3;
732 rc = device_create_file(&efx->pci_dev->dev, &dev_attr_primary_flag);
733 if (rc)
734 goto fail4;
736 rc = efx_ef10_get_pf_index(efx);
737 if (rc)
738 goto fail5;
740 rc = efx_ef10_init_datapath_caps(efx);
741 if (rc < 0)
742 goto fail5;
744 efx_ef10_read_licensed_features(efx);
746 /* We can have one VI for each vi_stride-byte region.
747 * However, until we use TX option descriptors we need two TX queues
748 * per channel.
750 efx->max_channels = min_t(unsigned int,
751 EFX_MAX_CHANNELS,
752 efx_ef10_mem_map_size(efx) /
753 (efx->vi_stride * EFX_TXQ_TYPES));
754 efx->max_tx_channels = efx->max_channels;
755 if (WARN_ON(efx->max_channels == 0)) {
756 rc = -EIO;
757 goto fail5;
760 efx->rx_packet_len_offset =
761 ES_DZ_RX_PREFIX_PKTLEN_OFST - ES_DZ_RX_PREFIX_SIZE;
763 if (nic_data->datapath_caps &
764 (1 << MC_CMD_GET_CAPABILITIES_OUT_RX_INCLUDE_FCS_LBN))
765 efx->net_dev->hw_features |= NETIF_F_RXFCS;
767 rc = efx_mcdi_port_get_number(efx);
768 if (rc < 0)
769 goto fail5;
770 efx->port_num = rc;
772 rc = efx->type->get_mac_address(efx, efx->net_dev->perm_addr);
773 if (rc)
774 goto fail5;
776 rc = efx_ef10_get_timer_config(efx);
777 if (rc < 0)
778 goto fail5;
780 rc = efx_mcdi_mon_probe(efx);
781 if (rc && rc != -EPERM)
782 goto fail5;
784 efx_ptp_defer_probe_with_channel(efx);
786 #ifdef CONFIG_SFC_SRIOV
787 if ((efx->pci_dev->physfn) && (!efx->pci_dev->is_physfn)) {
788 struct pci_dev *pci_dev_pf = efx->pci_dev->physfn;
789 struct efx_nic *efx_pf = pci_get_drvdata(pci_dev_pf);
791 efx_pf->type->get_mac_address(efx_pf, nic_data->port_id);
792 } else
793 #endif
794 ether_addr_copy(nic_data->port_id, efx->net_dev->perm_addr);
796 INIT_LIST_HEAD(&nic_data->vlan_list);
797 mutex_init(&nic_data->vlan_lock);
799 /* Add unspecified VID to support VLAN filtering being disabled */
800 rc = efx_ef10_add_vlan(efx, EFX_FILTER_VID_UNSPEC);
801 if (rc)
802 goto fail_add_vid_unspec;
804 /* If VLAN filtering is enabled, we need VID 0 to get untagged
805 * traffic. It is added automatically if the 8021q module is loaded,
806 * but we can't rely on that since the module may not be loaded.
807 */
808 rc = efx_ef10_add_vlan(efx, 0);
809 if (rc)
810 goto fail_add_vid_0;
812 return 0;
814 fail_add_vid_0:
815 efx_ef10_cleanup_vlans(efx);
816 fail_add_vid_unspec:
817 mutex_destroy(&nic_data->vlan_lock);
818 efx_ptp_remove(efx);
819 efx_mcdi_mon_remove(efx);
820 fail5:
821 device_remove_file(&efx->pci_dev->dev, &dev_attr_primary_flag);
822 fail4:
823 device_remove_file(&efx->pci_dev->dev, &dev_attr_link_control_flag);
824 fail3:
825 efx_mcdi_detach(efx);
827 mutex_lock(&nic_data->udp_tunnels_lock);
828 memset(nic_data->udp_tunnels, 0, sizeof(nic_data->udp_tunnels));
829 (void)efx_ef10_set_udp_tnl_ports(efx, true);
830 mutex_unlock(&nic_data->udp_tunnels_lock);
831 mutex_destroy(&nic_data->udp_tunnels_lock);
833 efx_mcdi_fini(efx);
834 fail2:
835 efx_nic_free_buffer(efx, &nic_data->mcdi_buf);
836 fail1:
837 kfree(nic_data);
838 efx->nic_data = NULL;
839 return rc;
842 static int efx_ef10_free_vis(struct efx_nic *efx)
844 MCDI_DECLARE_BUF_ERR(outbuf);
845 size_t outlen;
846 int rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FREE_VIS, NULL, 0,
847 outbuf, sizeof(outbuf), &outlen);
849 /* -EALREADY means nothing to free, so ignore */
850 if (rc == -EALREADY)
851 rc = 0;
852 if (rc)
853 efx_mcdi_display_error(efx, MC_CMD_FREE_VIS, 0, outbuf, outlen,
854 rc);
855 return rc;
858 #ifdef EFX_USE_PIO
860 static void efx_ef10_free_piobufs(struct efx_nic *efx)
862 struct efx_ef10_nic_data *nic_data = efx->nic_data;
863 MCDI_DECLARE_BUF(inbuf, MC_CMD_FREE_PIOBUF_IN_LEN);
864 unsigned int i;
865 int rc;
867 BUILD_BUG_ON(MC_CMD_FREE_PIOBUF_OUT_LEN != 0);
869 for (i = 0; i < nic_data->n_piobufs; i++) {
870 MCDI_SET_DWORD(inbuf, FREE_PIOBUF_IN_PIOBUF_HANDLE,
871 nic_data->piobuf_handle[i]);
872 rc = efx_mcdi_rpc(efx, MC_CMD_FREE_PIOBUF, inbuf, sizeof(inbuf),
873 NULL, 0, NULL);
874 WARN_ON(rc);
877 nic_data->n_piobufs = 0;
880 static int efx_ef10_alloc_piobufs(struct efx_nic *efx, unsigned int n)
882 struct efx_ef10_nic_data *nic_data = efx->nic_data;
883 MCDI_DECLARE_BUF(outbuf, MC_CMD_ALLOC_PIOBUF_OUT_LEN);
884 unsigned int i;
885 size_t outlen;
886 int rc = 0;
888 BUILD_BUG_ON(MC_CMD_ALLOC_PIOBUF_IN_LEN != 0);
890 for (i = 0; i < n; i++) {
891 rc = efx_mcdi_rpc_quiet(efx, MC_CMD_ALLOC_PIOBUF, NULL, 0,
892 outbuf, sizeof(outbuf), &outlen);
893 if (rc) {
894 /* Don't display the MC error if we didn't have space
895 * for a VF.
897 if (!(efx_ef10_is_vf(efx) && rc == -ENOSPC))
898 efx_mcdi_display_error(efx, MC_CMD_ALLOC_PIOBUF,
899 0, outbuf, outlen, rc);
900 break;
902 if (outlen < MC_CMD_ALLOC_PIOBUF_OUT_LEN) {
903 rc = -EIO;
904 break;
906 nic_data->piobuf_handle[i] =
907 MCDI_DWORD(outbuf, ALLOC_PIOBUF_OUT_PIOBUF_HANDLE);
908 netif_dbg(efx, probe, efx->net_dev,
909 "allocated PIO buffer %u handle %x\n", i,
910 nic_data->piobuf_handle[i]);
913 nic_data->n_piobufs = i;
914 if (rc)
915 efx_ef10_free_piobufs(efx);
916 return rc;
919 static int efx_ef10_link_piobufs(struct efx_nic *efx)
921 struct efx_ef10_nic_data *nic_data = efx->nic_data;
922 MCDI_DECLARE_BUF(inbuf, MC_CMD_LINK_PIOBUF_IN_LEN);
923 struct efx_channel *channel;
924 struct efx_tx_queue *tx_queue;
925 unsigned int offset, index;
926 int rc;
928 BUILD_BUG_ON(MC_CMD_LINK_PIOBUF_OUT_LEN != 0);
929 BUILD_BUG_ON(MC_CMD_UNLINK_PIOBUF_OUT_LEN != 0);
931 /* Link a buffer to each VI in the write-combining mapping */
932 for (index = 0; index < nic_data->n_piobufs; ++index) {
933 MCDI_SET_DWORD(inbuf, LINK_PIOBUF_IN_PIOBUF_HANDLE,
934 nic_data->piobuf_handle[index]);
935 MCDI_SET_DWORD(inbuf, LINK_PIOBUF_IN_TXQ_INSTANCE,
936 nic_data->pio_write_vi_base + index);
937 rc = efx_mcdi_rpc(efx, MC_CMD_LINK_PIOBUF,
938 inbuf, MC_CMD_LINK_PIOBUF_IN_LEN,
939 NULL, 0, NULL);
940 if (rc) {
941 netif_err(efx, drv, efx->net_dev,
942 "failed to link VI %u to PIO buffer %u (%d)\n",
943 nic_data->pio_write_vi_base + index, index,
944 rc);
945 goto fail;
947 netif_dbg(efx, probe, efx->net_dev,
948 "linked VI %u to PIO buffer %u\n",
949 nic_data->pio_write_vi_base + index, index);
952 /* Link a buffer to each TX queue */
953 efx_for_each_channel(channel, efx) {
954 /* Extra channels, even those with TXQs (PTP), do not require
955 * PIO resources.
957 if (!channel->type->want_pio)
958 continue;
959 efx_for_each_channel_tx_queue(tx_queue, channel) {
960 /* We assign the PIO buffers to queues in
961 * reverse order to allow for the following
962 * special case.
964 offset = ((efx->tx_channel_offset + efx->n_tx_channels -
965 tx_queue->channel->channel - 1) *
966 efx_piobuf_size);
967 index = offset / nic_data->piobuf_size;
968 offset = offset % nic_data->piobuf_size;
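			/* Worked example (illustrative sizes, not taken from the
			 * hardware spec): with efx_piobuf_size = 256 and
			 * nic_data->piobuf_size = 2048, eight TX queues share each
			 * PIO buffer; the queue 10 channels from the end starts at
			 * 10 * 256 = 2560, so index = 2560 / 2048 = 1 and
			 * offset = 2560 % 2048 = 512 within that buffer.
			 */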
970 /* When the host page size is 4K, the first
971 * host page in the WC mapping may be within
972 * the same VI page as the last TX queue. We
973 * can only link one buffer to each VI.
975 if (tx_queue->queue == nic_data->pio_write_vi_base) {
976 BUG_ON(index != 0);
977 rc = 0;
978 } else {
979 MCDI_SET_DWORD(inbuf,
980 LINK_PIOBUF_IN_PIOBUF_HANDLE,
981 nic_data->piobuf_handle[index]);
982 MCDI_SET_DWORD(inbuf,
983 LINK_PIOBUF_IN_TXQ_INSTANCE,
984 tx_queue->queue);
985 rc = efx_mcdi_rpc(efx, MC_CMD_LINK_PIOBUF,
986 inbuf, MC_CMD_LINK_PIOBUF_IN_LEN,
987 NULL, 0, NULL);
990 if (rc) {
991 /* This is non-fatal; the TX path just
992 * won't use PIO for this queue
994 netif_err(efx, drv, efx->net_dev,
995 "failed to link VI %u to PIO buffer %u (%d)\n",
996 tx_queue->queue, index, rc);
997 tx_queue->piobuf = NULL;
998 } else {
999 tx_queue->piobuf =
1000 nic_data->pio_write_base +
1001 index * efx->vi_stride + offset;
1002 tx_queue->piobuf_offset = offset;
1003 netif_dbg(efx, probe, efx->net_dev,
1004 "linked VI %u to PIO buffer %u offset %x addr %p\n",
1005 tx_queue->queue, index,
1006 tx_queue->piobuf_offset,
1007 tx_queue->piobuf);
1012 return 0;
1014 fail:
1015 /* inbuf was defined for MC_CMD_LINK_PIOBUF. We can use the same
1016 * buffer for MC_CMD_UNLINK_PIOBUF because it's shorter.
1018 BUILD_BUG_ON(MC_CMD_LINK_PIOBUF_IN_LEN < MC_CMD_UNLINK_PIOBUF_IN_LEN);
1019 while (index--) {
1020 MCDI_SET_DWORD(inbuf, UNLINK_PIOBUF_IN_TXQ_INSTANCE,
1021 nic_data->pio_write_vi_base + index);
1022 efx_mcdi_rpc(efx, MC_CMD_UNLINK_PIOBUF,
1023 inbuf, MC_CMD_UNLINK_PIOBUF_IN_LEN,
1024 NULL, 0, NULL);
1026 return rc;
1029 static void efx_ef10_forget_old_piobufs(struct efx_nic *efx)
1031 struct efx_channel *channel;
1032 struct efx_tx_queue *tx_queue;
1034 /* All our existing PIO buffers went away */
1035 efx_for_each_channel(channel, efx)
1036 efx_for_each_channel_tx_queue(tx_queue, channel)
1037 tx_queue->piobuf = NULL;
1040 #else /* !EFX_USE_PIO */
1042 static int efx_ef10_alloc_piobufs(struct efx_nic *efx, unsigned int n)
1044 return n == 0 ? 0 : -ENOBUFS;
1047 static int efx_ef10_link_piobufs(struct efx_nic *efx)
1049 return 0;
1052 static void efx_ef10_free_piobufs(struct efx_nic *efx)
1056 static void efx_ef10_forget_old_piobufs(struct efx_nic *efx)
1060 #endif /* EFX_USE_PIO */
1062 static void efx_ef10_remove(struct efx_nic *efx)
1064 struct efx_ef10_nic_data *nic_data = efx->nic_data;
1065 int rc;
1067 #ifdef CONFIG_SFC_SRIOV
1068 struct efx_ef10_nic_data *nic_data_pf;
1069 struct pci_dev *pci_dev_pf;
1070 struct efx_nic *efx_pf;
1071 struct ef10_vf *vf;
1073 if (efx->pci_dev->is_virtfn) {
1074 pci_dev_pf = efx->pci_dev->physfn;
1075 if (pci_dev_pf) {
1076 efx_pf = pci_get_drvdata(pci_dev_pf);
1077 nic_data_pf = efx_pf->nic_data;
1078 vf = nic_data_pf->vf + nic_data->vf_index;
1079 vf->efx = NULL;
1080 } else
1081 netif_info(efx, drv, efx->net_dev,
1082 "Could not get the PF id from VF\n");
1084 #endif
1086 efx_ef10_cleanup_vlans(efx);
1087 mutex_destroy(&nic_data->vlan_lock);
1089 efx_ptp_remove(efx);
1091 efx_mcdi_mon_remove(efx);
1093 efx_ef10_rx_free_indir_table(efx);
1095 if (nic_data->wc_membase)
1096 iounmap(nic_data->wc_membase);
1098 rc = efx_ef10_free_vis(efx);
1099 WARN_ON(rc != 0);
1101 if (!nic_data->must_restore_piobufs)
1102 efx_ef10_free_piobufs(efx);
1104 device_remove_file(&efx->pci_dev->dev, &dev_attr_primary_flag);
1105 device_remove_file(&efx->pci_dev->dev, &dev_attr_link_control_flag);
1107 efx_mcdi_detach(efx);
1109 memset(nic_data->udp_tunnels, 0, sizeof(nic_data->udp_tunnels));
1110 mutex_lock(&nic_data->udp_tunnels_lock);
1111 (void)efx_ef10_set_udp_tnl_ports(efx, true);
1112 mutex_unlock(&nic_data->udp_tunnels_lock);
1114 mutex_destroy(&nic_data->udp_tunnels_lock);
1116 efx_mcdi_fini(efx);
1117 efx_nic_free_buffer(efx, &nic_data->mcdi_buf);
1118 kfree(nic_data);
1121 static int efx_ef10_probe_pf(struct efx_nic *efx)
1123 return efx_ef10_probe(efx);
1126 int efx_ef10_vadaptor_query(struct efx_nic *efx, unsigned int port_id,
1127 u32 *port_flags, u32 *vadaptor_flags,
1128 unsigned int *vlan_tags)
1130 struct efx_ef10_nic_data *nic_data = efx->nic_data;
1131 MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_QUERY_IN_LEN);
1132 MCDI_DECLARE_BUF(outbuf, MC_CMD_VADAPTOR_QUERY_OUT_LEN);
1133 size_t outlen;
1134 int rc;
1136 if (nic_data->datapath_caps &
1137 (1 << MC_CMD_GET_CAPABILITIES_OUT_VADAPTOR_QUERY_LBN)) {
1138 MCDI_SET_DWORD(inbuf, VADAPTOR_QUERY_IN_UPSTREAM_PORT_ID,
1139 port_id);
1141 rc = efx_mcdi_rpc(efx, MC_CMD_VADAPTOR_QUERY, inbuf, sizeof(inbuf),
1142 outbuf, sizeof(outbuf), &outlen);
1143 if (rc)
1144 return rc;
1146 if (outlen < sizeof(outbuf)) {
1147 rc = -EIO;
1148 return rc;
1152 if (port_flags)
1153 *port_flags = MCDI_DWORD(outbuf, VADAPTOR_QUERY_OUT_PORT_FLAGS);
1154 if (vadaptor_flags)
1155 *vadaptor_flags =
1156 MCDI_DWORD(outbuf, VADAPTOR_QUERY_OUT_VADAPTOR_FLAGS);
1157 if (vlan_tags)
1158 *vlan_tags =
1159 MCDI_DWORD(outbuf,
1160 VADAPTOR_QUERY_OUT_NUM_AVAILABLE_VLAN_TAGS);
1162 return 0;
1165 int efx_ef10_vadaptor_alloc(struct efx_nic *efx, unsigned int port_id)
1167 MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_ALLOC_IN_LEN);
1169 MCDI_SET_DWORD(inbuf, VADAPTOR_ALLOC_IN_UPSTREAM_PORT_ID, port_id);
1170 return efx_mcdi_rpc(efx, MC_CMD_VADAPTOR_ALLOC, inbuf, sizeof(inbuf),
1171 NULL, 0, NULL);
1174 int efx_ef10_vadaptor_free(struct efx_nic *efx, unsigned int port_id)
1176 MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_FREE_IN_LEN);
1178 MCDI_SET_DWORD(inbuf, VADAPTOR_FREE_IN_UPSTREAM_PORT_ID, port_id);
1179 return efx_mcdi_rpc(efx, MC_CMD_VADAPTOR_FREE, inbuf, sizeof(inbuf),
1180 NULL, 0, NULL);
1183 int efx_ef10_vport_add_mac(struct efx_nic *efx,
1184 unsigned int port_id, u8 *mac)
1186 MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_LEN);
1188 MCDI_SET_DWORD(inbuf, VPORT_ADD_MAC_ADDRESS_IN_VPORT_ID, port_id);
1189 ether_addr_copy(MCDI_PTR(inbuf, VPORT_ADD_MAC_ADDRESS_IN_MACADDR), mac);
1191 return efx_mcdi_rpc(efx, MC_CMD_VPORT_ADD_MAC_ADDRESS, inbuf,
1192 sizeof(inbuf), NULL, 0, NULL);
1195 int efx_ef10_vport_del_mac(struct efx_nic *efx,
1196 unsigned int port_id, u8 *mac)
1198 MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_LEN);
1200 MCDI_SET_DWORD(inbuf, VPORT_DEL_MAC_ADDRESS_IN_VPORT_ID, port_id);
1201 ether_addr_copy(MCDI_PTR(inbuf, VPORT_DEL_MAC_ADDRESS_IN_MACADDR), mac);
1203 return efx_mcdi_rpc(efx, MC_CMD_VPORT_DEL_MAC_ADDRESS, inbuf,
1204 sizeof(inbuf), NULL, 0, NULL);
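/* Usage sketch (added commentary, illustrative only): a caller adding and
 * later removing a unicast address on the function's own vport might do:
 *
 *	rc = efx_ef10_vport_add_mac(efx, nic_data->vport_id, mac);
 *	...
 *	rc = efx_ef10_vport_del_mac(efx, nic_data->vport_id, mac);
 *
 * where "mac" is a six-byte Ethernet address and nic_data->vport_id is the
 * vport handle set up at probe time (EVB_PORT_ID_ASSIGNED by default).
 */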
1207 #ifdef CONFIG_SFC_SRIOV
1208 static int efx_ef10_probe_vf(struct efx_nic *efx)
1210 int rc;
1211 struct pci_dev *pci_dev_pf;
1213 /* If the parent PF has no VF data structure, it doesn't know about this
1214 * VF, so fail the probe. The VF needs to be re-created. This can happen
1215 * if the PF driver is unloaded while the VF is assigned to a guest.
1216 */
1217 pci_dev_pf = efx->pci_dev->physfn;
1218 if (pci_dev_pf) {
1219 struct efx_nic *efx_pf = pci_get_drvdata(pci_dev_pf);
1220 struct efx_ef10_nic_data *nic_data_pf = efx_pf->nic_data;
1222 if (!nic_data_pf->vf) {
1223 netif_info(efx, drv, efx->net_dev,
1224 "The VF cannot link to its parent PF; "
1225 "please destroy and re-create the VF\n");
1226 return -EBUSY;
1230 rc = efx_ef10_probe(efx);
1231 if (rc)
1232 return rc;
1234 rc = efx_ef10_get_vf_index(efx);
1235 if (rc)
1236 goto fail;
1238 if (efx->pci_dev->is_virtfn) {
1239 if (efx->pci_dev->physfn) {
1240 struct efx_nic *efx_pf =
1241 pci_get_drvdata(efx->pci_dev->physfn);
1242 struct efx_ef10_nic_data *nic_data_p = efx_pf->nic_data;
1243 struct efx_ef10_nic_data *nic_data = efx->nic_data;
1245 nic_data_p->vf[nic_data->vf_index].efx = efx;
1246 nic_data_p->vf[nic_data->vf_index].pci_dev =
1247 efx->pci_dev;
1248 } else
1249 netif_info(efx, drv, efx->net_dev,
1250 "Could not get the PF id from VF\n");
1253 return 0;
1255 fail:
1256 efx_ef10_remove(efx);
1257 return rc;
1259 #else
1260 static int efx_ef10_probe_vf(struct efx_nic *efx __attribute__ ((unused)))
1262 return 0;
1264 #endif
1266 static int efx_ef10_alloc_vis(struct efx_nic *efx,
1267 unsigned int min_vis, unsigned int max_vis)
1269 MCDI_DECLARE_BUF(inbuf, MC_CMD_ALLOC_VIS_IN_LEN);
1270 MCDI_DECLARE_BUF(outbuf, MC_CMD_ALLOC_VIS_OUT_LEN);
1271 struct efx_ef10_nic_data *nic_data = efx->nic_data;
1272 size_t outlen;
1273 int rc;
1275 MCDI_SET_DWORD(inbuf, ALLOC_VIS_IN_MIN_VI_COUNT, min_vis);
1276 MCDI_SET_DWORD(inbuf, ALLOC_VIS_IN_MAX_VI_COUNT, max_vis);
1277 rc = efx_mcdi_rpc(efx, MC_CMD_ALLOC_VIS, inbuf, sizeof(inbuf),
1278 outbuf, sizeof(outbuf), &outlen);
1279 if (rc != 0)
1280 return rc;
1282 if (outlen < MC_CMD_ALLOC_VIS_OUT_LEN)
1283 return -EIO;
1285 netif_dbg(efx, drv, efx->net_dev, "base VI is 0x%03x\n",
1286 MCDI_DWORD(outbuf, ALLOC_VIS_OUT_VI_BASE));
1288 nic_data->vi_base = MCDI_DWORD(outbuf, ALLOC_VIS_OUT_VI_BASE);
1289 nic_data->n_allocated_vis = MCDI_DWORD(outbuf, ALLOC_VIS_OUT_VI_COUNT);
1290 return 0;
1293 /* Note that the failure path of this function does not free
1294 * resources, as this will be done by efx_ef10_remove().
1296 static int efx_ef10_dimension_resources(struct efx_nic *efx)
1298 struct efx_ef10_nic_data *nic_data = efx->nic_data;
1299 unsigned int uc_mem_map_size, wc_mem_map_size;
1300 unsigned int min_vis = max(EFX_TXQ_TYPES,
1301 efx_separate_tx_channels ? 2 : 1);
1302 unsigned int channel_vis, pio_write_vi_base, max_vis;
1303 void __iomem *membase;
1304 int rc;
1306 channel_vis = max(efx->n_channels,
1307 (efx->n_tx_channels + efx->n_extra_tx_channels) *
1308 EFX_TXQ_TYPES);
1310 #ifdef EFX_USE_PIO
1311 /* Try to allocate PIO buffers if wanted and if the full
1312 * number of PIO buffers would be sufficient to allocate one
1313 * copy-buffer per TX channel. Failure is non-fatal, as there
1314 * are only a small number of PIO buffers shared between all
1315 * functions of the controller.
1317 if (efx_piobuf_size != 0 &&
1318 nic_data->piobuf_size / efx_piobuf_size * EF10_TX_PIOBUF_COUNT >=
1319 efx->n_tx_channels) {
1320 unsigned int n_piobufs =
1321 DIV_ROUND_UP(efx->n_tx_channels,
1322 nic_data->piobuf_size / efx_piobuf_size);
1324 rc = efx_ef10_alloc_piobufs(efx, n_piobufs);
1325 if (rc == -ENOSPC)
1326 netif_dbg(efx, probe, efx->net_dev,
1327 "out of PIO buffers; cannot allocate more\n");
1328 else if (rc == -EPERM)
1329 netif_dbg(efx, probe, efx->net_dev,
1330 "not permitted to allocate PIO buffers\n");
1331 else if (rc)
1332 netif_err(efx, probe, efx->net_dev,
1333 "failed to allocate PIO buffers (%d)\n", rc);
1334 else
1335 netif_dbg(efx, probe, efx->net_dev,
1336 "allocated %u PIO buffers\n", n_piobufs);
1338 #else
1339 nic_data->n_piobufs = 0;
1340 #endif
1342 /* PIO buffers should be mapped with write-combining enabled,
1343 * and we want to make single UC and WC mappings rather than
1344 * several of each (in fact that's the only option if host
1345 * page size is >4K). So we may allocate some extra VIs just
1346 * for writing PIO buffers through.
1348 * The UC mapping contains (channel_vis - 1) complete VIs and the
1349 * first 4K of the next VI. Then the WC mapping begins with
1350 * the remainder of this last VI.
1352 uc_mem_map_size = PAGE_ALIGN((channel_vis - 1) * efx->vi_stride +
1353 ER_DZ_TX_PIOBUF);
1354 if (nic_data->n_piobufs) {
1355 /* pio_write_vi_base rounds down to give the number of complete
1356 * VIs inside the UC mapping.
1358 pio_write_vi_base = uc_mem_map_size / efx->vi_stride;
1359 wc_mem_map_size = (PAGE_ALIGN((pio_write_vi_base +
1360 nic_data->n_piobufs) *
1361 efx->vi_stride) -
1362 uc_mem_map_size);
1363 max_vis = pio_write_vi_base + nic_data->n_piobufs;
1364 } else {
1365 pio_write_vi_base = 0;
1366 wc_mem_map_size = 0;
1367 max_vis = channel_vis;
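	/* Worked example (hypothetical values, assuming 4 KiB pages and
	 * ER_DZ_TX_PIOBUF = 4096): with vi_stride = 8192, channel_vis = 32 and
	 * two PIO buffers, uc_mem_map_size = PAGE_ALIGN(31 * 8192 + 4096) =
	 * 258048, pio_write_vi_base = 258048 / 8192 = 31, wc_mem_map_size =
	 * PAGE_ALIGN(33 * 8192) - 258048 = 12288 and max_vis = 33.
	 */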
1370 /* In case the last attached driver failed to free VIs, do it now */
1371 rc = efx_ef10_free_vis(efx);
1372 if (rc != 0)
1373 return rc;
1375 rc = efx_ef10_alloc_vis(efx, min_vis, max_vis);
1376 if (rc != 0)
1377 return rc;
1379 if (nic_data->n_allocated_vis < channel_vis) {
1380 netif_info(efx, drv, efx->net_dev,
1381 "Could not allocate enough VIs to satisfy RSS"
1382 " requirements. Performance may not be optimal.\n");
1383 /* We didn't get the VIs to populate our channels.
1384 * We could keep what we got but then we'd have more
1385 * interrupts than we need.
1386 * Instead calculate new max_channels and restart
1388 efx->max_channels = nic_data->n_allocated_vis;
1389 efx->max_tx_channels =
1390 nic_data->n_allocated_vis / EFX_TXQ_TYPES;
1392 efx_ef10_free_vis(efx);
1393 return -EAGAIN;
1396 /* If we didn't get enough VIs to map all the PIO buffers, free the
1397 * PIO buffers
1399 if (nic_data->n_piobufs &&
1400 nic_data->n_allocated_vis <
1401 pio_write_vi_base + nic_data->n_piobufs) {
1402 netif_dbg(efx, probe, efx->net_dev,
1403 "%u VIs are not sufficient to map %u PIO buffers\n",
1404 nic_data->n_allocated_vis, nic_data->n_piobufs);
1405 efx_ef10_free_piobufs(efx);
1408 /* Shrink the original UC mapping of the memory BAR */
1409 membase = ioremap_nocache(efx->membase_phys, uc_mem_map_size);
1410 if (!membase) {
1411 netif_err(efx, probe, efx->net_dev,
1412 "could not shrink memory BAR to %x\n",
1413 uc_mem_map_size);
1414 return -ENOMEM;
1416 iounmap(efx->membase);
1417 efx->membase = membase;
1419 /* Set up the WC mapping if needed */
1420 if (wc_mem_map_size) {
1421 nic_data->wc_membase = ioremap_wc(efx->membase_phys +
1422 uc_mem_map_size,
1423 wc_mem_map_size);
1424 if (!nic_data->wc_membase) {
1425 netif_err(efx, probe, efx->net_dev,
1426 "could not allocate WC mapping of size %x\n",
1427 wc_mem_map_size);
1428 return -ENOMEM;
1430 nic_data->pio_write_vi_base = pio_write_vi_base;
1431 nic_data->pio_write_base =
1432 nic_data->wc_membase +
1433 (pio_write_vi_base * efx->vi_stride + ER_DZ_TX_PIOBUF -
1434 uc_mem_map_size);
1436 rc = efx_ef10_link_piobufs(efx);
1437 if (rc)
1438 efx_ef10_free_piobufs(efx);
1441 netif_dbg(efx, probe, efx->net_dev,
1442 "memory BAR at %pa (virtual %p+%x UC, %p+%x WC)\n",
1443 &efx->membase_phys, efx->membase, uc_mem_map_size,
1444 nic_data->wc_membase, wc_mem_map_size);
1446 return 0;
1449 static int efx_ef10_init_nic(struct efx_nic *efx)
1451 struct efx_ef10_nic_data *nic_data = efx->nic_data;
1452 int rc;
1454 if (nic_data->must_check_datapath_caps) {
1455 rc = efx_ef10_init_datapath_caps(efx);
1456 if (rc)
1457 return rc;
1458 nic_data->must_check_datapath_caps = false;
1461 if (nic_data->must_realloc_vis) {
1462 /* We cannot let the number of VIs change now */
1463 rc = efx_ef10_alloc_vis(efx, nic_data->n_allocated_vis,
1464 nic_data->n_allocated_vis);
1465 if (rc)
1466 return rc;
1467 nic_data->must_realloc_vis = false;
1470 if (nic_data->must_restore_piobufs && nic_data->n_piobufs) {
1471 rc = efx_ef10_alloc_piobufs(efx, nic_data->n_piobufs);
1472 if (rc == 0) {
1473 rc = efx_ef10_link_piobufs(efx);
1474 if (rc)
1475 efx_ef10_free_piobufs(efx);
1478 /* Log an error on failure, but this is non-fatal.
1479 * Permission errors are less important - we've presumably
1480 * had the PIO buffer licence removed.
1482 if (rc == -EPERM)
1483 netif_dbg(efx, drv, efx->net_dev,
1484 "not permitted to restore PIO buffers\n");
1485 else if (rc)
1486 netif_err(efx, drv, efx->net_dev,
1487 "failed to restore PIO buffers (%d)\n", rc);
1488 nic_data->must_restore_piobufs = false;
1491 /* don't fail init if RSS setup doesn't work */
1492 rc = efx->type->rx_push_rss_config(efx, false, efx->rx_indir_table, NULL);
1493 efx->rss_active = (rc == 0);
1495 return 0;
1498 static void efx_ef10_reset_mc_allocations(struct efx_nic *efx)
1500 struct efx_ef10_nic_data *nic_data = efx->nic_data;
1501 #ifdef CONFIG_SFC_SRIOV
1502 unsigned int i;
1503 #endif
1505 /* All our allocations have been reset */
1506 nic_data->must_realloc_vis = true;
1507 nic_data->must_restore_filters = true;
1508 nic_data->must_restore_piobufs = true;
1509 efx_ef10_forget_old_piobufs(efx);
1510 nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID;
1512 /* Driver-created vswitches and vports must be re-created */
1513 nic_data->must_probe_vswitching = true;
1514 nic_data->vport_id = EVB_PORT_ID_ASSIGNED;
1515 #ifdef CONFIG_SFC_SRIOV
1516 if (nic_data->vf)
1517 for (i = 0; i < efx->vf_count; i++)
1518 nic_data->vf[i].vport_id = 0;
1519 #endif
1522 static enum reset_type efx_ef10_map_reset_reason(enum reset_type reason)
1524 if (reason == RESET_TYPE_MC_FAILURE)
1525 return RESET_TYPE_DATAPATH;
1527 return efx_mcdi_map_reset_reason(reason);
1530 static int efx_ef10_map_reset_flags(u32 *flags)
1532 enum {
1533 EF10_RESET_PORT = ((ETH_RESET_MAC | ETH_RESET_PHY) <<
1534 ETH_RESET_SHARED_SHIFT),
1535 EF10_RESET_MC = ((ETH_RESET_DMA | ETH_RESET_FILTER |
1536 ETH_RESET_OFFLOAD | ETH_RESET_MAC |
1537 ETH_RESET_PHY | ETH_RESET_MGMT) <<
1538 ETH_RESET_SHARED_SHIFT)
1541 /* We assume for now that our PCI function is permitted to
1542 * reset everything.
1545 if ((*flags & EF10_RESET_MC) == EF10_RESET_MC) {
1546 *flags &= ~EF10_RESET_MC;
1547 return RESET_TYPE_WORLD;
1550 if ((*flags & EF10_RESET_PORT) == EF10_RESET_PORT) {
1551 *flags &= ~EF10_RESET_PORT;
1552 return RESET_TYPE_ALL;
1555 /* no invisible reset implemented */
1557 return -EINVAL;
1560 static int efx_ef10_reset(struct efx_nic *efx, enum reset_type reset_type)
1562 int rc = efx_mcdi_reset(efx, reset_type);
1564 /* Unprivileged functions return -EPERM, but need to return success
1565 * here so that the datapath is brought back up.
1567 if (reset_type == RESET_TYPE_WORLD && rc == -EPERM)
1568 rc = 0;
1570 /* If it was a port reset, trigger reallocation of MC resources.
1571 * Note that on an MC reset nothing needs to be done now because we'll
1572 * detect the MC reset later and handle it then.
1573 * For an FLR, we never get an MC reset event, but the MC has reset all
1574 * resources assigned to us, so we have to trigger reallocation now.
1576 if ((reset_type == RESET_TYPE_ALL ||
1577 reset_type == RESET_TYPE_MCDI_TIMEOUT) && !rc)
1578 efx_ef10_reset_mc_allocations(efx);
1579 return rc;
1582 #define EF10_DMA_STAT(ext_name, mcdi_name) \
1583 [EF10_STAT_ ## ext_name] = \
1584 { #ext_name, 64, 8 * MC_CMD_MAC_ ## mcdi_name }
1585 #define EF10_DMA_INVIS_STAT(int_name, mcdi_name) \
1586 [EF10_STAT_ ## int_name] = \
1587 { NULL, 64, 8 * MC_CMD_MAC_ ## mcdi_name }
1588 #define EF10_OTHER_STAT(ext_name) \
1589 [EF10_STAT_ ## ext_name] = { #ext_name, 0, 0 }
1590 #define GENERIC_SW_STAT(ext_name) \
1591 [GENERIC_STAT_ ## ext_name] = { #ext_name, 0, 0 }
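/* Expansion example (added commentary): the first array entry below,
 * EF10_DMA_STAT(port_tx_bytes, TX_BYTES), expands to
 *   [EF10_STAT_port_tx_bytes] = { "port_tx_bytes", 64, 8 * MC_CMD_MAC_TX_BYTES },
 * i.e. a named 64-bit DMA statistic whose third field is the MCDI statistic
 * index scaled by 8, presumably because each MC statistic is an 8-byte field.
 */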
1593 static const struct efx_hw_stat_desc efx_ef10_stat_desc[EF10_STAT_COUNT] = {
1594 EF10_DMA_STAT(port_tx_bytes, TX_BYTES),
1595 EF10_DMA_STAT(port_tx_packets, TX_PKTS),
1596 EF10_DMA_STAT(port_tx_pause, TX_PAUSE_PKTS),
1597 EF10_DMA_STAT(port_tx_control, TX_CONTROL_PKTS),
1598 EF10_DMA_STAT(port_tx_unicast, TX_UNICAST_PKTS),
1599 EF10_DMA_STAT(port_tx_multicast, TX_MULTICAST_PKTS),
1600 EF10_DMA_STAT(port_tx_broadcast, TX_BROADCAST_PKTS),
1601 EF10_DMA_STAT(port_tx_lt64, TX_LT64_PKTS),
1602 EF10_DMA_STAT(port_tx_64, TX_64_PKTS),
1603 EF10_DMA_STAT(port_tx_65_to_127, TX_65_TO_127_PKTS),
1604 EF10_DMA_STAT(port_tx_128_to_255, TX_128_TO_255_PKTS),
1605 EF10_DMA_STAT(port_tx_256_to_511, TX_256_TO_511_PKTS),
1606 EF10_DMA_STAT(port_tx_512_to_1023, TX_512_TO_1023_PKTS),
1607 EF10_DMA_STAT(port_tx_1024_to_15xx, TX_1024_TO_15XX_PKTS),
1608 EF10_DMA_STAT(port_tx_15xx_to_jumbo, TX_15XX_TO_JUMBO_PKTS),
1609 EF10_DMA_STAT(port_rx_bytes, RX_BYTES),
1610 EF10_DMA_INVIS_STAT(port_rx_bytes_minus_good_bytes, RX_BAD_BYTES),
1611 EF10_OTHER_STAT(port_rx_good_bytes),
1612 EF10_OTHER_STAT(port_rx_bad_bytes),
1613 EF10_DMA_STAT(port_rx_packets, RX_PKTS),
1614 EF10_DMA_STAT(port_rx_good, RX_GOOD_PKTS),
1615 EF10_DMA_STAT(port_rx_bad, RX_BAD_FCS_PKTS),
1616 EF10_DMA_STAT(port_rx_pause, RX_PAUSE_PKTS),
1617 EF10_DMA_STAT(port_rx_control, RX_CONTROL_PKTS),
1618 EF10_DMA_STAT(port_rx_unicast, RX_UNICAST_PKTS),
1619 EF10_DMA_STAT(port_rx_multicast, RX_MULTICAST_PKTS),
1620 EF10_DMA_STAT(port_rx_broadcast, RX_BROADCAST_PKTS),
1621 EF10_DMA_STAT(port_rx_lt64, RX_UNDERSIZE_PKTS),
1622 EF10_DMA_STAT(port_rx_64, RX_64_PKTS),
1623 EF10_DMA_STAT(port_rx_65_to_127, RX_65_TO_127_PKTS),
1624 EF10_DMA_STAT(port_rx_128_to_255, RX_128_TO_255_PKTS),
1625 EF10_DMA_STAT(port_rx_256_to_511, RX_256_TO_511_PKTS),
1626 EF10_DMA_STAT(port_rx_512_to_1023, RX_512_TO_1023_PKTS),
1627 EF10_DMA_STAT(port_rx_1024_to_15xx, RX_1024_TO_15XX_PKTS),
1628 EF10_DMA_STAT(port_rx_15xx_to_jumbo, RX_15XX_TO_JUMBO_PKTS),
1629 EF10_DMA_STAT(port_rx_gtjumbo, RX_GTJUMBO_PKTS),
1630 EF10_DMA_STAT(port_rx_bad_gtjumbo, RX_JABBER_PKTS),
1631 EF10_DMA_STAT(port_rx_overflow, RX_OVERFLOW_PKTS),
1632 EF10_DMA_STAT(port_rx_align_error, RX_ALIGN_ERROR_PKTS),
1633 EF10_DMA_STAT(port_rx_length_error, RX_LENGTH_ERROR_PKTS),
1634 EF10_DMA_STAT(port_rx_nodesc_drops, RX_NODESC_DROPS),
1635 GENERIC_SW_STAT(rx_nodesc_trunc),
1636 GENERIC_SW_STAT(rx_noskb_drops),
1637 EF10_DMA_STAT(port_rx_pm_trunc_bb_overflow, PM_TRUNC_BB_OVERFLOW),
1638 EF10_DMA_STAT(port_rx_pm_discard_bb_overflow, PM_DISCARD_BB_OVERFLOW),
1639 EF10_DMA_STAT(port_rx_pm_trunc_vfifo_full, PM_TRUNC_VFIFO_FULL),
1640 EF10_DMA_STAT(port_rx_pm_discard_vfifo_full, PM_DISCARD_VFIFO_FULL),
1641 EF10_DMA_STAT(port_rx_pm_trunc_qbb, PM_TRUNC_QBB),
1642 EF10_DMA_STAT(port_rx_pm_discard_qbb, PM_DISCARD_QBB),
1643 EF10_DMA_STAT(port_rx_pm_discard_mapping, PM_DISCARD_MAPPING),
1644 EF10_DMA_STAT(port_rx_dp_q_disabled_packets, RXDP_Q_DISABLED_PKTS),
1645 EF10_DMA_STAT(port_rx_dp_di_dropped_packets, RXDP_DI_DROPPED_PKTS),
1646 EF10_DMA_STAT(port_rx_dp_streaming_packets, RXDP_STREAMING_PKTS),
1647 EF10_DMA_STAT(port_rx_dp_hlb_fetch, RXDP_HLB_FETCH_CONDITIONS),
1648 EF10_DMA_STAT(port_rx_dp_hlb_wait, RXDP_HLB_WAIT_CONDITIONS),
1649 EF10_DMA_STAT(rx_unicast, VADAPTER_RX_UNICAST_PACKETS),
1650 EF10_DMA_STAT(rx_unicast_bytes, VADAPTER_RX_UNICAST_BYTES),
1651 EF10_DMA_STAT(rx_multicast, VADAPTER_RX_MULTICAST_PACKETS),
1652 EF10_DMA_STAT(rx_multicast_bytes, VADAPTER_RX_MULTICAST_BYTES),
1653 EF10_DMA_STAT(rx_broadcast, VADAPTER_RX_BROADCAST_PACKETS),
1654 EF10_DMA_STAT(rx_broadcast_bytes, VADAPTER_RX_BROADCAST_BYTES),
1655 EF10_DMA_STAT(rx_bad, VADAPTER_RX_BAD_PACKETS),
1656 EF10_DMA_STAT(rx_bad_bytes, VADAPTER_RX_BAD_BYTES),
1657 EF10_DMA_STAT(rx_overflow, VADAPTER_RX_OVERFLOW),
1658 EF10_DMA_STAT(tx_unicast, VADAPTER_TX_UNICAST_PACKETS),
1659 EF10_DMA_STAT(tx_unicast_bytes, VADAPTER_TX_UNICAST_BYTES),
1660 EF10_DMA_STAT(tx_multicast, VADAPTER_TX_MULTICAST_PACKETS),
1661 EF10_DMA_STAT(tx_multicast_bytes, VADAPTER_TX_MULTICAST_BYTES),
1662 EF10_DMA_STAT(tx_broadcast, VADAPTER_TX_BROADCAST_PACKETS),
1663 EF10_DMA_STAT(tx_broadcast_bytes, VADAPTER_TX_BROADCAST_BYTES),
1664 EF10_DMA_STAT(tx_bad, VADAPTER_TX_BAD_PACKETS),
1665 EF10_DMA_STAT(tx_bad_bytes, VADAPTER_TX_BAD_BYTES),
1666 EF10_DMA_STAT(tx_overflow, VADAPTER_TX_OVERFLOW),
1667 EF10_DMA_STAT(fec_uncorrected_errors, FEC_UNCORRECTED_ERRORS),
1668 EF10_DMA_STAT(fec_corrected_errors, FEC_CORRECTED_ERRORS),
1669 EF10_DMA_STAT(fec_corrected_symbols_lane0, FEC_CORRECTED_SYMBOLS_LANE0),
1670 EF10_DMA_STAT(fec_corrected_symbols_lane1, FEC_CORRECTED_SYMBOLS_LANE1),
1671 EF10_DMA_STAT(fec_corrected_symbols_lane2, FEC_CORRECTED_SYMBOLS_LANE2),
1672 EF10_DMA_STAT(fec_corrected_symbols_lane3, FEC_CORRECTED_SYMBOLS_LANE3),
1673 EF10_DMA_STAT(ctpio_dmabuf_start, CTPIO_DMABUF_START),
1674 EF10_DMA_STAT(ctpio_vi_busy_fallback, CTPIO_VI_BUSY_FALLBACK),
1675 EF10_DMA_STAT(ctpio_long_write_success, CTPIO_LONG_WRITE_SUCCESS),
1676 EF10_DMA_STAT(ctpio_missing_dbell_fail, CTPIO_MISSING_DBELL_FAIL),
1677 EF10_DMA_STAT(ctpio_overflow_fail, CTPIO_OVERFLOW_FAIL),
1678 EF10_DMA_STAT(ctpio_underflow_fail, CTPIO_UNDERFLOW_FAIL),
1679 EF10_DMA_STAT(ctpio_timeout_fail, CTPIO_TIMEOUT_FAIL),
1680 EF10_DMA_STAT(ctpio_noncontig_wr_fail, CTPIO_NONCONTIG_WR_FAIL),
1681 EF10_DMA_STAT(ctpio_frm_clobber_fail, CTPIO_FRM_CLOBBER_FAIL),
1682 EF10_DMA_STAT(ctpio_invalid_wr_fail, CTPIO_INVALID_WR_FAIL),
1683 EF10_DMA_STAT(ctpio_vi_clobber_fallback, CTPIO_VI_CLOBBER_FALLBACK),
1684 EF10_DMA_STAT(ctpio_unqualified_fallback, CTPIO_UNQUALIFIED_FALLBACK),
1685 EF10_DMA_STAT(ctpio_runt_fallback, CTPIO_RUNT_FALLBACK),
1686 EF10_DMA_STAT(ctpio_success, CTPIO_SUCCESS),
1687 EF10_DMA_STAT(ctpio_fallback, CTPIO_FALLBACK),
1688 EF10_DMA_STAT(ctpio_poison, CTPIO_POISON),
1689 EF10_DMA_STAT(ctpio_erase, CTPIO_ERASE),
1692 #define HUNT_COMMON_STAT_MASK ((1ULL << EF10_STAT_port_tx_bytes) | \
1693 (1ULL << EF10_STAT_port_tx_packets) | \
1694 (1ULL << EF10_STAT_port_tx_pause) | \
1695 (1ULL << EF10_STAT_port_tx_unicast) | \
1696 (1ULL << EF10_STAT_port_tx_multicast) | \
1697 (1ULL << EF10_STAT_port_tx_broadcast) | \
1698 (1ULL << EF10_STAT_port_rx_bytes) | \
1699 (1ULL << \
1700 EF10_STAT_port_rx_bytes_minus_good_bytes) | \
1701 (1ULL << EF10_STAT_port_rx_good_bytes) | \
1702 (1ULL << EF10_STAT_port_rx_bad_bytes) | \
1703 (1ULL << EF10_STAT_port_rx_packets) | \
1704 (1ULL << EF10_STAT_port_rx_good) | \
1705 (1ULL << EF10_STAT_port_rx_bad) | \
1706 (1ULL << EF10_STAT_port_rx_pause) | \
1707 (1ULL << EF10_STAT_port_rx_control) | \
1708 (1ULL << EF10_STAT_port_rx_unicast) | \
1709 (1ULL << EF10_STAT_port_rx_multicast) | \
1710 (1ULL << EF10_STAT_port_rx_broadcast) | \
1711 (1ULL << EF10_STAT_port_rx_lt64) | \
1712 (1ULL << EF10_STAT_port_rx_64) | \
1713 (1ULL << EF10_STAT_port_rx_65_to_127) | \
1714 (1ULL << EF10_STAT_port_rx_128_to_255) | \
1715 (1ULL << EF10_STAT_port_rx_256_to_511) | \
1716 (1ULL << EF10_STAT_port_rx_512_to_1023) |\
1717 (1ULL << EF10_STAT_port_rx_1024_to_15xx) |\
1718 (1ULL << EF10_STAT_port_rx_15xx_to_jumbo) |\
1719 (1ULL << EF10_STAT_port_rx_gtjumbo) | \
1720 (1ULL << EF10_STAT_port_rx_bad_gtjumbo) |\
1721 (1ULL << EF10_STAT_port_rx_overflow) | \
1722 (1ULL << EF10_STAT_port_rx_nodesc_drops) |\
1723 (1ULL << GENERIC_STAT_rx_nodesc_trunc) | \
1724 (1ULL << GENERIC_STAT_rx_noskb_drops))
1726 /* On 7000 series NICs, these statistics are only provided by the 10G MAC.
1727 * For a 10G/40G switchable port we do not expose these because they might
1728 * not include all the packets they should.
1729 * On 8000 series NICs these statistics are always provided.
1731 #define HUNT_10G_ONLY_STAT_MASK ((1ULL << EF10_STAT_port_tx_control) | \
1732 (1ULL << EF10_STAT_port_tx_lt64) | \
1733 (1ULL << EF10_STAT_port_tx_64) | \
1734 (1ULL << EF10_STAT_port_tx_65_to_127) |\
1735 (1ULL << EF10_STAT_port_tx_128_to_255) |\
1736 (1ULL << EF10_STAT_port_tx_256_to_511) |\
1737 (1ULL << EF10_STAT_port_tx_512_to_1023) |\
1738 (1ULL << EF10_STAT_port_tx_1024_to_15xx) |\
1739 (1ULL << EF10_STAT_port_tx_15xx_to_jumbo))
1741 /* These statistics are only provided by the 40G MAC. For a 10G/40G
1742 * switchable port we do expose these because the errors will otherwise
1743 * be silent.
1745 #define HUNT_40G_EXTRA_STAT_MASK ((1ULL << EF10_STAT_port_rx_align_error) |\
1746 (1ULL << EF10_STAT_port_rx_length_error))
1748 /* These statistics are only provided if the firmware supports the
1749 * capability PM_AND_RXDP_COUNTERS.
1751 #define HUNT_PM_AND_RXDP_STAT_MASK ( \
1752 (1ULL << EF10_STAT_port_rx_pm_trunc_bb_overflow) | \
1753 (1ULL << EF10_STAT_port_rx_pm_discard_bb_overflow) | \
1754 (1ULL << EF10_STAT_port_rx_pm_trunc_vfifo_full) | \
1755 (1ULL << EF10_STAT_port_rx_pm_discard_vfifo_full) | \
1756 (1ULL << EF10_STAT_port_rx_pm_trunc_qbb) | \
1757 (1ULL << EF10_STAT_port_rx_pm_discard_qbb) | \
1758 (1ULL << EF10_STAT_port_rx_pm_discard_mapping) | \
1759 (1ULL << EF10_STAT_port_rx_dp_q_disabled_packets) | \
1760 (1ULL << EF10_STAT_port_rx_dp_di_dropped_packets) | \
1761 (1ULL << EF10_STAT_port_rx_dp_streaming_packets) | \
1762 (1ULL << EF10_STAT_port_rx_dp_hlb_fetch) | \
1763 (1ULL << EF10_STAT_port_rx_dp_hlb_wait))
1765 /* These statistics are only provided if the NIC supports MC_CMD_MAC_STATS_V2,
1766 * indicated by returning a value >= MC_CMD_MAC_NSTATS_V2 in
1767 * MC_CMD_GET_CAPABILITIES_V4_OUT_MAC_STATS_NUM_STATS.
1768 * These bits are in the second u64 of the raw mask.
1770 #define EF10_FEC_STAT_MASK ( \
1771 (1ULL << (EF10_STAT_fec_uncorrected_errors - 64)) | \
1772 (1ULL << (EF10_STAT_fec_corrected_errors - 64)) | \
1773 (1ULL << (EF10_STAT_fec_corrected_symbols_lane0 - 64)) | \
1774 (1ULL << (EF10_STAT_fec_corrected_symbols_lane1 - 64)) | \
1775 (1ULL << (EF10_STAT_fec_corrected_symbols_lane2 - 64)) | \
1776 (1ULL << (EF10_STAT_fec_corrected_symbols_lane3 - 64)))
1778 /* These statistics are only provided if the NIC supports MC_CMD_MAC_STATS_V3,
1779 * indicated by returning a value >= MC_CMD_MAC_NSTATS_V3 in
1780 * MC_CMD_GET_CAPABILITIES_V4_OUT_MAC_STATS_NUM_STATS.
1781 * These bits are in the second u64 of the raw mask.
1783 #define EF10_CTPIO_STAT_MASK ( \
1784 (1ULL << (EF10_STAT_ctpio_dmabuf_start - 64)) | \
1785 (1ULL << (EF10_STAT_ctpio_vi_busy_fallback - 64)) | \
1786 (1ULL << (EF10_STAT_ctpio_long_write_success - 64)) | \
1787 (1ULL << (EF10_STAT_ctpio_missing_dbell_fail - 64)) | \
1788 (1ULL << (EF10_STAT_ctpio_overflow_fail - 64)) | \
1789 (1ULL << (EF10_STAT_ctpio_underflow_fail - 64)) | \
1790 (1ULL << (EF10_STAT_ctpio_timeout_fail - 64)) | \
1791 (1ULL << (EF10_STAT_ctpio_noncontig_wr_fail - 64)) | \
1792 (1ULL << (EF10_STAT_ctpio_frm_clobber_fail - 64)) | \
1793 (1ULL << (EF10_STAT_ctpio_invalid_wr_fail - 64)) | \
1794 (1ULL << (EF10_STAT_ctpio_vi_clobber_fallback - 64)) | \
1795 (1ULL << (EF10_STAT_ctpio_unqualified_fallback - 64)) | \
1796 (1ULL << (EF10_STAT_ctpio_runt_fallback - 64)) | \
1797 (1ULL << (EF10_STAT_ctpio_success - 64)) | \
1798 (1ULL << (EF10_STAT_ctpio_fallback - 64)) | \
1799 (1ULL << (EF10_STAT_ctpio_poison - 64)) | \
1800 (1ULL << (EF10_STAT_ctpio_erase - 64)))
1802 static u64 efx_ef10_raw_stat_mask(struct efx_nic *efx)
1804 u64 raw_mask = HUNT_COMMON_STAT_MASK;
1805 u32 port_caps = efx_mcdi_phy_get_caps(efx);
1806 struct efx_ef10_nic_data *nic_data = efx->nic_data;
1808 if (!(efx->mcdi->fn_flags &
1809 1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL))
1810 return 0;
1812 if (port_caps & (1 << MC_CMD_PHY_CAP_40000FDX_LBN)) {
1813 raw_mask |= HUNT_40G_EXTRA_STAT_MASK;
1814 /* 8000 series have everything even at 40G */
1815 if (nic_data->datapath_caps2 &
1816 (1 << MC_CMD_GET_CAPABILITIES_V2_OUT_MAC_STATS_40G_TX_SIZE_BINS_LBN))
1817 raw_mask |= HUNT_10G_ONLY_STAT_MASK;
1818 } else {
1819 raw_mask |= HUNT_10G_ONLY_STAT_MASK;
1822 if (nic_data->datapath_caps &
1823 (1 << MC_CMD_GET_CAPABILITIES_OUT_PM_AND_RXDP_COUNTERS_LBN))
1824 raw_mask |= HUNT_PM_AND_RXDP_STAT_MASK;
1826 return raw_mask;
1829 static void efx_ef10_get_stat_mask(struct efx_nic *efx, unsigned long *mask)
1831 struct efx_ef10_nic_data *nic_data = efx->nic_data;
1832 u64 raw_mask[2];
1834 raw_mask[0] = efx_ef10_raw_stat_mask(efx);
1836 /* Only show vadaptor stats when EVB capability is present */
1837 if (nic_data->datapath_caps &
1838 (1 << MC_CMD_GET_CAPABILITIES_OUT_EVB_LBN)) {
1839 raw_mask[0] |= ~((1ULL << EF10_STAT_rx_unicast) - 1);
1840 raw_mask[1] = (1ULL << (EF10_STAT_V1_COUNT - 64)) - 1;
1841 } else {
1842 raw_mask[1] = 0;
1844 /* Only show FEC stats when NIC supports MC_CMD_MAC_STATS_V2 */
1845 if (efx->num_mac_stats >= MC_CMD_MAC_NSTATS_V2)
1846 raw_mask[1] |= EF10_FEC_STAT_MASK;
	/* CTPIO stats appear in V3. Only show them on devices that actually
	 * support CTPIO. Although this driver doesn't use CTPIO, others might,
	 * and we may be reporting the stats for the underlying port.
	 */
1852 if (efx->num_mac_stats >= MC_CMD_MAC_NSTATS_V3 &&
1853 (nic_data->datapath_caps2 &
1854 (1 << MC_CMD_GET_CAPABILITIES_V4_OUT_CTPIO_LBN)))
1855 raw_mask[1] |= EF10_CTPIO_STAT_MASK;
1857 #if BITS_PER_LONG == 64
1858 BUILD_BUG_ON(BITS_TO_LONGS(EF10_STAT_COUNT) != 2);
1859 mask[0] = raw_mask[0];
1860 mask[1] = raw_mask[1];
1861 #else
1862 BUILD_BUG_ON(BITS_TO_LONGS(EF10_STAT_COUNT) != 3);
1863 mask[0] = raw_mask[0] & 0xffffffff;
1864 mask[1] = raw_mask[0] >> 32;
1865 mask[2] = raw_mask[1] & 0xffffffff;
1866 #endif
1869 static size_t efx_ef10_describe_stats(struct efx_nic *efx, u8 *names)
1871 DECLARE_BITMAP(mask, EF10_STAT_COUNT);
1873 efx_ef10_get_stat_mask(efx, mask);
1874 return efx_nic_describe_stats(efx_ef10_stat_desc, EF10_STAT_COUNT,
1875 mask, names);
1878 static size_t efx_ef10_update_stats_common(struct efx_nic *efx, u64 *full_stats,
1879 struct rtnl_link_stats64 *core_stats)
1881 DECLARE_BITMAP(mask, EF10_STAT_COUNT);
1882 struct efx_ef10_nic_data *nic_data = efx->nic_data;
1883 u64 *stats = nic_data->stats;
1884 size_t stats_count = 0, index;
1886 efx_ef10_get_stat_mask(efx, mask);
1888 if (full_stats) {
1889 for_each_set_bit(index, mask, EF10_STAT_COUNT) {
1890 if (efx_ef10_stat_desc[index].name) {
1891 *full_stats++ = stats[index];
1892 ++stats_count;
1897 if (!core_stats)
1898 return stats_count;
1900 if (nic_data->datapath_caps &
1901 1 << MC_CMD_GET_CAPABILITIES_OUT_EVB_LBN) {
1902 /* Use vadaptor stats. */
1903 core_stats->rx_packets = stats[EF10_STAT_rx_unicast] +
1904 stats[EF10_STAT_rx_multicast] +
1905 stats[EF10_STAT_rx_broadcast];
1906 core_stats->tx_packets = stats[EF10_STAT_tx_unicast] +
1907 stats[EF10_STAT_tx_multicast] +
1908 stats[EF10_STAT_tx_broadcast];
1909 core_stats->rx_bytes = stats[EF10_STAT_rx_unicast_bytes] +
1910 stats[EF10_STAT_rx_multicast_bytes] +
1911 stats[EF10_STAT_rx_broadcast_bytes];
1912 core_stats->tx_bytes = stats[EF10_STAT_tx_unicast_bytes] +
1913 stats[EF10_STAT_tx_multicast_bytes] +
1914 stats[EF10_STAT_tx_broadcast_bytes];
1915 core_stats->rx_dropped = stats[GENERIC_STAT_rx_nodesc_trunc] +
1916 stats[GENERIC_STAT_rx_noskb_drops];
1917 core_stats->multicast = stats[EF10_STAT_rx_multicast];
1918 core_stats->rx_crc_errors = stats[EF10_STAT_rx_bad];
1919 core_stats->rx_fifo_errors = stats[EF10_STAT_rx_overflow];
1920 core_stats->rx_errors = core_stats->rx_crc_errors;
1921 core_stats->tx_errors = stats[EF10_STAT_tx_bad];
1922 } else {
1923 /* Use port stats. */
1924 core_stats->rx_packets = stats[EF10_STAT_port_rx_packets];
1925 core_stats->tx_packets = stats[EF10_STAT_port_tx_packets];
1926 core_stats->rx_bytes = stats[EF10_STAT_port_rx_bytes];
1927 core_stats->tx_bytes = stats[EF10_STAT_port_tx_bytes];
1928 core_stats->rx_dropped = stats[EF10_STAT_port_rx_nodesc_drops] +
1929 stats[GENERIC_STAT_rx_nodesc_trunc] +
1930 stats[GENERIC_STAT_rx_noskb_drops];
1931 core_stats->multicast = stats[EF10_STAT_port_rx_multicast];
1932 core_stats->rx_length_errors =
1933 stats[EF10_STAT_port_rx_gtjumbo] +
1934 stats[EF10_STAT_port_rx_length_error];
1935 core_stats->rx_crc_errors = stats[EF10_STAT_port_rx_bad];
1936 core_stats->rx_frame_errors =
1937 stats[EF10_STAT_port_rx_align_error];
1938 core_stats->rx_fifo_errors = stats[EF10_STAT_port_rx_overflow];
1939 core_stats->rx_errors = (core_stats->rx_length_errors +
1940 core_stats->rx_crc_errors +
1941 core_stats->rx_frame_errors);
1944 return stats_count;
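/* Read the DMA'd MAC statistics for a PF. The MC exports a generation count
 * alongside the statistics buffer: we read the end-of-update count (the last
 * qword of the buffer), copy the statistics out, then read the
 * start-of-update count and return -EAGAIN on mismatch so the caller can
 * retry while a DMA update is presumably still in progress.
 */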
1947 static int efx_ef10_try_update_nic_stats_pf(struct efx_nic *efx)
1949 struct efx_ef10_nic_data *nic_data = efx->nic_data;
1950 DECLARE_BITMAP(mask, EF10_STAT_COUNT);
1951 __le64 generation_start, generation_end;
1952 u64 *stats = nic_data->stats;
1953 __le64 *dma_stats;
1955 efx_ef10_get_stat_mask(efx, mask);
1957 dma_stats = efx->stats_buffer.addr;
1959 generation_end = dma_stats[efx->num_mac_stats - 1];
1960 if (generation_end == EFX_MC_STATS_GENERATION_INVALID)
1961 return 0;
1962 rmb();
1963 efx_nic_update_stats(efx_ef10_stat_desc, EF10_STAT_COUNT, mask,
1964 stats, efx->stats_buffer.addr, false);
1965 rmb();
1966 generation_start = dma_stats[MC_CMD_MAC_GENERATION_START];
1967 if (generation_end != generation_start)
1968 return -EAGAIN;
1970 /* Update derived statistics */
1971 efx_nic_fix_nodesc_drop_stat(efx,
1972 &stats[EF10_STAT_port_rx_nodesc_drops]);
1973 stats[EF10_STAT_port_rx_good_bytes] =
1974 stats[EF10_STAT_port_rx_bytes] -
1975 stats[EF10_STAT_port_rx_bytes_minus_good_bytes];
1976 efx_update_diff_stat(&stats[EF10_STAT_port_rx_bad_bytes],
1977 stats[EF10_STAT_port_rx_bytes_minus_good_bytes]);
1978 efx_update_sw_stats(efx, stats);
1979 return 0;
1983 static size_t efx_ef10_update_stats_pf(struct efx_nic *efx, u64 *full_stats,
1984 struct rtnl_link_stats64 *core_stats)
1986 int retry;
1988 /* If we're unlucky enough to read statistics during the DMA, wait
1989 * up to 10ms for it to finish (typically takes <500us)
1991 for (retry = 0; retry < 100; ++retry) {
1992 if (efx_ef10_try_update_nic_stats_pf(efx) == 0)
1993 break;
1994 udelay(100);
1997 return efx_ef10_update_stats_common(efx, full_stats, core_stats);
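/* Read MAC statistics for a VF. Unlike the PF path above, this requests a
 * one-shot DMA of the vadaptor statistics into a temporary buffer via
 * MC_CMD_MAC_STATS (for EVB_PORT_ID_ASSIGNED) and then applies the same
 * generation-count check. stats_lock is dropped around the blocking MCDI
 * call and re-taken before the stats array is updated; in atomic context
 * the function bails out early and only refreshes the software stats.
 */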
2000 static int efx_ef10_try_update_nic_stats_vf(struct efx_nic *efx)
2002 MCDI_DECLARE_BUF(inbuf, MC_CMD_MAC_STATS_IN_LEN);
2003 struct efx_ef10_nic_data *nic_data = efx->nic_data;
2004 DECLARE_BITMAP(mask, EF10_STAT_COUNT);
2005 __le64 generation_start, generation_end;
2006 u64 *stats = nic_data->stats;
2007 u32 dma_len = efx->num_mac_stats * sizeof(u64);
2008 struct efx_buffer stats_buf;
2009 __le64 *dma_stats;
2010 int rc;
2012 spin_unlock_bh(&efx->stats_lock);
2014 if (in_interrupt()) {
2015 /* If in atomic context, cannot update stats. Just update the
2016 * software stats and return so the caller can continue.
2018 spin_lock_bh(&efx->stats_lock);
2019 efx_update_sw_stats(efx, stats);
2020 return 0;
2023 efx_ef10_get_stat_mask(efx, mask);
2025 rc = efx_nic_alloc_buffer(efx, &stats_buf, dma_len, GFP_ATOMIC);
2026 if (rc) {
2027 spin_lock_bh(&efx->stats_lock);
2028 return rc;
2031 dma_stats = stats_buf.addr;
2032 dma_stats[efx->num_mac_stats - 1] = EFX_MC_STATS_GENERATION_INVALID;
2034 MCDI_SET_QWORD(inbuf, MAC_STATS_IN_DMA_ADDR, stats_buf.dma_addr);
2035 MCDI_POPULATE_DWORD_1(inbuf, MAC_STATS_IN_CMD,
2036 MAC_STATS_IN_DMA, 1);
2037 MCDI_SET_DWORD(inbuf, MAC_STATS_IN_DMA_LEN, dma_len);
2038 MCDI_SET_DWORD(inbuf, MAC_STATS_IN_PORT_ID, EVB_PORT_ID_ASSIGNED);
2040 rc = efx_mcdi_rpc_quiet(efx, MC_CMD_MAC_STATS, inbuf, sizeof(inbuf),
2041 NULL, 0, NULL);
2042 spin_lock_bh(&efx->stats_lock);
2043 if (rc) {
2044 /* Expect ENOENT if DMA queues have not been set up */
2045 if (rc != -ENOENT || atomic_read(&efx->active_queues))
2046 efx_mcdi_display_error(efx, MC_CMD_MAC_STATS,
2047 sizeof(inbuf), NULL, 0, rc);
2048 goto out;
2051 generation_end = dma_stats[efx->num_mac_stats - 1];
2052 if (generation_end == EFX_MC_STATS_GENERATION_INVALID) {
2053 WARN_ON_ONCE(1);
2054 goto out;
2056 rmb();
2057 efx_nic_update_stats(efx_ef10_stat_desc, EF10_STAT_COUNT, mask,
2058 stats, stats_buf.addr, false);
2059 rmb();
2060 generation_start = dma_stats[MC_CMD_MAC_GENERATION_START];
2061 if (generation_end != generation_start) {
2062 rc = -EAGAIN;
2063 goto out;
2066 efx_update_sw_stats(efx, stats);
2067 out:
2068 efx_nic_free_buffer(efx, &stats_buf);
2069 return rc;
2072 static size_t efx_ef10_update_stats_vf(struct efx_nic *efx, u64 *full_stats,
2073 struct rtnl_link_stats64 *core_stats)
2075 if (efx_ef10_try_update_nic_stats_vf(efx))
2076 return 0;
2078 return efx_ef10_update_stats_common(efx, full_stats, core_stats);
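/* Program the event queue's interrupt moderation timer. Three mechanisms are
 * used depending on the firmware (see the branches below): an MCDI
 * SET_EVQ_TMR request in nanoseconds when workaround 61265 applies, the
 * indirect ER_DD_EVQ_INDIRECT register in timer ticks when workaround 35388
 * applies, and the ER_DZ_EVQ_TMR register in timer ticks otherwise.
 */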
2081 static void efx_ef10_push_irq_moderation(struct efx_channel *channel)
2083 struct efx_nic *efx = channel->efx;
2084 unsigned int mode, usecs;
2085 efx_dword_t timer_cmd;
2087 if (channel->irq_moderation_us) {
2088 mode = 3;
2089 usecs = channel->irq_moderation_us;
2090 } else {
2091 mode = 0;
2092 usecs = 0;
2095 if (EFX_EF10_WORKAROUND_61265(efx)) {
2096 MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_EVQ_TMR_IN_LEN);
2097 unsigned int ns = usecs * 1000;
2099 MCDI_SET_DWORD(inbuf, SET_EVQ_TMR_IN_INSTANCE,
2100 channel->channel);
2101 MCDI_SET_DWORD(inbuf, SET_EVQ_TMR_IN_TMR_LOAD_REQ_NS, ns);
2102 MCDI_SET_DWORD(inbuf, SET_EVQ_TMR_IN_TMR_RELOAD_REQ_NS, ns);
2103 MCDI_SET_DWORD(inbuf, SET_EVQ_TMR_IN_TMR_MODE, mode);
2105 efx_mcdi_rpc_async(efx, MC_CMD_SET_EVQ_TMR,
2106 inbuf, sizeof(inbuf), 0, NULL, 0);
2107 } else if (EFX_EF10_WORKAROUND_35388(efx)) {
2108 unsigned int ticks = efx_usecs_to_ticks(efx, usecs);
2110 EFX_POPULATE_DWORD_3(timer_cmd, ERF_DD_EVQ_IND_TIMER_FLAGS,
2111 EFE_DD_EVQ_IND_TIMER_FLAGS,
2112 ERF_DD_EVQ_IND_TIMER_MODE, mode,
2113 ERF_DD_EVQ_IND_TIMER_VAL, ticks);
2114 efx_writed_page(efx, &timer_cmd, ER_DD_EVQ_INDIRECT,
2115 channel->channel);
2116 } else {
2117 unsigned int ticks = efx_usecs_to_ticks(efx, usecs);
2119 EFX_POPULATE_DWORD_3(timer_cmd, ERF_DZ_TC_TIMER_MODE, mode,
2120 ERF_DZ_TC_TIMER_VAL, ticks,
2121 ERF_FZ_TC_TMR_REL_VAL, ticks);
2122 efx_writed_page(efx, &timer_cmd, ER_DZ_EVQ_TMR,
2123 channel->channel);
2127 static void efx_ef10_get_wol_vf(struct efx_nic *efx,
2128 struct ethtool_wolinfo *wol) {}
2130 static int efx_ef10_set_wol_vf(struct efx_nic *efx, u32 type)
2132 return -EOPNOTSUPP;
2135 static void efx_ef10_get_wol(struct efx_nic *efx, struct ethtool_wolinfo *wol)
2137 wol->supported = 0;
2138 wol->wolopts = 0;
2139 memset(&wol->sopass, 0, sizeof(wol->sopass));
2142 static int efx_ef10_set_wol(struct efx_nic *efx, u32 type)
2144 if (type != 0)
2145 return -EINVAL;
2146 return 0;
2149 static void efx_ef10_mcdi_request(struct efx_nic *efx,
2150 const efx_dword_t *hdr, size_t hdr_len,
2151 const efx_dword_t *sdu, size_t sdu_len)
2153 struct efx_ef10_nic_data *nic_data = efx->nic_data;
2154 u8 *pdu = nic_data->mcdi_buf.addr;
2156 memcpy(pdu, hdr, hdr_len);
2157 memcpy(pdu + hdr_len, sdu, sdu_len);
2158 wmb();
2160 /* The hardware provides 'low' and 'high' (doorbell) registers
2161 * for passing the 64-bit address of an MCDI request to
2162 * firmware. However the dwords are swapped by firmware. The
2163 * least significant bits of the doorbell are then 0 for all
2164 * MCDI requests due to alignment.
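	 *
	 * For illustration (made-up buffer address): if mcdi_buf.dma_addr
	 * were 0x0000001234568000, the first write below would carry
	 * 0x00000012 (the high dword, to ER_DZ_MC_DB_LWRD) and the second
	 * 0x34568000 (the low dword, to ER_DZ_MC_DB_HWRD).
	 */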
2166 _efx_writed(efx, cpu_to_le32((u64)nic_data->mcdi_buf.dma_addr >> 32),
2167 ER_DZ_MC_DB_LWRD);
2168 _efx_writed(efx, cpu_to_le32((u32)nic_data->mcdi_buf.dma_addr),
2169 ER_DZ_MC_DB_HWRD);
2172 static bool efx_ef10_mcdi_poll_response(struct efx_nic *efx)
2174 struct efx_ef10_nic_data *nic_data = efx->nic_data;
2175 const efx_dword_t hdr = *(const efx_dword_t *)nic_data->mcdi_buf.addr;
2177 rmb();
2178 return EFX_DWORD_FIELD(hdr, MCDI_HEADER_RESPONSE);
2181 static void
2182 efx_ef10_mcdi_read_response(struct efx_nic *efx, efx_dword_t *outbuf,
2183 size_t offset, size_t outlen)
2185 struct efx_ef10_nic_data *nic_data = efx->nic_data;
2186 const u8 *pdu = nic_data->mcdi_buf.addr;
2188 memcpy(outbuf, pdu + offset, outlen);
2191 static void efx_ef10_mcdi_reboot_detected(struct efx_nic *efx)
2193 struct efx_ef10_nic_data *nic_data = efx->nic_data;
2195 /* All our allocations have been reset */
2196 efx_ef10_reset_mc_allocations(efx);
2198 /* The datapath firmware might have been changed */
2199 nic_data->must_check_datapath_caps = true;
2201 /* MAC statistics have been cleared on the NIC; clear the local
2202 * statistic that we update with efx_update_diff_stat().
2204 nic_data->stats[EF10_STAT_port_rx_bad_bytes] = 0;
2207 static int efx_ef10_mcdi_poll_reboot(struct efx_nic *efx)
2209 struct efx_ef10_nic_data *nic_data = efx->nic_data;
2210 int rc;
2212 rc = efx_ef10_get_warm_boot_count(efx);
2213 if (rc < 0) {
2214 /* The firmware is presumably in the process of
2215 * rebooting. However, we are supposed to report each
2216 * reboot just once, so we must only do that once we
2217 * can read and store the updated warm boot count.
2219 return 0;
2222 if (rc == nic_data->warm_boot_count)
2223 return 0;
2225 nic_data->warm_boot_count = rc;
2226 efx_ef10_mcdi_reboot_detected(efx);
2228 return -EIO;
2231 /* Handle an MSI interrupt
2233 * Handle an MSI hardware interrupt. This routine schedules event
2234 * queue processing. No interrupt acknowledgement cycle is necessary.
2235 * Also, we never need to check that the interrupt is for us, since
2236 * MSI interrupts cannot be shared.
2238 static irqreturn_t efx_ef10_msi_interrupt(int irq, void *dev_id)
2240 struct efx_msi_context *context = dev_id;
2241 struct efx_nic *efx = context->efx;
2243 netif_vdbg(efx, intr, efx->net_dev,
2244 "IRQ %d on CPU %d\n", irq, raw_smp_processor_id());
2246 if (likely(READ_ONCE(efx->irq_soft_enabled))) {
2247 /* Note test interrupts */
2248 if (context->index == efx->irq_level)
2249 efx->last_irq_cpu = raw_smp_processor_id();
2251 /* Schedule processing of the channel */
2252 efx_schedule_channel_irq(efx->channel[context->index]);
2255 return IRQ_HANDLED;
2258 static irqreturn_t efx_ef10_legacy_interrupt(int irq, void *dev_id)
2260 struct efx_nic *efx = dev_id;
2261 bool soft_enabled = READ_ONCE(efx->irq_soft_enabled);
2262 struct efx_channel *channel;
2263 efx_dword_t reg;
2264 u32 queues;
2266 /* Read the ISR which also ACKs the interrupts */
2267 efx_readd(efx, &reg, ER_DZ_BIU_INT_ISR);
2268 queues = EFX_DWORD_FIELD(reg, ERF_DZ_ISR_REG);
2270 if (queues == 0)
2271 return IRQ_NONE;
2273 if (likely(soft_enabled)) {
2274 /* Note test interrupts */
2275 if (queues & (1U << efx->irq_level))
2276 efx->last_irq_cpu = raw_smp_processor_id();
2278 efx_for_each_channel(channel, efx) {
2279 if (queues & 1)
2280 efx_schedule_channel_irq(channel);
2281 queues >>= 1;
2285 netif_vdbg(efx, intr, efx->net_dev,
2286 "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n",
2287 irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg));
2289 return IRQ_HANDLED;
2292 static int efx_ef10_irq_test_generate(struct efx_nic *efx)
2294 MCDI_DECLARE_BUF(inbuf, MC_CMD_TRIGGER_INTERRUPT_IN_LEN);
2296 if (efx_mcdi_set_workaround(efx, MC_CMD_WORKAROUND_BUG41750, true,
2297 NULL) == 0)
2298 return -ENOTSUPP;
2300 BUILD_BUG_ON(MC_CMD_TRIGGER_INTERRUPT_OUT_LEN != 0);
2302 MCDI_SET_DWORD(inbuf, TRIGGER_INTERRUPT_IN_INTR_LEVEL, efx->irq_level);
2303 return efx_mcdi_rpc(efx, MC_CMD_TRIGGER_INTERRUPT,
2304 inbuf, sizeof(inbuf), NULL, 0, NULL);
2307 static int efx_ef10_tx_probe(struct efx_tx_queue *tx_queue)
2309 return efx_nic_alloc_buffer(tx_queue->efx, &tx_queue->txd.buf,
2310 (tx_queue->ptr_mask + 1) *
2311 sizeof(efx_qword_t),
2312 GFP_KERNEL);
2315 /* This writes to the TX_DESC_WPTR and also pushes data */
2316 static inline void efx_ef10_push_tx_desc(struct efx_tx_queue *tx_queue,
2317 const efx_qword_t *txd)
2319 unsigned int write_ptr;
2320 efx_oword_t reg;
2322 write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
2323 EFX_POPULATE_OWORD_1(reg, ERF_DZ_TX_DESC_WPTR, write_ptr);
2324 reg.qword[0] = *txd;
2325 efx_writeo_page(tx_queue->efx, &reg,
2326 ER_DZ_TX_DESC_UPD, tx_queue->queue);
2329 /* Add Firmware-Assisted TSO v2 option descriptors to a queue.
2331 static int efx_ef10_tx_tso_desc(struct efx_tx_queue *tx_queue,
2332 struct sk_buff *skb,
2333 bool *data_mapped)
2335 struct efx_tx_buffer *buffer;
2336 struct tcphdr *tcp;
2337 struct iphdr *ip;
2339 u16 ipv4_id;
2340 u32 seqnum;
2341 u32 mss;
2343 EFX_WARN_ON_ONCE_PARANOID(tx_queue->tso_version != 2);
2345 mss = skb_shinfo(skb)->gso_size;
2347 if (unlikely(mss < 4)) {
2348 WARN_ONCE(1, "MSS of %u is too small for TSO v2\n", mss);
2349 return -EINVAL;
2352 ip = ip_hdr(skb);
2353 if (ip->version == 4) {
2354 /* Modify IPv4 header if needed. */
2355 ip->tot_len = 0;
2356 ip->check = 0;
2357 ipv4_id = ntohs(ip->id);
2358 } else {
2359 /* Modify IPv6 header if needed. */
2360 struct ipv6hdr *ipv6 = ipv6_hdr(skb);
2362 ipv6->payload_len = 0;
2363 ipv4_id = 0;
2366 tcp = tcp_hdr(skb);
2367 seqnum = ntohl(tcp->seq);
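	/* First TSO option descriptor (FATSO2A): carries the packet's initial
	 * IP ID and TCP sequence number.
	 */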
2369 buffer = efx_tx_queue_get_insert_buffer(tx_queue);
2371 buffer->flags = EFX_TX_BUF_OPTION;
2372 buffer->len = 0;
2373 buffer->unmap_len = 0;
2374 EFX_POPULATE_QWORD_5(buffer->option,
2375 ESF_DZ_TX_DESC_IS_OPT, 1,
2376 ESF_DZ_TX_OPTION_TYPE, ESE_DZ_TX_OPTION_DESC_TSO,
2377 ESF_DZ_TX_TSO_OPTION_TYPE,
2378 ESE_DZ_TX_TSO_OPTION_DESC_FATSO2A,
2379 ESF_DZ_TX_TSO_IP_ID, ipv4_id,
2380 ESF_DZ_TX_TSO_TCP_SEQNO, seqnum
2382 ++tx_queue->insert_count;
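	/* Second TSO option descriptor (FATSO2B): carries the MSS. */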
2384 buffer = efx_tx_queue_get_insert_buffer(tx_queue);
2386 buffer->flags = EFX_TX_BUF_OPTION;
2387 buffer->len = 0;
2388 buffer->unmap_len = 0;
2389 EFX_POPULATE_QWORD_4(buffer->option,
2390 ESF_DZ_TX_DESC_IS_OPT, 1,
2391 ESF_DZ_TX_OPTION_TYPE, ESE_DZ_TX_OPTION_DESC_TSO,
2392 ESF_DZ_TX_TSO_OPTION_TYPE,
2393 ESE_DZ_TX_TSO_OPTION_DESC_FATSO2B,
2394 ESF_DZ_TX_TSO_TCP_MSS, mss
2396 ++tx_queue->insert_count;
2398 return 0;
2401 static u32 efx_ef10_tso_versions(struct efx_nic *efx)
2403 struct efx_ef10_nic_data *nic_data = efx->nic_data;
2404 u32 tso_versions = 0;
2406 if (nic_data->datapath_caps &
2407 (1 << MC_CMD_GET_CAPABILITIES_OUT_TX_TSO_LBN))
2408 tso_versions |= BIT(1);
2409 if (nic_data->datapath_caps2 &
2410 (1 << MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_LBN))
2411 tso_versions |= BIT(2);
2412 return tso_versions;
2415 static void efx_ef10_tx_init(struct efx_tx_queue *tx_queue)
2417 MCDI_DECLARE_BUF(inbuf, MC_CMD_INIT_TXQ_IN_LEN(EFX_MAX_DMAQ_SIZE * 8 /
2418 EFX_BUF_SIZE));
2419 bool csum_offload = tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD;
2420 size_t entries = tx_queue->txd.buf.len / EFX_BUF_SIZE;
2421 struct efx_channel *channel = tx_queue->channel;
2422 struct efx_nic *efx = tx_queue->efx;
2423 struct efx_ef10_nic_data *nic_data = efx->nic_data;
2424 bool tso_v2 = false;
2425 size_t inlen;
2426 dma_addr_t dma_addr;
2427 efx_qword_t *txd;
2428 int rc;
2429 int i;
2430 BUILD_BUG_ON(MC_CMD_INIT_TXQ_OUT_LEN != 0);
2432 /* Only attempt to enable TX timestamping if we have the license for it,
2433 * otherwise TXQ init will fail
2435 if (!(nic_data->licensed_features &
2436 (1 << LICENSED_V3_FEATURES_TX_TIMESTAMPS_LBN))) {
2437 tx_queue->timestamping = false;
2438 /* Disable sync events on this channel. */
2439 if (efx->type->ptp_set_ts_sync_events)
2440 efx->type->ptp_set_ts_sync_events(efx, false, false);
	/* TSOv2 is a limited resource that can only be configured on a limited
	 * number of queues. TSO without checksum offload is not a useful
	 * combination, so we only enable TSOv2 on queues that do checksum
	 * offload. TSOv2 cannot be used with hardware timestamping.
	 */
2448 if (csum_offload && (nic_data->datapath_caps2 &
2449 (1 << MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_LBN)) &&
2450 !tx_queue->timestamping) {
2451 tso_v2 = true;
2452 netif_dbg(efx, hw, efx->net_dev, "Using TSOv2 for channel %u\n",
2453 channel->channel);
2456 MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_SIZE, tx_queue->ptr_mask + 1);
2457 MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_TARGET_EVQ, channel->channel);
2458 MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_LABEL, tx_queue->queue);
2459 MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_INSTANCE, tx_queue->queue);
2460 MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_OWNER_ID, 0);
2461 MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_PORT_ID, nic_data->vport_id);
2463 dma_addr = tx_queue->txd.buf.dma_addr;
2465 netif_dbg(efx, hw, efx->net_dev, "pushing TXQ %d. %zu entries (%llx)\n",
2466 tx_queue->queue, entries, (u64)dma_addr);
2468 for (i = 0; i < entries; ++i) {
2469 MCDI_SET_ARRAY_QWORD(inbuf, INIT_TXQ_IN_DMA_ADDR, i, dma_addr);
2470 dma_addr += EFX_BUF_SIZE;
2473 inlen = MC_CMD_INIT_TXQ_IN_LEN(entries);
2475 do {
2476 MCDI_POPULATE_DWORD_4(inbuf, INIT_TXQ_IN_FLAGS,
2477 /* This flag was removed from mcdi_pcol.h for
2478 * the non-_EXT version of INIT_TXQ. However,
2479 * firmware still honours it.
2481 INIT_TXQ_EXT_IN_FLAG_TSOV2_EN, tso_v2,
2482 INIT_TXQ_IN_FLAG_IP_CSUM_DIS, !csum_offload,
2483 INIT_TXQ_IN_FLAG_TCP_CSUM_DIS, !csum_offload,
2484 INIT_TXQ_EXT_IN_FLAG_TIMESTAMP,
2485 tx_queue->timestamping);
2487 rc = efx_mcdi_rpc_quiet(efx, MC_CMD_INIT_TXQ, inbuf, inlen,
2488 NULL, 0, NULL);
2489 if (rc == -ENOSPC && tso_v2) {
2490 /* Retry without TSOv2 if we're short on contexts. */
2491 tso_v2 = false;
2492 netif_warn(efx, probe, efx->net_dev,
2493 "TSOv2 context not available to segment in hardware. TCP performance may be reduced.\n");
2494 } else if (rc) {
2495 efx_mcdi_display_error(efx, MC_CMD_INIT_TXQ,
2496 MC_CMD_INIT_TXQ_EXT_IN_LEN,
2497 NULL, 0, rc);
2498 goto fail;
2500 } while (rc);
2502 /* A previous user of this TX queue might have set us up the
2503 * bomb by writing a descriptor to the TX push collector but
2504 * not the doorbell. (Each collector belongs to a port, not a
2505 * queue or function, so cannot easily be reset.) We must
2506 * attempt to push a no-op descriptor in its place.
2508 tx_queue->buffer[0].flags = EFX_TX_BUF_OPTION;
2509 tx_queue->insert_count = 1;
2510 txd = efx_tx_desc(tx_queue, 0);
2511 EFX_POPULATE_QWORD_5(*txd,
2512 ESF_DZ_TX_DESC_IS_OPT, true,
2513 ESF_DZ_TX_OPTION_TYPE,
2514 ESE_DZ_TX_OPTION_DESC_CRC_CSUM,
2515 ESF_DZ_TX_OPTION_UDP_TCP_CSUM, csum_offload,
2516 ESF_DZ_TX_OPTION_IP_CSUM, csum_offload,
2517 ESF_DZ_TX_TIMESTAMP, tx_queue->timestamping);
2518 tx_queue->write_count = 1;
2520 if (tso_v2) {
2521 tx_queue->handle_tso = efx_ef10_tx_tso_desc;
2522 tx_queue->tso_version = 2;
2523 } else if (nic_data->datapath_caps &
2524 (1 << MC_CMD_GET_CAPABILITIES_OUT_TX_TSO_LBN)) {
2525 tx_queue->tso_version = 1;
2528 wmb();
2529 efx_ef10_push_tx_desc(tx_queue, txd);
2531 return;
2533 fail:
2534 netdev_WARN(efx->net_dev, "failed to initialise TXQ %d\n",
2535 tx_queue->queue);
2538 static void efx_ef10_tx_fini(struct efx_tx_queue *tx_queue)
2540 MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_TXQ_IN_LEN);
2541 MCDI_DECLARE_BUF_ERR(outbuf);
2542 struct efx_nic *efx = tx_queue->efx;
2543 size_t outlen;
2544 int rc;
2546 MCDI_SET_DWORD(inbuf, FINI_TXQ_IN_INSTANCE,
2547 tx_queue->queue);
2549 rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FINI_TXQ, inbuf, sizeof(inbuf),
2550 outbuf, sizeof(outbuf), &outlen);
2552 if (rc && rc != -EALREADY)
2553 goto fail;
2555 return;
2557 fail:
2558 efx_mcdi_display_error(efx, MC_CMD_FINI_TXQ, MC_CMD_FINI_TXQ_IN_LEN,
2559 outbuf, outlen, rc);
2562 static void efx_ef10_tx_remove(struct efx_tx_queue *tx_queue)
2564 efx_nic_free_buffer(tx_queue->efx, &tx_queue->txd.buf);
2567 /* This writes to the TX_DESC_WPTR; write pointer for TX descriptor ring */
2568 static inline void efx_ef10_notify_tx_desc(struct efx_tx_queue *tx_queue)
2570 unsigned int write_ptr;
2571 efx_dword_t reg;
2573 write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
2574 EFX_POPULATE_DWORD_1(reg, ERF_DZ_TX_DESC_WPTR_DWORD, write_ptr);
2575 efx_writed_page(tx_queue->efx, &reg,
2576 ER_DZ_TX_DESC_UPD_DWORD, tx_queue->queue);
2579 #define EFX_EF10_MAX_TX_DESCRIPTOR_LEN 0x3fff
2581 static unsigned int efx_ef10_tx_limit_len(struct efx_tx_queue *tx_queue,
2582 dma_addr_t dma_addr, unsigned int len)
2584 if (len > EFX_EF10_MAX_TX_DESCRIPTOR_LEN) {
2585 /* If we need to break across multiple descriptors we should
2586 * stop at a page boundary. This assumes the length limit is
2587 * greater than the page size.
2589 dma_addr_t end = dma_addr + EFX_EF10_MAX_TX_DESCRIPTOR_LEN;
2591 BUILD_BUG_ON(EFX_EF10_MAX_TX_DESCRIPTOR_LEN < EFX_PAGE_SIZE);
2592 len = (end & (~(EFX_PAGE_SIZE - 1))) - dma_addr;
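		/* Worked example (illustrative values, assuming a 4 KiB
		 * EFX_PAGE_SIZE): with dma_addr == 0x2100 and len == 0x5000,
		 * end == 0x60ff and len becomes 0x6000 - 0x2100 == 0x3f00,
		 * so this descriptor ends exactly on a page boundary and the
		 * remainder is carried by subsequent descriptors.
		 */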
2595 return len;
2598 static void efx_ef10_tx_write(struct efx_tx_queue *tx_queue)
2600 unsigned int old_write_count = tx_queue->write_count;
2601 struct efx_tx_buffer *buffer;
2602 unsigned int write_ptr;
2603 efx_qword_t *txd;
2605 tx_queue->xmit_more_available = false;
2606 if (unlikely(tx_queue->write_count == tx_queue->insert_count))
2607 return;
2609 do {
2610 write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
2611 buffer = &tx_queue->buffer[write_ptr];
2612 txd = efx_tx_desc(tx_queue, write_ptr);
2613 ++tx_queue->write_count;
2615 /* Create TX descriptor ring entry */
2616 if (buffer->flags & EFX_TX_BUF_OPTION) {
2617 *txd = buffer->option;
2618 if (EFX_QWORD_FIELD(*txd, ESF_DZ_TX_OPTION_TYPE) == 1)
2619 /* PIO descriptor */
2620 tx_queue->packet_write_count = tx_queue->write_count;
2621 } else {
2622 tx_queue->packet_write_count = tx_queue->write_count;
2623 BUILD_BUG_ON(EFX_TX_BUF_CONT != 1);
2624 EFX_POPULATE_QWORD_3(
2625 *txd,
2626 ESF_DZ_TX_KER_CONT,
2627 buffer->flags & EFX_TX_BUF_CONT,
2628 ESF_DZ_TX_KER_BYTE_CNT, buffer->len,
2629 ESF_DZ_TX_KER_BUF_ADDR, buffer->dma_addr);
2631 } while (tx_queue->write_count != tx_queue->insert_count);
2633 wmb(); /* Ensure descriptors are written before they are fetched */
2635 if (efx_nic_may_push_tx_desc(tx_queue, old_write_count)) {
2636 txd = efx_tx_desc(tx_queue,
2637 old_write_count & tx_queue->ptr_mask);
2638 efx_ef10_push_tx_desc(tx_queue, txd);
2639 ++tx_queue->pushes;
2640 } else {
2641 efx_ef10_notify_tx_desc(tx_queue);
2645 #define RSS_MODE_HASH_ADDRS (1 << RSS_MODE_HASH_SRC_ADDR_LBN |\
2646 1 << RSS_MODE_HASH_DST_ADDR_LBN)
2647 #define RSS_MODE_HASH_PORTS (1 << RSS_MODE_HASH_SRC_PORT_LBN |\
2648 1 << RSS_MODE_HASH_DST_PORT_LBN)
2649 #define RSS_CONTEXT_FLAGS_DEFAULT (1 << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV4_EN_LBN |\
2650 1 << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV4_EN_LBN |\
2651 1 << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV6_EN_LBN |\
2652 1 << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV6_EN_LBN |\
2653 (RSS_MODE_HASH_ADDRS | RSS_MODE_HASH_PORTS) << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TCP_IPV4_RSS_MODE_LBN |\
2654 RSS_MODE_HASH_ADDRS << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV4_RSS_MODE_LBN |\
2655 RSS_MODE_HASH_ADDRS << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_OTHER_IPV4_RSS_MODE_LBN |\
2656 (RSS_MODE_HASH_ADDRS | RSS_MODE_HASH_PORTS) << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TCP_IPV6_RSS_MODE_LBN |\
2657 RSS_MODE_HASH_ADDRS << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV6_RSS_MODE_LBN |\
2658 RSS_MODE_HASH_ADDRS << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_OTHER_IPV6_RSS_MODE_LBN)
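/* In RSS_CONTEXT_FLAGS_DEFAULT above, each *_RSS_MODE field is a small
 * per-traffic-class bitmask selecting which header fields are hashed:
 * RSS_MODE_HASH_ADDRS | RSS_MODE_HASH_PORTS gives 4-tuple hashing
 * (source/destination address and port) while RSS_MODE_HASH_ADDRS alone
 * gives 2-tuple hashing, so the defaults are 4-tuple for TCP and 2-tuple
 * for UDP and other IP traffic.
 */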
2660 static int efx_ef10_get_rss_flags(struct efx_nic *efx, u32 context, u32 *flags)
2662 /* Firmware had a bug (sfc bug 61952) where it would not actually
2663 * fill in the flags field in the response to MC_CMD_RSS_CONTEXT_GET_FLAGS.
2664 * This meant that it would always contain whatever was previously
2665 * in the MCDI buffer. Fortunately, all firmware versions with
2666 * this bug have the same default flags value for a newly-allocated
2667 * RSS context, and the only time we want to get the flags is just
2668 * after allocating. Moreover, the response has a 32-bit hole
2669 * where the context ID would be in the request, so we can use an
2670 * overlength buffer in the request and pre-fill the flags field
2671 * with what we believe the default to be. Thus if the firmware
2672 * has the bug, it will leave our pre-filled value in the flags
2673 * field of the response, and we will get the right answer.
2675 * However, this does mean that this function should NOT be used if
2676 * the RSS context flags might not be their defaults - it is ONLY
2677 * reliably correct for a newly-allocated RSS context.
2679 MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_LEN);
2680 MCDI_DECLARE_BUF(outbuf, MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_LEN);
2681 size_t outlen;
2682 int rc;
2684 /* Check we have a hole for the context ID */
2685 BUILD_BUG_ON(MC_CMD_RSS_CONTEXT_GET_FLAGS_IN_LEN != MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_FLAGS_OFST);
2686 MCDI_SET_DWORD(inbuf, RSS_CONTEXT_GET_FLAGS_IN_RSS_CONTEXT_ID, context);
2687 MCDI_SET_DWORD(inbuf, RSS_CONTEXT_GET_FLAGS_OUT_FLAGS,
2688 RSS_CONTEXT_FLAGS_DEFAULT);
2689 rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_GET_FLAGS, inbuf,
2690 sizeof(inbuf), outbuf, sizeof(outbuf), &outlen);
2691 if (rc == 0) {
2692 if (outlen < MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_LEN)
2693 rc = -EIO;
2694 else
2695 *flags = MCDI_DWORD(outbuf, RSS_CONTEXT_GET_FLAGS_OUT_FLAGS);
2697 return rc;
2700 /* Attempt to enable 4-tuple UDP hashing on the specified RSS context.
2701 * If we fail, we just leave the RSS context at its default hash settings,
2702 * which is safe but may slightly reduce performance.
2703 * Defaults are 4-tuple for TCP and 2-tuple for UDP and other-IP, so we
2704 * just need to set the UDP ports flags (for both IP versions).
2706 static void efx_ef10_set_rss_flags(struct efx_nic *efx, u32 context)
2708 MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_LEN);
2709 u32 flags;
2711 BUILD_BUG_ON(MC_CMD_RSS_CONTEXT_SET_FLAGS_OUT_LEN != 0);
2713 if (efx_ef10_get_rss_flags(efx, context, &flags) != 0)
2714 return;
2715 MCDI_SET_DWORD(inbuf, RSS_CONTEXT_SET_FLAGS_IN_RSS_CONTEXT_ID, context);
2716 flags |= RSS_MODE_HASH_PORTS << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV4_RSS_MODE_LBN;
2717 flags |= RSS_MODE_HASH_PORTS << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV6_RSS_MODE_LBN;
2718 MCDI_SET_DWORD(inbuf, RSS_CONTEXT_SET_FLAGS_IN_FLAGS, flags);
2719 if (!efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_SET_FLAGS, inbuf, sizeof(inbuf),
2720 NULL, 0, NULL))
2721 /* Succeeded, so UDP 4-tuple is now enabled */
2722 efx->rx_hash_udp_4tuple = true;
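/* Allocate an RSS context. An exclusive context is sized for the full
 * efx->rss_spread; a shared context is capped to a power of two no larger
 * than EFX_EF10_MAX_SHARED_RSS_CONTEXT_SIZE. If a shared context would only
 * cover one queue, no context is allocated at all and the invalid handle is
 * returned in *context.
 */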
2725 static int efx_ef10_alloc_rss_context(struct efx_nic *efx, u32 *context,
2726 bool exclusive, unsigned *context_size)
2728 MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_ALLOC_IN_LEN);
2729 MCDI_DECLARE_BUF(outbuf, MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN);
2730 struct efx_ef10_nic_data *nic_data = efx->nic_data;
2731 size_t outlen;
2732 int rc;
2733 u32 alloc_type = exclusive ?
2734 MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_EXCLUSIVE :
2735 MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_SHARED;
2736 unsigned rss_spread = exclusive ?
2737 efx->rss_spread :
2738 min(rounddown_pow_of_two(efx->rss_spread),
2739 EFX_EF10_MAX_SHARED_RSS_CONTEXT_SIZE);
2741 if (!exclusive && rss_spread == 1) {
2742 *context = EFX_EF10_RSS_CONTEXT_INVALID;
2743 if (context_size)
2744 *context_size = 1;
2745 return 0;
2748 if (nic_data->datapath_caps &
2749 1 << MC_CMD_GET_CAPABILITIES_OUT_RX_RSS_LIMITED_LBN)
2750 return -EOPNOTSUPP;
2752 MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_UPSTREAM_PORT_ID,
2753 nic_data->vport_id);
2754 MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_TYPE, alloc_type);
2755 MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_NUM_QUEUES, rss_spread);
2757 rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_ALLOC, inbuf, sizeof(inbuf),
2758 outbuf, sizeof(outbuf), &outlen);
2759 if (rc != 0)
2760 return rc;
2762 if (outlen < MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN)
2763 return -EIO;
2765 *context = MCDI_DWORD(outbuf, RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID);
2767 if (context_size)
2768 *context_size = rss_spread;
2770 if (nic_data->datapath_caps &
2771 1 << MC_CMD_GET_CAPABILITIES_OUT_ADDITIONAL_RSS_MODES_LBN)
2772 efx_ef10_set_rss_flags(efx, *context);
2774 return 0;
2777 static void efx_ef10_free_rss_context(struct efx_nic *efx, u32 context)
2779 MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_FREE_IN_LEN);
2780 int rc;
2782 MCDI_SET_DWORD(inbuf, RSS_CONTEXT_FREE_IN_RSS_CONTEXT_ID,
2783 context);
2785 rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_FREE, inbuf, sizeof(inbuf),
2786 NULL, 0, NULL);
2787 WARN_ON(rc != 0);
2790 static int efx_ef10_populate_rss_table(struct efx_nic *efx, u32 context,
2791 const u32 *rx_indir_table, const u8 *key)
2793 MCDI_DECLARE_BUF(tablebuf, MC_CMD_RSS_CONTEXT_SET_TABLE_IN_LEN);
2794 MCDI_DECLARE_BUF(keybuf, MC_CMD_RSS_CONTEXT_SET_KEY_IN_LEN);
2795 int i, rc;
2797 MCDI_SET_DWORD(tablebuf, RSS_CONTEXT_SET_TABLE_IN_RSS_CONTEXT_ID,
2798 context);
2799 BUILD_BUG_ON(ARRAY_SIZE(efx->rx_indir_table) !=
2800 MC_CMD_RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE_LEN);
2802 /* This iterates over the length of efx->rx_indir_table, but copies
2803 * bytes from rx_indir_table. That's because the latter is a pointer
2804 * rather than an array, but should have the same length.
2805 * The efx->rx_hash_key loop below is similar.
2807 for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); ++i)
2808 MCDI_PTR(tablebuf,
2809 RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE)[i] =
2810 (u8) rx_indir_table[i];
2812 rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_SET_TABLE, tablebuf,
2813 sizeof(tablebuf), NULL, 0, NULL);
2814 if (rc != 0)
2815 return rc;
2817 MCDI_SET_DWORD(keybuf, RSS_CONTEXT_SET_KEY_IN_RSS_CONTEXT_ID,
2818 context);
2819 BUILD_BUG_ON(ARRAY_SIZE(efx->rx_hash_key) !=
2820 MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_LEN);
2821 for (i = 0; i < ARRAY_SIZE(efx->rx_hash_key); ++i)
2822 MCDI_PTR(keybuf, RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY)[i] = key[i];
2824 return efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_SET_KEY, keybuf,
2825 sizeof(keybuf), NULL, 0, NULL);
2828 static void efx_ef10_rx_free_indir_table(struct efx_nic *efx)
2830 struct efx_ef10_nic_data *nic_data = efx->nic_data;
2832 if (nic_data->rx_rss_context != EFX_EF10_RSS_CONTEXT_INVALID)
2833 efx_ef10_free_rss_context(efx, nic_data->rx_rss_context);
2834 nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID;
2837 static int efx_ef10_rx_push_shared_rss_config(struct efx_nic *efx,
2838 unsigned *context_size)
2840 u32 new_rx_rss_context;
2841 struct efx_ef10_nic_data *nic_data = efx->nic_data;
2842 int rc = efx_ef10_alloc_rss_context(efx, &new_rx_rss_context,
2843 false, context_size);
2845 if (rc != 0)
2846 return rc;
2848 nic_data->rx_rss_context = new_rx_rss_context;
2849 nic_data->rx_rss_context_exclusive = false;
2850 efx_set_default_rx_indir_table(efx);
2851 return 0;
2854 static int efx_ef10_rx_push_exclusive_rss_config(struct efx_nic *efx,
2855 const u32 *rx_indir_table,
2856 const u8 *key)
2858 struct efx_ef10_nic_data *nic_data = efx->nic_data;
2859 int rc;
2860 u32 new_rx_rss_context;
2862 if (nic_data->rx_rss_context == EFX_EF10_RSS_CONTEXT_INVALID ||
2863 !nic_data->rx_rss_context_exclusive) {
2864 rc = efx_ef10_alloc_rss_context(efx, &new_rx_rss_context,
2865 true, NULL);
2866 if (rc == -EOPNOTSUPP)
2867 return rc;
2868 else if (rc != 0)
2869 goto fail1;
2870 } else {
2871 new_rx_rss_context = nic_data->rx_rss_context;
2874 rc = efx_ef10_populate_rss_table(efx, new_rx_rss_context,
2875 rx_indir_table, key);
2876 if (rc != 0)
2877 goto fail2;
2879 if (nic_data->rx_rss_context != new_rx_rss_context)
2880 efx_ef10_rx_free_indir_table(efx);
2881 nic_data->rx_rss_context = new_rx_rss_context;
2882 nic_data->rx_rss_context_exclusive = true;
2883 if (rx_indir_table != efx->rx_indir_table)
2884 memcpy(efx->rx_indir_table, rx_indir_table,
2885 sizeof(efx->rx_indir_table));
2886 if (key != efx->rx_hash_key)
2887 memcpy(efx->rx_hash_key, key, efx->type->rx_hash_key_size);
2889 return 0;
2891 fail2:
2892 if (new_rx_rss_context != nic_data->rx_rss_context)
2893 efx_ef10_free_rss_context(efx, new_rx_rss_context);
2894 fail1:
2895 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
2896 return rc;
2899 static int efx_ef10_rx_pull_rss_config(struct efx_nic *efx)
2901 struct efx_ef10_nic_data *nic_data = efx->nic_data;
2902 MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_GET_TABLE_IN_LEN);
2903 MCDI_DECLARE_BUF(tablebuf, MC_CMD_RSS_CONTEXT_GET_TABLE_OUT_LEN);
2904 MCDI_DECLARE_BUF(keybuf, MC_CMD_RSS_CONTEXT_GET_KEY_OUT_LEN);
2905 size_t outlen;
2906 int rc, i;
2908 BUILD_BUG_ON(MC_CMD_RSS_CONTEXT_GET_TABLE_IN_LEN !=
2909 MC_CMD_RSS_CONTEXT_GET_KEY_IN_LEN);
2911 if (nic_data->rx_rss_context == EFX_EF10_RSS_CONTEXT_INVALID)
2912 return -ENOENT;
2914 MCDI_SET_DWORD(inbuf, RSS_CONTEXT_GET_TABLE_IN_RSS_CONTEXT_ID,
2915 nic_data->rx_rss_context);
2916 BUILD_BUG_ON(ARRAY_SIZE(efx->rx_indir_table) !=
2917 MC_CMD_RSS_CONTEXT_GET_TABLE_OUT_INDIRECTION_TABLE_LEN);
2918 rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_GET_TABLE, inbuf, sizeof(inbuf),
2919 tablebuf, sizeof(tablebuf), &outlen);
2920 if (rc != 0)
2921 return rc;
2923 if (WARN_ON(outlen != MC_CMD_RSS_CONTEXT_GET_TABLE_OUT_LEN))
2924 return -EIO;
2926 for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); i++)
2927 efx->rx_indir_table[i] = MCDI_PTR(tablebuf,
2928 RSS_CONTEXT_GET_TABLE_OUT_INDIRECTION_TABLE)[i];
2930 MCDI_SET_DWORD(inbuf, RSS_CONTEXT_GET_KEY_IN_RSS_CONTEXT_ID,
2931 nic_data->rx_rss_context);
2932 BUILD_BUG_ON(ARRAY_SIZE(efx->rx_hash_key) !=
2933 MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_LEN);
2934 rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_GET_KEY, inbuf, sizeof(inbuf),
2935 keybuf, sizeof(keybuf), &outlen);
2936 if (rc != 0)
2937 return rc;
2939 if (WARN_ON(outlen != MC_CMD_RSS_CONTEXT_GET_KEY_OUT_LEN))
2940 return -EIO;
2942 for (i = 0; i < ARRAY_SIZE(efx->rx_hash_key); ++i)
2943 efx->rx_hash_key[i] = MCDI_PTR(
2944 keybuf, RSS_CONTEXT_GET_KEY_OUT_TOEPLITZ_KEY)[i];
2946 return 0;
2949 static int efx_ef10_pf_rx_push_rss_config(struct efx_nic *efx, bool user,
2950 const u32 *rx_indir_table,
2951 const u8 *key)
2953 int rc;
2955 if (efx->rss_spread == 1)
2956 return 0;
2958 if (!key)
2959 key = efx->rx_hash_key;
2961 rc = efx_ef10_rx_push_exclusive_rss_config(efx, rx_indir_table, key);
2963 if (rc == -ENOBUFS && !user) {
2964 unsigned context_size;
2965 bool mismatch = false;
2966 size_t i;
2968 for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table) && !mismatch;
2969 i++)
2970 mismatch = rx_indir_table[i] !=
2971 ethtool_rxfh_indir_default(i, efx->rss_spread);
2973 rc = efx_ef10_rx_push_shared_rss_config(efx, &context_size);
2974 if (rc == 0) {
2975 if (context_size != efx->rss_spread)
2976 netif_warn(efx, probe, efx->net_dev,
2977 "Could not allocate an exclusive RSS"
2978 " context; allocated a shared one of"
2979 " different size."
2980 " Wanted %u, got %u.\n",
2981 efx->rss_spread, context_size);
2982 else if (mismatch)
2983 netif_warn(efx, probe, efx->net_dev,
2984 "Could not allocate an exclusive RSS"
2985 " context; allocated a shared one but"
2986 " could not apply custom"
2987 " indirection.\n");
2988 else
2989 netif_info(efx, probe, efx->net_dev,
2990 "Could not allocate an exclusive RSS"
2991 " context; allocated a shared one.\n");
2994 return rc;
2997 static int efx_ef10_vf_rx_push_rss_config(struct efx_nic *efx, bool user,
2998 const u32 *rx_indir_table
2999 __attribute__ ((unused)),
3000 const u8 *key
3001 __attribute__ ((unused)))
3003 struct efx_ef10_nic_data *nic_data = efx->nic_data;
3005 if (user)
3006 return -EOPNOTSUPP;
3007 if (nic_data->rx_rss_context != EFX_EF10_RSS_CONTEXT_INVALID)
3008 return 0;
3009 return efx_ef10_rx_push_shared_rss_config(efx, NULL);
3012 static int efx_ef10_rx_probe(struct efx_rx_queue *rx_queue)
3014 return efx_nic_alloc_buffer(rx_queue->efx, &rx_queue->rxd.buf,
3015 (rx_queue->ptr_mask + 1) *
3016 sizeof(efx_qword_t),
3017 GFP_KERNEL);
3020 static void efx_ef10_rx_init(struct efx_rx_queue *rx_queue)
3022 MCDI_DECLARE_BUF(inbuf,
3023 MC_CMD_INIT_RXQ_IN_LEN(EFX_MAX_DMAQ_SIZE * 8 /
3024 EFX_BUF_SIZE));
3025 struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
3026 size_t entries = rx_queue->rxd.buf.len / EFX_BUF_SIZE;
3027 struct efx_nic *efx = rx_queue->efx;
3028 struct efx_ef10_nic_data *nic_data = efx->nic_data;
3029 size_t inlen;
3030 dma_addr_t dma_addr;
3031 int rc;
3032 int i;
3033 BUILD_BUG_ON(MC_CMD_INIT_RXQ_OUT_LEN != 0);
3035 rx_queue->scatter_n = 0;
3036 rx_queue->scatter_len = 0;
3038 MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_SIZE, rx_queue->ptr_mask + 1);
3039 MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_TARGET_EVQ, channel->channel);
3040 MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_LABEL, efx_rx_queue_index(rx_queue));
3041 MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_INSTANCE,
3042 efx_rx_queue_index(rx_queue));
3043 MCDI_POPULATE_DWORD_2(inbuf, INIT_RXQ_IN_FLAGS,
3044 INIT_RXQ_IN_FLAG_PREFIX, 1,
3045 INIT_RXQ_IN_FLAG_TIMESTAMP, 1);
3046 MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_OWNER_ID, 0);
3047 MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_PORT_ID, nic_data->vport_id);
3049 dma_addr = rx_queue->rxd.buf.dma_addr;
3051 netif_dbg(efx, hw, efx->net_dev, "pushing RXQ %d. %zu entries (%llx)\n",
3052 efx_rx_queue_index(rx_queue), entries, (u64)dma_addr);
3054 for (i = 0; i < entries; ++i) {
3055 MCDI_SET_ARRAY_QWORD(inbuf, INIT_RXQ_IN_DMA_ADDR, i, dma_addr);
3056 dma_addr += EFX_BUF_SIZE;
3059 inlen = MC_CMD_INIT_RXQ_IN_LEN(entries);
3061 rc = efx_mcdi_rpc(efx, MC_CMD_INIT_RXQ, inbuf, inlen,
3062 NULL, 0, NULL);
3063 if (rc)
3064 netdev_WARN(efx->net_dev, "failed to initialise RXQ %d\n",
3065 efx_rx_queue_index(rx_queue));
3068 static void efx_ef10_rx_fini(struct efx_rx_queue *rx_queue)
3070 MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_RXQ_IN_LEN);
3071 MCDI_DECLARE_BUF_ERR(outbuf);
3072 struct efx_nic *efx = rx_queue->efx;
3073 size_t outlen;
3074 int rc;
3076 MCDI_SET_DWORD(inbuf, FINI_RXQ_IN_INSTANCE,
3077 efx_rx_queue_index(rx_queue));
3079 rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FINI_RXQ, inbuf, sizeof(inbuf),
3080 outbuf, sizeof(outbuf), &outlen);
3082 if (rc && rc != -EALREADY)
3083 goto fail;
3085 return;
3087 fail:
3088 efx_mcdi_display_error(efx, MC_CMD_FINI_RXQ, MC_CMD_FINI_RXQ_IN_LEN,
3089 outbuf, outlen, rc);
3092 static void efx_ef10_rx_remove(struct efx_rx_queue *rx_queue)
3094 efx_nic_free_buffer(rx_queue->efx, &rx_queue->rxd.buf);
3097 /* This creates an entry in the RX descriptor queue */
3098 static inline void
3099 efx_ef10_build_rx_desc(struct efx_rx_queue *rx_queue, unsigned int index)
3101 struct efx_rx_buffer *rx_buf;
3102 efx_qword_t *rxd;
3104 rxd = efx_rx_desc(rx_queue, index);
3105 rx_buf = efx_rx_buffer(rx_queue, index);
3106 EFX_POPULATE_QWORD_2(*rxd,
3107 ESF_DZ_RX_KER_BYTE_CNT, rx_buf->len,
3108 ESF_DZ_RX_KER_BUF_ADDR, rx_buf->dma_addr);
3111 static void efx_ef10_rx_write(struct efx_rx_queue *rx_queue)
3113 struct efx_nic *efx = rx_queue->efx;
3114 unsigned int write_count;
3115 efx_dword_t reg;
3117 /* Firmware requires that RX_DESC_WPTR be a multiple of 8 */
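	/* For illustration: with notified_count == 24 and added_count == 37,
	 * write_count becomes 32; descriptors 24..31 are written below and
	 * the doorbell is rung with 32 (masked by ptr_mask), while entries
	 * 32..36 wait for a later call.
	 */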
3118 write_count = rx_queue->added_count & ~7;
3119 if (rx_queue->notified_count == write_count)
3120 return;
	do
		efx_ef10_build_rx_desc(
			rx_queue,
			rx_queue->notified_count & rx_queue->ptr_mask);
	while (++rx_queue->notified_count != write_count);
3128 wmb();
3129 EFX_POPULATE_DWORD_1(reg, ERF_DZ_RX_DESC_WPTR,
3130 write_count & rx_queue->ptr_mask);
3131 efx_writed_page(efx, &reg, ER_DZ_RX_DESC_UPD,
3132 efx_rx_queue_index(rx_queue));
3135 static efx_mcdi_async_completer efx_ef10_rx_defer_refill_complete;
3137 static void efx_ef10_rx_defer_refill(struct efx_rx_queue *rx_queue)
3139 struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
3140 MCDI_DECLARE_BUF(inbuf, MC_CMD_DRIVER_EVENT_IN_LEN);
3141 efx_qword_t event;
3143 EFX_POPULATE_QWORD_2(event,
3144 ESF_DZ_EV_CODE, EFX_EF10_DRVGEN_EV,
3145 ESF_DZ_EV_DATA, EFX_EF10_REFILL);
3147 MCDI_SET_DWORD(inbuf, DRIVER_EVENT_IN_EVQ, channel->channel);
3149 /* MCDI_SET_QWORD is not appropriate here since EFX_POPULATE_* has
3150 * already swapped the data to little-endian order.
3152 memcpy(MCDI_PTR(inbuf, DRIVER_EVENT_IN_DATA), &event.u64[0],
3153 sizeof(efx_qword_t));
3155 efx_mcdi_rpc_async(channel->efx, MC_CMD_DRIVER_EVENT,
3156 inbuf, sizeof(inbuf), 0,
3157 efx_ef10_rx_defer_refill_complete, 0);
3160 static void
3161 efx_ef10_rx_defer_refill_complete(struct efx_nic *efx, unsigned long cookie,
3162 int rc, efx_dword_t *outbuf,
3163 size_t outlen_actual)
3165 /* nothing to do */
3168 static int efx_ef10_ev_probe(struct efx_channel *channel)
3170 return efx_nic_alloc_buffer(channel->efx, &channel->eventq.buf,
3171 (channel->eventq_mask + 1) *
3172 sizeof(efx_qword_t),
3173 GFP_KERNEL);
3176 static void efx_ef10_ev_fini(struct efx_channel *channel)
3178 MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_EVQ_IN_LEN);
3179 MCDI_DECLARE_BUF_ERR(outbuf);
3180 struct efx_nic *efx = channel->efx;
3181 size_t outlen;
3182 int rc;
3184 MCDI_SET_DWORD(inbuf, FINI_EVQ_IN_INSTANCE, channel->channel);
3186 rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FINI_EVQ, inbuf, sizeof(inbuf),
3187 outbuf, sizeof(outbuf), &outlen);
3189 if (rc && rc != -EALREADY)
3190 goto fail;
3192 return;
3194 fail:
3195 efx_mcdi_display_error(efx, MC_CMD_FINI_EVQ, MC_CMD_FINI_EVQ_IN_LEN,
3196 outbuf, outlen, rc);
3199 static int efx_ef10_ev_init(struct efx_channel *channel)
3201 MCDI_DECLARE_BUF(inbuf,
3202 MC_CMD_INIT_EVQ_V2_IN_LEN(EFX_MAX_EVQ_SIZE * 8 /
3203 EFX_BUF_SIZE));
3204 MCDI_DECLARE_BUF(outbuf, MC_CMD_INIT_EVQ_V2_OUT_LEN);
3205 size_t entries = channel->eventq.buf.len / EFX_BUF_SIZE;
3206 struct efx_nic *efx = channel->efx;
3207 struct efx_ef10_nic_data *nic_data;
3208 size_t inlen, outlen;
3209 unsigned int enabled, implemented;
3210 dma_addr_t dma_addr;
3211 int rc;
3212 int i;
3214 nic_data = efx->nic_data;
3216 /* Fill event queue with all ones (i.e. empty events) */
3217 memset(channel->eventq.buf.addr, 0xff, channel->eventq.buf.len);
3219 MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_SIZE, channel->eventq_mask + 1);
3220 MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_INSTANCE, channel->channel);
3221 /* INIT_EVQ expects index in vector table, not absolute */
3222 MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_IRQ_NUM, channel->channel);
3223 MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_MODE,
3224 MC_CMD_INIT_EVQ_IN_TMR_MODE_DIS);
3225 MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_LOAD, 0);
3226 MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_RELOAD, 0);
3227 MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_COUNT_MODE,
3228 MC_CMD_INIT_EVQ_IN_COUNT_MODE_DIS);
3229 MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_COUNT_THRSHLD, 0);
3231 if (nic_data->datapath_caps2 &
3232 1 << MC_CMD_GET_CAPABILITIES_V2_OUT_INIT_EVQ_V2_LBN) {
3233 /* Use the new generic approach to specifying event queue
3234 * configuration, requesting lower latency or higher throughput.
3235 * The options that actually get used appear in the output.
3237 MCDI_POPULATE_DWORD_2(inbuf, INIT_EVQ_V2_IN_FLAGS,
3238 INIT_EVQ_V2_IN_FLAG_INTERRUPTING, 1,
3239 INIT_EVQ_V2_IN_FLAG_TYPE,
3240 MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_AUTO);
3241 } else {
3242 bool cut_thru = !(nic_data->datapath_caps &
3243 1 << MC_CMD_GET_CAPABILITIES_OUT_RX_BATCHING_LBN);
3245 MCDI_POPULATE_DWORD_4(inbuf, INIT_EVQ_IN_FLAGS,
3246 INIT_EVQ_IN_FLAG_INTERRUPTING, 1,
3247 INIT_EVQ_IN_FLAG_RX_MERGE, 1,
3248 INIT_EVQ_IN_FLAG_TX_MERGE, 1,
3249 INIT_EVQ_IN_FLAG_CUT_THRU, cut_thru);
3252 dma_addr = channel->eventq.buf.dma_addr;
3253 for (i = 0; i < entries; ++i) {
3254 MCDI_SET_ARRAY_QWORD(inbuf, INIT_EVQ_IN_DMA_ADDR, i, dma_addr);
3255 dma_addr += EFX_BUF_SIZE;
3258 inlen = MC_CMD_INIT_EVQ_IN_LEN(entries);
3260 rc = efx_mcdi_rpc(efx, MC_CMD_INIT_EVQ, inbuf, inlen,
3261 outbuf, sizeof(outbuf), &outlen);
3263 if (outlen >= MC_CMD_INIT_EVQ_V2_OUT_LEN)
3264 netif_dbg(efx, drv, efx->net_dev,
3265 "Channel %d using event queue flags %08x\n",
3266 channel->channel,
3267 MCDI_DWORD(outbuf, INIT_EVQ_V2_OUT_FLAGS));
3269 /* IRQ return is ignored */
3270 if (channel->channel || rc)
3271 return rc;
3273 /* Successfully created event queue on channel 0 */
3274 rc = efx_mcdi_get_workarounds(efx, &implemented, &enabled);
3275 if (rc == -ENOSYS) {
		/* GET_WORKAROUNDS was implemented before this workaround was
		 * added, so if the command itself is missing then the
		 * workaround must also be unavailable in this firmware.
		 */
3279 nic_data->workaround_26807 = false;
3280 rc = 0;
3281 } else if (rc) {
3282 goto fail;
3283 } else {
3284 nic_data->workaround_26807 =
3285 !!(enabled & MC_CMD_GET_WORKAROUNDS_OUT_BUG26807);
3287 if (implemented & MC_CMD_GET_WORKAROUNDS_OUT_BUG26807 &&
3288 !nic_data->workaround_26807) {
3289 unsigned int flags;
3291 rc = efx_mcdi_set_workaround(efx,
3292 MC_CMD_WORKAROUND_BUG26807,
3293 true, &flags);
3295 if (!rc) {
3296 if (flags &
3297 1 << MC_CMD_WORKAROUND_EXT_OUT_FLR_DONE_LBN) {
3298 netif_info(efx, drv, efx->net_dev,
3299 "other functions on NIC have been reset\n");
3301 /* With MCFW v4.6.x and earlier, the
3302 * boot count will have incremented,
3303 * so re-read the warm_boot_count
3304 * value now to ensure this function
3305 * doesn't think it has changed next
3306 * time it checks.
3308 rc = efx_ef10_get_warm_boot_count(efx);
3309 if (rc >= 0) {
3310 nic_data->warm_boot_count = rc;
3311 rc = 0;
3314 nic_data->workaround_26807 = true;
3315 } else if (rc == -EPERM) {
3316 rc = 0;
3321 if (!rc)
3322 return 0;
3324 fail:
3325 efx_ef10_ev_fini(channel);
3326 return rc;
3329 static void efx_ef10_ev_remove(struct efx_channel *channel)
3331 efx_nic_free_buffer(channel->efx, &channel->eventq.buf);
3334 static void efx_ef10_handle_rx_wrong_queue(struct efx_rx_queue *rx_queue,
3335 unsigned int rx_queue_label)
3337 struct efx_nic *efx = rx_queue->efx;
3339 netif_info(efx, hw, efx->net_dev,
3340 "rx event arrived on queue %d labeled as queue %u\n",
3341 efx_rx_queue_index(rx_queue), rx_queue_label);
3343 efx_schedule_reset(efx, RESET_TYPE_DISABLE);
3346 static void
3347 efx_ef10_handle_rx_bad_lbits(struct efx_rx_queue *rx_queue,
3348 unsigned int actual, unsigned int expected)
3350 unsigned int dropped = (actual - expected) & rx_queue->ptr_mask;
3351 struct efx_nic *efx = rx_queue->efx;
3353 netif_info(efx, hw, efx->net_dev,
3354 "dropped %d events (index=%d expected=%d)\n",
3355 dropped, actual, expected);
3357 efx_schedule_reset(efx, RESET_TYPE_DISABLE);
3360 /* partially received RX was aborted. clean up. */
3361 static void efx_ef10_handle_rx_abort(struct efx_rx_queue *rx_queue)
3363 unsigned int rx_desc_ptr;
3365 netif_dbg(rx_queue->efx, hw, rx_queue->efx->net_dev,
3366 "scattered RX aborted (dropping %u buffers)\n",
3367 rx_queue->scatter_n);
3369 rx_desc_ptr = rx_queue->removed_count & rx_queue->ptr_mask;
3371 efx_rx_packet(rx_queue, rx_desc_ptr, rx_queue->scatter_n,
3372 0, EFX_RX_PKT_DISCARD);
3374 rx_queue->removed_count += rx_queue->scatter_n;
3375 rx_queue->scatter_n = 0;
3376 rx_queue->scatter_len = 0;
3377 ++efx_rx_queue_channel(rx_queue)->n_rx_nodesc_trunc;
3380 static u16 efx_ef10_handle_rx_event_errors(struct efx_channel *channel,
3381 unsigned int n_packets,
3382 unsigned int rx_encap_hdr,
3383 unsigned int rx_l3_class,
3384 unsigned int rx_l4_class,
3385 const efx_qword_t *event)
3387 struct efx_nic *efx = channel->efx;
3388 bool handled = false;
3390 if (EFX_QWORD_FIELD(*event, ESF_DZ_RX_ECRC_ERR)) {
3391 if (!(efx->net_dev->features & NETIF_F_RXALL)) {
3392 if (!efx->loopback_selftest)
3393 channel->n_rx_eth_crc_err += n_packets;
3394 return EFX_RX_PKT_DISCARD;
3396 handled = true;
3398 if (EFX_QWORD_FIELD(*event, ESF_DZ_RX_IPCKSUM_ERR)) {
3399 if (unlikely(rx_encap_hdr != ESE_EZ_ENCAP_HDR_VXLAN &&
3400 rx_l3_class != ESE_DZ_L3_CLASS_IP4 &&
3401 rx_l3_class != ESE_DZ_L3_CLASS_IP4_FRAG &&
3402 rx_l3_class != ESE_DZ_L3_CLASS_IP6 &&
3403 rx_l3_class != ESE_DZ_L3_CLASS_IP6_FRAG))
3404 netdev_WARN(efx->net_dev,
3405 "invalid class for RX_IPCKSUM_ERR: event="
3406 EFX_QWORD_FMT "\n",
3407 EFX_QWORD_VAL(*event));
3408 if (!efx->loopback_selftest)
3409 *(rx_encap_hdr ?
3410 &channel->n_rx_outer_ip_hdr_chksum_err :
3411 &channel->n_rx_ip_hdr_chksum_err) += n_packets;
3412 return 0;
3414 if (EFX_QWORD_FIELD(*event, ESF_DZ_RX_TCPUDP_CKSUM_ERR)) {
3415 if (unlikely(rx_encap_hdr != ESE_EZ_ENCAP_HDR_VXLAN &&
3416 ((rx_l3_class != ESE_DZ_L3_CLASS_IP4 &&
3417 rx_l3_class != ESE_DZ_L3_CLASS_IP6) ||
3418 (rx_l4_class != ESE_FZ_L4_CLASS_TCP &&
3419 rx_l4_class != ESE_FZ_L4_CLASS_UDP))))
3420 netdev_WARN(efx->net_dev,
3421 "invalid class for RX_TCPUDP_CKSUM_ERR: event="
3422 EFX_QWORD_FMT "\n",
3423 EFX_QWORD_VAL(*event));
3424 if (!efx->loopback_selftest)
3425 *(rx_encap_hdr ?
3426 &channel->n_rx_outer_tcp_udp_chksum_err :
3427 &channel->n_rx_tcp_udp_chksum_err) += n_packets;
3428 return 0;
3430 if (EFX_QWORD_FIELD(*event, ESF_EZ_RX_IP_INNER_CHKSUM_ERR)) {
3431 if (unlikely(!rx_encap_hdr))
3432 netdev_WARN(efx->net_dev,
3433 "invalid encapsulation type for RX_IP_INNER_CHKSUM_ERR: event="
3434 EFX_QWORD_FMT "\n",
3435 EFX_QWORD_VAL(*event));
3436 else if (unlikely(rx_l3_class != ESE_DZ_L3_CLASS_IP4 &&
3437 rx_l3_class != ESE_DZ_L3_CLASS_IP4_FRAG &&
3438 rx_l3_class != ESE_DZ_L3_CLASS_IP6 &&
3439 rx_l3_class != ESE_DZ_L3_CLASS_IP6_FRAG))
3440 netdev_WARN(efx->net_dev,
3441 "invalid class for RX_IP_INNER_CHKSUM_ERR: event="
3442 EFX_QWORD_FMT "\n",
3443 EFX_QWORD_VAL(*event));
3444 if (!efx->loopback_selftest)
3445 channel->n_rx_inner_ip_hdr_chksum_err += n_packets;
3446 return 0;
3448 if (EFX_QWORD_FIELD(*event, ESF_EZ_RX_TCP_UDP_INNER_CHKSUM_ERR)) {
3449 if (unlikely(!rx_encap_hdr))
3450 netdev_WARN(efx->net_dev,
3451 "invalid encapsulation type for RX_TCP_UDP_INNER_CHKSUM_ERR: event="
3452 EFX_QWORD_FMT "\n",
3453 EFX_QWORD_VAL(*event));
3454 else if (unlikely((rx_l3_class != ESE_DZ_L3_CLASS_IP4 &&
3455 rx_l3_class != ESE_DZ_L3_CLASS_IP6) ||
3456 (rx_l4_class != ESE_FZ_L4_CLASS_TCP &&
3457 rx_l4_class != ESE_FZ_L4_CLASS_UDP)))
3458 netdev_WARN(efx->net_dev,
3459 "invalid class for RX_TCP_UDP_INNER_CHKSUM_ERR: event="
3460 EFX_QWORD_FMT "\n",
3461 EFX_QWORD_VAL(*event));
3462 if (!efx->loopback_selftest)
3463 channel->n_rx_inner_tcp_udp_chksum_err += n_packets;
3464 return 0;
3467 WARN_ON(!handled); /* No error bits were recognised */
3468 return 0;
3471 static int efx_ef10_handle_rx_event(struct efx_channel *channel,
3472 const efx_qword_t *event)
3474 unsigned int rx_bytes, next_ptr_lbits, rx_queue_label;
3475 unsigned int rx_l3_class, rx_l4_class, rx_encap_hdr;
3476 unsigned int n_descs, n_packets, i;
3477 struct efx_nic *efx = channel->efx;
3478 struct efx_ef10_nic_data *nic_data = efx->nic_data;
3479 struct efx_rx_queue *rx_queue;
3480 efx_qword_t errors;
3481 bool rx_cont;
3482 u16 flags = 0;
3484 if (unlikely(READ_ONCE(efx->reset_pending)))
3485 return 0;
3487 /* Basic packet information */
3488 rx_bytes = EFX_QWORD_FIELD(*event, ESF_DZ_RX_BYTES);
3489 next_ptr_lbits = EFX_QWORD_FIELD(*event, ESF_DZ_RX_DSC_PTR_LBITS);
3490 rx_queue_label = EFX_QWORD_FIELD(*event, ESF_DZ_RX_QLABEL);
3491 rx_l3_class = EFX_QWORD_FIELD(*event, ESF_DZ_RX_L3_CLASS);
3492 rx_l4_class = EFX_QWORD_FIELD(*event, ESF_FZ_RX_L4_CLASS);
3493 rx_cont = EFX_QWORD_FIELD(*event, ESF_DZ_RX_CONT);
3494 rx_encap_hdr =
3495 nic_data->datapath_caps &
3496 (1 << MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN) ?
3497 EFX_QWORD_FIELD(*event, ESF_EZ_RX_ENCAP_HDR) :
3498 ESE_EZ_ENCAP_HDR_NONE;
3500 if (EFX_QWORD_FIELD(*event, ESF_DZ_RX_DROP_EVENT))
3501 netdev_WARN(efx->net_dev, "saw RX_DROP_EVENT: event="
3502 EFX_QWORD_FMT "\n",
3503 EFX_QWORD_VAL(*event));
3505 rx_queue = efx_channel_get_rx_queue(channel);
3507 if (unlikely(rx_queue_label != efx_rx_queue_index(rx_queue)))
3508 efx_ef10_handle_rx_wrong_queue(rx_queue, rx_queue_label);
3510 n_descs = ((next_ptr_lbits - rx_queue->removed_count) &
3511 ((1 << ESF_DZ_RX_DSC_PTR_LBITS_WIDTH) - 1));
3513 if (n_descs != rx_queue->scatter_n + 1) {
3516 /* detect rx abort */
3517 if (unlikely(n_descs == rx_queue->scatter_n)) {
3518 if (rx_queue->scatter_n == 0 || rx_bytes != 0)
3519 netdev_WARN(efx->net_dev,
3520 "invalid RX abort: scatter_n=%u event="
3521 EFX_QWORD_FMT "\n",
3522 rx_queue->scatter_n,
3523 EFX_QWORD_VAL(*event));
3524 efx_ef10_handle_rx_abort(rx_queue);
3525 return 0;
3528 /* Check that RX completion merging is valid, i.e.
3529 * the current firmware supports it and this is a
3530 * non-scattered packet.
3532 if (!(nic_data->datapath_caps &
3533 (1 << MC_CMD_GET_CAPABILITIES_OUT_RX_BATCHING_LBN)) ||
3534 rx_queue->scatter_n != 0 || rx_cont) {
3535 efx_ef10_handle_rx_bad_lbits(
3536 rx_queue, next_ptr_lbits,
3537 (rx_queue->removed_count +
3538 rx_queue->scatter_n + 1) &
3539 ((1 << ESF_DZ_RX_DSC_PTR_LBITS_WIDTH) - 1));
3540 return 0;
3543 /* Merged completion for multiple non-scattered packets */
3544 rx_queue->scatter_n = 1;
3545 rx_queue->scatter_len = 0;
3546 n_packets = n_descs;
3547 ++channel->n_rx_merge_events;
3548 channel->n_rx_merge_packets += n_packets;
3549 flags |= EFX_RX_PKT_PREFIX_LEN;
3550 } else {
3551 ++rx_queue->scatter_n;
3552 rx_queue->scatter_len += rx_bytes;
3553 if (rx_cont)
3554 return 0;
3555 n_packets = 1;
3558 EFX_POPULATE_QWORD_5(errors, ESF_DZ_RX_ECRC_ERR, 1,
3559 ESF_DZ_RX_IPCKSUM_ERR, 1,
3560 ESF_DZ_RX_TCPUDP_CKSUM_ERR, 1,
3561 ESF_EZ_RX_IP_INNER_CHKSUM_ERR, 1,
3562 ESF_EZ_RX_TCP_UDP_INNER_CHKSUM_ERR, 1);
3563 EFX_AND_QWORD(errors, *event, errors);
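/* errors now holds only the error bits set in this event; anything
 * non-zero is passed to efx_ef10_handle_rx_event_errors().
 */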
3564 if (unlikely(!EFX_QWORD_IS_ZERO(errors))) {
3565 flags |= efx_ef10_handle_rx_event_errors(channel, n_packets,
3566 rx_encap_hdr,
3567 rx_l3_class, rx_l4_class,
3568 event);
3569 } else {
3570 bool tcpudp = rx_l4_class == ESE_FZ_L4_CLASS_TCP ||
3571 rx_l4_class == ESE_FZ_L4_CLASS_UDP;
3573 switch (rx_encap_hdr) {
3574 case ESE_EZ_ENCAP_HDR_VXLAN: /* VxLAN or GENEVE */
3575 flags |= EFX_RX_PKT_CSUMMED; /* outer UDP csum */
3576 if (tcpudp)
3577 flags |= EFX_RX_PKT_CSUM_LEVEL; /* inner L4 */
3578 break;
3579 case ESE_EZ_ENCAP_HDR_GRE:
3580 case ESE_EZ_ENCAP_HDR_NONE:
3581 if (tcpudp)
3582 flags |= EFX_RX_PKT_CSUMMED;
3583 break;
3584 default:
3585 netdev_WARN(efx->net_dev,
3586 "unknown encapsulation type: event="
3587 EFX_QWORD_FMT "\n",
3588 EFX_QWORD_VAL(*event));
3592 if (rx_l4_class == ESE_FZ_L4_CLASS_TCP)
3593 flags |= EFX_RX_PKT_TCP;
3595 channel->irq_mod_score += 2 * n_packets;
3597 /* Handle received packet(s) */
3598 for (i = 0; i < n_packets; i++) {
3599 efx_rx_packet(rx_queue,
3600 rx_queue->removed_count & rx_queue->ptr_mask,
3601 rx_queue->scatter_n, rx_queue->scatter_len,
3602 flags);
3603 rx_queue->removed_count += rx_queue->scatter_n;
3606 rx_queue->scatter_n = 0;
3607 rx_queue->scatter_len = 0;
3609 return n_packets;
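/* Each TX timestamp event carries one 32-bit half of the timestamp,
 * itself split across the 16-bit TSTAMP_DATA_HI/LO fields; reassemble
 * that 32-bit value here.
 */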
3612 static u32 efx_ef10_extract_event_ts(efx_qword_t *event)
3614 u32 tstamp;
3616 tstamp = EFX_QWORD_FIELD(*event, TX_TIMESTAMP_EVENT_TSTAMP_DATA_HI);
3617 tstamp <<= 16;
3618 tstamp |= EFX_QWORD_FIELD(*event, TX_TIMESTAMP_EVENT_TSTAMP_DATA_LO);
3620 return tstamp;
3623 static void
3624 efx_ef10_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
3626 struct efx_nic *efx = channel->efx;
3627 struct efx_tx_queue *tx_queue;
3628 unsigned int tx_ev_desc_ptr;
3629 unsigned int tx_ev_q_label;
3630 unsigned int tx_ev_type;
3631 u64 ts_part;
3633 if (unlikely(READ_ONCE(efx->reset_pending)))
3634 return;
3636 if (unlikely(EFX_QWORD_FIELD(*event, ESF_DZ_TX_DROP_EVENT)))
3637 return;
3639 /* Get the transmit queue */
3640 tx_ev_q_label = EFX_QWORD_FIELD(*event, ESF_DZ_TX_QLABEL);
3641 tx_queue = efx_channel_get_tx_queue(channel,
3642 tx_ev_q_label % EFX_TXQ_TYPES);
3644 if (!tx_queue->timestamping) {
3645 /* Transmit completion */
3646 tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, ESF_DZ_TX_DESCR_INDX);
3647 efx_xmit_done(tx_queue, tx_ev_desc_ptr & tx_queue->ptr_mask);
3648 return;
3651 /* Transmit timestamps are only available for 8XXX series. They result
3652 * in three events per packet. These occur in order, and are:
3653 * - the normal completion event
3654 * - the low part of the timestamp
3655 * - the high part of the timestamp
3657 * Each part of the timestamp is itself split across two 16 bit
3658 * fields in the event.
3659 */
3660 tx_ev_type = EFX_QWORD_FIELD(*event, ESF_EZ_TX_SOFT1);
3662 switch (tx_ev_type) {
3663 case TX_TIMESTAMP_EVENT_TX_EV_COMPLETION:
3664 /* In case of Queue flush or FLR, we might have received
3665 * the previous TX completion event but not the Timestamp
3666 * events.
3667 */
3668 if (tx_queue->completed_desc_ptr != tx_queue->ptr_mask)
3669 efx_xmit_done(tx_queue, tx_queue->completed_desc_ptr);
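/* Record the completed descriptor but defer efx_xmit_done() until
 * the TSTAMP_HI event delivers the rest of the timestamp.
 */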
3671 tx_ev_desc_ptr = EFX_QWORD_FIELD(*event,
3672 ESF_DZ_TX_DESCR_INDX);
3673 tx_queue->completed_desc_ptr =
3674 tx_ev_desc_ptr & tx_queue->ptr_mask;
3675 break;
3677 case TX_TIMESTAMP_EVENT_TX_EV_TSTAMP_LO:
3678 ts_part = efx_ef10_extract_event_ts(event);
3679 tx_queue->completed_timestamp_minor = ts_part;
3680 break;
3682 case TX_TIMESTAMP_EVENT_TX_EV_TSTAMP_HI:
3683 ts_part = efx_ef10_extract_event_ts(event);
3684 tx_queue->completed_timestamp_major = ts_part;
3686 efx_xmit_done(tx_queue, tx_queue->completed_desc_ptr);
3687 tx_queue->completed_desc_ptr = tx_queue->ptr_mask;
3688 break;
3690 default:
3691 netif_err(efx, hw, efx->net_dev,
3692 "channel %d unknown tx event type %d (data "
3693 EFX_QWORD_FMT ")\n",
3694 channel->channel, tx_ev_type,
3695 EFX_QWORD_VAL(*event));
3696 break;
3700 static void
3701 efx_ef10_handle_driver_event(struct efx_channel *channel, efx_qword_t *event)
3703 struct efx_nic *efx = channel->efx;
3704 int subcode;
3706 subcode = EFX_QWORD_FIELD(*event, ESF_DZ_DRV_SUB_CODE);
3708 switch (subcode) {
3709 case ESE_DZ_DRV_TIMER_EV:
3710 case ESE_DZ_DRV_WAKE_UP_EV:
3711 break;
3712 case ESE_DZ_DRV_START_UP_EV:
3713 /* event queue init complete. ok. */
3714 break;
3715 default:
3716 netif_err(efx, hw, efx->net_dev,
3717 "channel %d unknown driver event type %d"
3718 " (data " EFX_QWORD_FMT ")\n",
3719 channel->channel, subcode,
3720 EFX_QWORD_VAL(*event));
3725 static void efx_ef10_handle_driver_generated_event(struct efx_channel *channel,
3726 efx_qword_t *event)
3728 struct efx_nic *efx = channel->efx;
3729 u32 subcode;
3731 subcode = EFX_QWORD_FIELD(*event, EFX_DWORD_0);
3733 switch (subcode) {
3734 case EFX_EF10_TEST:
3735 channel->event_test_cpu = raw_smp_processor_id();
3736 break;
3737 case EFX_EF10_REFILL:
3738 /* The queue must be empty, so we won't receive any rx
3739 * events, so efx_process_channel() won't refill the
3740 * queue. Refill it here.
3741 */
3742 efx_fast_push_rx_descriptors(&channel->rx_queue, true);
3743 break;
3744 default:
3745 netif_err(efx, hw, efx->net_dev,
3746 "channel %d unknown driver event type %u"
3747 " (data " EFX_QWORD_FMT ")\n",
3748 channel->channel, (unsigned) subcode,
3749 EFX_QWORD_VAL(*event));
3753 static int efx_ef10_ev_process(struct efx_channel *channel, int quota)
3755 struct efx_nic *efx = channel->efx;
3756 efx_qword_t event, *p_event;
3757 unsigned int read_ptr;
3758 int ev_code;
3759 int spent = 0;
3761 if (quota <= 0)
3762 return spent;
3764 read_ptr = channel->eventq_read_ptr;
3766 for (;;) {
3767 p_event = efx_event(channel, read_ptr);
3768 event = *p_event;
3770 if (!efx_event_present(&event))
3771 break;
3773 EFX_SET_QWORD(*p_event);
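/* Overwrite the entry with all-ones so it is not mistaken for a new
 * event when the ring wraps around.
 */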
3775 ++read_ptr;
3777 ev_code = EFX_QWORD_FIELD(event, ESF_DZ_EV_CODE);
3779 netif_vdbg(efx, drv, efx->net_dev,
3780 "processing event on %d " EFX_QWORD_FMT "\n",
3781 channel->channel, EFX_QWORD_VAL(event));
3783 switch (ev_code) {
3784 case ESE_DZ_EV_CODE_MCDI_EV:
3785 efx_mcdi_process_event(channel, &event);
3786 break;
3787 case ESE_DZ_EV_CODE_RX_EV:
3788 spent += efx_ef10_handle_rx_event(channel, &event);
3789 if (spent >= quota) {
3790 /* XXX can we split a merged event to
3791 * avoid going over-quota?
3792 */
3793 spent = quota;
3794 goto out;
3796 break;
3797 case ESE_DZ_EV_CODE_TX_EV:
3798 efx_ef10_handle_tx_event(channel, &event);
3799 break;
3800 case ESE_DZ_EV_CODE_DRIVER_EV:
3801 efx_ef10_handle_driver_event(channel, &event);
3802 if (++spent == quota)
3803 goto out;
3804 break;
3805 case EFX_EF10_DRVGEN_EV:
3806 efx_ef10_handle_driver_generated_event(channel, &event);
3807 break;
3808 default:
3809 netif_err(efx, hw, efx->net_dev,
3810 "channel %d unknown event type %d"
3811 " (data " EFX_QWORD_FMT ")\n",
3812 channel->channel, ev_code,
3813 EFX_QWORD_VAL(event));
3817 out:
3818 channel->eventq_read_ptr = read_ptr;
3819 return spent;
3822 static void efx_ef10_ev_read_ack(struct efx_channel *channel)
3824 struct efx_nic *efx = channel->efx;
3825 efx_dword_t rptr;
3827 if (EFX_EF10_WORKAROUND_35388(efx)) {
3828 BUILD_BUG_ON(EFX_MIN_EVQ_SIZE <
3829 (1 << ERF_DD_EVQ_IND_RPTR_WIDTH));
3830 BUILD_BUG_ON(EFX_MAX_EVQ_SIZE >
3831 (1 << 2 * ERF_DD_EVQ_IND_RPTR_WIDTH));
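/* With workaround 35388 the read pointer is written in two halves,
 * high bits then low bits, through the indirect EVQ RPTR register.
 */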
3833 EFX_POPULATE_DWORD_2(rptr, ERF_DD_EVQ_IND_RPTR_FLAGS,
3834 EFE_DD_EVQ_IND_RPTR_FLAGS_HIGH,
3835 ERF_DD_EVQ_IND_RPTR,
3836 (channel->eventq_read_ptr &
3837 channel->eventq_mask) >>
3838 ERF_DD_EVQ_IND_RPTR_WIDTH);
3839 efx_writed_page(efx, &rptr, ER_DD_EVQ_INDIRECT,
3840 channel->channel);
3841 EFX_POPULATE_DWORD_2(rptr, ERF_DD_EVQ_IND_RPTR_FLAGS,
3842 EFE_DD_EVQ_IND_RPTR_FLAGS_LOW,
3843 ERF_DD_EVQ_IND_RPTR,
3844 channel->eventq_read_ptr &
3845 ((1 << ERF_DD_EVQ_IND_RPTR_WIDTH) - 1));
3846 efx_writed_page(efx, &rptr, ER_DD_EVQ_INDIRECT,
3847 channel->channel);
3848 } else {
3849 EFX_POPULATE_DWORD_1(rptr, ERF_DZ_EVQ_RPTR,
3850 channel->eventq_read_ptr &
3851 channel->eventq_mask);
3852 efx_writed_page(efx, &rptr, ER_DZ_EVQ_RPTR, channel->channel);
3856 static void efx_ef10_ev_test_generate(struct efx_channel *channel)
3858 MCDI_DECLARE_BUF(inbuf, MC_CMD_DRIVER_EVENT_IN_LEN);
3859 struct efx_nic *efx = channel->efx;
3860 efx_qword_t event;
3861 int rc;
3863 EFX_POPULATE_QWORD_2(event,
3864 ESF_DZ_EV_CODE, EFX_EF10_DRVGEN_EV,
3865 ESF_DZ_EV_DATA, EFX_EF10_TEST);
3867 MCDI_SET_DWORD(inbuf, DRIVER_EVENT_IN_EVQ, channel->channel);
3869 /* MCDI_SET_QWORD is not appropriate here since EFX_POPULATE_* has
3870 * already swapped the data to little-endian order.
3871 */
3872 memcpy(MCDI_PTR(inbuf, DRIVER_EVENT_IN_DATA), &event.u64[0],
3873 sizeof(efx_qword_t));
3875 rc = efx_mcdi_rpc(efx, MC_CMD_DRIVER_EVENT, inbuf, sizeof(inbuf),
3876 NULL, 0, NULL);
3877 if (rc != 0)
3878 goto fail;
3880 return;
3882 fail:
3883 WARN_ON(true);
3884 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
3887 void efx_ef10_handle_drain_event(struct efx_nic *efx)
3889 if (atomic_dec_and_test(&efx->active_queues))
3890 wake_up(&efx->flush_wq);
3892 WARN_ON(atomic_read(&efx->active_queues) < 0);
3895 static int efx_ef10_fini_dmaq(struct efx_nic *efx)
3897 struct efx_ef10_nic_data *nic_data = efx->nic_data;
3898 struct efx_channel *channel;
3899 struct efx_tx_queue *tx_queue;
3900 struct efx_rx_queue *rx_queue;
3901 int pending;
3903 /* If the MC has just rebooted, the TX/RX queues will have already been
3904 * torn down, but efx->active_queues needs to be set to zero.
3905 */
3906 if (nic_data->must_realloc_vis) {
3907 atomic_set(&efx->active_queues, 0);
3908 return 0;
3911 /* Do not attempt to write to the NIC during EEH recovery */
3912 if (efx->state != STATE_RECOVERY) {
3913 efx_for_each_channel(channel, efx) {
3914 efx_for_each_channel_rx_queue(rx_queue, channel)
3915 efx_ef10_rx_fini(rx_queue);
3916 efx_for_each_channel_tx_queue(tx_queue, channel)
3917 efx_ef10_tx_fini(tx_queue);
3920 wait_event_timeout(efx->flush_wq,
3921 atomic_read(&efx->active_queues) == 0,
3922 msecs_to_jiffies(EFX_MAX_FLUSH_TIME));
3923 pending = atomic_read(&efx->active_queues);
3924 if (pending) {
3925 netif_err(efx, hw, efx->net_dev, "failed to flush %d queues\n",
3926 pending);
3927 return -ETIMEDOUT;
3931 return 0;
3934 static void efx_ef10_prepare_flr(struct efx_nic *efx)
3936 atomic_set(&efx->active_queues, 0);
3939 static bool efx_ef10_filter_equal(const struct efx_filter_spec *left,
3940 const struct efx_filter_spec *right)
3942 if ((left->match_flags ^ right->match_flags) |
3943 ((left->flags ^ right->flags) &
3944 (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX)))
3945 return false;
3947 return memcmp(&left->outer_vid, &right->outer_vid,
3948 sizeof(struct efx_filter_spec) -
3949 offsetof(struct efx_filter_spec, outer_vid)) == 0;
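/* The hash, like the equality check above, covers the spec from
 * outer_vid to the end of the structure.
 */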
3952 static unsigned int efx_ef10_filter_hash(const struct efx_filter_spec *spec)
3954 BUILD_BUG_ON(offsetof(struct efx_filter_spec, outer_vid) & 3);
3955 return jhash2((const u32 *)&spec->outer_vid,
3956 (sizeof(struct efx_filter_spec) -
3957 offsetof(struct efx_filter_spec, outer_vid)) / 4,
3958 0);
3959 /* XXX should we randomise the initval? */
3962 /* Decide whether a filter should be exclusive or else should allow
3963 * delivery to additional recipients. Currently we decide that
3964 * filters for specific local unicast MAC and IP addresses are
3965 * exclusive.
3966 */
3967 static bool efx_ef10_filter_is_exclusive(const struct efx_filter_spec *spec)
3969 if (spec->match_flags & EFX_FILTER_MATCH_LOC_MAC &&
3970 !is_multicast_ether_addr(spec->loc_mac))
3971 return true;
3973 if ((spec->match_flags &
3974 (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) ==
3975 (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) {
3976 if (spec->ether_type == htons(ETH_P_IP) &&
3977 !ipv4_is_multicast(spec->loc_host[0]))
3978 return true;
3979 if (spec->ether_type == htons(ETH_P_IPV6) &&
3980 ((const u8 *)spec->loc_host)[0] != 0xff)
3981 return true;
3984 return false;
3987 static struct efx_filter_spec *
3988 efx_ef10_filter_entry_spec(const struct efx_ef10_filter_table *table,
3989 unsigned int filter_idx)
3991 return (struct efx_filter_spec *)(table->entry[filter_idx].spec &
3992 ~EFX_EF10_FILTER_FLAGS);
3995 static unsigned int
3996 efx_ef10_filter_entry_flags(const struct efx_ef10_filter_table *table,
3997 unsigned int filter_idx)
3999 return table->entry[filter_idx].spec & EFX_EF10_FILTER_FLAGS;
4002 static void
4003 efx_ef10_filter_set_entry(struct efx_ef10_filter_table *table,
4004 unsigned int filter_idx,
4005 const struct efx_filter_spec *spec,
4006 unsigned int flags)
4008 table->entry[filter_idx].spec = (unsigned long)spec | flags;
4011 static void
4012 efx_ef10_filter_push_prep_set_match_fields(struct efx_nic *efx,
4013 const struct efx_filter_spec *spec,
4014 efx_dword_t *inbuf)
4016 enum efx_encap_type encap_type = efx_filter_get_encap_type(spec);
4017 u32 match_fields = 0, uc_match, mc_match;
4019 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
4020 efx_ef10_filter_is_exclusive(spec) ?
4021 MC_CMD_FILTER_OP_IN_OP_INSERT :
4022 MC_CMD_FILTER_OP_IN_OP_SUBSCRIBE);
4024 /* Convert match flags and values. Unlike almost
4025 * everything else in MCDI, these fields are in
4026 * network byte order.
4027 */
4028 #define COPY_VALUE(value, mcdi_field) \
4029 do { \
4030 match_fields |= \
4031 1 << MC_CMD_FILTER_OP_IN_MATCH_ ## \
4032 mcdi_field ## _LBN; \
4033 BUILD_BUG_ON( \
4034 MC_CMD_FILTER_OP_IN_ ## mcdi_field ## _LEN < \
4035 sizeof(value)); \
4036 memcpy(MCDI_PTR(inbuf, FILTER_OP_IN_ ## mcdi_field), \
4037 &value, sizeof(value)); \
4038 } while (0)
4039 #define COPY_FIELD(gen_flag, gen_field, mcdi_field) \
4040 if (spec->match_flags & EFX_FILTER_MATCH_ ## gen_flag) { \
4041 COPY_VALUE(spec->gen_field, mcdi_field); \
4042 }
4043 /* Handle encap filters first. They will always be mismatch
4044 * (unknown UC or MC) filters
4045 */
4046 if (encap_type) {
4047 /* ether_type and outer_ip_proto need to be variables
4048 * because COPY_VALUE wants to memcpy them
4049 */
4050 __be16 ether_type =
4051 htons(encap_type & EFX_ENCAP_FLAG_IPV6 ?
4052 ETH_P_IPV6 : ETH_P_IP);
4053 u8 vni_type = MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_GENEVE;
4054 u8 outer_ip_proto;
4056 switch (encap_type & EFX_ENCAP_TYPES_MASK) {
4057 case EFX_ENCAP_TYPE_VXLAN:
4058 vni_type = MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_VXLAN;
4059 /* fallthrough */
4060 case EFX_ENCAP_TYPE_GENEVE:
4061 COPY_VALUE(ether_type, ETHER_TYPE);
4062 outer_ip_proto = IPPROTO_UDP;
4063 COPY_VALUE(outer_ip_proto, IP_PROTO);
4064 /* We always need to set the type field, even
4065 * though we're not matching on the TNI.
4066 */
4067 MCDI_POPULATE_DWORD_1(inbuf,
4068 FILTER_OP_EXT_IN_VNI_OR_VSID,
4069 FILTER_OP_EXT_IN_VNI_TYPE,
4070 vni_type);
4071 break;
4072 case EFX_ENCAP_TYPE_NVGRE:
4073 COPY_VALUE(ether_type, ETHER_TYPE);
4074 outer_ip_proto = IPPROTO_GRE;
4075 COPY_VALUE(outer_ip_proto, IP_PROTO);
4076 break;
4077 default:
4078 WARN_ON(1);
4081 uc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_UCAST_DST_LBN;
4082 mc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_MCAST_DST_LBN;
4083 } else {
4084 uc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_UCAST_DST_LBN;
4085 mc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_MCAST_DST_LBN;
4088 if (spec->match_flags & EFX_FILTER_MATCH_LOC_MAC_IG)
4089 match_fields |=
4090 is_multicast_ether_addr(spec->loc_mac) ?
4091 1 << mc_match :
4092 1 << uc_match;
4093 COPY_FIELD(REM_HOST, rem_host, SRC_IP);
4094 COPY_FIELD(LOC_HOST, loc_host, DST_IP);
4095 COPY_FIELD(REM_MAC, rem_mac, SRC_MAC);
4096 COPY_FIELD(REM_PORT, rem_port, SRC_PORT);
4097 COPY_FIELD(LOC_MAC, loc_mac, DST_MAC);
4098 COPY_FIELD(LOC_PORT, loc_port, DST_PORT);
4099 COPY_FIELD(ETHER_TYPE, ether_type, ETHER_TYPE);
4100 COPY_FIELD(INNER_VID, inner_vid, INNER_VLAN);
4101 COPY_FIELD(OUTER_VID, outer_vid, OUTER_VLAN);
4102 COPY_FIELD(IP_PROTO, ip_proto, IP_PROTO);
4103 #undef COPY_FIELD
4104 #undef COPY_VALUE
4105 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_MATCH_FIELDS,
4106 match_fields);
4109 static void efx_ef10_filter_push_prep(struct efx_nic *efx,
4110 const struct efx_filter_spec *spec,
4111 efx_dword_t *inbuf, u64 handle,
4112 bool replacing)
4114 struct efx_ef10_nic_data *nic_data = efx->nic_data;
4115 u32 flags = spec->flags;
4117 memset(inbuf, 0, MC_CMD_FILTER_OP_EXT_IN_LEN);
4119 /* Remove RSS flag if we don't have an RSS context. */
4120 if (flags & EFX_FILTER_FLAG_RX_RSS &&
4121 spec->rss_context == EFX_FILTER_RSS_CONTEXT_DEFAULT &&
4122 nic_data->rx_rss_context == EFX_EF10_RSS_CONTEXT_INVALID)
4123 flags &= ~EFX_FILTER_FLAG_RX_RSS;
4125 if (replacing) {
4126 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
4127 MC_CMD_FILTER_OP_IN_OP_REPLACE);
4128 MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE, handle);
4129 } else {
4130 efx_ef10_filter_push_prep_set_match_fields(efx, spec, inbuf);
4133 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_PORT_ID, nic_data->vport_id);
4134 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_DEST,
4135 spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP ?
4136 MC_CMD_FILTER_OP_IN_RX_DEST_DROP :
4137 MC_CMD_FILTER_OP_IN_RX_DEST_HOST);
4138 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_TX_DOMAIN, 0);
4139 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_TX_DEST,
4140 MC_CMD_FILTER_OP_IN_TX_DEST_DEFAULT);
4141 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_QUEUE,
4142 spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP ?
4143 0 : spec->dmaq_id);
4144 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_MODE,
4145 (flags & EFX_FILTER_FLAG_RX_RSS) ?
4146 MC_CMD_FILTER_OP_IN_RX_MODE_RSS :
4147 MC_CMD_FILTER_OP_IN_RX_MODE_SIMPLE);
4148 if (flags & EFX_FILTER_FLAG_RX_RSS)
4149 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_CONTEXT,
4150 spec->rss_context !=
4151 EFX_FILTER_RSS_CONTEXT_DEFAULT ?
4152 spec->rss_context : nic_data->rx_rss_context);
4155 static int efx_ef10_filter_push(struct efx_nic *efx,
4156 const struct efx_filter_spec *spec,
4157 u64 *handle, bool replacing)
4159 MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_EXT_IN_LEN);
4160 MCDI_DECLARE_BUF(outbuf, MC_CMD_FILTER_OP_EXT_OUT_LEN);
4161 int rc;
4163 efx_ef10_filter_push_prep(efx, spec, inbuf, *handle, replacing);
4164 rc = efx_mcdi_rpc(efx, MC_CMD_FILTER_OP, inbuf, sizeof(inbuf),
4165 outbuf, sizeof(outbuf), NULL);
4166 if (rc == 0)
4167 *handle = MCDI_QWORD(outbuf, FILTER_OP_OUT_HANDLE);
4168 if (rc == -ENOSPC)
4169 rc = -EBUSY; /* to match efx_farch_filter_insert() */
4170 return rc;
4173 static u32 efx_ef10_filter_mcdi_flags_from_spec(const struct efx_filter_spec *spec)
4175 enum efx_encap_type encap_type = efx_filter_get_encap_type(spec);
4176 unsigned int match_flags = spec->match_flags;
4177 unsigned int uc_match, mc_match;
4178 u32 mcdi_flags = 0;
4180 #define MAP_FILTER_TO_MCDI_FLAG(gen_flag, mcdi_field, encap) { \
4181 unsigned int old_match_flags = match_flags; \
4182 match_flags &= ~EFX_FILTER_MATCH_ ## gen_flag; \
4183 if (match_flags != old_match_flags) \
4184 mcdi_flags |= \
4185 (1 << ((encap) ? \
4186 MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_ ## \
4187 mcdi_field ## _LBN : \
4188 MC_CMD_FILTER_OP_EXT_IN_MATCH_ ##\
4189 mcdi_field ## _LBN)); \
4190 }
4191 /* inner or outer based on encap type */
4192 MAP_FILTER_TO_MCDI_FLAG(REM_HOST, SRC_IP, encap_type);
4193 MAP_FILTER_TO_MCDI_FLAG(LOC_HOST, DST_IP, encap_type);
4194 MAP_FILTER_TO_MCDI_FLAG(REM_MAC, SRC_MAC, encap_type);
4195 MAP_FILTER_TO_MCDI_FLAG(REM_PORT, SRC_PORT, encap_type);
4196 MAP_FILTER_TO_MCDI_FLAG(LOC_MAC, DST_MAC, encap_type);
4197 MAP_FILTER_TO_MCDI_FLAG(LOC_PORT, DST_PORT, encap_type);
4198 MAP_FILTER_TO_MCDI_FLAG(ETHER_TYPE, ETHER_TYPE, encap_type);
4199 MAP_FILTER_TO_MCDI_FLAG(IP_PROTO, IP_PROTO, encap_type);
4200 /* always outer */
4201 MAP_FILTER_TO_MCDI_FLAG(INNER_VID, INNER_VLAN, false);
4202 MAP_FILTER_TO_MCDI_FLAG(OUTER_VID, OUTER_VLAN, false);
4203 #undef MAP_FILTER_TO_MCDI_FLAG
4205 /* special handling for encap type, and mismatch */
4206 if (encap_type) {
4207 match_flags &= ~EFX_FILTER_MATCH_ENCAP_TYPE;
4208 mcdi_flags |=
4209 (1 << MC_CMD_FILTER_OP_EXT_IN_MATCH_ETHER_TYPE_LBN);
4210 mcdi_flags |= (1 << MC_CMD_FILTER_OP_EXT_IN_MATCH_IP_PROTO_LBN);
4212 uc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_UCAST_DST_LBN;
4213 mc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_MCAST_DST_LBN;
4214 } else {
4215 uc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_UCAST_DST_LBN;
4216 mc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_MCAST_DST_LBN;
4219 if (match_flags & EFX_FILTER_MATCH_LOC_MAC_IG) {
4220 match_flags &= ~EFX_FILTER_MATCH_LOC_MAC_IG;
4221 mcdi_flags |=
4222 is_multicast_ether_addr(spec->loc_mac) ?
4223 1 << mc_match :
4224 1 << uc_match;
4227 /* Did we map them all? */
4228 WARN_ON_ONCE(match_flags);
4230 return mcdi_flags;
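/* Return the priority of a spec's match type: its index in the
 * table's list of supported MCDI match flags, or -EPROTONOSUPPORT if
 * the firmware does not support that match.
 */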
4233 static int efx_ef10_filter_pri(struct efx_ef10_filter_table *table,
4234 const struct efx_filter_spec *spec)
4236 u32 mcdi_flags = efx_ef10_filter_mcdi_flags_from_spec(spec);
4237 unsigned int match_pri;
4239 for (match_pri = 0;
4240 match_pri < table->rx_match_count;
4241 match_pri++)
4242 if (table->rx_match_mcdi_flags[match_pri] == mcdi_flags)
4243 return match_pri;
4245 return -EPROTONOSUPPORT;
4248 static s32 efx_ef10_filter_insert(struct efx_nic *efx,
4249 struct efx_filter_spec *spec,
4250 bool replace_equal)
4252 struct efx_ef10_filter_table *table = efx->filter_state;
4253 DECLARE_BITMAP(mc_rem_map, EFX_EF10_FILTER_SEARCH_LIMIT);
4254 struct efx_filter_spec *saved_spec;
4255 unsigned int match_pri, hash;
4256 unsigned int priv_flags;
4257 bool replacing = false;
4258 int ins_index = -1;
4259 DEFINE_WAIT(wait);
4260 bool is_mc_recip;
4261 s32 rc;
4263 /* For now, only support RX filters */
4264 if ((spec->flags & (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX)) !=
4265 EFX_FILTER_FLAG_RX)
4266 return -EINVAL;
4268 rc = efx_ef10_filter_pri(table, spec);
4269 if (rc < 0)
4270 return rc;
4271 match_pri = rc;
4273 hash = efx_ef10_filter_hash(spec);
4274 is_mc_recip = efx_filter_is_mc_recipient(spec);
4275 if (is_mc_recip)
4276 bitmap_zero(mc_rem_map, EFX_EF10_FILTER_SEARCH_LIMIT);
4278 /* Find any existing filters with the same match tuple or
4279 * else a free slot to insert at. If any of them are busy,
4280 * we have to wait and retry.
4281 */
4282 for (;;) {
4283 unsigned int depth = 1;
4284 unsigned int i;
4286 spin_lock_bh(&efx->filter_lock);
4288 for (;;) {
4289 i = (hash + depth) & (HUNT_FILTER_TBL_ROWS - 1);
4290 saved_spec = efx_ef10_filter_entry_spec(table, i);
4292 if (!saved_spec) {
4293 if (ins_index < 0)
4294 ins_index = i;
4295 } else if (efx_ef10_filter_equal(spec, saved_spec)) {
4296 if (table->entry[i].spec &
4297 EFX_EF10_FILTER_FLAG_BUSY)
4298 break;
4299 if (spec->priority < saved_spec->priority &&
4300 spec->priority != EFX_FILTER_PRI_AUTO) {
4301 rc = -EPERM;
4302 goto out_unlock;
4304 if (!is_mc_recip) {
4305 /* This is the only one */
4306 if (spec->priority ==
4307 saved_spec->priority &&
4308 !replace_equal) {
4309 rc = -EEXIST;
4310 goto out_unlock;
4312 ins_index = i;
4313 goto found;
4314 } else if (spec->priority >
4315 saved_spec->priority ||
4316 (spec->priority ==
4317 saved_spec->priority &&
4318 replace_equal)) {
4319 if (ins_index < 0)
4320 ins_index = i;
4321 else
4322 __set_bit(depth, mc_rem_map);
4326 /* Once we reach the maximum search depth, use
4327 * the first suitable slot or return -EBUSY if
4328 * there was none
4329 */
4330 if (depth == EFX_EF10_FILTER_SEARCH_LIMIT) {
4331 if (ins_index < 0) {
4332 rc = -EBUSY;
4333 goto out_unlock;
4335 goto found;
4338 ++depth;
4341 prepare_to_wait(&table->waitq, &wait, TASK_UNINTERRUPTIBLE);
4342 spin_unlock_bh(&efx->filter_lock);
4343 schedule();
4346 found:
4347 /* Create a software table entry if necessary, and mark it
4348 * busy. We might yet fail to insert, but any attempt to
4349 * insert a conflicting filter while we're waiting for the
4350 * firmware must find the busy entry.
4351 */
4352 saved_spec = efx_ef10_filter_entry_spec(table, ins_index);
4353 if (saved_spec) {
4354 if (spec->priority == EFX_FILTER_PRI_AUTO &&
4355 saved_spec->priority >= EFX_FILTER_PRI_AUTO) {
4356 /* Just make sure it won't be removed */
4357 if (saved_spec->priority > EFX_FILTER_PRI_AUTO)
4358 saved_spec->flags |= EFX_FILTER_FLAG_RX_OVER_AUTO;
4359 table->entry[ins_index].spec &=
4360 ~EFX_EF10_FILTER_FLAG_AUTO_OLD;
4361 rc = ins_index;
4362 goto out_unlock;
4364 replacing = true;
4365 priv_flags = efx_ef10_filter_entry_flags(table, ins_index);
4366 } else {
4367 saved_spec = kmalloc(sizeof(*spec), GFP_ATOMIC);
4368 if (!saved_spec) {
4369 rc = -ENOMEM;
4370 goto out_unlock;
4372 *saved_spec = *spec;
4373 priv_flags = 0;
4375 efx_ef10_filter_set_entry(table, ins_index, saved_spec,
4376 priv_flags | EFX_EF10_FILTER_FLAG_BUSY);
4378 /* Mark lower-priority multicast recipients busy prior to removal */
4379 if (is_mc_recip) {
4380 unsigned int depth, i;
4382 for (depth = 0; depth < EFX_EF10_FILTER_SEARCH_LIMIT; depth++) {
4383 i = (hash + depth) & (HUNT_FILTER_TBL_ROWS - 1);
4384 if (test_bit(depth, mc_rem_map))
4385 table->entry[i].spec |=
4386 EFX_EF10_FILTER_FLAG_BUSY;
4390 spin_unlock_bh(&efx->filter_lock);
4392 rc = efx_ef10_filter_push(efx, spec, &table->entry[ins_index].handle,
4393 replacing);
4395 /* Finalise the software table entry */
4396 spin_lock_bh(&efx->filter_lock);
4397 if (rc == 0) {
4398 if (replacing) {
4399 /* Update the fields that may differ */
4400 if (saved_spec->priority == EFX_FILTER_PRI_AUTO)
4401 saved_spec->flags |=
4402 EFX_FILTER_FLAG_RX_OVER_AUTO;
4403 saved_spec->priority = spec->priority;
4404 saved_spec->flags &= EFX_FILTER_FLAG_RX_OVER_AUTO;
4405 saved_spec->flags |= spec->flags;
4406 saved_spec->rss_context = spec->rss_context;
4407 saved_spec->dmaq_id = spec->dmaq_id;
4408 }
4409 } else if (!replacing) {
4410 kfree(saved_spec);
4411 saved_spec = NULL;
4413 efx_ef10_filter_set_entry(table, ins_index, saved_spec, priv_flags);
4415 /* Remove and finalise entries for lower-priority multicast
4416 * recipients
4417 */
4418 if (is_mc_recip) {
4419 MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_EXT_IN_LEN);
4420 unsigned int depth, i;
4422 memset(inbuf, 0, sizeof(inbuf));
4424 for (depth = 0; depth < EFX_EF10_FILTER_SEARCH_LIMIT; depth++) {
4425 if (!test_bit(depth, mc_rem_map))
4426 continue;
4428 i = (hash + depth) & (HUNT_FILTER_TBL_ROWS - 1);
4429 saved_spec = efx_ef10_filter_entry_spec(table, i);
4430 priv_flags = efx_ef10_filter_entry_flags(table, i);
4432 if (rc == 0) {
4433 spin_unlock_bh(&efx->filter_lock);
4434 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
4435 MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE);
4436 MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE,
4437 table->entry[i].handle);
4438 rc = efx_mcdi_rpc(efx, MC_CMD_FILTER_OP,
4439 inbuf, sizeof(inbuf),
4440 NULL, 0, NULL);
4441 spin_lock_bh(&efx->filter_lock);
4444 if (rc == 0) {
4445 kfree(saved_spec);
4446 saved_spec = NULL;
4447 priv_flags = 0;
4448 } else {
4449 priv_flags &= ~EFX_EF10_FILTER_FLAG_BUSY;
4451 efx_ef10_filter_set_entry(table, i, saved_spec,
4452 priv_flags);
4456 /* If successful, return the inserted filter ID */
4457 if (rc == 0)
4458 rc = efx_ef10_make_filter_id(match_pri, ins_index);
4460 wake_up_all(&table->waitq);
4461 out_unlock:
4462 spin_unlock_bh(&efx->filter_lock);
4463 finish_wait(&table->waitq, &wait);
4464 return rc;
4467 static void efx_ef10_filter_update_rx_scatter(struct efx_nic *efx)
4469 /* no need to do anything here on EF10 */
4472 /* Remove a filter.
4473 * If !by_index, remove by ID
4474 * If by_index, remove by index
4475 * Filter ID may come from userland and must be range-checked.
4476 */
4477 static int efx_ef10_filter_remove_internal(struct efx_nic *efx,
4478 unsigned int priority_mask,
4479 u32 filter_id, bool by_index)
4481 unsigned int filter_idx = efx_ef10_filter_get_unsafe_id(filter_id);
4482 struct efx_ef10_filter_table *table = efx->filter_state;
4483 MCDI_DECLARE_BUF(inbuf,
4484 MC_CMD_FILTER_OP_IN_HANDLE_OFST +
4485 MC_CMD_FILTER_OP_IN_HANDLE_LEN);
4486 struct efx_filter_spec *spec;
4487 DEFINE_WAIT(wait);
4488 int rc;
4490 /* Find the software table entry and mark it busy. Don't
4491 * remove it yet; any attempt to update while we're waiting
4492 * for the firmware must find the busy entry.
4493 */
4494 for (;;) {
4495 spin_lock_bh(&efx->filter_lock);
4496 if (!(table->entry[filter_idx].spec &
4497 EFX_EF10_FILTER_FLAG_BUSY))
4498 break;
4499 prepare_to_wait(&table->waitq, &wait, TASK_UNINTERRUPTIBLE);
4500 spin_unlock_bh(&efx->filter_lock);
4501 schedule();
4504 spec = efx_ef10_filter_entry_spec(table, filter_idx);
4505 if (!spec ||
4506 (!by_index &&
4507 efx_ef10_filter_pri(table, spec) !=
4508 efx_ef10_filter_get_unsafe_pri(filter_id))) {
4509 rc = -ENOENT;
4510 goto out_unlock;
4513 if (spec->flags & EFX_FILTER_FLAG_RX_OVER_AUTO &&
4514 priority_mask == (1U << EFX_FILTER_PRI_AUTO)) {
4515 /* Just remove flags */
4516 spec->flags &= ~EFX_FILTER_FLAG_RX_OVER_AUTO;
4517 table->entry[filter_idx].spec &= ~EFX_EF10_FILTER_FLAG_AUTO_OLD;
4518 rc = 0;
4519 goto out_unlock;
4522 if (!(priority_mask & (1U << spec->priority))) {
4523 rc = -ENOENT;
4524 goto out_unlock;
4527 table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_BUSY;
4528 spin_unlock_bh(&efx->filter_lock);
4530 if (spec->flags & EFX_FILTER_FLAG_RX_OVER_AUTO) {
4531 /* Reset to an automatic filter */
4533 struct efx_filter_spec new_spec = *spec;
4535 new_spec.priority = EFX_FILTER_PRI_AUTO;
4536 new_spec.flags = (EFX_FILTER_FLAG_RX |
4537 (efx_rss_enabled(efx) ?
4538 EFX_FILTER_FLAG_RX_RSS : 0));
4539 new_spec.dmaq_id = 0;
4540 new_spec.rss_context = EFX_FILTER_RSS_CONTEXT_DEFAULT;
4541 rc = efx_ef10_filter_push(efx, &new_spec,
4542 &table->entry[filter_idx].handle,
4543 true);
4545 spin_lock_bh(&efx->filter_lock);
4546 if (rc == 0)
4547 *spec = new_spec;
4548 } else {
4549 /* Really remove the filter */
4551 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
4552 efx_ef10_filter_is_exclusive(spec) ?
4553 MC_CMD_FILTER_OP_IN_OP_REMOVE :
4554 MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE);
4555 MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE,
4556 table->entry[filter_idx].handle);
4557 rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FILTER_OP,
4558 inbuf, sizeof(inbuf), NULL, 0, NULL);
4560 spin_lock_bh(&efx->filter_lock);
4561 if ((rc == 0) || (rc == -ENOENT)) {
4562 /* Filter removed OK or didn't actually exist */
4563 kfree(spec);
4564 efx_ef10_filter_set_entry(table, filter_idx, NULL, 0);
4565 } else {
4566 efx_mcdi_display_error(efx, MC_CMD_FILTER_OP,
4567 MC_CMD_FILTER_OP_EXT_IN_LEN,
4568 NULL, 0, rc);
4572 table->entry[filter_idx].spec &= ~EFX_EF10_FILTER_FLAG_BUSY;
4573 wake_up_all(&table->waitq);
4574 out_unlock:
4575 spin_unlock_bh(&efx->filter_lock);
4576 finish_wait(&table->waitq, &wait);
4577 return rc;
4580 static int efx_ef10_filter_remove_safe(struct efx_nic *efx,
4581 enum efx_filter_priority priority,
4582 u32 filter_id)
4584 return efx_ef10_filter_remove_internal(efx, 1U << priority,
4585 filter_id, false);
4588 static void efx_ef10_filter_remove_unsafe(struct efx_nic *efx,
4589 enum efx_filter_priority priority,
4590 u32 filter_id)
4592 if (filter_id == EFX_EF10_FILTER_ID_INVALID)
4593 return;
4594 efx_ef10_filter_remove_internal(efx, 1U << priority, filter_id, true);
4597 static int efx_ef10_filter_get_safe(struct efx_nic *efx,
4598 enum efx_filter_priority priority,
4599 u32 filter_id, struct efx_filter_spec *spec)
4601 unsigned int filter_idx = efx_ef10_filter_get_unsafe_id(filter_id);
4602 struct efx_ef10_filter_table *table = efx->filter_state;
4603 const struct efx_filter_spec *saved_spec;
4604 int rc;
4606 spin_lock_bh(&efx->filter_lock);
4607 saved_spec = efx_ef10_filter_entry_spec(table, filter_idx);
4608 if (saved_spec && saved_spec->priority == priority &&
4609 efx_ef10_filter_pri(table, saved_spec) ==
4610 efx_ef10_filter_get_unsafe_pri(filter_id)) {
4611 *spec = *saved_spec;
4612 rc = 0;
4613 } else {
4614 rc = -ENOENT;
4616 spin_unlock_bh(&efx->filter_lock);
4617 return rc;
4620 static int efx_ef10_filter_clear_rx(struct efx_nic *efx,
4621 enum efx_filter_priority priority)
4623 unsigned int priority_mask;
4624 unsigned int i;
4625 int rc;
4627 priority_mask = (((1U << (priority + 1)) - 1) &
4628 ~(1U << EFX_FILTER_PRI_AUTO));
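/* Remove everything at or below the requested priority, but never the
 * automatic filters.
 */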
4630 for (i = 0; i < HUNT_FILTER_TBL_ROWS; i++) {
4631 rc = efx_ef10_filter_remove_internal(efx, priority_mask,
4632 i, true);
4633 if (rc && rc != -ENOENT)
4634 return rc;
4637 return 0;
4640 static u32 efx_ef10_filter_count_rx_used(struct efx_nic *efx,
4641 enum efx_filter_priority priority)
4643 struct efx_ef10_filter_table *table = efx->filter_state;
4644 unsigned int filter_idx;
4645 s32 count = 0;
4647 spin_lock_bh(&efx->filter_lock);
4648 for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) {
4649 if (table->entry[filter_idx].spec &&
4650 efx_ef10_filter_entry_spec(table, filter_idx)->priority ==
4651 priority)
4652 ++count;
4654 spin_unlock_bh(&efx->filter_lock);
4655 return count;
4658 static u32 efx_ef10_filter_get_rx_id_limit(struct efx_nic *efx)
4660 struct efx_ef10_filter_table *table = efx->filter_state;
4662 return table->rx_match_count * HUNT_FILTER_TBL_ROWS * 2;
4665 static s32 efx_ef10_filter_get_rx_ids(struct efx_nic *efx,
4666 enum efx_filter_priority priority,
4667 u32 *buf, u32 size)
4669 struct efx_ef10_filter_table *table = efx->filter_state;
4670 struct efx_filter_spec *spec;
4671 unsigned int filter_idx;
4672 s32 count = 0;
4674 spin_lock_bh(&efx->filter_lock);
4675 for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) {
4676 spec = efx_ef10_filter_entry_spec(table, filter_idx);
4677 if (spec && spec->priority == priority) {
4678 if (count == size) {
4679 count = -EMSGSIZE;
4680 break;
4682 buf[count++] =
4683 efx_ef10_make_filter_id(
4684 efx_ef10_filter_pri(table, spec),
4685 filter_idx);
4688 spin_unlock_bh(&efx->filter_lock);
4689 return count;
4692 #ifdef CONFIG_RFS_ACCEL
4694 static efx_mcdi_async_completer efx_ef10_filter_rfs_insert_complete;
4696 static s32 efx_ef10_filter_rfs_insert(struct efx_nic *efx,
4697 struct efx_filter_spec *spec)
4699 struct efx_ef10_filter_table *table = efx->filter_state;
4700 MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_EXT_IN_LEN);
4701 struct efx_filter_spec *saved_spec;
4702 unsigned int hash, i, depth = 1;
4703 bool replacing = false;
4704 int ins_index = -1;
4705 u64 cookie;
4706 s32 rc;
4708 /* Must be an RX filter without RSS and not for a multicast
4709 * destination address (RFS only works for connected sockets).
4710 * These restrictions allow us to pass only a tiny amount of
4711 * data through to the completion function.
4712 */
4713 EFX_WARN_ON_PARANOID(spec->flags !=
4714 (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_RX_SCATTER));
4715 EFX_WARN_ON_PARANOID(spec->priority != EFX_FILTER_PRI_HINT);
4716 EFX_WARN_ON_PARANOID(efx_filter_is_mc_recipient(spec));
4718 hash = efx_ef10_filter_hash(spec);
4720 spin_lock_bh(&efx->filter_lock);
4722 /* Find any existing filter with the same match tuple or else
4723 * a free slot to insert at. If an existing filter is busy,
4724 * we have to give up.
4725 */
4726 for (;;) {
4727 i = (hash + depth) & (HUNT_FILTER_TBL_ROWS - 1);
4728 saved_spec = efx_ef10_filter_entry_spec(table, i);
4730 if (!saved_spec) {
4731 if (ins_index < 0)
4732 ins_index = i;
4733 } else if (efx_ef10_filter_equal(spec, saved_spec)) {
4734 if (table->entry[i].spec & EFX_EF10_FILTER_FLAG_BUSY) {
4735 rc = -EBUSY;
4736 goto fail_unlock;
4738 if (spec->priority < saved_spec->priority) {
4739 rc = -EPERM;
4740 goto fail_unlock;
4742 ins_index = i;
4743 break;
4746 /* Once we reach the maximum search depth, use the
4747 * first suitable slot or return -EBUSY if there was
4748 * none
4749 */
4750 if (depth == EFX_EF10_FILTER_SEARCH_LIMIT) {
4751 if (ins_index < 0) {
4752 rc = -EBUSY;
4753 goto fail_unlock;
4755 break;
4758 ++depth;
4761 /* Create a software table entry if necessary, and mark it
4762 * busy. We might yet fail to insert, but any attempt to
4763 * insert a conflicting filter while we're waiting for the
4764 * firmware must find the busy entry.
4765 */
4766 saved_spec = efx_ef10_filter_entry_spec(table, ins_index);
4767 if (saved_spec) {
4768 replacing = true;
4769 } else {
4770 saved_spec = kmalloc(sizeof(*spec), GFP_ATOMIC);
4771 if (!saved_spec) {
4772 rc = -ENOMEM;
4773 goto fail_unlock;
4775 *saved_spec = *spec;
4777 efx_ef10_filter_set_entry(table, ins_index, saved_spec,
4778 EFX_EF10_FILTER_FLAG_BUSY);
4780 spin_unlock_bh(&efx->filter_lock);
4782 /* Pack up the variables needed on completion */
4783 cookie = replacing << 31 | ins_index << 16 | spec->dmaq_id;
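/* Cookie layout: bit 31 = replacing, bits 30:16 = insertion index,
 * bits 15:0 = RX queue; unpacked in efx_ef10_filter_rfs_insert_complete().
 */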
4785 efx_ef10_filter_push_prep(efx, spec, inbuf,
4786 table->entry[ins_index].handle, replacing);
4787 efx_mcdi_rpc_async(efx, MC_CMD_FILTER_OP, inbuf, sizeof(inbuf),
4788 MC_CMD_FILTER_OP_OUT_LEN,
4789 efx_ef10_filter_rfs_insert_complete, cookie);
4791 return ins_index;
4793 fail_unlock:
4794 spin_unlock_bh(&efx->filter_lock);
4795 return rc;
4798 static void
4799 efx_ef10_filter_rfs_insert_complete(struct efx_nic *efx, unsigned long cookie,
4800 int rc, efx_dword_t *outbuf,
4801 size_t outlen_actual)
4803 struct efx_ef10_filter_table *table = efx->filter_state;
4804 unsigned int ins_index, dmaq_id;
4805 struct efx_filter_spec *spec;
4806 bool replacing;
4808 /* Unpack the cookie */
4809 replacing = cookie >> 31;
4810 ins_index = (cookie >> 16) & (HUNT_FILTER_TBL_ROWS - 1);
4811 dmaq_id = cookie & 0xffff;
4813 spin_lock_bh(&efx->filter_lock);
4814 spec = efx_ef10_filter_entry_spec(table, ins_index);
4815 if (rc == 0) {
4816 table->entry[ins_index].handle =
4817 MCDI_QWORD(outbuf, FILTER_OP_OUT_HANDLE);
4818 if (replacing)
4819 spec->dmaq_id = dmaq_id;
4820 } else if (!replacing) {
4821 kfree(spec);
4822 spec = NULL;
4824 efx_ef10_filter_set_entry(table, ins_index, spec, 0);
4825 spin_unlock_bh(&efx->filter_lock);
4827 wake_up_all(&table->waitq);
4830 static void
4831 efx_ef10_filter_rfs_expire_complete(struct efx_nic *efx,
4832 unsigned long filter_idx,
4833 int rc, efx_dword_t *outbuf,
4834 size_t outlen_actual);
4836 static bool efx_ef10_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id,
4837 unsigned int filter_idx)
4839 struct efx_ef10_filter_table *table = efx->filter_state;
4840 struct efx_filter_spec *spec =
4841 efx_ef10_filter_entry_spec(table, filter_idx);
4842 MCDI_DECLARE_BUF(inbuf,
4843 MC_CMD_FILTER_OP_IN_HANDLE_OFST +
4844 MC_CMD_FILTER_OP_IN_HANDLE_LEN);
4846 if (!spec ||
4847 (table->entry[filter_idx].spec & EFX_EF10_FILTER_FLAG_BUSY) ||
4848 spec->priority != EFX_FILTER_PRI_HINT ||
4849 !rps_may_expire_flow(efx->net_dev, spec->dmaq_id,
4850 flow_id, filter_idx))
4851 return false;
4853 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
4854 MC_CMD_FILTER_OP_IN_OP_REMOVE);
4855 MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE,
4856 table->entry[filter_idx].handle);
4857 if (efx_mcdi_rpc_async(efx, MC_CMD_FILTER_OP, inbuf, sizeof(inbuf), 0,
4858 efx_ef10_filter_rfs_expire_complete, filter_idx))
4859 return false;
4861 table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_BUSY;
4862 return true;
4865 static void
4866 efx_ef10_filter_rfs_expire_complete(struct efx_nic *efx,
4867 unsigned long filter_idx,
4868 int rc, efx_dword_t *outbuf,
4869 size_t outlen_actual)
4871 struct efx_ef10_filter_table *table = efx->filter_state;
4872 struct efx_filter_spec *spec =
4873 efx_ef10_filter_entry_spec(table, filter_idx);
4875 spin_lock_bh(&efx->filter_lock);
4876 if (rc == 0) {
4877 kfree(spec);
4878 efx_ef10_filter_set_entry(table, filter_idx, NULL, 0);
4880 table->entry[filter_idx].spec &= ~EFX_EF10_FILTER_FLAG_BUSY;
4881 wake_up_all(&table->waitq);
4882 spin_unlock_bh(&efx->filter_lock);
4885 #endif /* CONFIG_RFS_ACCEL */
4887 static int efx_ef10_filter_match_flags_from_mcdi(bool encap, u32 mcdi_flags)
4889 int match_flags = 0;
4891 #define MAP_FLAG(gen_flag, mcdi_field) do { \
4892 u32 old_mcdi_flags = mcdi_flags; \
4893 mcdi_flags &= ~(1 << MC_CMD_FILTER_OP_EXT_IN_MATCH_ ## \
4894 mcdi_field ## _LBN); \
4895 if (mcdi_flags != old_mcdi_flags) \
4896 match_flags |= EFX_FILTER_MATCH_ ## gen_flag; \
4897 } while (0)
4899 if (encap) {
4900 /* encap filters must specify encap type */
4901 match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE;
4902 /* and imply ethertype and ip proto */
4903 mcdi_flags &=
4904 ~(1 << MC_CMD_FILTER_OP_EXT_IN_MATCH_IP_PROTO_LBN);
4905 mcdi_flags &=
4906 ~(1 << MC_CMD_FILTER_OP_EXT_IN_MATCH_ETHER_TYPE_LBN);
4907 /* VLAN tags refer to the outer packet */
4908 MAP_FLAG(INNER_VID, INNER_VLAN);
4909 MAP_FLAG(OUTER_VID, OUTER_VLAN);
4910 /* everything else refers to the inner packet */
4911 MAP_FLAG(LOC_MAC_IG, IFRM_UNKNOWN_UCAST_DST);
4912 MAP_FLAG(LOC_MAC_IG, IFRM_UNKNOWN_MCAST_DST);
4913 MAP_FLAG(REM_HOST, IFRM_SRC_IP);
4914 MAP_FLAG(LOC_HOST, IFRM_DST_IP);
4915 MAP_FLAG(REM_MAC, IFRM_SRC_MAC);
4916 MAP_FLAG(REM_PORT, IFRM_SRC_PORT);
4917 MAP_FLAG(LOC_MAC, IFRM_DST_MAC);
4918 MAP_FLAG(LOC_PORT, IFRM_DST_PORT);
4919 MAP_FLAG(ETHER_TYPE, IFRM_ETHER_TYPE);
4920 MAP_FLAG(IP_PROTO, IFRM_IP_PROTO);
4921 } else {
4922 MAP_FLAG(LOC_MAC_IG, UNKNOWN_UCAST_DST);
4923 MAP_FLAG(LOC_MAC_IG, UNKNOWN_MCAST_DST);
4924 MAP_FLAG(REM_HOST, SRC_IP);
4925 MAP_FLAG(LOC_HOST, DST_IP);
4926 MAP_FLAG(REM_MAC, SRC_MAC);
4927 MAP_FLAG(REM_PORT, SRC_PORT);
4928 MAP_FLAG(LOC_MAC, DST_MAC);
4929 MAP_FLAG(LOC_PORT, DST_PORT);
4930 MAP_FLAG(ETHER_TYPE, ETHER_TYPE);
4931 MAP_FLAG(INNER_VID, INNER_VLAN);
4932 MAP_FLAG(OUTER_VID, OUTER_VLAN);
4933 MAP_FLAG(IP_PROTO, IP_PROTO);
4935 #undef MAP_FLAG
4937 /* Did we map them all? */
4938 if (mcdi_flags)
4939 return -EINVAL;
4941 return match_flags;
4944 static void efx_ef10_filter_cleanup_vlans(struct efx_nic *efx)
4946 struct efx_ef10_filter_table *table = efx->filter_state;
4947 struct efx_ef10_filter_vlan *vlan, *next_vlan;
4949 /* See comment in efx_ef10_filter_table_remove() */
4950 if (!efx_rwsem_assert_write_locked(&efx->filter_sem))
4951 return;
4953 if (!table)
4954 return;
4956 list_for_each_entry_safe(vlan, next_vlan, &table->vlan_list, list)
4957 efx_ef10_filter_del_vlan_internal(efx, vlan);
4960 static bool efx_ef10_filter_match_supported(struct efx_ef10_filter_table *table,
4961 bool encap,
4962 enum efx_filter_match_flags match_flags)
4964 unsigned int match_pri;
4965 int mf;
4967 for (match_pri = 0;
4968 match_pri < table->rx_match_count;
4969 match_pri++) {
4970 mf = efx_ef10_filter_match_flags_from_mcdi(encap,
4971 table->rx_match_mcdi_flags[match_pri]);
4972 if (mf == match_flags)
4973 return true;
4976 return false;
4979 static int
4980 efx_ef10_filter_table_probe_matches(struct efx_nic *efx,
4981 struct efx_ef10_filter_table *table,
4982 bool encap)
4984 MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_PARSER_DISP_INFO_IN_LEN);
4985 MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMAX);
4986 unsigned int pd_match_pri, pd_match_count;
4987 size_t outlen;
4988 int rc;
4990 /* Find out which RX filter types are supported, and their priorities */
4991 MCDI_SET_DWORD(inbuf, GET_PARSER_DISP_INFO_IN_OP,
4992 encap ?
4993 MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_ENCAP_RX_MATCHES :
4994 MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_RX_MATCHES);
4995 rc = efx_mcdi_rpc(efx, MC_CMD_GET_PARSER_DISP_INFO,
4996 inbuf, sizeof(inbuf), outbuf, sizeof(outbuf),
4997 &outlen);
4998 if (rc)
4999 return rc;
5001 pd_match_count = MCDI_VAR_ARRAY_LEN(
5002 outlen, GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES);
5004 for (pd_match_pri = 0; pd_match_pri < pd_match_count; pd_match_pri++) {
5005 u32 mcdi_flags =
5006 MCDI_ARRAY_DWORD(
5007 outbuf,
5008 GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES,
5009 pd_match_pri);
5010 rc = efx_ef10_filter_match_flags_from_mcdi(encap, mcdi_flags);
5011 if (rc < 0) {
5012 netif_dbg(efx, probe, efx->net_dev,
5013 "%s: fw flags %#x pri %u not supported in driver\n",
5014 __func__, mcdi_flags, pd_match_pri);
5015 } else {
5016 netif_dbg(efx, probe, efx->net_dev,
5017 "%s: fw flags %#x pri %u supported as driver flags %#x pri %u\n",
5018 __func__, mcdi_flags, pd_match_pri,
5019 rc, table->rx_match_count);
5020 table->rx_match_mcdi_flags[table->rx_match_count] = mcdi_flags;
5021 table->rx_match_count++;
5025 return 0;
5028 static int efx_ef10_filter_table_probe(struct efx_nic *efx)
5030 struct efx_ef10_nic_data *nic_data = efx->nic_data;
5031 struct net_device *net_dev = efx->net_dev;
5032 struct efx_ef10_filter_table *table;
5033 struct efx_ef10_vlan *vlan;
5034 int rc;
5036 if (!efx_rwsem_assert_write_locked(&efx->filter_sem))
5037 return -EINVAL;
5039 if (efx->filter_state) /* already probed */
5040 return 0;
5042 table = kzalloc(sizeof(*table), GFP_KERNEL);
5043 if (!table)
5044 return -ENOMEM;
5046 table->rx_match_count = 0;
5047 rc = efx_ef10_filter_table_probe_matches(efx, table, false);
5048 if (rc)
5049 goto fail;
5050 if (nic_data->datapath_caps &
5051 (1 << MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN))
5052 rc = efx_ef10_filter_table_probe_matches(efx, table, true);
5053 if (rc)
5054 goto fail;
5055 if ((efx_supported_features(efx) & NETIF_F_HW_VLAN_CTAG_FILTER) &&
5056 !(efx_ef10_filter_match_supported(table, false,
5057 (EFX_FILTER_MATCH_OUTER_VID | EFX_FILTER_MATCH_LOC_MAC)) &&
5058 efx_ef10_filter_match_supported(table, false,
5059 (EFX_FILTER_MATCH_OUTER_VID | EFX_FILTER_MATCH_LOC_MAC_IG)))) {
5060 netif_info(efx, probe, net_dev,
5061 "VLAN filters are not supported in this firmware variant\n");
5062 net_dev->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
5063 efx->fixed_features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
5064 net_dev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
5067 table->entry = vzalloc(HUNT_FILTER_TBL_ROWS * sizeof(*table->entry));
5068 if (!table->entry) {
5069 rc = -ENOMEM;
5070 goto fail;
5073 table->mc_promisc_last = false;
5074 table->vlan_filter =
5075 !!(efx->net_dev->features & NETIF_F_HW_VLAN_CTAG_FILTER);
5076 INIT_LIST_HEAD(&table->vlan_list);
5078 efx->filter_state = table;
5079 init_waitqueue_head(&table->waitq);
5081 list_for_each_entry(vlan, &nic_data->vlan_list, list) {
5082 rc = efx_ef10_filter_add_vlan(efx, vlan->vid);
5083 if (rc)
5084 goto fail_add_vlan;
5087 return 0;
5089 fail_add_vlan:
5090 efx_ef10_filter_cleanup_vlans(efx);
5091 efx->filter_state = NULL;
5092 fail:
5093 kfree(table);
5094 return rc;
5097 /* Caller must hold efx->filter_sem for read if race against
5098 * efx_ef10_filter_table_remove() is possible
5099 */
5100 static void efx_ef10_filter_table_restore(struct efx_nic *efx)
5102 struct efx_ef10_filter_table *table = efx->filter_state;
5103 struct efx_ef10_nic_data *nic_data = efx->nic_data;
5104 unsigned int invalid_filters = 0, failed = 0;
5105 struct efx_ef10_filter_vlan *vlan;
5106 struct efx_filter_spec *spec;
5107 unsigned int filter_idx;
5108 u32 mcdi_flags;
5109 int match_pri;
5110 int rc, i;
5112 WARN_ON(!rwsem_is_locked(&efx->filter_sem));
5114 if (!nic_data->must_restore_filters)
5115 return;
5117 if (!table)
5118 return;
5120 spin_lock_bh(&efx->filter_lock);
5122 for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) {
5123 spec = efx_ef10_filter_entry_spec(table, filter_idx);
5124 if (!spec)
5125 continue;
5127 mcdi_flags = efx_ef10_filter_mcdi_flags_from_spec(spec);
5128 match_pri = 0;
5129 while (match_pri < table->rx_match_count &&
5130 table->rx_match_mcdi_flags[match_pri] != mcdi_flags)
5131 ++match_pri;
5132 if (match_pri >= table->rx_match_count) {
5133 invalid_filters++;
5134 goto not_restored;
5136 if (spec->rss_context != EFX_FILTER_RSS_CONTEXT_DEFAULT &&
5137 spec->rss_context != nic_data->rx_rss_context)
5138 netif_warn(efx, drv, efx->net_dev,
5139 "Warning: unable to restore a filter with specific RSS context.\n");
5141 table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_BUSY;
5142 spin_unlock_bh(&efx->filter_lock);
5144 rc = efx_ef10_filter_push(efx, spec,
5145 &table->entry[filter_idx].handle,
5146 false);
5147 if (rc)
5148 failed++;
5149 spin_lock_bh(&efx->filter_lock);
5151 if (rc) {
5152 not_restored:
5153 list_for_each_entry(vlan, &table->vlan_list, list)
5154 for (i = 0; i < EFX_EF10_NUM_DEFAULT_FILTERS; ++i)
5155 if (vlan->default_filters[i] == filter_idx)
5156 vlan->default_filters[i] =
5157 EFX_EF10_FILTER_ID_INVALID;
5159 kfree(spec);
5160 efx_ef10_filter_set_entry(table, filter_idx, NULL, 0);
5161 } else {
5162 table->entry[filter_idx].spec &=
5163 ~EFX_EF10_FILTER_FLAG_BUSY;
5167 spin_unlock_bh(&efx->filter_lock);
5169 /* This can happen validly if the MC's capabilities have changed, so
5170 * is not an error.
5171 */
5172 if (invalid_filters)
5173 netif_dbg(efx, drv, efx->net_dev,
5174 "Did not restore %u filters that are now unsupported.\n",
5175 invalid_filters);
5177 if (failed)
5178 netif_err(efx, hw, efx->net_dev,
5179 "unable to restore %u filters\n", failed);
5180 else
5181 nic_data->must_restore_filters = false;
5184 static void efx_ef10_filter_table_remove(struct efx_nic *efx)
5186 struct efx_ef10_filter_table *table = efx->filter_state;
5187 MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_EXT_IN_LEN);
5188 struct efx_filter_spec *spec;
5189 unsigned int filter_idx;
5190 int rc;
5192 efx_ef10_filter_cleanup_vlans(efx);
5193 efx->filter_state = NULL;
5194 /* If we were called without locking, then it's not safe to free
5195 * the table as others might be using it. So we just WARN, leak
5196 * the memory, and potentially get an inconsistent filter table
5197 * state.
5198 * This should never actually happen.
5199 */
5200 if (!efx_rwsem_assert_write_locked(&efx->filter_sem))
5201 return;
5203 if (!table)
5204 return;
5206 for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) {
5207 spec = efx_ef10_filter_entry_spec(table, filter_idx);
5208 if (!spec)
5209 continue;
5211 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
5212 efx_ef10_filter_is_exclusive(spec) ?
5213 MC_CMD_FILTER_OP_IN_OP_REMOVE :
5214 MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE);
5215 MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE,
5216 table->entry[filter_idx].handle);
5217 rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FILTER_OP, inbuf,
5218 sizeof(inbuf), NULL, 0, NULL);
5219 if (rc)
5220 netif_info(efx, drv, efx->net_dev,
5221 "%s: filter %04x remove failed\n",
5222 __func__, filter_idx);
5223 kfree(spec);
5226 vfree(table->entry);
5227 kfree(table);
5230 static void efx_ef10_filter_mark_one_old(struct efx_nic *efx, uint16_t *id)
5232 struct efx_ef10_filter_table *table = efx->filter_state;
5233 unsigned int filter_idx;
5235 if (*id != EFX_EF10_FILTER_ID_INVALID) {
5236 filter_idx = efx_ef10_filter_get_unsafe_id(*id);
5237 if (!table->entry[filter_idx].spec)
5238 netif_dbg(efx, drv, efx->net_dev,
5239 "marked null spec old %04x:%04x\n", *id,
5240 filter_idx);
5241 table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_AUTO_OLD;
5242 *id = EFX_EF10_FILTER_ID_INVALID;
5246 /* Mark old per-VLAN filters that may need to be removed */
5247 static void _efx_ef10_filter_vlan_mark_old(struct efx_nic *efx,
5248 struct efx_ef10_filter_vlan *vlan)
5250 struct efx_ef10_filter_table *table = efx->filter_state;
5251 unsigned int i;
5253 for (i = 0; i < table->dev_uc_count; i++)
5254 efx_ef10_filter_mark_one_old(efx, &vlan->uc[i]);
5255 for (i = 0; i < table->dev_mc_count; i++)
5256 efx_ef10_filter_mark_one_old(efx, &vlan->mc[i]);
5257 for (i = 0; i < EFX_EF10_NUM_DEFAULT_FILTERS; i++)
5258 efx_ef10_filter_mark_one_old(efx, &vlan->default_filters[i]);
5261 /* Mark old filters that may need to be removed.
5262 * Caller must hold efx->filter_sem for read if race against
5263 * efx_ef10_filter_table_remove() is possible
5264 */
5265 static void efx_ef10_filter_mark_old(struct efx_nic *efx)
5267 struct efx_ef10_filter_table *table = efx->filter_state;
5268 struct efx_ef10_filter_vlan *vlan;
5270 spin_lock_bh(&efx->filter_lock);
5271 list_for_each_entry(vlan, &table->vlan_list, list)
5272 _efx_ef10_filter_vlan_mark_old(efx, vlan);
5273 spin_unlock_bh(&efx->filter_lock);
5276 static void efx_ef10_filter_uc_addr_list(struct efx_nic *efx)
5278 struct efx_ef10_filter_table *table = efx->filter_state;
5279 struct net_device *net_dev = efx->net_dev;
5280 struct netdev_hw_addr *uc;
5281 unsigned int i;
5283 table->uc_promisc = !!(net_dev->flags & IFF_PROMISC);
5284 ether_addr_copy(table->dev_uc_list[0].addr, net_dev->dev_addr);
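/* Slot 0 always holds the device's own MAC address; the rest of the
 * unicast list is copied below, falling back to unicast promiscuity
 * if it does not fit.
 */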
5285 i = 1;
5286 netdev_for_each_uc_addr(uc, net_dev) {
5287 if (i >= EFX_EF10_FILTER_DEV_UC_MAX) {
5288 table->uc_promisc = true;
5289 break;
5291 ether_addr_copy(table->dev_uc_list[i].addr, uc->addr);
5292 i++;
5295 table->dev_uc_count = i;
5298 static void efx_ef10_filter_mc_addr_list(struct efx_nic *efx)
5300 struct efx_ef10_filter_table *table = efx->filter_state;
5301 struct net_device *net_dev = efx->net_dev;
5302 struct netdev_hw_addr *mc;
5303 unsigned int i;
5305 table->mc_overflow = false;
5306 table->mc_promisc = !!(net_dev->flags & (IFF_PROMISC | IFF_ALLMULTI));
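/* Copy the multicast list; if it does not fit, keep what does and
 * flag both mc_promisc and mc_overflow.
 */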
5308 i = 0;
5309 netdev_for_each_mc_addr(mc, net_dev) {
5310 if (i >= EFX_EF10_FILTER_DEV_MC_MAX) {
5311 table->mc_promisc = true;
5312 table->mc_overflow = true;
5313 break;
5315 ether_addr_copy(table->dev_mc_list[i].addr, mc->addr);
5316 i++;
5319 table->dev_mc_count = i;
5322 static int efx_ef10_filter_insert_addr_list(struct efx_nic *efx,
5323 struct efx_ef10_filter_vlan *vlan,
5324 bool multicast, bool rollback)
5326 struct efx_ef10_filter_table *table = efx->filter_state;
5327 struct efx_ef10_dev_addr *addr_list;
5328 enum efx_filter_flags filter_flags;
5329 struct efx_filter_spec spec;
5330 u8 baddr[ETH_ALEN];
5331 unsigned int i, j;
5332 int addr_count;
5333 u16 *ids;
5334 int rc;
5336 if (multicast) {
5337 addr_list = table->dev_mc_list;
5338 addr_count = table->dev_mc_count;
5339 ids = vlan->mc;
5340 } else {
5341 addr_list = table->dev_uc_list;
5342 addr_count = table->dev_uc_count;
5343 ids = vlan->uc;
5346 filter_flags = efx_rss_enabled(efx) ? EFX_FILTER_FLAG_RX_RSS : 0;
5348 /* Insert/renew filters */
5349 for (i = 0; i < addr_count; i++) {
5350 EFX_WARN_ON_PARANOID(ids[i] != EFX_EF10_FILTER_ID_INVALID);
5351 efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0);
5352 efx_filter_set_eth_local(&spec, vlan->vid, addr_list[i].addr);
5353 rc = efx_ef10_filter_insert(efx, &spec, true);
5354 if (rc < 0) {
5355 if (rollback) {
5356 netif_info(efx, drv, efx->net_dev,
5357 "efx_ef10_filter_insert failed rc=%d\n",
5358 rc);
5359 /* Fall back to promiscuous */
5360 for (j = 0; j < i; j++) {
5361 efx_ef10_filter_remove_unsafe(
5362 efx, EFX_FILTER_PRI_AUTO,
5363 ids[j]);
5364 ids[j] = EFX_EF10_FILTER_ID_INVALID;
5366 return rc;
5367 } else {
5368 /* keep invalid ID, and carry on */
5370 } else {
5371 ids[i] = efx_ef10_filter_get_unsafe_id(rc);
5375 if (multicast && rollback) {
5376 /* Also need an Ethernet broadcast filter */
5377 EFX_WARN_ON_PARANOID(vlan->default_filters[EFX_EF10_BCAST] !=
5378 EFX_EF10_FILTER_ID_INVALID);
5379 efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0);
5380 eth_broadcast_addr(baddr);
5381 efx_filter_set_eth_local(&spec, vlan->vid, baddr);
5382 rc = efx_ef10_filter_insert(efx, &spec, true);
5383 if (rc < 0) {
5384 netif_warn(efx, drv, efx->net_dev,
5385 "Broadcast filter insert failed rc=%d\n", rc);
5386 /* Fall back to promiscuous */
5387 for (j = 0; j < i; j++) {
5388 efx_ef10_filter_remove_unsafe(
5389 efx, EFX_FILTER_PRI_AUTO,
5390 ids[j]);
5391 ids[j] = EFX_EF10_FILTER_ID_INVALID;
5393 return rc;
5394 } else {
5395 vlan->default_filters[EFX_EF10_BCAST] =
5396 efx_ef10_filter_get_unsafe_id(rc);
5400 return 0;
5403 static int efx_ef10_filter_insert_def(struct efx_nic *efx,
5404 struct efx_ef10_filter_vlan *vlan,
5405 enum efx_encap_type encap_type,
5406 bool multicast, bool rollback)
5408 struct efx_ef10_nic_data *nic_data = efx->nic_data;
5409 enum efx_filter_flags filter_flags;
5410 struct efx_filter_spec spec;
5411 u8 baddr[ETH_ALEN];
5412 int rc;
5413 u16 *id;
5415 filter_flags = efx_rss_enabled(efx) ? EFX_FILTER_FLAG_RX_RSS : 0;
5417 efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0);
5419 if (multicast)
5420 efx_filter_set_mc_def(&spec);
5421 else
5422 efx_filter_set_uc_def(&spec);
5424 if (encap_type) {
5425 if (nic_data->datapath_caps &
5426 (1 << MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN))
5427 efx_filter_set_encap_type(&spec, encap_type);
5428 else
5429 /* don't insert encap filters on non-supporting
5430 * platforms. ID will be left as INVALID.
5431 */
5432 return 0;
5435 if (vlan->vid != EFX_FILTER_VID_UNSPEC)
5436 efx_filter_set_eth_local(&spec, vlan->vid, NULL);
5438 rc = efx_ef10_filter_insert(efx, &spec, true);
5439 if (rc < 0) {
5440 const char *um = multicast ? "Multicast" : "Unicast";
5441 const char *encap_name = "";
5442 const char *encap_ipv = "";
5444 if ((encap_type & EFX_ENCAP_TYPES_MASK) ==
5445 EFX_ENCAP_TYPE_VXLAN)
5446 encap_name = "VXLAN ";
5447 else if ((encap_type & EFX_ENCAP_TYPES_MASK) ==
5448 EFX_ENCAP_TYPE_NVGRE)
5449 encap_name = "NVGRE ";
5450 else if ((encap_type & EFX_ENCAP_TYPES_MASK) ==
5451 EFX_ENCAP_TYPE_GENEVE)
5452 encap_name = "GENEVE ";
5453 if (encap_type & EFX_ENCAP_FLAG_IPV6)
5454 encap_ipv = "IPv6 ";
5455 else if (encap_type)
5456 encap_ipv = "IPv4 ";
5458 /* unprivileged functions can't insert mismatch filters
5459 * for encapsulated or unicast traffic, so downgrade
5460 * those warnings to debug.
5461 */
5462 netif_cond_dbg(efx, drv, efx->net_dev,
5463 rc == -EPERM && (encap_type || !multicast), warn,
5464 "%s%s%s mismatch filter insert failed rc=%d\n",
5465 encap_name, encap_ipv, um, rc);
5466 } else if (multicast) {
5467 /* mapping from encap types to default filter IDs (multicast) */
5468 static enum efx_ef10_default_filters map[] = {
5469 [EFX_ENCAP_TYPE_NONE] = EFX_EF10_MCDEF,
5470 [EFX_ENCAP_TYPE_VXLAN] = EFX_EF10_VXLAN4_MCDEF,
5471 [EFX_ENCAP_TYPE_NVGRE] = EFX_EF10_NVGRE4_MCDEF,
5472 [EFX_ENCAP_TYPE_GENEVE] = EFX_EF10_GENEVE4_MCDEF,
5473 [EFX_ENCAP_TYPE_VXLAN | EFX_ENCAP_FLAG_IPV6] =
5474 EFX_EF10_VXLAN6_MCDEF,
5475 [EFX_ENCAP_TYPE_NVGRE | EFX_ENCAP_FLAG_IPV6] =
5476 EFX_EF10_NVGRE6_MCDEF,
5477 [EFX_ENCAP_TYPE_GENEVE | EFX_ENCAP_FLAG_IPV6] =
5478 EFX_EF10_GENEVE6_MCDEF,
5481 /* quick bounds check (BCAST result impossible) */
5482 BUILD_BUG_ON(EFX_EF10_BCAST != 0);
5483 if (encap_type >= ARRAY_SIZE(map) || map[encap_type] == 0) {
5484 WARN_ON(1);
5485 return -EINVAL;
5487 /* then follow map */
5488 id = &vlan->default_filters[map[encap_type]];
5490 EFX_WARN_ON_PARANOID(*id != EFX_EF10_FILTER_ID_INVALID);
5491 *id = efx_ef10_filter_get_unsafe_id(rc);
5492 if (!nic_data->workaround_26807 && !encap_type) {
5493 /* Also need an Ethernet broadcast filter */
5494 efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO,
5495 filter_flags, 0);
5496 eth_broadcast_addr(baddr);
5497 efx_filter_set_eth_local(&spec, vlan->vid, baddr);
5498 rc = efx_ef10_filter_insert(efx, &spec, true);
5499 if (rc < 0) {
5500 netif_warn(efx, drv, efx->net_dev,
5501 "Broadcast filter insert failed rc=%d\n",
5502 rc);
5503 if (rollback) {
5504 /* Roll back the mc_def filter */
5505 efx_ef10_filter_remove_unsafe(
5506 efx, EFX_FILTER_PRI_AUTO,
5507 *id);
5508 *id = EFX_EF10_FILTER_ID_INVALID;
5509 return rc;
5511 } else {
5512 EFX_WARN_ON_PARANOID(
5513 vlan->default_filters[EFX_EF10_BCAST] !=
5514 EFX_EF10_FILTER_ID_INVALID);
5515 vlan->default_filters[EFX_EF10_BCAST] =
5516 efx_ef10_filter_get_unsafe_id(rc);
5519 rc = 0;
5520 } else {
5521 /* mapping from encap types to default filter IDs (unicast) */
5522 static enum efx_ef10_default_filters map[] = {
5523 [EFX_ENCAP_TYPE_NONE] = EFX_EF10_UCDEF,
5524 [EFX_ENCAP_TYPE_VXLAN] = EFX_EF10_VXLAN4_UCDEF,
5525 [EFX_ENCAP_TYPE_NVGRE] = EFX_EF10_NVGRE4_UCDEF,
5526 [EFX_ENCAP_TYPE_GENEVE] = EFX_EF10_GENEVE4_UCDEF,
5527 [EFX_ENCAP_TYPE_VXLAN | EFX_ENCAP_FLAG_IPV6] =
5528 EFX_EF10_VXLAN6_UCDEF,
5529 [EFX_ENCAP_TYPE_NVGRE | EFX_ENCAP_FLAG_IPV6] =
5530 EFX_EF10_NVGRE6_UCDEF,
5531 [EFX_ENCAP_TYPE_GENEVE | EFX_ENCAP_FLAG_IPV6] =
5532 EFX_EF10_GENEVE6_UCDEF,
5535 /* quick bounds check (BCAST result impossible) */
5536 BUILD_BUG_ON(EFX_EF10_BCAST != 0);
5537 if (encap_type >= ARRAY_SIZE(map) || map[encap_type] == 0) {
5538 WARN_ON(1);
5539 return -EINVAL;
5541 /* then follow map */
5542 id = &vlan->default_filters[map[encap_type]];
5543 EFX_WARN_ON_PARANOID(*id != EFX_EF10_FILTER_ID_INVALID);
5544 *id = rc;
5545 rc = 0;
5547 return rc;
5548 }
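/* Illustrative sketch, not part of the driver: the lookup idiom behind the
 * "quick bounds check" in efx_ef10_filter_insert_def() above, shown with an
 * abbreviated map.  Because EFX_EF10_BCAST is entry 0 of
 * enum efx_ef10_default_filters and can never be produced by the encap-type
 * mapping, a zero slot in the sparse designated-initializer array doubles as
 * an "unmapped" sentinel.
 */
static int __maybe_unused
efx_ef10_default_filter_idx_sketch(enum efx_encap_type encap_type,
				   enum efx_ef10_default_filters *id_out)
{
	static const enum efx_ef10_default_filters map[] = {
		[EFX_ENCAP_TYPE_NONE] = EFX_EF10_MCDEF,
		[EFX_ENCAP_TYPE_VXLAN] = EFX_EF10_VXLAN4_MCDEF,
		[EFX_ENCAP_TYPE_VXLAN | EFX_ENCAP_FLAG_IPV6] =
			EFX_EF10_VXLAN6_MCDEF,
	};

	BUILD_BUG_ON(EFX_EF10_BCAST != 0);
	if (encap_type >= ARRAY_SIZE(map) || map[encap_type] == 0)
		return -EINVAL;	/* encap type has no default-filter slot */
	*id_out = map[encap_type];
	return 0;
}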
5550 /* Remove filters that weren't renewed. Since nothing else changes the AUTO_OLD
5551 * flag or removes these filters, we don't need to hold the filter_lock while
5552 * scanning for these filters. The overall mark-and-sweep cycle is sketched after this function.
5554 static void efx_ef10_filter_remove_old(struct efx_nic *efx)
5556 struct efx_ef10_filter_table *table = efx->filter_state;
5557 int remove_failed = 0;
5558 int remove_noent = 0;
5559 int rc;
5560 int i;
5562 for (i = 0; i < HUNT_FILTER_TBL_ROWS; i++) {
5563 if (READ_ONCE(table->entry[i].spec) &
5564 EFX_EF10_FILTER_FLAG_AUTO_OLD) {
5565 rc = efx_ef10_filter_remove_internal(efx,
5566 1U << EFX_FILTER_PRI_AUTO, i, true);
5567 if (rc == -ENOENT)
5568 remove_noent++;
5569 else if (rc)
5570 remove_failed++;
5574 if (remove_failed)
5575 netif_info(efx, drv, efx->net_dev,
5576 "%s: failed to remove %d filters\n",
5577 __func__, remove_failed);
5578 if (remove_noent)
5579 netif_info(efx, drv, efx->net_dev,
5580 "%s: failed to remove %d non-existent filters\n",
5581 __func__, remove_noent);
5582 }
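/* Illustrative sketch, not part of the driver: the mark-and-sweep cycle that
 * the AUTO_OLD flag supports.  efx_ef10_filter_mark_old() (defined earlier in
 * this file) tags every automatic filter, re-inserting a still-wanted filter
 * clears its tag, and efx_ef10_filter_remove_old() sweeps whatever remains
 * tagged.
 */
static void __maybe_unused
efx_ef10_filter_mark_and_sweep_sketch(struct efx_nic *efx,
				      struct efx_ef10_filter_vlan *vlan)
{
	/* 1. mark all automatic filters as old */
	efx_ef10_filter_mark_old(efx);
	/* 2. renew the filters we still want; renewal clears AUTO_OLD */
	efx_ef10_filter_insert_addr_list(efx, vlan, false, false);
	efx_ef10_filter_insert_addr_list(efx, vlan, true, false);
	/* 3. sweep: anything still marked old was not renewed */
	efx_ef10_filter_remove_old(efx);
}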
5584 static int efx_ef10_vport_set_mac_address(struct efx_nic *efx)
5586 struct efx_ef10_nic_data *nic_data = efx->nic_data;
5587 u8 mac_old[ETH_ALEN];
5588 int rc, rc2;
5590 /* Only reconfigure a PF-created vport */
5591 if (is_zero_ether_addr(nic_data->vport_mac))
5592 return 0;
5594 efx_device_detach_sync(efx);
5595 efx_net_stop(efx->net_dev);
5596 down_write(&efx->filter_sem);
5597 efx_ef10_filter_table_remove(efx);
5598 up_write(&efx->filter_sem);
5600 rc = efx_ef10_vadaptor_free(efx, nic_data->vport_id);
5601 if (rc)
5602 goto restore_filters;
5604 ether_addr_copy(mac_old, nic_data->vport_mac);
5605 rc = efx_ef10_vport_del_mac(efx, nic_data->vport_id,
5606 nic_data->vport_mac);
5607 if (rc)
5608 goto restore_vadaptor;
5610 rc = efx_ef10_vport_add_mac(efx, nic_data->vport_id,
5611 efx->net_dev->dev_addr);
5612 if (!rc) {
5613 ether_addr_copy(nic_data->vport_mac, efx->net_dev->dev_addr);
5614 } else {
5615 rc2 = efx_ef10_vport_add_mac(efx, nic_data->vport_id, mac_old);
5616 if (rc2) {
5617 /* Failed to add original MAC, so clear vport_mac */
5618 eth_zero_addr(nic_data->vport_mac);
5619 goto reset_nic;
5623 restore_vadaptor:
5624 rc2 = efx_ef10_vadaptor_alloc(efx, nic_data->vport_id);
5625 if (rc2)
5626 goto reset_nic;
5627 restore_filters:
5628 down_write(&efx->filter_sem);
5629 rc2 = efx_ef10_filter_table_probe(efx);
5630 up_write(&efx->filter_sem);
5631 if (rc2)
5632 goto reset_nic;
5634 rc2 = efx_net_open(efx->net_dev);
5635 if (rc2)
5636 goto reset_nic;
5638 efx_device_attach_if_not_resetting(efx);
5640 return rc;
5642 reset_nic:
5643 netif_err(efx, drv, efx->net_dev,
5644 "Failed to restore when changing MAC address - scheduling reset\n");
5645 efx_schedule_reset(efx, RESET_TYPE_DATAPATH);
5647 return rc ? rc : rc2;
5650 /* Caller must hold efx->filter_sem for read if a race against
5651 * efx_ef10_filter_table_remove() is possible
5653 static void efx_ef10_filter_vlan_sync_rx_mode(struct efx_nic *efx,
5654 struct efx_ef10_filter_vlan *vlan)
5656 struct efx_ef10_filter_table *table = efx->filter_state;
5657 struct efx_ef10_nic_data *nic_data = efx->nic_data;
5659 /* Do not install the unspecified-VID entry if VLAN filtering is enabled,
5660 * nor specific-VID entries if it is disabled (see the sketch after this function).
5662 if ((vlan->vid == EFX_FILTER_VID_UNSPEC) == table->vlan_filter)
5663 return;
5665 /* Insert/renew unicast filters */
5666 if (table->uc_promisc) {
5667 efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_NONE,
5668 false, false);
5669 efx_ef10_filter_insert_addr_list(efx, vlan, false, false);
5670 } else {
5671 /* If any of the filters failed to insert, fall back to
5672 * promiscuous mode - add in the uc_def filter. But keep
5673 * our individual unicast filters.
5675 if (efx_ef10_filter_insert_addr_list(efx, vlan, false, false))
5676 efx_ef10_filter_insert_def(efx, vlan,
5677 EFX_ENCAP_TYPE_NONE,
5678 false, false);
5680 efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_VXLAN,
5681 false, false);
5682 efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_VXLAN |
5683 EFX_ENCAP_FLAG_IPV6,
5684 false, false);
5685 efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_NVGRE,
5686 false, false);
5687 efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_NVGRE |
5688 EFX_ENCAP_FLAG_IPV6,
5689 false, false);
5690 efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_GENEVE,
5691 false, false);
5692 efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_GENEVE |
5693 EFX_ENCAP_FLAG_IPV6,
5694 false, false);
5696 /* Insert/renew multicast filters */
5697 /* If changing promiscuous state with cascaded multicast filters, remove
5698 * old filters first, so that packets are dropped rather than duplicated
5700 if (nic_data->workaround_26807 &&
5701 table->mc_promisc_last != table->mc_promisc)
5702 efx_ef10_filter_remove_old(efx);
5703 if (table->mc_promisc) {
5704 if (nic_data->workaround_26807) {
5705 /* If we failed to insert promiscuous filters, roll back
5706 * and fall back to individual multicast filters
5708 if (efx_ef10_filter_insert_def(efx, vlan,
5709 EFX_ENCAP_TYPE_NONE,
5710 true, true)) {
5711 /* Changing promisc state, so remove old filters */
5712 efx_ef10_filter_remove_old(efx);
5713 efx_ef10_filter_insert_addr_list(efx, vlan,
5714 true, false);
5716 } else {
5717 /* If we failed to insert promiscuous filters, don't
5718 * roll back. Regardless, also insert the mc_list,
5719 * unless it's incomplete due to overflow
5721 efx_ef10_filter_insert_def(efx, vlan,
5722 EFX_ENCAP_TYPE_NONE,
5723 true, false);
5724 if (!table->mc_overflow)
5725 efx_ef10_filter_insert_addr_list(efx, vlan,
5726 true, false);
5728 } else {
5729 /* If any filters failed to insert, rollback and fall back to
5730 * promiscuous mode - mc_def filter and maybe broadcast. If
5731 * that fails, roll back again and insert as many of our
5732 * individual multicast filters as we can.
5734 if (efx_ef10_filter_insert_addr_list(efx, vlan, true, true)) {
5735 /* Changing promisc state, so remove old filters */
5736 if (nic_data->workaround_26807)
5737 efx_ef10_filter_remove_old(efx);
5738 if (efx_ef10_filter_insert_def(efx, vlan,
5739 EFX_ENCAP_TYPE_NONE,
5740 true, true))
5741 efx_ef10_filter_insert_addr_list(efx, vlan,
5742 true, false);
5745 efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_VXLAN,
5746 true, false);
5747 efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_VXLAN |
5748 EFX_ENCAP_FLAG_IPV6,
5749 true, false);
5750 efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_NVGRE,
5751 true, false);
5752 efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_NVGRE |
5753 EFX_ENCAP_FLAG_IPV6,
5754 true, false);
5755 efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_GENEVE,
5756 true, false);
5757 efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_GENEVE |
5758 EFX_ENCAP_FLAG_IPV6,
5759 true, false);
5760 }
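/* Illustrative sketch, not part of the driver: the skip test at the top of
 * efx_ef10_filter_vlan_sync_rx_mode() above.  With VLAN filtering enabled
 * only specific-VID entries want filters; with it disabled only the
 * catch-all EFX_FILTER_VID_UNSPEC entry does.
 */
static bool __maybe_unused
efx_ef10_filter_vlan_wanted_sketch(bool vlan_filter, u16 vid)
{
	bool is_unspec = (vid == EFX_FILTER_VID_UNSPEC);

	/* mirrors the early return above, which skips when
	 * is_unspec == vlan_filter
	 */
	return is_unspec != vlan_filter;
}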
5762 /* Caller must hold efx->filter_sem for read if a race against
5763 * efx_ef10_filter_table_remove() is possible
5765 static void efx_ef10_filter_sync_rx_mode(struct efx_nic *efx)
5767 struct efx_ef10_filter_table *table = efx->filter_state;
5768 struct net_device *net_dev = efx->net_dev;
5769 struct efx_ef10_filter_vlan *vlan;
5770 bool vlan_filter;
5772 if (!efx_dev_registered(efx))
5773 return;
5775 if (!table)
5776 return;
5778 efx_ef10_filter_mark_old(efx);
5780 /* Copy/convert the address lists; add the primary station
5781 * address and broadcast address
5783 netif_addr_lock_bh(net_dev);
5784 efx_ef10_filter_uc_addr_list(efx);
5785 efx_ef10_filter_mc_addr_list(efx);
5786 netif_addr_unlock_bh(net_dev);
5788 /* If VLAN filtering changes, all old filters are eventually removed.
5789 * Do it in advance here to avoid conflicts between untagged unicast
5790 * and VLAN 0 tagged filters.
5792 vlan_filter = !!(net_dev->features & NETIF_F_HW_VLAN_CTAG_FILTER);
5793 if (table->vlan_filter != vlan_filter) {
5794 table->vlan_filter = vlan_filter;
5795 efx_ef10_filter_remove_old(efx);
5798 list_for_each_entry(vlan, &table->vlan_list, list)
5799 efx_ef10_filter_vlan_sync_rx_mode(efx, vlan);
5801 efx_ef10_filter_remove_old(efx);
5802 table->mc_promisc_last = table->mc_promisc;
5803 }
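/* Illustrative sketch, not part of the driver: how a caller is expected to
 * protect efx_ef10_filter_sync_rx_mode() against a concurrent
 * efx_ef10_filter_table_remove(), per the locking comment above.
 */
static void __maybe_unused
efx_ef10_filter_sync_rx_mode_locked_sketch(struct efx_nic *efx)
{
	down_read(&efx->filter_sem);
	efx_ef10_filter_sync_rx_mode(efx);
	up_read(&efx->filter_sem);
}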
5805 static struct efx_ef10_filter_vlan *efx_ef10_filter_find_vlan(struct efx_nic *efx, u16 vid)
5807 struct efx_ef10_filter_table *table = efx->filter_state;
5808 struct efx_ef10_filter_vlan *vlan;
5810 WARN_ON(!rwsem_is_locked(&efx->filter_sem));
5812 list_for_each_entry(vlan, &table->vlan_list, list) {
5813 if (vlan->vid == vid)
5814 return vlan;
5817 return NULL;
5820 static int efx_ef10_filter_add_vlan(struct efx_nic *efx, u16 vid)
5822 struct efx_ef10_filter_table *table = efx->filter_state;
5823 struct efx_ef10_filter_vlan *vlan;
5824 unsigned int i;
5826 if (!efx_rwsem_assert_write_locked(&efx->filter_sem))
5827 return -EINVAL;
5829 vlan = efx_ef10_filter_find_vlan(efx, vid);
5830 if (WARN_ON(vlan)) {
5831 netif_err(efx, drv, efx->net_dev,
5832 "VLAN %u already added\n", vid);
5833 return -EALREADY;
5836 vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
5837 if (!vlan)
5838 return -ENOMEM;
5840 vlan->vid = vid;
5842 for (i = 0; i < ARRAY_SIZE(vlan->uc); i++)
5843 vlan->uc[i] = EFX_EF10_FILTER_ID_INVALID;
5844 for (i = 0; i < ARRAY_SIZE(vlan->mc); i++)
5845 vlan->mc[i] = EFX_EF10_FILTER_ID_INVALID;
5846 for (i = 0; i < EFX_EF10_NUM_DEFAULT_FILTERS; i++)
5847 vlan->default_filters[i] = EFX_EF10_FILTER_ID_INVALID;
5849 list_add_tail(&vlan->list, &table->vlan_list);
5851 if (efx_dev_registered(efx))
5852 efx_ef10_filter_vlan_sync_rx_mode(efx, vlan);
5854 return 0;
5857 static void efx_ef10_filter_del_vlan_internal(struct efx_nic *efx,
5858 struct efx_ef10_filter_vlan *vlan)
5860 unsigned int i;
5862 /* See comment in efx_ef10_filter_table_remove() */
5863 if (!efx_rwsem_assert_write_locked(&efx->filter_sem))
5864 return;
5866 list_del(&vlan->list);
5868 for (i = 0; i < ARRAY_SIZE(vlan->uc); i++)
5869 efx_ef10_filter_remove_unsafe(efx, EFX_FILTER_PRI_AUTO,
5870 vlan->uc[i]);
5871 for (i = 0; i < ARRAY_SIZE(vlan->mc); i++)
5872 efx_ef10_filter_remove_unsafe(efx, EFX_FILTER_PRI_AUTO,
5873 vlan->mc[i]);
5874 for (i = 0; i < EFX_EF10_NUM_DEFAULT_FILTERS; i++)
5875 if (vlan->default_filters[i] != EFX_EF10_FILTER_ID_INVALID)
5876 efx_ef10_filter_remove_unsafe(efx, EFX_FILTER_PRI_AUTO,
5877 vlan->default_filters[i]);
5879 kfree(vlan);
5882 static void efx_ef10_filter_del_vlan(struct efx_nic *efx, u16 vid)
5884 struct efx_ef10_filter_vlan *vlan;
5886 /* See comment in efx_ef10_filter_table_remove() */
5887 if (!efx_rwsem_assert_write_locked(&efx->filter_sem))
5888 return;
5890 vlan = efx_ef10_filter_find_vlan(efx, vid);
5891 if (!vlan) {
5892 netif_err(efx, drv, efx->net_dev,
5893 "VLAN %u not found in filter state\n", vid);
5894 return;
5897 efx_ef10_filter_del_vlan_internal(efx, vlan);
5898 }
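/* Illustrative sketch, not part of the driver: adding and removing a
 * per-VLAN filter context under the write lock that
 * efx_ef10_filter_add_vlan() and efx_ef10_filter_del_vlan() assert above.
 */
static int __maybe_unused
efx_ef10_filter_vlan_roundtrip_sketch(struct efx_nic *efx, u16 vid)
{
	int rc;

	down_write(&efx->filter_sem);
	rc = efx_ef10_filter_add_vlan(efx, vid);
	if (!rc)
		efx_ef10_filter_del_vlan(efx, vid);
	up_write(&efx->filter_sem);
	return rc;
}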
5900 static int efx_ef10_set_mac_address(struct efx_nic *efx)
5902 MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_SET_MAC_IN_LEN);
5903 struct efx_ef10_nic_data *nic_data = efx->nic_data;
5904 bool was_enabled = efx->port_enabled;
5905 int rc;
5907 efx_device_detach_sync(efx);
5908 efx_net_stop(efx->net_dev);
5910 mutex_lock(&efx->mac_lock);
5911 down_write(&efx->filter_sem);
5912 efx_ef10_filter_table_remove(efx);
5914 ether_addr_copy(MCDI_PTR(inbuf, VADAPTOR_SET_MAC_IN_MACADDR),
5915 efx->net_dev->dev_addr);
5916 MCDI_SET_DWORD(inbuf, VADAPTOR_SET_MAC_IN_UPSTREAM_PORT_ID,
5917 nic_data->vport_id);
5918 rc = efx_mcdi_rpc_quiet(efx, MC_CMD_VADAPTOR_SET_MAC, inbuf,
5919 sizeof(inbuf), NULL, 0, NULL);
5921 efx_ef10_filter_table_probe(efx);
5922 up_write(&efx->filter_sem);
5923 mutex_unlock(&efx->mac_lock);
5925 if (was_enabled)
5926 efx_net_open(efx->net_dev);
5927 efx_device_attach_if_not_resetting(efx);
5929 #ifdef CONFIG_SFC_SRIOV
5930 if (efx->pci_dev->is_virtfn && efx->pci_dev->physfn) {
5931 struct pci_dev *pci_dev_pf = efx->pci_dev->physfn;
5933 if (rc == -EPERM) {
5934 struct efx_nic *efx_pf;
5936 /* Switch to PF and change MAC address on vport */
5937 efx_pf = pci_get_drvdata(pci_dev_pf);
5939 rc = efx_ef10_sriov_set_vf_mac(efx_pf,
5940 nic_data->vf_index,
5941 efx->net_dev->dev_addr);
5942 } else if (!rc) {
5943 struct efx_nic *efx_pf = pci_get_drvdata(pci_dev_pf);
5944 struct efx_ef10_nic_data *nic_data = efx_pf->nic_data;
5945 unsigned int i;
5947 /* MAC address successfully changed by VF (with MAC
5948 * spoofing) so update the parent PF if possible.
5950 for (i = 0; i < efx_pf->vf_count; ++i) {
5951 struct ef10_vf *vf = nic_data->vf + i;
5953 if (vf->efx == efx) {
5954 ether_addr_copy(vf->mac,
5955 efx->net_dev->dev_addr);
5956 return 0;
5960 } else
5961 #endif
5962 if (rc == -EPERM) {
5963 netif_err(efx, drv, efx->net_dev,
5964 "Cannot change MAC address; use sfboot to enable"
5965 " mac-spoofing on this interface\n");
5966 } else if (rc == -ENOSYS && !efx_ef10_is_vf(efx)) {
5967 /* If the active MCFW does not support MC_CMD_VADAPTOR_SET_MAC
5968 * fall-back to the method of changing the MAC address on the
5969 * vport. This only applies to PFs because such versions of
5970 * MCFW do not support VFs.
5972 rc = efx_ef10_vport_set_mac_address(efx);
5973 } else if (rc) {
5974 efx_mcdi_display_error(efx, MC_CMD_VADAPTOR_SET_MAC,
5975 sizeof(inbuf), NULL, 0, rc);
5978 return rc;
5979 }
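/* Illustrative sketch, not part of the driver: the bare MCDI request pattern
 * that efx_ef10_set_mac_address() above wraps - declare an input buffer
 * sized for the command, fill in the fields, then issue the RPC quietly so
 * the caller decides how to report any error.
 */
static int __maybe_unused
efx_ef10_vadaptor_set_mac_sketch(struct efx_nic *efx, const u8 *mac,
				 unsigned int port_id)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_SET_MAC_IN_LEN);

	ether_addr_copy(MCDI_PTR(inbuf, VADAPTOR_SET_MAC_IN_MACADDR), mac);
	MCDI_SET_DWORD(inbuf, VADAPTOR_SET_MAC_IN_UPSTREAM_PORT_ID, port_id);
	return efx_mcdi_rpc_quiet(efx, MC_CMD_VADAPTOR_SET_MAC, inbuf,
				  sizeof(inbuf), NULL, 0, NULL);
}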
5981 static int efx_ef10_mac_reconfigure(struct efx_nic *efx)
5983 efx_ef10_filter_sync_rx_mode(efx);
5985 return efx_mcdi_set_mac(efx);
5988 static int efx_ef10_mac_reconfigure_vf(struct efx_nic *efx)
5990 efx_ef10_filter_sync_rx_mode(efx);
5992 return 0;
5995 static int efx_ef10_start_bist(struct efx_nic *efx, u32 bist_type)
5997 MCDI_DECLARE_BUF(inbuf, MC_CMD_START_BIST_IN_LEN);
5999 MCDI_SET_DWORD(inbuf, START_BIST_IN_TYPE, bist_type);
6000 return efx_mcdi_rpc(efx, MC_CMD_START_BIST, inbuf, sizeof(inbuf),
6001 NULL, 0, NULL);
6004 /* MC BISTs follow a different poll mechanism from PHY BISTs.
6005 * The BIST is run in the poll handler on the MC, and the MCDI command
6006 * blocks until the BIST completes.
6008 static int efx_ef10_poll_bist(struct efx_nic *efx)
6010 int rc;
6011 MCDI_DECLARE_BUF(outbuf, MC_CMD_POLL_BIST_OUT_LEN);
6012 size_t outlen;
6013 u32 result;
6015 rc = efx_mcdi_rpc(efx, MC_CMD_POLL_BIST, NULL, 0,
6016 outbuf, sizeof(outbuf), &outlen);
6017 if (rc != 0)
6018 return rc;
6020 if (outlen < MC_CMD_POLL_BIST_OUT_LEN)
6021 return -EIO;
6023 result = MCDI_DWORD(outbuf, POLL_BIST_OUT_RESULT);
6024 switch (result) {
6025 case MC_CMD_POLL_BIST_PASSED:
6026 netif_dbg(efx, hw, efx->net_dev, "BIST passed.\n");
6027 return 0;
6028 case MC_CMD_POLL_BIST_TIMEOUT:
6029 netif_err(efx, hw, efx->net_dev, "BIST timed out\n");
6030 return -EIO;
6031 case MC_CMD_POLL_BIST_FAILED:
6032 netif_err(efx, hw, efx->net_dev, "BIST failed.\n");
6033 return -EIO;
6034 default:
6035 netif_err(efx, hw, efx->net_dev,
6036 "BIST returned unknown result %u\n", result);
6037 return -EIO;
6041 static int efx_ef10_run_bist(struct efx_nic *efx, u32 bist_type)
6043 int rc;
6045 netif_dbg(efx, drv, efx->net_dev, "starting BIST type %u\n", bist_type);
6047 rc = efx_ef10_start_bist(efx, bist_type);
6048 if (rc != 0)
6049 return rc;
6051 return efx_ef10_poll_bist(efx);
6054 static int
6055 efx_ef10_test_chip(struct efx_nic *efx, struct efx_self_tests *tests)
6057 int rc, rc2;
6059 efx_reset_down(efx, RESET_TYPE_WORLD);
6061 rc = efx_mcdi_rpc(efx, MC_CMD_ENABLE_OFFLINE_BIST,
6062 NULL, 0, NULL, 0, NULL);
6063 if (rc != 0)
6064 goto out;
6066 tests->memory = efx_ef10_run_bist(efx, MC_CMD_MC_MEM_BIST) ? -1 : 1;
6067 tests->registers = efx_ef10_run_bist(efx, MC_CMD_REG_BIST) ? -1 : 1;
6069 rc = efx_mcdi_reset(efx, RESET_TYPE_WORLD);
6071 out:
6072 if (rc == -EPERM)
6073 rc = 0;
6074 rc2 = efx_reset_up(efx, RESET_TYPE_WORLD, rc == 0);
6075 return rc ? rc : rc2;
6076 }
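/* Illustrative sketch, not part of the driver: the result convention used by
 * efx_ef10_test_chip() above, where each self-test entry is set to 1 on pass
 * and -1 on fail (and stays 0 if the test never ran).
 */
static bool __maybe_unused
efx_ef10_bist_results_ok_sketch(const struct efx_self_tests *tests)
{
	return tests->memory == 1 && tests->registers == 1;
}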
6078 #ifdef CONFIG_SFC_MTD
6080 struct efx_ef10_nvram_type_info {
6081 u16 type, type_mask;
6082 u8 port;
6083 const char *name;
6086 static const struct efx_ef10_nvram_type_info efx_ef10_nvram_types[] = {
6087 { NVRAM_PARTITION_TYPE_MC_FIRMWARE, 0, 0, "sfc_mcfw" },
6088 { NVRAM_PARTITION_TYPE_MC_FIRMWARE_BACKUP, 0, 0, "sfc_mcfw_backup" },
6089 { NVRAM_PARTITION_TYPE_EXPANSION_ROM, 0, 0, "sfc_exp_rom" },
6090 { NVRAM_PARTITION_TYPE_STATIC_CONFIG, 0, 0, "sfc_static_cfg" },
6091 { NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG, 0, 0, "sfc_dynamic_cfg" },
6092 { NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT0, 0, 0, "sfc_exp_rom_cfg" },
6093 { NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT1, 0, 1, "sfc_exp_rom_cfg" },
6094 { NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT2, 0, 2, "sfc_exp_rom_cfg" },
6095 { NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT3, 0, 3, "sfc_exp_rom_cfg" },
6096 { NVRAM_PARTITION_TYPE_LICENSE, 0, 0, "sfc_license" },
6097 { NVRAM_PARTITION_TYPE_PHY_MIN, 0xff, 0, "sfc_phy_fw" },
6100 static int efx_ef10_mtd_probe_partition(struct efx_nic *efx,
6101 struct efx_mcdi_mtd_partition *part,
6102 unsigned int type)
6104 MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_METADATA_IN_LEN);
6105 MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_METADATA_OUT_LENMAX);
6106 const struct efx_ef10_nvram_type_info *info;
6107 size_t size, erase_size, outlen;
6108 bool protected;
6109 int rc;
6111 for (info = efx_ef10_nvram_types; ; info++) {
6112 if (info ==
6113 efx_ef10_nvram_types + ARRAY_SIZE(efx_ef10_nvram_types))
6114 return -ENODEV;
6115 if ((type & ~info->type_mask) == info->type)
6116 break;
6118 if (info->port != efx_port_num(efx))
6119 return -ENODEV;
6121 rc = efx_mcdi_nvram_info(efx, type, &size, &erase_size, &protected);
6122 if (rc)
6123 return rc;
6124 if (protected)
6125 return -ENODEV; /* hide it */
6127 part->nvram_type = type;
6129 MCDI_SET_DWORD(inbuf, NVRAM_METADATA_IN_TYPE, type);
6130 rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_METADATA, inbuf, sizeof(inbuf),
6131 outbuf, sizeof(outbuf), &outlen);
6132 if (rc)
6133 return rc;
6134 if (outlen < MC_CMD_NVRAM_METADATA_OUT_LENMIN)
6135 return -EIO;
6136 if (MCDI_DWORD(outbuf, NVRAM_METADATA_OUT_FLAGS) &
6137 (1 << MC_CMD_NVRAM_METADATA_OUT_SUBTYPE_VALID_LBN))
6138 part->fw_subtype = MCDI_DWORD(outbuf,
6139 NVRAM_METADATA_OUT_SUBTYPE);
6141 part->common.dev_type_name = "EF10 NVRAM manager";
6142 part->common.type_name = info->name;
6144 part->common.mtd.type = MTD_NORFLASH;
6145 part->common.mtd.flags = MTD_CAP_NORFLASH;
6146 part->common.mtd.size = size;
6147 part->common.mtd.erasesize = erase_size;
6149 return 0;
6150 }
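/* Illustrative sketch, not part of the driver: the type/type_mask match
 * performed by efx_ef10_mtd_probe_partition() above.  A mask of 0 demands an
 * exact type match, while the 0xff mask on NVRAM_PARTITION_TYPE_PHY_MIN
 * folds the whole range of PHY partition types onto the single "sfc_phy_fw"
 * entry.
 */
static bool __maybe_unused
efx_ef10_nvram_type_matches_sketch(const struct efx_ef10_nvram_type_info *info,
				   unsigned int type)
{
	return (type & ~info->type_mask) == info->type;
}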
6152 static int efx_ef10_mtd_probe(struct efx_nic *efx)
6154 MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_PARTITIONS_OUT_LENMAX);
6155 struct efx_mcdi_mtd_partition *parts;
6156 size_t outlen, n_parts_total, i, n_parts;
6157 unsigned int type;
6158 int rc;
6160 ASSERT_RTNL();
6162 BUILD_BUG_ON(MC_CMD_NVRAM_PARTITIONS_IN_LEN != 0);
6163 rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_PARTITIONS, NULL, 0,
6164 outbuf, sizeof(outbuf), &outlen);
6165 if (rc)
6166 return rc;
6167 if (outlen < MC_CMD_NVRAM_PARTITIONS_OUT_LENMIN)
6168 return -EIO;
6170 n_parts_total = MCDI_DWORD(outbuf, NVRAM_PARTITIONS_OUT_NUM_PARTITIONS);
6171 if (n_parts_total >
6172 MCDI_VAR_ARRAY_LEN(outlen, NVRAM_PARTITIONS_OUT_TYPE_ID))
6173 return -EIO;
6175 parts = kcalloc(n_parts_total, sizeof(*parts), GFP_KERNEL);
6176 if (!parts)
6177 return -ENOMEM;
6179 n_parts = 0;
6180 for (i = 0; i < n_parts_total; i++) {
6181 type = MCDI_ARRAY_DWORD(outbuf, NVRAM_PARTITIONS_OUT_TYPE_ID,
6183 rc = efx_ef10_mtd_probe_partition(efx, &parts[n_parts], type);
6184 if (rc == 0)
6185 n_parts++;
6186 else if (rc != -ENODEV)
6187 goto fail;
6190 rc = efx_mtd_add(efx, &parts[0].common, n_parts, sizeof(*parts));
6191 fail:
6192 if (rc)
6193 kfree(parts);
6194 return rc;
6197 #endif /* CONFIG_SFC_MTD */
6199 static void efx_ef10_ptp_write_host_time(struct efx_nic *efx, u32 host_time)
6201 _efx_writed(efx, cpu_to_le32(host_time), ER_DZ_MC_DB_LWRD);
6204 static void efx_ef10_ptp_write_host_time_vf(struct efx_nic *efx,
6205 u32 host_time) {}
6207 static int efx_ef10_rx_enable_timestamping(struct efx_channel *channel,
6208 bool temp)
6210 MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_LEN);
6211 int rc;
6213 if (channel->sync_events_state == SYNC_EVENTS_REQUESTED ||
6214 channel->sync_events_state == SYNC_EVENTS_VALID ||
6215 (temp && channel->sync_events_state == SYNC_EVENTS_DISABLED))
6216 return 0;
6217 channel->sync_events_state = SYNC_EVENTS_REQUESTED;
6219 MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_TIME_EVENT_SUBSCRIBE);
6220 MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
6221 MCDI_SET_DWORD(inbuf, PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE,
6222 channel->channel);
6224 rc = efx_mcdi_rpc(channel->efx, MC_CMD_PTP,
6225 inbuf, sizeof(inbuf), NULL, 0, NULL);
6227 if (rc != 0)
6228 channel->sync_events_state = temp ? SYNC_EVENTS_QUIESCENT :
6229 SYNC_EVENTS_DISABLED;
6231 return rc;
6234 static int efx_ef10_rx_disable_timestamping(struct efx_channel *channel,
6235 bool temp)
6237 MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_LEN);
6238 int rc;
6240 if (channel->sync_events_state == SYNC_EVENTS_DISABLED ||
6241 (temp && channel->sync_events_state == SYNC_EVENTS_QUIESCENT))
6242 return 0;
6243 if (channel->sync_events_state == SYNC_EVENTS_QUIESCENT) {
6244 channel->sync_events_state = SYNC_EVENTS_DISABLED;
6245 return 0;
6247 channel->sync_events_state = temp ? SYNC_EVENTS_QUIESCENT :
6248 SYNC_EVENTS_DISABLED;
6250 MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_TIME_EVENT_UNSUBSCRIBE);
6251 MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
6252 MCDI_SET_DWORD(inbuf, PTP_IN_TIME_EVENT_UNSUBSCRIBE_CONTROL,
6253 MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_SINGLE);
6254 MCDI_SET_DWORD(inbuf, PTP_IN_TIME_EVENT_UNSUBSCRIBE_QUEUE,
6255 channel->channel);
6257 rc = efx_mcdi_rpc(channel->efx, MC_CMD_PTP,
6258 inbuf, sizeof(inbuf), NULL, 0, NULL);
6260 return rc;
6263 static int efx_ef10_ptp_set_ts_sync_events(struct efx_nic *efx, bool en,
6264 bool temp)
6266 int (*set)(struct efx_channel *channel, bool temp);
6267 struct efx_channel *channel;
6269 set = en ?
6270 efx_ef10_rx_enable_timestamping :
6271 efx_ef10_rx_disable_timestamping;
6273 channel = efx_ptp_channel(efx);
6274 if (channel) {
6275 int rc = set(channel, temp);
6276 if (en && rc != 0) {
6277 efx_ef10_ptp_set_ts_sync_events(efx, false, temp);
6278 return rc;
6282 return 0;
6283 }
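/* Illustrative sketch, not part of the driver: the pairing the 'temp'
 * argument of efx_ef10_ptp_set_ts_sync_events() above is meant for.  A
 * temporary disable parks enabled channels in SYNC_EVENTS_QUIESCENT, and the
 * matching temporary enable restores only those channels, leaving fully
 * disabled ones alone.
 */
static int __maybe_unused
efx_ef10_ptp_pause_sync_events_sketch(struct efx_nic *efx)
{
	/* quiesce sync events around a temporary datapath disruption... */
	efx_ef10_ptp_set_ts_sync_events(efx, false, true);
	/* ...then restore whatever was quiesced */
	return efx_ef10_ptp_set_ts_sync_events(efx, true, true);
}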
6285 static int efx_ef10_ptp_set_ts_config_vf(struct efx_nic *efx,
6286 struct hwtstamp_config *init)
6288 return -EOPNOTSUPP;
6291 static int efx_ef10_ptp_set_ts_config(struct efx_nic *efx,
6292 struct hwtstamp_config *init)
6294 int rc;
6296 switch (init->rx_filter) {
6297 case HWTSTAMP_FILTER_NONE:
6298 efx_ef10_ptp_set_ts_sync_events(efx, false, false);
6299 /* if TX timestamping is still requested then leave PTP on */
6300 return efx_ptp_change_mode(efx,
6301 init->tx_type != HWTSTAMP_TX_OFF, 0);
6302 case HWTSTAMP_FILTER_ALL:
6303 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
6304 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
6305 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
6306 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
6307 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
6308 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
6309 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
6310 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
6311 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
6312 case HWTSTAMP_FILTER_PTP_V2_EVENT:
6313 case HWTSTAMP_FILTER_PTP_V2_SYNC:
6314 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
6315 case HWTSTAMP_FILTER_NTP_ALL:
6316 init->rx_filter = HWTSTAMP_FILTER_ALL;
6317 rc = efx_ptp_change_mode(efx, true, 0);
6318 if (!rc)
6319 rc = efx_ef10_ptp_set_ts_sync_events(efx, true, false);
6320 if (rc)
6321 efx_ptp_change_mode(efx, false, 0);
6322 return rc;
6323 default:
6324 return -ERANGE;
6325 }
6326 }
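/* Illustrative sketch, not part of the driver: what the handler above does
 * with a typical request arriving via SIOCSHWTSTAMP.  Any of the accepted
 * PTP rx_filter values is reported back as HWTSTAMP_FILTER_ALL, because the
 * hardware timestamps every received packet.
 */
static int __maybe_unused
efx_ef10_hwtstamp_request_sketch(struct efx_nic *efx)
{
	struct hwtstamp_config init = {
		.tx_type = HWTSTAMP_TX_ON,
		.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
	};
	int rc;

	rc = efx_ef10_ptp_set_ts_config(efx, &init);
	/* on success, init.rx_filter now reads back as HWTSTAMP_FILTER_ALL */
	return rc;
}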
6328 static int efx_ef10_get_phys_port_id(struct efx_nic *efx,
6329 struct netdev_phys_item_id *ppid)
6331 struct efx_ef10_nic_data *nic_data = efx->nic_data;
6333 if (!is_valid_ether_addr(nic_data->port_id))
6334 return -EOPNOTSUPP;
6336 ppid->id_len = ETH_ALEN;
6337 memcpy(ppid->id, nic_data->port_id, ppid->id_len);
6339 return 0;
6342 static int efx_ef10_vlan_rx_add_vid(struct efx_nic *efx, __be16 proto, u16 vid)
6344 if (proto != htons(ETH_P_8021Q))
6345 return -EINVAL;
6347 return efx_ef10_add_vlan(efx, vid);
6350 static int efx_ef10_vlan_rx_kill_vid(struct efx_nic *efx, __be16 proto, u16 vid)
6352 if (proto != htons(ETH_P_8021Q))
6353 return -EINVAL;
6355 return efx_ef10_del_vlan(efx, vid);
6358 /* We rely on the MCDI wiping out our TX rings if it made any changes to the
6359 * ports table, ensuring that any TSO descriptors that were made on a now-
6360 * removed tunnel port will be blown away and won't break things when we try
6361 * to transmit them using the new ports table.
6363 static int efx_ef10_set_udp_tnl_ports(struct efx_nic *efx, bool unloading)
6365 struct efx_ef10_nic_data *nic_data = efx->nic_data;
6366 MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_LENMAX);
6367 MCDI_DECLARE_BUF(outbuf, MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_LEN);
6368 bool will_reset = false;
6369 size_t num_entries = 0;
6370 size_t inlen, outlen;
6371 size_t i;
6372 int rc;
6373 efx_dword_t flags_and_num_entries;
6375 WARN_ON(!mutex_is_locked(&nic_data->udp_tunnels_lock));
6377 nic_data->udp_tunnels_dirty = false;
6379 if (!(nic_data->datapath_caps &
6380 (1 << MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN))) {
6381 efx_device_attach_if_not_resetting(efx);
6382 return 0;
6385 BUILD_BUG_ON(ARRAY_SIZE(nic_data->udp_tunnels) >
6386 MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_ENTRIES_MAXNUM);
6388 for (i = 0; i < ARRAY_SIZE(nic_data->udp_tunnels); ++i) {
6389 if (nic_data->udp_tunnels[i].count &&
6390 nic_data->udp_tunnels[i].port) {
6391 efx_dword_t entry;
6393 EFX_POPULATE_DWORD_2(entry,
6394 TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT,
6395 ntohs(nic_data->udp_tunnels[i].port),
6396 TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL,
6397 nic_data->udp_tunnels[i].type);
6398 *_MCDI_ARRAY_DWORD(inbuf,
6399 SET_TUNNEL_ENCAP_UDP_PORTS_IN_ENTRIES,
6400 num_entries++) = entry;
6404 BUILD_BUG_ON((MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_NUM_ENTRIES_OFST -
6405 MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_FLAGS_OFST) * 8 !=
6406 EFX_WORD_1_LBN);
6407 BUILD_BUG_ON(MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_NUM_ENTRIES_LEN * 8 !=
6408 EFX_WORD_1_WIDTH);
6409 EFX_POPULATE_DWORD_2(flags_and_num_entries,
6410 MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_UNLOADING,
6411 !!unloading,
6412 EFX_WORD_1, num_entries);
6413 *_MCDI_DWORD(inbuf, SET_TUNNEL_ENCAP_UDP_PORTS_IN_FLAGS) =
6414 flags_and_num_entries;
6416 inlen = MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_LEN(num_entries);
6418 rc = efx_mcdi_rpc_quiet(efx, MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS,
6419 inbuf, inlen, outbuf, sizeof(outbuf), &outlen);
6420 if (rc == -EIO) {
6421 /* Most likely the MC rebooted due to another function also
6422 * setting its tunnel port list. Mark the tunnel port list as
6423 * dirty, so it will be pushed upon coming up from the reboot.
6425 nic_data->udp_tunnels_dirty = true;
6426 return 0;
6429 if (rc) {
6430 /* -EPERM is expected on unprivileged functions */
6431 if (rc != -EPERM)
6432 netif_warn(efx, drv, efx->net_dev,
6433 "Unable to set UDP tunnel ports; rc=%d.\n", rc);
6434 } else if (MCDI_DWORD(outbuf, SET_TUNNEL_ENCAP_UDP_PORTS_OUT_FLAGS) &
6435 (1 << MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_RESETTING_LBN)) {
6436 netif_info(efx, drv, efx->net_dev,
6437 "Rebooting MC due to UDP tunnel port list change\n");
6438 will_reset = true;
6439 if (unloading)
6440 /* Delay for the MC reset to complete. This will make
6441 * unloading other functions a bit smoother. This is a
6442 * race, but the other unload will work whichever way
6443 * it goes; this just avoids an unnecessary error
6444 * message.
6446 msleep(100);
6448 if (!will_reset && !unloading) {
6449 /* The caller will have detached, relying on the MC reset to
6450 * trigger a re-attach. Since there won't be an MC reset, we
6451 * have to do the attach ourselves.
6453 efx_device_attach_if_not_resetting(efx);
6456 return rc;
6459 static int efx_ef10_udp_tnl_push_ports(struct efx_nic *efx)
6461 struct efx_ef10_nic_data *nic_data = efx->nic_data;
6462 int rc = 0;
6464 mutex_lock(&nic_data->udp_tunnels_lock);
6465 if (nic_data->udp_tunnels_dirty) {
6466 /* Make sure all TX are stopped while we modify the table, else
6467 * we might race against an efx_features_check().
6469 efx_device_detach_sync(efx);
6470 rc = efx_ef10_set_udp_tnl_ports(efx, false);
6472 mutex_unlock(&nic_data->udp_tunnels_lock);
6473 return rc;
6476 static struct efx_udp_tunnel *__efx_ef10_udp_tnl_lookup_port(struct efx_nic *efx,
6477 __be16 port)
6479 struct efx_ef10_nic_data *nic_data = efx->nic_data;
6480 size_t i;
6482 for (i = 0; i < ARRAY_SIZE(nic_data->udp_tunnels); ++i) {
6483 if (!nic_data->udp_tunnels[i].count)
6484 continue;
6485 if (nic_data->udp_tunnels[i].port == port)
6486 return &nic_data->udp_tunnels[i];
6488 return NULL;
6491 static int efx_ef10_udp_tnl_add_port(struct efx_nic *efx,
6492 struct efx_udp_tunnel tnl)
6494 struct efx_ef10_nic_data *nic_data = efx->nic_data;
6495 struct efx_udp_tunnel *match;
6496 char typebuf[8];
6497 size_t i;
6498 int rc;
6500 if (!(nic_data->datapath_caps &
6501 (1 << MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN)))
6502 return 0;
6504 efx_get_udp_tunnel_type_name(tnl.type, typebuf, sizeof(typebuf));
6505 netif_dbg(efx, drv, efx->net_dev, "Adding UDP tunnel (%s) port %d\n",
6506 typebuf, ntohs(tnl.port));
6508 mutex_lock(&nic_data->udp_tunnels_lock);
6509 /* Make sure all TX are stopped while we add to the table, else we
6510 * might race against an efx_features_check().
6512 efx_device_detach_sync(efx);
6514 match = __efx_ef10_udp_tnl_lookup_port(efx, tnl.port);
6515 if (match != NULL) {
6516 if (match->type == tnl.type) {
6517 netif_dbg(efx, drv, efx->net_dev,
6518 "Referencing existing tunnel entry\n");
6519 match->count++;
6520 /* No need to cause an MCDI update */
6521 rc = 0;
6522 goto unlock_out;
6524 efx_get_udp_tunnel_type_name(match->type,
6525 typebuf, sizeof(typebuf));
6526 netif_dbg(efx, drv, efx->net_dev,
6527 "UDP port %d is already in use by %s\n",
6528 ntohs(tnl.port), typebuf);
6529 rc = -EEXIST;
6530 goto unlock_out;
6533 for (i = 0; i < ARRAY_SIZE(nic_data->udp_tunnels); ++i)
6534 if (!nic_data->udp_tunnels[i].count) {
6535 nic_data->udp_tunnels[i] = tnl;
6536 nic_data->udp_tunnels[i].count = 1;
6537 rc = efx_ef10_set_udp_tnl_ports(efx, false);
6538 goto unlock_out;
6541 netif_dbg(efx, drv, efx->net_dev,
6542 "Unable to add UDP tunnel (%s) port %d; insufficient resources.\n",
6543 typebuf, ntohs(tnl.port));
6545 rc = -ENOMEM;
6547 unlock_out:
6548 mutex_unlock(&nic_data->udp_tunnels_lock);
6549 return rc;
6552 /* Called under the TX lock with the TX queue running, hence no-one can be
6553 * in the middle of updating the UDP tunnels table. However, they could
6554 * have tried and failed the MCDI, in which case they'll have set the dirty
6555 * flag before dropping their locks.
6557 static bool efx_ef10_udp_tnl_has_port(struct efx_nic *efx, __be16 port)
6559 struct efx_ef10_nic_data *nic_data = efx->nic_data;
6561 if (!(nic_data->datapath_caps &
6562 (1 << MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN)))
6563 return false;
6565 if (nic_data->udp_tunnels_dirty)
6566 /* SW table may not match HW state, so just assume we can't
6567 * use any UDP tunnel offloads.
6569 return false;
6571 return __efx_ef10_udp_tnl_lookup_port(efx, port) != NULL;
6574 static int efx_ef10_udp_tnl_del_port(struct efx_nic *efx,
6575 struct efx_udp_tunnel tnl)
6577 struct efx_ef10_nic_data *nic_data = efx->nic_data;
6578 struct efx_udp_tunnel *match;
6579 char typebuf[8];
6580 int rc;
6582 if (!(nic_data->datapath_caps &
6583 (1 << MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN)))
6584 return 0;
6586 efx_get_udp_tunnel_type_name(tnl.type, typebuf, sizeof(typebuf));
6587 netif_dbg(efx, drv, efx->net_dev, "Removing UDP tunnel (%s) port %d\n",
6588 typebuf, ntohs(tnl.port));
6590 mutex_lock(&nic_data->udp_tunnels_lock);
6591 /* Make sure all TX are stopped while we remove from the table, else we
6592 * might race against an efx_features_check().
6594 efx_device_detach_sync(efx);
6596 match = __efx_ef10_udp_tnl_lookup_port(efx, tnl.port);
6597 if (match != NULL) {
6598 if (match->type == tnl.type) {
6599 if (--match->count) {
6600 /* Port is still in use, so nothing to do */
6601 netif_dbg(efx, drv, efx->net_dev,
6602 "UDP tunnel port %d remains active\n",
6603 ntohs(tnl.port));
6604 rc = 0;
6605 goto out_unlock;
6607 rc = efx_ef10_set_udp_tnl_ports(efx, false);
6608 goto out_unlock;
6610 efx_get_udp_tunnel_type_name(match->type,
6611 typebuf, sizeof(typebuf));
6612 netif_warn(efx, drv, efx->net_dev,
6613 "UDP port %d is actually in use by %s, not removing\n",
6614 ntohs(tnl.port), typebuf);
6616 rc = -ENOENT;
6618 out_unlock:
6619 mutex_unlock(&nic_data->udp_tunnels_lock);
6620 return rc;
6621 }
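/* Illustrative sketch, not part of the driver: the reference counting done
 * by the UDP tunnel add/del helpers above.  The protocol constant is the
 * MCDI value assumed to be used for SET_TUNNEL_ENCAP_UDP_PORTS entries, and
 * 4789 is the IANA VXLAN port.
 */
static void __maybe_unused
efx_ef10_udp_tnl_refcount_sketch(struct efx_nic *efx)
{
	struct efx_udp_tunnel vxlan = {
		.type = TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL_VXLAN,
		.port = htons(4789),
	};

	efx_ef10_udp_tnl_add_port(efx, vxlan);	/* count 1: pushes MCDI */
	efx_ef10_udp_tnl_add_port(efx, vxlan);	/* count 2: no MCDI needed */
	efx_ef10_udp_tnl_del_port(efx, vxlan);	/* count 1: still offloaded */
	efx_ef10_udp_tnl_del_port(efx, vxlan);	/* count 0: pushes MCDI */
}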
6623 #define EF10_OFFLOAD_FEATURES \
6624 (NETIF_F_IP_CSUM | \
6625 NETIF_F_HW_VLAN_CTAG_FILTER | \
6626 NETIF_F_IPV6_CSUM | \
6627 NETIF_F_RXHASH | \
6628 NETIF_F_NTUPLE)
6630 const struct efx_nic_type efx_hunt_a0_vf_nic_type = {
6631 .is_vf = true,
6632 .mem_bar = efx_ef10_vf_mem_bar,
6633 .mem_map_size = efx_ef10_mem_map_size,
6634 .probe = efx_ef10_probe_vf,
6635 .remove = efx_ef10_remove,
6636 .dimension_resources = efx_ef10_dimension_resources,
6637 .init = efx_ef10_init_nic,
6638 .fini = efx_port_dummy_op_void,
6639 .map_reset_reason = efx_ef10_map_reset_reason,
6640 .map_reset_flags = efx_ef10_map_reset_flags,
6641 .reset = efx_ef10_reset,
6642 .probe_port = efx_mcdi_port_probe,
6643 .remove_port = efx_mcdi_port_remove,
6644 .fini_dmaq = efx_ef10_fini_dmaq,
6645 .prepare_flr = efx_ef10_prepare_flr,
6646 .finish_flr = efx_port_dummy_op_void,
6647 .describe_stats = efx_ef10_describe_stats,
6648 .update_stats = efx_ef10_update_stats_vf,
6649 .start_stats = efx_port_dummy_op_void,
6650 .pull_stats = efx_port_dummy_op_void,
6651 .stop_stats = efx_port_dummy_op_void,
6652 .set_id_led = efx_mcdi_set_id_led,
6653 .push_irq_moderation = efx_ef10_push_irq_moderation,
6654 .reconfigure_mac = efx_ef10_mac_reconfigure_vf,
6655 .check_mac_fault = efx_mcdi_mac_check_fault,
6656 .reconfigure_port = efx_mcdi_port_reconfigure,
6657 .get_wol = efx_ef10_get_wol_vf,
6658 .set_wol = efx_ef10_set_wol_vf,
6659 .resume_wol = efx_port_dummy_op_void,
6660 .mcdi_request = efx_ef10_mcdi_request,
6661 .mcdi_poll_response = efx_ef10_mcdi_poll_response,
6662 .mcdi_read_response = efx_ef10_mcdi_read_response,
6663 .mcdi_poll_reboot = efx_ef10_mcdi_poll_reboot,
6664 .mcdi_reboot_detected = efx_ef10_mcdi_reboot_detected,
6665 .irq_enable_master = efx_port_dummy_op_void,
6666 .irq_test_generate = efx_ef10_irq_test_generate,
6667 .irq_disable_non_ev = efx_port_dummy_op_void,
6668 .irq_handle_msi = efx_ef10_msi_interrupt,
6669 .irq_handle_legacy = efx_ef10_legacy_interrupt,
6670 .tx_probe = efx_ef10_tx_probe,
6671 .tx_init = efx_ef10_tx_init,
6672 .tx_remove = efx_ef10_tx_remove,
6673 .tx_write = efx_ef10_tx_write,
6674 .tx_limit_len = efx_ef10_tx_limit_len,
6675 .rx_push_rss_config = efx_ef10_vf_rx_push_rss_config,
6676 .rx_pull_rss_config = efx_ef10_rx_pull_rss_config,
6677 .rx_probe = efx_ef10_rx_probe,
6678 .rx_init = efx_ef10_rx_init,
6679 .rx_remove = efx_ef10_rx_remove,
6680 .rx_write = efx_ef10_rx_write,
6681 .rx_defer_refill = efx_ef10_rx_defer_refill,
6682 .ev_probe = efx_ef10_ev_probe,
6683 .ev_init = efx_ef10_ev_init,
6684 .ev_fini = efx_ef10_ev_fini,
6685 .ev_remove = efx_ef10_ev_remove,
6686 .ev_process = efx_ef10_ev_process,
6687 .ev_read_ack = efx_ef10_ev_read_ack,
6688 .ev_test_generate = efx_ef10_ev_test_generate,
6689 .filter_table_probe = efx_ef10_filter_table_probe,
6690 .filter_table_restore = efx_ef10_filter_table_restore,
6691 .filter_table_remove = efx_ef10_filter_table_remove,
6692 .filter_update_rx_scatter = efx_ef10_filter_update_rx_scatter,
6693 .filter_insert = efx_ef10_filter_insert,
6694 .filter_remove_safe = efx_ef10_filter_remove_safe,
6695 .filter_get_safe = efx_ef10_filter_get_safe,
6696 .filter_clear_rx = efx_ef10_filter_clear_rx,
6697 .filter_count_rx_used = efx_ef10_filter_count_rx_used,
6698 .filter_get_rx_id_limit = efx_ef10_filter_get_rx_id_limit,
6699 .filter_get_rx_ids = efx_ef10_filter_get_rx_ids,
6700 #ifdef CONFIG_RFS_ACCEL
6701 .filter_rfs_insert = efx_ef10_filter_rfs_insert,
6702 .filter_rfs_expire_one = efx_ef10_filter_rfs_expire_one,
6703 #endif
6704 #ifdef CONFIG_SFC_MTD
6705 .mtd_probe = efx_port_dummy_op_int,
6706 #endif
6707 .ptp_write_host_time = efx_ef10_ptp_write_host_time_vf,
6708 .ptp_set_ts_config = efx_ef10_ptp_set_ts_config_vf,
6709 .vlan_rx_add_vid = efx_ef10_vlan_rx_add_vid,
6710 .vlan_rx_kill_vid = efx_ef10_vlan_rx_kill_vid,
6711 #ifdef CONFIG_SFC_SRIOV
6712 .vswitching_probe = efx_ef10_vswitching_probe_vf,
6713 .vswitching_restore = efx_ef10_vswitching_restore_vf,
6714 .vswitching_remove = efx_ef10_vswitching_remove_vf,
6715 #endif
6716 .get_mac_address = efx_ef10_get_mac_address_vf,
6717 .set_mac_address = efx_ef10_set_mac_address,
6719 .get_phys_port_id = efx_ef10_get_phys_port_id,
6720 .revision = EFX_REV_HUNT_A0,
6721 .max_dma_mask = DMA_BIT_MASK(ESF_DZ_TX_KER_BUF_ADDR_WIDTH),
6722 .rx_prefix_size = ES_DZ_RX_PREFIX_SIZE,
6723 .rx_hash_offset = ES_DZ_RX_PREFIX_HASH_OFST,
6724 .rx_ts_offset = ES_DZ_RX_PREFIX_TSTAMP_OFST,
6725 .can_rx_scatter = true,
6726 .always_rx_scatter = true,
6727 .min_interrupt_mode = EFX_INT_MODE_MSIX,
6728 .max_interrupt_mode = EFX_INT_MODE_MSIX,
6729 .timer_period_max = 1 << ERF_DD_EVQ_IND_TIMER_VAL_WIDTH,
6730 .offload_features = EF10_OFFLOAD_FEATURES,
6731 .mcdi_max_ver = 2,
6732 .max_rx_ip_filters = HUNT_FILTER_TBL_ROWS,
6733 .hwtstamp_filters = 1 << HWTSTAMP_FILTER_NONE |
6734 1 << HWTSTAMP_FILTER_ALL,
6735 .rx_hash_key_size = 40,
6738 const struct efx_nic_type efx_hunt_a0_nic_type = {
6739 .is_vf = false,
6740 .mem_bar = efx_ef10_pf_mem_bar,
6741 .mem_map_size = efx_ef10_mem_map_size,
6742 .probe = efx_ef10_probe_pf,
6743 .remove = efx_ef10_remove,
6744 .dimension_resources = efx_ef10_dimension_resources,
6745 .init = efx_ef10_init_nic,
6746 .fini = efx_port_dummy_op_void,
6747 .map_reset_reason = efx_ef10_map_reset_reason,
6748 .map_reset_flags = efx_ef10_map_reset_flags,
6749 .reset = efx_ef10_reset,
6750 .probe_port = efx_mcdi_port_probe,
6751 .remove_port = efx_mcdi_port_remove,
6752 .fini_dmaq = efx_ef10_fini_dmaq,
6753 .prepare_flr = efx_ef10_prepare_flr,
6754 .finish_flr = efx_port_dummy_op_void,
6755 .describe_stats = efx_ef10_describe_stats,
6756 .update_stats = efx_ef10_update_stats_pf,
6757 .start_stats = efx_mcdi_mac_start_stats,
6758 .pull_stats = efx_mcdi_mac_pull_stats,
6759 .stop_stats = efx_mcdi_mac_stop_stats,
6760 .set_id_led = efx_mcdi_set_id_led,
6761 .push_irq_moderation = efx_ef10_push_irq_moderation,
6762 .reconfigure_mac = efx_ef10_mac_reconfigure,
6763 .check_mac_fault = efx_mcdi_mac_check_fault,
6764 .reconfigure_port = efx_mcdi_port_reconfigure,
6765 .get_wol = efx_ef10_get_wol,
6766 .set_wol = efx_ef10_set_wol,
6767 .resume_wol = efx_port_dummy_op_void,
6768 .test_chip = efx_ef10_test_chip,
6769 .test_nvram = efx_mcdi_nvram_test_all,
6770 .mcdi_request = efx_ef10_mcdi_request,
6771 .mcdi_poll_response = efx_ef10_mcdi_poll_response,
6772 .mcdi_read_response = efx_ef10_mcdi_read_response,
6773 .mcdi_poll_reboot = efx_ef10_mcdi_poll_reboot,
6774 .mcdi_reboot_detected = efx_ef10_mcdi_reboot_detected,
6775 .irq_enable_master = efx_port_dummy_op_void,
6776 .irq_test_generate = efx_ef10_irq_test_generate,
6777 .irq_disable_non_ev = efx_port_dummy_op_void,
6778 .irq_handle_msi = efx_ef10_msi_interrupt,
6779 .irq_handle_legacy = efx_ef10_legacy_interrupt,
6780 .tx_probe = efx_ef10_tx_probe,
6781 .tx_init = efx_ef10_tx_init,
6782 .tx_remove = efx_ef10_tx_remove,
6783 .tx_write = efx_ef10_tx_write,
6784 .tx_limit_len = efx_ef10_tx_limit_len,
6785 .rx_push_rss_config = efx_ef10_pf_rx_push_rss_config,
6786 .rx_pull_rss_config = efx_ef10_rx_pull_rss_config,
6787 .rx_probe = efx_ef10_rx_probe,
6788 .rx_init = efx_ef10_rx_init,
6789 .rx_remove = efx_ef10_rx_remove,
6790 .rx_write = efx_ef10_rx_write,
6791 .rx_defer_refill = efx_ef10_rx_defer_refill,
6792 .ev_probe = efx_ef10_ev_probe,
6793 .ev_init = efx_ef10_ev_init,
6794 .ev_fini = efx_ef10_ev_fini,
6795 .ev_remove = efx_ef10_ev_remove,
6796 .ev_process = efx_ef10_ev_process,
6797 .ev_read_ack = efx_ef10_ev_read_ack,
6798 .ev_test_generate = efx_ef10_ev_test_generate,
6799 .filter_table_probe = efx_ef10_filter_table_probe,
6800 .filter_table_restore = efx_ef10_filter_table_restore,
6801 .filter_table_remove = efx_ef10_filter_table_remove,
6802 .filter_update_rx_scatter = efx_ef10_filter_update_rx_scatter,
6803 .filter_insert = efx_ef10_filter_insert,
6804 .filter_remove_safe = efx_ef10_filter_remove_safe,
6805 .filter_get_safe = efx_ef10_filter_get_safe,
6806 .filter_clear_rx = efx_ef10_filter_clear_rx,
6807 .filter_count_rx_used = efx_ef10_filter_count_rx_used,
6808 .filter_get_rx_id_limit = efx_ef10_filter_get_rx_id_limit,
6809 .filter_get_rx_ids = efx_ef10_filter_get_rx_ids,
6810 #ifdef CONFIG_RFS_ACCEL
6811 .filter_rfs_insert = efx_ef10_filter_rfs_insert,
6812 .filter_rfs_expire_one = efx_ef10_filter_rfs_expire_one,
6813 #endif
6814 #ifdef CONFIG_SFC_MTD
6815 .mtd_probe = efx_ef10_mtd_probe,
6816 .mtd_rename = efx_mcdi_mtd_rename,
6817 .mtd_read = efx_mcdi_mtd_read,
6818 .mtd_erase = efx_mcdi_mtd_erase,
6819 .mtd_write = efx_mcdi_mtd_write,
6820 .mtd_sync = efx_mcdi_mtd_sync,
6821 #endif
6822 .ptp_write_host_time = efx_ef10_ptp_write_host_time,
6823 .ptp_set_ts_sync_events = efx_ef10_ptp_set_ts_sync_events,
6824 .ptp_set_ts_config = efx_ef10_ptp_set_ts_config,
6825 .vlan_rx_add_vid = efx_ef10_vlan_rx_add_vid,
6826 .vlan_rx_kill_vid = efx_ef10_vlan_rx_kill_vid,
6827 .udp_tnl_push_ports = efx_ef10_udp_tnl_push_ports,
6828 .udp_tnl_add_port = efx_ef10_udp_tnl_add_port,
6829 .udp_tnl_has_port = efx_ef10_udp_tnl_has_port,
6830 .udp_tnl_del_port = efx_ef10_udp_tnl_del_port,
6831 #ifdef CONFIG_SFC_SRIOV
6832 .sriov_configure = efx_ef10_sriov_configure,
6833 .sriov_init = efx_ef10_sriov_init,
6834 .sriov_fini = efx_ef10_sriov_fini,
6835 .sriov_wanted = efx_ef10_sriov_wanted,
6836 .sriov_reset = efx_ef10_sriov_reset,
6837 .sriov_flr = efx_ef10_sriov_flr,
6838 .sriov_set_vf_mac = efx_ef10_sriov_set_vf_mac,
6839 .sriov_set_vf_vlan = efx_ef10_sriov_set_vf_vlan,
6840 .sriov_set_vf_spoofchk = efx_ef10_sriov_set_vf_spoofchk,
6841 .sriov_get_vf_config = efx_ef10_sriov_get_vf_config,
6842 .sriov_set_vf_link_state = efx_ef10_sriov_set_vf_link_state,
6843 .vswitching_probe = efx_ef10_vswitching_probe_pf,
6844 .vswitching_restore = efx_ef10_vswitching_restore_pf,
6845 .vswitching_remove = efx_ef10_vswitching_remove_pf,
6846 #endif
6847 .get_mac_address = efx_ef10_get_mac_address_pf,
6848 .set_mac_address = efx_ef10_set_mac_address,
6849 .tso_versions = efx_ef10_tso_versions,
6851 .get_phys_port_id = efx_ef10_get_phys_port_id,
6852 .revision = EFX_REV_HUNT_A0,
6853 .max_dma_mask = DMA_BIT_MASK(ESF_DZ_TX_KER_BUF_ADDR_WIDTH),
6854 .rx_prefix_size = ES_DZ_RX_PREFIX_SIZE,
6855 .rx_hash_offset = ES_DZ_RX_PREFIX_HASH_OFST,
6856 .rx_ts_offset = ES_DZ_RX_PREFIX_TSTAMP_OFST,
6857 .can_rx_scatter = true,
6858 .always_rx_scatter = true,
6859 .option_descriptors = true,
6860 .min_interrupt_mode = EFX_INT_MODE_LEGACY,
6861 .max_interrupt_mode = EFX_INT_MODE_MSIX,
6862 .timer_period_max = 1 << ERF_DD_EVQ_IND_TIMER_VAL_WIDTH,
6863 .offload_features = EF10_OFFLOAD_FEATURES,
6864 .mcdi_max_ver = 2,
6865 .max_rx_ip_filters = HUNT_FILTER_TBL_ROWS,
6866 .hwtstamp_filters = 1 << HWTSTAMP_FILTER_NONE |
6867 1 << HWTSTAMP_FILTER_ALL,
6868 .rx_hash_key_size = 40,