// SPDX-License-Identifier: GPL-2.0-only
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2019 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include "net_driver.h"
#include "efx.h"
#include "nic.h"
#include "mcdi_functions.h"
#include "mcdi.h"
#include "mcdi_pcol.h"

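/* Free all VIs previously allocated to this function.  The MC returns
 * -EALREADY if there is nothing to free, which is not treated as an error.
 */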
int efx_mcdi_free_vis(struct efx_nic *efx)
{
        MCDI_DECLARE_BUF_ERR(outbuf);
        size_t outlen;
        int rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FREE_VIS, NULL, 0,
                                    outbuf, sizeof(outbuf), &outlen);

        /* -EALREADY means nothing to free, so ignore */
        if (rc == -EALREADY)
                rc = 0;
        if (rc)
                efx_mcdi_display_error(efx, MC_CMD_FREE_VIS, 0, outbuf, outlen,
                                       rc);
        return rc;
}

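/* Ask the MC to allocate between min_vis and max_vis virtual interfaces.
 * On success the base VI number and the number actually allocated are
 * returned through *vi_base and *allocated_vis (when non-NULL).
 */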
int efx_mcdi_alloc_vis(struct efx_nic *efx, unsigned int min_vis,
                       unsigned int max_vis, unsigned int *vi_base,
                       unsigned int *allocated_vis)
{
        MCDI_DECLARE_BUF(outbuf, MC_CMD_ALLOC_VIS_OUT_LEN);
        MCDI_DECLARE_BUF(inbuf, MC_CMD_ALLOC_VIS_IN_LEN);
        size_t outlen;
        int rc;

        MCDI_SET_DWORD(inbuf, ALLOC_VIS_IN_MIN_VI_COUNT, min_vis);
        MCDI_SET_DWORD(inbuf, ALLOC_VIS_IN_MAX_VI_COUNT, max_vis);
        rc = efx_mcdi_rpc(efx, MC_CMD_ALLOC_VIS, inbuf, sizeof(inbuf),
                          outbuf, sizeof(outbuf), &outlen);
        if (rc != 0)
                return rc;

        if (outlen < MC_CMD_ALLOC_VIS_OUT_LEN)
                return -EIO;

        netif_dbg(efx, drv, efx->net_dev, "base VI is A0x%03x\n",
                  MCDI_DWORD(outbuf, ALLOC_VIS_OUT_VI_BASE));

        if (vi_base)
                *vi_base = MCDI_DWORD(outbuf, ALLOC_VIS_OUT_VI_BASE);
        if (allocated_vis)
                *allocated_vis = MCDI_DWORD(outbuf, ALLOC_VIS_OUT_VI_COUNT);
        return 0;
}

int efx_mcdi_ev_probe(struct efx_channel *channel)
{
        return efx_nic_alloc_buffer(channel->efx, &channel->eventq.buf,
                                    (channel->eventq_mask + 1) *
                                    sizeof(efx_qword_t),
                                    GFP_KERNEL);
}

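/* Push an event queue to the MC.  With v2 set the INIT_EVQ_V2 request
 * format is used and the firmware chooses the final queue flags, which are
 * echoed in the response; otherwise the legacy flags (including optional
 * cut-through) are set explicitly.
 */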
int efx_mcdi_ev_init(struct efx_channel *channel, bool v1_cut_thru, bool v2)
{
        MCDI_DECLARE_BUF(inbuf,
                         MC_CMD_INIT_EVQ_V2_IN_LEN(EFX_MAX_EVQ_SIZE * 8 /
                                                   EFX_BUF_SIZE));
        MCDI_DECLARE_BUF(outbuf, MC_CMD_INIT_EVQ_V2_OUT_LEN);
        size_t entries = channel->eventq.buf.len / EFX_BUF_SIZE;
        struct efx_nic *efx = channel->efx;
        size_t inlen, outlen;
        dma_addr_t dma_addr;
        int rc, i;

        /* Fill event queue with all ones (i.e. empty events) */
        memset(channel->eventq.buf.addr, 0xff, channel->eventq.buf.len);

        MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_SIZE, channel->eventq_mask + 1);
        MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_INSTANCE, channel->channel);
        /* INIT_EVQ expects index in vector table, not absolute */
        MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_IRQ_NUM, channel->channel);
        MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_MODE,
                       MC_CMD_INIT_EVQ_IN_TMR_MODE_DIS);
        MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_LOAD, 0);
        MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_RELOAD, 0);
        MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_COUNT_MODE,
                       MC_CMD_INIT_EVQ_IN_COUNT_MODE_DIS);
        MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_COUNT_THRSHLD, 0);

        if (v2) {
                /* Use the new generic approach to specifying event queue
                 * configuration, requesting lower latency or higher throughput.
                 * The options that actually get used appear in the output.
                 */
                MCDI_POPULATE_DWORD_2(inbuf, INIT_EVQ_V2_IN_FLAGS,
                                      INIT_EVQ_V2_IN_FLAG_INTERRUPTING, 1,
                                      INIT_EVQ_V2_IN_FLAG_TYPE,
                                      MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_AUTO);
        } else {
                MCDI_POPULATE_DWORD_4(inbuf, INIT_EVQ_IN_FLAGS,
                                      INIT_EVQ_IN_FLAG_INTERRUPTING, 1,
                                      INIT_EVQ_IN_FLAG_RX_MERGE, 1,
                                      INIT_EVQ_IN_FLAG_TX_MERGE, 1,
                                      INIT_EVQ_IN_FLAG_CUT_THRU, v1_cut_thru);
        }

        dma_addr = channel->eventq.buf.dma_addr;
        for (i = 0; i < entries; ++i) {
                MCDI_SET_ARRAY_QWORD(inbuf, INIT_EVQ_IN_DMA_ADDR, i, dma_addr);
                dma_addr += EFX_BUF_SIZE;
        }

        inlen = MC_CMD_INIT_EVQ_IN_LEN(entries);

        rc = efx_mcdi_rpc(efx, MC_CMD_INIT_EVQ, inbuf, inlen,
                          outbuf, sizeof(outbuf), &outlen);

        if (outlen >= MC_CMD_INIT_EVQ_V2_OUT_LEN)
                netif_dbg(efx, drv, efx->net_dev,
                          "Channel %d using event queue flags %08x\n",
                          channel->channel,
                          MCDI_DWORD(outbuf, INIT_EVQ_V2_OUT_FLAGS));

        return rc;
}

void efx_mcdi_ev_remove(struct efx_channel *channel)
{
        efx_nic_free_buffer(channel->efx, &channel->eventq.buf);
}

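/* Tear down an event queue.  -EALREADY means the queue is already gone
 * (for example after an MC reboot) and is therefore ignored.
 */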
void efx_mcdi_ev_fini(struct efx_channel *channel)
{
        MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_EVQ_IN_LEN);
        MCDI_DECLARE_BUF_ERR(outbuf);
        struct efx_nic *efx = channel->efx;
        size_t outlen;
        int rc;

        MCDI_SET_DWORD(inbuf, FINI_EVQ_IN_INSTANCE, channel->channel);

        rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FINI_EVQ, inbuf, sizeof(inbuf),
                                outbuf, sizeof(outbuf), &outlen);

        if (rc && rc != -EALREADY)
                goto fail;

        return;

fail:
        efx_mcdi_display_error(efx, MC_CMD_FINI_EVQ, MC_CMD_FINI_EVQ_IN_LEN,
                               outbuf, outlen, rc);
}

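/* Push a TX queue to the MC.  Checksum offload flags are derived from the
 * queue type; if the firmware runs out of TSOv2 contexts (-ENOSPC) the
 * request is retried with TSOv2 disabled.
 */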
int efx_mcdi_tx_init(struct efx_tx_queue *tx_queue)
{
        MCDI_DECLARE_BUF(inbuf, MC_CMD_INIT_TXQ_IN_LEN(EFX_MAX_DMAQ_SIZE * 8 /
                                                       EFX_BUF_SIZE));
        bool csum_offload = tx_queue->type & EFX_TXQ_TYPE_OUTER_CSUM;
        bool inner_csum = tx_queue->type & EFX_TXQ_TYPE_INNER_CSUM;
        size_t entries = tx_queue->txd.buf.len / EFX_BUF_SIZE;
        struct efx_channel *channel = tx_queue->channel;
        struct efx_nic *efx = tx_queue->efx;
        dma_addr_t dma_addr;
        size_t inlen;
        int rc, i;

        BUILD_BUG_ON(MC_CMD_INIT_TXQ_OUT_LEN != 0);

        MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_SIZE, tx_queue->ptr_mask + 1);
        MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_TARGET_EVQ, channel->channel);
        MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_LABEL, tx_queue->label);
        MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_INSTANCE, tx_queue->queue);
        MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_OWNER_ID, 0);
        MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_PORT_ID, efx->vport_id);

        dma_addr = tx_queue->txd.buf.dma_addr;

        netif_dbg(efx, hw, efx->net_dev, "pushing TXQ %d. %zu entries (%llx)\n",
                  tx_queue->queue, entries, (u64)dma_addr);

        for (i = 0; i < entries; ++i) {
                MCDI_SET_ARRAY_QWORD(inbuf, INIT_TXQ_IN_DMA_ADDR, i, dma_addr);
                dma_addr += EFX_BUF_SIZE;
        }

        inlen = MC_CMD_INIT_TXQ_IN_LEN(entries);

        do {
                bool tso_v2 = tx_queue->tso_version == 2;

                /* TSOv2 implies IP header checksum offload for TSO frames,
                 * so we can safely disable IP header checksum offload for
                 * everything else. If we don't have TSOv2, then we have to
                 * enable IP header checksum offload, which is strictly
                 * incorrect but better than breaking TSO.
                 */
                MCDI_POPULATE_DWORD_6(inbuf, INIT_TXQ_IN_FLAGS,
                                /* This flag was removed from mcdi_pcol.h for
                                 * the non-_EXT version of INIT_TXQ. However,
                                 * firmware still honours it.
                                 */
                                INIT_TXQ_EXT_IN_FLAG_TSOV2_EN, tso_v2,
                                INIT_TXQ_IN_FLAG_IP_CSUM_DIS, !(csum_offload && tso_v2),
                                INIT_TXQ_IN_FLAG_TCP_CSUM_DIS, !csum_offload,
                                INIT_TXQ_EXT_IN_FLAG_TIMESTAMP, tx_queue->timestamping,
                                INIT_TXQ_IN_FLAG_INNER_IP_CSUM_EN, inner_csum && !tso_v2,
                                INIT_TXQ_IN_FLAG_INNER_TCP_CSUM_EN, inner_csum);

                rc = efx_mcdi_rpc_quiet(efx, MC_CMD_INIT_TXQ, inbuf, inlen,
                                        NULL, 0, NULL);
                if (rc == -ENOSPC && tso_v2) {
                        /* Retry without TSOv2 if we're short on contexts. */
                        tx_queue->tso_version = 0;
                        netif_warn(efx, probe, efx->net_dev,
                                   "TSOv2 context not available to segment in "
                                   "hardware. TCP performance may be reduced.\n");
                } else if (rc) {
                        efx_mcdi_display_error(efx, MC_CMD_INIT_TXQ,
                                               MC_CMD_INIT_TXQ_EXT_IN_LEN,
                                               NULL, 0, rc);
                        goto fail;
                }
        } while (rc);

        return 0;

fail:
        return rc;
}

void efx_mcdi_tx_remove(struct efx_tx_queue *tx_queue)
{
        efx_nic_free_buffer(tx_queue->efx, &tx_queue->txd.buf);
}

void efx_mcdi_tx_fini(struct efx_tx_queue *tx_queue)
{
        MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_TXQ_IN_LEN);
        MCDI_DECLARE_BUF_ERR(outbuf);
        struct efx_nic *efx = tx_queue->efx;
        size_t outlen;
        int rc;

        MCDI_SET_DWORD(inbuf, FINI_TXQ_IN_INSTANCE,
                       tx_queue->queue);

        rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FINI_TXQ, inbuf, sizeof(inbuf),
                                outbuf, sizeof(outbuf), &outlen);

        if (rc && rc != -EALREADY)
                goto fail;

        return;

fail:
        efx_mcdi_display_error(efx, MC_CMD_FINI_TXQ, MC_CMD_FINI_TXQ_IN_LEN,
                               outbuf, outlen, rc);
}

int efx_mcdi_rx_probe(struct efx_rx_queue *rx_queue)
{
        return efx_nic_alloc_buffer(rx_queue->efx, &rx_queue->rxd.buf,
                                    (rx_queue->ptr_mask + 1) *
                                    sizeof(efx_qword_t),
                                    GFP_KERNEL);
}

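/* Push an RX queue to the MC using the INIT_RXQ_V4 request format, which
 * carries an explicit buffer size; that field is only filled in for EF100
 * and left at zero otherwise.
 */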
void efx_mcdi_rx_init(struct efx_rx_queue *rx_queue)
{
        struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
        size_t entries = rx_queue->rxd.buf.len / EFX_BUF_SIZE;
        MCDI_DECLARE_BUF(inbuf, MC_CMD_INIT_RXQ_V4_IN_LEN);
        struct efx_nic *efx = rx_queue->efx;
        unsigned int buffer_size;
        dma_addr_t dma_addr;
        int rc;
        int i;

        BUILD_BUG_ON(MC_CMD_INIT_RXQ_OUT_LEN != 0);

        rx_queue->scatter_n = 0;
        rx_queue->scatter_len = 0;
        if (efx->type->revision == EFX_REV_EF100)
                buffer_size = efx->rx_page_buf_step;
        else
                buffer_size = 0;

        MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_SIZE, rx_queue->ptr_mask + 1);
        MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_TARGET_EVQ, channel->channel);
        MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_LABEL, efx_rx_queue_index(rx_queue));
        MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_INSTANCE,
                       efx_rx_queue_index(rx_queue));
        MCDI_POPULATE_DWORD_2(inbuf, INIT_RXQ_IN_FLAGS,
                              INIT_RXQ_IN_FLAG_PREFIX, 1,
                              INIT_RXQ_IN_FLAG_TIMESTAMP, 1);
        MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_OWNER_ID, 0);
        MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_PORT_ID, efx->vport_id);
        MCDI_SET_DWORD(inbuf, INIT_RXQ_V4_IN_BUFFER_SIZE_BYTES, buffer_size);

        dma_addr = rx_queue->rxd.buf.dma_addr;

        netif_dbg(efx, hw, efx->net_dev, "pushing RXQ %d. %zu entries (%llx)\n",
                  efx_rx_queue_index(rx_queue), entries, (u64)dma_addr);

        for (i = 0; i < entries; ++i) {
                MCDI_SET_ARRAY_QWORD(inbuf, INIT_RXQ_IN_DMA_ADDR, i, dma_addr);
                dma_addr += EFX_BUF_SIZE;
        }

        rc = efx_mcdi_rpc(efx, MC_CMD_INIT_RXQ, inbuf, sizeof(inbuf),
                          NULL, 0, NULL);
        if (rc)
                netdev_WARN(efx->net_dev, "failed to initialise RXQ %d\n",
                            efx_rx_queue_index(rx_queue));
}

void efx_mcdi_rx_remove(struct efx_rx_queue *rx_queue)
{
        efx_nic_free_buffer(rx_queue->efx, &rx_queue->rxd.buf);
}

void efx_mcdi_rx_fini(struct efx_rx_queue *rx_queue)
{
        MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_RXQ_IN_LEN);
        MCDI_DECLARE_BUF_ERR(outbuf);
        struct efx_nic *efx = rx_queue->efx;
        size_t outlen;
        int rc;

        MCDI_SET_DWORD(inbuf, FINI_RXQ_IN_INSTANCE,
                       efx_rx_queue_index(rx_queue));

        rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FINI_RXQ, inbuf, sizeof(inbuf),
                                outbuf, sizeof(outbuf), &outlen);

        if (rc && rc != -EALREADY)
                goto fail;

        return;

fail:
        efx_mcdi_display_error(efx, MC_CMD_FINI_RXQ, MC_CMD_FINI_RXQ_IN_LEN,
                               outbuf, outlen, rc);
}

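/* Flush all TX and RX queues and wait up to EFX_MAX_FLUSH_TIME for the
 * active queue count to drop to zero.  Skipped during EEH recovery; if the
 * MC has just rebooted only the counter needs resetting.
 */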
int efx_fini_dmaq(struct efx_nic *efx)
{
        struct efx_tx_queue *tx_queue;
        struct efx_rx_queue *rx_queue;
        struct efx_channel *channel;
        int pending;

        /* If the MC has just rebooted, the TX/RX queues will have already been
         * torn down, but efx->active_queues needs to be set to zero.
         */
        if (efx->must_realloc_vis) {
                atomic_set(&efx->active_queues, 0);
                return 0;
        }

        /* Do not attempt to write to the NIC during EEH recovery */
        if (efx->state != STATE_RECOVERY) {
                efx_for_each_channel(channel, efx) {
                        efx_for_each_channel_rx_queue(rx_queue, channel)
                                efx_mcdi_rx_fini(rx_queue);
                        efx_for_each_channel_tx_queue(tx_queue, channel)
                                efx_mcdi_tx_fini(tx_queue);
                }

                wait_event_timeout(efx->flush_wq,
                                   atomic_read(&efx->active_queues) == 0,
                                   msecs_to_jiffies(EFX_MAX_FLUSH_TIME));
                pending = atomic_read(&efx->active_queues);
                if (pending) {
                        netif_err(efx, hw, efx->net_dev, "failed to flush %d queues\n",
                                  pending);
                        return -ETIMEDOUT;
                }
        }

        return 0;
}

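/* Convert the VI window mode reported by MC_CMD_GET_CAPABILITIES into a
 * VI stride in bytes (8K, 16K or 64K).
 */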
int efx_mcdi_window_mode_to_stride(struct efx_nic *efx, u8 vi_window_mode)
{
        switch (vi_window_mode) {
        case MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_8K:
                efx->vi_stride = 8192;
                break;
        case MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_16K:
                efx->vi_stride = 16384;
                break;
        case MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_64K:
                efx->vi_stride = 65536;
                break;
        default:
                netif_err(efx, probe, efx->net_dev,
                          "Unrecognised VI window mode %d\n",
                          vi_window_mode);
                return -EIO;
        }
        netif_dbg(efx, probe, efx->net_dev, "vi_stride = %u\n",
                  efx->vi_stride);
        return 0;
}

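/* Query MC_CMD_GET_FUNCTION_INFO to obtain this function's PF index. */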
int efx_get_pf_index(struct efx_nic *efx, unsigned int *pf_index)
{
        MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_FUNCTION_INFO_OUT_LEN);
        size_t outlen;
        int rc;

        rc = efx_mcdi_rpc(efx, MC_CMD_GET_FUNCTION_INFO, NULL, 0, outbuf,
                          sizeof(outbuf), &outlen);
        if (rc)
                return rc;
        if (outlen < sizeof(outbuf))
                return -EIO;

        *pf_index = MCDI_DWORD(outbuf, GET_FUNCTION_INFO_OUT_PF);
        return 0;
}