1 // SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
2 /* Copyright (C) 2018 Netronome Systems, Inc. */
4 #include <linux/bitops.h>
5 #include <linux/kernel.h>
6 #include <linux/log2.h>
8 #include "../nfpcore/nfp_cpp.h"
9 #include "../nfpcore/nfp_nffw.h"
10 #include "../nfp_app.h"
11 #include "../nfp_abi.h"
12 #include "../nfp_main.h"
13 #include "../nfp_net.h"
/* Firmware run-time symbol name templates.  %u is the PCIe PF id; %s is an
 * optional "_per_band" suffix (see nfp_abm_ctrl_find_q_rtsym()).
 */
#define NFP_NUM_PRIOS_SYM_NAME	"_abi_pci_dscp_num_prio_%u"
#define NFP_NUM_BANDS_SYM_NAME	"_abi_pci_dscp_num_band_%u"
#define NFP_ACT_MASK_SYM_NAME	"_abi_nfd_out_q_actions_%u"

#define NFP_RED_SUPPORT_SYM_NAME	"_abi_nfd_out_red_offload_%u"

/* Queue level table - one NFP_QLVL_STRIDE-byte record per subqueue,
 * field offsets within a record below.
 */
#define NFP_QLVL_SYM_NAME	"_abi_nfd_out_q_lvls_%u%s"
#define NFP_QLVL_STRIDE		16
#define NFP_QLVL_BLOG_BYTES	0	/* backlog in bytes */
#define NFP_QLVL_BLOG_PKTS	4	/* backlog in packets */
#define NFP_QLVL_THRS		8	/* RED threshold */
#define NFP_QLVL_ACT		12	/* queue action */

/* Queue manager statistics - one NFP_QMSTAT_STRIDE-byte record per subqueue */
#define NFP_QMSTAT_SYM_NAME	"_abi_nfdqm%u_stats%s"
#define NFP_QMSTAT_STRIDE	32
#define NFP_QMSTAT_NON_STO	0
#define NFP_QMSTAT_STO		8
#define NFP_QMSTAT_DROP		16
#define NFP_QMSTAT_ECN		24

/* Basic RX queue statistics - packet and byte counters per queue */
#define NFP_Q_STAT_SYM_NAME	"_abi_nfd_rxq_stats%u%s"
#define NFP_Q_STAT_STRIDE	16
#define NFP_Q_STAT_PKTS		0
#define NFP_Q_STAT_BYTES	8

/* Layout of the vNIC mailbox used for DSCP priority map updates */
#define NFP_NET_ABM_MBOX_CMD		NFP_NET_CFG_MBOX_SIMPLE_CMD
#define NFP_NET_ABM_MBOX_RET		NFP_NET_CFG_MBOX_SIMPLE_RET
#define NFP_NET_ABM_MBOX_DATALEN	NFP_NET_CFG_MBOX_SIMPLE_VAL
#define NFP_NET_ABM_MBOX_RESERVED	(NFP_NET_CFG_MBOX_SIMPLE_VAL + 4)
#define NFP_NET_ABM_MBOX_DATA		(NFP_NET_CFG_MBOX_SIMPLE_VAL + 8)
48 nfp_abm_ctrl_stat(struct nfp_abm_link
*alink
, const struct nfp_rtsym
*sym
,
49 unsigned int stride
, unsigned int offset
, unsigned int band
,
50 unsigned int queue
, bool is_u64
, u64
*res
)
52 struct nfp_cpp
*cpp
= alink
->abm
->app
->cpp
;
58 qid
= band
* NFP_NET_MAX_RX_RINGS
+ alink
->queue_base
+ queue
;
60 sym_offset
= qid
* stride
+ offset
;
62 err
= __nfp_rtsym_readq(cpp
, sym
, 3, 0, sym_offset
, &val
);
64 err
= __nfp_rtsym_readl(cpp
, sym
, 3, 0, sym_offset
, &val32
);
66 nfp_err(cpp
, "RED offload reading stat failed on vNIC %d band %d queue %d (+ %d)\n",
67 alink
->id
, band
, queue
, alink
->queue_base
);
71 *res
= is_u64
? val
: val32
;
75 int __nfp_abm_ctrl_set_q_lvl(struct nfp_abm
*abm
, unsigned int id
, u32 val
)
77 struct nfp_cpp
*cpp
= abm
->app
->cpp
;
81 __clear_bit(id
, abm
->threshold_undef
);
82 if (abm
->thresholds
[id
] == val
)
85 sym_offset
= id
* NFP_QLVL_STRIDE
+ NFP_QLVL_THRS
;
86 err
= __nfp_rtsym_writel(cpp
, abm
->q_lvls
, 4, 0, sym_offset
, val
);
89 "RED offload setting level failed on subqueue %d\n",
94 abm
->thresholds
[id
] = val
;
98 int nfp_abm_ctrl_set_q_lvl(struct nfp_abm_link
*alink
, unsigned int band
,
99 unsigned int queue
, u32 val
)
101 unsigned int threshold
;
103 threshold
= band
* NFP_NET_MAX_RX_RINGS
+ alink
->queue_base
+ queue
;
105 return __nfp_abm_ctrl_set_q_lvl(alink
->abm
, threshold
, val
);
108 int __nfp_abm_ctrl_set_q_act(struct nfp_abm
*abm
, unsigned int id
,
109 enum nfp_abm_q_action act
)
111 struct nfp_cpp
*cpp
= abm
->app
->cpp
;
115 if (abm
->actions
[id
] == act
)
118 sym_offset
= id
* NFP_QLVL_STRIDE
+ NFP_QLVL_ACT
;
119 err
= __nfp_rtsym_writel(cpp
, abm
->q_lvls
, 4, 0, sym_offset
, act
);
122 "RED offload setting action failed on subqueue %d\n",
127 abm
->actions
[id
] = act
;
131 int nfp_abm_ctrl_set_q_act(struct nfp_abm_link
*alink
, unsigned int band
,
132 unsigned int queue
, enum nfp_abm_q_action act
)
136 qid
= band
* NFP_NET_MAX_RX_RINGS
+ alink
->queue_base
+ queue
;
138 return __nfp_abm_ctrl_set_q_act(alink
->abm
, qid
, act
);
141 u64
nfp_abm_ctrl_stat_non_sto(struct nfp_abm_link
*alink
, unsigned int queue
)
146 for (band
= 0; band
< alink
->abm
->num_bands
; band
++) {
147 if (nfp_abm_ctrl_stat(alink
, alink
->abm
->qm_stats
,
148 NFP_QMSTAT_STRIDE
, NFP_QMSTAT_NON_STO
,
149 band
, queue
, true, &val
))
157 u64
nfp_abm_ctrl_stat_sto(struct nfp_abm_link
*alink
, unsigned int queue
)
162 for (band
= 0; band
< alink
->abm
->num_bands
; band
++) {
163 if (nfp_abm_ctrl_stat(alink
, alink
->abm
->qm_stats
,
164 NFP_QMSTAT_STRIDE
, NFP_QMSTAT_STO
,
165 band
, queue
, true, &val
))
174 nfp_abm_ctrl_stat_basic(struct nfp_abm_link
*alink
, unsigned int band
,
175 unsigned int queue
, unsigned int off
, u64
*val
)
177 if (!nfp_abm_has_prio(alink
->abm
)) {
179 unsigned int id
= alink
->queue_base
+ queue
;
181 *val
= nn_readq(alink
->vnic
,
182 NFP_NET_CFG_RXR_STATS(id
) + off
);
189 return nfp_abm_ctrl_stat(alink
, alink
->abm
->q_stats
,
190 NFP_Q_STAT_STRIDE
, off
, band
, queue
,
195 int nfp_abm_ctrl_read_q_stats(struct nfp_abm_link
*alink
, unsigned int band
,
196 unsigned int queue
, struct nfp_alink_stats
*stats
)
200 err
= nfp_abm_ctrl_stat_basic(alink
, band
, queue
, NFP_Q_STAT_PKTS
,
205 err
= nfp_abm_ctrl_stat_basic(alink
, band
, queue
, NFP_Q_STAT_BYTES
,
210 err
= nfp_abm_ctrl_stat(alink
, alink
->abm
->q_lvls
, NFP_QLVL_STRIDE
,
211 NFP_QLVL_BLOG_BYTES
, band
, queue
, false,
212 &stats
->backlog_bytes
);
216 err
= nfp_abm_ctrl_stat(alink
, alink
->abm
->q_lvls
,
217 NFP_QLVL_STRIDE
, NFP_QLVL_BLOG_PKTS
,
218 band
, queue
, false, &stats
->backlog_pkts
);
222 err
= nfp_abm_ctrl_stat(alink
, alink
->abm
->qm_stats
,
223 NFP_QMSTAT_STRIDE
, NFP_QMSTAT_DROP
,
224 band
, queue
, true, &stats
->drops
);
228 return nfp_abm_ctrl_stat(alink
, alink
->abm
->qm_stats
,
229 NFP_QMSTAT_STRIDE
, NFP_QMSTAT_ECN
,
230 band
, queue
, true, &stats
->overlimits
);
233 int nfp_abm_ctrl_read_q_xstats(struct nfp_abm_link
*alink
,
234 unsigned int band
, unsigned int queue
,
235 struct nfp_alink_xstats
*xstats
)
239 err
= nfp_abm_ctrl_stat(alink
, alink
->abm
->qm_stats
,
240 NFP_QMSTAT_STRIDE
, NFP_QMSTAT_DROP
,
241 band
, queue
, true, &xstats
->pdrop
);
245 return nfp_abm_ctrl_stat(alink
, alink
->abm
->qm_stats
,
246 NFP_QMSTAT_STRIDE
, NFP_QMSTAT_ECN
,
247 band
, queue
, true, &xstats
->ecn_marked
);
250 int nfp_abm_ctrl_qm_enable(struct nfp_abm
*abm
)
252 return nfp_mbox_cmd(abm
->app
->pf
, NFP_MBOX_PCIE_ABM_ENABLE
,
256 int nfp_abm_ctrl_qm_disable(struct nfp_abm
*abm
)
258 return nfp_mbox_cmd(abm
->app
->pf
, NFP_MBOX_PCIE_ABM_DISABLE
,
262 int nfp_abm_ctrl_prio_map_update(struct nfp_abm_link
*alink
, u32
*packed
)
264 const u32 cmd
= NFP_NET_CFG_MBOX_CMD_PCI_DSCP_PRIOMAP_SET
;
265 struct nfp_net
*nn
= alink
->vnic
;
269 err
= nfp_net_mbox_lock(nn
, alink
->abm
->prio_map_len
);
273 /* Write data_len and wipe reserved */
274 nn_writeq(nn
, nn
->tlv_caps
.mbox_off
+ NFP_NET_ABM_MBOX_DATALEN
,
275 alink
->abm
->prio_map_len
);
277 for (i
= 0; i
< alink
->abm
->prio_map_len
; i
+= sizeof(u32
))
278 nn_writel(nn
, nn
->tlv_caps
.mbox_off
+ NFP_NET_ABM_MBOX_DATA
+ i
,
279 packed
[i
/ sizeof(u32
)]);
281 err
= nfp_net_mbox_reconfig_and_unlock(nn
, cmd
);
283 nfp_err(alink
->abm
->app
->cpp
,
284 "setting DSCP -> VQ map failed with error %d\n", err
);
288 static int nfp_abm_ctrl_prio_check_params(struct nfp_abm_link
*alink
)
290 struct nfp_abm
*abm
= alink
->abm
;
291 struct nfp_net
*nn
= alink
->vnic
;
292 unsigned int min_mbox_sz
;
294 if (!nfp_abm_has_prio(alink
->abm
))
297 min_mbox_sz
= NFP_NET_ABM_MBOX_DATA
+ alink
->abm
->prio_map_len
;
298 if (nn
->tlv_caps
.mbox_len
< min_mbox_sz
) {
299 nfp_err(abm
->app
->pf
->cpp
, "vNIC mailbox too small for prio offload: %u, need: %u\n",
300 nn
->tlv_caps
.mbox_len
, min_mbox_sz
);
307 int nfp_abm_ctrl_read_params(struct nfp_abm_link
*alink
)
309 alink
->queue_base
= nn_readl(alink
->vnic
, NFP_NET_CFG_START_RXQ
);
310 alink
->queue_base
/= alink
->vnic
->stride_rx
;
312 return nfp_abm_ctrl_prio_check_params(alink
);
315 static unsigned int nfp_abm_ctrl_prio_map_size(struct nfp_abm
*abm
)
319 size
= roundup_pow_of_two(order_base_2(abm
->num_bands
));
320 size
= DIV_ROUND_UP(size
* abm
->num_prios
, BITS_PER_BYTE
);
321 size
= round_up(size
, sizeof(u32
));
326 static const struct nfp_rtsym
*
327 nfp_abm_ctrl_find_rtsym(struct nfp_pf
*pf
, const char *name
, unsigned int size
)
329 const struct nfp_rtsym
*sym
;
331 sym
= nfp_rtsym_lookup(pf
->rtbl
, name
);
333 nfp_err(pf
->cpp
, "Symbol '%s' not found\n", name
);
334 return ERR_PTR(-ENOENT
);
336 if (nfp_rtsym_size(sym
) != size
) {
338 "Symbol '%s' wrong size: expected %u got %llu\n",
339 name
, size
, nfp_rtsym_size(sym
));
340 return ERR_PTR(-EINVAL
);
346 static const struct nfp_rtsym
*
347 nfp_abm_ctrl_find_q_rtsym(struct nfp_abm
*abm
, const char *name_fmt
,
352 size
= array3_size(size
, abm
->num_bands
, NFP_NET_MAX_RX_RINGS
);
353 snprintf(pf_symbol
, sizeof(pf_symbol
), name_fmt
,
354 abm
->pf_id
, nfp_abm_has_prio(abm
) ? "_per_band" : "");
356 return nfp_abm_ctrl_find_rtsym(abm
->app
->pf
, pf_symbol
, size
);
359 int nfp_abm_ctrl_find_addrs(struct nfp_abm
*abm
)
361 struct nfp_pf
*pf
= abm
->app
->pf
;
362 const struct nfp_rtsym
*sym
;
365 abm
->pf_id
= nfp_cppcore_pcie_unit(pf
->cpp
);
367 /* Check if Qdisc offloads are supported */
368 res
= nfp_pf_rtsym_read_optional(pf
, NFP_RED_SUPPORT_SYM_NAME
, 1);
371 abm
->red_support
= res
;
373 /* Read count of prios and prio bands */
374 res
= nfp_pf_rtsym_read_optional(pf
, NFP_NUM_BANDS_SYM_NAME
, 1);
377 abm
->num_bands
= res
;
379 res
= nfp_pf_rtsym_read_optional(pf
, NFP_NUM_PRIOS_SYM_NAME
, 1);
382 abm
->num_prios
= res
;
384 /* Read available actions */
385 res
= nfp_pf_rtsym_read_optional(pf
, NFP_ACT_MASK_SYM_NAME
,
386 BIT(NFP_ABM_ACT_MARK_DROP
));
389 abm
->action_mask
= res
;
391 abm
->prio_map_len
= nfp_abm_ctrl_prio_map_size(abm
);
392 abm
->dscp_mask
= GENMASK(7, 8 - order_base_2(abm
->num_prios
));
394 /* Check values are sane, U16_MAX is arbitrarily chosen as max */
395 if (!is_power_of_2(abm
->num_bands
) || !is_power_of_2(abm
->num_prios
) ||
396 abm
->num_bands
> U16_MAX
|| abm
->num_prios
> U16_MAX
||
397 (abm
->num_bands
== 1) != (abm
->num_prios
== 1)) {
399 "invalid priomap description num bands: %u and num prios: %u\n",
400 abm
->num_bands
, abm
->num_prios
);
404 /* Find level and stat symbols */
405 if (!abm
->red_support
)
408 sym
= nfp_abm_ctrl_find_q_rtsym(abm
, NFP_QLVL_SYM_NAME
,
414 sym
= nfp_abm_ctrl_find_q_rtsym(abm
, NFP_QMSTAT_SYM_NAME
,
420 if (nfp_abm_has_prio(abm
)) {
421 sym
= nfp_abm_ctrl_find_q_rtsym(abm
, NFP_Q_STAT_SYM_NAME
,