// SPDX-License-Identifier: GPL-2.0-only
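
/* Netlink handlers for the ethtool ring parameter commands
 * (ETHTOOL_MSG_RINGS_GET / ETHTOOL_MSG_RINGS_SET / ETHTOOL_MSG_RINGS_NTF).
 */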

#include "netlink.h"
#include "common.h"

struct rings_req_info {
	struct ethnl_req_info		base;
};
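
/* Reply data caches both the legacy ringparam block and the kernel-only
 * extension, so rings_fill_reply() can run from this snapshot without
 * calling back into the driver.
 */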
struct rings_reply_data {
	struct ethnl_reply_data		base;
	struct ethtool_ringparam	ringparam;
	struct kernel_ethtool_ringparam	kernel_ringparam;
	u32				supported_ring_params;
};

#define RINGS_REPDATA(__reply_base) \
	container_of(__reply_base, struct rings_reply_data, base)
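
/* A GET request carries nothing but the common header nest identifying
 * the target device.
 */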
const struct nla_policy ethnl_rings_get_policy[] = {
	[ETHTOOL_A_RINGS_HEADER]		=
		NLA_POLICY_NESTED(ethnl_header_policy),
};
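
/* Snapshot the device's current ring parameters. ethnl_ops_begin() /
 * ethnl_ops_complete() make sure the device is resumed and present while
 * ->get_ringparam() runs.
 */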
static int rings_prepare_data(const struct ethnl_req_info *req_base,
			      struct ethnl_reply_data *reply_base,
			      const struct genl_info *info)
{
	struct rings_reply_data *data = RINGS_REPDATA(reply_base);
	struct net_device *dev = reply_base->dev;
	int ret;

	if (!dev->ethtool_ops->get_ringparam)
		return -EOPNOTSUPP;

	data->supported_ring_params = dev->ethtool_ops->supported_ring_params;
	ret = ethnl_ops_begin(dev);
	if (ret < 0)
		return ret;
	dev->ethtool_ops->get_ringparam(dev, &data->ringparam,
					&data->kernel_ringparam, info->extack);
	ethnl_ops_complete(dev);

	return 0;
}
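
/* Worst-case payload size: one attribute of the appropriate width per
 * exported ring parameter. The common reply header nest is accounted
 * for separately by the ethnl core.
 */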
static int rings_reply_size(const struct ethnl_req_info *req_base,
			    const struct ethnl_reply_data *reply_base)
{
	return nla_total_size(sizeof(u32)) +	/* _RINGS_RX_MAX */
	       nla_total_size(sizeof(u32)) +	/* _RINGS_RX_MINI_MAX */
	       nla_total_size(sizeof(u32)) +	/* _RINGS_RX_JUMBO_MAX */
	       nla_total_size(sizeof(u32)) +	/* _RINGS_TX_MAX */
	       nla_total_size(sizeof(u32)) +	/* _RINGS_RX */
	       nla_total_size(sizeof(u32)) +	/* _RINGS_RX_MINI */
	       nla_total_size(sizeof(u32)) +	/* _RINGS_RX_JUMBO */
	       nla_total_size(sizeof(u32)) +	/* _RINGS_TX */
	       nla_total_size(sizeof(u32)) +	/* _RINGS_RX_BUF_LEN */
	       nla_total_size(sizeof(u8)) +	/* _RINGS_TCP_DATA_SPLIT */
	       nla_total_size(sizeof(u32)) +	/* _RINGS_CQE_SIZE */
	       nla_total_size(sizeof(u8)) +	/* _RINGS_TX_PUSH */
	       nla_total_size(sizeof(u8)) +	/* _RINGS_RX_PUSH */
	       nla_total_size(sizeof(u32)) +	/* _RINGS_TX_PUSH_BUF_LEN */
	       nla_total_size(sizeof(u32));	/* _RINGS_TX_PUSH_BUF_LEN_MAX */
}
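
/* Emit only what the device supports: a max/current attribute pair is
 * skipped entirely when the corresponding *_max_pending limit is zero,
 * and the optional kernel parameters are skipped when their value or
 * feature bit is unset.
 */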
static int rings_fill_reply(struct sk_buff *skb,
			    const struct ethnl_req_info *req_base,
			    const struct ethnl_reply_data *reply_base)
{
	const struct rings_reply_data *data = RINGS_REPDATA(reply_base);
	const struct kernel_ethtool_ringparam *kr = &data->kernel_ringparam;
	const struct ethtool_ringparam *ringparam = &data->ringparam;
	u32 supported_ring_params = data->supported_ring_params;

	WARN_ON(kr->tcp_data_split > ETHTOOL_TCP_DATA_SPLIT_ENABLED);

	if ((ringparam->rx_max_pending &&
	     (nla_put_u32(skb, ETHTOOL_A_RINGS_RX_MAX,
			  ringparam->rx_max_pending) ||
	      nla_put_u32(skb, ETHTOOL_A_RINGS_RX,
			  ringparam->rx_pending))) ||
	    (ringparam->rx_mini_max_pending &&
	     (nla_put_u32(skb, ETHTOOL_A_RINGS_RX_MINI_MAX,
			  ringparam->rx_mini_max_pending) ||
	      nla_put_u32(skb, ETHTOOL_A_RINGS_RX_MINI,
			  ringparam->rx_mini_pending))) ||
	    (ringparam->rx_jumbo_max_pending &&
	     (nla_put_u32(skb, ETHTOOL_A_RINGS_RX_JUMBO_MAX,
			  ringparam->rx_jumbo_max_pending) ||
	      nla_put_u32(skb, ETHTOOL_A_RINGS_RX_JUMBO,
			  ringparam->rx_jumbo_pending))) ||
	    (ringparam->tx_max_pending &&
	     (nla_put_u32(skb, ETHTOOL_A_RINGS_TX_MAX,
			  ringparam->tx_max_pending) ||
	      nla_put_u32(skb, ETHTOOL_A_RINGS_TX,
			  ringparam->tx_pending))) ||
	    (kr->rx_buf_len &&
	     (nla_put_u32(skb, ETHTOOL_A_RINGS_RX_BUF_LEN, kr->rx_buf_len))) ||
	    (kr->tcp_data_split &&
	     (nla_put_u8(skb, ETHTOOL_A_RINGS_TCP_DATA_SPLIT,
			 kr->tcp_data_split))) ||
	    (kr->cqe_size &&
	     (nla_put_u32(skb, ETHTOOL_A_RINGS_CQE_SIZE, kr->cqe_size))) ||
	    nla_put_u8(skb, ETHTOOL_A_RINGS_TX_PUSH, !!kr->tx_push) ||
	    nla_put_u8(skb, ETHTOOL_A_RINGS_RX_PUSH, !!kr->rx_push) ||
	    ((supported_ring_params & ETHTOOL_RING_USE_TX_PUSH_BUF_LEN) &&
	     (nla_put_u32(skb, ETHTOOL_A_RINGS_TX_PUSH_BUF_LEN_MAX,
			  kr->tx_push_buf_max_len) ||
	      nla_put_u32(skb, ETHTOOL_A_RINGS_TX_PUSH_BUF_LEN,
			  kr->tx_push_buf_len))))
		return -EMSGSIZE;

	return 0;
}
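
/* For SET requests every ring attribute is optional; the policy enforces
 * basic type and range checks before the handler runs.
 */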
const struct nla_policy ethnl_rings_set_policy[] = {
	[ETHTOOL_A_RINGS_HEADER]		=
		NLA_POLICY_NESTED(ethnl_header_policy),
	[ETHTOOL_A_RINGS_RX]			= { .type = NLA_U32 },
	[ETHTOOL_A_RINGS_RX_MINI]		= { .type = NLA_U32 },
	[ETHTOOL_A_RINGS_RX_JUMBO]		= { .type = NLA_U32 },
	[ETHTOOL_A_RINGS_TX]			= { .type = NLA_U32 },
	[ETHTOOL_A_RINGS_RX_BUF_LEN]		= NLA_POLICY_MIN(NLA_U32, 1),
	[ETHTOOL_A_RINGS_TCP_DATA_SPLIT]	=
		NLA_POLICY_MAX(NLA_U8, ETHTOOL_TCP_DATA_SPLIT_ENABLED),
	[ETHTOOL_A_RINGS_CQE_SIZE]		= NLA_POLICY_MIN(NLA_U32, 1),
	[ETHTOOL_A_RINGS_TX_PUSH]		= NLA_POLICY_MAX(NLA_U8, 1),
	[ETHTOOL_A_RINGS_RX_PUSH]		= NLA_POLICY_MAX(NLA_U8, 1),
	[ETHTOOL_A_RINGS_TX_PUSH_BUF_LEN]	= { .type = NLA_U32 },
};
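
/* Refuse attributes for features the driver has not opted into via
 * ethtool_ops->supported_ring_params, so unsupported requests fail
 * before any device state is touched.
 */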
static int
ethnl_set_rings_validate(struct ethnl_req_info *req_info,
			 struct genl_info *info)
{
	const struct ethtool_ops *ops = req_info->dev->ethtool_ops;
	struct nlattr **tb = info->attrs;

	if (tb[ETHTOOL_A_RINGS_RX_BUF_LEN] &&
	    !(ops->supported_ring_params & ETHTOOL_RING_USE_RX_BUF_LEN)) {
		NL_SET_ERR_MSG_ATTR(info->extack,
				    tb[ETHTOOL_A_RINGS_RX_BUF_LEN],
				    "setting rx buf len not supported");
		return -EOPNOTSUPP;
	}

	if (tb[ETHTOOL_A_RINGS_TCP_DATA_SPLIT] &&
	    !(ops->supported_ring_params & ETHTOOL_RING_USE_TCP_DATA_SPLIT)) {
		NL_SET_ERR_MSG_ATTR(info->extack,
				    tb[ETHTOOL_A_RINGS_TCP_DATA_SPLIT],
				    "setting TCP data split is not supported");
		return -EOPNOTSUPP;
	}

	if (tb[ETHTOOL_A_RINGS_CQE_SIZE] &&
	    !(ops->supported_ring_params & ETHTOOL_RING_USE_CQE_SIZE)) {
		NL_SET_ERR_MSG_ATTR(info->extack,
				    tb[ETHTOOL_A_RINGS_CQE_SIZE],
				    "setting cqe size not supported");
		return -EOPNOTSUPP;
	}

	if (tb[ETHTOOL_A_RINGS_TX_PUSH] &&
	    !(ops->supported_ring_params & ETHTOOL_RING_USE_TX_PUSH)) {
		NL_SET_ERR_MSG_ATTR(info->extack,
				    tb[ETHTOOL_A_RINGS_TX_PUSH],
				    "setting tx push not supported");
		return -EOPNOTSUPP;
	}

	if (tb[ETHTOOL_A_RINGS_RX_PUSH] &&
	    !(ops->supported_ring_params & ETHTOOL_RING_USE_RX_PUSH)) {
		NL_SET_ERR_MSG_ATTR(info->extack,
				    tb[ETHTOOL_A_RINGS_RX_PUSH],
				    "setting rx push not supported");
		return -EOPNOTSUPP;
	}

	if (tb[ETHTOOL_A_RINGS_TX_PUSH_BUF_LEN] &&
	    !(ops->supported_ring_params & ETHTOOL_RING_USE_TX_PUSH_BUF_LEN)) {
		NL_SET_ERR_MSG_ATTR(info->extack,
				    tb[ETHTOOL_A_RINGS_TX_PUSH_BUF_LEN],
				    "setting tx push buf len is not supported");
		return -EOPNOTSUPP;
	}

	return ops->get_ringparam && ops->set_ringparam ? 1 : -EOPNOTSUPP;
}
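
/* Read-modify-write: start from the driver's current settings, fold in
 * only the attributes present in the request, then range-check the
 * result before handing it to ->set_ringparam().
 */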
static int
ethnl_set_rings(struct ethnl_req_info *req_info, struct genl_info *info)
{
	struct kernel_ethtool_ringparam kernel_ringparam = {};
	struct ethtool_ringparam ringparam = {};
	struct net_device *dev = req_info->dev;
	struct nlattr **tb = info->attrs;
	const struct nlattr *err_attr;
	bool mod = false;
	int ret;

	dev->ethtool_ops->get_ringparam(dev, &ringparam,
					&kernel_ringparam, info->extack);

	ethnl_update_u32(&ringparam.rx_pending, tb[ETHTOOL_A_RINGS_RX], &mod);
	ethnl_update_u32(&ringparam.rx_mini_pending,
			 tb[ETHTOOL_A_RINGS_RX_MINI], &mod);
	ethnl_update_u32(&ringparam.rx_jumbo_pending,
			 tb[ETHTOOL_A_RINGS_RX_JUMBO], &mod);
	ethnl_update_u32(&ringparam.tx_pending, tb[ETHTOOL_A_RINGS_TX], &mod);
	ethnl_update_u32(&kernel_ringparam.rx_buf_len,
			 tb[ETHTOOL_A_RINGS_RX_BUF_LEN], &mod);
	ethnl_update_u8(&kernel_ringparam.tcp_data_split,
			tb[ETHTOOL_A_RINGS_TCP_DATA_SPLIT], &mod);
	ethnl_update_u32(&kernel_ringparam.cqe_size,
			 tb[ETHTOOL_A_RINGS_CQE_SIZE], &mod);
	ethnl_update_u8(&kernel_ringparam.tx_push,
			tb[ETHTOOL_A_RINGS_TX_PUSH], &mod);
	ethnl_update_u8(&kernel_ringparam.rx_push,
			tb[ETHTOOL_A_RINGS_RX_PUSH], &mod);
	ethnl_update_u32(&kernel_ringparam.tx_push_buf_len,
			 tb[ETHTOOL_A_RINGS_TX_PUSH_BUF_LEN], &mod);
	if (!mod)
		return 0;

	/* ensure new ring parameters are within limits */
	if (ringparam.rx_pending > ringparam.rx_max_pending)
		err_attr = tb[ETHTOOL_A_RINGS_RX];
	else if (ringparam.rx_mini_pending > ringparam.rx_mini_max_pending)
		err_attr = tb[ETHTOOL_A_RINGS_RX_MINI];
	else if (ringparam.rx_jumbo_pending > ringparam.rx_jumbo_max_pending)
		err_attr = tb[ETHTOOL_A_RINGS_RX_JUMBO];
	else if (ringparam.tx_pending > ringparam.tx_max_pending)
		err_attr = tb[ETHTOOL_A_RINGS_TX];
	else
		err_attr = NULL;
	if (err_attr) {
		NL_SET_ERR_MSG_ATTR(info->extack, err_attr,
				    "requested ring size exceeds maximum");
		return -EINVAL;
	}

	if (kernel_ringparam.tx_push_buf_len > kernel_ringparam.tx_push_buf_max_len) {
		NL_SET_ERR_MSG_ATTR_FMT(info->extack,
					tb[ETHTOOL_A_RINGS_TX_PUSH_BUF_LEN],
					"Requested TX push buffer exceeds the maximum of %u",
					kernel_ringparam.tx_push_buf_max_len);
		return -EINVAL;
	}

	ret = dev->ethtool_ops->set_ringparam(dev, &ringparam,
					      &kernel_ringparam, info->extack);
	return ret < 0 ? ret : 1;
}
const struct ethnl_request_ops ethnl_rings_request_ops = {
	.request_cmd		= ETHTOOL_MSG_RINGS_GET,
	.reply_cmd		= ETHTOOL_MSG_RINGS_GET_REPLY,
	.hdr_attr		= ETHTOOL_A_RINGS_HEADER,
	.req_info_size		= sizeof(struct rings_req_info),
	.reply_data_size	= sizeof(struct rings_reply_data),

	.prepare_data		= rings_prepare_data,
	.reply_size		= rings_reply_size,
	.fill_reply		= rings_fill_reply,

	.set_validate		= ethnl_set_rings_validate,
	.set			= ethnl_set_rings,
	.set_ntf_cmd		= ETHTOOL_MSG_RINGS_NTF,
};
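
/* Userspace example (illustrative, not part of this file): the ethtool
 * utility exercises these handlers, e.g.
 *
 *	ethtool -g eth0			# ETHTOOL_MSG_RINGS_GET
 *	ethtool -G eth0 rx 1024 tx 512	# ETHTOOL_MSG_RINGS_SET
 *
 * "eth0" and the ring sizes are placeholder values.
 */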