1 // SPDX-License-Identifier: GPL-2.0-only
/* Request info for ETHTOOL_MSG_RINGS_GET; no command-specific fields,
 * only the common ethtool netlink request header.
 */
struct rings_req_info {
	struct ethnl_req_info		base;
};
/* Reply data for ETHTOOL_MSG_RINGS_GET: common reply header plus the
 * ring sizes fetched from the driver via ->get_ringparam().
 */
struct rings_reply_data {
	struct ethnl_reply_data	base;
	struct ethtool_ringparam	ringparam;
};
/* Convert the generic ethnl_reply_data pointer back to the containing
 * rings_reply_data (base is embedded as the first member).
 */
#define RINGS_REPDATA(__reply_base) \
	container_of(__reply_base, struct rings_reply_data, base)
/* Attribute policy for RINGS_GET requests: only the nested header is
 * accepted; all ring size attributes are reply-only and therefore
 * rejected on input.
 */
static const struct nla_policy
rings_get_policy[ETHTOOL_A_RINGS_MAX + 1] = {
	[ETHTOOL_A_RINGS_UNSPEC]		= { .type = NLA_REJECT },
	[ETHTOOL_A_RINGS_HEADER]		= { .type = NLA_NESTED },
	[ETHTOOL_A_RINGS_RX_MAX]		= { .type = NLA_REJECT },
	[ETHTOOL_A_RINGS_RX_MINI_MAX]		= { .type = NLA_REJECT },
	[ETHTOOL_A_RINGS_RX_JUMBO_MAX]		= { .type = NLA_REJECT },
	[ETHTOOL_A_RINGS_TX_MAX]		= { .type = NLA_REJECT },
	[ETHTOOL_A_RINGS_RX]			= { .type = NLA_REJECT },
	[ETHTOOL_A_RINGS_RX_MINI]		= { .type = NLA_REJECT },
	[ETHTOOL_A_RINGS_RX_JUMBO]		= { .type = NLA_REJECT },
	[ETHTOOL_A_RINGS_TX]			= { .type = NLA_REJECT },
};
/* Fill the reply data with the device's current ring parameters.
 *
 * Returns 0 on success, -EOPNOTSUPP when the driver does not implement
 * ->get_ringparam(), or a negative error from ethnl_ops_begin().
 */
static int rings_prepare_data(const struct ethnl_req_info *req_base,
			      struct ethnl_reply_data *reply_base,
			      struct genl_info *info)
{
	struct rings_reply_data *data = RINGS_REPDATA(reply_base);
	struct net_device *dev = reply_base->dev;
	int ret;

	if (!dev->ethtool_ops->get_ringparam)
		return -EOPNOTSUPP;
	/* NOTE(review): ethnl_ops_begin() presumably resumes/locks the
	 * device for ethtool ops; must be paired with ethnl_ops_complete().
	 */
	ret = ethnl_ops_begin(dev);
	if (ret < 0)
		return ret;
	dev->ethtool_ops->get_ringparam(dev, &data->ringparam);
	ethnl_ops_complete(dev);

	return 0;
}
51 static int rings_reply_size(const struct ethnl_req_info
*req_base
,
52 const struct ethnl_reply_data
*reply_base
)
54 return nla_total_size(sizeof(u32
)) + /* _RINGS_RX_MAX */
55 nla_total_size(sizeof(u32
)) + /* _RINGS_RX_MINI_MAX */
56 nla_total_size(sizeof(u32
)) + /* _RINGS_RX_JUMBO_MAX */
57 nla_total_size(sizeof(u32
)) + /* _RINGS_TX_MAX */
58 nla_total_size(sizeof(u32
)) + /* _RINGS_RX */
59 nla_total_size(sizeof(u32
)) + /* _RINGS_RX_MINI */
60 nla_total_size(sizeof(u32
)) + /* _RINGS_RX_JUMBO */
61 nla_total_size(sizeof(u32
)); /* _RINGS_TX */
/* Put the ring size attributes into the reply message.
 *
 * For each ring type (RX, RX mini, RX jumbo, TX), the max and current
 * sizes are emitted only when the device has that ring at all
 * (max_pending != 0).  Any nla_put_u32() failure means the skb ran out
 * of tailroom, reported as -EMSGSIZE.
 */
static int rings_fill_reply(struct sk_buff *skb,
			    const struct ethnl_req_info *req_base,
			    const struct ethnl_reply_data *reply_base)
{
	const struct rings_reply_data *data = RINGS_REPDATA(reply_base);
	const struct ethtool_ringparam *ringparam = &data->ringparam;

	/* short-circuit: stop at the first failing nla_put_u32() */
	if ((ringparam->rx_max_pending &&
	     (nla_put_u32(skb, ETHTOOL_A_RINGS_RX_MAX,
			  ringparam->rx_max_pending) ||
	      nla_put_u32(skb, ETHTOOL_A_RINGS_RX,
			  ringparam->rx_pending))) ||
	    (ringparam->rx_mini_max_pending &&
	     (nla_put_u32(skb, ETHTOOL_A_RINGS_RX_MINI_MAX,
			  ringparam->rx_mini_max_pending) ||
	      nla_put_u32(skb, ETHTOOL_A_RINGS_RX_MINI,
			  ringparam->rx_mini_pending))) ||
	    (ringparam->rx_jumbo_max_pending &&
	     (nla_put_u32(skb, ETHTOOL_A_RINGS_RX_JUMBO_MAX,
			  ringparam->rx_jumbo_max_pending) ||
	      nla_put_u32(skb, ETHTOOL_A_RINGS_RX_JUMBO,
			  ringparam->rx_jumbo_pending))) ||
	    (ringparam->tx_max_pending &&
	     (nla_put_u32(skb, ETHTOOL_A_RINGS_TX_MAX,
			  ringparam->tx_max_pending) ||
	      nla_put_u32(skb, ETHTOOL_A_RINGS_TX,
			  ringparam->tx_pending))))
		return -EMSGSIZE;

	return 0;
}
/* Glue for the generic ethtool netlink GET infrastructure: wires the
 * RINGS_GET request/reply commands to the handlers above.
 */
const struct ethnl_request_ops ethnl_rings_request_ops = {
	.request_cmd		= ETHTOOL_MSG_RINGS_GET,
	.reply_cmd		= ETHTOOL_MSG_RINGS_GET_REPLY,
	.hdr_attr		= ETHTOOL_A_RINGS_HEADER,
	.max_attr		= ETHTOOL_A_RINGS_MAX,
	.req_info_size		= sizeof(struct rings_req_info),
	.reply_data_size	= sizeof(struct rings_reply_data),
	.request_policy		= rings_get_policy,

	.prepare_data		= rings_prepare_data,
	.reply_size		= rings_reply_size,
	.fill_reply		= rings_fill_reply,
};
/* Attribute policy for RINGS_SET requests: the four current ring sizes
 * are writable u32 values; the *_MAX attributes are read-only device
 * limits and are rejected on input.
 */
static const struct nla_policy
rings_set_policy[ETHTOOL_A_RINGS_MAX + 1] = {
	[ETHTOOL_A_RINGS_UNSPEC]		= { .type = NLA_REJECT },
	[ETHTOOL_A_RINGS_HEADER]		= { .type = NLA_NESTED },
	[ETHTOOL_A_RINGS_RX_MAX]		= { .type = NLA_REJECT },
	[ETHTOOL_A_RINGS_RX_MINI_MAX]		= { .type = NLA_REJECT },
	[ETHTOOL_A_RINGS_RX_JUMBO_MAX]		= { .type = NLA_REJECT },
	[ETHTOOL_A_RINGS_TX_MAX]		= { .type = NLA_REJECT },
	[ETHTOOL_A_RINGS_RX]			= { .type = NLA_U32 },
	[ETHTOOL_A_RINGS_RX_MINI]		= { .type = NLA_U32 },
	[ETHTOOL_A_RINGS_RX_JUMBO]		= { .type = NLA_U32 },
	[ETHTOOL_A_RINGS_TX]			= { .type = NLA_U32 },
};
/* Handle ETHTOOL_MSG_RINGS_SET: read the current ring parameters from
 * the driver, overlay the values supplied in the request, validate them
 * against the device limits and, when anything actually changed, apply
 * them via ->set_ringparam() and emit an ETHTOOL_MSG_RINGS_NTF
 * notification.
 *
 * Returns 0 on success or a negative errno (-EOPNOTSUPP when the driver
 * lacks get/set_ringparam, -EINVAL with an extack attribute when a
 * requested size exceeds its maximum, or a parse/driver error).
 */
int ethnl_set_rings(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr *tb[ETHTOOL_A_RINGS_MAX + 1];
	struct ethtool_ringparam ringparam = {};
	struct ethnl_req_info req_info = {};
	const struct nlattr *err_attr;
	const struct ethtool_ops *ops;
	struct net_device *dev;
	bool mod = false;
	int ret;

	ret = nlmsg_parse(info->nlhdr, GENL_HDRLEN, tb,
			  ETHTOOL_A_RINGS_MAX, rings_set_policy,
			  info->extack);
	if (ret < 0)
		return ret;
	/* resolve the target device from the request header; takes a
	 * reference that is dropped at out_dev below
	 */
	ret = ethnl_parse_header_dev_get(&req_info,
					 tb[ETHTOOL_A_RINGS_HEADER],
					 genl_info_net(info), info->extack,
					 true);
	if (ret < 0)
		return ret;
	dev = req_info.dev;
	ops = dev->ethtool_ops;
	ret = -EOPNOTSUPP;
	if (!ops->get_ringparam || !ops->set_ringparam)
		goto out_dev;

	rtnl_lock();
	ret = ethnl_ops_begin(dev);
	if (ret < 0)
		goto out_rtnl;
	/* start from the device's current settings so attributes absent
	 * from the request keep their present values
	 */
	ops->get_ringparam(dev, &ringparam);

	/* ethnl_update_u32() overwrites the field only when the attribute
	 * is present and sets mod when the value actually changed
	 */
	ethnl_update_u32(&ringparam.rx_pending, tb[ETHTOOL_A_RINGS_RX], &mod);
	ethnl_update_u32(&ringparam.rx_mini_pending,
			 tb[ETHTOOL_A_RINGS_RX_MINI], &mod);
	ethnl_update_u32(&ringparam.rx_jumbo_pending,
			 tb[ETHTOOL_A_RINGS_RX_JUMBO], &mod);
	ethnl_update_u32(&ringparam.tx_pending, tb[ETHTOOL_A_RINGS_TX], &mod);
	ret = 0;
	if (!mod)
		goto out_ops;

	/* ensure new ring parameters are within limits */
	if (ringparam.rx_pending > ringparam.rx_max_pending)
		err_attr = tb[ETHTOOL_A_RINGS_RX];
	else if (ringparam.rx_mini_pending > ringparam.rx_mini_max_pending)
		err_attr = tb[ETHTOOL_A_RINGS_RX_MINI];
	else if (ringparam.rx_jumbo_pending > ringparam.rx_jumbo_max_pending)
		err_attr = tb[ETHTOOL_A_RINGS_RX_JUMBO];
	else if (ringparam.tx_pending > ringparam.tx_max_pending)
		err_attr = tb[ETHTOOL_A_RINGS_TX];
	else
		err_attr = NULL;
	if (err_attr) {
		ret = -EINVAL;
		NL_SET_ERR_MSG_ATTR(info->extack, err_attr,
				    "requested ring size exceeds maximum");
		goto out_ops;
	}

	ret = dev->ethtool_ops->set_ringparam(dev, &ringparam);
	if (ret < 0)
		goto out_ops;
	/* notify listeners only after the driver accepted the change */
	ethtool_notify(dev, ETHTOOL_MSG_RINGS_NTF, NULL);

out_ops:
	ethnl_ops_complete(dev);
out_rtnl:
	rtnl_unlock();
out_dev:
	dev_put(dev);
	return ret;
}