1 // SPDX-License-Identifier: GPL-2.0-only
6 struct rings_req_info
{
7 struct ethnl_req_info base
;
10 struct rings_reply_data
{
11 struct ethnl_reply_data base
;
12 struct ethtool_ringparam ringparam
;
/* Map a generic ethnl reply base pointer back to its enclosing
 * rings_reply_data (the base member is embedded in that struct).
 */
#define RINGS_REPDATA(__reply_base) \
	container_of(__reply_base, struct rings_reply_data, base)
18 const struct nla_policy ethnl_rings_get_policy
[] = {
19 [ETHTOOL_A_RINGS_HEADER
] =
20 NLA_POLICY_NESTED(ethnl_header_policy
),
23 static int rings_prepare_data(const struct ethnl_req_info
*req_base
,
24 struct ethnl_reply_data
*reply_base
,
25 struct genl_info
*info
)
27 struct rings_reply_data
*data
= RINGS_REPDATA(reply_base
);
28 struct net_device
*dev
= reply_base
->dev
;
31 if (!dev
->ethtool_ops
->get_ringparam
)
33 ret
= ethnl_ops_begin(dev
);
36 dev
->ethtool_ops
->get_ringparam(dev
, &data
->ringparam
);
37 ethnl_ops_complete(dev
);
42 static int rings_reply_size(const struct ethnl_req_info
*req_base
,
43 const struct ethnl_reply_data
*reply_base
)
45 return nla_total_size(sizeof(u32
)) + /* _RINGS_RX_MAX */
46 nla_total_size(sizeof(u32
)) + /* _RINGS_RX_MINI_MAX */
47 nla_total_size(sizeof(u32
)) + /* _RINGS_RX_JUMBO_MAX */
48 nla_total_size(sizeof(u32
)) + /* _RINGS_TX_MAX */
49 nla_total_size(sizeof(u32
)) + /* _RINGS_RX */
50 nla_total_size(sizeof(u32
)) + /* _RINGS_RX_MINI */
51 nla_total_size(sizeof(u32
)) + /* _RINGS_RX_JUMBO */
52 nla_total_size(sizeof(u32
)); /* _RINGS_TX */
55 static int rings_fill_reply(struct sk_buff
*skb
,
56 const struct ethnl_req_info
*req_base
,
57 const struct ethnl_reply_data
*reply_base
)
59 const struct rings_reply_data
*data
= RINGS_REPDATA(reply_base
);
60 const struct ethtool_ringparam
*ringparam
= &data
->ringparam
;
62 if ((ringparam
->rx_max_pending
&&
63 (nla_put_u32(skb
, ETHTOOL_A_RINGS_RX_MAX
,
64 ringparam
->rx_max_pending
) ||
65 nla_put_u32(skb
, ETHTOOL_A_RINGS_RX
,
66 ringparam
->rx_pending
))) ||
67 (ringparam
->rx_mini_max_pending
&&
68 (nla_put_u32(skb
, ETHTOOL_A_RINGS_RX_MINI_MAX
,
69 ringparam
->rx_mini_max_pending
) ||
70 nla_put_u32(skb
, ETHTOOL_A_RINGS_RX_MINI
,
71 ringparam
->rx_mini_pending
))) ||
72 (ringparam
->rx_jumbo_max_pending
&&
73 (nla_put_u32(skb
, ETHTOOL_A_RINGS_RX_JUMBO_MAX
,
74 ringparam
->rx_jumbo_max_pending
) ||
75 nla_put_u32(skb
, ETHTOOL_A_RINGS_RX_JUMBO
,
76 ringparam
->rx_jumbo_pending
))) ||
77 (ringparam
->tx_max_pending
&&
78 (nla_put_u32(skb
, ETHTOOL_A_RINGS_TX_MAX
,
79 ringparam
->tx_max_pending
) ||
80 nla_put_u32(skb
, ETHTOOL_A_RINGS_TX
,
81 ringparam
->tx_pending
))))
87 const struct ethnl_request_ops ethnl_rings_request_ops
= {
88 .request_cmd
= ETHTOOL_MSG_RINGS_GET
,
89 .reply_cmd
= ETHTOOL_MSG_RINGS_GET_REPLY
,
90 .hdr_attr
= ETHTOOL_A_RINGS_HEADER
,
91 .req_info_size
= sizeof(struct rings_req_info
),
92 .reply_data_size
= sizeof(struct rings_reply_data
),
94 .prepare_data
= rings_prepare_data
,
95 .reply_size
= rings_reply_size
,
96 .fill_reply
= rings_fill_reply
,
101 const struct nla_policy ethnl_rings_set_policy
[] = {
102 [ETHTOOL_A_RINGS_HEADER
] =
103 NLA_POLICY_NESTED(ethnl_header_policy
),
104 [ETHTOOL_A_RINGS_RX
] = { .type
= NLA_U32
},
105 [ETHTOOL_A_RINGS_RX_MINI
] = { .type
= NLA_U32
},
106 [ETHTOOL_A_RINGS_RX_JUMBO
] = { .type
= NLA_U32
},
107 [ETHTOOL_A_RINGS_TX
] = { .type
= NLA_U32
},
110 int ethnl_set_rings(struct sk_buff
*skb
, struct genl_info
*info
)
112 struct ethtool_ringparam ringparam
= {};
113 struct ethnl_req_info req_info
= {};
114 struct nlattr
**tb
= info
->attrs
;
115 const struct nlattr
*err_attr
;
116 const struct ethtool_ops
*ops
;
117 struct net_device
*dev
;
121 ret
= ethnl_parse_header_dev_get(&req_info
,
122 tb
[ETHTOOL_A_RINGS_HEADER
],
123 genl_info_net(info
), info
->extack
,
128 ops
= dev
->ethtool_ops
;
130 if (!ops
->get_ringparam
|| !ops
->set_ringparam
)
134 ret
= ethnl_ops_begin(dev
);
137 ops
->get_ringparam(dev
, &ringparam
);
139 ethnl_update_u32(&ringparam
.rx_pending
, tb
[ETHTOOL_A_RINGS_RX
], &mod
);
140 ethnl_update_u32(&ringparam
.rx_mini_pending
,
141 tb
[ETHTOOL_A_RINGS_RX_MINI
], &mod
);
142 ethnl_update_u32(&ringparam
.rx_jumbo_pending
,
143 tb
[ETHTOOL_A_RINGS_RX_JUMBO
], &mod
);
144 ethnl_update_u32(&ringparam
.tx_pending
, tb
[ETHTOOL_A_RINGS_TX
], &mod
);
149 /* ensure new ring parameters are within limits */
150 if (ringparam
.rx_pending
> ringparam
.rx_max_pending
)
151 err_attr
= tb
[ETHTOOL_A_RINGS_RX
];
152 else if (ringparam
.rx_mini_pending
> ringparam
.rx_mini_max_pending
)
153 err_attr
= tb
[ETHTOOL_A_RINGS_RX_MINI
];
154 else if (ringparam
.rx_jumbo_pending
> ringparam
.rx_jumbo_max_pending
)
155 err_attr
= tb
[ETHTOOL_A_RINGS_RX_JUMBO
];
156 else if (ringparam
.tx_pending
> ringparam
.tx_max_pending
)
157 err_attr
= tb
[ETHTOOL_A_RINGS_TX
];
162 NL_SET_ERR_MSG_ATTR(info
->extack
, err_attr
,
163 "requested ring size exceeds maximum");
167 ret
= dev
->ethtool_ops
->set_ringparam(dev
, &ringparam
);
170 ethtool_notify(dev
, ETHTOOL_MSG_RINGS_NTF
, NULL
);
173 ethnl_ops_complete(dev
);