/*
 * Copyright (c) 2008, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Author: Lucy Liu <lucy.liu@intel.com>
 */
#include <linux/netdevice.h>
#include <linux/netlink.h>
#include <net/netlink.h>
#include <net/rtnetlink.h>
#include <linux/dcbnl.h>
#include <linux/rtnetlink.h>
#include <net/sock.h>
/**
 * Data Center Bridging (DCB) is a collection of Ethernet enhancements
 * intended to allow network traffic with differing requirements
 * (highly reliable, no drops vs. best effort vs. low latency) to operate
 * and co-exist on Ethernet.  Current DCB features are:
 *
 * Enhanced Transmission Selection (aka Priority Grouping [PG]) - provides a
 *   framework for assigning bandwidth guarantees to traffic classes.
 *
 * Priority-based Flow Control (PFC) - provides a flow control mechanism which
 *   can work independently for each 802.1p priority.
 *
 * Congestion Notification - provides a mechanism for end-to-end congestion
 *   control for protocols which do not have built-in congestion management.
 *
 * More information about the emerging standards for these Ethernet features
 * can be found at: http://www.ieee802.org/1/pages/dcbridges.html
 *
 * This file implements an rtnetlink interface to allow configuration of DCB
 * features for capable devices.
 */
50 MODULE_AUTHOR("Lucy Liu, <lucy.liu@intel.com>");
51 MODULE_DESCRIPTION("Data Center Bridging netlink interface");
52 MODULE_LICENSE("GPL");
54 /**************** DCB attribute policies *************************************/
56 /* DCB netlink attributes policy */
57 static struct nla_policy dcbnl_rtnl_policy
[DCB_ATTR_MAX
+ 1] = {
58 [DCB_ATTR_IFNAME
] = {.type
= NLA_NUL_STRING
, .len
= IFNAMSIZ
- 1},
59 [DCB_ATTR_STATE
] = {.type
= NLA_U8
},
60 [DCB_ATTR_PFC_CFG
] = {.type
= NLA_NESTED
},
61 [DCB_ATTR_PG_CFG
] = {.type
= NLA_NESTED
},
62 [DCB_ATTR_SET_ALL
] = {.type
= NLA_U8
},
63 [DCB_ATTR_PERM_HWADDR
] = {.type
= NLA_FLAG
},
64 [DCB_ATTR_CAP
] = {.type
= NLA_NESTED
},
65 [DCB_ATTR_PFC_STATE
] = {.type
= NLA_U8
},
66 [DCB_ATTR_BCN
] = {.type
= NLA_NESTED
},
69 /* DCB priority flow control to User Priority nested attributes */
70 static struct nla_policy dcbnl_pfc_up_nest
[DCB_PFC_UP_ATTR_MAX
+ 1] = {
71 [DCB_PFC_UP_ATTR_0
] = {.type
= NLA_U8
},
72 [DCB_PFC_UP_ATTR_1
] = {.type
= NLA_U8
},
73 [DCB_PFC_UP_ATTR_2
] = {.type
= NLA_U8
},
74 [DCB_PFC_UP_ATTR_3
] = {.type
= NLA_U8
},
75 [DCB_PFC_UP_ATTR_4
] = {.type
= NLA_U8
},
76 [DCB_PFC_UP_ATTR_5
] = {.type
= NLA_U8
},
77 [DCB_PFC_UP_ATTR_6
] = {.type
= NLA_U8
},
78 [DCB_PFC_UP_ATTR_7
] = {.type
= NLA_U8
},
79 [DCB_PFC_UP_ATTR_ALL
] = {.type
= NLA_FLAG
},
82 /* DCB priority grouping nested attributes */
83 static struct nla_policy dcbnl_pg_nest
[DCB_PG_ATTR_MAX
+ 1] = {
84 [DCB_PG_ATTR_TC_0
] = {.type
= NLA_NESTED
},
85 [DCB_PG_ATTR_TC_1
] = {.type
= NLA_NESTED
},
86 [DCB_PG_ATTR_TC_2
] = {.type
= NLA_NESTED
},
87 [DCB_PG_ATTR_TC_3
] = {.type
= NLA_NESTED
},
88 [DCB_PG_ATTR_TC_4
] = {.type
= NLA_NESTED
},
89 [DCB_PG_ATTR_TC_5
] = {.type
= NLA_NESTED
},
90 [DCB_PG_ATTR_TC_6
] = {.type
= NLA_NESTED
},
91 [DCB_PG_ATTR_TC_7
] = {.type
= NLA_NESTED
},
92 [DCB_PG_ATTR_TC_ALL
] = {.type
= NLA_NESTED
},
93 [DCB_PG_ATTR_BW_ID_0
] = {.type
= NLA_U8
},
94 [DCB_PG_ATTR_BW_ID_1
] = {.type
= NLA_U8
},
95 [DCB_PG_ATTR_BW_ID_2
] = {.type
= NLA_U8
},
96 [DCB_PG_ATTR_BW_ID_3
] = {.type
= NLA_U8
},
97 [DCB_PG_ATTR_BW_ID_4
] = {.type
= NLA_U8
},
98 [DCB_PG_ATTR_BW_ID_5
] = {.type
= NLA_U8
},
99 [DCB_PG_ATTR_BW_ID_6
] = {.type
= NLA_U8
},
100 [DCB_PG_ATTR_BW_ID_7
] = {.type
= NLA_U8
},
101 [DCB_PG_ATTR_BW_ID_ALL
] = {.type
= NLA_FLAG
},
104 /* DCB traffic class nested attributes. */
105 static struct nla_policy dcbnl_tc_param_nest
[DCB_TC_ATTR_PARAM_MAX
+ 1] = {
106 [DCB_TC_ATTR_PARAM_PGID
] = {.type
= NLA_U8
},
107 [DCB_TC_ATTR_PARAM_UP_MAPPING
] = {.type
= NLA_U8
},
108 [DCB_TC_ATTR_PARAM_STRICT_PRIO
] = {.type
= NLA_U8
},
109 [DCB_TC_ATTR_PARAM_BW_PCT
] = {.type
= NLA_U8
},
110 [DCB_TC_ATTR_PARAM_ALL
] = {.type
= NLA_FLAG
},
113 /* DCB capabilities nested attributes. */
114 static struct nla_policy dcbnl_cap_nest
[DCB_CAP_ATTR_MAX
+ 1] = {
115 [DCB_CAP_ATTR_ALL
] = {.type
= NLA_FLAG
},
116 [DCB_CAP_ATTR_PG
] = {.type
= NLA_U8
},
117 [DCB_CAP_ATTR_PFC
] = {.type
= NLA_U8
},
118 [DCB_CAP_ATTR_UP2TC
] = {.type
= NLA_U8
},
119 [DCB_CAP_ATTR_PG_TCS
] = {.type
= NLA_U8
},
120 [DCB_CAP_ATTR_PFC_TCS
] = {.type
= NLA_U8
},
121 [DCB_CAP_ATTR_GSP
] = {.type
= NLA_U8
},
122 [DCB_CAP_ATTR_BCN
] = {.type
= NLA_U8
},
125 /* DCB capabilities nested attributes. */
126 static struct nla_policy dcbnl_numtcs_nest
[DCB_NUMTCS_ATTR_MAX
+ 1] = {
127 [DCB_NUMTCS_ATTR_ALL
] = {.type
= NLA_FLAG
},
128 [DCB_NUMTCS_ATTR_PG
] = {.type
= NLA_U8
},
129 [DCB_NUMTCS_ATTR_PFC
] = {.type
= NLA_U8
},
132 /* DCB BCN nested attributes. */
133 static struct nla_policy dcbnl_bcn_nest
[DCB_BCN_ATTR_MAX
+ 1] = {
134 [DCB_BCN_ATTR_RP_0
] = {.type
= NLA_U8
},
135 [DCB_BCN_ATTR_RP_1
] = {.type
= NLA_U8
},
136 [DCB_BCN_ATTR_RP_2
] = {.type
= NLA_U8
},
137 [DCB_BCN_ATTR_RP_3
] = {.type
= NLA_U8
},
138 [DCB_BCN_ATTR_RP_4
] = {.type
= NLA_U8
},
139 [DCB_BCN_ATTR_RP_5
] = {.type
= NLA_U8
},
140 [DCB_BCN_ATTR_RP_6
] = {.type
= NLA_U8
},
141 [DCB_BCN_ATTR_RP_7
] = {.type
= NLA_U8
},
142 [DCB_BCN_ATTR_RP_ALL
] = {.type
= NLA_FLAG
},
143 [DCB_BCN_ATTR_ALPHA
] = {.type
= NLA_U32
},
144 [DCB_BCN_ATTR_BETA
] = {.type
= NLA_U32
},
145 [DCB_BCN_ATTR_GD
] = {.type
= NLA_U32
},
146 [DCB_BCN_ATTR_GI
] = {.type
= NLA_U32
},
147 [DCB_BCN_ATTR_TMAX
] = {.type
= NLA_U32
},
148 [DCB_BCN_ATTR_TD
] = {.type
= NLA_U32
},
149 [DCB_BCN_ATTR_RMIN
] = {.type
= NLA_U32
},
150 [DCB_BCN_ATTR_W
] = {.type
= NLA_U32
},
151 [DCB_BCN_ATTR_RD
] = {.type
= NLA_U32
},
152 [DCB_BCN_ATTR_RU
] = {.type
= NLA_U32
},
153 [DCB_BCN_ATTR_WRTT
] = {.type
= NLA_U32
},
154 [DCB_BCN_ATTR_RI
] = {.type
= NLA_U32
},
155 [DCB_BCN_ATTR_C
] = {.type
= NLA_U32
},
156 [DCB_BCN_ATTR_ALL
] = {.type
= NLA_FLAG
},
159 /* standard netlink reply call */
160 static int dcbnl_reply(u8 value
, u8 event
, u8 cmd
, u8 attr
, u32 pid
,
163 struct sk_buff
*dcbnl_skb
;
165 struct nlmsghdr
*nlh
;
168 dcbnl_skb
= nlmsg_new(NLMSG_DEFAULT_SIZE
, GFP_KERNEL
);
172 nlh
= NLMSG_NEW(dcbnl_skb
, pid
, seq
, event
, sizeof(*dcb
), flags
);
174 dcb
= NLMSG_DATA(nlh
);
175 dcb
->dcb_family
= AF_UNSPEC
;
179 ret
= nla_put_u8(dcbnl_skb
, attr
, value
);
183 /* end the message, assign the nlmsg_len. */
184 nlmsg_end(dcbnl_skb
, nlh
);
185 ret
= rtnl_unicast(dcbnl_skb
, &init_net
, pid
);
196 static int dcbnl_getstate(struct net_device
*netdev
, struct nlattr
**tb
,
197 u32 pid
, u32 seq
, u16 flags
)
201 /* if (!tb[DCB_ATTR_STATE] || !netdev->dcbnl_ops->getstate) */
202 if (!netdev
->dcbnl_ops
->getstate
)
205 ret
= dcbnl_reply(netdev
->dcbnl_ops
->getstate(netdev
), RTM_GETDCB
,
206 DCB_CMD_GSTATE
, DCB_ATTR_STATE
, pid
, seq
, flags
);
211 static int dcbnl_getpfccfg(struct net_device
*netdev
, struct nlattr
**tb
,
212 u32 pid
, u32 seq
, u16 flags
)
214 struct sk_buff
*dcbnl_skb
;
215 struct nlmsghdr
*nlh
;
217 struct nlattr
*data
[DCB_PFC_UP_ATTR_MAX
+ 1], *nest
;
223 if (!tb
[DCB_ATTR_PFC_CFG
] || !netdev
->dcbnl_ops
->getpfccfg
)
226 ret
= nla_parse_nested(data
, DCB_PFC_UP_ATTR_MAX
,
227 tb
[DCB_ATTR_PFC_CFG
],
232 dcbnl_skb
= nlmsg_new(NLMSG_DEFAULT_SIZE
, GFP_KERNEL
);
236 nlh
= NLMSG_NEW(dcbnl_skb
, pid
, seq
, RTM_GETDCB
, sizeof(*dcb
), flags
);
238 dcb
= NLMSG_DATA(nlh
);
239 dcb
->dcb_family
= AF_UNSPEC
;
240 dcb
->cmd
= DCB_CMD_PFC_GCFG
;
242 nest
= nla_nest_start(dcbnl_skb
, DCB_ATTR_PFC_CFG
);
246 if (data
[DCB_PFC_UP_ATTR_ALL
])
249 for (i
= DCB_PFC_UP_ATTR_0
; i
<= DCB_PFC_UP_ATTR_7
; i
++) {
250 if (!getall
&& !data
[i
])
253 netdev
->dcbnl_ops
->getpfccfg(netdev
, i
- DCB_PFC_UP_ATTR_0
,
255 ret
= nla_put_u8(dcbnl_skb
, i
, value
);
258 nla_nest_cancel(dcbnl_skb
, nest
);
262 nla_nest_end(dcbnl_skb
, nest
);
264 nlmsg_end(dcbnl_skb
, nlh
);
266 ret
= rtnl_unicast(dcbnl_skb
, &init_net
, pid
);
278 static int dcbnl_getperm_hwaddr(struct net_device
*netdev
, struct nlattr
**tb
,
279 u32 pid
, u32 seq
, u16 flags
)
281 struct sk_buff
*dcbnl_skb
;
282 struct nlmsghdr
*nlh
;
284 u8 perm_addr
[MAX_ADDR_LEN
];
287 if (!netdev
->dcbnl_ops
->getpermhwaddr
)
290 dcbnl_skb
= nlmsg_new(NLMSG_DEFAULT_SIZE
, GFP_KERNEL
);
294 nlh
= NLMSG_NEW(dcbnl_skb
, pid
, seq
, RTM_GETDCB
, sizeof(*dcb
), flags
);
296 dcb
= NLMSG_DATA(nlh
);
297 dcb
->dcb_family
= AF_UNSPEC
;
298 dcb
->cmd
= DCB_CMD_GPERM_HWADDR
;
300 netdev
->dcbnl_ops
->getpermhwaddr(netdev
, perm_addr
);
302 ret
= nla_put(dcbnl_skb
, DCB_ATTR_PERM_HWADDR
, sizeof(perm_addr
),
305 nlmsg_end(dcbnl_skb
, nlh
);
307 ret
= rtnl_unicast(dcbnl_skb
, &init_net
, pid
);
320 static int dcbnl_getcap(struct net_device
*netdev
, struct nlattr
**tb
,
321 u32 pid
, u32 seq
, u16 flags
)
323 struct sk_buff
*dcbnl_skb
;
324 struct nlmsghdr
*nlh
;
326 struct nlattr
*data
[DCB_CAP_ATTR_MAX
+ 1], *nest
;
332 if (!tb
[DCB_ATTR_CAP
] || !netdev
->dcbnl_ops
->getcap
)
335 ret
= nla_parse_nested(data
, DCB_CAP_ATTR_MAX
, tb
[DCB_ATTR_CAP
],
340 dcbnl_skb
= nlmsg_new(NLMSG_DEFAULT_SIZE
, GFP_KERNEL
);
344 nlh
= NLMSG_NEW(dcbnl_skb
, pid
, seq
, RTM_GETDCB
, sizeof(*dcb
), flags
);
346 dcb
= NLMSG_DATA(nlh
);
347 dcb
->dcb_family
= AF_UNSPEC
;
348 dcb
->cmd
= DCB_CMD_GCAP
;
350 nest
= nla_nest_start(dcbnl_skb
, DCB_ATTR_CAP
);
354 if (data
[DCB_CAP_ATTR_ALL
])
357 for (i
= DCB_CAP_ATTR_ALL
+1; i
<= DCB_CAP_ATTR_MAX
; i
++) {
358 if (!getall
&& !data
[i
])
361 if (!netdev
->dcbnl_ops
->getcap(netdev
, i
, &value
)) {
362 ret
= nla_put_u8(dcbnl_skb
, i
, value
);
365 nla_nest_cancel(dcbnl_skb
, nest
);
370 nla_nest_end(dcbnl_skb
, nest
);
372 nlmsg_end(dcbnl_skb
, nlh
);
374 ret
= rtnl_unicast(dcbnl_skb
, &init_net
, pid
);
386 static int dcbnl_getnumtcs(struct net_device
*netdev
, struct nlattr
**tb
,
387 u32 pid
, u32 seq
, u16 flags
)
389 struct sk_buff
*dcbnl_skb
;
390 struct nlmsghdr
*nlh
;
392 struct nlattr
*data
[DCB_NUMTCS_ATTR_MAX
+ 1], *nest
;
398 if (!tb
[DCB_ATTR_NUMTCS
] || !netdev
->dcbnl_ops
->getnumtcs
)
401 ret
= nla_parse_nested(data
, DCB_NUMTCS_ATTR_MAX
, tb
[DCB_ATTR_NUMTCS
],
408 dcbnl_skb
= nlmsg_new(NLMSG_DEFAULT_SIZE
, GFP_KERNEL
);
414 nlh
= NLMSG_NEW(dcbnl_skb
, pid
, seq
, RTM_GETDCB
, sizeof(*dcb
), flags
);
416 dcb
= NLMSG_DATA(nlh
);
417 dcb
->dcb_family
= AF_UNSPEC
;
418 dcb
->cmd
= DCB_CMD_GNUMTCS
;
420 nest
= nla_nest_start(dcbnl_skb
, DCB_ATTR_NUMTCS
);
426 if (data
[DCB_NUMTCS_ATTR_ALL
])
429 for (i
= DCB_NUMTCS_ATTR_ALL
+1; i
<= DCB_NUMTCS_ATTR_MAX
; i
++) {
430 if (!getall
&& !data
[i
])
433 ret
= netdev
->dcbnl_ops
->getnumtcs(netdev
, i
, &value
);
435 ret
= nla_put_u8(dcbnl_skb
, i
, value
);
438 nla_nest_cancel(dcbnl_skb
, nest
);
446 nla_nest_end(dcbnl_skb
, nest
);
448 nlmsg_end(dcbnl_skb
, nlh
);
450 ret
= rtnl_unicast(dcbnl_skb
, &init_net
, pid
);
464 static int dcbnl_setnumtcs(struct net_device
*netdev
, struct nlattr
**tb
,
465 u32 pid
, u32 seq
, u16 flags
)
467 struct nlattr
*data
[DCB_NUMTCS_ATTR_MAX
+ 1];
472 if (!tb
[DCB_ATTR_NUMTCS
] || !netdev
->dcbnl_ops
->setstate
)
475 ret
= nla_parse_nested(data
, DCB_NUMTCS_ATTR_MAX
, tb
[DCB_ATTR_NUMTCS
],
483 for (i
= DCB_NUMTCS_ATTR_ALL
+1; i
<= DCB_NUMTCS_ATTR_MAX
; i
++) {
487 value
= nla_get_u8(data
[i
]);
489 ret
= netdev
->dcbnl_ops
->setnumtcs(netdev
, i
, value
);
496 ret
= dcbnl_reply(!!ret
, RTM_SETDCB
, DCB_CMD_SNUMTCS
,
497 DCB_ATTR_NUMTCS
, pid
, seq
, flags
);
503 static int dcbnl_getpfcstate(struct net_device
*netdev
, struct nlattr
**tb
,
504 u32 pid
, u32 seq
, u16 flags
)
508 if (!netdev
->dcbnl_ops
->getpfcstate
)
511 ret
= dcbnl_reply(netdev
->dcbnl_ops
->getpfcstate(netdev
), RTM_GETDCB
,
512 DCB_CMD_PFC_GSTATE
, DCB_ATTR_PFC_STATE
,
518 static int dcbnl_setpfcstate(struct net_device
*netdev
, struct nlattr
**tb
,
519 u32 pid
, u32 seq
, u16 flags
)
524 if (!tb
[DCB_ATTR_PFC_STATE
] || !netdev
->dcbnl_ops
->setpfcstate
)
527 value
= nla_get_u8(tb
[DCB_ATTR_PFC_STATE
]);
529 netdev
->dcbnl_ops
->setpfcstate(netdev
, value
);
531 ret
= dcbnl_reply(0, RTM_SETDCB
, DCB_CMD_PFC_SSTATE
, DCB_ATTR_PFC_STATE
,
537 static int __dcbnl_pg_getcfg(struct net_device
*netdev
, struct nlattr
**tb
,
538 u32 pid
, u32 seq
, u16 flags
, int dir
)
540 struct sk_buff
*dcbnl_skb
;
541 struct nlmsghdr
*nlh
;
543 struct nlattr
*pg_nest
, *param_nest
, *data
;
544 struct nlattr
*pg_tb
[DCB_PG_ATTR_MAX
+ 1];
545 struct nlattr
*param_tb
[DCB_TC_ATTR_PARAM_MAX
+ 1];
546 u8 prio
, pgid
, tc_pct
, up_map
;
551 if (!tb
[DCB_ATTR_PG_CFG
] ||
552 !netdev
->dcbnl_ops
->getpgtccfgtx
||
553 !netdev
->dcbnl_ops
->getpgtccfgrx
||
554 !netdev
->dcbnl_ops
->getpgbwgcfgtx
||
555 !netdev
->dcbnl_ops
->getpgbwgcfgrx
)
558 ret
= nla_parse_nested(pg_tb
, DCB_PG_ATTR_MAX
,
559 tb
[DCB_ATTR_PG_CFG
], dcbnl_pg_nest
);
564 dcbnl_skb
= nlmsg_new(NLMSG_DEFAULT_SIZE
, GFP_KERNEL
);
568 nlh
= NLMSG_NEW(dcbnl_skb
, pid
, seq
, RTM_GETDCB
, sizeof(*dcb
), flags
);
570 dcb
= NLMSG_DATA(nlh
);
571 dcb
->dcb_family
= AF_UNSPEC
;
572 dcb
->cmd
= (dir
) ? DCB_CMD_PGRX_GCFG
: DCB_CMD_PGTX_GCFG
;
574 pg_nest
= nla_nest_start(dcbnl_skb
, DCB_ATTR_PG_CFG
);
578 if (pg_tb
[DCB_PG_ATTR_TC_ALL
])
581 for (i
= DCB_PG_ATTR_TC_0
; i
<= DCB_PG_ATTR_TC_7
; i
++) {
582 if (!getall
&& !pg_tb
[i
])
585 if (pg_tb
[DCB_PG_ATTR_TC_ALL
])
586 data
= pg_tb
[DCB_PG_ATTR_TC_ALL
];
589 ret
= nla_parse_nested(param_tb
, DCB_TC_ATTR_PARAM_MAX
,
590 data
, dcbnl_tc_param_nest
);
594 param_nest
= nla_nest_start(dcbnl_skb
, i
);
598 pgid
= DCB_ATTR_VALUE_UNDEFINED
;
599 prio
= DCB_ATTR_VALUE_UNDEFINED
;
600 tc_pct
= DCB_ATTR_VALUE_UNDEFINED
;
601 up_map
= DCB_ATTR_VALUE_UNDEFINED
;
605 netdev
->dcbnl_ops
->getpgtccfgrx(netdev
,
606 i
- DCB_PG_ATTR_TC_0
, &prio
,
607 &pgid
, &tc_pct
, &up_map
);
610 netdev
->dcbnl_ops
->getpgtccfgtx(netdev
,
611 i
- DCB_PG_ATTR_TC_0
, &prio
,
612 &pgid
, &tc_pct
, &up_map
);
615 if (param_tb
[DCB_TC_ATTR_PARAM_PGID
] ||
616 param_tb
[DCB_TC_ATTR_PARAM_ALL
]) {
617 ret
= nla_put_u8(dcbnl_skb
,
618 DCB_TC_ATTR_PARAM_PGID
, pgid
);
622 if (param_tb
[DCB_TC_ATTR_PARAM_UP_MAPPING
] ||
623 param_tb
[DCB_TC_ATTR_PARAM_ALL
]) {
624 ret
= nla_put_u8(dcbnl_skb
,
625 DCB_TC_ATTR_PARAM_UP_MAPPING
, up_map
);
629 if (param_tb
[DCB_TC_ATTR_PARAM_STRICT_PRIO
] ||
630 param_tb
[DCB_TC_ATTR_PARAM_ALL
]) {
631 ret
= nla_put_u8(dcbnl_skb
,
632 DCB_TC_ATTR_PARAM_STRICT_PRIO
, prio
);
636 if (param_tb
[DCB_TC_ATTR_PARAM_BW_PCT
] ||
637 param_tb
[DCB_TC_ATTR_PARAM_ALL
]) {
638 ret
= nla_put_u8(dcbnl_skb
, DCB_TC_ATTR_PARAM_BW_PCT
,
643 nla_nest_end(dcbnl_skb
, param_nest
);
646 if (pg_tb
[DCB_PG_ATTR_BW_ID_ALL
])
651 for (i
= DCB_PG_ATTR_BW_ID_0
; i
<= DCB_PG_ATTR_BW_ID_7
; i
++) {
652 if (!getall
&& !pg_tb
[i
])
655 tc_pct
= DCB_ATTR_VALUE_UNDEFINED
;
659 netdev
->dcbnl_ops
->getpgbwgcfgrx(netdev
,
660 i
- DCB_PG_ATTR_BW_ID_0
, &tc_pct
);
663 netdev
->dcbnl_ops
->getpgbwgcfgtx(netdev
,
664 i
- DCB_PG_ATTR_BW_ID_0
, &tc_pct
);
666 ret
= nla_put_u8(dcbnl_skb
, i
, tc_pct
);
672 nla_nest_end(dcbnl_skb
, pg_nest
);
674 nlmsg_end(dcbnl_skb
, nlh
);
676 ret
= rtnl_unicast(dcbnl_skb
, &init_net
, pid
);
683 nla_nest_cancel(dcbnl_skb
, param_nest
);
685 nla_nest_cancel(dcbnl_skb
, pg_nest
);
694 static int dcbnl_pgtx_getcfg(struct net_device
*netdev
, struct nlattr
**tb
,
695 u32 pid
, u32 seq
, u16 flags
)
697 return __dcbnl_pg_getcfg(netdev
, tb
, pid
, seq
, flags
, 0);
700 static int dcbnl_pgrx_getcfg(struct net_device
*netdev
, struct nlattr
**tb
,
701 u32 pid
, u32 seq
, u16 flags
)
703 return __dcbnl_pg_getcfg(netdev
, tb
, pid
, seq
, flags
, 1);
706 static int dcbnl_setstate(struct net_device
*netdev
, struct nlattr
**tb
,
707 u32 pid
, u32 seq
, u16 flags
)
712 if (!tb
[DCB_ATTR_STATE
] || !netdev
->dcbnl_ops
->setstate
)
715 value
= nla_get_u8(tb
[DCB_ATTR_STATE
]);
717 netdev
->dcbnl_ops
->setstate(netdev
, value
);
719 ret
= dcbnl_reply(0, RTM_SETDCB
, DCB_CMD_SSTATE
, DCB_ATTR_STATE
,
725 static int dcbnl_setpfccfg(struct net_device
*netdev
, struct nlattr
**tb
,
726 u32 pid
, u32 seq
, u16 flags
)
728 struct nlattr
*data
[DCB_PFC_UP_ATTR_MAX
+ 1];
733 if (!tb
[DCB_ATTR_PFC_CFG
] || !netdev
->dcbnl_ops
->setpfccfg
)
736 ret
= nla_parse_nested(data
, DCB_PFC_UP_ATTR_MAX
,
737 tb
[DCB_ATTR_PFC_CFG
],
742 for (i
= DCB_PFC_UP_ATTR_0
; i
<= DCB_PFC_UP_ATTR_7
; i
++) {
745 value
= nla_get_u8(data
[i
]);
746 netdev
->dcbnl_ops
->setpfccfg(netdev
,
747 data
[i
]->nla_type
- DCB_PFC_UP_ATTR_0
, value
);
750 ret
= dcbnl_reply(0, RTM_SETDCB
, DCB_CMD_PFC_SCFG
, DCB_ATTR_PFC_CFG
,
756 static int dcbnl_setall(struct net_device
*netdev
, struct nlattr
**tb
,
757 u32 pid
, u32 seq
, u16 flags
)
761 if (!tb
[DCB_ATTR_SET_ALL
] || !netdev
->dcbnl_ops
->setall
)
764 ret
= dcbnl_reply(netdev
->dcbnl_ops
->setall(netdev
), RTM_SETDCB
,
765 DCB_CMD_SET_ALL
, DCB_ATTR_SET_ALL
, pid
, seq
, flags
);
770 static int __dcbnl_pg_setcfg(struct net_device
*netdev
, struct nlattr
**tb
,
771 u32 pid
, u32 seq
, u16 flags
, int dir
)
773 struct nlattr
*pg_tb
[DCB_PG_ATTR_MAX
+ 1];
774 struct nlattr
*param_tb
[DCB_TC_ATTR_PARAM_MAX
+ 1];
782 if (!tb
[DCB_ATTR_PG_CFG
] ||
783 !netdev
->dcbnl_ops
->setpgtccfgtx
||
784 !netdev
->dcbnl_ops
->setpgtccfgrx
||
785 !netdev
->dcbnl_ops
->setpgbwgcfgtx
||
786 !netdev
->dcbnl_ops
->setpgbwgcfgrx
)
789 ret
= nla_parse_nested(pg_tb
, DCB_PG_ATTR_MAX
,
790 tb
[DCB_ATTR_PG_CFG
], dcbnl_pg_nest
);
794 for (i
= DCB_PG_ATTR_TC_0
; i
<= DCB_PG_ATTR_TC_7
; i
++) {
798 ret
= nla_parse_nested(param_tb
, DCB_TC_ATTR_PARAM_MAX
,
799 pg_tb
[i
], dcbnl_tc_param_nest
);
803 pgid
= DCB_ATTR_VALUE_UNDEFINED
;
804 prio
= DCB_ATTR_VALUE_UNDEFINED
;
805 tc_pct
= DCB_ATTR_VALUE_UNDEFINED
;
806 up_map
= DCB_ATTR_VALUE_UNDEFINED
;
808 if (param_tb
[DCB_TC_ATTR_PARAM_STRICT_PRIO
])
810 nla_get_u8(param_tb
[DCB_TC_ATTR_PARAM_STRICT_PRIO
]);
812 if (param_tb
[DCB_TC_ATTR_PARAM_PGID
])
813 pgid
= nla_get_u8(param_tb
[DCB_TC_ATTR_PARAM_PGID
]);
815 if (param_tb
[DCB_TC_ATTR_PARAM_BW_PCT
])
816 tc_pct
= nla_get_u8(param_tb
[DCB_TC_ATTR_PARAM_BW_PCT
]);
818 if (param_tb
[DCB_TC_ATTR_PARAM_UP_MAPPING
])
820 nla_get_u8(param_tb
[DCB_TC_ATTR_PARAM_UP_MAPPING
]);
822 /* dir: Tx = 0, Rx = 1 */
825 netdev
->dcbnl_ops
->setpgtccfgrx(netdev
,
826 i
- DCB_PG_ATTR_TC_0
,
827 prio
, pgid
, tc_pct
, up_map
);
830 netdev
->dcbnl_ops
->setpgtccfgtx(netdev
,
831 i
- DCB_PG_ATTR_TC_0
,
832 prio
, pgid
, tc_pct
, up_map
);
836 for (i
= DCB_PG_ATTR_BW_ID_0
; i
<= DCB_PG_ATTR_BW_ID_7
; i
++) {
840 tc_pct
= nla_get_u8(pg_tb
[i
]);
842 /* dir: Tx = 0, Rx = 1 */
845 netdev
->dcbnl_ops
->setpgbwgcfgrx(netdev
,
846 i
- DCB_PG_ATTR_BW_ID_0
, tc_pct
);
849 netdev
->dcbnl_ops
->setpgbwgcfgtx(netdev
,
850 i
- DCB_PG_ATTR_BW_ID_0
, tc_pct
);
854 ret
= dcbnl_reply(0, RTM_SETDCB
,
855 (dir
? DCB_CMD_PGRX_SCFG
: DCB_CMD_PGTX_SCFG
),
856 DCB_ATTR_PG_CFG
, pid
, seq
, flags
);
862 static int dcbnl_pgtx_setcfg(struct net_device
*netdev
, struct nlattr
**tb
,
863 u32 pid
, u32 seq
, u16 flags
)
865 return __dcbnl_pg_setcfg(netdev
, tb
, pid
, seq
, flags
, 0);
868 static int dcbnl_pgrx_setcfg(struct net_device
*netdev
, struct nlattr
**tb
,
869 u32 pid
, u32 seq
, u16 flags
)
871 return __dcbnl_pg_setcfg(netdev
, tb
, pid
, seq
, flags
, 1);
874 static int dcbnl_bcn_getcfg(struct net_device
*netdev
, struct nlattr
**tb
,
875 u32 pid
, u32 seq
, u16 flags
)
877 struct sk_buff
*dcbnl_skb
;
878 struct nlmsghdr
*nlh
;
880 struct nlattr
*bcn_nest
;
881 struct nlattr
*bcn_tb
[DCB_BCN_ATTR_MAX
+ 1];
888 if (!tb
[DCB_ATTR_BCN
] || !netdev
->dcbnl_ops
->getbcnrp
||
889 !netdev
->dcbnl_ops
->getbcncfg
)
892 ret
= nla_parse_nested(bcn_tb
, DCB_BCN_ATTR_MAX
,
893 tb
[DCB_ATTR_BCN
], dcbnl_bcn_nest
);
898 dcbnl_skb
= nlmsg_new(NLMSG_DEFAULT_SIZE
, GFP_KERNEL
);
902 nlh
= NLMSG_NEW(dcbnl_skb
, pid
, seq
, RTM_GETDCB
, sizeof(*dcb
), flags
);
904 dcb
= NLMSG_DATA(nlh
);
905 dcb
->dcb_family
= AF_UNSPEC
;
906 dcb
->cmd
= DCB_CMD_BCN_GCFG
;
908 bcn_nest
= nla_nest_start(dcbnl_skb
, DCB_ATTR_BCN
);
912 if (bcn_tb
[DCB_BCN_ATTR_ALL
])
915 for (i
= DCB_BCN_ATTR_RP_0
; i
<= DCB_BCN_ATTR_RP_7
; i
++) {
916 if (!getall
&& !bcn_tb
[i
])
919 netdev
->dcbnl_ops
->getbcnrp(netdev
, i
- DCB_BCN_ATTR_RP_0
,
921 ret
= nla_put_u8(dcbnl_skb
, i
, value_byte
);
926 for (i
= DCB_BCN_ATTR_ALPHA
; i
<= DCB_BCN_ATTR_RI
; i
++) {
927 if (!getall
&& !bcn_tb
[i
])
930 netdev
->dcbnl_ops
->getbcncfg(netdev
, i
,
932 ret
= nla_put_u32(dcbnl_skb
, i
, value_integer
);
937 nla_nest_end(dcbnl_skb
, bcn_nest
);
939 nlmsg_end(dcbnl_skb
, nlh
);
941 ret
= rtnl_unicast(dcbnl_skb
, &init_net
, pid
);
948 nla_nest_cancel(dcbnl_skb
, bcn_nest
);
957 static int dcbnl_bcn_setcfg(struct net_device
*netdev
, struct nlattr
**tb
,
958 u32 pid
, u32 seq
, u16 flags
)
960 struct nlattr
*data
[DCB_BCN_ATTR_MAX
+ 1];
966 if (!tb
[DCB_ATTR_BCN
] || !netdev
->dcbnl_ops
->setbcncfg
967 || !netdev
->dcbnl_ops
->setbcnrp
)
970 ret
= nla_parse_nested(data
, DCB_BCN_ATTR_MAX
,
976 for (i
= DCB_BCN_ATTR_RP_0
; i
<= DCB_BCN_ATTR_RP_7
; i
++) {
979 value_byte
= nla_get_u8(data
[i
]);
980 netdev
->dcbnl_ops
->setbcnrp(netdev
,
981 data
[i
]->nla_type
- DCB_BCN_ATTR_RP_0
, value_byte
);
984 for (i
= DCB_BCN_ATTR_ALPHA
; i
<= DCB_BCN_ATTR_RI
; i
++) {
987 value_int
= nla_get_u32(data
[i
]);
988 netdev
->dcbnl_ops
->setbcncfg(netdev
,
992 ret
= dcbnl_reply(0, RTM_SETDCB
, DCB_CMD_BCN_SCFG
, DCB_ATTR_BCN
,
998 static int dcb_doit(struct sk_buff
*skb
, struct nlmsghdr
*nlh
, void *arg
)
1000 struct net
*net
= sock_net(skb
->sk
);
1001 struct net_device
*netdev
;
1002 struct dcbmsg
*dcb
= (struct dcbmsg
*)NLMSG_DATA(nlh
);
1003 struct nlattr
*tb
[DCB_ATTR_MAX
+ 1];
1004 u32 pid
= skb
? NETLINK_CB(skb
).pid
: 0;
1007 if (net
!= &init_net
)
1010 ret
= nlmsg_parse(nlh
, sizeof(*dcb
), tb
, DCB_ATTR_MAX
,
1015 if (!tb
[DCB_ATTR_IFNAME
])
1018 netdev
= dev_get_by_name(&init_net
, nla_data(tb
[DCB_ATTR_IFNAME
]));
1022 if (!netdev
->dcbnl_ops
)
1026 case DCB_CMD_GSTATE
:
1027 ret
= dcbnl_getstate(netdev
, tb
, pid
, nlh
->nlmsg_seq
,
1030 case DCB_CMD_PFC_GCFG
:
1031 ret
= dcbnl_getpfccfg(netdev
, tb
, pid
, nlh
->nlmsg_seq
,
1034 case DCB_CMD_GPERM_HWADDR
:
1035 ret
= dcbnl_getperm_hwaddr(netdev
, tb
, pid
, nlh
->nlmsg_seq
,
1038 case DCB_CMD_PGTX_GCFG
:
1039 ret
= dcbnl_pgtx_getcfg(netdev
, tb
, pid
, nlh
->nlmsg_seq
,
1042 case DCB_CMD_PGRX_GCFG
:
1043 ret
= dcbnl_pgrx_getcfg(netdev
, tb
, pid
, nlh
->nlmsg_seq
,
1046 case DCB_CMD_BCN_GCFG
:
1047 ret
= dcbnl_bcn_getcfg(netdev
, tb
, pid
, nlh
->nlmsg_seq
,
1050 case DCB_CMD_SSTATE
:
1051 ret
= dcbnl_setstate(netdev
, tb
, pid
, nlh
->nlmsg_seq
,
1054 case DCB_CMD_PFC_SCFG
:
1055 ret
= dcbnl_setpfccfg(netdev
, tb
, pid
, nlh
->nlmsg_seq
,
1059 case DCB_CMD_SET_ALL
:
1060 ret
= dcbnl_setall(netdev
, tb
, pid
, nlh
->nlmsg_seq
,
1063 case DCB_CMD_PGTX_SCFG
:
1064 ret
= dcbnl_pgtx_setcfg(netdev
, tb
, pid
, nlh
->nlmsg_seq
,
1067 case DCB_CMD_PGRX_SCFG
:
1068 ret
= dcbnl_pgrx_setcfg(netdev
, tb
, pid
, nlh
->nlmsg_seq
,
1072 ret
= dcbnl_getcap(netdev
, tb
, pid
, nlh
->nlmsg_seq
,
1075 case DCB_CMD_GNUMTCS
:
1076 ret
= dcbnl_getnumtcs(netdev
, tb
, pid
, nlh
->nlmsg_seq
,
1079 case DCB_CMD_SNUMTCS
:
1080 ret
= dcbnl_setnumtcs(netdev
, tb
, pid
, nlh
->nlmsg_seq
,
1083 case DCB_CMD_PFC_GSTATE
:
1084 ret
= dcbnl_getpfcstate(netdev
, tb
, pid
, nlh
->nlmsg_seq
,
1087 case DCB_CMD_PFC_SSTATE
:
1088 ret
= dcbnl_setpfcstate(netdev
, tb
, pid
, nlh
->nlmsg_seq
,
1091 case DCB_CMD_BCN_SCFG
:
1092 ret
= dcbnl_bcn_setcfg(netdev
, tb
, pid
, nlh
->nlmsg_seq
,
1105 static int __init
dcbnl_init(void)
1107 rtnl_register(PF_UNSPEC
, RTM_GETDCB
, dcb_doit
, NULL
);
1108 rtnl_register(PF_UNSPEC
, RTM_SETDCB
, dcb_doit
, NULL
);
1112 module_init(dcbnl_init
);
1114 static void __exit
dcbnl_exit(void)
1116 rtnl_unregister(PF_UNSPEC
, RTM_GETDCB
);
1117 rtnl_unregister(PF_UNSPEC
, RTM_SETDCB
);
1119 module_exit(dcbnl_exit
);