1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2 /* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. */
5 #include <linux/mlx5/macsec.h>
/* Per-GID-table-slot bookkeeping used to resolve MACsec/physical GID
 * ambiguity: when a MACsec GID shadows a physical GID with the same IP,
 * the physical entry is parked here until the MACsec GID is removed.
 */
struct mlx5_reserved_gids {
	int macsec_index;	/* index of the shadowing MACsec GID, -1 if none */
	const struct ib_gid_attr *physical_gid;	/* held reference to the parked physical GID */
};
12 struct mlx5_roce_gids
{
13 struct list_head roce_gid_list_entry
;
16 struct sockaddr_in sockaddr_in
;
17 struct sockaddr_in6 sockaddr_in6
;
21 struct mlx5_macsec_device
{
22 struct list_head macsec_devices_list_entry
;
24 struct list_head macsec_roce_gids
;
25 struct list_head tx_rules_list
;
26 struct list_head rx_rules_list
;
29 static void cleanup_macsec_device(struct mlx5_macsec_device
*macsec_device
)
31 if (!list_empty(&macsec_device
->tx_rules_list
) ||
32 !list_empty(&macsec_device
->rx_rules_list
) ||
33 !list_empty(&macsec_device
->macsec_roce_gids
))
36 list_del(&macsec_device
->macsec_devices_list_entry
);
40 static struct mlx5_macsec_device
*get_macsec_device(void *macdev
,
41 struct list_head
*macsec_devices_list
)
43 struct mlx5_macsec_device
*iter
, *macsec_device
= NULL
;
45 list_for_each_entry(iter
, macsec_devices_list
, macsec_devices_list_entry
) {
46 if (iter
->macdev
== macdev
) {
55 macsec_device
= kzalloc(sizeof(*macsec_device
), GFP_KERNEL
);
59 macsec_device
->macdev
= macdev
;
60 INIT_LIST_HEAD(&macsec_device
->tx_rules_list
);
61 INIT_LIST_HEAD(&macsec_device
->rx_rules_list
);
62 INIT_LIST_HEAD(&macsec_device
->macsec_roce_gids
);
63 list_add(&macsec_device
->macsec_devices_list_entry
, macsec_devices_list
);
68 static void mlx5_macsec_del_roce_gid(struct mlx5_macsec_device
*macsec_device
, u16 gid_idx
)
70 struct mlx5_roce_gids
*current_gid
, *next_gid
;
72 list_for_each_entry_safe(current_gid
, next_gid
, &macsec_device
->macsec_roce_gids
,
74 if (current_gid
->gid_idx
== gid_idx
) {
75 list_del(¤t_gid
->roce_gid_list_entry
);
80 static void mlx5_macsec_save_roce_gid(struct mlx5_macsec_device
*macsec_device
,
81 const struct sockaddr
*addr
, u16 gid_idx
)
83 struct mlx5_roce_gids
*roce_gids
;
85 roce_gids
= kzalloc(sizeof(*roce_gids
), GFP_KERNEL
);
89 roce_gids
->gid_idx
= gid_idx
;
90 if (addr
->sa_family
== AF_INET
)
91 memcpy(&roce_gids
->addr
.sockaddr_in
, addr
, sizeof(roce_gids
->addr
.sockaddr_in
));
93 memcpy(&roce_gids
->addr
.sockaddr_in6
, addr
, sizeof(roce_gids
->addr
.sockaddr_in6
));
95 list_add_tail(&roce_gids
->roce_gid_list_entry
, &macsec_device
->macsec_roce_gids
);
98 static void handle_macsec_gids(struct list_head
*macsec_devices_list
,
99 struct mlx5_macsec_event_data
*data
)
101 struct mlx5_macsec_device
*macsec_device
;
102 struct mlx5_roce_gids
*gid
;
104 macsec_device
= get_macsec_device(data
->macdev
, macsec_devices_list
);
108 list_for_each_entry(gid
, &macsec_device
->macsec_roce_gids
, roce_gid_list_entry
) {
109 mlx5_macsec_add_roce_sa_rules(data
->fs_id
, (struct sockaddr
*)&gid
->addr
,
110 gid
->gid_idx
, &macsec_device
->tx_rules_list
,
111 &macsec_device
->rx_rules_list
, data
->macsec_fs
,
116 static void del_sa_roce_rule(struct list_head
*macsec_devices_list
,
117 struct mlx5_macsec_event_data
*data
)
119 struct mlx5_macsec_device
*macsec_device
;
121 macsec_device
= get_macsec_device(data
->macdev
, macsec_devices_list
);
122 WARN_ON(!macsec_device
);
124 mlx5_macsec_del_roce_sa_rules(data
->fs_id
, data
->macsec_fs
,
125 &macsec_device
->tx_rules_list
,
126 &macsec_device
->rx_rules_list
, data
->is_tx
);
129 static int macsec_event(struct notifier_block
*nb
, unsigned long event
, void *data
)
131 struct mlx5_macsec
*macsec
= container_of(nb
, struct mlx5_macsec
, blocking_events_nb
);
133 mutex_lock(&macsec
->lock
);
135 case MLX5_DRIVER_EVENT_MACSEC_SA_ADDED
:
136 handle_macsec_gids(&macsec
->macsec_devices_list
, data
);
138 case MLX5_DRIVER_EVENT_MACSEC_SA_DELETED
:
139 del_sa_roce_rule(&macsec
->macsec_devices_list
, data
);
142 mutex_unlock(&macsec
->lock
);
145 mutex_unlock(&macsec
->lock
);
149 void mlx5r_macsec_event_register(struct mlx5_ib_dev
*dev
)
151 if (!mlx5_is_macsec_roce_supported(dev
->mdev
)) {
152 mlx5_ib_dbg(dev
, "RoCE MACsec not supported due to capabilities\n");
156 dev
->macsec
.blocking_events_nb
.notifier_call
= macsec_event
;
157 blocking_notifier_chain_register(&dev
->mdev
->macsec_nh
,
158 &dev
->macsec
.blocking_events_nb
);
161 void mlx5r_macsec_event_unregister(struct mlx5_ib_dev
*dev
)
163 if (!mlx5_is_macsec_roce_supported(dev
->mdev
)) {
164 mlx5_ib_dbg(dev
, "RoCE MACsec not supported due to capabilities\n");
168 blocking_notifier_chain_unregister(&dev
->mdev
->macsec_nh
,
169 &dev
->macsec
.blocking_events_nb
);
172 int mlx5r_macsec_init_gids_and_devlist(struct mlx5_ib_dev
*dev
)
176 if (!mlx5_is_macsec_roce_supported(dev
->mdev
)) {
177 mlx5_ib_dbg(dev
, "RoCE MACsec not supported due to capabilities\n");
181 max_gids
= MLX5_CAP_ROCE(dev
->mdev
, roce_address_table_size
);
182 for (i
= 0; i
< dev
->num_ports
; i
++) {
183 dev
->port
[i
].reserved_gids
= kcalloc(max_gids
,
184 sizeof(*dev
->port
[i
].reserved_gids
),
186 if (!dev
->port
[i
].reserved_gids
)
189 for (j
= 0; j
< max_gids
; j
++)
190 dev
->port
[i
].reserved_gids
[j
].macsec_index
= -1;
193 INIT_LIST_HEAD(&dev
->macsec
.macsec_devices_list
);
194 mutex_init(&dev
->macsec
.lock
);
199 kfree(dev
->port
[i
].reserved_gids
);
205 void mlx5r_macsec_dealloc_gids(struct mlx5_ib_dev
*dev
)
209 if (!mlx5_is_macsec_roce_supported(dev
->mdev
))
210 mlx5_ib_dbg(dev
, "RoCE MACsec not supported due to capabilities\n");
212 for (i
= 0; i
< dev
->num_ports
; i
++)
213 kfree(dev
->port
[i
].reserved_gids
);
215 mutex_destroy(&dev
->macsec
.lock
);
218 int mlx5r_add_gid_macsec_operations(const struct ib_gid_attr
*attr
)
220 struct mlx5_ib_dev
*dev
= to_mdev(attr
->device
);
221 struct mlx5_macsec_device
*macsec_device
;
222 const struct ib_gid_attr
*physical_gid
;
223 struct mlx5_reserved_gids
*mgids
;
224 struct net_device
*ndev
;
227 struct sockaddr_in sockaddr_in
;
228 struct sockaddr_in6 sockaddr_in6
;
231 if (attr
->gid_type
!= IB_GID_TYPE_ROCE_UDP_ENCAP
)
234 if (!mlx5_is_macsec_roce_supported(dev
->mdev
)) {
235 mlx5_ib_dbg(dev
, "RoCE MACsec not supported due to capabilities\n");
240 ndev
= rcu_dereference(attr
->ndev
);
246 if (!netif_is_macsec(ndev
) || !macsec_netdev_is_offloaded(ndev
)) {
253 mutex_lock(&dev
->macsec
.lock
);
254 macsec_device
= get_macsec_device(ndev
, &dev
->macsec
.macsec_devices_list
);
255 if (!macsec_device
) {
260 physical_gid
= rdma_find_gid(attr
->device
, &attr
->gid
,
261 attr
->gid_type
, NULL
);
262 if (!IS_ERR(physical_gid
)) {
263 ret
= set_roce_addr(to_mdev(physical_gid
->device
),
264 physical_gid
->port_num
,
265 physical_gid
->index
, NULL
,
270 mgids
= &dev
->port
[attr
->port_num
- 1].reserved_gids
[physical_gid
->index
];
271 mgids
->macsec_index
= attr
->index
;
272 mgids
->physical_gid
= physical_gid
;
275 /* Proceed with adding steering rules, regardless if there was gid ambiguity or not.*/
276 rdma_gid2ip((struct sockaddr
*)&addr
, &attr
->gid
);
277 ret
= mlx5_macsec_add_roce_rule(ndev
, (struct sockaddr
*)&addr
, attr
->index
,
278 &macsec_device
->tx_rules_list
,
279 &macsec_device
->rx_rules_list
, dev
->mdev
->macsec_fs
);
280 if (ret
&& !IS_ERR(physical_gid
))
283 mlx5_macsec_save_roce_gid(macsec_device
, (struct sockaddr
*)&addr
, attr
->index
);
286 mutex_unlock(&dev
->macsec
.lock
);
290 set_roce_addr(to_mdev(physical_gid
->device
), physical_gid
->port_num
,
291 physical_gid
->index
, &physical_gid
->gid
, physical_gid
);
292 mgids
->macsec_index
= -1;
294 rdma_put_gid_attr(physical_gid
);
295 cleanup_macsec_device(macsec_device
);
298 mutex_unlock(&dev
->macsec
.lock
);
302 void mlx5r_del_gid_macsec_operations(const struct ib_gid_attr
*attr
)
304 struct mlx5_ib_dev
*dev
= to_mdev(attr
->device
);
305 struct mlx5_macsec_device
*macsec_device
;
306 struct mlx5_reserved_gids
*mgids
;
307 struct net_device
*ndev
;
310 if (attr
->gid_type
!= IB_GID_TYPE_ROCE_UDP_ENCAP
)
313 if (!mlx5_is_macsec_roce_supported(dev
->mdev
)) {
314 mlx5_ib_dbg(dev
, "RoCE MACsec not supported due to capabilities\n");
318 mgids
= &dev
->port
[attr
->port_num
- 1].reserved_gids
[attr
->index
];
319 if (mgids
->macsec_index
!= -1) { /* Checking if physical gid has ambiguous IP */
320 rdma_put_gid_attr(mgids
->physical_gid
);
321 mgids
->macsec_index
= -1;
326 ndev
= rcu_dereference(attr
->ndev
);
332 if (!netif_is_macsec(ndev
) || !macsec_netdev_is_offloaded(ndev
)) {
339 mutex_lock(&dev
->macsec
.lock
);
340 max_gids
= MLX5_CAP_ROCE(dev
->mdev
, roce_address_table_size
);
341 for (i
= 0; i
< max_gids
; i
++) { /* Checking if macsec gid has ambiguous IP */
342 mgids
= &dev
->port
[attr
->port_num
- 1].reserved_gids
[i
];
343 if (mgids
->macsec_index
== attr
->index
) {
344 const struct ib_gid_attr
*physical_gid
= mgids
->physical_gid
;
346 set_roce_addr(to_mdev(physical_gid
->device
),
347 physical_gid
->port_num
,
349 &physical_gid
->gid
, physical_gid
);
351 rdma_put_gid_attr(physical_gid
);
352 mgids
->macsec_index
= -1;
356 macsec_device
= get_macsec_device(ndev
, &dev
->macsec
.macsec_devices_list
);
357 mlx5_macsec_del_roce_rule(attr
->index
, dev
->mdev
->macsec_fs
,
358 &macsec_device
->tx_rules_list
, &macsec_device
->rx_rules_list
);
359 mlx5_macsec_del_roce_gid(macsec_device
, attr
->index
);
360 cleanup_macsec_device(macsec_device
);
363 mutex_unlock(&dev
->macsec
.lock
);