/*
 * Copyright (c) 2015, Mellanox Technologies, Ltd.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef __MLX5_ESWITCH_H__
#define __MLX5_ESWITCH_H__

#include <linux/if_ether.h>
#include <linux/if_link.h>
#include <linux/atomic.h>
#include <net/devlink.h>
#include <linux/mlx5/device.h>
#include <linux/mlx5/eswitch.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/fs.h>
#include "lib/fs_chains.h"

#ifdef CONFIG_MLX5_ESWITCH

#define ESW_OFFLOADS_DEFAULT_NUM_GROUPS 15

#define MLX5_MAX_UC_PER_VPORT(dev) \
	(1 << MLX5_CAP_GEN(dev, log_max_current_uc_list))

#define MLX5_MAX_MC_PER_VPORT(dev) \
	(1 << MLX5_CAP_GEN(dev, log_max_current_mc_list))

#define MLX5_MIN_BW_SHARE 1

#define MLX5_RATE_TO_BW_SHARE(rate, divider, limit) \
	min_t(u32, max_t(u32, (rate) / (divider), MLX5_MIN_BW_SHARE), limit)

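/*
 * Illustrative sketch, not part of the API: the macro divides the
 * requested rate by the divider and clamps the result to the range
 * [MLX5_MIN_BW_SHARE, limit]. Assuming a hypothetical divider of 100
 * and a limit of 100:
 *
 *	MLX5_RATE_TO_BW_SHARE(50, 100, 100)    == 1   (floored to min)
 *	MLX5_RATE_TO_BW_SHARE(2500, 100, 100)  == 25
 *	MLX5_RATE_TO_BW_SHARE(20000, 100, 100) == 100 (capped at limit)
 */
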
#define mlx5_esw_has_fwd_fdb(dev) \
	MLX5_CAP_ESW_FLOWTABLE(dev, fdb_multi_path_to_table)

#define esw_chains(esw) \
	((esw)->fdb_table.offloads.esw_chains_priv)

struct vport_ingress {
	struct mlx5_flow_table *acl;
	struct mlx5_flow_handle *allow_rule;
	struct {
		struct mlx5_flow_group *allow_spoofchk_only_grp;
		struct mlx5_flow_group *allow_untagged_spoofchk_grp;
		struct mlx5_flow_group *allow_untagged_only_grp;
		struct mlx5_flow_group *drop_grp;
		struct mlx5_flow_handle *drop_rule;
		struct mlx5_fc *drop_counter;
	} legacy;
	struct {
		/* Optional group to add an FTE to do internal priority
		 * tagging on ingress packets.
		 */
		struct mlx5_flow_group *metadata_prio_tag_grp;
		/* Group to add default match-all FTE entry to tag ingress
		 * packet with metadata.
		 */
		struct mlx5_flow_group *metadata_allmatch_grp;
		struct mlx5_modify_hdr *modify_metadata;
		struct mlx5_flow_handle *modify_metadata_rule;
	} offloads;
};

struct vport_egress {
	struct mlx5_flow_table *acl;
	struct mlx5_flow_handle *allowed_vlan;
	struct mlx5_flow_group *vlan_grp;
	union {
		struct {
			struct mlx5_flow_group *drop_grp;
			struct mlx5_flow_handle *drop_rule;
			struct mlx5_fc *drop_counter;
		} legacy;
		struct {
			struct mlx5_flow_group *fwd_grp;
			struct mlx5_flow_handle *fwd_rule;
		} offloads;
	};
};

struct mlx5_vport_drop_stats {
	u64 rx_dropped;
	u64 tx_dropped;
};

struct mlx5_vport_info {
	u8                      mac[ETH_ALEN];
	u16                     vlan;
	u8                      qos;
	u64                     node_guid;
	int                     link_state;
	u32                     min_rate;
	u32                     max_rate;
	bool                    spoofchk;
	bool                    trusted;
};

/* Vport context events */
enum mlx5_eswitch_vport_event {
	MLX5_VPORT_UC_ADDR_CHANGE = BIT(0),
	MLX5_VPORT_MC_ADDR_CHANGE = BIT(1),
	MLX5_VPORT_PROMISC_CHANGE = BIT(3),
};

struct mlx5_vport {
	struct mlx5_core_dev    *dev;
	int                     vport;
	struct hlist_head       uc_list[MLX5_L2_ADDR_HASH_SIZE];
	struct hlist_head       mc_list[MLX5_L2_ADDR_HASH_SIZE];
	struct mlx5_flow_handle *promisc_rule;
	struct mlx5_flow_handle *allmulti_rule;
	struct work_struct      vport_change_handler;

	struct vport_ingress    ingress;
	struct vport_egress     egress;
	u32                     default_metadata;
	u32                     metadata;

	struct mlx5_vport_info  info;

	struct {
		bool            enabled;
		u32             esw_tsar_ix;
		u32             bw_share;
	} qos;

	bool                    enabled;
	enum mlx5_eswitch_vport_event enabled_events;
	struct devlink_port     *dl_port;
};

struct mlx5_eswitch_fdb {
	union {
		struct legacy_fdb {
			struct mlx5_flow_table *fdb;
			struct mlx5_flow_group *addr_grp;
			struct mlx5_flow_group *allmulti_grp;
			struct mlx5_flow_group *promisc_grp;
			struct mlx5_flow_table *vepa_fdb;
			struct mlx5_flow_handle *vepa_uplink_rule;
			struct mlx5_flow_handle *vepa_star_rule;
		} legacy;

		struct offloads_fdb {
			struct mlx5_flow_namespace *ns;
			struct mlx5_flow_table *slow_fdb;
			struct mlx5_flow_group *send_to_vport_grp;
			struct mlx5_flow_group *peer_miss_grp;
			struct mlx5_flow_handle **peer_miss_rules;
			struct mlx5_flow_group *miss_grp;
			struct mlx5_flow_handle *miss_rule_uni;
			struct mlx5_flow_handle *miss_rule_multi;
			int vlan_push_pop_refcount;

			struct mlx5_fs_chains *esw_chains_priv;
			struct {
				DECLARE_HASHTABLE(table, 8);
				/* Protects vports.table */
				struct mutex lock;
			} vports;
		} offloads;
	};
	u32 flags;
};

struct mlx5_esw_offload {
	struct mlx5_flow_table *ft_offloads_restore;
	struct mlx5_flow_group *restore_group;
	struct mlx5_modify_hdr *restore_copy_hdr_id;

	struct mlx5_flow_table *ft_offloads;
	struct mlx5_flow_group *vport_rx_group;
	struct mlx5_eswitch_rep *vport_reps;
	struct list_head peer_flows;
	struct mutex peer_mutex;
	struct mutex encap_tbl_lock; /* protects encap_tbl */
	DECLARE_HASHTABLE(encap_tbl, 8);
	struct mutex decap_tbl_lock; /* protects decap_tbl */
	DECLARE_HASHTABLE(decap_tbl, 8);
	struct mod_hdr_tbl mod_hdr;
	DECLARE_HASHTABLE(termtbl_tbl, 8);
	struct mutex termtbl_mutex; /* protects termtbl hash */
	const struct mlx5_eswitch_rep_ops *rep_ops[NUM_REP_TYPES];
	u8 inline_mode;
	atomic64_t num_flows;
	enum devlink_eswitch_encap_mode encap;
	struct ida vport_metadata_ida;
	unsigned int host_number; /* ECPF supports one external host */
};

/* E-Switch MC FDB table hash node */
struct esw_mc_addr { /* SRIOV only */
	struct l2addr_node     node;
	struct mlx5_flow_handle *uplink_rule; /* Forward to uplink rule */
	u32                    refcnt;
};

struct mlx5_host_work {
	struct work_struct	work;
	struct mlx5_eswitch	*esw;
};

struct mlx5_esw_functions {
	struct mlx5_nb		nb;
	u16			num_vfs;
};

enum {
	MLX5_ESWITCH_VPORT_MATCH_METADATA = BIT(0),
	MLX5_ESWITCH_REG_C1_LOOPBACK_ENABLED = BIT(1),
};

struct mlx5_eswitch {
	struct mlx5_core_dev    *dev;
	struct mlx5_nb          nb;
	struct mlx5_eswitch_fdb fdb_table;
	/* legacy data structures */
	struct hlist_head       mc_table[MLX5_L2_ADDR_HASH_SIZE];
	struct esw_mc_addr      mc_promisc;
	/* end of legacy */
	struct workqueue_struct *work_queue;
	struct mlx5_vport       *vports;
	u32                     flags;
	int                     total_vports;
	int                     enabled_vports;
	/* Synchronize between vport change events
	 * and async SRIOV admin state changes
	 */
	struct mutex            state_lock;

	/* Protects eswitch mode change that occurs via one or more
	 * user commands, i.e. sriov state change, devlink commands.
	 */
	struct mutex mode_lock;

	struct {
		bool            enabled;
		u32             root_tsar_id;
	} qos;

	struct mlx5_esw_offload offloads;
	int                     mode;
	u16                     manager_vport;
	u16                     first_host_vport;
	struct mlx5_esw_functions esw_funcs;
	struct {
		u32             large_group_num;
	} params;
};

void esw_offloads_disable(struct mlx5_eswitch *esw);
int esw_offloads_enable(struct mlx5_eswitch *esw);
void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw);
int esw_offloads_init_reps(struct mlx5_eswitch *esw);

u32 mlx5_esw_match_metadata_alloc(struct mlx5_eswitch *esw);
void mlx5_esw_match_metadata_free(struct mlx5_eswitch *esw, u32 metadata);

int mlx5_esw_modify_vport_rate(struct mlx5_eswitch *esw, u16 vport_num,
			       u32 rate_mbps);

int mlx5_eswitch_init(struct mlx5_core_dev *dev);
void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw);

#define MLX5_ESWITCH_IGNORE_NUM_VFS (-1)
int mlx5_eswitch_enable_locked(struct mlx5_eswitch *esw, int mode, int num_vfs);
int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs);
void mlx5_eswitch_disable_locked(struct mlx5_eswitch *esw, bool clear_vf);
void mlx5_eswitch_disable(struct mlx5_eswitch *esw, bool clear_vf);
int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
			       u16 vport, const u8 *mac);
int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw,
				 u16 vport, int link_state);
int mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
				u16 vport, u16 vlan, u8 qos);
int mlx5_eswitch_set_vport_spoofchk(struct mlx5_eswitch *esw,
				    u16 vport, bool spoofchk);
int mlx5_eswitch_set_vport_trust(struct mlx5_eswitch *esw,
				 u16 vport_num, bool setting);
int mlx5_eswitch_set_vport_rate(struct mlx5_eswitch *esw, u16 vport,
				u32 max_rate, u32 min_rate);
int mlx5_eswitch_set_vepa(struct mlx5_eswitch *esw, u8 setting);
int mlx5_eswitch_get_vepa(struct mlx5_eswitch *esw, u8 *setting);
int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
				  u16 vport, struct ifla_vf_info *ivi);
int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
				 u16 vport,
				 struct ifla_vf_stats *vf_stats);
void mlx5_eswitch_del_send_to_vport_rule(struct mlx5_flow_handle *rule);

int mlx5_eswitch_modify_esw_vport_context(struct mlx5_core_dev *dev, u16 vport,
					  bool other_vport, void *in);

struct mlx5_flow_spec;
struct mlx5_esw_flow_attr;
struct mlx5_termtbl_handle;

bool
mlx5_eswitch_termtbl_required(struct mlx5_eswitch *esw,
			      struct mlx5_flow_attr *attr,
			      struct mlx5_flow_act *flow_act,
			      struct mlx5_flow_spec *spec);

struct mlx5_flow_handle *
mlx5_eswitch_add_termtbl_rule(struct mlx5_eswitch *esw,
			      struct mlx5_flow_table *ft,
			      struct mlx5_flow_spec *spec,
			      struct mlx5_esw_flow_attr *attr,
			      struct mlx5_flow_act *flow_act,
			      struct mlx5_flow_destination *dest,
			      int num_dest);

void
mlx5_eswitch_termtbl_put(struct mlx5_eswitch *esw,
			 struct mlx5_termtbl_handle *tt);

struct mlx5_flow_handle *
mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
				struct mlx5_flow_spec *spec,
				struct mlx5_flow_attr *attr);
struct mlx5_flow_handle *
mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
			  struct mlx5_flow_spec *spec,
			  struct mlx5_flow_attr *attr);
void
mlx5_eswitch_del_offloaded_rule(struct mlx5_eswitch *esw,
				struct mlx5_flow_handle *rule,
				struct mlx5_flow_attr *attr);
void
mlx5_eswitch_del_fwd_rule(struct mlx5_eswitch *esw,
			  struct mlx5_flow_handle *rule,
			  struct mlx5_flow_attr *attr);

struct mlx5_flow_handle *
mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, u16 vport,
				  struct mlx5_flow_destination *dest);

enum {
	SET_VLAN_STRIP	= BIT(0),
	SET_VLAN_INSERT	= BIT(1)
};

enum mlx5_flow_match_level {
	MLX5_MATCH_NONE	= MLX5_INLINE_MODE_NONE,
	MLX5_MATCH_L2	= MLX5_INLINE_MODE_L2,
	MLX5_MATCH_L3	= MLX5_INLINE_MODE_IP,
	MLX5_MATCH_L4	= MLX5_INLINE_MODE_TCP_UDP,
};

/* current maximum for flow based vport multicasting */
#define MLX5_MAX_FLOW_FWD_VPORTS 2

enum {
	MLX5_ESW_DEST_ENCAP         = BIT(0),
	MLX5_ESW_DEST_ENCAP_VALID   = BIT(1),
};

enum {
	MLX5_ESW_ATTR_FLAG_VLAN_HANDLED  = BIT(0),
	MLX5_ESW_ATTR_FLAG_SLOW_PATH     = BIT(1),
	MLX5_ESW_ATTR_FLAG_NO_IN_PORT    = BIT(2),
};

struct mlx5_esw_flow_attr {
	struct mlx5_eswitch_rep *in_rep;
	struct mlx5_core_dev	*in_mdev;
	struct mlx5_core_dev    *counter_dev;

	int split_count;
	int out_count;

	__be16	vlan_proto[MLX5_FS_VLAN_DEPTH];
	u16	vlan_vid[MLX5_FS_VLAN_DEPTH];
	u8	vlan_prio[MLX5_FS_VLAN_DEPTH];
	u8	total_vlan;
	struct {
		u32 flags;
		struct mlx5_eswitch_rep *rep;
		struct mlx5_pkt_reformat *pkt_reformat;
		struct mlx5_core_dev *mdev;
		struct mlx5_termtbl_handle *termtbl;
	} dests[MLX5_MAX_FLOW_FWD_VPORTS];
	struct mlx5_pkt_reformat *decap_pkt_reformat;
};

int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
				  struct netlink_ext_ack *extack);
int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode);
int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode,
					 struct netlink_ext_ack *extack);
int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode);
int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink,
					enum devlink_eswitch_encap_mode encap,
					struct netlink_ext_ack *extack);
int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink,
					enum devlink_eswitch_encap_mode *encap);
int mlx5_devlink_port_function_hw_addr_get(struct devlink *devlink,
					   struct devlink_port *port,
					   u8 *hw_addr, int *hw_addr_len,
					   struct netlink_ext_ack *extack);
int mlx5_devlink_port_function_hw_addr_set(struct devlink *devlink,
					   struct devlink_port *port,
					   const u8 *hw_addr, int hw_addr_len,
					   struct netlink_ext_ack *extack);

void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type);

int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
				 struct mlx5_flow_attr *attr);
int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
				 struct mlx5_flow_attr *attr);
int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
				  u16 vport, u16 vlan, u8 qos, u8 set_flags);

static inline bool mlx5_esw_qos_enabled(struct mlx5_eswitch *esw)
{
	return esw->qos.enabled;
}

static inline bool mlx5_eswitch_vlan_actions_supported(struct mlx5_core_dev *dev,
						       u8 vlan_depth)
{
	bool ret = MLX5_CAP_ESW_FLOWTABLE_FDB(dev, pop_vlan) &&
		   MLX5_CAP_ESW_FLOWTABLE_FDB(dev, push_vlan);

	if (vlan_depth == 1)
		return ret;

	return ret && MLX5_CAP_ESW_FLOWTABLE_FDB(dev, pop_vlan_2) &&
		MLX5_CAP_ESW_FLOWTABLE_FDB(dev, push_vlan_2);
}

bool mlx5_esw_lag_prereq(struct mlx5_core_dev *dev0,
			 struct mlx5_core_dev *dev1);
bool mlx5_esw_multipath_prereq(struct mlx5_core_dev *dev0,
			       struct mlx5_core_dev *dev1);

const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev);

#define MLX5_DEBUG_ESWITCH_MASK BIT(3)

#define esw_info(__dev, format, ...)			\
	dev_info((__dev)->device, "E-Switch: " format, ##__VA_ARGS__)

#define esw_warn(__dev, format, ...)			\
	dev_warn((__dev)->device, "E-Switch: " format, ##__VA_ARGS__)

#define esw_debug(dev, format, ...)				\
	mlx5_core_dbg_mask(dev, MLX5_DEBUG_ESWITCH_MASK, format, ##__VA_ARGS__)

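/*
 * Illustrative usage sketch (hypothetical call site, not from this file):
 * the macros prepend an "E-Switch: " prefix and route through the
 * device-aware printk helpers, e.g.
 *
 *	esw_warn(esw->dev, "vport %d: failed to apply config (%d)\n",
 *		 vport_num, err);
 */
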
/* The returned number is valid only when the dev is eswitch manager. */
static inline u16 mlx5_eswitch_manager_vport(struct mlx5_core_dev *dev)
{
	return mlx5_core_is_ecpf_esw_manager(dev) ?
		MLX5_VPORT_ECPF : MLX5_VPORT_PF;
}

static inline bool
mlx5_esw_is_manager_vport(const struct mlx5_eswitch *esw, u16 vport_num)
{
	return esw->manager_vport == vport_num;
}

static inline u16 mlx5_eswitch_first_host_vport_num(struct mlx5_core_dev *dev)
{
	return mlx5_core_is_ecpf_esw_manager(dev) ?
		MLX5_VPORT_PF : MLX5_VPORT_FIRST_VF;
}

static inline bool mlx5_eswitch_is_funcs_handler(const struct mlx5_core_dev *dev)
{
	return mlx5_core_is_ecpf_esw_manager(dev);
}

static inline int mlx5_eswitch_uplink_idx(struct mlx5_eswitch *esw)
{
	/* The uplink is always the last element of the vports array. */
	return esw->total_vports - 1;
}

static inline int mlx5_eswitch_ecpf_idx(struct mlx5_eswitch *esw)
{
	return esw->total_vports - 2;
}

static inline int mlx5_eswitch_vport_num_to_index(struct mlx5_eswitch *esw,
						  u16 vport_num)
{
	if (vport_num == MLX5_VPORT_ECPF) {
		if (!mlx5_ecpf_vport_exists(esw->dev))
			esw_warn(esw->dev, "ECPF vport doesn't exist!\n");
		return mlx5_eswitch_ecpf_idx(esw);
	}

	if (vport_num == MLX5_VPORT_UPLINK)
		return mlx5_eswitch_uplink_idx(esw);

	return vport_num;
}

static inline u16 mlx5_eswitch_index_to_vport_num(struct mlx5_eswitch *esw,
						  int index)
{
	if (index == mlx5_eswitch_ecpf_idx(esw) &&
	    mlx5_ecpf_vport_exists(esw->dev))
		return MLX5_VPORT_ECPF;

	if (index == mlx5_eswitch_uplink_idx(esw))
		return MLX5_VPORT_UPLINK;

	return index;
}

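/*
 * Illustrative sketch of the vport <-> index mapping implemented by the
 * two helpers above (values assume a hypothetical eswitch with
 * total_vports == N and an existing ECPF vport):
 *
 *	vport number          array index
 *	MLX5_VPORT_PF     ->  0
 *	VF n              ->  n
 *	MLX5_VPORT_ECPF   ->  N - 2
 *	MLX5_VPORT_UPLINK ->  N - 1
 *
 * so mlx5_eswitch_index_to_vport_num(esw,
 * mlx5_eswitch_vport_num_to_index(esw, v)) == v for every valid vport v.
 */
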
static inline unsigned int
mlx5_esw_vport_to_devlink_port_index(const struct mlx5_core_dev *dev,
				     u16 vport_num)
{
	return (MLX5_CAP_GEN(dev, vhca_id) << 16) | vport_num;
}

static inline u16
mlx5_esw_devlink_port_index_to_vport_num(unsigned int dl_port_index)
{
	return dl_port_index & 0xffff;
}

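/*
 * Illustrative sketch (hypothetical values): the devlink port index packs
 * the vhca_id in the upper 16 bits and the vport number in the lower 16
 * bits. Assuming vhca_id == 0x2 and vport_num == 5:
 *
 *	index = (0x2 << 16) | 5 = 0x20005
 *	mlx5_esw_devlink_port_index_to_vport_num(0x20005) == 5
 */
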
/* TODO: This mlx5e_tc function shouldn't be called by eswitch */
void mlx5e_tc_clean_fdb_peer_flows(struct mlx5_eswitch *esw);

/* The vport getter/iterator are only valid after esw->total_vports
 * and vport->vport are initialized in mlx5_eswitch_init.
 */
#define mlx5_esw_for_all_vports(esw, i, vport)		\
	for ((i) = MLX5_VPORT_PF;			\
	     (vport) = &(esw)->vports[i],		\
	     (i) < (esw)->total_vports; (i)++)

#define mlx5_esw_for_all_vports_reverse(esw, i, vport)	\
	for ((i) = (esw)->total_vports - 1;		\
	     (vport) = &(esw)->vports[i],		\
	     (i) >= MLX5_VPORT_PF; (i)--)

#define mlx5_esw_for_each_vf_vport(esw, i, vport, nvfs)	\
	for ((i) = MLX5_VPORT_FIRST_VF;			\
	     (vport) = &(esw)->vports[(i)],		\
	     (i) <= (nvfs); (i)++)

#define mlx5_esw_for_each_vf_vport_reverse(esw, i, vport, nvfs)	\
	for ((i) = (nvfs);					\
	     (vport) = &(esw)->vports[(i)],			\
	     (i) >= MLX5_VPORT_FIRST_VF; (i)--)

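/*
 * Illustrative usage sketch (hypothetical caller, not from this file):
 *
 *	struct mlx5_vport *vport;
 *	int i;
 *
 *	mlx5_esw_for_all_vports(esw, i, vport)
 *		esw_debug(esw->dev, "vport index %d\n", i);
 *
 * Note the macros set (vport) as the comma-expression side effect of the
 * loop condition, so (vport) already points at vports[i] when the body
 * runs.
 */
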
/* The rep getter/iterator are only valid after esw->total_vports
 * and vport->vport are initialized in mlx5_eswitch_init.
 */
#define mlx5_esw_for_all_reps(esw, i, rep)			\
	for ((i) = MLX5_VPORT_PF;				\
	     (rep) = &(esw)->offloads.vport_reps[i],		\
	     (i) < (esw)->total_vports; (i)++)

#define mlx5_esw_for_each_vf_rep(esw, i, rep, nvfs)		\
	for ((i) = MLX5_VPORT_FIRST_VF;				\
	     (rep) = &(esw)->offloads.vport_reps[i],		\
	     (i) <= (nvfs); (i)++)

#define mlx5_esw_for_each_vf_rep_reverse(esw, i, rep, nvfs)	\
	for ((i) = (nvfs);					\
	     (rep) = &(esw)->offloads.vport_reps[i],		\
	     (i) >= MLX5_VPORT_FIRST_VF; (i)--)

#define mlx5_esw_for_each_vf_vport_num(esw, vport, nvfs)	\
	for ((vport) = MLX5_VPORT_FIRST_VF; (vport) <= (nvfs); (vport)++)

#define mlx5_esw_for_each_vf_vport_num_reverse(esw, vport, nvfs)	\
	for ((vport) = (nvfs); (vport) >= MLX5_VPORT_FIRST_VF; (vport)--)

/* Includes host PF (vport 0) if it's not esw manager. */
#define mlx5_esw_for_each_host_func_rep(esw, i, rep, nvfs)	\
	for ((i) = (esw)->first_host_vport;			\
	     (rep) = &(esw)->offloads.vport_reps[i],		\
	     (i) <= (nvfs); (i)++)

#define mlx5_esw_for_each_host_func_rep_reverse(esw, i, rep, nvfs)	\
	for ((i) = (nvfs);						\
	     (rep) = &(esw)->offloads.vport_reps[i],			\
	     (i) >= (esw)->first_host_vport; (i)--)

#define mlx5_esw_for_each_host_func_vport(esw, vport, nvfs)	\
	for ((vport) = (esw)->first_host_vport;			\
	     (vport) <= (nvfs); (vport)++)

#define mlx5_esw_for_each_host_func_vport_reverse(esw, vport, nvfs)	\
	for ((vport) = (nvfs);						\
	     (vport) >= (esw)->first_host_vport; (vport)--)

struct mlx5_eswitch *mlx5_devlink_eswitch_get(struct devlink *devlink);
struct mlx5_vport *__must_check
mlx5_eswitch_get_vport(struct mlx5_eswitch *esw, u16 vport_num);

bool mlx5_eswitch_is_vf_vport(const struct mlx5_eswitch *esw, u16 vport_num);

int mlx5_esw_funcs_changed_handler(struct notifier_block *nb, unsigned long type, void *data);

int
mlx5_eswitch_enable_pf_vf_vports(struct mlx5_eswitch *esw,
				 enum mlx5_eswitch_vport_event enabled_events);
void mlx5_eswitch_disable_pf_vf_vports(struct mlx5_eswitch *esw);

int
esw_vport_create_offloads_acl_tables(struct mlx5_eswitch *esw,
				     struct mlx5_vport *vport);
void
esw_vport_destroy_offloads_acl_tables(struct mlx5_eswitch *esw,
				      struct mlx5_vport *vport);

int mlx5_esw_vport_tbl_get(struct mlx5_eswitch *esw);
void mlx5_esw_vport_tbl_put(struct mlx5_eswitch *esw);

struct mlx5_flow_handle *
esw_add_restore_rule(struct mlx5_eswitch *esw, u32 tag);
u32
esw_get_max_restore_tag(struct mlx5_eswitch *esw);

int esw_offloads_load_rep(struct mlx5_eswitch *esw, u16 vport_num);
void esw_offloads_unload_rep(struct mlx5_eswitch *esw, u16 vport_num);

int mlx5_eswitch_load_vport(struct mlx5_eswitch *esw, u16 vport_num,
			    enum mlx5_eswitch_vport_event enabled_events);
void mlx5_eswitch_unload_vport(struct mlx5_eswitch *esw, u16 vport_num);

int mlx5_eswitch_load_vf_vports(struct mlx5_eswitch *esw, u16 num_vfs,
				enum mlx5_eswitch_vport_event enabled_events);
void mlx5_eswitch_unload_vf_vports(struct mlx5_eswitch *esw, u16 num_vfs);

int mlx5_esw_offloads_devlink_port_register(struct mlx5_eswitch *esw, u16 vport_num);
void mlx5_esw_offloads_devlink_port_unregister(struct mlx5_eswitch *esw, u16 vport_num);
struct devlink_port *mlx5_esw_offloads_devlink_port(struct mlx5_eswitch *esw, u16 vport_num);

#else  /* CONFIG_MLX5_ESWITCH */
/* eswitch API stubs */
static inline int  mlx5_eswitch_init(struct mlx5_core_dev *dev) { return 0; }
static inline void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw) {}
static inline int  mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs) { return 0; }
static inline void mlx5_eswitch_disable(struct mlx5_eswitch *esw, bool clear_vf) {}
static inline bool mlx5_esw_lag_prereq(struct mlx5_core_dev *dev0, struct mlx5_core_dev *dev1) { return true; }
static inline bool mlx5_eswitch_is_funcs_handler(struct mlx5_core_dev *dev) { return false; }
static inline int
mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw, u16 vport, int link_state) { return 0; }
static inline const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline struct mlx5_flow_handle *
esw_add_restore_rule(struct mlx5_eswitch *esw, u32 tag)
{
	return ERR_PTR(-EOPNOTSUPP);
}
#endif /* CONFIG_MLX5_ESWITCH */

#endif /* __MLX5_ESWITCH_H__ */