/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/* Copyright (c) 2021, Microsoft Corporation. */

#include "hw_channel.h"

/* Microsoft Azure Network Adapter (MANA)'s definitions
 *
 * Structures labeled with "HW DATA" are exchanged with the hardware. All of
 * them are naturally aligned and hence don't need __packed.
 */
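/*
 * Illustrative sketch (not part of this header): "naturally aligned" means
 * every field starts at an offset that is a multiple of its own size, so the
 * compiler inserts no padding and __packed is unnecessary. The struct below
 * is a made-up example of the pattern the HW DATA structures follow; it is
 * not a real MANA message.
 */
struct mana_example_hw_data {
	u64 base_addr;	/* offset 0: 8-byte field on an 8-byte boundary */
	u32 length;	/* offset 8 */
	u16 flags;	/* offset 12 */
	u16 reserved;	/* offset 14: keeps the size a multiple of 8 */
}; /* HW DATA (example only) */

static_assert(sizeof(struct mana_example_hw_data) == 16,
	      "naturally aligned fields need no compiler padding");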
/* MANA protocol version */
#define MANA_MAJOR_VERSION 0
#define MANA_MINOR_VERSION 1
#define MANA_MICRO_VERSION 1

typedef u64 mana_handle_t;
#define INVALID_MANA_HANDLE ((mana_handle_t)-1)
	TRI_STATE_UNKNOWN = -1,
/* Number of entries for the hardware indirection table must be a power of 2 */
#define MANA_INDIRECT_TABLE_MAX_SIZE 512
#define MANA_INDIRECT_TABLE_DEF_SIZE 64
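/*
 * Illustrative sketch (assumption, not the driver's internal code): with a
 * power-of-2 table size, an RX hash can be mapped to an indirection-table
 * slot with a simple mask instead of a modulo, which is why the sizes above
 * must be powers of 2.
 */
static inline u32 mana_example_indir_slot(u32 rx_hash, u32 indir_table_size)
{
	/* indir_table_size must be a power of 2, e.g. 64 or 512 */
	return rx_hash & (indir_table_size - 1);
}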
/* The Toeplitz hash key's length in bytes: should be a multiple of 8 */
#define MANA_HASH_KEY_SIZE 40

#define COMP_ENTRY_SIZE 64
/* This max value for RX buffers is derived from __alloc_page()'s max page
 * allocation calculation. It allows a maximum of 2^(MAX_ORDER - 1) pages;
 * RX buffer sizes beyond this value are rejected by the __alloc_page() call.
 */
#define MAX_RX_BUFFERS_PER_QUEUE 8192
#define DEF_RX_BUFFERS_PER_QUEUE 1024
#define MIN_RX_BUFFERS_PER_QUEUE 128
/* This max value for TX buffers was derived, through testing, from the
 * maximum number of pages the host allows to be allocated per guest. TX
 * buffer sizes beyond this value are rejected by the hardware.
 */
#define MAX_TX_BUFFERS_PER_QUEUE 16384
#define DEF_TX_BUFFERS_PER_QUEUE 256
#define MIN_TX_BUFFERS_PER_QUEUE 128
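/*
 * Illustrative sketch (assumption, not a driver entry point): a configuration
 * path such as an ethtool ring-size change would be expected to clamp the
 * requested per-queue buffer counts to the MIN/MAX bounds above before
 * applying them.
 */
static inline u32 mana_example_clamp_rx_bufs(u32 requested)
{
	return clamp_t(u32, requested, MIN_RX_BUFFERS_PER_QUEUE,
		       MAX_RX_BUFFERS_PER_QUEUE);
}

static inline u32 mana_example_clamp_tx_bufs(u32 requested)
{
	return clamp_t(u32, requested, MIN_TX_BUFFERS_PER_QUEUE,
		       MAX_TX_BUFFERS_PER_QUEUE);
}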
#define EQ_SIZE (8 * MANA_PAGE_SIZE)

#define LOG2_EQ_THROTTLE 3

#define MAX_PORTS_IN_MANA_DEV 256

/* Update this count whenever the respective structures are changed */
#define MANA_STATS_RX_COUNT 5
#define MANA_STATS_TX_COUNT 11
struct mana_stats_rx {
	struct u64_stats_sync syncp;
struct mana_stats_tx {
	u64 tso_inner_packets;
	struct u64_stats_sync syncp;
	struct gdma_queue *gdma_sq;

	struct net_device *ndev;

	/* SKBs that have been sent to the HW and are awaiting their CQEs. */
	struct sk_buff_head pending_skbs;
	struct netdev_queue *net_txq;

	atomic_t pending_sends;

	bool napi_initialized;

	struct mana_stats_tx stats;
/* skb data and frags dma mappings */
struct mana_skb_head {
	/* GSO pkts may have 2 SGEs for the linear part */
	dma_addr_t dma_handle[MAX_SKB_FRAGS + 2];

	u32 size[MAX_SKB_FRAGS + 2];
};

#define MANA_HEADROOM sizeof(struct mana_skb_head)
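/*
 * Illustrative sketch (an assumption about usage, not a helper defined by the
 * driver): MANA_HEADROOM bytes are reserved in front of each TX skb so the
 * DMA-mapping bookkeeping above can live in the skb's headroom; a hypothetical
 * accessor could then recover it from the skb.
 */
static inline struct mana_skb_head *mana_example_skb_head(struct sk_buff *skb)
{
	/* Assumes MANA_HEADROOM was reserved at skb->head by the TX path. */
	return (struct mana_skb_head *)skb->head;
}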
enum mana_tx_pkt_format {
	MANA_SHORT_PKT_FMT = 0,
	MANA_LONG_PKT_FMT = 1,
};
struct mana_tx_short_oob {
	u32 is_outer_ipv4 : 1;
	u32 is_outer_ipv6 : 1;
	u32 comp_iphdr_csum : 1;
	u32 comp_tcp_csum : 1;
	u32 comp_udp_csum : 1;
	u32 supress_txcqe_gen : 1;

	u32 trans_off : 10; /* Transport header offset */

	u32 short_vp_offset : 8;
struct mana_tx_long_oob {
	u32 inner_is_ipv6 : 1;
	u32 inner_tcp_opt : 1;
	u32 inject_vlan_pri_tag : 1;

	u32 pcp : 3;  /* 802.1Q */
	u32 dei : 1;  /* 802.1Q */
	u32 vlan_id : 12;  /* 802.1Q */

	u32 inner_frame_offset : 10;
	u32 inner_ip_rel_offset : 6;
	u32 long_vp_offset : 12;

	struct mana_tx_short_oob s_oob;
	struct mana_tx_long_oob l_oob;
	CQE_RX_COALESCED_4 = 2,
	CQE_RX_OBJECT_FENCE = 3,
	CQE_RX_TRUNCATED = 4,

	CQE_TX_MTU_DROP = 34,
	CQE_TX_INVALID_OOB = 35,
	CQE_TX_INVALID_ETH_TYPE = 36,
	CQE_TX_HDR_PROCESSING_ERROR = 37,
	CQE_TX_VF_DISABLED = 38,
	CQE_TX_VPORT_IDX_OUT_OF_RANGE = 39,
	CQE_TX_VPORT_DISABLED = 40,
	CQE_TX_VLAN_TAGGING_VIOLATION = 41,

#define MANA_CQE_COMPLETION 1
struct mana_cqe_header {
/* NDIS HASH Types */
#define NDIS_HASH_IPV4 BIT(0)
#define NDIS_HASH_TCP_IPV4 BIT(1)
#define NDIS_HASH_UDP_IPV4 BIT(2)
#define NDIS_HASH_IPV6 BIT(3)
#define NDIS_HASH_TCP_IPV6 BIT(4)
#define NDIS_HASH_UDP_IPV6 BIT(5)
#define NDIS_HASH_IPV6_EX BIT(6)
#define NDIS_HASH_TCP_IPV6_EX BIT(7)
#define NDIS_HASH_UDP_IPV6_EX BIT(8)

#define MANA_HASH_L3 (NDIS_HASH_IPV4 | NDIS_HASH_IPV6 | NDIS_HASH_IPV6_EX)
#define MANA_HASH_L4 \
	(NDIS_HASH_TCP_IPV4 | NDIS_HASH_UDP_IPV4 | NDIS_HASH_TCP_IPV6 | \
	 NDIS_HASH_UDP_IPV6 | NDIS_HASH_TCP_IPV6_EX | NDIS_HASH_UDP_IPV6_EX)
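/*
 * Illustrative sketch (assumption, not the RX completion handler itself): the
 * NDIS hash-type bits reported with a received packet can be classified with
 * the MANA_HASH_L3/L4 masks above to pick the skb hash level.
 */
static inline enum pkt_hash_types mana_example_hash_level(u32 ndis_hash_type)
{
	if (ndis_hash_type & MANA_HASH_L4)
		return PKT_HASH_TYPE_L4;
	if (ndis_hash_type & MANA_HASH_L3)
		return PKT_HASH_TYPE_L3;
	return PKT_HASH_TYPE_NONE;
}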
struct mana_rxcomp_perpkt_info {

#define MANA_RXCOMP_OOB_NUM_PPI 4
/* Receive completion OOB */
struct mana_rxcomp_oob {
	struct mana_cqe_header cqe_hdr;

	u32 rx_vlantag_present : 1;
	u32 rx_outer_iphdr_csum_succeed : 1;
	u32 rx_outer_iphdr_csum_fail : 1;

	u32 rx_iphdr_csum_succeed : 1;
	u32 rx_iphdr_csum_fail : 1;
	u32 rx_tcp_csum_succeed : 1;
	u32 rx_tcp_csum_fail : 1;
	u32 rx_udp_csum_succeed : 1;
	u32 rx_udp_csum_fail : 1;

	struct mana_rxcomp_perpkt_info ppi[MANA_RXCOMP_OOB_NUM_PPI];
struct mana_tx_comp_oob {
	struct mana_cqe_header cqe_hdr;

	u32 tx_sgl_offset : 5;
	u32 tx_wqe_offset : 27;
#define CQE_POLLING_BUFFER 512

	struct gdma_queue *gdma_cq;

	/* Cache the CQ id (used to verify if each CQE comes to the right CQ) */

	/* Type of the CQ: TX or RX */
	enum mana_cq_type type;

	/* Pointer to the mana_rxq that is pushing RX CQEs to the queue.
	 * Non-NULL if and only if type is MANA_CQ_TYPE_RX.
	 */
	struct mana_rxq *rxq;

	/* Pointer to the mana_txq that is pushing TX CQEs to the queue.
	 * Non-NULL if and only if type is MANA_CQ_TYPE_TX.
	 */
	struct mana_txq *txq;

	/* Buffer into which the CQ handler can copy the CQEs. */
	struct gdma_comp gdma_comp_buf[CQE_POLLING_BUFFER];

	struct napi_struct napi;
	int work_done_since_doorbell;
struct mana_recv_buf_oob {
	/* A valid GDMA work request representing the data buffer. */
	struct gdma_wqe_request wqe_req;

	bool from_pool; /* allocated from a page pool */

	/* SGL of the buffer to be posted as part of the work request. */
	struct gdma_sge sgl[MAX_RX_WQE_SGL_ENTRIES];

	/* Required to store the result of mana_gd_post_work_request.
	 * gdma_posted_wqe_info.wqe_size_in_bu is required for progressing the
	 * work queue when the WQE is consumed.
	 */
	struct gdma_posted_wqe_info wqe_inf;
#define MANA_RXBUF_PAD (SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) \

#define MANA_XDP_MTU_MAX (PAGE_SIZE - MANA_RXBUF_PAD - XDP_PACKET_HEADROOM)
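/*
 * Illustrative sketch (assumption, not the driver's validation routine): with
 * an XDP program attached, each RX buffer must fit XDP_PACKET_HEADROOM, the
 * frame, and the skb_shared_info tail within a single page, which is what
 * MANA_XDP_MTU_MAX expresses; a check along these lines would reject an
 * over-sized MTU.
 */
static inline bool mana_example_mtu_ok_for_xdp(unsigned int mtu)
{
	return mtu <= MANA_XDP_MTU_MAX;
}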
	struct gdma_queue *gdma_rq;
	/* Cache the gdma receive queue id */

	/* Index of RQ in the vPort, not gdma receive queue id */

	struct mana_cq rx_cq;

	struct completion fence_event;

	struct net_device *ndev;

	/* Total number of receive buffers to be allocated */

	struct mana_stats_rx stats;

	struct bpf_prog __rcu *bpf_prog;
	struct xdp_rxq_info xdp_rxq;
	void *xdp_save_va; /* for reusing */
	int xdp_rc; /* XDP redirect return code */

	struct page_pool *page_pool;
	struct dentry *mana_rx_debugfs;

	/* MUST BE THE LAST MEMBER:
	 * Each receive buffer has an associated mana_recv_buf_oob.
	 */
	struct mana_recv_buf_oob rx_oobs[] __counted_by(num_rx_buf);
	struct mana_cq tx_cq;

	mana_handle_t tx_object;

	struct dentry *mana_tx_debugfs;
struct mana_ethtool_stats {
	u64 hc_rx_discards_no_wqe;
	u64 hc_rx_err_vport_disabled;
	u64 hc_rx_ucast_pkts;
	u64 hc_rx_ucast_bytes;
	u64 hc_rx_bcast_pkts;
	u64 hc_rx_bcast_bytes;
	u64 hc_rx_mcast_pkts;
	u64 hc_rx_mcast_bytes;
	u64 hc_tx_err_gf_disabled;
	u64 hc_tx_err_vport_disabled;
	u64 hc_tx_err_inval_vportoffset_pkt;
	u64 hc_tx_err_vlan_enforcement;
	u64 hc_tx_err_eth_type_enforcement;
	u64 hc_tx_err_sa_enforcement;
	u64 hc_tx_err_sqpdid_enforcement;
	u64 hc_tx_err_cqpdid_enforcement;
	u64 hc_tx_err_mtu_violation;
	u64 hc_tx_err_inval_oob;
	u64 hc_tx_ucast_pkts;
	u64 hc_tx_ucast_bytes;
	u64 hc_tx_bcast_pkts;
	u64 hc_tx_bcast_bytes;
	u64 hc_tx_mcast_pkts;
	u64 hc_tx_mcast_bytes;
	u64 tx_cqe_unknown_type;
	u64 rx_coalesced_err;
	u64 rx_cqe_unknown_type;
struct mana_context {
	struct gdma_dev *gdma_dev;

	struct dentry *mana_eqs_debugfs;

	struct net_device *ports[MAX_PORTS_IN_MANA_DEV];
struct mana_port_context {
	struct mana_context *ac;
	struct net_device *ndev;

	u8 mac_addr[ETH_ALEN];

	enum TRI_STATE rss_state;

	mana_handle_t default_rxobj;
	bool tx_shortform_allowed;

	struct mana_tx_qp *tx_qp;

	/* Indirection Table for RX & TX. The values are queue indexes */

	/* Indirection table containing RxObject Handles */
	mana_handle_t *rxobj_table;

	/* Hash key used by the NIC */
	u8 hashkey[MANA_HASH_KEY_SIZE];

	/* This points to an array of num_queues RQ pointers. */
	struct mana_rxq **rxqs;

	/* pre-allocated rx buffer array */

	u32 rxbpre_alloc_size;

	struct bpf_prog *bpf_prog;

	/* Create num_queues EQs, SQs, SQ-CQs, RQs and RQ-CQs, respectively. */
	unsigned int max_queues;
	unsigned int num_queues;

	unsigned int rx_queue_size;
	unsigned int tx_queue_size;

	mana_handle_t port_handle;
	mana_handle_t pf_filter_handle;

	/* Mutex for sharing access to vport_use_count */
	struct mutex vport_mutex;

	bool port_st_save; /* Saved port state */

	struct mana_ethtool_stats eth_stats;

	struct dentry *mana_port_debugfs;
netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev);
int mana_config_rss(struct mana_port_context *ac, enum TRI_STATE rx,
		    bool update_hash, bool update_tab);

int mana_alloc_queues(struct net_device *ndev);
int mana_attach(struct net_device *ndev);
int mana_detach(struct net_device *ndev, bool from_close);

int mana_probe(struct gdma_dev *gd, bool resuming);
void mana_remove(struct gdma_dev *gd, bool suspending);

void mana_xdp_tx(struct sk_buff *skb, struct net_device *ndev);
int mana_xdp_xmit(struct net_device *ndev, int n, struct xdp_frame **frames,

u32 mana_run_xdp(struct net_device *ndev, struct mana_rxq *rxq,
		 struct xdp_buff *xdp, void *buf_va, uint pkt_len);
struct bpf_prog *mana_xdp_get(struct mana_port_context *apc);
void mana_chn_setxdp(struct mana_port_context *apc, struct bpf_prog *prog);
int mana_bpf(struct net_device *ndev, struct netdev_bpf *bpf);
void mana_query_gf_stats(struct mana_port_context *apc);
int mana_pre_alloc_rxbufs(struct mana_port_context *apc, int mtu, int num_queues);
void mana_pre_dealloc_rxbufs(struct mana_port_context *apc);

extern const struct ethtool_ops mana_ethtool_ops;
extern struct dentry *mana_debugfs_root;
/* A CQ can be created without being associated with any EQ */
#define GDMA_CQ_NO_EQ 0xffff

struct mana_obj_spec {
enum mana_command_code {
	MANA_QUERY_DEV_CONFIG = 0x20001,
	MANA_QUERY_GF_STAT = 0x20002,
	MANA_CONFIG_VPORT_TX = 0x20003,
	MANA_CREATE_WQ_OBJ = 0x20004,
	MANA_DESTROY_WQ_OBJ = 0x20005,
	MANA_FENCE_RQ = 0x20006,
	MANA_CONFIG_VPORT_RX = 0x20007,
	MANA_QUERY_VPORT_CONFIG = 0x20008,

	/* Privileged commands for the PF mode */
	MANA_REGISTER_FILTER = 0x28000,
	MANA_DEREGISTER_FILTER = 0x28001,
	MANA_REGISTER_HW_PORT = 0x28003,
	MANA_DEREGISTER_HW_PORT = 0x28004,
/* Query Device Configuration */
struct mana_query_device_cfg_req {
	struct gdma_req_hdr hdr;

	/* MANA NIC driver capability flags */
	u64 mn_drv_cap_flags1;
	u64 mn_drv_cap_flags2;
	u64 mn_drv_cap_flags3;
	u64 mn_drv_cap_flags4;
struct mana_query_device_cfg_resp {
	struct gdma_resp_hdr hdr;

/* Query vPort Configuration */
struct mana_query_vport_cfg_req {
	struct gdma_req_hdr hdr;

struct mana_query_vport_cfg_resp {
	struct gdma_resp_hdr hdr;

	u32 num_indirection_ent;
/* Configure vPort */
struct mana_config_vport_req {
	struct gdma_req_hdr hdr;

struct mana_config_vport_resp {
	struct gdma_resp_hdr hdr;

	u8 short_form_allowed;

/* Create WQ Object */
struct mana_create_wqobj_req {
	struct gdma_req_hdr hdr;

	u32 cq_moderation_ctx_id;

struct mana_create_wqobj_resp {
	struct gdma_resp_hdr hdr;

	mana_handle_t wq_obj;
/* Destroy WQ Object */
struct mana_destroy_wqobj_req {
	struct gdma_req_hdr hdr;

	mana_handle_t wq_obj_handle;

struct mana_destroy_wqobj_resp {
	struct gdma_resp_hdr hdr;

struct mana_fence_rq_req {
	struct gdma_req_hdr hdr;
	mana_handle_t wq_obj_handle;

struct mana_fence_rq_resp {
	struct gdma_resp_hdr hdr;

struct mana_query_gf_stat_req {
	struct gdma_req_hdr hdr;

struct mana_query_gf_stat_resp {
	struct gdma_resp_hdr hdr;
	/* rx errors/discards */
	u64 rx_discards_nowqe;
	u64 rx_err_vport_disabled;

	/* rx bytes/packets */
	u64 hc_rx_ucast_pkts;
	u64 hc_rx_ucast_bytes;
	u64 hc_rx_bcast_pkts;
	u64 hc_rx_bcast_bytes;
	u64 hc_rx_mcast_pkts;
	u64 hc_rx_mcast_bytes;

	u64 tx_err_gf_disabled;
	u64 tx_err_vport_disabled;
	u64 tx_err_inval_vport_offset_pkt;
	u64 tx_err_vlan_enforcement;
	u64 tx_err_ethtype_enforcement;
	u64 tx_err_SA_enforcement;
	u64 tx_err_SQPDID_enforcement;
	u64 tx_err_CQPDID_enforcement;
	u64 tx_err_mtu_violation;
	u64 tx_err_inval_oob;

	/* tx bytes/packets */
	u64 hc_tx_ucast_pkts;
	u64 hc_tx_ucast_bytes;
	u64 hc_tx_bcast_pkts;
	u64 hc_tx_bcast_bytes;
	u64 hc_tx_mcast_pkts;
	u64 hc_tx_mcast_bytes;
/* Configure vPort Rx Steering */
struct mana_cfg_rx_steer_req_v2 {
	struct gdma_req_hdr hdr;

	u16 num_indir_entries;
	u16 indir_tab_offset;

	u8 update_default_rxobj;

	mana_handle_t default_rxobj;
	u8 hashkey[MANA_HASH_KEY_SIZE];
	u8 cqe_coalescing_enable;

	mana_handle_t indir_tab[] __counted_by(num_indir_entries);
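/*
 * Illustrative sketch (an assumption about sizing, not code taken from the
 * driver): because indir_tab[] is a flexible array annotated with
 * __counted_by(num_indir_entries), the length of the request message has to
 * include the trailing entries, e.g. via struct_size_t().
 */
static inline size_t mana_example_rx_steer_req_len(u16 num_indir_entries)
{
	return struct_size_t(struct mana_cfg_rx_steer_req_v2, indir_tab,
			     num_indir_entries);
}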
struct mana_cfg_rx_steer_resp {
	struct gdma_resp_hdr hdr;

/* Register HW vPort */
struct mana_register_hw_vport_req {
	struct gdma_req_hdr hdr;

	u8 is_pf_default_vport;

	u8 allow_all_ether_types;

struct mana_register_hw_vport_resp {
	struct gdma_resp_hdr hdr;
	mana_handle_t hw_vport_handle;

/* Deregister HW vPort */
struct mana_deregister_hw_vport_req {
	struct gdma_req_hdr hdr;
	mana_handle_t hw_vport_handle;

struct mana_deregister_hw_vport_resp {
	struct gdma_resp_hdr hdr;

/* Register filter */
struct mana_register_filter_req {
	struct gdma_req_hdr hdr;

struct mana_register_filter_resp {
	struct gdma_resp_hdr hdr;
	mana_handle_t filter_handle;

/* Deregister filter */
struct mana_deregister_filter_req {
	struct gdma_req_hdr hdr;
	mana_handle_t filter_handle;

struct mana_deregister_filter_resp {
	struct gdma_resp_hdr hdr;
/* Requested GF stats Flags */
/* Rx discards/Errors */
#define STATISTICS_FLAGS_RX_DISCARDS_NO_WQE 0x0000000000000001
#define STATISTICS_FLAGS_RX_ERRORS_VPORT_DISABLED 0x0000000000000002

#define STATISTICS_FLAGS_HC_RX_BYTES 0x0000000000000004
#define STATISTICS_FLAGS_HC_RX_UCAST_PACKETS 0x0000000000000008
#define STATISTICS_FLAGS_HC_RX_UCAST_BYTES 0x0000000000000010
#define STATISTICS_FLAGS_HC_RX_MCAST_PACKETS 0x0000000000000020
#define STATISTICS_FLAGS_HC_RX_MCAST_BYTES 0x0000000000000040
#define STATISTICS_FLAGS_HC_RX_BCAST_PACKETS 0x0000000000000080
#define STATISTICS_FLAGS_HC_RX_BCAST_BYTES 0x0000000000000100

#define STATISTICS_FLAGS_TX_ERRORS_GF_DISABLED 0x0000000000000200
#define STATISTICS_FLAGS_TX_ERRORS_VPORT_DISABLED 0x0000000000000400
#define STATISTICS_FLAGS_TX_ERRORS_INVAL_VPORT_OFFSET_PACKETS \

#define STATISTICS_FLAGS_TX_ERRORS_VLAN_ENFORCEMENT 0x0000000000001000
#define STATISTICS_FLAGS_TX_ERRORS_ETH_TYPE_ENFORCEMENT \

#define STATISTICS_FLAGS_TX_ERRORS_SA_ENFORCEMENT 0x0000000000004000
#define STATISTICS_FLAGS_TX_ERRORS_SQPDID_ENFORCEMENT 0x0000000000008000
#define STATISTICS_FLAGS_TX_ERRORS_CQPDID_ENFORCEMENT 0x0000000000010000
#define STATISTICS_FLAGS_TX_ERRORS_MTU_VIOLATION 0x0000000000020000
#define STATISTICS_FLAGS_TX_ERRORS_INVALID_OOB 0x0000000000040000

#define STATISTICS_FLAGS_HC_TX_BYTES 0x0000000000080000
#define STATISTICS_FLAGS_HC_TX_UCAST_PACKETS 0x0000000000100000
#define STATISTICS_FLAGS_HC_TX_UCAST_BYTES 0x0000000000200000
#define STATISTICS_FLAGS_HC_TX_MCAST_PACKETS 0x0000000000400000
#define STATISTICS_FLAGS_HC_TX_MCAST_BYTES 0x0000000000800000
#define STATISTICS_FLAGS_HC_TX_BCAST_PACKETS 0x0000000001000000
#define STATISTICS_FLAGS_HC_TX_BCAST_BYTES 0x0000000002000000

#define STATISTICS_FLAGS_TX_ERRORS_GDMA_ERROR 0x0000000004000000
#define MANA_MAX_NUM_QUEUES 64

#define MANA_SHORT_VPORT_OFFSET_MAX ((1U << 8) - 1)
struct mana_tx_package {
	struct gdma_wqe_request wqe_req;
	struct gdma_sge sgl_array[5];
	struct gdma_sge *sgl_ptr;

	struct mana_tx_oob tx_oob;

	struct gdma_posted_wqe_info wqe_info;
int mana_create_wq_obj(struct mana_port_context *apc,
		       u32 wq_type, struct mana_obj_spec *wq_spec,
		       struct mana_obj_spec *cq_spec,
		       mana_handle_t *wq_obj);

void mana_destroy_wq_obj(struct mana_port_context *apc, u32 wq_type,
			 mana_handle_t wq_obj);

int mana_cfg_vport(struct mana_port_context *apc, u32 protection_dom_id,

void mana_uncfg_vport(struct mana_port_context *apc);

struct net_device *mana_get_primary_netdev_rcu(struct mana_context *ac, u32 port_index);
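/*
 * Illustrative usage sketch (assumption, not a consumer that exists in this
 * header): mana_get_primary_netdev_rcu() returns an RCU-protected pointer, so
 * a caller is expected to hold rcu_read_lock() while dereferencing it. The
 * helper name and the use of port index 0 are hypothetical.
 */
static inline const char *
mana_example_primary_netdev_name(struct mana_context *ac, char *buf, size_t len)
{
	struct net_device *ndev;

	rcu_read_lock();
	ndev = mana_get_primary_netdev_rcu(ac, 0);
	if (ndev)
		strscpy(buf, netdev_name(ndev), len);
	else
		strscpy(buf, "(none)", len);
	rcu_read_unlock();

	return buf;
}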