/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2019, Intel Corporation. */
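/* AF_XDP (XDP socket) zero-copy support declarations for the ice driver. */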
#ifndef _ICE_XSK_H_
#define _ICE_XSK_H_
#include "ice_txrx.h"
#include "ice.h"

struct ice_vsi;

#ifdef CONFIG_XDP_SOCKETS
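/*
 * Implemented in ice_xsk.c.  Broadly (the wiring described here is the usual
 * pattern, not spelled out in this header): ice_xsk_pool_setup() attaches or
 * detaches an AF_XDP buffer pool on queue @qid and is reached from the
 * driver's ndo_bpf XDP_SETUP_XSK_POOL path; ice_xsk_wakeup() backs
 * ndo_xsk_wakeup; the *_zc() helpers are the zero-copy Rx/Tx routines used
 * from NAPI polling while a pool is attached.
 */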
int ice_xsk_pool_setup(struct ice_vsi *vsi, struct xsk_buff_pool *pool,
		       u16 qid);
int ice_clean_rx_irq_zc(struct ice_ring *rx_ring, int budget);
bool ice_clean_tx_irq_zc(struct ice_ring *xdp_ring, int budget);
int ice_xsk_wakeup(struct net_device *netdev, u32 queue_id, u32 flags);
bool ice_alloc_rx_bufs_zc(struct ice_ring *rx_ring, u16 count);
bool ice_xsk_any_rx_ring_ena(struct ice_vsi *vsi);
void ice_xsk_clean_rx_ring(struct ice_ring *rx_ring);
void ice_xsk_clean_xdp_ring(struct ice_ring *xdp_ring);
#else
/* Stubs used when the kernel is built without CONFIG_XDP_SOCKETS. */
static inline int
ice_xsk_pool_setup(struct ice_vsi __always_unused *vsi,
		   struct xsk_buff_pool __always_unused *pool,
		   u16 __always_unused qid)
{
	return -EOPNOTSUPP;
}

static inline int
ice_clean_rx_irq_zc(struct ice_ring __always_unused *rx_ring,
		    int __always_unused budget)
{
	return 0;
}

static inline bool
ice_clean_tx_irq_zc(struct ice_ring __always_unused *xdp_ring,
		    int __always_unused budget)
{
	return false;
}

static inline bool
ice_alloc_rx_bufs_zc(struct ice_ring __always_unused *rx_ring,
		     u16 __always_unused count)
{
	return false;
}

static inline bool ice_xsk_any_rx_ring_ena(struct ice_vsi __always_unused *vsi)
{
	return false;
}

static inline int
ice_xsk_wakeup(struct net_device __always_unused *netdev,
	       u32 __always_unused queue_id, u32 __always_unused flags)
{
	return -EOPNOTSUPP;
}

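/*
 * With AF_XDP compiled out, the ring cleanup helpers become no-ops; the
 * do { } while (0) form keeps them usable as ordinary statements at the
 * call sites.
 */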
#define ice_xsk_clean_rx_ring(rx_ring) do {} while (0)
#define ice_xsk_clean_xdp_ring(xdp_ring) do {} while (0)
#endif /* CONFIG_XDP_SOCKETS */
#endif /* !_ICE_XSK_H_ */