/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
 * Copyright (c) 2013-2020, Mellanox Technologies inc. All rights reserved.
 */

#ifndef MLX5_IB_H
#define MLX5_IB_H

#include <linux/kernel.h>
#include <linux/sched.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_smi.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/cq.h>
#include <linux/mlx5/fs.h>
#include <linux/mlx5/qp.h>
#include <linux/types.h>
#include <linux/mlx5/transobj.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/mlx5-abi.h>
#include <rdma/uverbs_ioctl.h>
#include <rdma/mlx5_user_ioctl_cmds.h>
#include <rdma/mlx5_user_ioctl_verbs.h>
#define mlx5_ib_dbg(_dev, format, arg...)                                      \
	dev_dbg(&(_dev)->ib_dev.dev, "%s:%d:(pid %d): " format, __func__,      \
		__LINE__, current->pid, ##arg)

#define mlx5_ib_err(_dev, format, arg...)                                      \
	dev_err(&(_dev)->ib_dev.dev, "%s:%d:(pid %d): " format, __func__,      \
		__LINE__, current->pid, ##arg)

#define mlx5_ib_warn(_dev, format, arg...)                                     \
	dev_warn(&(_dev)->ib_dev.dev, "%s:%d:(pid %d): " format, __func__,     \
		 __LINE__, current->pid, ##arg)
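
/*
 * Illustrative usage sketch (not part of the original header): the wrappers
 * above prefix every message with the function name, line number and current
 * pid, e.g. from a hypothetical error path:
 *
 *	mlx5_ib_warn(dev, "UMR post failed, status %d\n", status);
 *
 * which expands to dev_warn(&dev->ib_dev.dev, "%s:%d:(pid %d): UMR ...", ...).
 */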

#define MLX5_IB_DEFAULT_UIDX 0xffffff
#define MLX5_USER_ASSIGNED_UIDX_MASK __mlx5_mask(qpc, user_index)
static __always_inline unsigned long
__mlx5_log_page_size_to_bitmap(unsigned int log_pgsz_bits,
			       unsigned int pgsz_shift)
{
	unsigned int largest_pg_shift =
		min_t(unsigned long, (1ULL << log_pgsz_bits) - 1 + pgsz_shift,
		      BITS_PER_LONG - 1);

	/*
	 * Despite a command allowing it, the device does not support lower than
	 * 4k page size.
	 */
	pgsz_shift = max_t(unsigned int, MLX5_ADAPTER_PAGE_SHIFT, pgsz_shift);
	return GENMASK(largest_pg_shift, pgsz_shift);
}
/*
 * For mkc users, instead of a page_offset the command has a start_iova which
 * specifies both the page_offset and the on-the-wire IOVA
 */
#define mlx5_umem_find_best_pgsz(umem, typ, log_pgsz_fld, pgsz_shift, iova)    \
	ib_umem_find_best_pgsz(umem,                                           \
			       __mlx5_log_page_size_to_bitmap(                 \
				       __mlx5_bit_sz(typ, log_pgsz_fld),       \
				       pgsz_shift),                            \
			       iova)
static __always_inline unsigned long
__mlx5_page_offset_to_bitmask(unsigned int page_offset_bits,
			      unsigned int offset_shift)
{
	unsigned int largest_offset_shift =
		min_t(unsigned long, page_offset_bits - 1 + offset_shift,
		      BITS_PER_LONG - 1);

	return GENMASK(largest_offset_shift, offset_shift);
}
/*
 * QP/CQ/WQ/etc type commands take a page offset that satisfies:
 *   page_offset_quantized * (page_size/scale) = page_offset
 * Which restricts allowed page sizes to ones that satisfy the above.
 */
unsigned long __mlx5_umem_find_best_quantized_pgoff(
	struct ib_umem *umem, unsigned long pgsz_bitmap,
	unsigned int page_offset_bits, u64 pgoff_bitmask, unsigned int scale,
	unsigned int *page_offset_quantized);
#define mlx5_umem_find_best_quantized_pgoff(umem, typ, log_pgsz_fld,           \
					    pgsz_shift, page_offset_fld,       \
					    scale, page_offset_quantized)      \
	__mlx5_umem_find_best_quantized_pgoff(                                 \
		umem,                                                          \
		__mlx5_log_page_size_to_bitmap(                                \
			__mlx5_bit_sz(typ, log_pgsz_fld), pgsz_shift),         \
		__mlx5_bit_sz(typ, page_offset_fld),                           \
		GENMASK(31, order_base_2(scale)), scale,                       \
		page_offset_quantized)

#define mlx5_umem_find_best_cq_quantized_pgoff(umem, typ, log_pgsz_fld,        \
					       pgsz_shift, page_offset_fld,    \
					       scale, page_offset_quantized)   \
	__mlx5_umem_find_best_quantized_pgoff(                                 \
		umem,                                                          \
		__mlx5_log_page_size_to_bitmap(                                \
			__mlx5_bit_sz(typ, log_pgsz_fld), pgsz_shift),         \
		__mlx5_bit_sz(typ, page_offset_fld), 0, scale,                 \
		page_offset_quantized)
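
/*
 * Illustrative sketch (not part of the original header): a CQ creation path
 * could pick a page size and the matching quantized page offset for the cqc
 * layout roughly like this; the local variable names here are hypothetical:
 *
 *	unsigned long page_size;
 *	unsigned int page_offset_quantized;
 *
 *	page_size = mlx5_umem_find_best_cq_quantized_pgoff(
 *		umem, cqc, log_page_size, MLX5_ADAPTER_PAGE_SHIFT,
 *		page_offset, 64, &page_offset_quantized);
 *	if (!page_size)
 *		return -EINVAL;
 *
 * A return value of 0 means no supported page size satisfies
 * page_offset_quantized * (page_size / scale) == page_offset.
 */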

enum {
	MLX5_IB_MMAP_OFFSET_START = 9,
	MLX5_IB_MMAP_OFFSET_END = 255,
};

enum {
	MLX5_IB_MMAP_CMD_SHIFT	= 8,
	MLX5_IB_MMAP_CMD_MASK	= 0xff,
};

enum {
	MLX5_RES_SCAT_DATA32_CQE	= 0x1,
	MLX5_RES_SCAT_DATA64_CQE	= 0x2,
	MLX5_REQ_SCAT_DATA32_CQE	= 0x11,
	MLX5_REQ_SCAT_DATA64_CQE	= 0x22,
};

enum mlx5_ib_mad_ifc_flags {
	MLX5_MAD_IFC_IGNORE_MKEY	= 1,
	MLX5_MAD_IFC_IGNORE_BKEY	= 2,
	MLX5_MAD_IFC_NET_VIEW		= 4,
};

enum {
	MLX5_CROSS_CHANNEL_BFREG	= 0,
};

enum {
	MLX5_TM_MAX_RNDV_MSG_SIZE	= 64,
};

enum {
	MLX5_IB_INVALID_UAR_INDEX	= BIT(31),
	MLX5_IB_INVALID_BFREG		= BIT(31),
};

enum {
	MLX5_MAX_MEMIC_PAGES		= 0x100,
	MLX5_MEMIC_ALLOC_SIZE_MASK	= 0x3f,
};

enum {
	MLX5_MEMIC_BASE_ALIGN	= 6,
	MLX5_MEMIC_BASE_SIZE	= 1 << MLX5_MEMIC_BASE_ALIGN,
};

enum mlx5_ib_mmap_type {
	MLX5_IB_MMAP_TYPE_MEMIC = 1,
	MLX5_IB_MMAP_TYPE_VAR = 2,
	MLX5_IB_MMAP_TYPE_UAR_WC = 3,
	MLX5_IB_MMAP_TYPE_UAR_NC = 4,
};

struct mlx5_bfreg_info {
	int	num_low_latency_bfregs;

	/*
	 * protect bfreg allocation data structs
	 */
	struct mutex	lock;
	u8	lib_uar_4k : 1;
	u32	num_static_sys_pages;
	u32	total_num_bfregs;
};

struct mlx5_ib_ucontext {
	struct ib_ucontext	ibucontext;
	struct list_head	db_page_list;

	/* protect doorbell record alloc/free
	 */
	struct mutex		db_page_mutex;
	struct mlx5_bfreg_info	bfregi;
	u8			cqe_version;
	/* Transport Domain number */
	u32			tdn;

	/* For RoCE LAG TX affinity */
	atomic_t		tx_port_affinity;
};

static inline struct mlx5_ib_ucontext *to_mucontext(struct ib_ucontext *ibucontext)
{
	return container_of(ibucontext, struct mlx5_ib_ucontext, ibucontext);
}

enum {
	MLX5_IB_FLOW_ACTION_MODIFY_HEADER,
	MLX5_IB_FLOW_ACTION_PACKET_REFORMAT,
	MLX5_IB_FLOW_ACTION_DECAP,
};

#define MLX5_IB_FLOW_MCAST_PRIO		(MLX5_BY_PASS_NUM_PRIOS - 1)
#define MLX5_IB_FLOW_LAST_PRIO		(MLX5_BY_PASS_NUM_REGULAR_PRIOS - 1)
#if (MLX5_IB_FLOW_LAST_PRIO <= 0)
#error "Invalid number of bypass priorities"
#endif
#define MLX5_IB_FLOW_LEFTOVERS_PRIO	(MLX5_IB_FLOW_MCAST_PRIO + 1)

#define MLX5_IB_NUM_FLOW_FT		(MLX5_IB_FLOW_LEFTOVERS_PRIO + 1)
#define MLX5_IB_NUM_SNIFFER_FTS		2
#define MLX5_IB_NUM_EGRESS_FTS		1
struct mlx5_ib_flow_prio {
	struct mlx5_flow_table		*flow_table;
	unsigned int			refcount;
};

struct mlx5_ib_flow_handler {
	struct list_head		list;
	struct ib_flow			ibflow;
	struct mlx5_ib_flow_prio	*prio;
	struct mlx5_flow_handle		*rule;
	struct ib_counters		*ibcounters;
	struct mlx5_ib_dev		*dev;
	struct mlx5_ib_flow_matcher	*flow_matcher;
};

struct mlx5_ib_flow_matcher {
	struct mlx5_ib_match_params matcher_mask;
	enum mlx5_ib_flow_type	flow_type;
	enum mlx5_flow_namespace_type ns_type;
	struct mlx5_core_dev	*mdev;
	u8			match_criteria_enable;
};

struct mlx5_ib_pp {
	u16 index;
	struct mlx5_core_dev *mdev;
};

struct mlx5_ib_flow_db {
	struct mlx5_ib_flow_prio	prios[MLX5_IB_NUM_FLOW_FT];
	struct mlx5_ib_flow_prio	egress_prios[MLX5_IB_NUM_FLOW_FT];
	struct mlx5_ib_flow_prio	sniffer[MLX5_IB_NUM_SNIFFER_FTS];
	struct mlx5_ib_flow_prio	egress[MLX5_IB_NUM_EGRESS_FTS];
	struct mlx5_ib_flow_prio	fdb;
	struct mlx5_ib_flow_prio	rdma_rx[MLX5_IB_NUM_FLOW_FT];
	struct mlx5_ib_flow_prio	rdma_tx[MLX5_IB_NUM_FLOW_FT];
	struct mlx5_flow_table		*lag_demux_ft;
	/* Protect flow steering bypass flow tables
	 * when add/del flow rules.
	 * only single add/removal of flow steering rule could be done
	 * simultaneously.
	 */
	struct mutex			lock;
};

/* Use macros here so that we don't have to duplicate
 * enum ib_send_flags and enum ib_qp_type for low-level driver
 */

#define MLX5_IB_SEND_UMR_ENABLE_MR	       (IB_SEND_RESERVED_START << 0)
#define MLX5_IB_SEND_UMR_DISABLE_MR	       (IB_SEND_RESERVED_START << 1)
#define MLX5_IB_SEND_UMR_FAIL_IF_FREE	       (IB_SEND_RESERVED_START << 2)
#define MLX5_IB_SEND_UMR_UPDATE_XLT	       (IB_SEND_RESERVED_START << 3)
#define MLX5_IB_SEND_UMR_UPDATE_TRANSLATION    (IB_SEND_RESERVED_START << 4)
#define MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS       IB_SEND_RESERVED_END

#define MLX5_IB_QPT_REG_UMR	IB_QPT_RESERVED1
/*
 * IB_QPT_GSI creates the software wrapper around GSI, and MLX5_IB_QPT_HW_GSI
 * creates the actual hardware QP.
 */
#define MLX5_IB_QPT_HW_GSI	IB_QPT_RESERVED2
#define MLX5_IB_QPT_DCI		IB_QPT_RESERVED3
#define MLX5_IB_QPT_DCT		IB_QPT_RESERVED4
#define MLX5_IB_WR_UMR		IB_WR_RESERVED1

#define MLX5_IB_UMR_OCTOWORD	       16
#define MLX5_IB_UMR_XLT_ALIGNMENT      64

#define MLX5_IB_UPD_XLT_ZAP	      BIT(0)
#define MLX5_IB_UPD_XLT_ENABLE	      BIT(1)
#define MLX5_IB_UPD_XLT_ATOMIC	      BIT(2)
#define MLX5_IB_UPD_XLT_ADDR	      BIT(3)
#define MLX5_IB_UPD_XLT_PD	      BIT(4)
#define MLX5_IB_UPD_XLT_ACCESS	      BIT(5)
#define MLX5_IB_UPD_XLT_INDIRECT      BIT(6)
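
/*
 * Illustrative sketch (not part of the original header): the MLX5_IB_UPD_XLT_*
 * bits are OR'ed together into the flags argument of mlx5_ib_update_xlt()
 * (declared further below). For example, a hypothetical caller enabling a
 * fresh translation for an MR might pass:
 *
 *	err = mlx5_ib_update_xlt(mr, 0, npages, page_shift,
 *				 MLX5_IB_UPD_XLT_ENABLE | MLX5_IB_UPD_XLT_ADDR);
 */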

/* Private QP creation flags to be passed in ib_qp_init_attr.create_flags.
 *
 * These flags are intended for internal use by the mlx5_ib driver, and they
 * rely on the range reserved for that use in the ib_qp_create_flags enum.
 */
#define MLX5_IB_QP_CREATE_SQPN_QP1	IB_QP_CREATE_RESERVED_START
#define MLX5_IB_QP_CREATE_WC_TEST	(IB_QP_CREATE_RESERVED_START << 1)

enum mlx5_ib_rq_flags {
	MLX5_IB_RQ_CVLAN_STRIPPING	= 1 << 0,
	MLX5_IB_RQ_PCI_WRITE_END_PADDING	= 1 << 1,
};

struct mlx5_ib_wq {
	struct mlx5_frag_buf_ctrl fbc;
	struct wr_list	       *w_list;

	/* serialize post to the work queue
	 */
	spinlock_t		lock;
};

enum mlx5_ib_wq_flags {
	MLX5_IB_WQ_FLAGS_DELAY_DROP = 0x1,
	MLX5_IB_WQ_FLAGS_STRIDING_RQ = 0x2,
};

#define MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES 9
#define MLX5_MAX_SINGLE_WQE_LOG_NUM_STRIDES 16
#define MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES 6
#define MLX5_MAX_SINGLE_STRIDE_LOG_NUM_BYTES 13
#define MLX5_EXT_MIN_SINGLE_WQE_LOG_NUM_STRIDES 3

struct mlx5_ib_rwq {
	struct ib_wq		ibwq;
	struct mlx5_core_qp	core_qp;
	u32			two_byte_shift_en;
	u32			single_stride_log_num_of_bytes;
	struct ib_umem		*umem;
	unsigned int		page_shift;
	u32			create_flags; /* Use enum mlx5_ib_wq_flags */
};

struct mlx5_ib_rwq_ind_table {
	struct ib_rwq_ind_table ib_rwq_ind_tbl;
};

struct mlx5_ib_ubuffer {
	struct ib_umem	       *umem;
};

struct mlx5_ib_qp_base {
	struct mlx5_ib_qp	*container_mibqp;
	struct mlx5_core_qp	mqp;
	struct mlx5_ib_ubuffer	ubuffer;
};

struct mlx5_ib_qp_trans {
	struct mlx5_ib_qp_base	base;
};

struct mlx5_ib_rss_qp {
	u32	tirn;
};

struct mlx5_ib_rq {
	struct mlx5_ib_qp_base base;
	struct mlx5_ib_wq	*rq;
	struct mlx5_ib_ubuffer	ubuffer;
	struct mlx5_db		*doorbell;
};

struct mlx5_ib_sq {
	struct mlx5_ib_qp_base base;
	struct mlx5_ib_wq	*sq;
	struct mlx5_ib_ubuffer	ubuffer;
	struct mlx5_db		*doorbell;
	struct mlx5_flow_handle	*flow_rule;
};

struct mlx5_ib_raw_packet_qp {
	struct mlx5_ib_sq sq;
	struct mlx5_ib_rq rq;
};

struct mlx5_bf {
	unsigned long		offset;
	struct mlx5_sq_bfreg	*bfreg;
};

struct mlx5_ib_dct {
	struct mlx5_core_dct	mdct;
};

struct mlx5_ib_gsi_qp {
	struct ib_qp_cap cap;
	struct mlx5_ib_gsi_wr *outstanding_wrs;
	u32 outstanding_pi, outstanding_ci;
	/* Protects access to the tx_qps. Post send operations synchronize
	 * with tx_qp creation in setup_qp(). Also protects the
	 * outstanding_wrs array and indices.
	 */
	spinlock_t lock;
	struct ib_qp **tx_qps;
};

struct mlx5_ib_qp {
	struct ib_qp		ibqp;
	union {
		struct mlx5_ib_qp_trans trans_qp;
		struct mlx5_ib_raw_packet_qp raw_packet_qp;
		struct mlx5_ib_rss_qp rss_qp;
		struct mlx5_ib_dct dct;
		struct mlx5_ib_gsi_qp gsi;
	};
	struct mlx5_frag_buf	buf;

	struct mlx5_ib_wq	rq;
	struct mlx5_ib_wq	sq;

	/* serialize qp state modifications
	 */
	struct mutex		mutex;
	/* cached variant of create_flags from struct ib_qp_init_attr */
	u32			flags;

	/* only for user space QPs. For kernel
	 * we have it from the bf object
	 */
	int			bfregn;

	struct list_head	qps_list;
	struct list_head	cq_recv_list;
	struct list_head	cq_send_list;
	struct mlx5_rate_limit	rl;

	/*
	 * IB/core doesn't store low-level QP types, so
	 * store both MLX and IBTA types in the field below.
	 * IB_QPT_DRIVER will be broken into DCI/DCT subtypes.
	 */
	enum ib_qp_type		type;
	/* A flag to indicate that a new counter has been configured but has
	 * not yet taken effect.
	 */
	u32			counter_pending;
};

struct mlx5_ib_cq_buf {
	struct mlx5_frag_buf_ctrl fbc;
	struct mlx5_frag_buf    frag_buf;
	struct ib_umem		*umem;
};

struct mlx5_umr_wr {
	struct ib_send_wr		wr;
	unsigned int			page_shift;
	unsigned int			xlt_size;
	u8				ignore_free_state:1;
};

static inline const struct mlx5_umr_wr *umr_wr(const struct ib_send_wr *wr)
{
	return container_of(wr, struct mlx5_umr_wr, wr);
}

struct mlx5_shared_mr_info {
	struct ib_umem		*umem;
};

enum mlx5_ib_cq_pr_flags {
	MLX5_IB_CQ_PR_FLAGS_CQE_128_PAD	= 1 << 0,
};

struct mlx5_ib_cq {
	struct ib_cq		ibcq;
	struct mlx5_core_cq	mcq;
	struct mlx5_ib_cq_buf	buf;

	/* serialize access to the CQ
	 */
	spinlock_t		lock;

	struct mutex		resize_mutex;
	struct mlx5_ib_cq_buf  *resize_buf;
	struct ib_umem	       *resize_umem;
	struct list_head	list_send_qp;
	struct list_head	list_recv_qp;
	struct list_head	wc_list;
	enum ib_cq_notify_flags notify_flags;
	struct work_struct	notify_work;
	u16			private_flags; /* Use mlx5_ib_cq_pr_flags */
};

struct mlx5_ib_wc {
	struct ib_wc wc;
	struct list_head list;
};

struct mlx5_ib_srq {
	struct ib_srq		ibsrq;
	struct mlx5_core_srq	msrq;
	struct mlx5_frag_buf	buf;
	struct mlx5_frag_buf_ctrl fbc;
	/* protect SRQ handling
	 */
	spinlock_t		lock;
	struct ib_umem	       *umem;
	/* serialize arming a SRQ
	 */
	struct mutex		mutex;
};

struct mlx5_ib_xrcd {
	struct ib_xrcd		ibxrcd;
};

enum mlx5_ib_mtt_access_flags {
	MLX5_IB_MTT_READ  = (1 << 0),
	MLX5_IB_MTT_WRITE = (1 << 1),
};

struct mlx5_user_mmap_entry {
	struct rdma_user_mmap_entry rdma_entry;
};

struct mlx5_ib_dm {
	struct ib_dm		ibdm;
	phys_addr_t		dev_addr;
	/* other dm types specific params should be added here */
	struct mlx5_user_mmap_entry mentry;
};

#define MLX5_IB_MTT_PRESENT (MLX5_IB_MTT_READ | MLX5_IB_MTT_WRITE)

#define MLX5_IB_DM_MEMIC_ALLOWED_ACCESS (IB_ACCESS_LOCAL_WRITE   |\
					 IB_ACCESS_REMOTE_WRITE  |\
					 IB_ACCESS_REMOTE_READ   |\
					 IB_ACCESS_REMOTE_ATOMIC |\
					 IB_ZERO_BASED)

#define MLX5_IB_DM_SW_ICM_ALLOWED_ACCESS (IB_ACCESS_LOCAL_WRITE   |\
					  IB_ACCESS_REMOTE_WRITE  |\
					  IB_ACCESS_REMOTE_READ   |\
					  IB_ZERO_BASED)

#define mlx5_update_odp_stats(mr, counter_name, value)		\
	atomic64_add(value, &((mr)->odp_stats.counter_name))
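
/*
 * Illustrative sketch (not part of the original header): counter_name must be
 * an atomic64_t field of struct ib_odp_counters (e.g. faults or
 * invalidations), so a page-fault path could record a handled fault with:
 *
 *	mlx5_update_odp_stats(mr, faults, 1);
 */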

struct mlx5_ib_mr {
	struct ib_mr		ibmr;
	unsigned int		page_shift;
	struct mlx5_core_mkey	mmkey;
	struct ib_umem	       *umem;
	struct mlx5_shared_mr_info	*smr_info;
	struct list_head	list;
	struct mlx5_cache_ent  *cache_ent;
	u32 out[MLX5_ST_SZ_DW(create_mkey_out)];
	struct mlx5_core_sig_ctx    *sig;
	int			access_flags; /* Needed for rereg MR */

	struct mlx5_ib_mr      *parent;
	/* Needed for IB_MR_TYPE_INTEGRITY */
	struct mlx5_ib_mr      *pi_mr;
	struct mlx5_ib_mr      *klm_mr;
	struct mlx5_ib_mr      *mtt_mr;

	/* For ODP and implicit */
	atomic_t		num_deferred_work;
	wait_queue_head_t       q_deferred_work;
	struct xarray		implicit_children;
	union {
		struct list_head elm;
		struct work_struct work;
	} odp_destroy;
	struct ib_odp_counters	odp_stats;
	bool			is_odp_implicit;

	struct mlx5_async_work  cb_work;
};

static inline bool is_odp_mr(struct mlx5_ib_mr *mr)
{
	return IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING) && mr->umem &&
	       mr->umem->is_odp;
}

struct mlx5_ib_mw {
	struct ib_mw		ibmw;
	struct mlx5_core_mkey	mmkey;
};

struct mlx5_ib_devx_mr {
	struct mlx5_core_mkey	mmkey;
};

struct mlx5_ib_umr_context {
	enum ib_wc_status	status;
	struct completion	done;
};

struct umr_common {
	/* control access to UMR QP
	 */
	struct semaphore	sem;
};

struct mlx5_cache_ent {
	struct list_head	head;
	/* sync access to the cache entry
	 */
	spinlock_t		lock;

	u8 fill_to_high_water:1;

	/*
	 * - available_mrs is the length of list head, ie the number of MRs
	 *   available for immediate allocation.
	 * - total_mrs is available_mrs plus all in use MRs that could be
	 *   returned to the cache.
	 * - limit is the low water mark for available_mrs, 2 * limit is the
	 *   upper water mark.
	 * - pending is the number of MRs currently being created
	 */
	u32 total_mrs;
	u32 available_mrs;
	u32 limit;
	u32 pending;

	struct mlx5_ib_dev     *dev;
	struct work_struct	work;
	struct delayed_work	dwork;
};

struct mlx5_mr_cache {
	struct workqueue_struct *wq;
	struct mlx5_cache_ent	ent[MAX_MR_CACHE_ENTRIES];
	unsigned long		last_add;
};

struct mlx5_ib_port_resources {
	struct mlx5_ib_gsi_qp *gsi;
	struct work_struct pkey_change_work;
};

struct mlx5_ib_resources {
	struct mlx5_ib_port_resources ports[2];
	/* Protects changes to the port resources */
	struct mutex	lock;
};

struct mlx5_ib_counters {
	u32 num_cong_counters;
	u32 num_ext_ppcnt_counters;
};

struct mlx5_ib_multiport_info;

struct mlx5_ib_multiport {
	struct mlx5_ib_multiport_info *mpi;
	/* To be held when accessing the multiport info */
	spinlock_t		mpi_lock;
};

struct mlx5_roce {
	/* Protect mlx5_ib_get_netdev from invoking dev_hold() with a NULL
	 * netdev pointer
	 */
	rwlock_t		netdev_lock;
	struct net_device	*netdev;
	struct notifier_block	nb;
	atomic_t		tx_port_affinity;
	enum ib_port_state last_port_state;
	struct mlx5_ib_dev	*dev;
};

struct mlx5_ib_port {
	struct mlx5_ib_counters cnts;
	struct mlx5_ib_multiport mp;
	struct mlx5_ib_dbg_cc_params *dbg_cc_params;
	struct mlx5_roce roce;
	struct mlx5_eswitch_rep		*rep;
};

struct mlx5_ib_dbg_param {
	struct mlx5_ib_dev	*dev;
	struct dentry		*dentry;
};

enum mlx5_ib_dbg_cc_types {
	MLX5_IB_DBG_CC_RP_CLAMP_TGT_RATE,
	MLX5_IB_DBG_CC_RP_CLAMP_TGT_RATE_ATI,
	MLX5_IB_DBG_CC_RP_TIME_RESET,
	MLX5_IB_DBG_CC_RP_BYTE_RESET,
	MLX5_IB_DBG_CC_RP_THRESHOLD,
	MLX5_IB_DBG_CC_RP_AI_RATE,
	MLX5_IB_DBG_CC_RP_MAX_RATE,
	MLX5_IB_DBG_CC_RP_HAI_RATE,
	MLX5_IB_DBG_CC_RP_MIN_DEC_FAC,
	MLX5_IB_DBG_CC_RP_MIN_RATE,
	MLX5_IB_DBG_CC_RP_RATE_TO_SET_ON_FIRST_CNP,
	MLX5_IB_DBG_CC_RP_DCE_TCP_G,
	MLX5_IB_DBG_CC_RP_DCE_TCP_RTT,
	MLX5_IB_DBG_CC_RP_RATE_REDUCE_MONITOR_PERIOD,
	MLX5_IB_DBG_CC_RP_INITIAL_ALPHA_VALUE,
	MLX5_IB_DBG_CC_RP_GD,
	MLX5_IB_DBG_CC_NP_MIN_TIME_BETWEEN_CNPS,
	MLX5_IB_DBG_CC_NP_CNP_DSCP,
	MLX5_IB_DBG_CC_NP_CNP_PRIO_MODE,
	MLX5_IB_DBG_CC_NP_CNP_PRIO,
	MLX5_IB_DBG_CC_MAX,
};

struct mlx5_ib_dbg_cc_params {
	struct mlx5_ib_dbg_param	params[MLX5_IB_DBG_CC_MAX];
};

enum {
	MLX5_MAX_DELAY_DROP_TIMEOUT_MS = 100,
};

struct mlx5_ib_delay_drop {
	struct mlx5_ib_dev     *dev;
	struct work_struct	delay_drop_work;
	/* serialize setting of delay drop */
	struct mutex		lock;
	struct dentry		*dir_debugfs;
};

enum mlx5_ib_stages {
	MLX5_IB_STAGE_NON_DEFAULT_CB,
	MLX5_IB_STAGE_DEVICE_RESOURCES,
	MLX5_IB_STAGE_DEVICE_NOTIFIER,
	MLX5_IB_STAGE_COUNTERS,
	MLX5_IB_STAGE_CONG_DEBUGFS,
	MLX5_IB_STAGE_PRE_IB_REG_UMR,
	MLX5_IB_STAGE_WHITELIST_UID,
	MLX5_IB_STAGE_IB_REG,
	MLX5_IB_STAGE_POST_IB_REG_UMR,
	MLX5_IB_STAGE_DELAY_DROP,
	MLX5_IB_STAGE_RESTRACK,
	MLX5_IB_STAGE_MAX,
};

struct mlx5_ib_stage {
	int (*init)(struct mlx5_ib_dev *dev);
	void (*cleanup)(struct mlx5_ib_dev *dev);
};

#define STAGE_CREATE(_stage, _init, _cleanup) \
	.stage[_stage] = {.init = _init, .cleanup = _cleanup}

struct mlx5_ib_profile {
	struct mlx5_ib_stage stage[MLX5_IB_STAGE_MAX];
};

struct mlx5_ib_multiport_info {
	struct list_head list;
	struct mlx5_ib_dev *ibdev;
	struct mlx5_core_dev *mdev;
	struct notifier_block mdev_events;
	struct completion unref_comp;
};

struct mlx5_ib_flow_action {
	struct ib_flow_action		ib_action;
	union {
		struct {
			struct mlx5_accel_esp_xfrm *ctx;
		} esp_aes_gcm;
		struct {
			struct mlx5_ib_dev *dev;
			union {
				struct mlx5_modify_hdr *modify_hdr;
				struct mlx5_pkt_reformat *pkt_reformat;
			};
		} flow_action_raw;
	};
};

struct mlx5_dm {
	struct mlx5_core_dev *dev;
	/* This lock is used to protect the access to the shared
	 * allocation map when concurrent requests by different
	 * processes are handled.
	 */
	spinlock_t lock;
	DECLARE_BITMAP(memic_alloc_pages, MLX5_MAX_MEMIC_PAGES);
};

struct mlx5_read_counters_attr {
	struct mlx5_fc *hw_cntrs_hndl;
};

enum mlx5_ib_counters_type {
	MLX5_IB_COUNTERS_FLOW,
};

struct mlx5_ib_mcounters {
	struct ib_counters ibcntrs;
	enum mlx5_ib_counters_type type;
	/* number of counters supported for this counters type */
	u32 counters_num;
	struct mlx5_fc *hw_cntrs_hndl;
	/* read function for this counters type */
	int (*read_counters)(struct ib_device *ibdev,
			     struct mlx5_read_counters_attr *read_attr);
	/* max index set as part of create_flow */
	u32 cntrs_max_index;
	/* number of counters data entries (<description,index> pair) */
	u32 ncounters;
	/* counters data array for descriptions and indexes */
	struct mlx5_ib_flow_counters_desc *counters_data;
	/* protects access to mcounters internal data */
	struct mutex mcntrs_mutex;
};

static inline struct mlx5_ib_mcounters *
to_mcounters(struct ib_counters *ibcntrs)
{
	return container_of(ibcntrs, struct mlx5_ib_mcounters, ibcntrs);
}

int parse_flow_flow_action(struct mlx5_ib_flow_action *maction,
			   bool is_egress,
			   struct mlx5_flow_act *action);

struct mlx5_ib_lb_state {
	/* protect the user_td */
	struct mutex		mutex;
	u32			user_td;
};

struct mlx5_ib_pf_eq {
	struct notifier_block irq_nb;
	struct mlx5_ib_dev *dev;
	struct mlx5_eq *core;
	struct work_struct work;
	spinlock_t lock; /* Pagefaults spinlock */
	struct workqueue_struct *wq;
};

struct mlx5_devx_event_table {
	struct mlx5_nb devx_nb;
	/* serialize updating the event_xa */
	struct mutex event_xa_lock;
	struct xarray event_xa;
};

struct mlx5_var_table {
	/* serialize updating the bitmap */
	struct mutex bitmap_lock;
	unsigned long *bitmap;
	u64 num_var_hw_entries;
};

struct mlx5_ib_dev {
	struct ib_device		ib_dev;
	struct mlx5_core_dev		*mdev;
	struct notifier_block		mdev_events;
	/* serialize update of capability mask
	 */
	struct mutex			cap_mask_mutex;
	u8				lag_active:1;
	struct umr_common		umrc;
	/* sync used page count stats
	 */
	struct mlx5_ib_resources	devr;

	struct mlx5_mr_cache		cache;
	struct timer_list		delay_timer;
	/* Prevents soft lock on massive reg MRs */
	struct mutex			slow_path_mutex;
	struct ib_odp_caps		odp_caps;
	struct mlx5_ib_pf_eq		odp_pf_eq;
	/*
	 * Sleepable RCU that prevents destruction of MRs while they are still
	 * being used by a page fault handler.
	 */
	struct srcu_struct		odp_srcu;
	struct xarray			odp_mkeys;

	struct mlx5_ib_flow_db		*flow_db;
	/* protect resources needed as part of reset flow */
	spinlock_t			reset_flow_resource_lock;
	struct list_head		qp_list;
	/* Array with num_ports elements */
	struct mlx5_ib_port		*port;
	struct mlx5_sq_bfreg		bfreg;
	struct mlx5_sq_bfreg		wc_bfreg;
	struct mlx5_sq_bfreg		fp_bfreg;
	struct mlx5_ib_delay_drop	delay_drop;
	const struct mlx5_ib_profile	*profile;

	struct mlx5_ib_lb_state		lb;
	struct list_head		ib_dev_list;
	u16				devx_whitelist_uid;
	struct mlx5_srq_table		srq_table;
	struct mlx5_qp_table		qp_table;
	struct mlx5_async_ctx		async_ctx;
	struct mlx5_devx_event_table	devx_event_table;
	struct mlx5_var_table		var_table;

	struct xarray			sig_mrs;
};

static inline struct mlx5_ib_cq *to_mibcq(struct mlx5_core_cq *mcq)
{
	return container_of(mcq, struct mlx5_ib_cq, mcq);
}

static inline struct mlx5_ib_xrcd *to_mxrcd(struct ib_xrcd *ibxrcd)
{
	return container_of(ibxrcd, struct mlx5_ib_xrcd, ibxrcd);
}

static inline struct mlx5_ib_dev *to_mdev(struct ib_device *ibdev)
{
	return container_of(ibdev, struct mlx5_ib_dev, ib_dev);
}

static inline struct mlx5_ib_dev *mr_to_mdev(struct mlx5_ib_mr *mr)
{
	return to_mdev(mr->ibmr.device);
}

static inline struct mlx5_ib_dev *mlx5_udata_to_mdev(struct ib_udata *udata)
{
	struct mlx5_ib_ucontext *context = rdma_udata_to_drv_context(
		udata, struct mlx5_ib_ucontext, ibucontext);

	return to_mdev(context->ibucontext.device);
}

static inline struct mlx5_ib_cq *to_mcq(struct ib_cq *ibcq)
{
	return container_of(ibcq, struct mlx5_ib_cq, ibcq);
}

static inline struct mlx5_ib_qp *to_mibqp(struct mlx5_core_qp *mqp)
{
	return container_of(mqp, struct mlx5_ib_qp_base, mqp)->container_mibqp;
}

static inline struct mlx5_ib_rwq *to_mibrwq(struct mlx5_core_qp *core_qp)
{
	return container_of(core_qp, struct mlx5_ib_rwq, core_qp);
}

static inline struct mlx5_ib_pd *to_mpd(struct ib_pd *ibpd)
{
	return container_of(ibpd, struct mlx5_ib_pd, ibpd);
}

static inline struct mlx5_ib_srq *to_msrq(struct ib_srq *ibsrq)
{
	return container_of(ibsrq, struct mlx5_ib_srq, ibsrq);
}

static inline struct mlx5_ib_qp *to_mqp(struct ib_qp *ibqp)
{
	return container_of(ibqp, struct mlx5_ib_qp, ibqp);
}

static inline struct mlx5_ib_rwq *to_mrwq(struct ib_wq *ibwq)
{
	return container_of(ibwq, struct mlx5_ib_rwq, ibwq);
}

static inline struct mlx5_ib_rwq_ind_table *to_mrwq_ind_table(struct ib_rwq_ind_table *ib_rwq_ind_tbl)
{
	return container_of(ib_rwq_ind_tbl, struct mlx5_ib_rwq_ind_table, ib_rwq_ind_tbl);
}

static inline struct mlx5_ib_srq *to_mibsrq(struct mlx5_core_srq *msrq)
{
	return container_of(msrq, struct mlx5_ib_srq, msrq);
}

static inline struct mlx5_ib_dm *to_mdm(struct ib_dm *ibdm)
{
	return container_of(ibdm, struct mlx5_ib_dm, ibdm);
}

static inline struct mlx5_ib_mr *to_mmr(struct ib_mr *ibmr)
{
	return container_of(ibmr, struct mlx5_ib_mr, ibmr);
}

static inline struct mlx5_ib_mw *to_mmw(struct ib_mw *ibmw)
{
	return container_of(ibmw, struct mlx5_ib_mw, ibmw);
}

static inline struct mlx5_ib_flow_action *
to_mflow_act(struct ib_flow_action *ibact)
{
	return container_of(ibact, struct mlx5_ib_flow_action, ib_action);
}

static inline struct mlx5_user_mmap_entry *
to_mmmap(struct rdma_user_mmap_entry *rdma_entry)
{
	return container_of(rdma_entry,
			    struct mlx5_user_mmap_entry, rdma_entry);
}

int mlx5_ib_db_map_user(struct mlx5_ib_ucontext *context,
			struct ib_udata *udata, unsigned long virt,
			struct mlx5_db *db);
void mlx5_ib_db_unmap_user(struct mlx5_ib_ucontext *context, struct mlx5_db *db);
void __mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq);
void mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq);
void mlx5_ib_free_srq_wqe(struct mlx5_ib_srq *srq, int wqe_index);
int mlx5_ib_create_ah(struct ib_ah *ah, struct rdma_ah_init_attr *init_attr,
		      struct ib_udata *udata);
int mlx5_ib_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr);
static inline int mlx5_ib_destroy_ah(struct ib_ah *ah, u32 flags)
{
	return 0;
}
int mlx5_ib_create_srq(struct ib_srq *srq, struct ib_srq_init_attr *init_attr,
		       struct ib_udata *udata);
int mlx5_ib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
		       enum ib_srq_attr_mask attr_mask, struct ib_udata *udata);
int mlx5_ib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr);
int mlx5_ib_destroy_srq(struct ib_srq *srq, struct ib_udata *udata);
int mlx5_ib_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
			  const struct ib_recv_wr **bad_wr);
int mlx5_ib_enable_lb(struct mlx5_ib_dev *dev, bool td, bool qp);
void mlx5_ib_disable_lb(struct mlx5_ib_dev *dev, bool td, bool qp);
struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd,
				struct ib_qp_init_attr *init_attr,
				struct ib_udata *udata);
int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		      int attr_mask, struct ib_udata *udata);
int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask,
		     struct ib_qp_init_attr *qp_init_attr);
int mlx5_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata);
void mlx5_ib_drain_sq(struct ib_qp *qp);
void mlx5_ib_drain_rq(struct ib_qp *qp);
int mlx5_ib_read_wqe_sq(struct mlx5_ib_qp *qp, int wqe_index, void *buffer,
			size_t buflen, size_t *bc);
int mlx5_ib_read_wqe_rq(struct mlx5_ib_qp *qp, int wqe_index, void *buffer,
			size_t buflen, size_t *bc);
int mlx5_ib_read_wqe_srq(struct mlx5_ib_srq *srq, int wqe_index, void *buffer,
			 size_t buflen, size_t *bc);
int mlx5_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
		      struct ib_udata *udata);
int mlx5_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata);
int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
int mlx5_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
int mlx5_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period);
int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata);
struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc);
struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				  u64 virt_addr, int access_flags,
				  struct ib_udata *udata);
int mlx5_ib_advise_mr(struct ib_pd *pd,
		      enum ib_uverbs_advise_mr_advice advice,
		      u32 flags,
		      struct ib_sge *sg_list,
		      u32 num_sge,
		      struct uverbs_attr_bundle *attrs);
int mlx5_ib_alloc_mw(struct ib_mw *mw, struct ib_udata *udata);
int mlx5_ib_dealloc_mw(struct ib_mw *mw);
int mlx5_ib_update_xlt(struct mlx5_ib_mr *mr, u64 idx, int npages,
		       int page_shift, int flags);
struct mlx5_ib_mr *mlx5_ib_alloc_implicit_mr(struct mlx5_ib_pd *pd,
					     struct ib_udata *udata,
					     int access_flags);
void mlx5_ib_free_implicit_mr(struct mlx5_ib_mr *mr);
void mlx5_ib_fence_odp_mr(struct mlx5_ib_mr *mr);
struct ib_mr *mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
				    u64 length, u64 virt_addr, int access_flags,
				    struct ib_pd *pd, struct ib_udata *udata);
int mlx5_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata);
struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
			       u32 max_num_sg);
struct ib_mr *mlx5_ib_alloc_mr_integrity(struct ib_pd *pd,
					 u32 max_num_sg,
					 u32 max_num_meta_sg);
int mlx5_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
		      unsigned int *sg_offset);
int mlx5_ib_map_mr_sg_pi(struct ib_mr *ibmr, struct scatterlist *data_sg,
			 int data_sg_nents, unsigned int *data_sg_offset,
			 struct scatterlist *meta_sg, int meta_sg_nents,
			 unsigned int *meta_sg_offset);
int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
			const struct ib_wc *in_wc, const struct ib_grh *in_grh,
			const struct ib_mad *in, struct ib_mad *out,
			size_t *out_mad_size, u16 *out_mad_pkey_index);
int mlx5_ib_alloc_xrcd(struct ib_xrcd *xrcd, struct ib_udata *udata);
int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd, struct ib_udata *udata);
int mlx5_query_ext_port_caps(struct mlx5_ib_dev *dev, u8 port);
int mlx5_query_mad_ifc_smp_attr_node_info(struct ib_device *ibdev,
					  struct ib_smp *out_mad);
int mlx5_query_mad_ifc_system_image_guid(struct ib_device *ibdev,
					 __be64 *sys_image_guid);
int mlx5_query_mad_ifc_max_pkeys(struct ib_device *ibdev,
				 u16 *max_pkeys);
int mlx5_query_mad_ifc_vendor_id(struct ib_device *ibdev,
				 u32 *vendor_id);
int mlx5_query_mad_ifc_node_desc(struct mlx5_ib_dev *dev, char *node_desc);
int mlx5_query_mad_ifc_node_guid(struct mlx5_ib_dev *dev, __be64 *node_guid);
int mlx5_query_mad_ifc_pkey(struct ib_device *ibdev, u8 port, u16 index,
			    u16 *pkey);
int mlx5_query_mad_ifc_gids(struct ib_device *ibdev, u8 port, int index,
			    union ib_gid *gid);
int mlx5_query_mad_ifc_port(struct ib_device *ibdev, u8 port,
			    struct ib_port_attr *props);
int mlx5_ib_query_port(struct ib_device *ibdev, u8 port,
		       struct ib_port_attr *props);
void mlx5_ib_populate_pas(struct ib_umem *umem, size_t page_size, __be64 *pas,
			  u64 access_flags);
void mlx5_ib_copy_pas(u64 *old, u64 *new, int step, int num);
int mlx5_ib_get_cqe_size(struct ib_cq *ibcq);
int mlx5_mr_cache_init(struct mlx5_ib_dev *dev);
int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev);

struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev,
				       unsigned int entry, int access_flags);
void mlx5_mr_cache_free(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
int mlx5_mr_cache_invalidate(struct mlx5_ib_mr *mr);

int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask,
			    struct ib_mr_status *mr_status);
struct ib_wq *mlx5_ib_create_wq(struct ib_pd *pd,
				struct ib_wq_init_attr *init_attr,
				struct ib_udata *udata);
int mlx5_ib_destroy_wq(struct ib_wq *wq, struct ib_udata *udata);
int mlx5_ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr,
		      u32 wq_attr_mask, struct ib_udata *udata);
int mlx5_ib_create_rwq_ind_table(struct ib_rwq_ind_table *ib_rwq_ind_table,
				 struct ib_rwq_ind_table_init_attr *init_attr,
				 struct ib_udata *udata);
int mlx5_ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *wq_ind_table);
struct ib_dm *mlx5_ib_alloc_dm(struct ib_device *ibdev,
			       struct ib_ucontext *context,
			       struct ib_dm_alloc_attr *attr,
			       struct uverbs_attr_bundle *attrs);
int mlx5_ib_dealloc_dm(struct ib_dm *ibdm, struct uverbs_attr_bundle *attrs);
struct ib_mr *mlx5_ib_reg_dm_mr(struct ib_pd *pd, struct ib_dm *dm,
				struct ib_dm_mr_attr *attr,
				struct uverbs_attr_bundle *attrs);

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev);
int mlx5_ib_odp_init_one(struct mlx5_ib_dev *ibdev);
void mlx5_ib_odp_cleanup_one(struct mlx5_ib_dev *ibdev);
int __init mlx5_ib_odp_init(void);
void mlx5_ib_odp_cleanup(void);
void mlx5_odp_init_mr_cache_entry(struct mlx5_cache_ent *ent);
void mlx5_odp_populate_xlt(void *xlt, size_t idx, size_t nentries,
			   struct mlx5_ib_mr *mr, int flags);

int mlx5_ib_advise_mr_prefetch(struct ib_pd *pd,
			       enum ib_uverbs_advise_mr_advice advice,
			       u32 flags, struct ib_sge *sg_list, u32 num_sge);
int mlx5_ib_init_odp_mr(struct mlx5_ib_mr *mr);
#else /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */
static inline void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev)
{
	return;
}

static inline int mlx5_ib_odp_init_one(struct mlx5_ib_dev *ibdev) { return 0; }
static inline void mlx5_ib_odp_cleanup_one(struct mlx5_ib_dev *ibdev) {}
static inline int mlx5_ib_odp_init(void) { return 0; }
static inline void mlx5_ib_odp_cleanup(void) {}
static inline void mlx5_odp_init_mr_cache_entry(struct mlx5_cache_ent *ent) {}
static inline void mlx5_odp_populate_xlt(void *xlt, size_t idx, size_t nentries,
					 struct mlx5_ib_mr *mr, int flags) {}

static inline int
mlx5_ib_advise_mr_prefetch(struct ib_pd *pd,
			   enum ib_uverbs_advise_mr_advice advice,
			   u32 flags, struct ib_sge *sg_list, u32 num_sge)
{
	return -EOPNOTSUPP;
}
static inline int mlx5_ib_init_odp_mr(struct mlx5_ib_mr *mr)
{
	return -EOPNOTSUPP;
}
#endif /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */

extern const struct mmu_interval_notifier_ops mlx5_mn_ops;

/* Needed for rep profile */
void __mlx5_ib_remove(struct mlx5_ib_dev *dev,
		      const struct mlx5_ib_profile *profile,
		      int stage);
int __mlx5_ib_add(struct mlx5_ib_dev *dev,
		  const struct mlx5_ib_profile *profile);

int mlx5_ib_get_vf_config(struct ib_device *device, int vf,
			  u8 port, struct ifla_vf_info *info);
int mlx5_ib_set_vf_link_state(struct ib_device *device, int vf,
			      u8 port, int state);
int mlx5_ib_get_vf_stats(struct ib_device *device, int vf,
			 u8 port, struct ifla_vf_stats *stats);
int mlx5_ib_get_vf_guid(struct ib_device *device, int vf, u8 port,
			struct ifla_vf_guid *node_guid,
			struct ifla_vf_guid *port_guid);
int mlx5_ib_set_vf_guid(struct ib_device *device, int vf, u8 port,
			u64 guid, int type);

__be16 mlx5_get_roce_udp_sport_min(const struct mlx5_ib_dev *dev,
				   const struct ib_gid_attr *attr);

void mlx5_ib_cleanup_cong_debugfs(struct mlx5_ib_dev *dev, u8 port_num);
void mlx5_ib_init_cong_debugfs(struct mlx5_ib_dev *dev, u8 port_num);

/* GSI QP helper functions */
int mlx5_ib_create_gsi(struct ib_pd *pd, struct mlx5_ib_qp *mqp,
		       struct ib_qp_init_attr *attr);
int mlx5_ib_destroy_gsi(struct mlx5_ib_qp *mqp);
int mlx5_ib_gsi_modify_qp(struct ib_qp *qp, struct ib_qp_attr *attr,
			  int attr_mask);
int mlx5_ib_gsi_query_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
			 int qp_attr_mask,
			 struct ib_qp_init_attr *qp_init_attr);
int mlx5_ib_gsi_post_send(struct ib_qp *qp, const struct ib_send_wr *wr,
			  const struct ib_send_wr **bad_wr);
int mlx5_ib_gsi_post_recv(struct ib_qp *qp, const struct ib_recv_wr *wr,
			  const struct ib_recv_wr **bad_wr);
void mlx5_ib_gsi_pkey_change(struct mlx5_ib_gsi_qp *gsi);

int mlx5_ib_generate_wc(struct ib_cq *ibcq, struct ib_wc *wc);

void mlx5_ib_free_bfreg(struct mlx5_ib_dev *dev, struct mlx5_bfreg_info *bfregi,
			int bfregn);
struct mlx5_ib_dev *mlx5_ib_get_ibdev_from_mpi(struct mlx5_ib_multiport_info *mpi);
struct mlx5_core_dev *mlx5_ib_get_native_port_mdev(struct mlx5_ib_dev *dev,
						   u8 ib_port_num,
						   u8 *native_port_num);
void mlx5_ib_put_native_port_mdev(struct mlx5_ib_dev *dev,
				  u8 port_num);

extern const struct uapi_definition mlx5_ib_devx_defs[];
extern const struct uapi_definition mlx5_ib_flow_defs[];
extern const struct uapi_definition mlx5_ib_qos_defs[];
extern const struct uapi_definition mlx5_ib_std_types_defs[];

static inline void init_query_mad(struct ib_smp *mad)
{
	mad->base_version  = 1;
	mad->mgmt_class    = IB_MGMT_CLASS_SUBN_LID_ROUTED;
	mad->class_version = 1;
	mad->method	   = IB_MGMT_METHOD_GET;
}

static inline int is_qp1(enum ib_qp_type qp_type)
{
	return qp_type == MLX5_IB_QPT_HW_GSI || qp_type == IB_QPT_GSI;
}

#define MLX5_MAX_UMR_SHIFT 16
#define MLX5_MAX_UMR_PAGES (1 << MLX5_MAX_UMR_SHIFT)

static inline u32 check_cq_create_flags(u32 flags)
{
	/*
	 * It returns non-zero value for unsupported CQ
	 * create flags, otherwise it returns zero.
	 */
	return (flags & ~(IB_UVERBS_CQ_FLAGS_IGNORE_OVERRUN |
			  IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION));
}

static inline int verify_assign_uidx(u8 cqe_version, u32 cmd_uidx,
				     u32 *user_index)
{
	if (cqe_version) {
		if ((cmd_uidx == MLX5_IB_DEFAULT_UIDX) ||
		    (cmd_uidx & ~MLX5_USER_ASSIGNED_UIDX_MASK))
			return -EINVAL;
		*user_index = cmd_uidx;
	} else {
		*user_index = MLX5_IB_DEFAULT_UIDX;
	}

	return 0;
}

static inline int get_qp_user_index(struct mlx5_ib_ucontext *ucontext,
				    struct mlx5_ib_create_qp *ucmd,
				    int inlen,
				    u32 *user_index)
{
	u8 cqe_version = ucontext->cqe_version;

	if ((offsetofend(typeof(*ucmd), uidx) <= inlen) && !cqe_version &&
	    (ucmd->uidx == MLX5_IB_DEFAULT_UIDX))
		return 0;

	if ((offsetofend(typeof(*ucmd), uidx) <= inlen) != !!cqe_version)
		return -EINVAL;

	return verify_assign_uidx(cqe_version, ucmd->uidx, user_index);
}

static inline int get_srq_user_index(struct mlx5_ib_ucontext *ucontext,
				     struct mlx5_ib_create_srq *ucmd,
				     int inlen,
				     u32 *user_index)
{
	u8 cqe_version = ucontext->cqe_version;

	if ((offsetofend(typeof(*ucmd), uidx) <= inlen) && !cqe_version &&
	    (ucmd->uidx == MLX5_IB_DEFAULT_UIDX))
		return 0;

	if ((offsetofend(typeof(*ucmd), uidx) <= inlen) != !!cqe_version)
		return -EINVAL;

	return verify_assign_uidx(cqe_version, ucmd->uidx, user_index);
}

static inline int get_uars_per_sys_page(struct mlx5_ib_dev *dev, bool lib_support)
{
	return lib_support && MLX5_CAP_GEN(dev->mdev, uar_4k) ?
				MLX5_UARS_IN_PAGE : 1;
}

static inline int get_num_static_uars(struct mlx5_ib_dev *dev,
				      struct mlx5_bfreg_info *bfregi)
{
	return get_uars_per_sys_page(dev, bfregi->lib_uar_4k) *
	       bfregi->num_static_sys_pages;
}

extern void *xlt_emergency_page;

int bfregn_to_uar_index(struct mlx5_ib_dev *dev,
			struct mlx5_bfreg_info *bfregi, u32 bfregn,
			bool dyn_bfreg);

static inline bool mlx5_ib_can_load_pas_with_umr(struct mlx5_ib_dev *dev,
						 size_t length)
{
	/*
	 * umr_check_mkey_mask() rejects MLX5_MKEY_MASK_PAGE_SIZE which is
	 * always set if MLX5_IB_SEND_UMR_UPDATE_TRANSLATION (aka
	 * MLX5_IB_UPD_XLT_ADDR and MLX5_IB_UPD_XLT_ENABLE) is set. Thus, a
	 * mkey can never be enabled without this capability. Simplify this
	 * weird quirky hardware by just saying it can't use PAS lists with
	 * UMR at all.
	 */
	if (MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled))
		return false;

	/*
	 * length is the size of the MR in bytes when mlx5_ib_update_xlt() is
	 * used.
	 */
	if (!MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset) &&
	    length >= MLX5_MAX_UMR_PAGES * PAGE_SIZE)
		return false;
	return true;
}

/*
 * true if an existing MR can be reconfigured to new access_flags using UMR.
 * Older HW cannot use UMR to update certain elements of the MKC. See
 * umr_check_mkey_mask() and get_umr_update_access_mask().
 */
static inline bool mlx5_ib_can_reconfig_with_umr(struct mlx5_ib_dev *dev,
						 unsigned int current_access_flags,
						 unsigned int target_access_flags)
{
	unsigned int diffs = current_access_flags ^ target_access_flags;

	if ((diffs & IB_ACCESS_REMOTE_ATOMIC) &&
	    MLX5_CAP_GEN(dev->mdev, atomic) &&
	    MLX5_CAP_GEN(dev->mdev, umr_modify_atomic_disabled))
		return false;

	if ((diffs & IB_ACCESS_RELAXED_ORDERING) &&
	    MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write) &&
	    !MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write_umr))
		return false;

	if ((diffs & IB_ACCESS_RELAXED_ORDERING) &&
	    MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read) &&
	    !MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read_umr))
		return false;

	return true;
}

int mlx5_ib_test_wc(struct mlx5_ib_dev *dev);

static inline bool mlx5_ib_lag_should_assign_affinity(struct mlx5_ib_dev *dev)
{
	return dev->lag_active ||
		(MLX5_CAP_GEN(dev->mdev, num_lag_ports) > 1 &&
		 MLX5_CAP_GEN(dev->mdev, lag_tx_port_affinity));
}

#endif /* MLX5_IB_H */