/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef MLX5_IB_H
#define MLX5_IB_H

#include <linux/kernel.h>
#include <linux/sched.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_smi.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/cq.h>
#include <linux/mlx5/fs.h>
#include <linux/mlx5/qp.h>
#include <linux/types.h>
#include <linux/mlx5/transobj.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/mlx5-abi.h>
#include <rdma/uverbs_ioctl.h>
#include <rdma/mlx5_user_ioctl_cmds.h>
#include <rdma/mlx5_user_ioctl_verbs.h>
#define mlx5_ib_dbg(_dev, format, arg...)                                      \
	dev_dbg(&(_dev)->ib_dev.dev, "%s:%d:(pid %d): " format, __func__,      \
		__LINE__, current->pid, ##arg)

#define mlx5_ib_err(_dev, format, arg...)                                      \
	dev_err(&(_dev)->ib_dev.dev, "%s:%d:(pid %d): " format, __func__,      \
		__LINE__, current->pid, ##arg)

#define mlx5_ib_warn(_dev, format, arg...)                                     \
	dev_warn(&(_dev)->ib_dev.dev, "%s:%d:(pid %d): " format, __func__,     \
		 __LINE__, current->pid, ##arg)
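/*
 * Illustrative usage sketch (not part of the original header): the macros
 * above prefix every message with function, line and pid, so callers only
 * pass the event-specific part. "dev" is a struct mlx5_ib_dev pointer; the
 * qpn and err variables are hypothetical.
 *
 *	mlx5_ib_dbg(dev, "created QP 0x%x\n", qpn);
 *	mlx5_ib_err(dev, "failed to create CQ, err %d\n", err);
 */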
#define MLX5_IB_DEFAULT_UIDX 0xffffff
#define MLX5_USER_ASSIGNED_UIDX_MASK __mlx5_mask(qpc, user_index)

#define MLX5_MKEY_PAGE_SHIFT_MASK __mlx5_mask(mkc, log_page_size)
	MLX5_IB_MMAP_OFFSET_START = 9,
	MLX5_IB_MMAP_OFFSET_END = 255,

	MLX5_IB_MMAP_CMD_SHIFT = 8,
	MLX5_IB_MMAP_CMD_MASK = 0xff,
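/*
 * Illustrative sketch only (not taken from the original source): the mmap
 * "command" is carried in the upper bits of the user-supplied page offset,
 * so a handler can recover it with the shift/mask above. "vma" is a
 * hypothetical struct vm_area_struct pointer.
 *
 *	unsigned long cmd = (vma->vm_pgoff >> MLX5_IB_MMAP_CMD_SHIFT) &
 *			    MLX5_IB_MMAP_CMD_MASK;
 */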
	MLX5_RES_SCAT_DATA32_CQE = 0x1,
	MLX5_RES_SCAT_DATA64_CQE = 0x2,
	MLX5_REQ_SCAT_DATA32_CQE = 0x11,
	MLX5_REQ_SCAT_DATA64_CQE = 0x22,
enum mlx5_ib_mad_ifc_flags {
	MLX5_MAD_IFC_IGNORE_MKEY = 1,
	MLX5_MAD_IFC_IGNORE_BKEY = 2,
	MLX5_MAD_IFC_NET_VIEW = 4,
	MLX5_CROSS_CHANNEL_BFREG = 0,

	MLX5_TM_MAX_RNDV_MSG_SIZE = 64,

	MLX5_IB_INVALID_UAR_INDEX = BIT(31),
	MLX5_IB_INVALID_BFREG = BIT(31),

	MLX5_MAX_MEMIC_PAGES = 0x100,
	MLX5_MEMIC_ALLOC_SIZE_MASK = 0x3f,

	MLX5_MEMIC_BASE_ALIGN = 6,
	MLX5_MEMIC_BASE_SIZE = 1 << MLX5_MEMIC_BASE_ALIGN,
enum mlx5_ib_mmap_type {
	MLX5_IB_MMAP_TYPE_MEMIC = 1,
	MLX5_IB_MMAP_TYPE_VAR = 2,
	MLX5_IB_MMAP_TYPE_UAR_WC = 3,
	MLX5_IB_MMAP_TYPE_UAR_NC = 4,
struct mlx5_bfreg_info {
	int num_low_latency_bfregs;
	/* protect bfreg allocation data structs */
	u32 num_static_sys_pages;
	u32 total_num_bfregs;
struct mlx5_ib_ucontext {
	struct ib_ucontext ibucontext;
	struct list_head db_page_list;

	/* protect doorbell record alloc/free */
	struct mutex db_page_mutex;
	struct mlx5_bfreg_info bfregi;
	/* Transport Domain number */

	/* For RoCE LAG TX affinity */
	atomic_t tx_port_affinity;
static inline struct mlx5_ib_ucontext *to_mucontext(struct ib_ucontext *ibucontext)
{
	return container_of(ibucontext, struct mlx5_ib_ucontext, ibucontext);
}
	MLX5_IB_FLOW_ACTION_MODIFY_HEADER,
	MLX5_IB_FLOW_ACTION_PACKET_REFORMAT,
	MLX5_IB_FLOW_ACTION_DECAP,
#define MLX5_IB_FLOW_MCAST_PRIO (MLX5_BY_PASS_NUM_PRIOS - 1)
#define MLX5_IB_FLOW_LAST_PRIO (MLX5_BY_PASS_NUM_REGULAR_PRIOS - 1)
#if (MLX5_IB_FLOW_LAST_PRIO <= 0)
#error "Invalid number of bypass priorities"
#endif
#define MLX5_IB_FLOW_LEFTOVERS_PRIO (MLX5_IB_FLOW_MCAST_PRIO + 1)

#define MLX5_IB_NUM_FLOW_FT (MLX5_IB_FLOW_LEFTOVERS_PRIO + 1)
#define MLX5_IB_NUM_SNIFFER_FTS 2
#define MLX5_IB_NUM_EGRESS_FTS 1
struct mlx5_ib_flow_prio {
	struct mlx5_flow_table *flow_table;
	unsigned int refcount;
struct mlx5_ib_flow_handler {
	struct list_head list;
	struct ib_flow ibflow;
	struct mlx5_ib_flow_prio *prio;
	struct mlx5_flow_handle *rule;
	struct ib_counters *ibcounters;
	struct mlx5_ib_dev *dev;
	struct mlx5_ib_flow_matcher *flow_matcher;
struct mlx5_ib_flow_matcher {
	struct mlx5_ib_match_params matcher_mask;
	enum mlx5_ib_flow_type flow_type;
	enum mlx5_flow_namespace_type ns_type;
	struct mlx5_core_dev *mdev;
	u8 match_criteria_enable;

	struct mlx5_core_dev *mdev;
struct mlx5_ib_flow_db {
	struct mlx5_ib_flow_prio prios[MLX5_IB_NUM_FLOW_FT];
	struct mlx5_ib_flow_prio egress_prios[MLX5_IB_NUM_FLOW_FT];
	struct mlx5_ib_flow_prio sniffer[MLX5_IB_NUM_SNIFFER_FTS];
	struct mlx5_ib_flow_prio egress[MLX5_IB_NUM_EGRESS_FTS];
	struct mlx5_ib_flow_prio fdb;
	struct mlx5_ib_flow_prio rdma_rx[MLX5_IB_NUM_FLOW_FT];
	struct mlx5_ib_flow_prio rdma_tx[MLX5_IB_NUM_FLOW_FT];
	struct mlx5_flow_table *lag_demux_ft;
	/* Protect flow steering bypass flow tables
	 * when adding/removing flow rules. Only a single add/removal of a
	 * flow steering rule can be in progress at a time.
	 */
/* Use macros here so that we don't have to duplicate
 * enum ib_send_flags and enum ib_qp_type for low-level driver
 */

#define MLX5_IB_SEND_UMR_ENABLE_MR (IB_SEND_RESERVED_START << 0)
#define MLX5_IB_SEND_UMR_DISABLE_MR (IB_SEND_RESERVED_START << 1)
#define MLX5_IB_SEND_UMR_FAIL_IF_FREE (IB_SEND_RESERVED_START << 2)
#define MLX5_IB_SEND_UMR_UPDATE_XLT (IB_SEND_RESERVED_START << 3)
#define MLX5_IB_SEND_UMR_UPDATE_TRANSLATION (IB_SEND_RESERVED_START << 4)
#define MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS IB_SEND_RESERVED_END

#define MLX5_IB_QPT_REG_UMR IB_QPT_RESERVED1
/*
 * IB_QPT_GSI creates the software wrapper around GSI, and MLX5_IB_QPT_HW_GSI
 * creates the actual hardware QP.
 */
#define MLX5_IB_QPT_HW_GSI IB_QPT_RESERVED2
#define MLX5_IB_QPT_DCI IB_QPT_RESERVED3
#define MLX5_IB_QPT_DCT IB_QPT_RESERVED4
#define MLX5_IB_WR_UMR IB_WR_RESERVED1
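/*
 * Illustrative sketch (not from the original header): because the values
 * above live in the verbs-reserved ranges, driver code can test for them
 * right next to the generic enums without collisions. The helper shown is
 * hypothetical.
 *
 *	if (wr->opcode == MLX5_IB_WR_UMR &&
 *	    (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_XLT))
 *		handle_umr_xlt_update(wr);
 */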
#define MLX5_IB_UMR_OCTOWORD 16
#define MLX5_IB_UMR_XLT_ALIGNMENT 64

#define MLX5_IB_UPD_XLT_ZAP	 BIT(0)
#define MLX5_IB_UPD_XLT_ENABLE	 BIT(1)
#define MLX5_IB_UPD_XLT_ATOMIC	 BIT(2)
#define MLX5_IB_UPD_XLT_ADDR	 BIT(3)
#define MLX5_IB_UPD_XLT_PD	 BIT(4)
#define MLX5_IB_UPD_XLT_ACCESS	 BIT(5)
#define MLX5_IB_UPD_XLT_INDIRECT BIT(6)
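/*
 * Illustrative sketch (not part of the original header): the MLX5_IB_UPD_XLT_*
 * bits are OR-ed into the "flags" argument of mlx5_ib_update_xlt(), declared
 * further down in this file. mr, npages and page_shift are hypothetical
 * locals.
 *
 *	err = mlx5_ib_update_xlt(mr, 0, npages, page_shift,
 *				 MLX5_IB_UPD_XLT_ENABLE | MLX5_IB_UPD_XLT_ATOMIC);
 */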
/* Private QP creation flags to be passed in ib_qp_init_attr.create_flags.
 *
 * These flags are intended for internal use by the mlx5_ib driver, and they
 * rely on the range reserved for that use in the ib_qp_create_flags enum.
 */
#define MLX5_IB_QP_CREATE_SQPN_QP1 IB_QP_CREATE_RESERVED_START
#define MLX5_IB_QP_CREATE_WC_TEST (IB_QP_CREATE_RESERVED_START << 1)
enum mlx5_ib_rq_flags {
	MLX5_IB_RQ_CVLAN_STRIPPING = 1 << 0,
	MLX5_IB_RQ_PCI_WRITE_END_PADDING = 1 << 1,

	struct mlx5_frag_buf_ctrl fbc;
	struct wr_list *w_list;
	/* serialize post to the work queue
	 */
enum mlx5_ib_wq_flags {
	MLX5_IB_WQ_FLAGS_DELAY_DROP = 0x1,
	MLX5_IB_WQ_FLAGS_STRIDING_RQ = 0x2,

#define MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES 9
#define MLX5_MAX_SINGLE_WQE_LOG_NUM_STRIDES 16
#define MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES 6
#define MLX5_MAX_SINGLE_STRIDE_LOG_NUM_BYTES 13
#define MLX5_EXT_MIN_SINGLE_WQE_LOG_NUM_STRIDES 3
	struct mlx5_core_qp core_qp;
	u32 two_byte_shift_en;
	u32 single_stride_log_num_of_bytes;
	struct ib_umem *umem;
	unsigned int page_shift;
	u32 create_flags; /* Use enum mlx5_ib_wq_flags */
struct mlx5_ib_rwq_ind_table {
	struct ib_rwq_ind_table ib_rwq_ind_tbl;

struct mlx5_ib_ubuffer {
	struct ib_umem *umem;
struct mlx5_ib_qp_base {
	struct mlx5_ib_qp *container_mibqp;
	struct mlx5_core_qp mqp;
	struct mlx5_ib_ubuffer ubuffer;

struct mlx5_ib_qp_trans {
	struct mlx5_ib_qp_base base;
struct mlx5_ib_rss_qp {

	struct mlx5_ib_qp_base base;
	struct mlx5_ib_wq *rq;
	struct mlx5_ib_ubuffer ubuffer;
	struct mlx5_db *doorbell;
	struct mlx5_ib_qp_base base;
	struct mlx5_ib_wq *sq;
	struct mlx5_ib_ubuffer ubuffer;
	struct mlx5_db *doorbell;
	struct mlx5_flow_handle *flow_rule;

struct mlx5_ib_raw_packet_qp {
	struct mlx5_ib_sq sq;
	struct mlx5_ib_rq rq;
	unsigned long offset;
	struct mlx5_sq_bfreg *bfreg;

	struct mlx5_core_dct mdct;
	struct mlx5_ib_qp_trans trans_qp;
	struct mlx5_ib_raw_packet_qp raw_packet_qp;
	struct mlx5_ib_rss_qp rss_qp;
	struct mlx5_ib_dct dct;

	struct mlx5_frag_buf buf;

	struct mlx5_ib_wq rq;

	struct mlx5_ib_wq sq;

	/* serialize qp state modifications
	 */

	/* only for user space QPs. For kernel
	 * we have it from the bf object
	 */

	struct list_head qps_list;
	struct list_head cq_recv_list;
	struct list_head cq_send_list;
	struct mlx5_rate_limit rl;

	/* storage for qp sub type when core qp type is IB_QPT_DRIVER */
	enum ib_qp_type qp_sub_type;
	/* A flag to indicate that a new counter has been configured
	 * but has not yet taken effect
	 */
struct mlx5_ib_cq_buf {
	struct mlx5_frag_buf_ctrl fbc;
	struct mlx5_frag_buf frag_buf;
	struct ib_umem *umem;
enum mlx5_ib_qp_flags {
	MLX5_IB_QP_LSO = IB_QP_CREATE_IPOIB_UD_LSO,
	MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK = IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK,
	MLX5_IB_QP_CROSS_CHANNEL = IB_QP_CREATE_CROSS_CHANNEL,
	MLX5_IB_QP_MANAGED_SEND = IB_QP_CREATE_MANAGED_SEND,
	MLX5_IB_QP_MANAGED_RECV = IB_QP_CREATE_MANAGED_RECV,
	MLX5_IB_QP_SIGNATURE_HANDLING = 1 << 5,
	/* QP uses 1 as its source QP number */
	MLX5_IB_QP_SQPN_QP1 = 1 << 6,
	MLX5_IB_QP_CAP_SCATTER_FCS = 1 << 7,
	MLX5_IB_QP_RSS = 1 << 8,
	MLX5_IB_QP_CVLAN_STRIPPING = 1 << 9,
	MLX5_IB_QP_UNDERLAY = 1 << 10,
	MLX5_IB_QP_PCI_WRITE_END_PADDING = 1 << 11,
	MLX5_IB_QP_TUNNEL_OFFLOAD = 1 << 12,
	MLX5_IB_QP_PACKET_BASED_CREDIT = 1 << 13,

	struct ib_send_wr wr;
	unsigned int page_shift;
	unsigned int xlt_size;
	u8 ignore_free_state:1;
static inline const struct mlx5_umr_wr *umr_wr(const struct ib_send_wr *wr)
{
	return container_of(wr, struct mlx5_umr_wr, wr);
}
struct mlx5_shared_mr_info {
	struct ib_umem *umem;
enum mlx5_ib_cq_pr_flags {
	MLX5_IB_CQ_PR_FLAGS_CQE_128_PAD = 1 << 0,

	struct mlx5_core_cq mcq;
	struct mlx5_ib_cq_buf buf;
	/* serialize access to the CQ
	 */
	struct mutex resize_mutex;
	struct mlx5_ib_cq_buf *resize_buf;
	struct ib_umem *resize_umem;

	struct list_head list_send_qp;
	struct list_head list_recv_qp;

	struct list_head wc_list;
	enum ib_cq_notify_flags notify_flags;
	struct work_struct notify_work;
	u16 private_flags; /* Use mlx5_ib_cq_pr_flags */
	struct list_head list;

	struct mlx5_core_srq msrq;
	struct mlx5_frag_buf buf;
	struct mlx5_frag_buf_ctrl fbc;
	/* protect SRQ handling
	 */
	struct ib_umem *umem;
	/* serialize arming a SRQ
	 */
struct mlx5_ib_xrcd {
	struct ib_xrcd ibxrcd;

enum mlx5_ib_mtt_access_flags {
	MLX5_IB_MTT_READ = (1 << 0),
	MLX5_IB_MTT_WRITE = (1 << 1),
struct mlx5_user_mmap_entry {
	struct rdma_user_mmap_entry rdma_entry;

	phys_addr_t dev_addr;

	/* other dm types specific params should be added here */

	struct mlx5_user_mmap_entry mentry;
#define MLX5_IB_MTT_PRESENT (MLX5_IB_MTT_READ | MLX5_IB_MTT_WRITE)

#define MLX5_IB_DM_MEMIC_ALLOWED_ACCESS (IB_ACCESS_LOCAL_WRITE   |\
					 IB_ACCESS_REMOTE_WRITE  |\
					 IB_ACCESS_REMOTE_READ   |\
					 IB_ACCESS_REMOTE_ATOMIC |\

#define MLX5_IB_DM_SW_ICM_ALLOWED_ACCESS (IB_ACCESS_LOCAL_WRITE   |\
					  IB_ACCESS_REMOTE_WRITE  |\
					  IB_ACCESS_REMOTE_READ   |\

#define mlx5_update_odp_stats(mr, counter_name, value)		\
	atomic64_add(value, &((mr)->odp_stats.counter_name))
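/*
 * Illustrative usage sketch (not part of the original header): ODP statistics
 * are bumped through the macro above. "faults" is assumed here to be one of
 * the counters in struct ib_odp_counters; npages is a hypothetical local.
 *
 *	mlx5_update_odp_stats(mr, faults, npages);
 */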
	struct mlx5_core_mkey mmkey;
	struct ib_umem *umem;
	struct mlx5_shared_mr_info *smr_info;
	struct list_head list;

	struct mlx5_cache_ent *cache_ent;

	struct mlx5_ib_dev *dev;
	u32 out[MLX5_ST_SZ_DW(create_mkey_out)];
	struct mlx5_core_sig_ctx *sig;

	int access_flags; /* Needed for rereg MR */

	struct mlx5_ib_mr *parent;
	/* Needed for IB_MR_TYPE_INTEGRITY */
	struct mlx5_ib_mr *pi_mr;
	struct mlx5_ib_mr *klm_mr;
	struct mlx5_ib_mr *mtt_mr;

	/* For ODP and implicit */
	atomic_t num_deferred_work;
	wait_queue_head_t q_deferred_work;
	struct xarray implicit_children;

	struct list_head elm;
	struct work_struct work;

	struct ib_odp_counters odp_stats;
	bool is_odp_implicit;

	struct mlx5_async_work cb_work;
static inline bool is_odp_mr(struct mlx5_ib_mr *mr)
{
	return IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING) && mr->umem &&
	       mr->umem->is_odp;
}
	struct mlx5_core_mkey mmkey;

struct mlx5_ib_devx_mr {
	struct mlx5_core_mkey mmkey;

struct mlx5_ib_umr_context {

	enum ib_wc_status status;
	struct completion done;

	/* control access to UMR QP
	 */
	struct semaphore sem;
struct mlx5_cache_ent {
	struct list_head head;
	/* sync access to the cache entry
	 */

	u8 fill_to_high_water:1;

	/*
	 * - available_mrs is the length of list head, ie the number of MRs
	 *   available for immediate allocation.
	 * - total_mrs is available_mrs plus all in use MRs that could be
	 *   returned to the cache.
	 * - limit is the low water mark for available_mrs, 2 * limit is the
	 *   upper water mark.
	 * - pending is the number of MRs currently being created
	 */

	struct mlx5_ib_dev *dev;
	struct work_struct work;
	struct delayed_work dwork;
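/*
 * Illustrative sketch (not from the original header): the water marks
 * described above drive background refill roughly as follows. available_mrs
 * and limit are the fields named in the comment; the real policy lives in
 * the .c file.
 *
 *	if (ent->available_mrs < ent->limit)
 *		queue_work(ent->dev->cache.wq, &ent->work);
 */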
struct mlx5_mr_cache {
	struct workqueue_struct *wq;
	struct mlx5_cache_ent ent[MAX_MR_CACHE_ENTRIES];

	unsigned long last_add;

struct mlx5_ib_gsi_qp;
struct mlx5_ib_port_resources {
	struct mlx5_ib_resources *devr;
	struct mlx5_ib_gsi_qp *gsi;
	struct work_struct pkey_change_work;

struct mlx5_ib_resources {

	struct mlx5_ib_port_resources ports[2];
	/* Protects changes to the port resources */
struct mlx5_ib_counters {

	u32 num_cong_counters;
	u32 num_ext_ppcnt_counters;

struct mlx5_ib_multiport_info;
struct mlx5_ib_multiport {
	struct mlx5_ib_multiport_info *mpi;
	/* To be held when accessing the multiport info */

	/* Protect mlx5_ib_get_netdev from invoking dev_hold() with a NULL
	 * netdev pointer
	 */
	rwlock_t netdev_lock;
	struct net_device *netdev;
	struct notifier_block nb;
	atomic_t tx_port_affinity;
	enum ib_port_state last_port_state;
	struct mlx5_ib_dev *dev;
struct mlx5_ib_port {
	struct mlx5_ib_counters cnts;
	struct mlx5_ib_multiport mp;
	struct mlx5_ib_dbg_cc_params *dbg_cc_params;
	struct mlx5_roce roce;
	struct mlx5_eswitch_rep *rep;

struct mlx5_ib_dbg_param {

	struct mlx5_ib_dev *dev;
	struct dentry *dentry;
enum mlx5_ib_dbg_cc_types {
	MLX5_IB_DBG_CC_RP_CLAMP_TGT_RATE,
	MLX5_IB_DBG_CC_RP_CLAMP_TGT_RATE_ATI,
	MLX5_IB_DBG_CC_RP_TIME_RESET,
	MLX5_IB_DBG_CC_RP_BYTE_RESET,
	MLX5_IB_DBG_CC_RP_THRESHOLD,
	MLX5_IB_DBG_CC_RP_AI_RATE,
	MLX5_IB_DBG_CC_RP_MAX_RATE,
	MLX5_IB_DBG_CC_RP_HAI_RATE,
	MLX5_IB_DBG_CC_RP_MIN_DEC_FAC,
	MLX5_IB_DBG_CC_RP_MIN_RATE,
	MLX5_IB_DBG_CC_RP_RATE_TO_SET_ON_FIRST_CNP,
	MLX5_IB_DBG_CC_RP_DCE_TCP_G,
	MLX5_IB_DBG_CC_RP_DCE_TCP_RTT,
	MLX5_IB_DBG_CC_RP_RATE_REDUCE_MONITOR_PERIOD,
	MLX5_IB_DBG_CC_RP_INITIAL_ALPHA_VALUE,
	MLX5_IB_DBG_CC_RP_GD,
	MLX5_IB_DBG_CC_NP_MIN_TIME_BETWEEN_CNPS,
	MLX5_IB_DBG_CC_NP_CNP_DSCP,
	MLX5_IB_DBG_CC_NP_CNP_PRIO_MODE,
	MLX5_IB_DBG_CC_NP_CNP_PRIO,

struct mlx5_ib_dbg_cc_params {

	struct mlx5_ib_dbg_param params[MLX5_IB_DBG_CC_MAX];
	MLX5_MAX_DELAY_DROP_TIMEOUT_MS = 100,

struct mlx5_ib_delay_drop {
	struct mlx5_ib_dev *dev;
	struct work_struct delay_drop_work;
	/* serialize setting of delay drop */

	struct dentry *dir_debugfs;
enum mlx5_ib_stages {

	MLX5_IB_STAGE_FLOW_DB,

	MLX5_IB_STAGE_NON_DEFAULT_CB,

	MLX5_IB_STAGE_DEVICE_RESOURCES,
	MLX5_IB_STAGE_DEVICE_NOTIFIER,

	MLX5_IB_STAGE_COUNTERS,
	MLX5_IB_STAGE_CONG_DEBUGFS,

	MLX5_IB_STAGE_PRE_IB_REG_UMR,
	MLX5_IB_STAGE_WHITELIST_UID,
	MLX5_IB_STAGE_IB_REG,
	MLX5_IB_STAGE_POST_IB_REG_UMR,
	MLX5_IB_STAGE_DELAY_DROP,
	MLX5_IB_STAGE_CLASS_ATTR,
struct mlx5_ib_stage {
	int (*init)(struct mlx5_ib_dev *dev);
	void (*cleanup)(struct mlx5_ib_dev *dev);

#define STAGE_CREATE(_stage, _init, _cleanup) \
	.stage[_stage] = {.init = _init, .cleanup = _cleanup}

struct mlx5_ib_profile {
	struct mlx5_ib_stage stage[MLX5_IB_STAGE_MAX];
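/*
 * Illustrative sketch (not from the original header): a profile is a table of
 * init/cleanup pairs indexed by stage, typically filled in with the
 * STAGE_CREATE() macro above. The stage name comes from the enum earlier in
 * this file; the init/cleanup functions shown are hypothetical.
 *
 *	static const struct mlx5_ib_profile example_profile = {
 *		STAGE_CREATE(MLX5_IB_STAGE_FLOW_DB,
 *			     example_init_flow_db, example_cleanup_flow_db),
 *	};
 */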
struct mlx5_ib_multiport_info {
	struct list_head list;
	struct mlx5_ib_dev *ibdev;
	struct mlx5_core_dev *mdev;
	struct notifier_block mdev_events;
	struct completion unref_comp;
struct mlx5_ib_flow_action {
	struct ib_flow_action ib_action;

	struct mlx5_accel_esp_xfrm *ctx;

	struct mlx5_ib_dev *dev;

	struct mlx5_modify_hdr *modify_hdr;
	struct mlx5_pkt_reformat *pkt_reformat;

	struct mlx5_core_dev *dev;
	/* This lock is used to protect the access to the shared
	 * allocation map when concurrent requests by different
	 * processes are handled.
	 */

	DECLARE_BITMAP(memic_alloc_pages, MLX5_MAX_MEMIC_PAGES);
struct mlx5_read_counters_attr {
	struct mlx5_fc *hw_cntrs_hndl;

enum mlx5_ib_counters_type {
	MLX5_IB_COUNTERS_FLOW,

struct mlx5_ib_mcounters {
	struct ib_counters ibcntrs;
	enum mlx5_ib_counters_type type;
	/* number of counters supported for this counters type */

	struct mlx5_fc *hw_cntrs_hndl;
	/* read function for this counters type */
	int (*read_counters)(struct ib_device *ibdev,
			     struct mlx5_read_counters_attr *read_attr);
	/* max index set as part of create_flow */

	/* number of counters data entries (<description,index> pair) */

	/* counters data array for descriptions and indexes */
	struct mlx5_ib_flow_counters_desc *counters_data;
	/* protects access to mcounters internal data */
	struct mutex mcntrs_mutex;
static inline struct mlx5_ib_mcounters *
to_mcounters(struct ib_counters *ibcntrs)
{
	return container_of(ibcntrs, struct mlx5_ib_mcounters, ibcntrs);
}

int parse_flow_flow_action(struct mlx5_ib_flow_action *maction,
			   struct mlx5_flow_act *action);
struct mlx5_ib_lb_state {
	/* protect the user_td */

struct mlx5_ib_pf_eq {
	struct notifier_block irq_nb;
	struct mlx5_ib_dev *dev;
	struct mlx5_eq *core;
	struct work_struct work;
	spinlock_t lock; /* Pagefaults spinlock */
	struct workqueue_struct *wq;
struct mlx5_devx_event_table {
	struct mlx5_nb devx_nb;
	/* serialize updating the event_xa */
	struct mutex event_xa_lock;
	struct xarray event_xa;

struct mlx5_var_table {
	/* serialize updating the bitmap */
	struct mutex bitmap_lock;
	unsigned long *bitmap;

	u64 num_var_hw_entries;
struct mlx5_ib_dev {
	struct ib_device ib_dev;
	struct mlx5_core_dev *mdev;
	struct notifier_block mdev_events;

	/* serialize update of capability mask
	 */
	struct mutex cap_mask_mutex;

	struct umr_common umrc;
	/* sync used page count stats
	 */
	struct mlx5_ib_resources devr;

	struct mlx5_mr_cache cache;
	struct timer_list delay_timer;
	/* Prevents soft lock on massive reg MRs */
	struct mutex slow_path_mutex;
	struct ib_odp_caps odp_caps;

	struct mlx5_ib_pf_eq odp_pf_eq;

	/*
	 * Sleepable RCU that prevents destruction of MRs while they are still
	 * being used by a page fault handler.
	 */
	struct srcu_struct odp_srcu;
	struct xarray odp_mkeys;

	struct mlx5_ib_flow_db *flow_db;
	/* protect resources needed as part of reset flow */
	spinlock_t reset_flow_resource_lock;
	struct list_head qp_list;
	/* Array with num_ports elements */
	struct mlx5_ib_port *port;
	struct mlx5_sq_bfreg bfreg;
	struct mlx5_sq_bfreg wc_bfreg;
	struct mlx5_sq_bfreg fp_bfreg;
	struct mlx5_ib_delay_drop delay_drop;
	const struct mlx5_ib_profile *profile;

	struct mlx5_ib_lb_state lb;

	struct list_head ib_dev_list;

	u16 devx_whitelist_uid;
	struct mlx5_srq_table srq_table;
	struct mlx5_async_ctx async_ctx;
	struct mlx5_devx_event_table devx_event_table;
	struct mlx5_var_table var_table;

	struct xarray sig_mrs;
static inline struct mlx5_ib_cq *to_mibcq(struct mlx5_core_cq *mcq)
{
	return container_of(mcq, struct mlx5_ib_cq, mcq);
}

static inline struct mlx5_ib_xrcd *to_mxrcd(struct ib_xrcd *ibxrcd)
{
	return container_of(ibxrcd, struct mlx5_ib_xrcd, ibxrcd);
}

static inline struct mlx5_ib_dev *to_mdev(struct ib_device *ibdev)
{
	return container_of(ibdev, struct mlx5_ib_dev, ib_dev);
}

static inline struct mlx5_ib_dev *mlx5_udata_to_mdev(struct ib_udata *udata)
{
	struct mlx5_ib_ucontext *context = rdma_udata_to_drv_context(
		udata, struct mlx5_ib_ucontext, ibucontext);

	return to_mdev(context->ibucontext.device);
}

static inline struct mlx5_ib_cq *to_mcq(struct ib_cq *ibcq)
{
	return container_of(ibcq, struct mlx5_ib_cq, ibcq);
}

static inline struct mlx5_ib_qp *to_mibqp(struct mlx5_core_qp *mqp)
{
	return container_of(mqp, struct mlx5_ib_qp_base, mqp)->container_mibqp;
}

static inline struct mlx5_ib_rwq *to_mibrwq(struct mlx5_core_qp *core_qp)
{
	return container_of(core_qp, struct mlx5_ib_rwq, core_qp);
}

static inline struct mlx5_ib_mr *to_mibmr(struct mlx5_core_mkey *mmkey)
{
	return container_of(mmkey, struct mlx5_ib_mr, mmkey);
}

static inline struct mlx5_ib_pd *to_mpd(struct ib_pd *ibpd)
{
	return container_of(ibpd, struct mlx5_ib_pd, ibpd);
}

static inline struct mlx5_ib_srq *to_msrq(struct ib_srq *ibsrq)
{
	return container_of(ibsrq, struct mlx5_ib_srq, ibsrq);
}

static inline struct mlx5_ib_qp *to_mqp(struct ib_qp *ibqp)
{
	return container_of(ibqp, struct mlx5_ib_qp, ibqp);
}

static inline struct mlx5_ib_rwq *to_mrwq(struct ib_wq *ibwq)
{
	return container_of(ibwq, struct mlx5_ib_rwq, ibwq);
}

static inline struct mlx5_ib_rwq_ind_table *to_mrwq_ind_table(struct ib_rwq_ind_table *ib_rwq_ind_tbl)
{
	return container_of(ib_rwq_ind_tbl, struct mlx5_ib_rwq_ind_table, ib_rwq_ind_tbl);
}

static inline struct mlx5_ib_srq *to_mibsrq(struct mlx5_core_srq *msrq)
{
	return container_of(msrq, struct mlx5_ib_srq, msrq);
}

static inline struct mlx5_ib_dm *to_mdm(struct ib_dm *ibdm)
{
	return container_of(ibdm, struct mlx5_ib_dm, ibdm);
}

static inline struct mlx5_ib_mr *to_mmr(struct ib_mr *ibmr)
{
	return container_of(ibmr, struct mlx5_ib_mr, ibmr);
}

static inline struct mlx5_ib_mw *to_mmw(struct ib_mw *ibmw)
{
	return container_of(ibmw, struct mlx5_ib_mw, ibmw);
}

static inline struct mlx5_ib_flow_action *
to_mflow_act(struct ib_flow_action *ibact)
{
	return container_of(ibact, struct mlx5_ib_flow_action, ib_action);
}

static inline struct mlx5_user_mmap_entry *
to_mmmap(struct rdma_user_mmap_entry *rdma_entry)
{
	return container_of(rdma_entry,
			    struct mlx5_user_mmap_entry, rdma_entry);
}
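/*
 * Illustrative usage sketch (not from the original header): the to_*()
 * wrappers above recover the driver-private structure from the uverbs object
 * handed to a verbs callback, e.g.
 *
 *	struct mlx5_ib_qp *qp = to_mqp(ibqp);
 *	struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
 */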
int mlx5_ib_db_map_user(struct mlx5_ib_ucontext *context,
			struct ib_udata *udata, unsigned long virt,
			struct mlx5_db *db);
void mlx5_ib_db_unmap_user(struct mlx5_ib_ucontext *context, struct mlx5_db *db);
void __mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq);
void mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq);
void mlx5_ib_free_srq_wqe(struct mlx5_ib_srq *srq, int wqe_index);
int mlx5_ib_create_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr, u32 flags,
		      struct ib_udata *udata);
int mlx5_ib_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr);
void mlx5_ib_destroy_ah(struct ib_ah *ah, u32 flags);
int mlx5_ib_create_srq(struct ib_srq *srq, struct ib_srq_init_attr *init_attr,
		       struct ib_udata *udata);
int mlx5_ib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
		       enum ib_srq_attr_mask attr_mask, struct ib_udata *udata);
int mlx5_ib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr);
void mlx5_ib_destroy_srq(struct ib_srq *srq, struct ib_udata *udata);
int mlx5_ib_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
			  const struct ib_recv_wr **bad_wr);
int mlx5_ib_enable_lb(struct mlx5_ib_dev *dev, bool td, bool qp);
void mlx5_ib_disable_lb(struct mlx5_ib_dev *dev, bool td, bool qp);
struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd,
				struct ib_qp_init_attr *init_attr,
				struct ib_udata *udata);
int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		      int attr_mask, struct ib_udata *udata);
int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask,
		     struct ib_qp_init_attr *qp_init_attr);
int mlx5_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata);
void mlx5_ib_drain_sq(struct ib_qp *qp);
void mlx5_ib_drain_rq(struct ib_qp *qp);
int mlx5_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
		      const struct ib_send_wr **bad_wr);
int mlx5_ib_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
		      const struct ib_recv_wr **bad_wr);
int mlx5_ib_read_wqe_sq(struct mlx5_ib_qp *qp, int wqe_index, void *buffer,
			size_t buflen, size_t *bc);
int mlx5_ib_read_wqe_rq(struct mlx5_ib_qp *qp, int wqe_index, void *buffer,
			size_t buflen, size_t *bc);
int mlx5_ib_read_wqe_srq(struct mlx5_ib_srq *srq, int wqe_index, void *buffer,
			 size_t buflen, size_t *bc);
int mlx5_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
		      struct ib_udata *udata);
void mlx5_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata);
int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
int mlx5_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
int mlx5_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period);
int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata);
struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc);
struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				  u64 virt_addr, int access_flags,
				  struct ib_udata *udata);
int mlx5_ib_advise_mr(struct ib_pd *pd,
		      enum ib_uverbs_advise_mr_advice advice,
		      struct ib_sge *sg_list,
		      struct uverbs_attr_bundle *attrs);
struct ib_mw *mlx5_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
			       struct ib_udata *udata);
int mlx5_ib_dealloc_mw(struct ib_mw *mw);
int mlx5_ib_update_xlt(struct mlx5_ib_mr *mr, u64 idx, int npages,
		       int page_shift, int flags);
struct mlx5_ib_mr *mlx5_ib_alloc_implicit_mr(struct mlx5_ib_pd *pd,
					     struct ib_udata *udata,
void mlx5_ib_free_implicit_mr(struct mlx5_ib_mr *mr);
void mlx5_ib_fence_odp_mr(struct mlx5_ib_mr *mr);
int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
			  u64 length, u64 virt_addr, int access_flags,
			  struct ib_pd *pd, struct ib_udata *udata);
int mlx5_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata);
struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
			       u32 max_num_sg, struct ib_udata *udata);
struct ib_mr *mlx5_ib_alloc_mr_integrity(struct ib_pd *pd,
					 u32 max_num_meta_sg);
int mlx5_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
		      unsigned int *sg_offset);
int mlx5_ib_map_mr_sg_pi(struct ib_mr *ibmr, struct scatterlist *data_sg,
			 int data_sg_nents, unsigned int *data_sg_offset,
			 struct scatterlist *meta_sg, int meta_sg_nents,
			 unsigned int *meta_sg_offset);
int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
			const struct ib_wc *in_wc, const struct ib_grh *in_grh,
			const struct ib_mad *in, struct ib_mad *out,
			size_t *out_mad_size, u16 *out_mad_pkey_index);
struct ib_xrcd *mlx5_ib_alloc_xrcd(struct ib_device *ibdev,
				   struct ib_udata *udata);
int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd, struct ib_udata *udata);
int mlx5_ib_get_buf_offset(u64 addr, int page_shift, u32 *offset);
int mlx5_query_ext_port_caps(struct mlx5_ib_dev *dev, u8 port);
int mlx5_query_mad_ifc_smp_attr_node_info(struct ib_device *ibdev,
					  struct ib_smp *out_mad);
int mlx5_query_mad_ifc_system_image_guid(struct ib_device *ibdev,
					 __be64 *sys_image_guid);
int mlx5_query_mad_ifc_max_pkeys(struct ib_device *ibdev,
int mlx5_query_mad_ifc_vendor_id(struct ib_device *ibdev,
int mlx5_query_mad_ifc_node_desc(struct mlx5_ib_dev *dev, char *node_desc);
int mlx5_query_mad_ifc_node_guid(struct mlx5_ib_dev *dev, __be64 *node_guid);
int mlx5_query_mad_ifc_pkey(struct ib_device *ibdev, u8 port, u16 index,
int mlx5_query_mad_ifc_gids(struct ib_device *ibdev, u8 port, int index,
int mlx5_query_mad_ifc_port(struct ib_device *ibdev, u8 port,
			    struct ib_port_attr *props);
int mlx5_ib_query_port(struct ib_device *ibdev, u8 port,
		       struct ib_port_attr *props);
int mlx5_ib_init_fmr(struct mlx5_ib_dev *dev);
void mlx5_ib_cleanup_fmr(struct mlx5_ib_dev *dev);
void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr,
			unsigned long max_page_shift,
			int *count, int *shift,
			int *ncont, int *order);
void __mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
			    int page_shift, size_t offset, size_t num_pages,
			    __be64 *pas, int access_flags);
void mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
			  int page_shift, __be64 *pas, int access_flags);
void mlx5_ib_copy_pas(u64 *old, u64 *new, int step, int num);
int mlx5_ib_get_cqe_size(struct ib_cq *ibcq);
int mlx5_mr_cache_init(struct mlx5_ib_dev *dev);
int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev);

struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev,
				       unsigned int entry);
void mlx5_mr_cache_free(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
int mlx5_mr_cache_invalidate(struct mlx5_ib_mr *mr);

int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask,
			    struct ib_mr_status *mr_status);
struct ib_wq *mlx5_ib_create_wq(struct ib_pd *pd,
				struct ib_wq_init_attr *init_attr,
				struct ib_udata *udata);
void mlx5_ib_destroy_wq(struct ib_wq *wq, struct ib_udata *udata);
int mlx5_ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr,
		      u32 wq_attr_mask, struct ib_udata *udata);
struct ib_rwq_ind_table *mlx5_ib_create_rwq_ind_table(struct ib_device *device,
						      struct ib_rwq_ind_table_init_attr *init_attr,
						      struct ib_udata *udata);
int mlx5_ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *wq_ind_table);
struct ib_dm *mlx5_ib_alloc_dm(struct ib_device *ibdev,
			       struct ib_ucontext *context,
			       struct ib_dm_alloc_attr *attr,
			       struct uverbs_attr_bundle *attrs);
int mlx5_ib_dealloc_dm(struct ib_dm *ibdm, struct uverbs_attr_bundle *attrs);
struct ib_mr *mlx5_ib_reg_dm_mr(struct ib_pd *pd, struct ib_dm *dm,
				struct ib_dm_mr_attr *attr,
				struct uverbs_attr_bundle *attrs);
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev);
int mlx5_ib_odp_init_one(struct mlx5_ib_dev *ibdev);
void mlx5_ib_odp_cleanup_one(struct mlx5_ib_dev *ibdev);
int __init mlx5_ib_odp_init(void);
void mlx5_ib_odp_cleanup(void);
void mlx5_odp_init_mr_cache_entry(struct mlx5_cache_ent *ent);
void mlx5_odp_populate_xlt(void *xlt, size_t idx, size_t nentries,
			   struct mlx5_ib_mr *mr, int flags);

int mlx5_ib_advise_mr_prefetch(struct ib_pd *pd,
			       enum ib_uverbs_advise_mr_advice advice,
			       u32 flags, struct ib_sge *sg_list, u32 num_sge);
#else /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */
static inline void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev) {}
static inline int mlx5_ib_odp_init_one(struct mlx5_ib_dev *ibdev) { return 0; }
static inline void mlx5_ib_odp_cleanup_one(struct mlx5_ib_dev *ibdev) {}
static inline int mlx5_ib_odp_init(void) { return 0; }
static inline void mlx5_ib_odp_cleanup(void) {}
static inline void mlx5_odp_init_mr_cache_entry(struct mlx5_cache_ent *ent) {}
static inline void mlx5_odp_populate_xlt(void *xlt, size_t idx, size_t nentries,
					 struct mlx5_ib_mr *mr, int flags) {}

static inline int
mlx5_ib_advise_mr_prefetch(struct ib_pd *pd,
			   enum ib_uverbs_advise_mr_advice advice, u32 flags,
			   struct ib_sge *sg_list, u32 num_sge)
{
	return -EOPNOTSUPP;
}
#endif /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */
extern const struct mmu_interval_notifier_ops mlx5_mn_ops;

/* Needed for rep profile */
void __mlx5_ib_remove(struct mlx5_ib_dev *dev,
		      const struct mlx5_ib_profile *profile,
void *__mlx5_ib_add(struct mlx5_ib_dev *dev,
		    const struct mlx5_ib_profile *profile);
int mlx5_ib_get_vf_config(struct ib_device *device, int vf,
			  u8 port, struct ifla_vf_info *info);
int mlx5_ib_set_vf_link_state(struct ib_device *device, int vf,
			      u8 port, int state);
int mlx5_ib_get_vf_stats(struct ib_device *device, int vf,
			 u8 port, struct ifla_vf_stats *stats);
int mlx5_ib_get_vf_guid(struct ib_device *device, int vf, u8 port,
			struct ifla_vf_guid *node_guid,
			struct ifla_vf_guid *port_guid);
int mlx5_ib_set_vf_guid(struct ib_device *device, int vf, u8 port,
			u64 guid, int type);

__be16 mlx5_get_roce_udp_sport(struct mlx5_ib_dev *dev,
			       const struct ib_gid_attr *attr);

void mlx5_ib_cleanup_cong_debugfs(struct mlx5_ib_dev *dev, u8 port_num);
void mlx5_ib_init_cong_debugfs(struct mlx5_ib_dev *dev, u8 port_num);
/* GSI QP helper functions */
struct ib_qp *mlx5_ib_gsi_create_qp(struct ib_pd *pd,
				    struct ib_qp_init_attr *init_attr);
int mlx5_ib_gsi_destroy_qp(struct ib_qp *qp);
int mlx5_ib_gsi_modify_qp(struct ib_qp *qp, struct ib_qp_attr *attr,
int mlx5_ib_gsi_query_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
			 struct ib_qp_init_attr *qp_init_attr);
int mlx5_ib_gsi_post_send(struct ib_qp *qp, const struct ib_send_wr *wr,
			  const struct ib_send_wr **bad_wr);
int mlx5_ib_gsi_post_recv(struct ib_qp *qp, const struct ib_recv_wr *wr,
			  const struct ib_recv_wr **bad_wr);
void mlx5_ib_gsi_pkey_change(struct mlx5_ib_gsi_qp *gsi);

int mlx5_ib_generate_wc(struct ib_cq *ibcq, struct ib_wc *wc);
void mlx5_ib_free_bfreg(struct mlx5_ib_dev *dev, struct mlx5_bfreg_info *bfregi,
struct mlx5_ib_dev *mlx5_ib_get_ibdev_from_mpi(struct mlx5_ib_multiport_info *mpi);
struct mlx5_core_dev *mlx5_ib_get_native_port_mdev(struct mlx5_ib_dev *dev,
						   u8 *native_port_num);
void mlx5_ib_put_native_port_mdev(struct mlx5_ib_dev *dev,
int mlx5_ib_fill_res_entry(struct sk_buff *msg,
			   struct rdma_restrack_entry *res);
int mlx5_ib_fill_stat_entry(struct sk_buff *msg,
			    struct rdma_restrack_entry *res);

extern const struct uapi_definition mlx5_ib_devx_defs[];
extern const struct uapi_definition mlx5_ib_flow_defs[];
extern const struct uapi_definition mlx5_ib_qos_defs[];
#if IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS)
int mlx5_ib_devx_create(struct mlx5_ib_dev *dev, bool is_user);
void mlx5_ib_devx_destroy(struct mlx5_ib_dev *dev, u16 uid);
void mlx5_ib_devx_init_event_table(struct mlx5_ib_dev *dev);
void mlx5_ib_devx_cleanup_event_table(struct mlx5_ib_dev *dev);
struct mlx5_ib_flow_handler *mlx5_ib_raw_fs_rule_add(
	struct mlx5_ib_dev *dev, struct mlx5_ib_flow_matcher *fs_matcher,
	struct mlx5_flow_context *flow_context,
	struct mlx5_flow_act *flow_act, u32 counter_id,
	void *cmd_in, int inlen, int dest_id, int dest_type);
bool mlx5_ib_devx_is_flow_dest(void *obj, int *dest_id, int *dest_type);
bool mlx5_ib_devx_is_flow_counter(void *obj, u32 offset, u32 *counter_id);
void mlx5_ib_destroy_flow_action_raw(struct mlx5_ib_flow_action *maction);
#else
static inline int
mlx5_ib_devx_create(struct mlx5_ib_dev *dev,
		    bool is_user) { return -EOPNOTSUPP; }
static inline void mlx5_ib_devx_destroy(struct mlx5_ib_dev *dev, u16 uid) {}
static inline void mlx5_ib_devx_init_event_table(struct mlx5_ib_dev *dev) {}
static inline void mlx5_ib_devx_cleanup_event_table(struct mlx5_ib_dev *dev) {}
static inline bool mlx5_ib_devx_is_flow_dest(void *obj, int *dest_id,
					     int *dest_type)
{
	return false;
}
static inline void
mlx5_ib_destroy_flow_action_raw(struct mlx5_ib_flow_action *maction)
{
}
#endif
static inline void init_query_mad(struct ib_smp *mad)
{
	mad->base_version = 1;
	mad->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED;
	mad->class_version = 1;
	mad->method = IB_MGMT_METHOD_GET;
}
static inline u8 convert_access(int acc)
{
	return (acc & IB_ACCESS_REMOTE_ATOMIC ? MLX5_PERM_ATOMIC       : 0) |
	       (acc & IB_ACCESS_REMOTE_WRITE  ? MLX5_PERM_REMOTE_WRITE : 0) |
	       (acc & IB_ACCESS_REMOTE_READ   ? MLX5_PERM_REMOTE_READ  : 0) |
	       (acc & IB_ACCESS_LOCAL_WRITE   ? MLX5_PERM_LOCAL_WRITE  : 0) |
	       MLX5_PERM_LOCAL_READ;
}
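/*
 * Illustrative example (not part of the original header): convert_access()
 * maps verbs access bits to the matching MLX5_PERM_* bits and always adds
 * local read. For instance:
 *
 *	u8 perms = convert_access(IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ);
 *	// perms == MLX5_PERM_LOCAL_WRITE | MLX5_PERM_REMOTE_READ | MLX5_PERM_LOCAL_READ
 */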
static inline int is_qp1(enum ib_qp_type qp_type)
{
	return qp_type == MLX5_IB_QPT_HW_GSI;
}
#define MLX5_MAX_UMR_SHIFT 16
#define MLX5_MAX_UMR_PAGES (1 << MLX5_MAX_UMR_SHIFT)
static inline u32 check_cq_create_flags(u32 flags)
{
	/*
	 * It returns non-zero value for unsupported CQ
	 * create flags, otherwise it returns zero.
	 */
	return (flags & ~(IB_UVERBS_CQ_FLAGS_IGNORE_OVERRUN |
			  IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION));
}
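/*
 * Illustrative usage sketch (not part of the original header): callers can
 * reject CQ creation when any unsupported flag bit survives the mask above.
 * "attr" is a hypothetical struct ib_cq_init_attr pointer.
 *
 *	if (check_cq_create_flags(attr->flags))
 *		return -EOPNOTSUPP;
 */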
static inline int verify_assign_uidx(u8 cqe_version, u32 cmd_uidx,
				     u32 *user_index)
{
	if (cqe_version) {
		if ((cmd_uidx == MLX5_IB_DEFAULT_UIDX) ||
		    (cmd_uidx & ~MLX5_USER_ASSIGNED_UIDX_MASK))
			return -EINVAL;
		*user_index = cmd_uidx;
	} else {
		*user_index = MLX5_IB_DEFAULT_UIDX;
	}

	return 0;
}
static inline int get_qp_user_index(struct mlx5_ib_ucontext *ucontext,
				    struct mlx5_ib_create_qp *ucmd,
				    int inlen, u32 *user_index)
{
	u8 cqe_version = ucontext->cqe_version;

	if ((offsetofend(typeof(*ucmd), uidx) <= inlen) && !cqe_version &&
	    (ucmd->uidx == MLX5_IB_DEFAULT_UIDX))
		return 0;

	if ((offsetofend(typeof(*ucmd), uidx) <= inlen) != !!cqe_version)
		return -EINVAL;

	return verify_assign_uidx(cqe_version, ucmd->uidx, user_index);
}
static inline int get_srq_user_index(struct mlx5_ib_ucontext *ucontext,
				     struct mlx5_ib_create_srq *ucmd,
				     int inlen, u32 *user_index)
{
	u8 cqe_version = ucontext->cqe_version;

	if ((offsetofend(typeof(*ucmd), uidx) <= inlen) && !cqe_version &&
	    (ucmd->uidx == MLX5_IB_DEFAULT_UIDX))
		return 0;

	if ((offsetofend(typeof(*ucmd), uidx) <= inlen) != !!cqe_version)
		return -EINVAL;

	return verify_assign_uidx(cqe_version, ucmd->uidx, user_index);
}
static inline int get_uars_per_sys_page(struct mlx5_ib_dev *dev, bool lib_support)
{
	return lib_support && MLX5_CAP_GEN(dev->mdev, uar_4k) ?
		MLX5_UARS_IN_PAGE : 1;
}

static inline int get_num_static_uars(struct mlx5_ib_dev *dev,
				      struct mlx5_bfreg_info *bfregi)
{
	return get_uars_per_sys_page(dev, bfregi->lib_uar_4k) * bfregi->num_static_sys_pages;
}
unsigned long mlx5_ib_get_xlt_emergency_page(void);
void mlx5_ib_put_xlt_emergency_page(void);

int bfregn_to_uar_index(struct mlx5_ib_dev *dev,
			struct mlx5_bfreg_info *bfregi, u32 bfregn,

int mlx5_ib_qp_set_counter(struct ib_qp *qp, struct rdma_counter *counter);
u16 mlx5_ib_get_counters_id(struct mlx5_ib_dev *dev, u8 port_num);
static inline bool mlx5_ib_can_use_umr(struct mlx5_ib_dev *dev,
				       bool do_modify_atomic, int access_flags)
{
	if (MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled))
		return false;

	if (do_modify_atomic &&
	    MLX5_CAP_GEN(dev->mdev, atomic) &&
	    MLX5_CAP_GEN(dev->mdev, umr_modify_atomic_disabled))
		return false;

	if (access_flags & IB_ACCESS_RELAXED_ORDERING &&
	    (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write) ||
	     MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read)))
		return false;

	return true;
}
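/*
 * Illustrative usage sketch (not from the original header): callers probe the
 * capability before choosing a UMR-based path, e.g.
 *
 *	if (!mlx5_ib_can_use_umr(dev, true, mr->access_flags))
 *		return -EOPNOTSUPP;	// caller falls back to a non-UMR path
 */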
int mlx5_ib_enable_driver(struct ib_device *dev);
int mlx5_ib_test_wc(struct mlx5_ib_dev *dev);
#endif /* MLX5_IB_H */