/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
 * Copyright (c) 2004 Mellanox Technologies Ltd.  All rights reserved.
 * Copyright (c) 2004 Infinicon Corporation.  All rights reserved.
 * Copyright (c) 2004, 2020 Intel Corporation.  All rights reserved.
 * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
 * Copyright (c) 2004 Voltaire Corporation.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems.  All rights reserved.
 */

#include <linux/ethtool.h>
#include <linux/types.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/rwsem.h>
#include <linux/workqueue.h>
#include <linux/irq_poll.h>
#include <uapi/linux/if_ether.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#include <linux/refcount.h>
#include <linux/if_link.h>
#include <linux/atomic.h>
#include <linux/mmu_notifier.h>
#include <linux/uaccess.h>
#include <linux/cgroup_rdma.h>
#include <linux/irqflags.h>
#include <linux/preempt.h>
#include <linux/dim.h>
#include <uapi/rdma/ib_user_verbs.h>
#include <rdma/rdma_counter.h>
#include <rdma/restrack.h>
#include <rdma/signature.h>
#include <uapi/rdma/rdma_user_ioctl.h>
#include <uapi/rdma/ib_user_ioctl_verbs.h>

#define IB_FW_VERSION_NAME_MAX	ETHTOOL_FWVERS_LEN
struct ib_usrq_object;

struct hw_stats_device_data;

extern struct workqueue_struct *ib_wq;
extern struct workqueue_struct *ib_comp_wq;
extern struct workqueue_struct *ib_comp_unbound_wq;
void ibdev_emerg(const struct ib_device *ibdev, const char *format, ...);
void ibdev_alert(const struct ib_device *ibdev, const char *format, ...);
void ibdev_crit(const struct ib_device *ibdev, const char *format, ...);
void ibdev_err(const struct ib_device *ibdev, const char *format, ...);
void ibdev_warn(const struct ib_device *ibdev, const char *format, ...);
void ibdev_notice(const struct ib_device *ibdev, const char *format, ...);
void ibdev_info(const struct ib_device *ibdev, const char *format, ...);

#if defined(CONFIG_DYNAMIC_DEBUG) || \
	(defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE))
#define ibdev_dbg(__dev, format, args...) \
	dynamic_ibdev_dbg(__dev, format, ##args)
#else
static inline
void ibdev_dbg(const struct ib_device *ibdev, const char *format, ...) {}
#endif

#define ibdev_level_ratelimited(ibdev_level, ibdev, fmt, ...)		\
do {									\
	static DEFINE_RATELIMIT_STATE(_rs,				\
				      DEFAULT_RATELIMIT_INTERVAL,	\
				      DEFAULT_RATELIMIT_BURST);		\
	if (__ratelimit(&_rs))						\
		ibdev_level(ibdev, fmt, ##__VA_ARGS__);			\
} while (0)

#define ibdev_emerg_ratelimited(ibdev, fmt, ...) \
	ibdev_level_ratelimited(ibdev_emerg, ibdev, fmt, ##__VA_ARGS__)
#define ibdev_alert_ratelimited(ibdev, fmt, ...) \
	ibdev_level_ratelimited(ibdev_alert, ibdev, fmt, ##__VA_ARGS__)
#define ibdev_crit_ratelimited(ibdev, fmt, ...) \
	ibdev_level_ratelimited(ibdev_crit, ibdev, fmt, ##__VA_ARGS__)
#define ibdev_err_ratelimited(ibdev, fmt, ...) \
	ibdev_level_ratelimited(ibdev_err, ibdev, fmt, ##__VA_ARGS__)
#define ibdev_warn_ratelimited(ibdev, fmt, ...) \
	ibdev_level_ratelimited(ibdev_warn, ibdev, fmt, ##__VA_ARGS__)
#define ibdev_notice_ratelimited(ibdev, fmt, ...) \
	ibdev_level_ratelimited(ibdev_notice, ibdev, fmt, ##__VA_ARGS__)
#define ibdev_info_ratelimited(ibdev, fmt, ...) \
	ibdev_level_ratelimited(ibdev_info, ibdev, fmt, ##__VA_ARGS__)

#if defined(CONFIG_DYNAMIC_DEBUG) || \
	(defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE))
/* descriptor check is first to prevent flooding with "callbacks suppressed" */
#define ibdev_dbg_ratelimited(ibdev, fmt, ...)				\
do {									\
	static DEFINE_RATELIMIT_STATE(_rs,				\
				      DEFAULT_RATELIMIT_INTERVAL,	\
				      DEFAULT_RATELIMIT_BURST);		\
	DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt);			\
	if (DYNAMIC_DEBUG_BRANCH(descriptor) && __ratelimit(&_rs))	\
		__dynamic_ibdev_dbg(&descriptor, ibdev, fmt,		\
				    ##__VA_ARGS__);			\
} while (0)
#else
__printf(2, 3) __cold
static inline
void ibdev_dbg_ratelimited(const struct ib_device *ibdev, const char *format, ...) {}
#endif
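/*
 * Illustrative sketch added by the editor (not part of the original header):
 * how a consumer might combine the plain and rate-limited ibdev_* helpers
 * above.  The function name and the error value are hypothetical; only the
 * logging macros themselves come from this header.
 */
static inline void example_report_hw_error(const struct ib_device *ibdev, int err)
{
	if (err)
		/* rate-limited so a wedged device cannot flood the kernel log */
		ibdev_err_ratelimited(ibdev, "hardware error %d\n", err);
	else
		ibdev_dbg(ibdev, "operation completed\n");
}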
	__be64	subnet_prefix;

extern union ib_gid zgid;

	IB_GID_TYPE_IB = IB_UVERBS_GID_TYPE_IB,
	IB_GID_TYPE_ROCE = IB_UVERBS_GID_TYPE_ROCE_V1,
	IB_GID_TYPE_ROCE_UDP_ENCAP = IB_UVERBS_GID_TYPE_ROCE_V2,

#define ROCE_V2_UDP_DPORT	4791

	struct net_device __rcu	*ndev;
	struct ib_device	*device;
	enum ib_gid_type	gid_type;

	/* set the local administered indication */
	IB_SA_WELL_KNOWN_GUID	= BIT_ULL(57) | 2,

enum rdma_transport_type {
	RDMA_TRANSPORT_IWARP,
	RDMA_TRANSPORT_USNIC,
	RDMA_TRANSPORT_USNIC_UDP,
	RDMA_TRANSPORT_UNSPECIFIED,

enum rdma_protocol_type {
	RDMA_PROTOCOL_USNIC_UDP

__attribute_const__ enum rdma_transport_type
rdma_node_get_transport(unsigned int node_type);
enum rdma_network_type {
	RDMA_NETWORK_ROCE_V1,

static inline enum ib_gid_type ib_network_to_gid_type(enum rdma_network_type network_type)
{
	if (network_type == RDMA_NETWORK_IPV4 ||
	    network_type == RDMA_NETWORK_IPV6)
		return IB_GID_TYPE_ROCE_UDP_ENCAP;
	else if (network_type == RDMA_NETWORK_ROCE_V1)
		return IB_GID_TYPE_ROCE;

	return IB_GID_TYPE_IB;
}

static inline enum rdma_network_type
rdma_gid_attr_network_type(const struct ib_gid_attr *attr)
{
	if (attr->gid_type == IB_GID_TYPE_IB)
		return RDMA_NETWORK_IB;

	if (attr->gid_type == IB_GID_TYPE_ROCE)
		return RDMA_NETWORK_ROCE_V1;

	if (ipv6_addr_v4mapped((struct in6_addr *)&attr->gid))
		return RDMA_NETWORK_IPV4;

	return RDMA_NETWORK_IPV6;
}
enum rdma_link_layer {
	IB_LINK_LAYER_UNSPECIFIED,
	IB_LINK_LAYER_INFINIBAND,
	IB_LINK_LAYER_ETHERNET,
};

enum ib_device_cap_flags {
	IB_DEVICE_RESIZE_MAX_WR = IB_UVERBS_DEVICE_RESIZE_MAX_WR,
	IB_DEVICE_BAD_PKEY_CNTR = IB_UVERBS_DEVICE_BAD_PKEY_CNTR,
	IB_DEVICE_BAD_QKEY_CNTR = IB_UVERBS_DEVICE_BAD_QKEY_CNTR,
	IB_DEVICE_RAW_MULTI = IB_UVERBS_DEVICE_RAW_MULTI,
	IB_DEVICE_AUTO_PATH_MIG = IB_UVERBS_DEVICE_AUTO_PATH_MIG,
	IB_DEVICE_CHANGE_PHY_PORT = IB_UVERBS_DEVICE_CHANGE_PHY_PORT,
	IB_DEVICE_UD_AV_PORT_ENFORCE = IB_UVERBS_DEVICE_UD_AV_PORT_ENFORCE,
	IB_DEVICE_CURR_QP_STATE_MOD = IB_UVERBS_DEVICE_CURR_QP_STATE_MOD,
	IB_DEVICE_SHUTDOWN_PORT = IB_UVERBS_DEVICE_SHUTDOWN_PORT,
	/* IB_DEVICE_INIT_TYPE = IB_UVERBS_DEVICE_INIT_TYPE, (not in use) */
	IB_DEVICE_PORT_ACTIVE_EVENT = IB_UVERBS_DEVICE_PORT_ACTIVE_EVENT,
	IB_DEVICE_SYS_IMAGE_GUID = IB_UVERBS_DEVICE_SYS_IMAGE_GUID,
	IB_DEVICE_RC_RNR_NAK_GEN = IB_UVERBS_DEVICE_RC_RNR_NAK_GEN,
	IB_DEVICE_SRQ_RESIZE = IB_UVERBS_DEVICE_SRQ_RESIZE,
	IB_DEVICE_N_NOTIFY_CQ = IB_UVERBS_DEVICE_N_NOTIFY_CQ,

	/* Reserved, old SEND_W_INV = 1 << 16,*/
	IB_DEVICE_MEM_WINDOW = IB_UVERBS_DEVICE_MEM_WINDOW,
	/*
	 * Devices should set IB_DEVICE_UD_IP_SUM if they support
	 * insertion of UDP and TCP checksum on outgoing UD IPoIB
	 * messages and can verify the validity of checksum for
	 * incoming messages.  Setting this flag implies that the
	 * IPoIB driver may set NETIF_F_IP_CSUM for datagram mode.
	 */
	IB_DEVICE_UD_IP_CSUM = IB_UVERBS_DEVICE_UD_IP_CSUM,
	IB_DEVICE_XRC = IB_UVERBS_DEVICE_XRC,

	/*
	 * This device supports the IB "base memory management extension",
	 * which includes support for fast registrations (IB_WR_REG_MR,
	 * IB_WR_LOCAL_INV and IB_WR_SEND_WITH_INV verbs).  This flag should
	 * also be set by any iWarp device which must support FRs to comply
	 * to the iWarp verbs spec.  iWarp devices also support the
	 * IB_WR_RDMA_READ_WITH_INV verb for RDMA READs that invalidate the
	 */
	IB_DEVICE_MEM_MGT_EXTENSIONS = IB_UVERBS_DEVICE_MEM_MGT_EXTENSIONS,
	IB_DEVICE_MEM_WINDOW_TYPE_2A = IB_UVERBS_DEVICE_MEM_WINDOW_TYPE_2A,
	IB_DEVICE_MEM_WINDOW_TYPE_2B = IB_UVERBS_DEVICE_MEM_WINDOW_TYPE_2B,
	IB_DEVICE_RC_IP_CSUM = IB_UVERBS_DEVICE_RC_IP_CSUM,
	/* Deprecated. Please use IB_RAW_PACKET_CAP_IP_CSUM. */
	IB_DEVICE_RAW_IP_CSUM = IB_UVERBS_DEVICE_RAW_IP_CSUM,
	IB_DEVICE_MANAGED_FLOW_STEERING =
		IB_UVERBS_DEVICE_MANAGED_FLOW_STEERING,
	/* Deprecated. Please use IB_RAW_PACKET_CAP_SCATTER_FCS. */
	IB_DEVICE_RAW_SCATTER_FCS = IB_UVERBS_DEVICE_RAW_SCATTER_FCS,
	/* The device supports padding incoming writes to cacheline. */
	IB_DEVICE_PCI_WRITE_END_PADDING =
		IB_UVERBS_DEVICE_PCI_WRITE_END_PADDING,
	/* Placement type attributes */
	IB_DEVICE_FLUSH_GLOBAL = IB_UVERBS_DEVICE_FLUSH_GLOBAL,
	IB_DEVICE_FLUSH_PERSISTENT = IB_UVERBS_DEVICE_FLUSH_PERSISTENT,
	IB_DEVICE_ATOMIC_WRITE = IB_UVERBS_DEVICE_ATOMIC_WRITE,
};
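/*
 * Example usage (editor's sketch, not in the original header).  A ULP that
 * wants fast memory registration typically checks the capability bit in the
 * ib_device_attr it obtained from the core before using IB_WR_REG_MR:
 *
 *	if (!(attrs->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS))
 *		return -EOPNOTSUPP;	// fall back to a slower path
 *
 * "attrs" is assumed to be a struct ib_device_attr the caller already holds.
 */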
enum ib_kernel_cap_flags {
	/*
	 * This device supports a per-device lkey or stag that can be
	 * used without performing a memory registration for the local
	 * memory.  Note that ULPs should never check this flag, but
	 * instead use the local_dma_lkey flag in the ib_pd structure,
	 * which will always contain a usable lkey.
	 */
	IBK_LOCAL_DMA_LKEY = 1 << 0,
	/* IB_QP_CREATE_INTEGRITY_EN is supported to implement T10-PI */
	IBK_INTEGRITY_HANDOVER = 1 << 1,
	/* IB_ACCESS_ON_DEMAND is supported during reg_user_mr() */
	IBK_ON_DEMAND_PAGING = 1 << 2,
	/* IB_MR_TYPE_SG_GAPS is supported */
	IBK_SG_GAPS_REG = 1 << 3,
	/* Driver supports RDMA_NLDEV_CMD_DELLINK */
	IBK_ALLOW_USER_UNREG = 1 << 4,

	/* ipoib will use IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK */
	IBK_BLOCK_MULTICAST_LOOPBACK = 1 << 5,
	/* ipoib will use IB_QP_CREATE_IPOIB_UD_LSO for its QPs */
	/* ipoib will use the device ops:
	IBK_VIRTUAL_FUNCTION = 1 << 7,
	/* ipoib will use IB_QP_CREATE_NETDEV_USE for its QPs */
	IBK_RDMA_NETDEV_OPA = 1 << 8,
};
enum ib_odp_general_cap_bits {
	IB_ODP_SUPPORT = 1 << 0,
	IB_ODP_SUPPORT_IMPLICIT = 1 << 1,
};

enum ib_odp_transport_cap_bits {
	IB_ODP_SUPPORT_SEND	= 1 << 0,
	IB_ODP_SUPPORT_RECV	= 1 << 1,
	IB_ODP_SUPPORT_WRITE	= 1 << 2,
	IB_ODP_SUPPORT_READ	= 1 << 3,
	IB_ODP_SUPPORT_ATOMIC	= 1 << 4,
	IB_ODP_SUPPORT_SRQ_RECV	= 1 << 5,
};

	uint64_t general_caps;
	struct {
		uint32_t rc_odp_caps;
		uint32_t uc_odp_caps;
		uint32_t ud_odp_caps;
		uint32_t xrc_odp_caps;
	} per_transport_caps;

	/* Corresponding bit will be set if qp type from
	 * 'enum ib_qp_type' is supported, e.g.
	 * supported_qpts |= 1 << IB_QPT_UD
	 */
	u32 max_rwq_indirection_tables;
	u32 max_rwq_indirection_table_size;
enum ib_tm_cap_flags {
	/* Support tag matching with rendezvous offload for RC transport */
	IB_TM_CAP_RNDV_RC = 1 << 0,
};

	/* Max size of RNDV header */
	u32 max_rndv_hdr_size;
	/* Max number of entries in tag matching list */
	/* From enum ib_tm_cap_flags */
	/* Max number of outstanding list operations */
	/* Max number of SGE in tag matching entry */

struct ib_cq_init_attr {

enum ib_cq_attr_mask {
	IB_CQ_MODERATE = 1 << 0,
};

	u16 max_cq_moderation_count;
	u16 max_cq_moderation_period;

struct ib_dm_mr_attr {

struct ib_dm_alloc_attr {

struct ib_device_attr {
	__be64			sys_image_guid;
	u64			device_cap_flags;
	u64			kernel_cap_flags;
	int			max_qp_init_rd_atom;
	int			max_ee_init_rd_atom;
	enum ib_atomic_cap	atomic_cap;
	enum ib_atomic_cap	masked_atomic_cap;
	int			max_mcast_qp_attach;
	int			max_total_mcast_qp_attach;
	unsigned int		max_fast_reg_page_list_len;
	unsigned int		max_pi_fast_reg_page_list_len;
	u8			local_ca_ack_delay;
	struct ib_odp_caps	odp_caps;
	uint64_t		timestamp_mask;
	uint64_t		hca_core_clock; /* in KHZ */
	struct ib_rss_caps	rss_caps;
	u32			raw_packet_caps; /* Use ib_raw_packet_caps enum */
	struct ib_tm_caps	tm_caps;
	struct ib_cq_caps       cq_caps;
	/* Max entries for sgl for optimized performance per READ */
static inline int ib_mtu_enum_to_int(enum ib_mtu mtu)
{
	switch (mtu) {
	case IB_MTU_256:  return  256;
	case IB_MTU_512:  return  512;
	case IB_MTU_1024: return 1024;
	case IB_MTU_2048: return 2048;
	case IB_MTU_4096: return 4096;
	}
}

static inline enum ib_mtu ib_mtu_int_to_enum(int mtu)
	else if (mtu >= 2048)
	else if (mtu >= 1024)

static inline int opa_mtu_enum_to_int(enum opa_mtu mtu)
		return(ib_mtu_enum_to_int((enum ib_mtu)mtu));

static inline enum opa_mtu opa_mtu_int_to_enum(int mtu)
		return OPA_MTU_10240;
	else if (mtu >= 8192)
		return ((enum opa_mtu)ib_mtu_int_to_enum(mtu));
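/*
 * Editor's sketch (not in the original header): converting a negotiated path
 * MTU back and forth with the helpers above.  The wrapper name is
 * hypothetical; ib_mtu_int_to_enum() and ib_mtu_enum_to_int() are the real
 * helpers defined above.
 */
static inline int example_mtu_round_trip(int byte_mtu)
{
	enum ib_mtu mtu = ib_mtu_int_to_enum(byte_mtu);

	/* e.g. 3000 rounds down to IB_MTU_2048, which converts back to 2048 */
	return ib_mtu_enum_to_int(mtu);
}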
	IB_PORT_ACTIVE_DEFER = 5

enum ib_port_phys_state {
	IB_PORT_PHYS_STATE_SLEEP = 1,
	IB_PORT_PHYS_STATE_POLLING = 2,
	IB_PORT_PHYS_STATE_DISABLED = 3,
	IB_PORT_PHYS_STATE_PORT_CONFIGURATION_TRAINING = 4,
	IB_PORT_PHYS_STATE_LINK_UP = 5,
	IB_PORT_PHYS_STATE_LINK_ERROR_RECOVERY = 6,
	IB_PORT_PHYS_STATE_PHY_TEST = 7,
};

static inline int ib_width_enum_to_int(enum ib_port_width width)
{
	switch (width) {
	case IB_WIDTH_1X:  return  1;
	case IB_WIDTH_2X:  return  2;
	case IB_WIDTH_4X:  return  4;
	case IB_WIDTH_8X:  return  8;
	case IB_WIDTH_12X: return 12;
	}
}

	IB_STAT_FLAG_OPTIONAL = 1 << 0,
/**
 * struct rdma_stat_desc
 * @name - The name of the counter
 * @flags - Flags of the counter; For example, IB_STAT_FLAG_OPTIONAL
 * @priv - Driver private information; Core code should not use
 */
struct rdma_stat_desc {

/**
 * struct rdma_hw_stats
 * @lock - Mutex to protect parallel write access to lifespan and values
 *   of counters, which are 64bits and not guaranteed to be written
 *   atomically on 32bits systems.
 * @timestamp - Used by the core code to track when the last update was
 * @lifespan - Used by the core code to determine how old the counters
 *   should be before being updated again.  Stored in jiffies, defaults
 *   to 10 milliseconds, drivers can override the default by specifying
 *   their own value during their allocation routine.
 * @descs - Array of pointers to static descriptors used for the counters
 * @is_disabled - A bitmap to indicate each counter is currently disabled
 * @num_counters - How many hardware counters there are.  If name is
 *   shorter than this number, a kernel oops will result.  Driver authors
 *   are encouraged to leave BUILD_BUG_ON(ARRAY_SIZE(@name) < num_counters)
 *   in their code to prevent this.
 * @value - Array of u64 counters that are accessed by the sysfs code and
 *   filled in by the drivers get_stats routine
 */
struct rdma_hw_stats {
	struct mutex	lock; /* Protect lifespan and values[] */
	unsigned long	timestamp;
	unsigned long	lifespan;
	const struct rdma_stat_desc *descs;
	unsigned long	*is_disabled;
	u64		value[] __counted_by(num_counters);
};

#define RDMA_HW_STATS_DEFAULT_LIFESPAN 10

struct rdma_hw_stats *rdma_alloc_hw_stats_struct(
		const struct rdma_stat_desc *descs, int num_counters,
		unsigned long lifespan);

void rdma_free_hw_stats_struct(struct rdma_hw_stats *stats);
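/*
 * Editor's sketch (not in the original header): how a provider driver might
 * build its per-port stats object with rdma_alloc_hw_stats_struct().  The
 * counter names and the wrapper are hypothetical; the descriptor/num_counters
 * pairing follows the rdma_hw_stats documentation above (struct
 * rdma_stat_desc is assumed to carry the documented "name" field).
 */
static inline struct rdma_hw_stats *example_alloc_port_stats(void)
{
	static const struct rdma_stat_desc descs[] = {
		{ .name = "rx_packets" },
		{ .name = "tx_packets" },
	};

	/* lifespan left at the core default of 10 msec */
	return rdma_alloc_hw_stats_struct(descs, ARRAY_SIZE(descs),
					  RDMA_HW_STATS_DEFAULT_LIFESPAN);
}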
/*
 * Define bits for the various functionality this port needs to be supported by
 * the core.
 */
/* Management                           0x00000FFF */
#define RDMA_CORE_CAP_IB_MAD            0x00000001
#define RDMA_CORE_CAP_IB_SMI            0x00000002
#define RDMA_CORE_CAP_IB_CM             0x00000004
#define RDMA_CORE_CAP_IW_CM             0x00000008
#define RDMA_CORE_CAP_IB_SA             0x00000010
#define RDMA_CORE_CAP_OPA_MAD           0x00000020

/* Address format                       0x000FF000 */
#define RDMA_CORE_CAP_AF_IB             0x00001000
#define RDMA_CORE_CAP_ETH_AH            0x00002000
#define RDMA_CORE_CAP_OPA_AH            0x00004000
#define RDMA_CORE_CAP_IB_GRH_REQUIRED   0x00008000

/* Protocol                             0xFFF00000 */
#define RDMA_CORE_CAP_PROT_IB           0x00100000
#define RDMA_CORE_CAP_PROT_ROCE         0x00200000
#define RDMA_CORE_CAP_PROT_IWARP        0x00400000
#define RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP 0x00800000
#define RDMA_CORE_CAP_PROT_RAW_PACKET   0x01000000
#define RDMA_CORE_CAP_PROT_USNIC        0x02000000

#define RDMA_CORE_PORT_IB_GRH_REQUIRED (RDMA_CORE_CAP_IB_GRH_REQUIRED \
					| RDMA_CORE_CAP_PROT_ROCE     \
					| RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP)

#define RDMA_CORE_PORT_IBA_IB          (RDMA_CORE_CAP_PROT_IB  \
					| RDMA_CORE_CAP_IB_MAD \
					| RDMA_CORE_CAP_IB_SMI \
					| RDMA_CORE_CAP_IB_CM  \
					| RDMA_CORE_CAP_IB_SA  \
					| RDMA_CORE_CAP_AF_IB)
#define RDMA_CORE_PORT_IBA_ROCE        (RDMA_CORE_CAP_PROT_ROCE \
					| RDMA_CORE_CAP_IB_MAD  \
					| RDMA_CORE_CAP_IB_CM   \
					| RDMA_CORE_CAP_AF_IB   \
					| RDMA_CORE_CAP_ETH_AH)
#define RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP			\
					(RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP \
					| RDMA_CORE_CAP_IB_MAD  \
					| RDMA_CORE_CAP_IB_CM   \
					| RDMA_CORE_CAP_AF_IB   \
					| RDMA_CORE_CAP_ETH_AH)
#define RDMA_CORE_PORT_IWARP           (RDMA_CORE_CAP_PROT_IWARP \
					| RDMA_CORE_CAP_IW_CM)
#define RDMA_CORE_PORT_INTEL_OPA       (RDMA_CORE_PORT_IBA_IB  \
					| RDMA_CORE_CAP_OPA_MAD)

#define RDMA_CORE_PORT_RAW_PACKET	(RDMA_CORE_CAP_PROT_RAW_PACKET)

#define RDMA_CORE_PORT_USNIC		(RDMA_CORE_CAP_PROT_USNIC)
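/*
 * Example usage (editor's sketch, not in the original header).  A RoCE
 * provider typically advertises one of the composite masks above from its
 * get_port_immutable() callback, e.g.:
 *
 *	immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
 *
 * so that the core knows which MAD/CM/address-format services the port
 * supports.  The field name follows struct ib_port_immutable as used by
 * in-tree drivers.
 */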
struct ib_port_attr {
	enum ib_port_state	state;
	enum ib_mtu		active_mtu;
	unsigned int		ip_gids:1;
	/* This is the value from PortInfo CapabilityMask, defined by IBA */

enum ib_device_modify_flags {
	IB_DEVICE_MODIFY_SYS_IMAGE_GUID	= 1 << 0,
	IB_DEVICE_MODIFY_NODE_DESC	= 1 << 1
};

#define IB_DEVICE_NODE_DESC_MAX 64

struct ib_device_modify {
	char	node_desc[IB_DEVICE_NODE_DESC_MAX];
};

enum ib_port_modify_flags {
	IB_PORT_SHUTDOWN		= 1,
	IB_PORT_INIT_TYPE		= (1<<2),
	IB_PORT_RESET_QKEY_CNTR		= (1<<3),
	IB_PORT_OPA_MASK_CHG		= (1<<4)
};

struct ib_port_modify {
	u32	set_port_cap_mask;
	u32	clr_port_cap_mask;
	IB_EVENT_QP_ACCESS_ERR,
	IB_EVENT_PATH_MIG_ERR,
	IB_EVENT_DEVICE_FATAL,
	IB_EVENT_PORT_ACTIVE,
	IB_EVENT_PKEY_CHANGE,
	IB_EVENT_SRQ_LIMIT_REACHED,
	IB_EVENT_QP_LAST_WQE_REACHED,
	IB_EVENT_CLIENT_REREGISTER,

const char *__attribute_const__ ib_event_msg(enum ib_event_type event);

	struct ib_device	*device;
	enum ib_event_type	event;

struct ib_event_handler {
	struct ib_device *device;
	void            (*handler)(struct ib_event_handler *, struct ib_event *);
	struct list_head  list;
};

#define INIT_IB_EVENT_HANDLER(_ptr, _device, _handler)		\
	{							\
		(_ptr)->device  = _device;			\
		(_ptr)->handler = _handler;			\
		INIT_LIST_HEAD(&(_ptr)->list);			\
	}
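/*
 * Editor's sketch (not in the original header): initialising an async event
 * handler with INIT_IB_EVENT_HANDLER.  The wrapper is hypothetical;
 * registration is done afterwards with ib_register_event_handler(), which is
 * declared further down in the full header.
 */
static inline void example_init_event_handler(struct ib_event_handler *eh,
					      struct ib_device *device,
					      void (*cb)(struct ib_event_handler *,
							 struct ib_event *))
{
	INIT_IB_EVENT_HANDLER(eh, device, cb);
	/* followed by: ib_register_event_handler(eh); */
}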
struct ib_global_route {
	const struct ib_gid_attr *sgid_attr;

	__be32		version_tclass_flow;

union rdma_network_hdr {
	/* The IB spec states that if it's IPv4, the header
	 * is located in the last 20 bytes of the header.
	 */
	struct iphdr	roce4grh;
};

#define IB_QPN_MASK		0xFFFFFF

	IB_MULTICAST_QPN = 0xffffff

#define IB_LID_PERMISSIVE	cpu_to_be16(0xFFFF)
#define IB_MULTICAST_LID_BASE	cpu_to_be16(0xC000)
	IB_RATE_PORT_CURRENT = 0,
	IB_RATE_2_5_GBPS = 2,
	IB_RATE_120_GBPS = 10,
	IB_RATE_14_GBPS  = 11,
	IB_RATE_56_GBPS  = 12,
	IB_RATE_112_GBPS = 13,
	IB_RATE_168_GBPS = 14,
	IB_RATE_25_GBPS  = 15,
	IB_RATE_100_GBPS = 16,
	IB_RATE_200_GBPS = 17,
	IB_RATE_300_GBPS = 18,
	IB_RATE_28_GBPS  = 19,
	IB_RATE_50_GBPS  = 20,
	IB_RATE_400_GBPS = 21,
	IB_RATE_600_GBPS = 22,
	IB_RATE_800_GBPS = 23,

/**
 * ib_rate_to_mult - Convert the IB rate enum to a multiple of the
 * base rate of 2.5 Gbit/sec.  For example, IB_RATE_5_GBPS will be
 * converted to 2, since 5 Gbit/sec is 2 * 2.5 Gbit/sec.
 * @rate: rate to convert.
 */
__attribute_const__ int ib_rate_to_mult(enum ib_rate rate);

/**
 * ib_rate_to_mbps - Convert the IB rate enum to Mbps.
 * For example, IB_RATE_2_5_GBPS will be converted to 2500.
 * @rate: rate to convert.
 */
__attribute_const__ int ib_rate_to_mbps(enum ib_rate rate);
/**
 * enum ib_mr_type - memory region type
 * @IB_MR_TYPE_MEM_REG:       memory region that is used for
 *                            normal registration
 * @IB_MR_TYPE_SG_GAPS:       memory region that is capable to
 *                            register any arbitrary sg lists (without
 *                            the normal mr constraints - see
 * @IB_MR_TYPE_DM:            memory region that is used for device
 *                            memory registration
 * @IB_MR_TYPE_USER:          memory region that is used for the user-space
 * @IB_MR_TYPE_DMA:           memory region that is used for DMA operations
 *                            without address translations (VA=PA)
 * @IB_MR_TYPE_INTEGRITY:     memory region that is used for
 *                            data integrity operations
 */
	IB_MR_TYPE_INTEGRITY,

enum ib_mr_status_check {
	IB_MR_CHECK_SIG_STATUS = 1,
};

/**
 * struct ib_mr_status - Memory region status container
 *
 * @fail_status: Bitmask of MR checks status. For each
 *     failed check a corresponding status bit is set.
 * @sig_err: Additional info for IB_MR_CHECK_SIG_STATUS
 */
struct ib_mr_status {
	struct ib_sig_err     sig_err;
};

/**
 * mult_to_ib_rate - Convert a multiple of 2.5 Gbit/sec to an IB rate
 * @mult: multiple to convert.
 */
__attribute_const__ enum ib_rate mult_to_ib_rate(int mult);
struct rdma_ah_init_attr {
	struct rdma_ah_attr *ah_attr;
	struct net_device *xmit_slave;
};

enum rdma_ah_attr_type {
	RDMA_AH_ATTR_TYPE_UNDEFINED,
	RDMA_AH_ATTR_TYPE_IB,
	RDMA_AH_ATTR_TYPE_ROCE,
	RDMA_AH_ATTR_TYPE_OPA,
};

struct roce_ah_attr {

struct rdma_ah_attr {
	struct ib_global_route	grh;
	enum rdma_ah_attr_type	type;
	union {
		struct ib_ah_attr ib;
		struct roce_ah_attr roce;
		struct opa_ah_attr opa;
	};
};
	IB_WC_LOC_EEC_OP_ERR,
	IB_WC_LOC_ACCESS_ERR,
	IB_WC_REM_INV_REQ_ERR,
	IB_WC_REM_ACCESS_ERR,
	IB_WC_RNR_RETRY_EXC_ERR,
	IB_WC_LOC_RDD_VIOL_ERR,
	IB_WC_REM_INV_RD_REQ_ERR,
	IB_WC_INV_EEC_STATE_ERR,
	IB_WC_RESP_TIMEOUT_ERR,

const char *__attribute_const__ ib_wc_status_msg(enum ib_wc_status status);

	IB_WC_SEND = IB_UVERBS_WC_SEND,
	IB_WC_RDMA_WRITE = IB_UVERBS_WC_RDMA_WRITE,
	IB_WC_RDMA_READ = IB_UVERBS_WC_RDMA_READ,
	IB_WC_COMP_SWAP = IB_UVERBS_WC_COMP_SWAP,
	IB_WC_FETCH_ADD = IB_UVERBS_WC_FETCH_ADD,
	IB_WC_BIND_MW = IB_UVERBS_WC_BIND_MW,
	IB_WC_LOCAL_INV = IB_UVERBS_WC_LOCAL_INV,
	IB_WC_LSO = IB_UVERBS_WC_TSO,
	IB_WC_ATOMIC_WRITE = IB_UVERBS_WC_ATOMIC_WRITE,
	IB_WC_MASKED_COMP_SWAP,
	IB_WC_MASKED_FETCH_ADD,
	IB_WC_FLUSH = IB_UVERBS_WC_FLUSH,
	/*
	 * Set value of IB_WC_RECV so consumers can test if a completion is a
	 * receive by testing (opcode & IB_WC_RECV).
	 */
	IB_WC_RECV_RDMA_WITH_IMM

	IB_WC_WITH_IMM			= (1<<1),
	IB_WC_WITH_INVALIDATE		= (1<<2),
	IB_WC_IP_CSUM_OK		= (1<<3),
	IB_WC_WITH_SMAC			= (1<<4),
	IB_WC_WITH_VLAN			= (1<<5),
	IB_WC_WITH_NETWORK_HDR_TYPE	= (1<<6),
	struct ib_cqe		*wr_cqe;
	enum ib_wc_status	status;
	enum ib_wc_opcode	opcode;
	u32			invalidate_rkey;
	u32			port_num; /* valid only for DR SMPs on switches */
	u8			network_hdr_type;

enum ib_cq_notify_flags {
	IB_CQ_SOLICITED			= 1 << 0,
	IB_CQ_NEXT_COMP			= 1 << 1,
	IB_CQ_SOLICITED_MASK		= IB_CQ_SOLICITED | IB_CQ_NEXT_COMP,
	IB_CQ_REPORT_MISSED_EVENTS	= 1 << 2,
};

	IB_SRQT_BASIC = IB_UVERBS_SRQT_BASIC,
	IB_SRQT_XRC = IB_UVERBS_SRQT_XRC,
	IB_SRQT_TM = IB_UVERBS_SRQT_TM,

static inline bool ib_srq_has_cq(enum ib_srq_type srq_type)
{
	return srq_type == IB_SRQT_XRC ||
	       srq_type == IB_SRQT_TM;
}
enum ib_srq_attr_mask {
	IB_SRQ_MAX_WR	= 1 << 0,
	IB_SRQ_LIMIT	= 1 << 1,
};

struct ib_srq_attr {

struct ib_srq_init_attr {
	void		      (*event_handler)(struct ib_event *, void *);
	struct ib_srq_attr	attr;
	enum ib_srq_type	srq_type;
			struct ib_xrcd *xrcd;
	u32		max_inline_data;

	/*
	 * Maximum number of rdma_rw_ctx structures in flight at a time.
	 * ib_create_qp() will calculate the right amount of needed WRs
	 * and MRs based on this.
	 */
	/*
	 * IB_QPT_SMI and IB_QPT_GSI have to be the first two entries
	 * here (and in that order) since the MAD layer uses them as
	 * indices into a 2-entry table.
	 */
	IB_QPT_RC = IB_UVERBS_QPT_RC,
	IB_QPT_UC = IB_UVERBS_QPT_UC,
	IB_QPT_UD = IB_UVERBS_QPT_UD,
	IB_QPT_RAW_ETHERTYPE,
	IB_QPT_RAW_PACKET = IB_UVERBS_QPT_RAW_PACKET,
	IB_QPT_XRC_INI = IB_UVERBS_QPT_XRC_INI,
	IB_QPT_XRC_TGT = IB_UVERBS_QPT_XRC_TGT,
	IB_QPT_DRIVER = IB_UVERBS_QPT_DRIVER,
	/* Reserve a range for qp types internal to the low level driver.
	 * These qp types will not be visible at the IB core layer, so the
	 * IB_QPT_MAX usages should not be affected in the core layer
	 */
	IB_QPT_RESERVED1 = 0x1000,

enum ib_qp_create_flags {
	IB_QP_CREATE_IPOIB_UD_LSO		= 1 << 0,
	IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK	=
		IB_UVERBS_QP_CREATE_BLOCK_MULTICAST_LOOPBACK,
	IB_QP_CREATE_CROSS_CHANNEL		= 1 << 2,
	IB_QP_CREATE_MANAGED_SEND		= 1 << 3,
	IB_QP_CREATE_MANAGED_RECV		= 1 << 4,
	IB_QP_CREATE_NETIF_QP			= 1 << 5,
	IB_QP_CREATE_INTEGRITY_EN		= 1 << 6,
	IB_QP_CREATE_NETDEV_USE			= 1 << 7,
	IB_QP_CREATE_SCATTER_FCS		=
		IB_UVERBS_QP_CREATE_SCATTER_FCS,
	IB_QP_CREATE_CVLAN_STRIPPING		=
		IB_UVERBS_QP_CREATE_CVLAN_STRIPPING,
	IB_QP_CREATE_SOURCE_QPN			= 1 << 10,
	IB_QP_CREATE_PCI_WRITE_END_PADDING	=
		IB_UVERBS_QP_CREATE_PCI_WRITE_END_PADDING,
	/* reserve bits 26-31 for low level drivers' internal use */
	IB_QP_CREATE_RESERVED_START		= 1 << 26,
	IB_QP_CREATE_RESERVED_END		= 1 << 31,
};
/*
 * Note: users may not call ib_close_qp or ib_destroy_qp from the event_handler
 * callback to destroy the passed in QP.
 */
struct ib_qp_init_attr {
	/* This callback occurs in workqueue context */
	void                  (*event_handler)(struct ib_event *, void *);
	struct ib_cq	       *send_cq;
	struct ib_cq	       *recv_cq;
	struct ib_xrcd	       *xrcd;     /* XRC TGT QPs only */
	struct ib_qp_cap	cap;
	enum ib_sig_type	sq_sig_type;
	enum ib_qp_type		qp_type;

	/*
	 * Only needed for special QP types, or when using the RW API.
	 */
	struct ib_rwq_ind_table *rwq_ind_tbl;
};

struct ib_qp_open_attr {
	void                  (*event_handler)(struct ib_event *, void *);
	enum ib_qp_type		qp_type;
};
enum ib_rnr_timeout {
	IB_RNR_TIMER_655_36 =  0,
	IB_RNR_TIMER_000_01 =  1,
	IB_RNR_TIMER_000_02 =  2,
	IB_RNR_TIMER_000_03 =  3,
	IB_RNR_TIMER_000_04 =  4,
	IB_RNR_TIMER_000_06 =  5,
	IB_RNR_TIMER_000_08 =  6,
	IB_RNR_TIMER_000_12 =  7,
	IB_RNR_TIMER_000_16 =  8,
	IB_RNR_TIMER_000_24 =  9,
	IB_RNR_TIMER_000_32 = 10,
	IB_RNR_TIMER_000_48 = 11,
	IB_RNR_TIMER_000_64 = 12,
	IB_RNR_TIMER_000_96 = 13,
	IB_RNR_TIMER_001_28 = 14,
	IB_RNR_TIMER_001_92 = 15,
	IB_RNR_TIMER_002_56 = 16,
	IB_RNR_TIMER_003_84 = 17,
	IB_RNR_TIMER_005_12 = 18,
	IB_RNR_TIMER_007_68 = 19,
	IB_RNR_TIMER_010_24 = 20,
	IB_RNR_TIMER_015_36 = 21,
	IB_RNR_TIMER_020_48 = 22,
	IB_RNR_TIMER_030_72 = 23,
	IB_RNR_TIMER_040_96 = 24,
	IB_RNR_TIMER_061_44 = 25,
	IB_RNR_TIMER_081_92 = 26,
	IB_RNR_TIMER_122_88 = 27,
	IB_RNR_TIMER_163_84 = 28,
	IB_RNR_TIMER_245_76 = 29,
	IB_RNR_TIMER_327_68 = 30,
	IB_RNR_TIMER_491_52 = 31
};
enum ib_qp_attr_mask {
	IB_QP_CUR_STATE			= (1<<1),
	IB_QP_EN_SQD_ASYNC_NOTIFY	= (1<<2),
	IB_QP_ACCESS_FLAGS		= (1<<3),
	IB_QP_PKEY_INDEX		= (1<<4),
	IB_QP_PORT			= (1<<5),
	IB_QP_QKEY			= (1<<6),
	IB_QP_PATH_MTU			= (1<<8),
	IB_QP_TIMEOUT			= (1<<9),
	IB_QP_RETRY_CNT			= (1<<10),
	IB_QP_RNR_RETRY			= (1<<11),
	IB_QP_RQ_PSN			= (1<<12),
	IB_QP_MAX_QP_RD_ATOMIC		= (1<<13),
	IB_QP_ALT_PATH			= (1<<14),
	IB_QP_MIN_RNR_TIMER		= (1<<15),
	IB_QP_SQ_PSN			= (1<<16),
	IB_QP_MAX_DEST_RD_ATOMIC	= (1<<17),
	IB_QP_PATH_MIG_STATE		= (1<<18),
	IB_QP_CAP			= (1<<19),
	IB_QP_DEST_QPN			= (1<<20),
	IB_QP_RESERVED1			= (1<<21),
	IB_QP_RESERVED2			= (1<<22),
	IB_QP_RESERVED3			= (1<<23),
	IB_QP_RESERVED4			= (1<<24),
	IB_QP_RATE_LIMIT		= (1<<25),

	IB_QP_ATTR_STANDARD_BITS = GENMASK(20, 0),
};
	enum ib_qp_state	qp_state;
	enum ib_qp_state	cur_qp_state;
	enum ib_mtu		path_mtu;
	enum ib_mig_state	path_mig_state;
	int			qp_access_flags;
	struct ib_qp_cap	cap;
	struct rdma_ah_attr	ah_attr;
	struct rdma_ah_attr	alt_ah_attr;
	u8			en_sqd_async_notify;
	u8			max_dest_rd_atomic;
	struct net_device	*xmit_slave;
	/* These are shared with userspace */
	IB_WR_RDMA_WRITE = IB_UVERBS_WR_RDMA_WRITE,
	IB_WR_RDMA_WRITE_WITH_IMM = IB_UVERBS_WR_RDMA_WRITE_WITH_IMM,
	IB_WR_SEND = IB_UVERBS_WR_SEND,
	IB_WR_SEND_WITH_IMM = IB_UVERBS_WR_SEND_WITH_IMM,
	IB_WR_RDMA_READ = IB_UVERBS_WR_RDMA_READ,
	IB_WR_ATOMIC_CMP_AND_SWP = IB_UVERBS_WR_ATOMIC_CMP_AND_SWP,
	IB_WR_ATOMIC_FETCH_AND_ADD = IB_UVERBS_WR_ATOMIC_FETCH_AND_ADD,
	IB_WR_BIND_MW = IB_UVERBS_WR_BIND_MW,
	IB_WR_LSO = IB_UVERBS_WR_TSO,
	IB_WR_SEND_WITH_INV = IB_UVERBS_WR_SEND_WITH_INV,
	IB_WR_RDMA_READ_WITH_INV = IB_UVERBS_WR_RDMA_READ_WITH_INV,
	IB_WR_LOCAL_INV = IB_UVERBS_WR_LOCAL_INV,
	IB_WR_MASKED_ATOMIC_CMP_AND_SWP =
		IB_UVERBS_WR_MASKED_ATOMIC_CMP_AND_SWP,
	IB_WR_MASKED_ATOMIC_FETCH_AND_ADD =
		IB_UVERBS_WR_MASKED_ATOMIC_FETCH_AND_ADD,
	IB_WR_FLUSH = IB_UVERBS_WR_FLUSH,
	IB_WR_ATOMIC_WRITE = IB_UVERBS_WR_ATOMIC_WRITE,

	/* These are kernel only and can not be issued by userspace */
	IB_WR_REG_MR = 0x20,
	IB_WR_REG_MR_INTEGRITY,

	/* reserve values for low level drivers' internal use.
	 * These values will not be used at all in the ib core layer.
	 */
	IB_WR_RESERVED1 = 0xf0,
enum ib_send_flags {
	IB_SEND_SIGNALED	= (1<<1),
	IB_SEND_SOLICITED	= (1<<2),
	IB_SEND_INLINE		= (1<<3),
	IB_SEND_IP_CSUM		= (1<<4),

	/* reserve bits 26-31 for low level drivers' internal use */
	IB_SEND_RESERVED_START	= (1 << 26),
	IB_SEND_RESERVED_END	= (1 << 31),
};

	void (*done)(struct ib_cq *cq, struct ib_wc *wc);

	struct ib_send_wr      *next;
	struct ib_cqe		*wr_cqe;
	struct ib_sge	       *sg_list;
	enum ib_wr_opcode	opcode;
	u32			invalidate_rkey;

	struct ib_send_wr	wr;
static inline const struct ib_rdma_wr *rdma_wr(const struct ib_send_wr *wr)
{
	return container_of(wr, struct ib_rdma_wr, wr);
}

struct ib_atomic_wr {
	struct ib_send_wr	wr;
	u64			compare_add_mask;
};

static inline const struct ib_atomic_wr *atomic_wr(const struct ib_send_wr *wr)
{
	return container_of(wr, struct ib_atomic_wr, wr);
}

	struct ib_send_wr	wr;
	u16			pkey_index; /* valid for GSI only */
	u32			port_num;   /* valid for DR SMPs on switch only */

static inline const struct ib_ud_wr *ud_wr(const struct ib_send_wr *wr)
{
	return container_of(wr, struct ib_ud_wr, wr);
}

	struct ib_send_wr	wr;

static inline const struct ib_reg_wr *reg_wr(const struct ib_send_wr *wr)
{
	return container_of(wr, struct ib_reg_wr, wr);
}

	struct ib_recv_wr      *next;
	struct ib_cqe		*wr_cqe;
	struct ib_sge	       *sg_list;
enum ib_access_flags {
	IB_ACCESS_LOCAL_WRITE = IB_UVERBS_ACCESS_LOCAL_WRITE,
	IB_ACCESS_REMOTE_WRITE = IB_UVERBS_ACCESS_REMOTE_WRITE,
	IB_ACCESS_REMOTE_READ = IB_UVERBS_ACCESS_REMOTE_READ,
	IB_ACCESS_REMOTE_ATOMIC = IB_UVERBS_ACCESS_REMOTE_ATOMIC,
	IB_ACCESS_MW_BIND = IB_UVERBS_ACCESS_MW_BIND,
	IB_ZERO_BASED = IB_UVERBS_ACCESS_ZERO_BASED,
	IB_ACCESS_ON_DEMAND = IB_UVERBS_ACCESS_ON_DEMAND,
	IB_ACCESS_HUGETLB = IB_UVERBS_ACCESS_HUGETLB,
	IB_ACCESS_RELAXED_ORDERING = IB_UVERBS_ACCESS_RELAXED_ORDERING,
	IB_ACCESS_FLUSH_GLOBAL = IB_UVERBS_ACCESS_FLUSH_GLOBAL,
	IB_ACCESS_FLUSH_PERSISTENT = IB_UVERBS_ACCESS_FLUSH_PERSISTENT,

	IB_ACCESS_OPTIONAL = IB_UVERBS_ACCESS_OPTIONAL_RANGE,
	IB_ACCESS_SUPPORTED =
		((IB_ACCESS_FLUSH_PERSISTENT << 1) - 1) | IB_ACCESS_OPTIONAL,
};

/*
 * XXX: these are apparently used for ->rereg_user_mr, no idea why they
 * are hidden here instead of a uapi header!
 */
enum ib_mr_rereg_flags {
	IB_MR_REREG_TRANS	= 1,
	IB_MR_REREG_PD		= (1<<1),
	IB_MR_REREG_ACCESS	= (1<<2),
	IB_MR_REREG_SUPPORTED	= ((IB_MR_REREG_ACCESS << 1) - 1)
};
enum rdma_remove_reason {
	/*
	 * Userspace requested uobject deletion or initial try
	 * to remove uobject via cleanup. Call could fail
	 */
	RDMA_REMOVE_DESTROY,
	/* Context deletion. This call should delete the actual object itself */
	/* Driver is being hot-unplugged. This call should delete the actual object itself */
	RDMA_REMOVE_DRIVER_REMOVE,
	/* uobj is being cleaned-up before being committed */
	/* The driver failed to destroy the uobject and is being disconnected */
	RDMA_REMOVE_DRIVER_FAILURE,
struct ib_rdmacg_object {
#ifdef CONFIG_CGROUP_RDMA
	struct rdma_cgroup	*cg;		/* owner rdma cgroup */
#endif
};

struct ib_ucontext {
	struct ib_device       *device;
	struct ib_uverbs_file  *ufile;

	struct ib_rdmacg_object	cg_obj;
	/*
	 * Implementation details of the RDMA core, don't use in drivers:
	 */
	struct rdma_restrack_entry res;
	struct xarray mmap_xa;
};

	u64			user_handle;	/* handle given to us by userspace */
	/* ufile & ucontext owning this object */
	struct ib_uverbs_file  *ufile;
	/* FIXME, save memory: ufile->context == context */
	struct ib_ucontext     *context;	/* associated user context */
	void		       *object;		/* containing object */
	struct list_head	list;		/* link to context's list */
	struct ib_rdmacg_object	cg_obj;		/* rdmacg object */
	int			id;		/* index into kernel idr */
	atomic_t		usecnt;		/* protects exclusive access */
	struct rcu_head		rcu;		/* kfree_rcu() overhead */

	const struct uverbs_api_object *uapi_object;
	const void __user *inbuf;
	void __user *outbuf;

	struct ib_device       *device;
	struct ib_uobject      *uobject;
	atomic_t		usecnt; /* count all resources */

	u32			unsafe_global_rkey;

	/*
	 * Implementation details of the RDMA core, don't use in drivers:
	 */
	struct ib_mr	       *__internal_mr;
	struct rdma_restrack_entry res;

	struct ib_device       *device;
	atomic_t		usecnt; /* count all exposed resources */
	struct inode	       *inode;
	struct rw_semaphore	tgt_qps_rwsem;
	struct xarray		tgt_qps;

	struct ib_device	*device;
	struct ib_uobject	*uobject;
	const struct ib_gid_attr *sgid_attr;
	enum rdma_ah_attr_type	type;
typedef void (*ib_comp_handler)(struct ib_cq *cq, void *cq_context);

enum ib_poll_context {
	IB_POLL_SOFTIRQ,	   /* poll from softirq context */
	IB_POLL_WORKQUEUE,	   /* poll from workqueue */
	IB_POLL_UNBOUND_WORKQUEUE, /* poll from unbound workqueue */
	IB_POLL_LAST_POOL_TYPE = IB_POLL_UNBOUND_WORKQUEUE,

	IB_POLL_DIRECT,		   /* caller context, no hw completions */
};

	struct ib_device       *device;
	struct ib_ucq_object   *uobject;
	ib_comp_handler		comp_handler;
	void                  (*event_handler)(struct ib_event *, void *);
	unsigned int		cqe_used;
	atomic_t		usecnt; /* count number of work queues */
	enum ib_poll_context	poll_ctx;
	struct list_head        pool_entry;
	struct irq_poll		iop;
	struct work_struct	work;
	struct workqueue_struct *comp_wq;
	/* updated only by trace points */
	unsigned int		comp_vector;

	/*
	 * Implementation details of the RDMA core, don't use in drivers:
	 */
	struct rdma_restrack_entry res;
	struct ib_device       *device;
	struct ib_usrq_object  *uobject;
	void		      (*event_handler)(struct ib_event *, void *);
	enum ib_srq_type	srq_type;
			struct ib_xrcd *xrcd;
	/*
	 * Implementation details of the RDMA core, don't use in drivers:
	 */
	struct rdma_restrack_entry res;
enum ib_raw_packet_caps {
	/*
	 * Strip cvlan from incoming packet and report it in the matching work
	 * completion is supported.
	 */
	IB_RAW_PACKET_CAP_CVLAN_STRIPPING =
		IB_UVERBS_RAW_PACKET_CAP_CVLAN_STRIPPING,
	/*
	 * Scatter FCS field of an incoming packet to host memory is supported.
	 */
	IB_RAW_PACKET_CAP_SCATTER_FCS = IB_UVERBS_RAW_PACKET_CAP_SCATTER_FCS,
	/* Checksum offloads are supported (for both send and receive). */
	IB_RAW_PACKET_CAP_IP_CSUM = IB_UVERBS_RAW_PACKET_CAP_IP_CSUM,
	/*
	 * When a packet is received for an RQ with no receive WQEs, the
	 * packet processing is delayed.
	 */
	IB_RAW_PACKET_CAP_DELAY_DROP = IB_UVERBS_RAW_PACKET_CAP_DELAY_DROP,
};

	IB_WQT_RQ = IB_UVERBS_WQT_RQ,

	struct ib_device       *device;
	struct ib_uwq_object   *uobject;
	void		    (*event_handler)(struct ib_event *, void *);
	enum ib_wq_state       state;
	enum ib_wq_type	       wq_type;

	IB_WQ_FLAGS_CVLAN_STRIPPING	= IB_UVERBS_WQ_FLAGS_CVLAN_STRIPPING,
	IB_WQ_FLAGS_SCATTER_FCS		= IB_UVERBS_WQ_FLAGS_SCATTER_FCS,
	IB_WQ_FLAGS_DELAY_DROP		= IB_UVERBS_WQ_FLAGS_DELAY_DROP,
	IB_WQ_FLAGS_PCI_WRITE_END_PADDING =
		IB_UVERBS_WQ_FLAGS_PCI_WRITE_END_PADDING,
struct ib_wq_init_attr {
	enum ib_wq_type	wq_type;
	void		    (*event_handler)(struct ib_event *, void *);
	u32		create_flags; /* Use enum ib_wq_flags */
};

enum ib_wq_attr_mask {
	IB_WQ_STATE		= 1 << 0,
	IB_WQ_CUR_STATE		= 1 << 1,
	IB_WQ_FLAGS		= 1 << 2,
};

	enum	ib_wq_state	wq_state;
	enum	ib_wq_state	curr_wq_state;
	u32			flags; /* Use enum ib_wq_flags */
	u32			flags_mask; /* Use enum ib_wq_flags */
struct ib_rwq_ind_table {
	struct ib_device	*device;
	struct ib_uobject      *uobject;
	u32			log_ind_tbl_size;
	struct ib_wq	**ind_tbl;
};

struct ib_rwq_ind_table_init_attr {
	u32		log_ind_tbl_size;
	/* Each entry is a pointer to Receive Work Queue */
	struct ib_wq	**ind_tbl;
};

enum port_pkey_state {
	IB_PORT_PKEY_NOT_VALID = 0,
	IB_PORT_PKEY_VALID = 1,
	IB_PORT_PKEY_LISTED = 2,
struct ib_qp_security;

struct ib_port_pkey {
	enum port_pkey_state	state;
	struct list_head	qp_list;
	struct list_head	to_error_list;
	struct ib_qp_security  *sec;
};

struct ib_ports_pkeys {
	struct ib_port_pkey	main;
	struct ib_port_pkey	alt;
};

struct ib_qp_security {
	struct ib_device	*dev;
	/* Hold this mutex when changing port and pkey settings. */
	struct ib_ports_pkeys	*ports_pkeys;
	/* A list of all open shared QP handles.  Required to enforce security
	 * properly for all users of a shared QP.
	 */
	struct list_head        shared_qp_list;
	atomic_t		error_list_count;
	struct completion	error_complete;
	int			error_comps_pending;
/*
 * @max_write_sge: Maximum SGE elements per RDMA WRITE request.
 * @max_read_sge:  Maximum SGE elements per RDMA READ request.
 */
	struct ib_device       *device;
	struct ib_cq	       *send_cq;
	struct ib_cq	       *recv_cq;
	struct list_head	rdma_mrs;
	struct list_head	sig_mrs;
	struct completion	srq_completion;
	struct ib_xrcd	       *xrcd;		/* XRC TGT QPs only */
	struct list_head	xrcd_list;

	/* count times opened, mcast attaches, flow attaches */
	struct list_head	open_list;
	struct ib_qp           *real_qp;
	struct ib_uqp_object   *uobject;
	void                  (*event_handler)(struct ib_event *, void *);
	void                  (*registered_event_handler)(struct ib_event *, void *);
	/* sgid_attrs associated with the AV's */
	const struct ib_gid_attr *av_sgid_attr;
	const struct ib_gid_attr *alt_path_sgid_attr;
	enum ib_qp_type		qp_type;
	struct ib_rwq_ind_table *rwq_ind_tbl;
	struct ib_qp_security  *qp_sec;

	/*
	 * Implementation details of the RDMA core, don't use in drivers:
	 */
	struct rdma_restrack_entry     res;

	/* The counter the qp is bound to */
	struct rdma_counter    *counter;
	struct ib_device  *device;
	struct ib_uobject *uobject;

	struct ib_device  *device;
	unsigned int	   page_size;
	enum ib_mr_type	   type;
	struct ib_uobject	*uobject;	/* user */
	struct list_head	qp_entry;	/* FR */
	struct ib_sig_attrs     *sig_attrs; /* only for IB_MR_TYPE_INTEGRITY MRs */
	/*
	 * Implementation details of the RDMA core, don't use in drivers:
	 */
	struct rdma_restrack_entry res;

	struct ib_device	*device;
	struct ib_uobject	*uobject;
	enum ib_mw_type         type;
/* Supported steering options */
enum ib_flow_attr_type {
	/* steering according to rule specifications */
	IB_FLOW_ATTR_NORMAL		= 0x0,
	/* default unicast and multicast rule -
	 * receive all Eth traffic which isn't steered to any QP
	 */
	IB_FLOW_ATTR_ALL_DEFAULT	= 0x1,
	/* default multicast rule -
	 * receive all Eth multicast traffic which isn't steered to any QP
	 */
	IB_FLOW_ATTR_MC_DEFAULT		= 0x2,
	/* sniffer rule - receive all port traffic */
	IB_FLOW_ATTR_SNIFFER		= 0x3
};

/* Supported steering header types */
enum ib_flow_spec_type {
	IB_FLOW_SPEC_ETH		= 0x20,
	IB_FLOW_SPEC_IB			= 0x22,
	IB_FLOW_SPEC_IPV4		= 0x30,
	IB_FLOW_SPEC_IPV6		= 0x31,
	IB_FLOW_SPEC_ESP                = 0x34,
	IB_FLOW_SPEC_TCP		= 0x40,
	IB_FLOW_SPEC_UDP		= 0x41,
	IB_FLOW_SPEC_VXLAN_TUNNEL	= 0x50,
	IB_FLOW_SPEC_GRE		= 0x51,
	IB_FLOW_SPEC_MPLS		= 0x60,
	IB_FLOW_SPEC_INNER		= 0x100,
	IB_FLOW_SPEC_ACTION_TAG         = 0x1000,
	IB_FLOW_SPEC_ACTION_DROP        = 0x1001,
	IB_FLOW_SPEC_ACTION_HANDLE	= 0x1002,
	IB_FLOW_SPEC_ACTION_COUNT       = 0x1003,
};

#define IB_FLOW_SPEC_LAYER_MASK	0xF0
#define IB_FLOW_SPEC_SUPPORT_LAYERS 10
enum ib_flow_flags {
	IB_FLOW_ATTR_FLAGS_DONT_TRAP = 1UL << 1, /* Continue match, no steal */
	IB_FLOW_ATTR_FLAGS_EGRESS    = 1UL << 2, /* Egress flow */
	IB_FLOW_ATTR_FLAGS_RESERVED  = 1UL << 3  /* Must be last */
};

struct ib_flow_eth_filter {

struct ib_flow_spec_eth {
	struct ib_flow_eth_filter val;
	struct ib_flow_eth_filter mask;
};

struct ib_flow_ib_filter {

struct ib_flow_spec_ib {
	struct ib_flow_ib_filter val;
	struct ib_flow_ib_filter mask;
};

/* IPv4 header flags */
enum ib_ipv4_flags {
	IB_IPV4_DONT_FRAG = 0x2, /* Don't enable packet fragmentation */
	IB_IPV4_MORE_FRAG = 0X4  /* For All fragmented packets except the
				    last have this flag set */
};

struct ib_flow_ipv4_filter {

struct ib_flow_spec_ipv4 {
	struct ib_flow_ipv4_filter val;
	struct ib_flow_ipv4_filter mask;
};

struct ib_flow_ipv6_filter {

struct ib_flow_spec_ipv6 {
	struct ib_flow_ipv6_filter val;
	struct ib_flow_ipv6_filter mask;
};

struct ib_flow_tcp_udp_filter {

struct ib_flow_spec_tcp_udp {
	struct ib_flow_tcp_udp_filter val;
	struct ib_flow_tcp_udp_filter mask;
};

struct ib_flow_tunnel_filter {
/* ib_flow_spec_tunnel describes the Vxlan tunnel
 * the tunnel_id from val has the vni value
 */
struct ib_flow_spec_tunnel {
	struct ib_flow_tunnel_filter  val;
	struct ib_flow_tunnel_filter  mask;
};

struct ib_flow_esp_filter {

struct ib_flow_spec_esp {
	struct ib_flow_esp_filter val;
	struct ib_flow_esp_filter mask;
};

struct ib_flow_gre_filter {
	__be16 c_ks_res0_ver;

struct ib_flow_spec_gre {
	struct ib_flow_gre_filter val;
	struct ib_flow_gre_filter mask;
};

struct ib_flow_mpls_filter {

struct ib_flow_spec_mpls {
	struct ib_flow_mpls_filter val;
	struct ib_flow_mpls_filter mask;
};

struct ib_flow_spec_action_tag {
	enum ib_flow_spec_type	      type;
};

struct ib_flow_spec_action_drop {
	enum ib_flow_spec_type	      type;
};

struct ib_flow_spec_action_handle {
	enum ib_flow_spec_type	      type;
	struct ib_flow_action	     *act;
};

enum ib_counters_description {

struct ib_flow_spec_action_count {
	enum ib_flow_spec_type type;
	struct ib_counters *counters;
};
union ib_flow_spec {
	struct ib_flow_spec_eth		eth;
	struct ib_flow_spec_ib		ib;
	struct ib_flow_spec_ipv4        ipv4;
	struct ib_flow_spec_tcp_udp	tcp_udp;
	struct ib_flow_spec_ipv6        ipv6;
	struct ib_flow_spec_tunnel      tunnel;
	struct ib_flow_spec_esp		esp;
	struct ib_flow_spec_gre		gre;
	struct ib_flow_spec_mpls	mpls;
	struct ib_flow_spec_action_tag  flow_tag;
	struct ib_flow_spec_action_drop drop;
	struct ib_flow_spec_action_handle action;
	struct ib_flow_spec_action_count flow_count;
};

struct ib_flow_attr {
	enum ib_flow_attr_type type;
	union ib_flow_spec     flows[];
};

	struct ib_device	*device;
	struct ib_uobject	*uobject;
enum ib_flow_action_type {
	IB_FLOW_ACTION_UNSPECIFIED,
	IB_FLOW_ACTION_ESP = 1,
};

struct ib_flow_action_attrs_esp_keymats {
	enum ib_uverbs_flow_action_esp_keymat			protocol;
		struct ib_uverbs_flow_action_esp_keymat_aes_gcm	aes_gcm;
};

struct ib_flow_action_attrs_esp_replays {
	enum ib_uverbs_flow_action_esp_replay			protocol;
		struct ib_uverbs_flow_action_esp_replay_bmp	bmp;
};

enum ib_flow_action_attrs_esp_flags {
	/* All user-space flags at the top: Use enum ib_uverbs_flow_action_esp_flags
	 * This is done in order to share the same flags between user-space and
	 * kernel and spare an unnecessary translation.
	 */
	IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED	= 1ULL << 32,
	IB_FLOW_ACTION_ESP_FLAGS_MOD_ESP_ATTRS	= 1ULL << 33,
};

struct ib_flow_spec_list {
	struct ib_flow_spec_list	*next;
	union ib_flow_spec		spec;
};

struct ib_flow_action_attrs_esp {
	struct ib_flow_action_attrs_esp_keymats		*keymat;
	struct ib_flow_action_attrs_esp_replays		*replay;
	struct ib_flow_spec_list			*encap;
	/* Used only if IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED is enabled.
	 * Value of 0 is a valid value.
	 */
	/* Use enum ib_flow_action_attrs_esp_flags */
	u64						hard_limit_pkts;
};

struct ib_flow_action {
	struct ib_device		*device;
	struct ib_uobject		*uobject;
	enum ib_flow_action_type	type;
};
enum ib_process_mad_flags {
	IB_MAD_IGNORE_MKEY	= 1,
	IB_MAD_IGNORE_BKEY	= 2,
	IB_MAD_IGNORE_ALL	= IB_MAD_IGNORE_MKEY | IB_MAD_IGNORE_BKEY
};

enum ib_mad_result {
	IB_MAD_RESULT_FAILURE  = 0,      /* (!SUCCESS is the important flag) */
	IB_MAD_RESULT_SUCCESS  = 1 << 0, /* MAD was successfully processed   */
	IB_MAD_RESULT_REPLY    = 1 << 1, /* Reply packet needs to be sent    */
	IB_MAD_RESULT_CONSUMED = 1 << 2  /* Packet consumed: stop processing */
};

struct ib_port_cache {
	struct ib_pkey_cache  *pkey;
	struct ib_gid_table   *gid;
	enum ib_port_state     port_state;
	enum ib_port_state     last_port_state;
};

struct ib_port_immutable {

struct ib_port_data {
	struct ib_device *ib_dev;

	struct ib_port_immutable immutable;

	spinlock_t pkey_list_lock;

	spinlock_t netdev_lock;

	struct list_head pkey_list;

	struct ib_port_cache cache;

	struct net_device __rcu *netdev;
	netdevice_tracker netdev_tracker;
	struct hlist_node ndev_hash_link;
	struct rdma_port_counter port_counter;
	struct ib_port *sysfs;
};
/* rdma netdev type - specifies protocol type */
enum rdma_netdev_t {
	RDMA_NETDEV_OPA_VNIC,
};

/**
 * struct rdma_netdev - rdma netdev
 * For cases where netstack interfacing is required.
 */
struct rdma_netdev {
	struct ib_device  *hca;

	/*
	 * cleanup function must be specified.
	 * FIXME: This is only used for OPA_VNIC and that usage should be
	 */
	void (*free_rdma_netdev)(struct net_device *netdev);

	/* control functions */
	void (*set_id)(struct net_device *netdev, int id);
	int (*send)(struct net_device *dev, struct sk_buff *skb,
		    struct ib_ah *address, u32 dqpn);
	int (*attach_mcast)(struct net_device *dev, struct ib_device *hca,
			    union ib_gid *gid, u16 mlid,
			    int set_qkey, u32 qkey);
	int (*detach_mcast)(struct net_device *dev, struct ib_device *hca,
			    union ib_gid *gid, u16 mlid);
	void (*tx_timeout)(struct net_device *dev, unsigned int txqueue);
};
struct rdma_netdev_alloc_params {
	int (*initialize_rdma_netdev)(struct ib_device *device, u32 port_num,
				      struct net_device *netdev, void *param);
};

struct ib_odp_counters {
	atomic64_t faults_handled;
	atomic64_t invalidations;
	atomic64_t invalidations_handled;
	atomic64_t prefetch;
};
struct ib_counters {
	struct ib_device	*device;
	struct ib_uobject	*uobject;
	/* num of objects attached */
};

struct ib_counters_read_attr {
	u32	flags; /* use enum ib_read_counters_flags */
};

struct uverbs_attr_bundle;
struct iw_cm_conn_param;
#define INIT_RDMA_OBJ_SIZE(ib_struct, drv_struct, member)                      \
	.size_##ib_struct =                                                    \
		(sizeof(struct drv_struct) +                                   \
		 BUILD_BUG_ON_ZERO(offsetof(struct drv_struct, member)) +      \
		 BUILD_BUG_ON_ZERO(                                            \
			 !__same_type(((struct drv_struct *)NULL)->member,     \

#define rdma_zalloc_drv_obj_gfp(ib_dev, ib_type, gfp)                          \
	((struct ib_type *)rdma_zalloc_obj(ib_dev, ib_dev->ops.size_##ib_type, \

#define rdma_zalloc_drv_obj_numa(ib_dev, ib_type)                              \
	((struct ib_type *)rdma_zalloc_obj(ib_dev, ib_dev->ops.size_##ib_type, \

#define rdma_zalloc_drv_obj(ib_dev, ib_type)                                   \
	rdma_zalloc_drv_obj_gfp(ib_dev, ib_type, GFP_KERNEL)

#define DECLARE_RDMA_OBJ_SIZE(ib_struct) size_t size_##ib_struct
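/*
 * Example usage (editor's sketch, not in the original header).  A provider
 * embeds its private PD struct around the core's ib_pd and reports the
 * combined size through its ib_device_ops so the core can do the allocation:
 *
 *	struct my_pd {
 *		struct ib_pd ibpd;	// must be the first member
 *		u32 pdn;
 *	};
 *
 *	static const struct ib_device_ops my_dev_ops = {
 *		...
 *		INIT_RDMA_OBJ_SIZE(ib_pd, my_pd, ibpd),
 *	};
 *
 * "my_pd", "pdn" and "my_dev_ops" are hypothetical names; the macro and the
 * first-member requirement (enforced by the BUILD_BUG_ON_ZERO checks) are
 * from this header.
 */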
struct rdma_user_mmap_entry {
	struct ib_ucontext *ucontext;
	unsigned long start_pgoff;
	bool driver_removed;
};

/* Return the offset (in bytes) the user should pass to libc's mmap() */
static inline u64
rdma_user_mmap_get_offset(const struct rdma_user_mmap_entry *entry)
{
	return (u64)entry->start_pgoff << PAGE_SHIFT;
}
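/*
 * Example usage (editor's sketch, not in the original header).  A driver that
 * inserts an entry into mmap_xa hands the value computed by
 * rdma_user_mmap_get_offset() to its user-space counterpart, which then maps
 * the region with an ordinary mmap() on the uverbs file descriptor:
 *
 *	offset = rdma_user_mmap_get_offset(entry);    // kernel side
 *	ptr = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *		   MAP_SHARED, cmd_fd, offset);       // user side
 *
 * "entry", "len" and "cmd_fd" are placeholders for driver/provider state.
 */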
/**
 * struct ib_device_ops - InfiniBand device operations
 * This structure defines all the InfiniBand device operations, providers will
 * need to define the supported operations, otherwise they will be set to null.
 */
struct ib_device_ops {
	struct module *owner;
	enum rdma_driver_id driver_id;
	unsigned int uverbs_no_driver_id_binding:1;

	/*
	 * NOTE: New drivers should not make use of device_group; instead new
	 * device parameter should be exposed via netlink command. This
	 * mechanism exists only for existing drivers.
	 */
	const struct attribute_group *device_group;
	const struct attribute_group **port_groups;
	int (*post_send)(struct ib_qp *qp, const struct ib_send_wr *send_wr,
			 const struct ib_send_wr **bad_send_wr);
	int (*post_recv)(struct ib_qp *qp, const struct ib_recv_wr *recv_wr,
			 const struct ib_recv_wr **bad_recv_wr);
	void (*drain_rq)(struct ib_qp *qp);
	void (*drain_sq)(struct ib_qp *qp);
	int (*poll_cq)(struct ib_cq *cq, int num_entries, struct ib_wc *wc);
	int (*peek_cq)(struct ib_cq *cq, int wc_cnt);
	int (*req_notify_cq)(struct ib_cq *cq, enum ib_cq_notify_flags flags);
	int (*post_srq_recv)(struct ib_srq *srq,
			     const struct ib_recv_wr *recv_wr,
			     const struct ib_recv_wr **bad_recv_wr);
	int (*process_mad)(struct ib_device *device, int process_mad_flags,
			   u32 port_num, const struct ib_wc *in_wc,
			   const struct ib_grh *in_grh,
			   const struct ib_mad *in_mad, struct ib_mad *out_mad,
			   size_t *out_mad_size, u16 *out_mad_pkey_index);
	int (*query_device)(struct ib_device *device,
			    struct ib_device_attr *device_attr,
			    struct ib_udata *udata);
	int (*modify_device)(struct ib_device *device, int device_modify_mask,
			     struct ib_device_modify *device_modify);
	void (*get_dev_fw_str)(struct ib_device *device, char *str);
	const struct cpumask *(*get_vector_affinity)(struct ib_device *ibdev,
	int (*query_port)(struct ib_device *device, u32 port_num,
			  struct ib_port_attr *port_attr);
	int (*modify_port)(struct ib_device *device, u32 port_num,
			   int port_modify_mask,
			   struct ib_port_modify *port_modify);
	/*
	 * The following mandatory functions are used only at device
	 * registration.  Keep functions such as these at the end of this
	 * structure to avoid cache line misses when accessing struct ib_device
	 */
	int (*get_port_immutable)(struct ib_device *device, u32 port_num,
				  struct ib_port_immutable *immutable);
	enum rdma_link_layer (*get_link_layer)(struct ib_device *device,
	/*
	 * When calling get_netdev, the HW vendor's driver should return the
	 * net device of device @device at port @port_num or NULL if such
	 * a net device doesn't exist. The vendor driver should call dev_hold
	 * on this net device. The HW vendor's device driver must guarantee
	 * that this function returns NULL before the net device has finished
	 * NETDEV_UNREGISTER state.
	 */
	struct net_device *(*get_netdev)(struct ib_device *device,
	/*
	 * rdma netdev operation
	 *
	 * Driver implementing alloc_rdma_netdev or rdma_netdev_get_params
	 * must return -EOPNOTSUPP if it doesn't support the specified type.
	 */
	struct net_device *(*alloc_rdma_netdev)(
		struct ib_device *device, u32 port_num, enum rdma_netdev_t type,
		const char *name, unsigned char name_assign_type,
		void (*setup)(struct net_device *));

	int (*rdma_netdev_get_params)(struct ib_device *device, u32 port_num,
				      enum rdma_netdev_t type,
				      struct rdma_netdev_alloc_params *params);
	/*
	 * query_gid should return the GID value for @device, when @port_num
	 * link layer is either IB or iWarp. It is a no-op if @port_num port
	 * is RoCE link layer.
	 */
	int (*query_gid)(struct ib_device *device, u32 port_num, int index,
	/*
	 * When calling add_gid, the HW vendor's driver should add the gid
	 * of device of port at gid index available at @attr. Meta-info of
	 * that gid (for example, the network device related to this gid) is
	 * available at @attr. @context allows the HW vendor driver to store
	 * extra information together with a GID entry. The HW vendor driver may
	 * allocate memory to contain this information and store it in @context
	 * when a new GID entry is written to. Params are consistent until the
	 * next call of add_gid or delete_gid. The function should return 0 on
	 * success or error otherwise. The function could be called
	 * concurrently for different ports. This function is only called when
	 * roce_gid_table is used.
	 */
	int (*add_gid)(const struct ib_gid_attr *attr, void **context);
	/*
	 * When calling del_gid, the HW vendor's driver should delete the
	 * gid of device @device at gid index gid_index of port port_num
	 * available in @attr.
	 * Upon the deletion of a GID entry, the HW vendor must free any
	 * allocated memory. The caller will clear @context afterwards.
	 * This function is only called when roce_gid_table is used.
	 */
	int (*del_gid)(const struct ib_gid_attr *attr, void **context);
	int (*query_pkey)(struct ib_device *device, u32 port_num, u16 index,
	int (*alloc_ucontext)(struct ib_ucontext *context,
			      struct ib_udata *udata);
	void (*dealloc_ucontext)(struct ib_ucontext *context);
	int (*mmap)(struct ib_ucontext *context, struct vm_area_struct *vma);
	/*
	 * This will be called once refcount of an entry in mmap_xa reaches
	 * zero. The type of the memory that was mapped may differ between
	 * entries and is opaque to the rdma_user_mmap interface.
	 * Therefore needs to be implemented by the driver in mmap_free.
	 */
	void (*mmap_free)(struct rdma_user_mmap_entry *entry);
	void (*disassociate_ucontext)(struct ib_ucontext *ibcontext);
	int (*alloc_pd)(struct ib_pd *pd, struct ib_udata *udata);
	int (*dealloc_pd)(struct ib_pd *pd, struct ib_udata *udata);
	int (*create_ah)(struct ib_ah *ah, struct rdma_ah_init_attr *attr,
			 struct ib_udata *udata);
	int (*create_user_ah)(struct ib_ah *ah, struct rdma_ah_init_attr *attr,
			      struct ib_udata *udata);
	int (*modify_ah)(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
	int (*query_ah)(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
	int (*destroy_ah)(struct ib_ah *ah, u32 flags);
	int (*create_srq)(struct ib_srq *srq,
			  struct ib_srq_init_attr *srq_init_attr,
			  struct ib_udata *udata);
	int (*modify_srq)(struct ib_srq *srq, struct ib_srq_attr *srq_attr,
			  enum ib_srq_attr_mask srq_attr_mask,
			  struct ib_udata *udata);
	int (*query_srq)(struct ib_srq *srq, struct ib_srq_attr *srq_attr);
	int (*destroy_srq)(struct ib_srq *srq, struct ib_udata *udata);
	int (*create_qp)(struct ib_qp *qp, struct ib_qp_init_attr *qp_init_attr,
			 struct ib_udata *udata);
	int (*modify_qp)(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
			 int qp_attr_mask, struct ib_udata *udata);
	int (*query_qp)(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
			int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr);
	int (*destroy_qp)(struct ib_qp *qp, struct ib_udata *udata);
	int (*create_cq)(struct ib_cq *cq, const struct ib_cq_init_attr *attr,
			 struct uverbs_attr_bundle *attrs);
	int (*modify_cq)(struct ib_cq *cq, u16 cq_count, u16 cq_period);
	int (*destroy_cq)(struct ib_cq *cq, struct ib_udata *udata);
	int (*resize_cq)(struct ib_cq *cq, int cqe, struct ib_udata *udata);
2472 struct ib_mr
*(*get_dma_mr
)(struct ib_pd
*pd
, int mr_access_flags
);
2473 struct ib_mr
*(*reg_user_mr
)(struct ib_pd
*pd
, u64 start
, u64 length
,
2474 u64 virt_addr
, int mr_access_flags
,
2475 struct ib_udata
*udata
);
2476 struct ib_mr
*(*reg_user_mr_dmabuf
)(struct ib_pd
*pd
, u64 offset
,
2477 u64 length
, u64 virt_addr
, int fd
,
2478 int mr_access_flags
,
2479 struct uverbs_attr_bundle
*attrs
);
2480 struct ib_mr
*(*rereg_user_mr
)(struct ib_mr
*mr
, int flags
, u64 start
,
2481 u64 length
, u64 virt_addr
,
2482 int mr_access_flags
, struct ib_pd
*pd
,
2483 struct ib_udata
*udata
);
2484 int (*dereg_mr
)(struct ib_mr
*mr
, struct ib_udata
*udata
);
2485 struct ib_mr
*(*alloc_mr
)(struct ib_pd
*pd
, enum ib_mr_type mr_type
,
2487 struct ib_mr
*(*alloc_mr_integrity
)(struct ib_pd
*pd
,
2488 u32 max_num_data_sg
,
2489 u32 max_num_meta_sg
);
2490 int (*advise_mr
)(struct ib_pd
*pd
,
2491 enum ib_uverbs_advise_mr_advice advice
, u32 flags
,
2492 struct ib_sge
*sg_list
, u32 num_sge
,
2493 struct uverbs_attr_bundle
*attrs
);
	/*
	 * Kernel users should universally support relaxed ordering (RO), as
	 * they are designed to read data only after observing the CQE and use
	 * the DMA API correctly.
	 *
	 * Some drivers implicitly enable RO if platform supports it.
	 */
	int (*map_mr_sg)(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
			 unsigned int *sg_offset);
2504 int (*check_mr_status
)(struct ib_mr
*mr
, u32 check_mask
,
2505 struct ib_mr_status
*mr_status
);
2506 int (*alloc_mw
)(struct ib_mw
*mw
, struct ib_udata
*udata
);
2507 int (*dealloc_mw
)(struct ib_mw
*mw
);
2508 int (*attach_mcast
)(struct ib_qp
*qp
, union ib_gid
*gid
, u16 lid
);
2509 int (*detach_mcast
)(struct ib_qp
*qp
, union ib_gid
*gid
, u16 lid
);
2510 int (*alloc_xrcd
)(struct ib_xrcd
*xrcd
, struct ib_udata
*udata
);
2511 int (*dealloc_xrcd
)(struct ib_xrcd
*xrcd
, struct ib_udata
*udata
);
2512 struct ib_flow
*(*create_flow
)(struct ib_qp
*qp
,
2513 struct ib_flow_attr
*flow_attr
,
2514 struct ib_udata
*udata
);
2515 int (*destroy_flow
)(struct ib_flow
*flow_id
);
2516 int (*destroy_flow_action
)(struct ib_flow_action
*action
);
2517 int (*set_vf_link_state
)(struct ib_device
*device
, int vf
, u32 port
,
2519 int (*get_vf_config
)(struct ib_device
*device
, int vf
, u32 port
,
2520 struct ifla_vf_info
*ivf
);
2521 int (*get_vf_stats
)(struct ib_device
*device
, int vf
, u32 port
,
2522 struct ifla_vf_stats
*stats
);
2523 int (*get_vf_guid
)(struct ib_device
*device
, int vf
, u32 port
,
2524 struct ifla_vf_guid
*node_guid
,
2525 struct ifla_vf_guid
*port_guid
);
2526 int (*set_vf_guid
)(struct ib_device
*device
, int vf
, u32 port
, u64 guid
,
2528 struct ib_wq
*(*create_wq
)(struct ib_pd
*pd
,
2529 struct ib_wq_init_attr
*init_attr
,
2530 struct ib_udata
*udata
);
2531 int (*destroy_wq
)(struct ib_wq
*wq
, struct ib_udata
*udata
);
2532 int (*modify_wq
)(struct ib_wq
*wq
, struct ib_wq_attr
*attr
,
2533 u32 wq_attr_mask
, struct ib_udata
*udata
);
2534 int (*create_rwq_ind_table
)(struct ib_rwq_ind_table
*ib_rwq_ind_table
,
2535 struct ib_rwq_ind_table_init_attr
*init_attr
,
2536 struct ib_udata
*udata
);
2537 int (*destroy_rwq_ind_table
)(struct ib_rwq_ind_table
*wq_ind_table
);
2538 struct ib_dm
*(*alloc_dm
)(struct ib_device
*device
,
2539 struct ib_ucontext
*context
,
2540 struct ib_dm_alloc_attr
*attr
,
2541 struct uverbs_attr_bundle
*attrs
);
2542 int (*dealloc_dm
)(struct ib_dm
*dm
, struct uverbs_attr_bundle
*attrs
);
2543 struct ib_mr
*(*reg_dm_mr
)(struct ib_pd
*pd
, struct ib_dm
*dm
,
2544 struct ib_dm_mr_attr
*attr
,
2545 struct uverbs_attr_bundle
*attrs
);
2546 int (*create_counters
)(struct ib_counters
*counters
,
2547 struct uverbs_attr_bundle
*attrs
);
2548 int (*destroy_counters
)(struct ib_counters
*counters
);
2549 int (*read_counters
)(struct ib_counters
*counters
,
2550 struct ib_counters_read_attr
*counters_read_attr
,
2551 struct uverbs_attr_bundle
*attrs
);
2552 int (*map_mr_sg_pi
)(struct ib_mr
*mr
, struct scatterlist
*data_sg
,
2553 int data_sg_nents
, unsigned int *data_sg_offset
,
2554 struct scatterlist
*meta_sg
, int meta_sg_nents
,
2555 unsigned int *meta_sg_offset
);
	/**
	 * alloc_hw_[device,port]_stats - Allocate a struct rdma_hw_stats and
	 * fill in the driver initialized data. The struct is kfree()'ed by
	 * the sysfs core when the device is removed. A lifespan of -1 in the
	 * return struct tells the core to set a default lifespan.
	 */
	struct rdma_hw_stats *(*alloc_hw_device_stats)(struct ib_device *device);
	struct rdma_hw_stats *(*alloc_hw_port_stats)(struct ib_device *device,
						     u32 port_num);
	/**
	 * get_hw_stats - Fill in the counter value(s) in the stats struct.
	 * @index - The index in the value array we wish to have updated, or
	 *   num_counters if we want all stats updated
	 * Return codes -
	 *   < 0 - Error, no counters updated
	 *   index - Updated the single counter pointed to by index
	 *   num_counters - Updated all counters (will reset the timestamp
	 *     and prevent further calls for lifespan milliseconds)
	 * Drivers are allowed to update all counters in lieu of just the
	 *   one given in index at their option
	 */
	int (*get_hw_stats)(struct ib_device *device,
			    struct rdma_hw_stats *stats, u32 port, int index);
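	/*
	 * Illustrative sketch only: a get_hw_stats implementation that always
	 * refreshes every counter and reports that back to the core by
	 * returning stats->num_counters. The exdrv_* names are hypothetical.
	 *
	 *	static int exdrv_get_hw_stats(struct ib_device *ibdev,
	 *				      struct rdma_hw_stats *stats,
	 *				      u32 port_num, int index)
	 *	{
	 *		int i;
	 *
	 *		if (!port_num)
	 *			return -EINVAL;	// device-wide stats not supported
	 *		for (i = 0; i < stats->num_counters; i++)
	 *			stats->value[i] = exdrv_read_counter(ibdev,
	 *							     port_num, i);
	 *		return stats->num_counters;
	 *	}
	 */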
	/**
	 * modify_hw_stat - Modify the counter configuration
	 * @enable: true to enable the counter, false to disable it
	 * Return codes - 0 on success or error code otherwise.
	 */
	int (*modify_hw_stat)(struct ib_device *device, u32 port,
			      unsigned int counter_index, bool enable);
2589 * Allows rdma drivers to add their own restrack attributes.
2591 int (*fill_res_mr_entry
)(struct sk_buff
*msg
, struct ib_mr
*ibmr
);
2592 int (*fill_res_mr_entry_raw
)(struct sk_buff
*msg
, struct ib_mr
*ibmr
);
2593 int (*fill_res_cq_entry
)(struct sk_buff
*msg
, struct ib_cq
*ibcq
);
2594 int (*fill_res_cq_entry_raw
)(struct sk_buff
*msg
, struct ib_cq
*ibcq
);
2595 int (*fill_res_qp_entry
)(struct sk_buff
*msg
, struct ib_qp
*ibqp
);
2596 int (*fill_res_qp_entry_raw
)(struct sk_buff
*msg
, struct ib_qp
*ibqp
);
2597 int (*fill_res_cm_id_entry
)(struct sk_buff
*msg
, struct rdma_cm_id
*id
);
2598 int (*fill_res_srq_entry
)(struct sk_buff
*msg
, struct ib_srq
*ib_srq
);
2599 int (*fill_res_srq_entry_raw
)(struct sk_buff
*msg
, struct ib_srq
*ib_srq
);
2601 /* Device lifecycle callbacks */
2603 * Called after the device becomes registered, before clients are
2606 int (*enable_driver
)(struct ib_device
*dev
);
2608 * This is called as part of ib_dealloc_device().
2610 void (*dealloc_driver
)(struct ib_device
*dev
);
2612 /* iWarp CM callbacks */
2613 void (*iw_add_ref
)(struct ib_qp
*qp
);
2614 void (*iw_rem_ref
)(struct ib_qp
*qp
);
2615 struct ib_qp
*(*iw_get_qp
)(struct ib_device
*device
, int qpn
);
2616 int (*iw_connect
)(struct iw_cm_id
*cm_id
,
2617 struct iw_cm_conn_param
*conn_param
);
2618 int (*iw_accept
)(struct iw_cm_id
*cm_id
,
2619 struct iw_cm_conn_param
*conn_param
);
2620 int (*iw_reject
)(struct iw_cm_id
*cm_id
, const void *pdata
,
2622 int (*iw_create_listen
)(struct iw_cm_id
*cm_id
, int backlog
);
2623 int (*iw_destroy_listen
)(struct iw_cm_id
*cm_id
);
2625 * counter_bind_qp - Bind a QP to a counter.
2626 * @counter - The counter to be bound. If counter->id is zero then
2627 * the driver needs to allocate a new counter and set counter->id
2629 int (*counter_bind_qp
)(struct rdma_counter
*counter
, struct ib_qp
*qp
);
2631 * counter_unbind_qp - Unbind the qp from the dynamically-allocated
2632 * counter and bind it onto the default one
2634 int (*counter_unbind_qp
)(struct ib_qp
*qp
);
2636 * counter_dealloc -De-allocate the hw counter
2638 int (*counter_dealloc
)(struct rdma_counter
*counter
);
2640 * counter_alloc_stats - Allocate a struct rdma_hw_stats and fill in
2641 * the driver initialized data.
2643 struct rdma_hw_stats
*(*counter_alloc_stats
)(
2644 struct rdma_counter
*counter
);
2646 * counter_update_stats - Query the stats value of this counter
2648 int (*counter_update_stats
)(struct rdma_counter
*counter
);
2651 * Allows rdma drivers to add their own restrack attributes
2652 * dumped via 'rdma stat' iproute2 command.
2654 int (*fill_stat_mr_entry
)(struct sk_buff
*msg
, struct ib_mr
*ibmr
);
2656 /* query driver for its ucontext properties */
2657 int (*query_ucontext
)(struct ib_ucontext
*context
,
2658 struct uverbs_attr_bundle
*attrs
);
2661 * Provide NUMA node. This API exists for rdmavt/hfi1 only.
2662 * Everyone else relies on Linux memory management model.
2664 int (*get_numa_node
)(struct ib_device
*dev
);
2667 * add_sub_dev - Add a sub IB device
2669 struct ib_device
*(*add_sub_dev
)(struct ib_device
*parent
,
2670 enum rdma_nl_dev_type type
,
2674 * del_sub_dev - Delete a sub IB device
2676 void (*del_sub_dev
)(struct ib_device
*sub_dev
);
	/*
	 * ufile_hw_cleanup - Attempt to clean up uobjects' HW resources inside
	 * the ufile.
	 */
	void (*ufile_hw_cleanup)(struct ib_uverbs_file *ufile);
	/*
	 * report_port_event - Drivers need to implement this if they have
	 * driver-private state to update when the link status changes.
	 */
	void (*report_port_event)(struct ib_device *ibdev,
				  struct net_device *ndev, unsigned long event);
2691 DECLARE_RDMA_OBJ_SIZE(ib_ah
);
2692 DECLARE_RDMA_OBJ_SIZE(ib_counters
);
2693 DECLARE_RDMA_OBJ_SIZE(ib_cq
);
2694 DECLARE_RDMA_OBJ_SIZE(ib_mw
);
2695 DECLARE_RDMA_OBJ_SIZE(ib_pd
);
2696 DECLARE_RDMA_OBJ_SIZE(ib_qp
);
2697 DECLARE_RDMA_OBJ_SIZE(ib_rwq_ind_table
);
2698 DECLARE_RDMA_OBJ_SIZE(ib_srq
);
2699 DECLARE_RDMA_OBJ_SIZE(ib_ucontext
);
2700 DECLARE_RDMA_OBJ_SIZE(ib_xrcd
);
2703 struct ib_core_device
{
2704 /* device must be the first element in structure until,
2705 * union of ib_core_device and device exists in ib_device.
2708 possible_net_t rdma_net
;
2709 struct kobject
*ports_kobj
;
2710 struct list_head port_list
;
2711 struct ib_device
*owner
; /* reach back to owner ib_device */
2714 struct rdma_restrack_root
;
2716 /* Do not access @dma_device directly from ULP nor from HW drivers. */
2717 struct device
*dma_device
;
2718 struct ib_device_ops ops
;
2719 char name
[IB_DEVICE_NAME_MAX
];
2720 struct rcu_head rcu_head
;
2722 struct list_head event_handler_list
;
2723 /* Protects event_handler_list */
2724 struct rw_semaphore event_handler_rwsem
;
2726 /* Protects QP's event_handler calls and open_qp list */
2727 spinlock_t qp_open_list_lock
;
2729 struct rw_semaphore client_data_rwsem
;
2730 struct xarray client_data
;
2731 struct mutex unregistration_lock
;
2733 /* Synchronize GID, Pkey cache entries, subnet prefix, LMC */
2734 rwlock_t cache_lock
;
2736 * port_data is indexed by port number
2738 struct ib_port_data
*port_data
;
2740 int num_comp_vectors
;
2744 struct ib_core_device coredev
;
	/* First group is for device attributes,
	 * second group is for driver provided attributes (optional),
	 * third group is for the hw_stats.
	 * It is a NULL terminated array.
	 */
	const struct attribute_group *groups[4];
2754 u64 uverbs_cmd_mask
;
2756 char node_desc
[IB_DEVICE_NODE_DESC_MAX
];
2760 /* Indicates kernel verbs support, should not be used in drivers */
2761 u16 kverbs_provider
:1;
2762 /* CQ adaptive moderation (RDMA DIM) */
2766 struct ib_device_attr attrs
;
2767 struct hw_stats_device_data
*hw_stats_data
;
2769 #ifdef CONFIG_CGROUP_RDMA
2770 struct rdmacg_device cg_device
;
2775 spinlock_t cq_pools_lock
;
2776 struct list_head cq_pools
[IB_POLL_LAST_POOL_TYPE
+ 1];
2778 struct rdma_restrack_root
*res
;
2780 const struct uapi_definition
*driver_def
;
2783 * Positive refcount indicates that the device is currently
2784 * registered and cannot be unregistered.
2786 refcount_t refcount
;
2787 struct completion unreg_completion
;
2788 struct work_struct unregistration_work
;
2790 const struct rdma_link_ops
*link_ops
;
2792 /* Protects compat_devs xarray modifications */
2793 struct mutex compat_devs_mutex
;
2794 /* Maintains compat devices for each net namespace */
2795 struct xarray compat_devs
;
2797 /* Used by iWarp CM */
2798 char iw_ifname
[IFNAMSIZ
];
2799 u32 iw_driver_flags
;
2802 /* A parent device has a list of sub-devices */
2803 struct mutex subdev_lock
;
2804 struct list_head subdev_list_head
;
2806 /* A sub device has a type and a parent */
2807 enum rdma_nl_dev_type type
;
2808 struct ib_device
*parent
;
2809 struct list_head subdev_list
;
2811 enum rdma_nl_name_assign_type name_assign_type
;
static inline void *rdma_zalloc_obj(struct ib_device *dev, size_t size,
				    gfp_t gfp, bool is_numa_aware)
{
	if (is_numa_aware && dev->ops.get_numa_node)
		return kzalloc_node(size, gfp, dev->ops.get_numa_node(dev));

	return kzalloc(size, gfp);
}
2823 struct ib_client_nl_info
;
2826 int (*add
)(struct ib_device
*ibdev
);
2827 void (*remove
)(struct ib_device
*, void *client_data
);
2828 void (*rename
)(struct ib_device
*dev
, void *client_data
);
2829 int (*get_nl_info
)(struct ib_device
*ibdev
, void *client_data
,
2830 struct ib_client_nl_info
*res
);
2831 int (*get_global_nl_info
)(struct ib_client_nl_info
*res
);
2833 /* Returns the net_dev belonging to this ib_client and matching the
2835 * @dev: An RDMA device that the net_dev use for communication.
2836 * @port: A physical port number on the RDMA device.
2837 * @pkey: P_Key that the net_dev uses if applicable.
2838 * @gid: A GID that the net_dev uses to communicate.
2839 * @addr: An IP address the net_dev is configured with.
2840 * @client_data: The device's client data set by ib_set_client_data().
2842 * An ib_client that implements a net_dev on top of RDMA devices
2843 * (such as IP over IB) should implement this callback, allowing the
2844 * rdma_cm module to find the right net_dev for a given request.
2846 * The caller is responsible for calling dev_put on the returned
2848 struct net_device
*(*get_net_dev_by_params
)(
2849 struct ib_device
*dev
,
2852 const union ib_gid
*gid
,
2853 const struct sockaddr
*addr
,
2857 struct completion uses_zero
;
2860 /* kverbs are not required by the client */
/*
 * IB block DMA iterator
 *
 * Iterates the DMA-mapped SGL in contiguous memory blocks aligned
 * to a HW supported page size.
 */
struct ib_block_iter {
	/* internal states */
	struct scatterlist *__sg;	/* sg holding the current aligned block */
	dma_addr_t __dma_addr;		/* unaligned DMA address of this block */
	size_t __sg_numblocks;		/* ib_umem_num_dma_blocks() */
	unsigned int __sg_nents;	/* number of SG entries */
	unsigned int __sg_advance;	/* number of bytes to advance in sg in next step */
	unsigned int __pg_bit;		/* alignment of current block */
};
2880 struct ib_device
*_ib_alloc_device(size_t size
);
2881 #define ib_alloc_device(drv_struct, member) \
2882 container_of(_ib_alloc_device(sizeof(struct drv_struct) + \
2883 BUILD_BUG_ON_ZERO(offsetof( \
2884 struct drv_struct, member))), \
2885 struct drv_struct, member)
2887 void ib_dealloc_device(struct ib_device
*device
);
2889 void ib_get_device_fw_str(struct ib_device
*device
, char *str
);
2891 int ib_register_device(struct ib_device
*device
, const char *name
,
2892 struct device
*dma_device
);
2893 void ib_unregister_device(struct ib_device
*device
);
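/*
 * Illustrative sketch only: typical driver usage of the allocation and
 * registration helpers above. struct exdrv_device, exdrv_ops and the
 * "exdrv%d" name template are hypothetical.
 *
 *	struct exdrv_device {
 *		struct ib_device ibdev;	// must be embedded for ib_alloc_device()
 *		void __iomem *bar;
 *	};
 *
 *	static int exdrv_probe(struct device *dma_dev)
 *	{
 *		struct exdrv_device *edev;
 *		int ret;
 *
 *		edev = ib_alloc_device(exdrv_device, ibdev);
 *		if (!edev)
 *			return -ENOMEM;
 *		ib_set_device_ops(&edev->ibdev, &exdrv_ops);
 *		ret = ib_register_device(&edev->ibdev, "exdrv%d", dma_dev);
 *		if (ret)
 *			ib_dealloc_device(&edev->ibdev);
 *		return ret;
 *	}
 *
 *	static void exdrv_remove(struct exdrv_device *edev)
 *	{
 *		ib_unregister_device(&edev->ibdev);
 *		ib_dealloc_device(&edev->ibdev);
 *	}
 */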
2894 void ib_unregister_driver(enum rdma_driver_id driver_id
);
2895 void ib_unregister_device_and_put(struct ib_device
*device
);
2896 void ib_unregister_device_queued(struct ib_device
*ib_dev
);
2898 int ib_register_client (struct ib_client
*client
);
2899 void ib_unregister_client(struct ib_client
*client
);
void __rdma_block_iter_start(struct ib_block_iter *biter,
			     struct scatterlist *sglist,
			     unsigned int nents,
			     unsigned long pgsz);
bool __rdma_block_iter_next(struct ib_block_iter *biter);

/**
 * rdma_block_iter_dma_address - get the aligned dma address of the current
 * block held by the block iterator.
 * @biter: block iterator holding the memory block
 */
static inline dma_addr_t
rdma_block_iter_dma_address(struct ib_block_iter *biter)
{
	return biter->__dma_addr & ~(BIT_ULL(biter->__pg_bit) - 1);
}

/**
 * rdma_for_each_block - iterate over contiguous memory blocks of the sg list
 * @sglist: sglist to iterate over
 * @biter: block iterator holding the memory block
 * @nents: maximum number of sg entries to iterate over
 * @pgsz: best HW supported page size to use
 *
 * Callers may use rdma_block_iter_dma_address() to get each block's aligned
 * DMA address.
 */
#define rdma_for_each_block(sglist, biter, nents, pgsz)		\
	for (__rdma_block_iter_start(biter, sglist, nents,	\
				     pgsz);			\
	     __rdma_block_iter_next(biter);)
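/*
 * Illustrative sketch only: walking a DMA-mapped scatterlist in
 * device-page-sized blocks, e.g. to build an HW page list. The
 * exdrv_fill_page_list() name and the 4K block size are hypothetical
 * example choices.
 *
 *	static void exdrv_fill_page_list(struct scatterlist *sgl, int nents,
 *					 u64 *page_list)
 *	{
 *		struct ib_block_iter biter;
 *		int i = 0;
 *
 *		rdma_for_each_block(sgl, &biter, nents, SZ_4K)
 *			page_list[i++] = rdma_block_iter_dma_address(&biter);
 *	}
 */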
/**
 * ib_get_client_data - Get IB client context
 * @device: Device to get context for
 * @client: Client to get context for
 *
 * ib_get_client_data() returns the client context data set with
 * ib_set_client_data(). This can only be called while the client is
 * registered to the device; once the ib_client remove() callback returns this
 * data is NULL.
 */
static inline void *ib_get_client_data(struct ib_device *device,
				       struct ib_client *client)
{
	return xa_load(&device->client_data, client->client_id);
}
2948 void ib_set_client_data(struct ib_device
*device
, struct ib_client
*client
,
2950 void ib_set_device_ops(struct ib_device
*device
,
2951 const struct ib_device_ops
*ops
);
int rdma_user_mmap_io(struct ib_ucontext *ucontext, struct vm_area_struct *vma,
		      unsigned long pfn, unsigned long size, pgprot_t prot,
		      struct rdma_user_mmap_entry *entry);
int rdma_user_mmap_entry_insert(struct ib_ucontext *ucontext,
				struct rdma_user_mmap_entry *entry,
				size_t length);
int rdma_user_mmap_entry_insert_range(struct ib_ucontext *ucontext,
				      struct rdma_user_mmap_entry *entry,
				      size_t length, u32 min_pgoff,
				      u32 max_pgoff);
#if IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS)
void rdma_user_mmap_disassociate(struct ib_device *device);
#else
static inline void rdma_user_mmap_disassociate(struct ib_device *device)
{
}
#endif

static inline int
rdma_user_mmap_entry_insert_exact(struct ib_ucontext *ucontext,
				  struct rdma_user_mmap_entry *entry,
				  size_t length, u32 pgoff)
{
	return rdma_user_mmap_entry_insert_range(ucontext, entry, length, pgoff,
						 pgoff);
}
*
2982 rdma_user_mmap_entry_get_pgoff(struct ib_ucontext
*ucontext
,
2983 unsigned long pgoff
);
2984 struct rdma_user_mmap_entry
*
2985 rdma_user_mmap_entry_get(struct ib_ucontext
*ucontext
,
2986 struct vm_area_struct
*vma
);
2987 void rdma_user_mmap_entry_put(struct rdma_user_mmap_entry
*entry
);
2989 void rdma_user_mmap_entry_remove(struct rdma_user_mmap_entry
*entry
);
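/*
 * Illustrative sketch only: a driver ->mmap() implementation that looks up
 * the entry previously inserted with rdma_user_mmap_entry_insert() and maps
 * the backing PFN for it. struct exdrv naming and exdrv_entry_to_pfn() are
 * hypothetical.
 *
 *	static int exdrv_mmap(struct ib_ucontext *ucontext,
 *			      struct vm_area_struct *vma)
 *	{
 *		struct rdma_user_mmap_entry *entry;
 *		int ret;
 *
 *		entry = rdma_user_mmap_entry_get(ucontext, vma);
 *		if (!entry)
 *			return -EINVAL;
 *
 *		ret = rdma_user_mmap_io(ucontext, vma,
 *					exdrv_entry_to_pfn(entry),
 *					vma->vm_end - vma->vm_start,
 *					pgprot_noncached(vma->vm_page_prot),
 *					entry);
 *		rdma_user_mmap_entry_put(entry);
 *		return ret;
 *	}
 */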
static inline int ib_copy_from_udata(void *dest, struct ib_udata *udata, size_t len)
{
	return copy_from_user(dest, udata->inbuf, len) ? -EFAULT : 0;
}

static inline int ib_copy_to_udata(struct ib_udata *udata, void *src, size_t len)
{
	return copy_to_user(udata->outbuf, src, len) ? -EFAULT : 0;
}
static inline bool ib_is_buffer_cleared(const void __user *p,
					size_t len)
{
	bool ret;
	u8 *buf;

	if (len > USHRT_MAX)
		return false;

	buf = memdup_user(p, len);
	if (IS_ERR(buf))
		return false;

	ret = !memchr_inv(buf, 0, len);
	kfree(buf);
	return ret;
}

static inline bool ib_is_udata_cleared(struct ib_udata *udata,
				       size_t offset,
				       size_t len)
{
	return ib_is_buffer_cleared(udata->inbuf + offset, len);
}
/**
 * ib_modify_qp_is_ok - Check that the supplied attribute mask
 * contains all required attributes and no attributes not allowed for
 * the given QP state transition.
 * @cur_state: Current QP state
 * @next_state: Next QP state
 * @type: QP type
 * @mask: Mask of supplied QP attributes
 *
 * This function is a helper function that a low-level driver's
 * modify_qp method can use to validate the consumer's input. It
 * checks that cur_state and next_state are valid QP states, that a
 * transition from cur_state to next_state is allowed by the IB spec,
 * and that the attribute mask supplied is allowed for the transition.
 */
bool ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
			enum ib_qp_type type, enum ib_qp_attr_mask mask);
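/*
 * Illustrative sketch only: how a driver's ->modify_qp() typically uses
 * ib_modify_qp_is_ok() to validate the requested transition before touching
 * HW. to_exdrv_qp() and exdrv_qp_program_hw() are hypothetical.
 *
 *	static int exdrv_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 *				   int attr_mask, struct ib_udata *udata)
 *	{
 *		struct exdrv_qp *qp = to_exdrv_qp(ibqp);
 *		enum ib_qp_state cur = qp->state;
 *		enum ib_qp_state next = (attr_mask & IB_QP_STATE) ?
 *					attr->qp_state : cur;
 *
 *		if (!ib_modify_qp_is_ok(cur, next, ibqp->qp_type, attr_mask))
 *			return -EINVAL;
 *
 *		return exdrv_qp_program_hw(qp, attr, attr_mask, next);
 *	}
 */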
3044 void ib_register_event_handler(struct ib_event_handler
*event_handler
);
3045 void ib_unregister_event_handler(struct ib_event_handler
*event_handler
);
3046 void ib_dispatch_event(const struct ib_event
*event
);
3048 int ib_query_port(struct ib_device
*device
,
3049 u32 port_num
, struct ib_port_attr
*port_attr
);
3051 enum rdma_link_layer
rdma_port_get_link_layer(struct ib_device
*device
,
3055 * rdma_cap_ib_switch - Check if the device is IB switch
3056 * @device: Device to check
3058 * Device driver is responsible for setting is_switch bit on
3059 * in ib_device structure at init time.
3061 * Return: true if the device is IB switch.
3063 static inline bool rdma_cap_ib_switch(const struct ib_device
*device
)
3065 return device
->is_switch
;
3069 * rdma_start_port - Return the first valid port number for the device
3072 * @device: Device to be checked
3074 * Return start port number
3076 static inline u32
rdma_start_port(const struct ib_device
*device
)
3078 return rdma_cap_ib_switch(device
) ? 0 : 1;
/**
 * rdma_for_each_port - Iterate over all valid port numbers of the IB device
 * @device - The struct ib_device * to iterate over
 * @iter - The unsigned int to store the port number
 */
#define rdma_for_each_port(device, iter)				\
	for (iter = rdma_start_port(device +				\
				    BUILD_BUG_ON_ZERO(!__same_type(u32,	\
								   iter))); \
	     iter <= rdma_end_port(device); iter++)
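/*
 * Illustrative sketch only: counting how many ports of a device run a RoCE
 * protocol, using the iterator above together with the rdma_protocol_roce()
 * helper defined below. exdrv_num_roce_ports() is a hypothetical name.
 *
 *	static unsigned int exdrv_num_roce_ports(struct ib_device *device)
 *	{
 *		unsigned int port, n = 0;
 *
 *		rdma_for_each_port(device, port)
 *			if (rdma_protocol_roce(device, port))
 *				n++;
 *		return n;
 *	}
 */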
3093 * rdma_end_port - Return the last valid port number for the device
3096 * @device: Device to be checked
3098 * Return last port number
3100 static inline u32
rdma_end_port(const struct ib_device
*device
)
3102 return rdma_cap_ib_switch(device
) ? 0 : device
->phys_port_cnt
;
3105 static inline int rdma_is_port_valid(const struct ib_device
*device
,
3108 return (port
>= rdma_start_port(device
) &&
3109 port
<= rdma_end_port(device
));
3112 static inline bool rdma_is_grh_required(const struct ib_device
*device
,
3115 return device
->port_data
[port_num
].immutable
.core_cap_flags
&
3116 RDMA_CORE_PORT_IB_GRH_REQUIRED
;
3119 static inline bool rdma_protocol_ib(const struct ib_device
*device
,
3122 return device
->port_data
[port_num
].immutable
.core_cap_flags
&
3123 RDMA_CORE_CAP_PROT_IB
;
3126 static inline bool rdma_protocol_roce(const struct ib_device
*device
,
3129 return device
->port_data
[port_num
].immutable
.core_cap_flags
&
3130 (RDMA_CORE_CAP_PROT_ROCE
| RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP
);
3133 static inline bool rdma_protocol_roce_udp_encap(const struct ib_device
*device
,
3136 return device
->port_data
[port_num
].immutable
.core_cap_flags
&
3137 RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP
;
3140 static inline bool rdma_protocol_roce_eth_encap(const struct ib_device
*device
,
3143 return device
->port_data
[port_num
].immutable
.core_cap_flags
&
3144 RDMA_CORE_CAP_PROT_ROCE
;
3147 static inline bool rdma_protocol_iwarp(const struct ib_device
*device
,
3150 return device
->port_data
[port_num
].immutable
.core_cap_flags
&
3151 RDMA_CORE_CAP_PROT_IWARP
;
3154 static inline bool rdma_ib_or_roce(const struct ib_device
*device
,
3157 return rdma_protocol_ib(device
, port_num
) ||
3158 rdma_protocol_roce(device
, port_num
);
3161 static inline bool rdma_protocol_raw_packet(const struct ib_device
*device
,
3164 return device
->port_data
[port_num
].immutable
.core_cap_flags
&
3165 RDMA_CORE_CAP_PROT_RAW_PACKET
;
3168 static inline bool rdma_protocol_usnic(const struct ib_device
*device
,
3171 return device
->port_data
[port_num
].immutable
.core_cap_flags
&
3172 RDMA_CORE_CAP_PROT_USNIC
;
3176 * rdma_cap_ib_mad - Check if the port of a device supports Infiniband
3177 * Management Datagrams.
3178 * @device: Device to check
3179 * @port_num: Port number to check
3181 * Management Datagrams (MAD) are a required part of the InfiniBand
3182 * specification and are supported on all InfiniBand devices. A slightly
 * extended version is also supported on OPA interfaces.
3185 * Return: true if the port supports sending/receiving of MAD packets.
3187 static inline bool rdma_cap_ib_mad(const struct ib_device
*device
, u32 port_num
)
3189 return device
->port_data
[port_num
].immutable
.core_cap_flags
&
3190 RDMA_CORE_CAP_IB_MAD
;
3194 * rdma_cap_opa_mad - Check if the port of device provides support for OPA
3195 * Management Datagrams.
3196 * @device: Device to check
3197 * @port_num: Port number to check
3199 * Intel OmniPath devices extend and/or replace the InfiniBand Management
3200 * datagrams with their own versions. These OPA MADs share many but not all of
3201 * the characteristics of InfiniBand MADs.
3203 * OPA MADs differ in the following ways:
3205 * 1) MADs are variable size up to 2K
3206 * IBTA defined MADs remain fixed at 256 bytes
3207 * 2) OPA SMPs must carry valid PKeys
3208 * 3) OPA SMP packets are a different format
3210 * Return: true if the port supports OPA MAD packet formats.
3212 static inline bool rdma_cap_opa_mad(struct ib_device
*device
, u32 port_num
)
3214 return device
->port_data
[port_num
].immutable
.core_cap_flags
&
3215 RDMA_CORE_CAP_OPA_MAD
;
3219 * rdma_cap_ib_smi - Check if the port of a device provides an Infiniband
3220 * Subnet Management Agent (SMA) on the Subnet Management Interface (SMI).
3221 * @device: Device to check
3222 * @port_num: Port number to check
3224 * Each InfiniBand node is required to provide a Subnet Management Agent
3225 * that the subnet manager can access. Prior to the fabric being fully
3226 * configured by the subnet manager, the SMA is accessed via a well known
3227 * interface called the Subnet Management Interface (SMI). This interface
3228 * uses directed route packets to communicate with the SM to get around the
3229 * chicken and egg problem of the SM needing to know what's on the fabric
3230 * in order to configure the fabric, and needing to configure the fabric in
3231 * order to send packets to the devices on the fabric. These directed
3232 * route packets do not need the fabric fully configured in order to reach
3233 * their destination. The SMI is the only method allowed to send
3234 * directed route packets on an InfiniBand fabric.
3236 * Return: true if the port provides an SMI.
3238 static inline bool rdma_cap_ib_smi(const struct ib_device
*device
, u32 port_num
)
3240 return device
->port_data
[port_num
].immutable
.core_cap_flags
&
3241 RDMA_CORE_CAP_IB_SMI
;
3245 * rdma_cap_ib_cm - Check if the port of device has the capability Infiniband
3246 * Communication Manager.
3247 * @device: Device to check
3248 * @port_num: Port number to check
3250 * The InfiniBand Communication Manager is one of many pre-defined General
3251 * Service Agents (GSA) that are accessed via the General Service
 * Interface (GSI). Its role is to facilitate establishment of connections
3253 * between nodes as well as other management related tasks for established
3256 * Return: true if the port supports an IB CM (this does not guarantee that
3257 * a CM is actually running however).
3259 static inline bool rdma_cap_ib_cm(const struct ib_device
*device
, u32 port_num
)
3261 return device
->port_data
[port_num
].immutable
.core_cap_flags
&
3262 RDMA_CORE_CAP_IB_CM
;
3266 * rdma_cap_iw_cm - Check if the port of device has the capability IWARP
3267 * Communication Manager.
3268 * @device: Device to check
3269 * @port_num: Port number to check
3271 * Similar to above, but specific to iWARP connections which have a different
 * management protocol than InfiniBand.
3274 * Return: true if the port supports an iWARP CM (this does not guarantee that
3275 * a CM is actually running however).
3277 static inline bool rdma_cap_iw_cm(const struct ib_device
*device
, u32 port_num
)
3279 return device
->port_data
[port_num
].immutable
.core_cap_flags
&
3280 RDMA_CORE_CAP_IW_CM
;
3284 * rdma_cap_ib_sa - Check if the port of device has the capability Infiniband
3285 * Subnet Administration.
3286 * @device: Device to check
3287 * @port_num: Port number to check
3289 * An InfiniBand Subnet Administration (SA) service is a pre-defined General
3290 * Service Agent (GSA) provided by the Subnet Manager (SM). On InfiniBand
3291 * fabrics, devices should resolve routes to other hosts by contacting the
3292 * SA to query the proper route.
3294 * Return: true if the port should act as a client to the fabric Subnet
3295 * Administration interface. This does not imply that the SA service is
3298 static inline bool rdma_cap_ib_sa(const struct ib_device
*device
, u32 port_num
)
3300 return device
->port_data
[port_num
].immutable
.core_cap_flags
&
3301 RDMA_CORE_CAP_IB_SA
;
3305 * rdma_cap_ib_mcast - Check if the port of device has the capability Infiniband
3307 * @device: Device to check
3308 * @port_num: Port number to check
3310 * InfiniBand multicast registration is more complex than normal IPv4 or
3311 * IPv6 multicast registration. Each Host Channel Adapter must register
3312 * with the Subnet Manager when it wishes to join a multicast group. It
3313 * should do so only once regardless of how many queue pairs it subscribes
3314 * to this group. And it should leave the group only after all queue pairs
3315 * attached to the group have been detached.
 * Return: true if the port must undertake the additional administrative
3318 * overhead of registering/unregistering with the SM and tracking of the
3319 * total number of queue pairs attached to the multicast group.
3321 static inline bool rdma_cap_ib_mcast(const struct ib_device
*device
,
3324 return rdma_cap_ib_sa(device
, port_num
);
3328 * rdma_cap_af_ib - Check if the port of device has the capability
3329 * Native Infiniband Address.
3330 * @device: Device to check
3331 * @port_num: Port number to check
3333 * InfiniBand addressing uses a port's GUID + Subnet Prefix to make a default
3334 * GID. RoCE uses a different mechanism, but still generates a GID via
3335 * a prescribed mechanism and port specific data.
3337 * Return: true if the port uses a GID address to identify devices on the
3340 static inline bool rdma_cap_af_ib(const struct ib_device
*device
, u32 port_num
)
3342 return device
->port_data
[port_num
].immutable
.core_cap_flags
&
3343 RDMA_CORE_CAP_AF_IB
;
3347 * rdma_cap_eth_ah - Check if the port of device has the capability
3348 * Ethernet Address Handle.
3349 * @device: Device to check
3350 * @port_num: Port number to check
3352 * RoCE is InfiniBand over Ethernet, and it uses a well defined technique
3353 * to fabricate GIDs over Ethernet/IP specific addresses native to the
3354 * port. Normally, packet headers are generated by the sending host
3355 * adapter, but when sending connectionless datagrams, we must manually
3356 * inject the proper headers for the fabric we are communicating over.
3358 * Return: true if we are running as a RoCE port and must force the
3359 * addition of a Global Route Header built from our Ethernet Address
3360 * Handle into our header list for connectionless packets.
3362 static inline bool rdma_cap_eth_ah(const struct ib_device
*device
, u32 port_num
)
3364 return device
->port_data
[port_num
].immutable
.core_cap_flags
&
3365 RDMA_CORE_CAP_ETH_AH
;
3369 * rdma_cap_opa_ah - Check if the port of device supports
3370 * OPA Address handles
3371 * @device: Device to check
3372 * @port_num: Port number to check
3374 * Return: true if we are running on an OPA device which supports
3375 * the extended OPA addressing.
3377 static inline bool rdma_cap_opa_ah(struct ib_device
*device
, u32 port_num
)
3379 return (device
->port_data
[port_num
].immutable
.core_cap_flags
&
3380 RDMA_CORE_CAP_OPA_AH
) == RDMA_CORE_CAP_OPA_AH
;
3384 * rdma_max_mad_size - Return the max MAD size required by this RDMA Port.
3387 * @port_num: Port number
3389 * This MAD size includes the MAD headers and MAD payload. No other headers
3392 * Return the max MAD size required by the Port. Will return 0 if the port
3393 * does not support MADs
3395 static inline size_t rdma_max_mad_size(const struct ib_device
*device
,
3398 return device
->port_data
[port_num
].immutable
.max_mad_size
;
3402 * rdma_cap_roce_gid_table - Check if the port of device uses roce_gid_table
3403 * @device: Device to check
3404 * @port_num: Port number to check
3406 * RoCE GID table mechanism manages the various GIDs for a device.
3408 * NOTE: if allocating the port's GID table has failed, this call will still
3409 * return true, but any RoCE GID table API will fail.
3411 * Return: true if the port uses RoCE GID table mechanism in order to manage
3414 static inline bool rdma_cap_roce_gid_table(const struct ib_device
*device
,
3417 return rdma_protocol_roce(device
, port_num
) &&
3418 device
->ops
.add_gid
&& device
->ops
.del_gid
;
3422 * Check if the device supports READ W/ INVALIDATE.
3424 static inline bool rdma_cap_read_inv(struct ib_device
*dev
, u32 port_num
)
3427 * iWarp drivers must support READ W/ INVALIDATE. No other protocol
3428 * has support for it yet.
3430 return rdma_protocol_iwarp(dev
, port_num
);
3434 * rdma_core_cap_opa_port - Return whether the RDMA Port is OPA or not.
3436 * @port_num: 1 based Port number
 * Return true if the port is an Intel OPA port, false if not
3440 static inline bool rdma_core_cap_opa_port(struct ib_device
*device
,
3443 return (device
->port_data
[port_num
].immutable
.core_cap_flags
&
3444 RDMA_CORE_PORT_INTEL_OPA
) == RDMA_CORE_PORT_INTEL_OPA
;
3448 * rdma_mtu_enum_to_int - Return the mtu of the port as an integer value.
3450 * @port_num: Port number
3451 * @mtu: enum value of MTU
3453 * Return the MTU size supported by the port as an integer value. Will return
3454 * -1 if enum value of mtu is not supported.
3456 static inline int rdma_mtu_enum_to_int(struct ib_device
*device
, u32 port
,
3459 if (rdma_core_cap_opa_port(device
, port
))
3460 return opa_mtu_enum_to_int((enum opa_mtu
)mtu
);
3462 return ib_mtu_enum_to_int((enum ib_mtu
)mtu
);
3466 * rdma_mtu_from_attr - Return the mtu of the port from the port attribute.
3468 * @port_num: Port number
3469 * @attr: port attribute
3471 * Return the MTU size supported by the port as an integer value.
3473 static inline int rdma_mtu_from_attr(struct ib_device
*device
, u32 port
,
3474 struct ib_port_attr
*attr
)
3476 if (rdma_core_cap_opa_port(device
, port
))
3477 return attr
->phys_mtu
;
3479 return ib_mtu_enum_to_int(attr
->max_mtu
);
3482 int ib_set_vf_link_state(struct ib_device
*device
, int vf
, u32 port
,
3484 int ib_get_vf_config(struct ib_device
*device
, int vf
, u32 port
,
3485 struct ifla_vf_info
*info
);
3486 int ib_get_vf_stats(struct ib_device
*device
, int vf
, u32 port
,
3487 struct ifla_vf_stats
*stats
);
3488 int ib_get_vf_guid(struct ib_device
*device
, int vf
, u32 port
,
3489 struct ifla_vf_guid
*node_guid
,
3490 struct ifla_vf_guid
*port_guid
);
3491 int ib_set_vf_guid(struct ib_device
*device
, int vf
, u32 port
, u64 guid
,
3494 int ib_query_pkey(struct ib_device
*device
,
3495 u32 port_num
, u16 index
, u16
*pkey
);
3497 int ib_modify_device(struct ib_device
*device
,
3498 int device_modify_mask
,
3499 struct ib_device_modify
*device_modify
);
3501 int ib_modify_port(struct ib_device
*device
,
3502 u32 port_num
, int port_modify_mask
,
3503 struct ib_port_modify
*port_modify
);
3505 int ib_find_gid(struct ib_device
*device
, union ib_gid
*gid
,
3506 u32
*port_num
, u16
*index
);
3508 int ib_find_pkey(struct ib_device
*device
,
3509 u32 port_num
, u16 pkey
, u16
*index
);
3513 * Create a memory registration for all memory in the system and place
3514 * the rkey for it into pd->unsafe_global_rkey. This can be used by
3515 * ULPs to avoid the overhead of dynamic MRs.
3517 * This flag is generally considered unsafe and must only be used in
 * extremely trusted environments. Every use of it will log a warning
3519 * in the kernel log.
3521 IB_PD_UNSAFE_GLOBAL_RKEY
= 0x01,
3524 struct ib_pd
*__ib_alloc_pd(struct ib_device
*device
, unsigned int flags
,
3525 const char *caller
);
/**
 * ib_alloc_pd - Allocates an unused protection domain.
 * @device: The device on which to allocate the protection domain.
 * @flags: protection domain flags
 *
 * A protection domain object provides an association between QPs, shared
 * receive queues, address handles, memory regions, and memory windows.
 *
 * Every PD has a local_dma_lkey which can be used as the lkey value for local
 * memory operations.
 */
#define ib_alloc_pd(device, flags) \
	__ib_alloc_pd((device), (flags), KBUILD_MODNAME)
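/*
 * Illustrative sketch only: a ULP allocating a PD for a device and releasing
 * it again. exdrv_setup() is a hypothetical caller.
 *
 *	static int exdrv_setup(struct ib_device *device)
 *	{
 *		struct ib_pd *pd;
 *
 *		pd = ib_alloc_pd(device, 0);
 *		if (IS_ERR(pd))
 *			return PTR_ERR(pd);
 *
 *		// ... create QPs/CQs/MRs against pd, use pd->local_dma_lkey ...
 *
 *		ib_dealloc_pd(pd);
 *		return 0;
 *	}
 */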
3541 int ib_dealloc_pd_user(struct ib_pd
*pd
, struct ib_udata
*udata
);
3544 * ib_dealloc_pd - Deallocate kernel PD
3545 * @pd: The protection domain
3547 * NOTE: for user PD use ib_dealloc_pd_user with valid udata!
3549 static inline void ib_dealloc_pd(struct ib_pd
*pd
)
3551 int ret
= ib_dealloc_pd_user(pd
, NULL
);
3553 WARN_ONCE(ret
, "Destroy of kernel PD shouldn't fail");
3556 enum rdma_create_ah_flags
{
3557 /* In a sleepable context */
3558 RDMA_CREATE_AH_SLEEPABLE
= BIT(0),
3562 * rdma_create_ah - Creates an address handle for the given address vector.
3563 * @pd: The protection domain associated with the address handle.
3564 * @ah_attr: The attributes of the address vector.
3565 * @flags: Create address handle flags (see enum rdma_create_ah_flags).
3567 * The address handle is used to reference a local or global destination
3568 * in all UD QP post sends.
3570 struct ib_ah
*rdma_create_ah(struct ib_pd
*pd
, struct rdma_ah_attr
*ah_attr
,
3574 * rdma_create_user_ah - Creates an address handle for the given address vector.
3575 * It resolves destination mac address for ah attribute of RoCE type.
3576 * @pd: The protection domain associated with the address handle.
3577 * @ah_attr: The attributes of the address vector.
3578 * @udata: pointer to user's input output buffer information need by
3581 * It returns 0 on success and returns appropriate error code on error.
3582 * The address handle is used to reference a local or global destination
3583 * in all UD QP post sends.
3585 struct ib_ah
*rdma_create_user_ah(struct ib_pd
*pd
,
3586 struct rdma_ah_attr
*ah_attr
,
3587 struct ib_udata
*udata
);
3589 * ib_get_gids_from_rdma_hdr - Get sgid and dgid from GRH or IPv4 header
3591 * @hdr: the L3 header to parse
3592 * @net_type: type of header to parse
3593 * @sgid: place to store source gid
3594 * @dgid: place to store destination gid
3596 int ib_get_gids_from_rdma_hdr(const union rdma_network_hdr
*hdr
,
3597 enum rdma_network_type net_type
,
3598 union ib_gid
*sgid
, union ib_gid
*dgid
);
3601 * ib_get_rdma_header_version - Get the header version
3602 * @hdr: the L3 header to parse
3604 int ib_get_rdma_header_version(const union rdma_network_hdr
*hdr
);
3607 * ib_init_ah_attr_from_wc - Initializes address handle attributes from a
3609 * @device: Device on which the received message arrived.
3610 * @port_num: Port on which the received message arrived.
3611 * @wc: Work completion associated with the received message.
3612 * @grh: References the received global route header. This parameter is
3613 * ignored unless the work completion indicates that the GRH is valid.
3614 * @ah_attr: Returned attributes that can be used when creating an address
3615 * handle for replying to the message.
3616 * When ib_init_ah_attr_from_wc() returns success,
3617 * (a) for IB link layer it optionally contains a reference to SGID attribute
3618 * when GRH is present for IB link layer.
3619 * (b) for RoCE link layer it contains a reference to SGID attribute.
3620 * User must invoke rdma_cleanup_ah_attr_gid_attr() to release reference to SGID
3621 * attributes which are initialized using ib_init_ah_attr_from_wc().
3624 int ib_init_ah_attr_from_wc(struct ib_device
*device
, u32 port_num
,
3625 const struct ib_wc
*wc
, const struct ib_grh
*grh
,
3626 struct rdma_ah_attr
*ah_attr
);
3629 * ib_create_ah_from_wc - Creates an address handle associated with the
3630 * sender of the specified work completion.
3631 * @pd: The protection domain associated with the address handle.
3632 * @wc: Work completion information associated with a received message.
3633 * @grh: References the received global route header. This parameter is
3634 * ignored unless the work completion indicates that the GRH is valid.
3635 * @port_num: The outbound port number to associate with the address.
3637 * The address handle is used to reference a local or global destination
3638 * in all UD QP post sends.
3640 struct ib_ah
*ib_create_ah_from_wc(struct ib_pd
*pd
, const struct ib_wc
*wc
,
3641 const struct ib_grh
*grh
, u32 port_num
);
3644 * rdma_modify_ah - Modifies the address vector associated with an address
3646 * @ah: The address handle to modify.
3647 * @ah_attr: The new address vector attributes to associate with the
3650 int rdma_modify_ah(struct ib_ah
*ah
, struct rdma_ah_attr
*ah_attr
);
3653 * rdma_query_ah - Queries the address vector associated with an address
3655 * @ah: The address handle to query.
3656 * @ah_attr: The address vector attributes associated with the address
3659 int rdma_query_ah(struct ib_ah
*ah
, struct rdma_ah_attr
*ah_attr
);
3661 enum rdma_destroy_ah_flags
{
3662 /* In a sleepable context */
3663 RDMA_DESTROY_AH_SLEEPABLE
= BIT(0),
3667 * rdma_destroy_ah_user - Destroys an address handle.
3668 * @ah: The address handle to destroy.
3669 * @flags: Destroy address handle flags (see enum rdma_destroy_ah_flags).
3670 * @udata: Valid user data or NULL for kernel objects
3672 int rdma_destroy_ah_user(struct ib_ah
*ah
, u32 flags
, struct ib_udata
*udata
);
 * rdma_destroy_ah - Destroys a kernel address handle.
3676 * @ah: The address handle to destroy.
3677 * @flags: Destroy address handle flags (see enum rdma_destroy_ah_flags).
3679 * NOTE: for user ah use rdma_destroy_ah_user with valid udata!
3681 static inline void rdma_destroy_ah(struct ib_ah
*ah
, u32 flags
)
3683 int ret
= rdma_destroy_ah_user(ah
, flags
, NULL
);
3685 WARN_ONCE(ret
, "Destroy of kernel AH shouldn't fail");
3688 struct ib_srq
*ib_create_srq_user(struct ib_pd
*pd
,
3689 struct ib_srq_init_attr
*srq_init_attr
,
3690 struct ib_usrq_object
*uobject
,
3691 struct ib_udata
*udata
);
3692 static inline struct ib_srq
*
3693 ib_create_srq(struct ib_pd
*pd
, struct ib_srq_init_attr
*srq_init_attr
)
3695 if (!pd
->device
->ops
.create_srq
)
3696 return ERR_PTR(-EOPNOTSUPP
);
3698 return ib_create_srq_user(pd
, srq_init_attr
, NULL
, NULL
);
3702 * ib_modify_srq - Modifies the attributes for the specified SRQ.
3703 * @srq: The SRQ to modify.
3704 * @srq_attr: On input, specifies the SRQ attributes to modify. On output,
3705 * the current values of selected SRQ attributes are returned.
3706 * @srq_attr_mask: A bit-mask used to specify which attributes of the SRQ
3707 * are being modified.
3709 * The mask may contain IB_SRQ_MAX_WR to resize the SRQ and/or
3710 * IB_SRQ_LIMIT to set the SRQ's limit and request notification when
3711 * the number of receives queued drops below the limit.
3713 int ib_modify_srq(struct ib_srq
*srq
,
3714 struct ib_srq_attr
*srq_attr
,
3715 enum ib_srq_attr_mask srq_attr_mask
);
3718 * ib_query_srq - Returns the attribute list and current values for the
3720 * @srq: The SRQ to query.
3721 * @srq_attr: The attributes of the specified SRQ.
3723 int ib_query_srq(struct ib_srq
*srq
,
3724 struct ib_srq_attr
*srq_attr
);
3727 * ib_destroy_srq_user - Destroys the specified SRQ.
3728 * @srq: The SRQ to destroy.
3729 * @udata: Valid user data or NULL for kernel objects
3731 int ib_destroy_srq_user(struct ib_srq
*srq
, struct ib_udata
*udata
);
3734 * ib_destroy_srq - Destroys the specified kernel SRQ.
3735 * @srq: The SRQ to destroy.
3737 * NOTE: for user srq use ib_destroy_srq_user with valid udata!
3739 static inline void ib_destroy_srq(struct ib_srq
*srq
)
3741 int ret
= ib_destroy_srq_user(srq
, NULL
);
3743 WARN_ONCE(ret
, "Destroy of kernel SRQ shouldn't fail");
3747 * ib_post_srq_recv - Posts a list of work requests to the specified SRQ.
3748 * @srq: The SRQ to post the work request on.
3749 * @recv_wr: A list of work requests to post on the receive queue.
3750 * @bad_recv_wr: On an immediate failure, this parameter will reference
3751 * the work request that failed to be posted on the QP.
3753 static inline int ib_post_srq_recv(struct ib_srq
*srq
,
3754 const struct ib_recv_wr
*recv_wr
,
3755 const struct ib_recv_wr
**bad_recv_wr
)
3757 const struct ib_recv_wr
*dummy
;
3759 return srq
->device
->ops
.post_srq_recv(srq
, recv_wr
,
3760 bad_recv_wr
? : &dummy
);
3763 struct ib_qp
*ib_create_qp_kernel(struct ib_pd
*pd
,
3764 struct ib_qp_init_attr
*qp_init_attr
,
3765 const char *caller
);
/**
 * ib_create_qp - Creates a kernel QP associated with the specific protection
 *   domain.
 * @pd: The protection domain associated with the QP.
 * @init_attr: A list of initial attributes required to create the
 *   QP. If QP creation succeeds, then the attributes are updated to
 *   the actual capabilities of the created QP.
 */
static inline struct ib_qp *ib_create_qp(struct ib_pd *pd,
					 struct ib_qp_init_attr *init_attr)
{
	return ib_create_qp_kernel(pd, init_attr, KBUILD_MODNAME);
}
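/*
 * Illustrative sketch only: creating a kernel RC QP bound to an existing
 * send/recv CQ pair. The capacity numbers are arbitrary example values and
 * exdrv_create_rc_qp() is a hypothetical name.
 *
 *	static struct ib_qp *exdrv_create_rc_qp(struct ib_pd *pd,
 *						struct ib_cq *cq)
 *	{
 *		struct ib_qp_init_attr init_attr = {
 *			.send_cq	= cq,
 *			.recv_cq	= cq,
 *			.qp_type	= IB_QPT_RC,
 *			.sq_sig_type	= IB_SIGNAL_REQ_WR,
 *			.cap = {
 *				.max_send_wr	= 64,
 *				.max_recv_wr	= 64,
 *				.max_send_sge	= 1,
 *				.max_recv_sge	= 1,
 *			},
 *		};
 *
 *		return ib_create_qp(pd, &init_attr);
 *	}
 */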
/**
 * ib_modify_qp_with_udata - Modifies the attributes for the specified QP.
 * @qp: The QP to modify.
 * @attr: On input, specifies the QP attributes to modify. On output,
 *   the current values of selected QP attributes are returned.
 * @attr_mask: A bit-mask used to specify which attributes of the QP
 *   are being modified.
 * @udata: pointer to user's input output buffer information
 *
 * It returns 0 on success and returns appropriate error code on error.
 */
int ib_modify_qp_with_udata(struct ib_qp *qp, struct ib_qp_attr *attr,
			    int attr_mask, struct ib_udata *udata);
3797 * ib_modify_qp - Modifies the attributes for the specified QP and then
3798 * transitions the QP to the given state.
3799 * @qp: The QP to modify.
3800 * @qp_attr: On input, specifies the QP attributes to modify. On output,
3801 * the current values of selected QP attributes are returned.
3802 * @qp_attr_mask: A bit-mask used to specify which attributes of the QP
3803 * are being modified.
3805 int ib_modify_qp(struct ib_qp
*qp
,
3806 struct ib_qp_attr
*qp_attr
,
3810 * ib_query_qp - Returns the attribute list and current values for the
3812 * @qp: The QP to query.
3813 * @qp_attr: The attributes of the specified QP.
3814 * @qp_attr_mask: A bit-mask used to select specific attributes to query.
3815 * @qp_init_attr: Additional attributes of the selected QP.
3817 * The qp_attr_mask may be used to limit the query to gathering only the
3818 * selected attributes.
3820 int ib_query_qp(struct ib_qp
*qp
,
3821 struct ib_qp_attr
*qp_attr
,
3823 struct ib_qp_init_attr
*qp_init_attr
);
 * ib_destroy_qp_user - Destroys the specified QP.
3827 * @qp: The QP to destroy.
3828 * @udata: Valid udata or NULL for kernel objects
3830 int ib_destroy_qp_user(struct ib_qp
*qp
, struct ib_udata
*udata
);
3833 * ib_destroy_qp - Destroys the specified kernel QP.
3834 * @qp: The QP to destroy.
3836 * NOTE: for user qp use ib_destroy_qp_user with valid udata!
3838 static inline int ib_destroy_qp(struct ib_qp
*qp
)
3840 return ib_destroy_qp_user(qp
, NULL
);
3844 * ib_open_qp - Obtain a reference to an existing sharable QP.
3845 * @xrcd - XRC domain
3846 * @qp_open_attr: Attributes identifying the QP to open.
3848 * Returns a reference to a sharable QP.
3850 struct ib_qp
*ib_open_qp(struct ib_xrcd
*xrcd
,
3851 struct ib_qp_open_attr
*qp_open_attr
);
3854 * ib_close_qp - Release an external reference to a QP.
3855 * @qp: The QP handle to release
3857 * The opened QP handle is released by the caller. The underlying
3858 * shared QP is not destroyed until all internal references are released.
3860 int ib_close_qp(struct ib_qp
*qp
);
3863 * ib_post_send - Posts a list of work requests to the send queue of
3865 * @qp: The QP to post the work request on.
3866 * @send_wr: A list of work requests to post on the send queue.
3867 * @bad_send_wr: On an immediate failure, this parameter will reference
3868 * the work request that failed to be posted on the QP.
3870 * While IBA Vol. 1 section 11.4.1.1 specifies that if an immediate
3871 * error is returned, the QP state shall not be affected,
3872 * ib_post_send() will return an immediate error after queueing any
3873 * earlier work requests in the list.
static inline int ib_post_send(struct ib_qp *qp,
			       const struct ib_send_wr *send_wr,
			       const struct ib_send_wr **bad_send_wr)
{
	const struct ib_send_wr *dummy;

	return qp->device->ops.post_send(qp, send_wr, bad_send_wr ? : &dummy);
}
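/*
 * Illustrative sketch only: posting a single signalled SEND of a DMA-mapped
 * buffer. The wr_id value and buffer parameters are example choices and
 * exdrv_post_one_send() is a hypothetical name.
 *
 *	static int exdrv_post_one_send(struct ib_qp *qp, u64 dma_addr, u32 len)
 *	{
 *		struct ib_sge sge = {
 *			.addr	= dma_addr,
 *			.length	= len,
 *			.lkey	= qp->pd->local_dma_lkey,
 *		};
 *		struct ib_send_wr wr = {
 *			.wr_id		= 1,
 *			.sg_list	= &sge,
 *			.num_sge	= 1,
 *			.opcode		= IB_WR_SEND,
 *			.send_flags	= IB_SEND_SIGNALED,
 *		};
 *
 *		return ib_post_send(qp, &wr, NULL);
 *	}
 */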
3885 * ib_post_recv - Posts a list of work requests to the receive queue of
3887 * @qp: The QP to post the work request on.
3888 * @recv_wr: A list of work requests to post on the receive queue.
3889 * @bad_recv_wr: On an immediate failure, this parameter will reference
3890 * the work request that failed to be posted on the QP.
3892 static inline int ib_post_recv(struct ib_qp
*qp
,
3893 const struct ib_recv_wr
*recv_wr
,
3894 const struct ib_recv_wr
**bad_recv_wr
)
3896 const struct ib_recv_wr
*dummy
;
3898 return qp
->device
->ops
.post_recv(qp
, recv_wr
, bad_recv_wr
? : &dummy
);
3901 struct ib_cq
*__ib_alloc_cq(struct ib_device
*dev
, void *private, int nr_cqe
,
3902 int comp_vector
, enum ib_poll_context poll_ctx
,
3903 const char *caller
);
3904 static inline struct ib_cq
*ib_alloc_cq(struct ib_device
*dev
, void *private,
3905 int nr_cqe
, int comp_vector
,
3906 enum ib_poll_context poll_ctx
)
3908 return __ib_alloc_cq(dev
, private, nr_cqe
, comp_vector
, poll_ctx
,
3912 struct ib_cq
*__ib_alloc_cq_any(struct ib_device
*dev
, void *private,
3913 int nr_cqe
, enum ib_poll_context poll_ctx
,
3914 const char *caller
);
/**
 * ib_alloc_cq_any: Allocate kernel CQ
 * @dev: The IB device
 * @private: Private data attached to the CQE
 * @nr_cqe: Number of CQEs in the CQ
 * @poll_ctx: Context used for polling the CQ
 */
static inline struct ib_cq *ib_alloc_cq_any(struct ib_device *dev,
					    void *private, int nr_cqe,
					    enum ib_poll_context poll_ctx)
{
	return __ib_alloc_cq_any(dev, private, nr_cqe, poll_ctx,
				 KBUILD_MODNAME);
}
3931 void ib_free_cq(struct ib_cq
*cq
);
3932 int ib_process_cq_direct(struct ib_cq
*cq
, int budget
);
3935 * ib_create_cq - Creates a CQ on the specified device.
3936 * @device: The device on which to create the CQ.
3937 * @comp_handler: A user-specified callback that is invoked when a
3938 * completion event occurs on the CQ.
3939 * @event_handler: A user-specified callback that is invoked when an
3940 * asynchronous event not associated with a completion occurs on the CQ.
3941 * @cq_context: Context associated with the CQ returned to the user via
3942 * the associated completion and event handlers.
3943 * @cq_attr: The attributes the CQ should be created upon.
3945 * Users can examine the cq structure to determine the actual CQ size.
3947 struct ib_cq
*__ib_create_cq(struct ib_device
*device
,
3948 ib_comp_handler comp_handler
,
3949 void (*event_handler
)(struct ib_event
*, void *),
3951 const struct ib_cq_init_attr
*cq_attr
,
3952 const char *caller
);
3953 #define ib_create_cq(device, cmp_hndlr, evt_hndlr, cq_ctxt, cq_attr) \
3954 __ib_create_cq((device), (cmp_hndlr), (evt_hndlr), (cq_ctxt), (cq_attr), KBUILD_MODNAME)
3957 * ib_resize_cq - Modifies the capacity of the CQ.
3958 * @cq: The CQ to resize.
3959 * @cqe: The minimum size of the CQ.
3961 * Users can examine the cq structure to determine the actual CQ size.
3963 int ib_resize_cq(struct ib_cq
*cq
, int cqe
);
3966 * rdma_set_cq_moderation - Modifies moderation params of the CQ
3967 * @cq: The CQ to modify.
3968 * @cq_count: number of CQEs that will trigger an event
3969 * @cq_period: max period of time in usec before triggering an event
3972 int rdma_set_cq_moderation(struct ib_cq
*cq
, u16 cq_count
, u16 cq_period
);
3975 * ib_destroy_cq_user - Destroys the specified CQ.
3976 * @cq: The CQ to destroy.
3977 * @udata: Valid user data or NULL for kernel objects
3979 int ib_destroy_cq_user(struct ib_cq
*cq
, struct ib_udata
*udata
);
3982 * ib_destroy_cq - Destroys the specified kernel CQ.
3983 * @cq: The CQ to destroy.
3985 * NOTE: for user cq use ib_destroy_cq_user with valid udata!
3987 static inline void ib_destroy_cq(struct ib_cq
*cq
)
3989 int ret
= ib_destroy_cq_user(cq
, NULL
);
3991 WARN_ONCE(ret
, "Destroy of kernel CQ shouldn't fail");
/**
 * ib_poll_cq - poll a CQ for completion(s)
 * @cq: the CQ being polled
 * @num_entries: maximum number of completions to return
 * @wc: array of at least @num_entries &struct ib_wc where completions
 *   will be returned
 *
 * Poll a CQ for (possibly multiple) completions. If the return value
 * is < 0, an error occurred. If the return value is >= 0, it is the
 * number of completions returned. If the return value is
 * non-negative and < num_entries, then the CQ was emptied.
 */
static inline int ib_poll_cq(struct ib_cq *cq, int num_entries,
			     struct ib_wc *wc)
{
	return cq->device->ops.poll_cq(cq, num_entries, wc);
}
/**
 * ib_req_notify_cq - Request completion notification on a CQ.
 * @cq: The CQ to generate an event for.
 * @flags:
 *   Must contain exactly one of %IB_CQ_SOLICITED or %IB_CQ_NEXT_COMP
 *   to request an event on the next solicited event or next work
 *   completion at any type, respectively. %IB_CQ_REPORT_MISSED_EVENTS
 *   may also be |ed in to request a hint about missed events, as
 *   described below.
 *
 * Return Value:
 *    < 0 means an error occurred while requesting notification
 *   == 0 means notification was requested successfully, and if
 *        IB_CQ_REPORT_MISSED_EVENTS was passed in, then no events
 *        were missed and it is safe to wait for another event. In
 *        this case it is guaranteed that any work completions added
 *        to the CQ since the last CQ poll will trigger a completion
 *        notification event.
 *    > 0 is only returned if IB_CQ_REPORT_MISSED_EVENTS was passed
 *        in. It means that the consumer must poll the CQ again to
 *        make sure it is empty to avoid missing an event because of a
 *        race between requesting notification and an entry being
 *        added to the CQ. This return value means it is possible
 *        (but not guaranteed) that a work completion has been added
 *        to the CQ since the last poll without triggering a
 *        completion notification event.
 */
static inline int ib_req_notify_cq(struct ib_cq *cq,
				   enum ib_cq_notify_flags flags)
{
	return cq->device->ops.req_notify_cq(cq, flags);
}
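/*
 * Illustrative sketch only: the classic drain/arm/re-poll loop built from
 * ib_poll_cq() and ib_req_notify_cq(), handling the "possibly missed event"
 * return value. exdrv_handle_wc() is a hypothetical completion handler.
 *
 *	static void exdrv_cq_event(struct ib_cq *cq)
 *	{
 *		struct ib_wc wc;
 *
 *		do {
 *			while (ib_poll_cq(cq, 1, &wc) > 0)
 *				exdrv_handle_wc(cq, &wc);
 *		} while (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
 *					      IB_CQ_REPORT_MISSED_EVENTS) > 0);
 *	}
 */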
4045 struct ib_cq
*ib_cq_pool_get(struct ib_device
*dev
, unsigned int nr_cqe
,
4046 int comp_vector_hint
,
4047 enum ib_poll_context poll_ctx
);
4049 void ib_cq_pool_put(struct ib_cq
*cq
, unsigned int nr_cqe
);
/*
 * Drivers that don't need a DMA mapping at the RDMA layer, set dma_device to
 * NULL. This causes the ib_dma* helpers to just stash the kernel virtual
 * address into the dma address.
 */
static inline bool ib_uses_virt_dma(struct ib_device *dev)
{
	return IS_ENABLED(CONFIG_INFINIBAND_VIRT_DMA) && !dev->dma_device;
}
4062 * Check if a IB device's underlying DMA mapping supports P2PDMA transfers.
4064 static inline bool ib_dma_pci_p2p_dma_supported(struct ib_device
*dev
)
4066 if (ib_uses_virt_dma(dev
))
4069 return dma_pci_p2pdma_supported(dev
->dma_device
);
4073 * ib_virt_dma_to_ptr - Convert a dma_addr to a kernel pointer
4074 * @dma_addr: The DMA address
4076 * Used by ib_uses_virt_dma() devices to get back to the kernel pointer after
4077 * going through the dma_addr marshalling.
4079 static inline void *ib_virt_dma_to_ptr(u64 dma_addr
)
4081 /* virt_dma mode maps the kvs's directly into the dma addr */
4082 return (void *)(uintptr_t)dma_addr
;
4086 * ib_virt_dma_to_page - Convert a dma_addr to a struct page
4087 * @dma_addr: The DMA address
4089 * Used by ib_uses_virt_dma() device to get back to the struct page after going
4090 * through the dma_addr marshalling.
4092 static inline struct page
*ib_virt_dma_to_page(u64 dma_addr
)
4094 return virt_to_page(ib_virt_dma_to_ptr(dma_addr
));
/**
 * ib_dma_mapping_error - check a DMA addr for error
 * @dev: The device for which the dma_addr was created
 * @dma_addr: The DMA address to check
 */
static inline int ib_dma_mapping_error(struct ib_device *dev, u64 dma_addr)
{
	if (ib_uses_virt_dma(dev))
		return 0;
	return dma_mapping_error(dev->dma_device, dma_addr);
}

/**
 * ib_dma_map_single - Map a kernel virtual address to DMA address
 * @dev: The device for which the dma_addr is to be created
 * @cpu_addr: The kernel virtual address
 * @size: The size of the region in bytes
 * @direction: The direction of the DMA
 */
static inline u64 ib_dma_map_single(struct ib_device *dev,
				    void *cpu_addr, size_t size,
				    enum dma_data_direction direction)
{
	if (ib_uses_virt_dma(dev))
		return (uintptr_t)cpu_addr;
	return dma_map_single(dev->dma_device, cpu_addr, size, direction);
}
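/*
 * Illustrative sketch only: mapping a kernel buffer for device access and
 * checking the result with ib_dma_mapping_error() before using the address
 * in an ib_sge. exdrv_map_buf() is a hypothetical name.
 *
 *	static int exdrv_map_buf(struct ib_device *dev, void *buf, size_t len,
 *				 u64 *dma_addr)
 *	{
 *		*dma_addr = ib_dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *		if (ib_dma_mapping_error(dev, *dma_addr))
 *			return -ENOMEM;
 *		return 0;
 *	}
 */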
4126 * ib_dma_unmap_single - Destroy a mapping created by ib_dma_map_single()
4127 * @dev: The device for which the DMA address was created
4128 * @addr: The DMA address
4129 * @size: The size of the region in bytes
4130 * @direction: The direction of the DMA
4132 static inline void ib_dma_unmap_single(struct ib_device
*dev
,
4133 u64 addr
, size_t size
,
4134 enum dma_data_direction direction
)
4136 if (!ib_uses_virt_dma(dev
))
4137 dma_unmap_single(dev
->dma_device
, addr
, size
, direction
);
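/*
 * Illustrative (non-normative) usage sketch: mapping a kernel buffer for a
 * DMA read by the device, checking for mapping failure, and unmapping it
 * again. The buf and len names are hypothetical.
 *
 *	dma_addr = ib_dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (ib_dma_mapping_error(dev, dma_addr))
 *		return -ENOMEM;
 *	...
 *	ib_dma_unmap_single(dev, dma_addr, len, DMA_TO_DEVICE);
 */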
/**
 * ib_dma_map_page - Map a physical page to DMA address
 * @dev: The device for which the dma_addr is to be created
 * @page: The page to be mapped
 * @offset: The offset within the page
 * @size: The size of the region in bytes
 * @direction: The direction of the DMA
 */
static inline u64 ib_dma_map_page(struct ib_device *dev,
				  struct page *page,
				  unsigned long offset,
				  size_t size,
				  enum dma_data_direction direction)
{
	if (ib_uses_virt_dma(dev))
		return (uintptr_t)(page_address(page) + offset);
	return dma_map_page(dev->dma_device, page, offset, size, direction);
}
/**
 * ib_dma_unmap_page - Destroy a mapping created by ib_dma_map_page()
 * @dev: The device for which the DMA address was created
 * @addr: The DMA address
 * @size: The size of the region in bytes
 * @direction: The direction of the DMA
 */
static inline void ib_dma_unmap_page(struct ib_device *dev,
				     u64 addr, size_t size,
				     enum dma_data_direction direction)
{
	if (!ib_uses_virt_dma(dev))
		dma_unmap_page(dev->dma_device, addr, size, direction);
}
int ib_dma_virt_map_sg(struct ib_device *dev, struct scatterlist *sg, int nents);

static inline int ib_dma_map_sg_attrs(struct ib_device *dev,
				      struct scatterlist *sg, int nents,
				      enum dma_data_direction direction,
				      unsigned long dma_attrs)
{
	if (ib_uses_virt_dma(dev))
		return ib_dma_virt_map_sg(dev, sg, nents);
	return dma_map_sg_attrs(dev->dma_device, sg, nents, direction,
				dma_attrs);
}

static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev,
					 struct scatterlist *sg, int nents,
					 enum dma_data_direction direction,
					 unsigned long dma_attrs)
{
	if (!ib_uses_virt_dma(dev))
		dma_unmap_sg_attrs(dev->dma_device, sg, nents, direction,
				   dma_attrs);
}
/**
 * ib_dma_map_sgtable_attrs - Map a scatter/gather table to DMA addresses
 * @dev: The device for which the DMA addresses are to be created
 * @sgt: The sg_table object describing the buffer
 * @direction: The direction of the DMA
 * @dma_attrs: Optional DMA attributes for the map operation
 */
static inline int ib_dma_map_sgtable_attrs(struct ib_device *dev,
					   struct sg_table *sgt,
					   enum dma_data_direction direction,
					   unsigned long dma_attrs)
{
	int nents;

	if (ib_uses_virt_dma(dev)) {
		nents = ib_dma_virt_map_sg(dev, sgt->sgl, sgt->orig_nents);
		if (!nents)
			return -EIO;
		sgt->nents = nents;
		return 0;
	}
	return dma_map_sgtable(dev->dma_device, sgt, direction, dma_attrs);
}

static inline void ib_dma_unmap_sgtable_attrs(struct ib_device *dev,
					      struct sg_table *sgt,
					      enum dma_data_direction direction,
					      unsigned long dma_attrs)
{
	if (!ib_uses_virt_dma(dev))
		dma_unmap_sgtable(dev->dma_device, sgt, direction, dma_attrs);
}
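/*
 * Illustrative (non-normative) usage sketch: mapping an sg_table for device
 * access and unmapping it afterwards. The sg_table is assumed to have been
 * populated elsewhere (e.g. by sg_alloc_table_from_pages()).
 *
 *	ret = ib_dma_map_sgtable_attrs(dev, &sgt, DMA_BIDIRECTIONAL, 0);
 *	if (ret)
 *		return ret;
 *	...
 *	ib_dma_unmap_sgtable_attrs(dev, &sgt, DMA_BIDIRECTIONAL, 0);
 */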
/**
 * ib_dma_map_sg - Map a scatter/gather list to DMA addresses
 * @dev: The device for which the DMA addresses are to be created
 * @sg: The array of scatter/gather entries
 * @nents: The number of scatter/gather entries
 * @direction: The direction of the DMA
 */
static inline int ib_dma_map_sg(struct ib_device *dev,
				struct scatterlist *sg, int nents,
				enum dma_data_direction direction)
{
	return ib_dma_map_sg_attrs(dev, sg, nents, direction, 0);
}

/**
 * ib_dma_unmap_sg - Unmap a scatter/gather list of DMA addresses
 * @dev: The device for which the DMA addresses were created
 * @sg: The array of scatter/gather entries
 * @nents: The number of scatter/gather entries
 * @direction: The direction of the DMA
 */
static inline void ib_dma_unmap_sg(struct ib_device *dev,
				   struct scatterlist *sg, int nents,
				   enum dma_data_direction direction)
{
	ib_dma_unmap_sg_attrs(dev, sg, nents, direction, 0);
}
/**
 * ib_dma_max_seg_size - Return the size limit of a single DMA transfer
 * @dev: The device to query
 *
 * The returned value represents a size in bytes.
 */
static inline unsigned int ib_dma_max_seg_size(struct ib_device *dev)
{
	if (ib_uses_virt_dma(dev))
		return UINT_MAX;
	return dma_get_max_seg_size(dev->dma_device);
}
/**
 * ib_dma_sync_single_for_cpu - Prepare DMA region to be accessed by CPU
 * @dev: The device for which the DMA address was created
 * @addr: The DMA address
 * @size: The size of the region in bytes
 * @dir: The direction of the DMA
 */
static inline void ib_dma_sync_single_for_cpu(struct ib_device *dev,
					      u64 addr,
					      size_t size,
					      enum dma_data_direction dir)
{
	if (!ib_uses_virt_dma(dev))
		dma_sync_single_for_cpu(dev->dma_device, addr, size, dir);
}

/**
 * ib_dma_sync_single_for_device - Prepare DMA region to be accessed by device
 * @dev: The device for which the DMA address was created
 * @addr: The DMA address
 * @size: The size of the region in bytes
 * @dir: The direction of the DMA
 */
static inline void ib_dma_sync_single_for_device(struct ib_device *dev,
						 u64 addr,
						 size_t size,
						 enum dma_data_direction dir)
{
	if (!ib_uses_virt_dma(dev))
		dma_sync_single_for_device(dev->dma_device, addr, size, dir);
}
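/*
 * Illustrative (non-normative) usage sketch: handing a streaming mapping back
 * and forth between CPU and device around an inspection of received data.
 * The process_data() consumer is hypothetical.
 *
 *	ib_dma_sync_single_for_cpu(dev, dma_addr, len, DMA_FROM_DEVICE);
 *	process_data(buf, len);
 *	ib_dma_sync_single_for_device(dev, dma_addr, len, DMA_FROM_DEVICE);
 */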
/* ib_reg_user_mr - register a memory region for virtual addresses from kernel
 * space. This function should be called when 'current' is the owning MM.
 */
struct ib_mr *ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
			     u64 virt_addr, int mr_access_flags);

/* ib_advise_mr - give advice about an address range in a memory region */
int ib_advise_mr(struct ib_pd *pd, enum ib_uverbs_advise_mr_advice advice,
		 u32 flags, struct ib_sge *sg_list, u32 num_sge);
/**
 * ib_dereg_mr_user - Deregisters a memory region and removes it from the
 *   HCA translation table.
 * @mr: The memory region to deregister.
 * @udata: Valid user data or NULL for kernel object
 *
 * This function can fail if the memory region has memory windows bound to it.
 */
int ib_dereg_mr_user(struct ib_mr *mr, struct ib_udata *udata);

/**
 * ib_dereg_mr - Deregisters a kernel memory region and removes it from the
 *   HCA translation table.
 * @mr: The memory region to deregister.
 *
 * This function can fail if the memory region has memory windows bound to it.
 *
 * NOTE: for user mr use ib_dereg_mr_user with valid udata!
 */
static inline int ib_dereg_mr(struct ib_mr *mr)
{
	return ib_dereg_mr_user(mr, NULL);
}
struct ib_mr *ib_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
			  u32 max_num_sg);

struct ib_mr *ib_alloc_mr_integrity(struct ib_pd *pd,
				    u32 max_num_data_sg,
				    u32 max_num_meta_sg);
/**
 * ib_update_fast_reg_key - updates the key portion of the fast_reg MR
 * @mr - struct ib_mr pointer to be updated.
 * @newkey - new key to be used.
 */
static inline void ib_update_fast_reg_key(struct ib_mr *mr, u8 newkey)
{
	mr->lkey = (mr->lkey & 0xffffff00) | newkey;
	mr->rkey = (mr->rkey & 0xffffff00) | newkey;
}

/**
 * ib_inc_rkey - increments the key portion of the given rkey. Can be used
 * for calculating a new rkey for type 2 memory windows.
 * @rkey - the rkey to increment.
 */
static inline u32 ib_inc_rkey(u32 rkey)
{
	const u32 mask = 0x000000ff;

	return ((rkey + 1) & mask) | (rkey & ~mask);
}
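/*
 * Illustrative (non-normative) usage sketch: rotating the low 8 key bits of a
 * fast-registration MR before it is registered again, so that stale remote
 * references using the old rkey are fenced off.
 *
 *	ib_update_fast_reg_key(mr, ib_inc_rkey(mr->rkey));
 */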
/**
 * ib_attach_mcast - Attaches the specified QP to a multicast group.
 * @qp: QP to attach to the multicast group.  The QP must be type
 *   IB_QPT_UD.
 * @gid: Multicast group GID.
 * @lid: Multicast group LID in host byte order.
 *
 * In order to send and receive multicast packets, subnet
 * administration must have created the multicast group and configured
 * the fabric appropriately.  The port associated with the specified
 * QP must also be a member of the multicast group.
 */
int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);

/**
 * ib_detach_mcast - Detaches the specified QP from a multicast group.
 * @qp: QP to detach from the multicast group.
 * @gid: Multicast group GID.
 * @lid: Multicast group LID in host byte order.
 */
int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);
struct ib_xrcd *ib_alloc_xrcd_user(struct ib_device *device,
				   struct inode *inode, struct ib_udata *udata);
int ib_dealloc_xrcd_user(struct ib_xrcd *xrcd, struct ib_udata *udata);
static inline int ib_check_mr_access(struct ib_device *ib_dev,
				     unsigned int flags)
{
	u64 device_cap = ib_dev->attrs.device_cap_flags;

	/*
	 * Local write permission is required if remote write or
	 * remote atomic permission is also requested.
	 */
	if (flags & (IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_REMOTE_WRITE) &&
	    !(flags & IB_ACCESS_LOCAL_WRITE))
		return -EINVAL;

	if (flags & ~IB_ACCESS_SUPPORTED)
		return -EINVAL;

	if (flags & IB_ACCESS_ON_DEMAND &&
	    !(ib_dev->attrs.kernel_cap_flags & IBK_ON_DEMAND_PAGING))
		return -EOPNOTSUPP;

	if ((flags & IB_ACCESS_FLUSH_GLOBAL &&
	    !(device_cap & IB_DEVICE_FLUSH_GLOBAL)) ||
	    (flags & IB_ACCESS_FLUSH_PERSISTENT &&
	    !(device_cap & IB_DEVICE_FLUSH_PERSISTENT)))
		return -EOPNOTSUPP;

	return 0;
}
static inline bool ib_access_writable(int access_flags)
{
	/*
	 * We have writable memory backing the MR if any of the following
	 * access flags are set.  "Local write" and "remote write" obviously
	 * require write access.  "Remote atomic" can do things like fetch and
	 * add, which will modify memory, and "MW bind" can change permissions
	 * by binding a window.
	 */
	return access_flags &
		(IB_ACCESS_LOCAL_WRITE   | IB_ACCESS_REMOTE_WRITE |
		 IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_MW_BIND);
}
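/*
 * Illustrative (non-normative) usage sketch: validating caller-supplied
 * access flags before registering a user MR. The surrounding function and
 * its variables are hypothetical.
 *
 *	ret = ib_check_mr_access(ib_dev, mr_access_flags);
 *	if (ret)
 *		return ERR_PTR(ret);
 *	mr = ib_reg_user_mr(pd, start, length, virt_addr, mr_access_flags);
 */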
/**
 * ib_check_mr_status: lightweight check of MR status.
 *     This routine may provide status checks on a selected
 *     ib_mr. The first use is for signature status checks.
 *
 * @mr: A memory region.
 * @check_mask: Bitmask of which checks to perform from
 *     ib_mr_status_check enumeration.
 * @mr_status: The container of relevant status checks.
 *     Failed checks will be indicated in the status bitmask
 *     and the relevant info shall be in the error item.
 */
int ib_check_mr_status(struct ib_mr *mr, u32 check_mask,
		       struct ib_mr_status *mr_status);
/**
 * ib_device_try_get: Hold a registration lock
 * device: The device to lock
 *
 * A device under an active registration lock cannot become unregistered. It
 * is only possible to obtain a registration lock on a device that is fully
 * registered, otherwise this function returns false.
 *
 * The registration lock is only necessary for actions which require the
 * device to still be registered. Uses that only require the device pointer to
 * be valid should use get_device(&ibdev->dev) to hold the memory.
 */
static inline bool ib_device_try_get(struct ib_device *dev)
{
	return refcount_inc_not_zero(&dev->refcount);
}

void ib_device_put(struct ib_device *device);
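/*
 * Illustrative (non-normative) usage sketch: taking the registration lock
 * while acting on a device, and dropping it when done.
 *
 *	if (!ib_device_try_get(ibdev))
 *		return -ENODEV;
 *	...
 *	ib_device_put(ibdev);
 */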
struct ib_device *ib_device_get_by_netdev(struct net_device *ndev,
					  enum rdma_driver_id driver_id);
struct ib_device *ib_device_get_by_name(const char *name,
					enum rdma_driver_id driver_id);
struct net_device *ib_get_net_dev_by_params(struct ib_device *dev, u32 port,
					    u16 pkey, const union ib_gid *gid,
					    const struct sockaddr *addr);
int ib_device_set_netdev(struct ib_device *ib_dev, struct net_device *ndev,
			 unsigned int port);
struct net_device *ib_device_get_netdev(struct ib_device *ib_dev,
					u32 port);
int ib_query_netdev_port(struct ib_device *ibdev, struct net_device *ndev,
			 u32 *port);
static inline enum ib_port_state ib_get_curr_port_state(struct net_device *net_dev)
{
	return (netif_running(net_dev) && netif_carrier_ok(net_dev)) ?
		IB_PORT_ACTIVE : IB_PORT_DOWN;
}
void ib_dispatch_port_state_event(struct ib_device *ibdev,
				  struct net_device *ndev);
struct ib_wq *ib_create_wq(struct ib_pd *pd,
			   struct ib_wq_init_attr *init_attr);
int ib_destroy_wq_user(struct ib_wq *wq, struct ib_udata *udata);
int ib_map_mr_sg(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
		 unsigned int *sg_offset, unsigned int page_size);
int ib_map_mr_sg_pi(struct ib_mr *mr, struct scatterlist *data_sg,
		    int data_sg_nents, unsigned int *data_sg_offset,
		    struct scatterlist *meta_sg, int meta_sg_nents,
		    unsigned int *meta_sg_offset, unsigned int page_size);
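/*
 * Illustrative (non-normative) usage sketch: mapping a DMA-mapped scatterlist
 * onto a fast-registration MR page list before posting an IB_WR_REG_MR work
 * request. The scatterlist and its nents are hypothetical.
 *
 *	n = ib_map_mr_sg(mr, sg, nents, NULL, PAGE_SIZE);
 *	if (n != nents)
 *		return n < 0 ? n : -EINVAL;
 */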
static inline int
ib_map_mr_sg_zbva(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
		  unsigned int *sg_offset, unsigned int page_size)
{
	int n;

	n = ib_map_mr_sg(mr, sg, sg_nents, sg_offset, page_size);
	mr->iova = 0;

	return n;
}
int ib_sg_to_pages(struct ib_mr *mr, struct scatterlist *sgl, int sg_nents,
		   unsigned int *sg_offset, int (*set_page)(struct ib_mr *, u64));
void ib_drain_rq(struct ib_qp *qp);
void ib_drain_sq(struct ib_qp *qp);
void ib_drain_qp(struct ib_qp *qp);
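/*
 * Illustrative (non-normative) usage sketch: a ULP tearing down a connection
 * typically drains outstanding work requests before destroying the QP.
 *
 *	ib_drain_qp(qp);
 *	ib_destroy_qp(qp);
 */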
int ib_get_eth_speed(struct ib_device *dev, u32 port_num, u16 *speed,
		     u8 *width);
static inline u8 *rdma_ah_retrieve_dmac(struct rdma_ah_attr *attr)
{
	if (attr->type == RDMA_AH_ATTR_TYPE_ROCE)
		return attr->roce.dmac;
	return NULL;
}
static inline void rdma_ah_set_dlid(struct rdma_ah_attr *attr, u32 dlid)
{
	if (attr->type == RDMA_AH_ATTR_TYPE_IB)
		attr->ib.dlid = (u16)dlid;
	else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
		attr->opa.dlid = dlid;
}

static inline u32 rdma_ah_get_dlid(const struct rdma_ah_attr *attr)
{
	if (attr->type == RDMA_AH_ATTR_TYPE_IB)
		return attr->ib.dlid;
	else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
		return attr->opa.dlid;
	return 0;
}
static inline void rdma_ah_set_sl(struct rdma_ah_attr *attr, u8 sl)
{
	attr->sl = sl;
}

static inline u8 rdma_ah_get_sl(const struct rdma_ah_attr *attr)
{
	return attr->sl;
}
static inline void rdma_ah_set_path_bits(struct rdma_ah_attr *attr,
					 u8 src_path_bits)
{
	if (attr->type == RDMA_AH_ATTR_TYPE_IB)
		attr->ib.src_path_bits = src_path_bits;
	else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
		attr->opa.src_path_bits = src_path_bits;
}

static inline u8 rdma_ah_get_path_bits(const struct rdma_ah_attr *attr)
{
	if (attr->type == RDMA_AH_ATTR_TYPE_IB)
		return attr->ib.src_path_bits;
	else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
		return attr->opa.src_path_bits;
	return 0;
}
static inline void rdma_ah_set_make_grd(struct rdma_ah_attr *attr,
					bool make_grd)
{
	if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
		attr->opa.make_grd = make_grd;
}

static inline bool rdma_ah_get_make_grd(const struct rdma_ah_attr *attr)
{
	if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
		return attr->opa.make_grd;
	return false;
}
static inline void rdma_ah_set_port_num(struct rdma_ah_attr *attr, u32 port_num)
{
	attr->port_num = port_num;
}

static inline u32 rdma_ah_get_port_num(const struct rdma_ah_attr *attr)
{
	return attr->port_num;
}

static inline void rdma_ah_set_static_rate(struct rdma_ah_attr *attr,
					   u8 static_rate)
{
	attr->static_rate = static_rate;
}

static inline u8 rdma_ah_get_static_rate(const struct rdma_ah_attr *attr)
{
	return attr->static_rate;
}

static inline void rdma_ah_set_ah_flags(struct rdma_ah_attr *attr,
					enum ib_ah_flags flag)
{
	attr->ah_flags = flag;
}

static inline enum ib_ah_flags
rdma_ah_get_ah_flags(const struct rdma_ah_attr *attr)
{
	return attr->ah_flags;
}
static inline const struct ib_global_route
		*rdma_ah_read_grh(const struct rdma_ah_attr *attr)
{
	return &attr->grh;
}

/* To retrieve and modify the grh */
static inline struct ib_global_route
		*rdma_ah_retrieve_grh(struct rdma_ah_attr *attr)
{
	return &attr->grh;
}
static inline void rdma_ah_set_dgid_raw(struct rdma_ah_attr *attr, void *dgid)
{
	struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);

	memcpy(grh->dgid.raw, dgid, sizeof(grh->dgid));
}

static inline void rdma_ah_set_subnet_prefix(struct rdma_ah_attr *attr,
					     __be64 prefix)
{
	struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);

	grh->dgid.global.subnet_prefix = prefix;
}

static inline void rdma_ah_set_interface_id(struct rdma_ah_attr *attr,
					    __be64 if_id)
{
	struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);

	grh->dgid.global.interface_id = if_id;
}
static inline void rdma_ah_set_grh(struct rdma_ah_attr *attr,
				   union ib_gid *dgid, u32 flow_label,
				   u8 sgid_index, u8 hop_limit,
				   u8 traffic_class)
{
	struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);

	attr->ah_flags = IB_AH_GRH;
	if (dgid)
		grh->dgid = *dgid;

	grh->flow_label = flow_label;
	grh->sgid_index = sgid_index;
	grh->hop_limit = hop_limit;
	grh->traffic_class = traffic_class;
	grh->sgid_attr = NULL;
}
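/*
 * Illustrative (non-normative) usage sketch: filling an address handle
 * attribute for a global (GRH) destination before creating an AH. The GID,
 * indices and limits shown are placeholders.
 *
 *	rdma_ah_set_port_num(&ah_attr, port_num);
 *	rdma_ah_set_dlid(&ah_attr, dlid);
 *	rdma_ah_set_grh(&ah_attr, &dgid, flow_label, sgid_index,
 *			hop_limit, traffic_class);
 */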
void rdma_destroy_ah_attr(struct rdma_ah_attr *ah_attr);
void rdma_move_grh_sgid_attr(struct rdma_ah_attr *attr, union ib_gid *dgid,
			     u32 flow_label, u8 hop_limit, u8 traffic_class,
			     const struct ib_gid_attr *sgid_attr);
void rdma_copy_ah_attr(struct rdma_ah_attr *dest,
		       const struct rdma_ah_attr *src);
void rdma_replace_ah_attr(struct rdma_ah_attr *old,
			  const struct rdma_ah_attr *new);
void rdma_move_ah_attr(struct rdma_ah_attr *dest, struct rdma_ah_attr *src);
/**
 * rdma_ah_find_type - Return address handle type.
 *
 * @dev: Device to be checked
 * @port_num: Port number
 */
static inline enum rdma_ah_attr_type rdma_ah_find_type(struct ib_device *dev,
						       u32 port_num)
{
	if (rdma_protocol_roce(dev, port_num))
		return RDMA_AH_ATTR_TYPE_ROCE;
	if (rdma_protocol_ib(dev, port_num)) {
		if (rdma_cap_opa_ah(dev, port_num))
			return RDMA_AH_ATTR_TYPE_OPA;
		return RDMA_AH_ATTR_TYPE_IB;
	}
	if (dev->type == RDMA_DEVICE_TYPE_SMI)
		return RDMA_AH_ATTR_TYPE_IB;

	return RDMA_AH_ATTR_TYPE_UNDEFINED;
}
/**
 * ib_lid_cpu16 - Return lid in 16bit CPU encoding.
 *     In the current implementation the only way to
 *     get the 32bit lid is from other sources for OPA.
 *     For IB, lids will always be 16bits so cast the
 *     value accordingly.
 *
 * @lid: A 32bit LID
 */
static inline u16 ib_lid_cpu16(u32 lid)
{
	WARN_ON_ONCE(lid & 0xFFFF0000);
	return (u16)lid;
}

/**
 * ib_lid_be16 - Return lid in 16bit BE encoding.
 *
 * @lid: A 32bit LID
 */
static inline __be16 ib_lid_be16(u32 lid)
{
	WARN_ON_ONCE(lid & 0xFFFF0000);
	return cpu_to_be16((u16)lid);
}
/**
 * ib_get_vector_affinity - Get the affinity mappings of a given completion
 *   vector
 * @device:         the rdma device
 * @comp_vector:    index of completion vector
 *
 * Returns NULL on failure, otherwise a corresponding cpu map of the
 * completion vector (returns all-cpus map if the device driver doesn't
 * implement get_vector_affinity).
 */
static inline const struct cpumask *
ib_get_vector_affinity(struct ib_device *device, int comp_vector)
{
	if (comp_vector < 0 || comp_vector >= device->num_comp_vectors ||
	    !device->ops.get_vector_affinity)
		return NULL;

	return device->ops.get_vector_affinity(device, comp_vector);
}
/**
 * rdma_roce_rescan_device - Rescan all of the network devices in the system
 * and add their gids, as needed, to the relevant RoCE devices.
 *
 * @device:         the rdma device
 */
void rdma_roce_rescan_device(struct ib_device *ibdev);
void rdma_roce_rescan_port(struct ib_device *ib_dev, u32 port);
void roce_del_all_netdev_gids(struct ib_device *ib_dev,
			      u32 port, struct net_device *ndev);
struct ib_ucontext *ib_uverbs_get_ucontext_file(struct ib_uverbs_file *ufile);

int uverbs_destroy_def_handler(struct uverbs_attr_bundle *attrs);
struct net_device *rdma_alloc_netdev(struct ib_device *device, u32 port_num,
				     enum rdma_netdev_t type, const char *name,
				     unsigned char name_assign_type,
				     void (*setup)(struct net_device *));

int rdma_init_netdev(struct ib_device *device, u32 port_num,
		     enum rdma_netdev_t type, const char *name,
		     unsigned char name_assign_type,
		     void (*setup)(struct net_device *),
		     struct net_device *netdev);
/**
 * rdma_device_to_ibdev - Get ib_device pointer from device pointer
 *
 * @device: device pointer for which ib_device pointer to retrieve
 *
 * rdma_device_to_ibdev() retrieves ib_device pointer from device.
 */
static inline struct ib_device *rdma_device_to_ibdev(struct device *device)
{
	struct ib_core_device *coredev =
		container_of(device, struct ib_core_device, dev);

	return coredev->owner;
}
/**
 * ibdev_to_node - return the NUMA node for a given ib_device
 * @ibdev: device to get the NUMA node for.
 */
static inline int ibdev_to_node(struct ib_device *ibdev)
{
	struct device *parent = ibdev->dev.parent;

	if (!parent)
		return NUMA_NO_NODE;
	return dev_to_node(parent);
}
/**
 * rdma_device_to_drv_device - Helper macro to reach back to driver's
 *			       ib_device holder structure from device pointer.
 *
 * NOTE: New drivers should not make use of this API; This API is only for
 * existing drivers who have exposed sysfs entries using
 * ops->device_group.
 */
#define rdma_device_to_drv_device(dev, drv_dev_struct, ibdev_member)           \
	container_of(rdma_device_to_ibdev(dev), drv_dev_struct, ibdev_member)

bool rdma_dev_access_netns(const struct ib_device *device,
			   const struct net *net);
#define IB_ROCE_UDP_ENCAP_VALID_PORT_MIN (0xC000)
#define IB_ROCE_UDP_ENCAP_VALID_PORT_MAX (0xFFFF)
#define IB_GRH_FLOWLABEL_MASK            (0x000FFFFF)

/**
 * rdma_flow_label_to_udp_sport - generate a RoCE v2 UDP src port value based
 *                                on the flow label
 *
 * This function will convert the 20 bit flow_label input to a valid RoCE v2
 * UDP src port 14 bit value. All RoCE V2 drivers should use this same
 * convention.
 */
static inline u16 rdma_flow_label_to_udp_sport(u32 fl)
{
	u32 fl_low = fl & 0x03fff, fl_high = fl & 0xFC000;

	fl_low ^= fl_high >> 14;
	return (u16)(fl_low | IB_ROCE_UDP_ENCAP_VALID_PORT_MIN);
}
/**
 * rdma_calc_flow_label - generate an RDMA symmetric flow label value based on
 *                        local and remote qpn values
 *
 * This function folds the multiplication result of the two qpns, 24 bits
 * each, and converts it to a 20 bit result.
 *
 * This function will create a symmetric flow_label value based on the local
 * and remote qpn values. This will allow both the requester and responder
 * to calculate the same flow_label for a given connection.
 *
 * This helper function should be used by drivers in case the upper layer
 * provides a zero flow_label value. This is to improve entropy of RDMA
 * traffic in the network.
 */
static inline u32 rdma_calc_flow_label(u32 lqpn, u32 rqpn)
{
	u64 v = (u64)lqpn * rqpn;

	v ^= v >> 20;
	v ^= v >> 40;

	return (u32)(v & IB_GRH_FLOWLABEL_MASK);
}
/**
 * rdma_get_udp_sport - Calculate and set UDP source port based on the flow
 *                      label. If flow label is not defined in GRH then
 *                      calculate it based on lqpn/rqpn.
 *
 * @fl:                 flow label from GRH
 * @lqpn:               local qp number
 * @rqpn:               remote qp number
 */
static inline u16 rdma_get_udp_sport(u32 fl, u32 lqpn, u32 rqpn)
{
	if (!fl)
		fl = rdma_calc_flow_label(lqpn, rqpn);

	return rdma_flow_label_to_udp_sport(fl);
}
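/*
 * Illustrative (non-normative) usage sketch: a RoCE v2 driver deriving the
 * UDP source port for a connection, falling back to the qpn-based flow label
 * when the GRH carries none. The qpn variables are placeholders.
 *
 *	udp_sport = rdma_get_udp_sport(grh->flow_label, local_qpn, remote_qpn);
 */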
const struct ib_port_immutable*
ib_port_immutable_read(struct ib_device *dev, unsigned int port);
/** ib_add_sub_device - Add a sub IB device on an existing one
 *
 * @parent: The IB device that needs to add a sub device
 * @type: The type of the new sub device
 * @name: The name of the new sub device
 *
 * Return 0 on success, an error code otherwise
 */
int ib_add_sub_device(struct ib_device *parent,
		      enum rdma_nl_dev_type type,
		      const char *name);

/** ib_del_sub_device_and_put - Delete an IB sub device while holding a 'get'
 *
 * @sub: The sub device that is going to be deleted
 *
 * Return 0 on success, an error code otherwise
 */
int ib_del_sub_device_and_put(struct ib_device *sub);
static inline void ib_mark_name_assigned_by_user(struct ib_device *ibdev)
{
	ibdev->name_assign_type = RDMA_NAME_ASSIGN_TYPE_USER;
}

#endif /* IB_VERBS_H */