/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
 * Copyright (c) 2004 Mellanox Technologies Ltd.  All rights reserved.
 * Copyright (c) 2004 Infinicon Corporation.  All rights reserved.
 * Copyright (c) 2004 Intel Corporation.  All rights reserved.
 * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
 * Copyright (c) 2004 Voltaire Corporation.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems.  All rights reserved.
 */
#include <linux/types.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/rwsem.h>
#include <linux/workqueue.h>
#include <linux/irq_poll.h>
#include <uapi/linux/if_ether.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#include <linux/refcount.h>
#include <linux/if_link.h>
#include <linux/atomic.h>
#include <linux/mmu_notifier.h>
#include <linux/uaccess.h>
#include <linux/cgroup_rdma.h>
#include <linux/irqflags.h>
#include <linux/preempt.h>
#include <linux/dim.h>
#include <uapi/rdma/ib_user_verbs.h>
#include <rdma/rdma_counter.h>
#include <rdma/restrack.h>
#include <rdma/signature.h>
#include <uapi/rdma/rdma_user_ioctl.h>
#include <uapi/rdma/ib_user_ioctl_verbs.h>

#define IB_FW_VERSION_NAME_MAX	ETHTOOL_FWVERS_LEN
struct ib_usrq_object;
extern struct workqueue_struct *ib_wq;
extern struct workqueue_struct *ib_comp_wq;
extern struct workqueue_struct *ib_comp_unbound_wq;
void ibdev_printk(const char *level, const struct ib_device *ibdev,
		  const char *format, ...);
void ibdev_emerg(const struct ib_device *ibdev, const char *format, ...);
void ibdev_alert(const struct ib_device *ibdev, const char *format, ...);
void ibdev_crit(const struct ib_device *ibdev, const char *format, ...);
void ibdev_err(const struct ib_device *ibdev, const char *format, ...);
void ibdev_warn(const struct ib_device *ibdev, const char *format, ...);
void ibdev_notice(const struct ib_device *ibdev, const char *format, ...);
void ibdev_info(const struct ib_device *ibdev, const char *format, ...);
#if defined(CONFIG_DYNAMIC_DEBUG) || \
	(defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE))
#define ibdev_dbg(__dev, format, args...)			\
	dynamic_ibdev_dbg(__dev, format, ##args)
#else
__printf(2, 3) __cold
static inline
void ibdev_dbg(const struct ib_device *ibdev, const char *format, ...) {}
#endif
#define ibdev_level_ratelimited(ibdev_level, ibdev, fmt, ...)		\
do {									\
	static DEFINE_RATELIMIT_STATE(_rs,				\
				      DEFAULT_RATELIMIT_INTERVAL,	\
				      DEFAULT_RATELIMIT_BURST);		\
	if (__ratelimit(&_rs))						\
		ibdev_level(ibdev, fmt, ##__VA_ARGS__);			\
} while (0)
#define ibdev_emerg_ratelimited(ibdev, fmt, ...) \
	ibdev_level_ratelimited(ibdev_emerg, ibdev, fmt, ##__VA_ARGS__)
#define ibdev_alert_ratelimited(ibdev, fmt, ...) \
	ibdev_level_ratelimited(ibdev_alert, ibdev, fmt, ##__VA_ARGS__)
#define ibdev_crit_ratelimited(ibdev, fmt, ...) \
	ibdev_level_ratelimited(ibdev_crit, ibdev, fmt, ##__VA_ARGS__)
#define ibdev_err_ratelimited(ibdev, fmt, ...) \
	ibdev_level_ratelimited(ibdev_err, ibdev, fmt, ##__VA_ARGS__)
#define ibdev_warn_ratelimited(ibdev, fmt, ...) \
	ibdev_level_ratelimited(ibdev_warn, ibdev, fmt, ##__VA_ARGS__)
#define ibdev_notice_ratelimited(ibdev, fmt, ...) \
	ibdev_level_ratelimited(ibdev_notice, ibdev, fmt, ##__VA_ARGS__)
#define ibdev_info_ratelimited(ibdev, fmt, ...) \
	ibdev_level_ratelimited(ibdev_info, ibdev, fmt, ##__VA_ARGS__)
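/*
 * Usage sketch (illustrative only, not part of the original header): a
 * driver can report a recurring error condition without flooding the
 * kernel log; "dev" and "cqn" are hypothetical.
 */
static inline void example_report_cq_overrun(struct ib_device *dev, u32 cqn)
{
	/* emits at most DEFAULT_RATELIMIT_BURST messages per interval */
	ibdev_warn_ratelimited(dev, "CQE overrun on CQ %u\n", cqn);
}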
#if defined(CONFIG_DYNAMIC_DEBUG) || \
	(defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE))
/* descriptor check is first to prevent flooding with "callbacks suppressed" */
#define ibdev_dbg_ratelimited(ibdev, fmt, ...)				\
do {									\
	static DEFINE_RATELIMIT_STATE(_rs,				\
				      DEFAULT_RATELIMIT_INTERVAL,	\
				      DEFAULT_RATELIMIT_BURST);		\
	DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt);			\
	if (DYNAMIC_DEBUG_BRANCH(descriptor) && __ratelimit(&_rs))	\
		__dynamic_ibdev_dbg(&descriptor, ibdev, fmt,		\
				    ##__VA_ARGS__);			\
} while (0)
#else
__printf(2, 3) __cold
static inline
void ibdev_dbg_ratelimited(const struct ib_device *ibdev,
			   const char *format, ...) {}
#endif
union ib_gid {
	u8	raw[16];
	struct {
		__be64	subnet_prefix;
		__be64	interface_id;
	} global;
};

extern union ib_gid zgid;
enum ib_gid_type {
	IB_GID_TYPE_IB = IB_UVERBS_GID_TYPE_IB,
	IB_GID_TYPE_ROCE = IB_UVERBS_GID_TYPE_ROCE_V1,
	IB_GID_TYPE_ROCE_UDP_ENCAP = IB_UVERBS_GID_TYPE_ROCE_V2,
};

#define ROCE_V2_UDP_DPORT	4791
struct ib_gid_attr {
	struct net_device __rcu	*ndev;
	struct ib_device	*device;
	enum ib_gid_type	gid_type;
};
enum {
	/* set the local administered indication */
	IB_SA_WELL_KNOWN_GUID	= BIT_ULL(57) | 2,
};
enum rdma_transport_type {
	RDMA_TRANSPORT_IWARP,
	RDMA_TRANSPORT_USNIC,
	RDMA_TRANSPORT_USNIC_UDP,
	RDMA_TRANSPORT_UNSPECIFIED,
};
enum rdma_protocol_type {
	RDMA_PROTOCOL_USNIC_UDP
};
__attribute_const__ enum rdma_transport_type
rdma_node_get_transport(unsigned int node_type);
enum rdma_network_type {
	RDMA_NETWORK_ROCE_V1,
};
static inline enum ib_gid_type ib_network_to_gid_type(enum rdma_network_type network_type)
{
	if (network_type == RDMA_NETWORK_IPV4 ||
	    network_type == RDMA_NETWORK_IPV6)
		return IB_GID_TYPE_ROCE_UDP_ENCAP;
	else if (network_type == RDMA_NETWORK_ROCE_V1)
		return IB_GID_TYPE_ROCE;
	else
		return IB_GID_TYPE_IB;
}
static inline enum rdma_network_type
rdma_gid_attr_network_type(const struct ib_gid_attr *attr)
{
	if (attr->gid_type == IB_GID_TYPE_IB)
		return RDMA_NETWORK_IB;

	if (attr->gid_type == IB_GID_TYPE_ROCE)
		return RDMA_NETWORK_ROCE_V1;

	if (ipv6_addr_v4mapped((struct in6_addr *)&attr->gid))
		return RDMA_NETWORK_IPV4;
	else
		return RDMA_NETWORK_IPV6;
}
enum rdma_link_layer {
	IB_LINK_LAYER_UNSPECIFIED,
	IB_LINK_LAYER_INFINIBAND,
	IB_LINK_LAYER_ETHERNET,
};
enum ib_device_cap_flags {
	IB_DEVICE_RESIZE_MAX_WR			= (1 << 0),
	IB_DEVICE_BAD_PKEY_CNTR			= (1 << 1),
	IB_DEVICE_BAD_QKEY_CNTR			= (1 << 2),
	IB_DEVICE_RAW_MULTI			= (1 << 3),
	IB_DEVICE_AUTO_PATH_MIG			= (1 << 4),
	IB_DEVICE_CHANGE_PHY_PORT		= (1 << 5),
	IB_DEVICE_UD_AV_PORT_ENFORCE		= (1 << 6),
	IB_DEVICE_CURR_QP_STATE_MOD		= (1 << 7),
	IB_DEVICE_SHUTDOWN_PORT			= (1 << 8),
	/* Not in use, former INIT_TYPE		= (1 << 9),*/
	IB_DEVICE_PORT_ACTIVE_EVENT		= (1 << 10),
	IB_DEVICE_SYS_IMAGE_GUID		= (1 << 11),
	IB_DEVICE_RC_RNR_NAK_GEN		= (1 << 12),
	IB_DEVICE_SRQ_RESIZE			= (1 << 13),
	IB_DEVICE_N_NOTIFY_CQ			= (1 << 14),

	/*
	 * This device supports a per-device lkey or stag that can be
	 * used without performing a memory registration for the local
	 * memory.  Note that ULPs should never check this flag, but
	 * instead use the local_dma_lkey flag in the ib_pd structure,
	 * which will always contain a usable lkey.
	 */
	IB_DEVICE_LOCAL_DMA_LKEY		= (1 << 15),
	/* Reserved, old SEND_W_INV		= (1 << 16),*/
	IB_DEVICE_MEM_WINDOW			= (1 << 17),
	/*
	 * Devices should set IB_DEVICE_UD_IP_SUM if they support
	 * insertion of UDP and TCP checksum on outgoing UD IPoIB
	 * messages and can verify the validity of checksum for
	 * incoming messages.  Setting this flag implies that the
	 * IPoIB driver may set NETIF_F_IP_CSUM for datagram mode.
	 */
	IB_DEVICE_UD_IP_CSUM			= (1 << 18),
	IB_DEVICE_UD_TSO			= (1 << 19),
	IB_DEVICE_XRC				= (1 << 20),

	/*
	 * This device supports the IB "base memory management extension",
	 * which includes support for fast registrations (IB_WR_REG_MR,
	 * IB_WR_LOCAL_INV and IB_WR_SEND_WITH_INV verbs).  This flag should
	 * also be set by any iWarp device which must support FRs to comply
	 * to the iWarp verbs spec.  iWarp devices also support the
	 * IB_WR_RDMA_READ_WITH_INV verb for RDMA READs that invalidate the
	 * stag.
	 */
	IB_DEVICE_MEM_MGT_EXTENSIONS		= (1 << 21),
	IB_DEVICE_BLOCK_MULTICAST_LOOPBACK	= (1 << 22),
	IB_DEVICE_MEM_WINDOW_TYPE_2A		= (1 << 23),
	IB_DEVICE_MEM_WINDOW_TYPE_2B		= (1 << 24),
	IB_DEVICE_RC_IP_CSUM			= (1 << 25),
	/* Deprecated. Please use IB_RAW_PACKET_CAP_IP_CSUM. */
	IB_DEVICE_RAW_IP_CSUM			= (1 << 26),
	/*
	 * Devices should set IB_DEVICE_CROSS_CHANNEL if they
	 * support execution of WQEs that involve synchronization
	 * of I/O operations with single completion queue managed
	 * by hardware.
	 */
	IB_DEVICE_CROSS_CHANNEL			= (1 << 27),
	IB_DEVICE_MANAGED_FLOW_STEERING		= (1 << 29),
	IB_DEVICE_INTEGRITY_HANDOVER		= (1 << 30),
	IB_DEVICE_ON_DEMAND_PAGING		= (1ULL << 31),
	IB_DEVICE_SG_GAPS_REG			= (1ULL << 32),
	IB_DEVICE_VIRTUAL_FUNCTION		= (1ULL << 33),
	/* Deprecated. Please use IB_RAW_PACKET_CAP_SCATTER_FCS. */
	IB_DEVICE_RAW_SCATTER_FCS		= (1ULL << 34),
	IB_DEVICE_RDMA_NETDEV_OPA		= (1ULL << 35),
	/* The device supports padding incoming writes to cacheline. */
	IB_DEVICE_PCI_WRITE_END_PADDING		= (1ULL << 36),
	IB_DEVICE_ALLOW_USER_UNREG		= (1ULL << 37),
};
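/*
 * Illustrative sketch (not part of the original header): ULPs test these
 * bits against the device_cap_flags member of struct ib_device_attr, e.g.
 *
 *	if (attrs->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS)
 *		... fast memory registration is available ...
 */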
enum ib_odp_general_cap_bits {
	IB_ODP_SUPPORT		= 1 << 0,
	IB_ODP_SUPPORT_IMPLICIT = 1 << 1,
};
enum ib_odp_transport_cap_bits {
	IB_ODP_SUPPORT_SEND	= 1 << 0,
	IB_ODP_SUPPORT_RECV	= 1 << 1,
	IB_ODP_SUPPORT_WRITE	= 1 << 2,
	IB_ODP_SUPPORT_READ	= 1 << 3,
	IB_ODP_SUPPORT_ATOMIC	= 1 << 4,
	IB_ODP_SUPPORT_SRQ_RECV	= 1 << 5,
};
struct ib_odp_caps {
	uint64_t general_caps;
	struct {
		uint32_t rc_odp_caps;
		uint32_t uc_odp_caps;
		uint32_t ud_odp_caps;
		uint32_t xrc_odp_caps;
	} per_transport_caps;
};
struct ib_rss_caps {
	/* Corresponding bit will be set if qp type from
	 * 'enum ib_qp_type' is supported, e.g.
	 * supported_qpts |= 1 << IB_QPT_UD
	 */
	u32 supported_qpts;
	u32 max_rwq_indirection_tables;
	u32 max_rwq_indirection_table_size;
};
enum ib_tm_cap_flags {
	/* Support tag matching with rendezvous offload for RC transport */
	IB_TM_CAP_RNDV_RC = 1 << 0,
};
struct ib_tm_caps {
	/* Max size of RNDV header */
	u32 max_rndv_hdr_size;
	/* Max number of entries in tag matching list */
	u32 max_num_tags;
	/* From enum ib_tm_cap_flags */
	u32 flags;
	/* Max number of outstanding list operations */
	u32 max_ops;
	/* Max number of SGE in tag matching entry */
	u32 max_sge;
};
struct ib_cq_init_attr {
	/* ... */
};
enum ib_cq_attr_mask {
	IB_CQ_MODERATE = 1 << 0,
};
struct ib_cq_caps {
	u16 max_cq_moderation_count;
	u16 max_cq_moderation_period;
};
struct ib_dm_mr_attr {
	/* ... */
};
struct ib_dm_alloc_attr {
	/* ... */
};
struct ib_device_attr {
	__be64			sys_image_guid;
	u64			device_cap_flags;
	int			max_qp_init_rd_atom;
	int			max_ee_init_rd_atom;
	enum ib_atomic_cap	atomic_cap;
	enum ib_atomic_cap	masked_atomic_cap;
	int			max_mcast_qp_attach;
	int			max_total_mcast_qp_attach;
	unsigned int		max_fast_reg_page_list_len;
	unsigned int		max_pi_fast_reg_page_list_len;
	u8			local_ca_ack_delay;
	struct ib_odp_caps	odp_caps;
	uint64_t		timestamp_mask;
	uint64_t		hca_core_clock; /* in KHZ */
	struct ib_rss_caps	rss_caps;
	u32			raw_packet_caps; /* Use ib_raw_packet_caps enum */
	struct ib_tm_caps	tm_caps;
	struct ib_cq_caps	cq_caps;
	/* Max entries for sgl for optimized performance per READ */
	u32			max_sgl_rd;
};
static inline int ib_mtu_enum_to_int(enum ib_mtu mtu)
{
	switch (mtu) {
	case IB_MTU_256:  return  256;
	case IB_MTU_512:  return  512;
	case IB_MTU_1024: return 1024;
	case IB_MTU_2048: return 2048;
	case IB_MTU_4096: return 4096;
	default:	  return -1;
	}
}
static inline enum ib_mtu ib_mtu_int_to_enum(int mtu)
{
	if (mtu >= 4096)
		return IB_MTU_4096;
	else if (mtu >= 2048)
		return IB_MTU_2048;
	else if (mtu >= 1024)
		return IB_MTU_1024;
	else if (mtu >= 512)
		return IB_MTU_512;
	else
		return IB_MTU_256;
}
static inline int opa_mtu_enum_to_int(enum opa_mtu mtu)
{
	switch (mtu) {
	case OPA_MTU_8192:
		return 8192;
	case OPA_MTU_10240:
		return 10240;
	default:
		return(ib_mtu_enum_to_int((enum ib_mtu)mtu));
	}
}
static inline enum opa_mtu opa_mtu_int_to_enum(int mtu)
{
	if (mtu >= 10240)
		return OPA_MTU_10240;
	else if (mtu >= 8192)
		return OPA_MTU_8192;
	else
		return ((enum opa_mtu)ib_mtu_int_to_enum(mtu));
}
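/*
 * Usage sketch (illustrative only, not part of the original header):
 * clamping an arbitrary byte count to the largest IB MTU it can legally
 * carry, then converting back to bytes.
 */
static inline int example_usable_mtu(int bytes)
{
	/* e.g. 3000 -> IB_MTU_2048 -> 2048 */
	return ib_mtu_enum_to_int(ib_mtu_int_to_enum(bytes));
}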
enum ib_port_state {
	IB_PORT_ACTIVE_DEFER = 5
};
enum ib_port_phys_state {
	IB_PORT_PHYS_STATE_SLEEP = 1,
	IB_PORT_PHYS_STATE_POLLING = 2,
	IB_PORT_PHYS_STATE_DISABLED = 3,
	IB_PORT_PHYS_STATE_PORT_CONFIGURATION_TRAINING = 4,
	IB_PORT_PHYS_STATE_LINK_UP = 5,
	IB_PORT_PHYS_STATE_LINK_ERROR_RECOVERY = 6,
	IB_PORT_PHYS_STATE_PHY_TEST = 7,
};
static inline int ib_width_enum_to_int(enum ib_port_width width)
{
	switch (width) {
	case IB_WIDTH_1X:  return  1;
	case IB_WIDTH_2X:  return  2;
	case IB_WIDTH_4X:  return  4;
	case IB_WIDTH_8X:  return  8;
	case IB_WIDTH_12X: return 12;
	default:	   return -1;
	}
}
/**
 * struct rdma_hw_stats
 * @lock - Mutex to protect parallel write access to lifespan and values
 *   of counters, which are 64 bits and not guaranteed to be written
 *   atomically on 32-bit systems.
 * @timestamp - Used by the core code to track when the last update was
 * @lifespan - Used by the core code to determine how old the counters
 *   should be before being updated again.  Stored in jiffies, defaults
 *   to 10 milliseconds, drivers can override the default by specifying
 *   their own value during their allocation routine.
 * @name - Array of pointers to static names used for the counters in
 *   the sysfs directory.
 * @num_counters - How many hardware counters there are.  If name is
 *   shorter than this number, a kernel oops will result.  Driver authors
 *   are encouraged to leave BUILD_BUG_ON(ARRAY_SIZE(@name) < num_counters)
 *   in their code to prevent this.
 * @value - Array of u64 counters that are accessed by the sysfs code and
 *   filled in by the drivers get_stats routine
 */
struct rdma_hw_stats {
	struct mutex	lock; /* Protect lifespan and values[] */
	unsigned long	timestamp;
	unsigned long	lifespan;
	const char * const *names;
	int		num_counters;
	u64		value[];
};
#define RDMA_HW_STATS_DEFAULT_LIFESPAN 10
/**
 * rdma_alloc_hw_stats_struct - Helper function to allocate dynamic struct
 *   for drivers.
 * @names - Array of static const char *
 * @num_counters - How many elements in array
 * @lifespan - How many milliseconds between updates
 */
static inline struct rdma_hw_stats *rdma_alloc_hw_stats_struct(
		const char * const *names, int num_counters,
		unsigned long lifespan)
{
	struct rdma_hw_stats *stats;

	stats = kzalloc(sizeof(*stats) + num_counters * sizeof(u64),
			GFP_KERNEL);
	if (!stats)
		return NULL;
	stats->names = names;
	stats->num_counters = num_counters;
	stats->lifespan = msecs_to_jiffies(lifespan);

	return stats;
}
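/*
 * Usage sketch (illustrative only, not part of the original header): a
 * driver's alloc_hw_stats hook could allocate its counter block like
 * this; the counter names are hypothetical.
 *
 *	static const char * const example_names[] = { "rx_pkts", "tx_pkts" };
 *
 *	return rdma_alloc_hw_stats_struct(example_names,
 *					  ARRAY_SIZE(example_names),
 *					  RDMA_HW_STATS_DEFAULT_LIFESPAN);
 */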
/* Define bits for the various functionality this port needs to be supported by
 * the core.
 */
/* Management                           0x00000FFF */
#define RDMA_CORE_CAP_IB_MAD            0x00000001
#define RDMA_CORE_CAP_IB_SMI            0x00000002
#define RDMA_CORE_CAP_IB_CM             0x00000004
#define RDMA_CORE_CAP_IW_CM             0x00000008
#define RDMA_CORE_CAP_IB_SA             0x00000010
#define RDMA_CORE_CAP_OPA_MAD           0x00000020

/* Address format                       0x000FF000 */
#define RDMA_CORE_CAP_AF_IB             0x00001000
#define RDMA_CORE_CAP_ETH_AH            0x00002000
#define RDMA_CORE_CAP_OPA_AH            0x00004000
#define RDMA_CORE_CAP_IB_GRH_REQUIRED   0x00008000

/* Protocol                             0xFFF00000 */
#define RDMA_CORE_CAP_PROT_IB           0x00100000
#define RDMA_CORE_CAP_PROT_ROCE         0x00200000
#define RDMA_CORE_CAP_PROT_IWARP        0x00400000
#define RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP 0x00800000
#define RDMA_CORE_CAP_PROT_RAW_PACKET   0x01000000
#define RDMA_CORE_CAP_PROT_USNIC        0x02000000

#define RDMA_CORE_PORT_IB_GRH_REQUIRED (RDMA_CORE_CAP_IB_GRH_REQUIRED \
					| RDMA_CORE_CAP_PROT_ROCE     \
					| RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP)

#define RDMA_CORE_PORT_IBA_IB          (RDMA_CORE_CAP_PROT_IB  \
					| RDMA_CORE_CAP_IB_MAD \
					| RDMA_CORE_CAP_IB_SMI \
					| RDMA_CORE_CAP_IB_CM  \
					| RDMA_CORE_CAP_IB_SA  \
					| RDMA_CORE_CAP_AF_IB)
#define RDMA_CORE_PORT_IBA_ROCE        (RDMA_CORE_CAP_PROT_ROCE \
					| RDMA_CORE_CAP_IB_MAD  \
					| RDMA_CORE_CAP_IB_CM   \
					| RDMA_CORE_CAP_AF_IB   \
					| RDMA_CORE_CAP_ETH_AH)
#define RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP			\
					(RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP \
					| RDMA_CORE_CAP_IB_MAD  \
					| RDMA_CORE_CAP_IB_CM   \
					| RDMA_CORE_CAP_AF_IB   \
					| RDMA_CORE_CAP_ETH_AH)
#define RDMA_CORE_PORT_IWARP           (RDMA_CORE_CAP_PROT_IWARP \
					| RDMA_CORE_CAP_IW_CM)
#define RDMA_CORE_PORT_INTEL_OPA       (RDMA_CORE_PORT_IBA_IB  \
					| RDMA_CORE_CAP_OPA_MAD)

#define RDMA_CORE_PORT_RAW_PACKET	(RDMA_CORE_CAP_PROT_RAW_PACKET)

#define RDMA_CORE_PORT_USNIC		(RDMA_CORE_CAP_PROT_USNIC)
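/*
 * Illustrative sketch (not part of the original header): a classic IB
 * port would advertise core_cap_flags = RDMA_CORE_PORT_IBA_IB in its
 * ib_port_immutable, after which the core can test single capabilities,
 * e.g. (core_cap_flags & RDMA_CORE_CAP_IB_SA) to decide whether the SA
 * client may be used on that port.
 */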
struct ib_port_attr {
	enum ib_port_state	state;
	enum ib_mtu		active_mtu;
	unsigned int		ip_gids:1;
	/* This is the value from PortInfo CapabilityMask, defined by IBA */
	u32			port_cap_flags;
};
enum ib_device_modify_flags {
	IB_DEVICE_MODIFY_SYS_IMAGE_GUID	= 1 << 0,
	IB_DEVICE_MODIFY_NODE_DESC	= 1 << 1
};
#define IB_DEVICE_NODE_DESC_MAX 64
struct ib_device_modify {
	char	node_desc[IB_DEVICE_NODE_DESC_MAX];
};
enum ib_port_modify_flags {
	IB_PORT_SHUTDOWN	= 1,
	IB_PORT_INIT_TYPE	= (1<<2),
	IB_PORT_RESET_QKEY_CNTR	= (1<<3),
	IB_PORT_OPA_MASK_CHG	= (1<<4)
};
struct ib_port_modify {
	u32	set_port_cap_mask;
	u32	clr_port_cap_mask;
};
enum ib_event_type {
	IB_EVENT_QP_ACCESS_ERR,
	IB_EVENT_PATH_MIG_ERR,
	IB_EVENT_DEVICE_FATAL,
	IB_EVENT_PORT_ACTIVE,
	IB_EVENT_PKEY_CHANGE,
	IB_EVENT_SRQ_LIMIT_REACHED,
	IB_EVENT_QP_LAST_WQE_REACHED,
	IB_EVENT_CLIENT_REREGISTER,
};
const char *__attribute_const__ ib_event_msg(enum ib_event_type event);
struct ib_event {
	struct ib_device	*device;
	enum ib_event_type	event;
};
struct ib_event_handler {
	struct ib_device *device;
	void            (*handler)(struct ib_event_handler *, struct ib_event *);
	struct list_head  list;
};
#define INIT_IB_EVENT_HANDLER(_ptr, _device, _handler)		\
	do {							\
		(_ptr)->device  = _device;			\
		(_ptr)->handler = _handler;			\
		INIT_LIST_HEAD(&(_ptr)->list);			\
	} while (0)
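/*
 * Usage sketch (illustrative only, not part of the original header): a
 * ULP fills in a handler and then registers it with
 * ib_register_event_handler(); "my_event_handler" is hypothetical.
 *
 *	struct ib_event_handler eh;
 *
 *	INIT_IB_EVENT_HANDLER(&eh, device, my_event_handler);
 *	ib_register_event_handler(&eh);
 */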
struct ib_global_route {
	const struct ib_gid_attr *sgid_attr;
};

struct ib_grh {
	__be32		version_tclass_flow;
};
union rdma_network_hdr {
	struct ib_grh ibgrh;
	struct {
		/* The IB spec states that if it's IPv4, the header
		 * is located in the last 20 bytes of the header.
		 */
		u8		reserved[20];
		struct iphdr	roce4grh;
	};
};
#define IB_QPN_MASK	0xFFFFFF

enum {
	IB_MULTICAST_QPN = 0xffffff
};

#define IB_LID_PERMISSIVE	cpu_to_be16(0xFFFF)
#define IB_MULTICAST_LID_BASE	cpu_to_be16(0xC000)
enum ib_rate {
	IB_RATE_PORT_CURRENT = 0,
	IB_RATE_2_5_GBPS = 2,
	IB_RATE_120_GBPS = 10,
	IB_RATE_14_GBPS	 = 11,
	IB_RATE_56_GBPS	 = 12,
	IB_RATE_112_GBPS = 13,
	IB_RATE_168_GBPS = 14,
	IB_RATE_25_GBPS	 = 15,
	IB_RATE_100_GBPS = 16,
	IB_RATE_200_GBPS = 17,
	IB_RATE_300_GBPS = 18,
	IB_RATE_28_GBPS	 = 19,
	IB_RATE_50_GBPS	 = 20,
	IB_RATE_400_GBPS = 21,
	IB_RATE_600_GBPS = 22,
};
/**
 * ib_rate_to_mult - Convert the IB rate enum to a multiple of the
 * base rate of 2.5 Gbit/sec.  For example, IB_RATE_5_GBPS will be
 * converted to 2, since 5 Gbit/sec is 2 * 2.5 Gbit/sec.
 * @rate: rate to convert.
 */
__attribute_const__ int ib_rate_to_mult(enum ib_rate rate);
/**
 * ib_rate_to_mbps - Convert the IB rate enum to Mbps.
 * For example, IB_RATE_2_5_GBPS will be converted to 2500.
 * @rate: rate to convert.
 */
__attribute_const__ int ib_rate_to_mbps(enum ib_rate rate);
/**
 * enum ib_mr_type - memory region type
 * @IB_MR_TYPE_MEM_REG:       memory region that is used for
 *                            normal registration
 * @IB_MR_TYPE_SG_GAPS:       memory region that is capable to
 *                            register any arbitrary sg lists (without
 *                            the normal mr constraints - see
 *                            ib_map_mr_sg)
 * @IB_MR_TYPE_DM:            memory region that is used for device
 *                            memory registration
 * @IB_MR_TYPE_USER:          memory region that is used for the user-space
 *                            application
 * @IB_MR_TYPE_DMA:           memory region that is used for DMA operations
 *                            without address translations (VA=PA)
 * @IB_MR_TYPE_INTEGRITY:     memory region that is used for
 *                            data integrity operations
 */
enum ib_mr_type {
	IB_MR_TYPE_MEM_REG,
	IB_MR_TYPE_SG_GAPS,
	IB_MR_TYPE_DM,
	IB_MR_TYPE_USER,
	IB_MR_TYPE_DMA,
	IB_MR_TYPE_INTEGRITY,
};
enum ib_mr_status_check {
	IB_MR_CHECK_SIG_STATUS = 1,
};
/**
 * struct ib_mr_status - Memory region status container
 *
 * @fail_status: Bitmask of MR checks status. For each
 *     failed check a corresponding status bit is set.
 * @sig_err: Additional info for IB_MR_CHECK_SIG_STATUS
 */
struct ib_mr_status {
	u32		    fail_status;
	struct ib_sig_err   sig_err;
};
/**
 * mult_to_ib_rate - Convert a multiple of 2.5 Gbit/sec to an IB rate
 * enum.
 * @mult: multiple to convert.
 */
__attribute_const__ enum ib_rate mult_to_ib_rate(int mult);
struct rdma_ah_init_attr {
	struct rdma_ah_attr *ah_attr;
	struct net_device *xmit_slave;
};
enum rdma_ah_attr_type {
	RDMA_AH_ATTR_TYPE_UNDEFINED,
	RDMA_AH_ATTR_TYPE_IB,
	RDMA_AH_ATTR_TYPE_ROCE,
	RDMA_AH_ATTR_TYPE_OPA,
};
struct roce_ah_attr {
	/* ... */
};
struct rdma_ah_attr {
	struct ib_global_route	grh;
	enum rdma_ah_attr_type	type;
	union {
		struct ib_ah_attr	ib;
		struct roce_ah_attr	roce;
		struct opa_ah_attr	opa;
	};
};
enum ib_wc_status {
	IB_WC_LOC_EEC_OP_ERR,
	IB_WC_LOC_ACCESS_ERR,
	IB_WC_REM_INV_REQ_ERR,
	IB_WC_REM_ACCESS_ERR,
	IB_WC_RNR_RETRY_EXC_ERR,
	IB_WC_LOC_RDD_VIOL_ERR,
	IB_WC_REM_INV_RD_REQ_ERR,
	IB_WC_INV_EEC_STATE_ERR,
	IB_WC_RESP_TIMEOUT_ERR,
};
const char *__attribute_const__ ib_wc_status_msg(enum ib_wc_status status);
enum ib_wc_opcode {
	IB_WC_SEND = IB_UVERBS_WC_SEND,
	IB_WC_RDMA_WRITE = IB_UVERBS_WC_RDMA_WRITE,
	IB_WC_RDMA_READ = IB_UVERBS_WC_RDMA_READ,
	IB_WC_COMP_SWAP = IB_UVERBS_WC_COMP_SWAP,
	IB_WC_FETCH_ADD = IB_UVERBS_WC_FETCH_ADD,
	IB_WC_BIND_MW = IB_UVERBS_WC_BIND_MW,
	IB_WC_LOCAL_INV = IB_UVERBS_WC_LOCAL_INV,
	IB_WC_LSO = IB_UVERBS_WC_TSO,
	IB_WC_MASKED_COMP_SWAP,
	IB_WC_MASKED_FETCH_ADD,
/*
 * Set value of IB_WC_RECV so consumers can test if a completion is a
 * receive by testing (opcode & IB_WC_RECV).
 */
	IB_WC_RECV			= 1 << 7,
	IB_WC_RECV_RDMA_WITH_IMM
};
= (1<<1),
981 IB_WC_WITH_INVALIDATE
= (1<<2),
982 IB_WC_IP_CSUM_OK
= (1<<3),
983 IB_WC_WITH_SMAC
= (1<<4),
984 IB_WC_WITH_VLAN
= (1<<5),
985 IB_WC_WITH_NETWORK_HDR_TYPE
= (1<<6),
struct ib_wc {
	struct ib_cqe	       *wr_cqe;
	enum ib_wc_status	status;
	enum ib_wc_opcode	opcode;
	u32			invalidate_rkey;
	u8			port_num;	/* valid only for DR SMPs on switches */
	u8			network_hdr_type;
};
enum ib_cq_notify_flags {
	IB_CQ_SOLICITED			= 1 << 0,
	IB_CQ_NEXT_COMP			= 1 << 1,
	IB_CQ_SOLICITED_MASK		= IB_CQ_SOLICITED | IB_CQ_NEXT_COMP,
	IB_CQ_REPORT_MISSED_EVENTS	= 1 << 2,
};
enum ib_srq_type {
	IB_SRQT_BASIC = IB_UVERBS_SRQT_BASIC,
	IB_SRQT_XRC = IB_UVERBS_SRQT_XRC,
	IB_SRQT_TM = IB_UVERBS_SRQT_TM,
};
static inline bool ib_srq_has_cq(enum ib_srq_type srq_type)
{
	return srq_type == IB_SRQT_XRC ||
	       srq_type == IB_SRQT_TM;
}
enum ib_srq_attr_mask {
	IB_SRQ_MAX_WR	= 1 << 0,
	IB_SRQ_LIMIT	= 1 << 1,
};
struct ib_srq_attr {
	/* ... */
};
struct ib_srq_init_attr {
	void		      (*event_handler)(struct ib_event *, void *);
	struct ib_srq_attr	attr;
	enum ib_srq_type	srq_type;

	struct {
		union {
			struct {
				struct ib_xrcd *xrcd;
			} xrc;
		};
	} ext;
};
struct ib_qp_cap {
	u32	max_inline_data;

	/*
	 * Maximum number of rdma_rw_ctx structures in flight at a time.
	 * ib_create_qp() will calculate the right amount of needed WRs
	 * and MRs based on this.
	 */
	u32	max_rdma_ctxs;
};
enum ib_qp_type {
	/*
	 * IB_QPT_SMI and IB_QPT_GSI have to be the first two entries
	 * here (and in that order) since the MAD layer uses them as
	 * indices into a 2-entry table.
	 */
	IB_QPT_SMI,
	IB_QPT_GSI,

	IB_QPT_RC = IB_UVERBS_QPT_RC,
	IB_QPT_UC = IB_UVERBS_QPT_UC,
	IB_QPT_UD = IB_UVERBS_QPT_UD,
	IB_QPT_RAW_ETHERTYPE,
	IB_QPT_RAW_PACKET = IB_UVERBS_QPT_RAW_PACKET,
	IB_QPT_XRC_INI = IB_UVERBS_QPT_XRC_INI,
	IB_QPT_XRC_TGT = IB_UVERBS_QPT_XRC_TGT,
	IB_QPT_DRIVER = IB_UVERBS_QPT_DRIVER,
	/* Reserve a range for qp types internal to the low level driver.
	 * These qp types will not be visible at the IB core layer, so the
	 * IB_QPT_MAX usages should not be affected in the core layer
	 */
	IB_QPT_RESERVED1 = 0x1000,
};
enum ib_qp_create_flags {
	IB_QP_CREATE_IPOIB_UD_LSO		= 1 << 0,
	IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK	=
		IB_UVERBS_QP_CREATE_BLOCK_MULTICAST_LOOPBACK,
	IB_QP_CREATE_CROSS_CHANNEL		= 1 << 2,
	IB_QP_CREATE_MANAGED_SEND		= 1 << 3,
	IB_QP_CREATE_MANAGED_RECV		= 1 << 4,
	IB_QP_CREATE_NETIF_QP			= 1 << 5,
	IB_QP_CREATE_INTEGRITY_EN		= 1 << 6,
	IB_QP_CREATE_NETDEV_USE			= 1 << 7,
	IB_QP_CREATE_SCATTER_FCS		=
		IB_UVERBS_QP_CREATE_SCATTER_FCS,
	IB_QP_CREATE_CVLAN_STRIPPING		=
		IB_UVERBS_QP_CREATE_CVLAN_STRIPPING,
	IB_QP_CREATE_SOURCE_QPN			= 1 << 10,
	IB_QP_CREATE_PCI_WRITE_END_PADDING	=
		IB_UVERBS_QP_CREATE_PCI_WRITE_END_PADDING,
	/* reserve bits 26-31 for low level drivers' internal use */
	IB_QP_CREATE_RESERVED_START		= 1 << 26,
	IB_QP_CREATE_RESERVED_END		= 1 << 31,
};
/*
 * Note: users may not call ib_close_qp or ib_destroy_qp from the event_handler
 * callback to destroy the passed in QP.
 */
struct ib_qp_init_attr {
	/* Consumer's event_handler callback must not block */
	void                  (*event_handler)(struct ib_event *, void *);

	struct ib_cq	       *send_cq;
	struct ib_cq	       *recv_cq;
	struct ib_xrcd	       *xrcd;     /* XRC TGT QPs only */
	struct ib_qp_cap	cap;
	enum ib_sig_type	sq_sig_type;
	enum ib_qp_type		qp_type;

	/*
	 * Only needed for special QP types, or when using the RW API.
	 */
	struct ib_rwq_ind_table *rwq_ind_tbl;
};
struct ib_qp_open_attr {
	void		      (*event_handler)(struct ib_event *, void *);
	enum ib_qp_type		qp_type;
};
enum ib_rnr_timeout {
	IB_RNR_TIMER_655_36 =  0,
	IB_RNR_TIMER_000_01 =  1,
	IB_RNR_TIMER_000_02 =  2,
	IB_RNR_TIMER_000_03 =  3,
	IB_RNR_TIMER_000_04 =  4,
	IB_RNR_TIMER_000_06 =  5,
	IB_RNR_TIMER_000_08 =  6,
	IB_RNR_TIMER_000_12 =  7,
	IB_RNR_TIMER_000_16 =  8,
	IB_RNR_TIMER_000_24 =  9,
	IB_RNR_TIMER_000_32 = 10,
	IB_RNR_TIMER_000_48 = 11,
	IB_RNR_TIMER_000_64 = 12,
	IB_RNR_TIMER_000_96 = 13,
	IB_RNR_TIMER_001_28 = 14,
	IB_RNR_TIMER_001_92 = 15,
	IB_RNR_TIMER_002_56 = 16,
	IB_RNR_TIMER_003_84 = 17,
	IB_RNR_TIMER_005_12 = 18,
	IB_RNR_TIMER_007_68 = 19,
	IB_RNR_TIMER_010_24 = 20,
	IB_RNR_TIMER_015_36 = 21,
	IB_RNR_TIMER_020_48 = 22,
	IB_RNR_TIMER_030_72 = 23,
	IB_RNR_TIMER_040_96 = 24,
	IB_RNR_TIMER_061_44 = 25,
	IB_RNR_TIMER_081_92 = 26,
	IB_RNR_TIMER_122_88 = 27,
	IB_RNR_TIMER_163_84 = 28,
	IB_RNR_TIMER_245_76 = 29,
	IB_RNR_TIMER_327_68 = 30,
	IB_RNR_TIMER_491_52 = 31
};
enum ib_qp_attr_mask {
	IB_QP_CUR_STATE			= (1<<1),
	IB_QP_EN_SQD_ASYNC_NOTIFY	= (1<<2),
	IB_QP_ACCESS_FLAGS		= (1<<3),
	IB_QP_PKEY_INDEX		= (1<<4),
	IB_QP_PORT			= (1<<5),
	IB_QP_QKEY			= (1<<6),
	IB_QP_PATH_MTU			= (1<<8),
	IB_QP_TIMEOUT			= (1<<9),
	IB_QP_RETRY_CNT			= (1<<10),
	IB_QP_RNR_RETRY			= (1<<11),
	IB_QP_RQ_PSN			= (1<<12),
	IB_QP_MAX_QP_RD_ATOMIC		= (1<<13),
	IB_QP_ALT_PATH			= (1<<14),
	IB_QP_MIN_RNR_TIMER		= (1<<15),
	IB_QP_SQ_PSN			= (1<<16),
	IB_QP_MAX_DEST_RD_ATOMIC	= (1<<17),
	IB_QP_PATH_MIG_STATE		= (1<<18),
	IB_QP_CAP			= (1<<19),
	IB_QP_DEST_QPN			= (1<<20),
	IB_QP_RESERVED1			= (1<<21),
	IB_QP_RESERVED2			= (1<<22),
	IB_QP_RESERVED3			= (1<<23),
	IB_QP_RESERVED4			= (1<<24),
	IB_QP_RATE_LIMIT		= (1<<25),
};
struct ib_qp_attr {
	enum ib_qp_state	qp_state;
	enum ib_qp_state	cur_qp_state;
	enum ib_mtu		path_mtu;
	enum ib_mig_state	path_mig_state;
	int			qp_access_flags;
	struct ib_qp_cap	cap;
	struct rdma_ah_attr	ah_attr;
	struct rdma_ah_attr	alt_ah_attr;
	u8			en_sqd_async_notify;
	u8			max_dest_rd_atomic;
	struct net_device	*xmit_slave;
};
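/*
 * Usage sketch (illustrative only, not part of the original header):
 * several mask bits are typically combined in a single ib_modify_qp()
 * call, e.g. when moving a QP to RTS; "psn" is hypothetical.
 *
 *	attr.qp_state = IB_QPS_RTS;
 *	attr.timeout  = 14;
 *	attr.sq_psn   = psn;
 *	ib_modify_qp(qp, &attr, IB_QP_STATE | IB_QP_TIMEOUT | IB_QP_SQ_PSN);
 */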
enum ib_wr_opcode {
	/* These are shared with userspace */
	IB_WR_RDMA_WRITE = IB_UVERBS_WR_RDMA_WRITE,
	IB_WR_RDMA_WRITE_WITH_IMM = IB_UVERBS_WR_RDMA_WRITE_WITH_IMM,
	IB_WR_SEND = IB_UVERBS_WR_SEND,
	IB_WR_SEND_WITH_IMM = IB_UVERBS_WR_SEND_WITH_IMM,
	IB_WR_RDMA_READ = IB_UVERBS_WR_RDMA_READ,
	IB_WR_ATOMIC_CMP_AND_SWP = IB_UVERBS_WR_ATOMIC_CMP_AND_SWP,
	IB_WR_ATOMIC_FETCH_AND_ADD = IB_UVERBS_WR_ATOMIC_FETCH_AND_ADD,
	IB_WR_BIND_MW = IB_UVERBS_WR_BIND_MW,
	IB_WR_LSO = IB_UVERBS_WR_TSO,
	IB_WR_SEND_WITH_INV = IB_UVERBS_WR_SEND_WITH_INV,
	IB_WR_RDMA_READ_WITH_INV = IB_UVERBS_WR_RDMA_READ_WITH_INV,
	IB_WR_LOCAL_INV = IB_UVERBS_WR_LOCAL_INV,
	IB_WR_MASKED_ATOMIC_CMP_AND_SWP =
		IB_UVERBS_WR_MASKED_ATOMIC_CMP_AND_SWP,
	IB_WR_MASKED_ATOMIC_FETCH_AND_ADD =
		IB_UVERBS_WR_MASKED_ATOMIC_FETCH_AND_ADD,

	/* These are kernel only and can not be issued by userspace */
	IB_WR_REG_MR = 0x20,
	IB_WR_REG_MR_INTEGRITY,

	/* reserve values for low level drivers' internal use.
	 * These values will not be used at all in the ib core layer.
	 */
	IB_WR_RESERVED1 = 0xf0,
};
enum ib_send_flags {
	IB_SEND_SIGNALED	= (1<<1),
	IB_SEND_SOLICITED	= (1<<2),
	IB_SEND_INLINE		= (1<<3),
	IB_SEND_IP_CSUM		= (1<<4),

	/* reserve bits 26-31 for low level drivers' internal use */
	IB_SEND_RESERVED_START	= (1 << 26),
	IB_SEND_RESERVED_END	= (1 << 31),
};
struct ib_cqe {
	void (*done)(struct ib_cq *cq, struct ib_wc *wc);
};
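/*
 * Usage sketch (illustrative only, not part of the original header): with
 * the CQE interface a caller embeds struct ib_cqe in its own request and
 * recovers it in the done callback; "my_req" is hypothetical.
 *
 *	struct my_req {
 *		struct ib_cqe	cqe;
 *		...
 *	};
 *
 *	static void my_done(struct ib_cq *cq, struct ib_wc *wc)
 *	{
 *		struct my_req *req =
 *			container_of(wc->wr_cqe, struct my_req, cqe);
 *		...
 *	}
 */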
struct ib_send_wr {
	struct ib_send_wr      *next;
	struct ib_cqe	       *wr_cqe;
	struct ib_sge	       *sg_list;
	enum ib_wr_opcode	opcode;
	u32			invalidate_rkey;
};
struct ib_rdma_wr {
	struct ib_send_wr	wr;
	u64			remote_addr;
	u32			rkey;
};

static inline const struct ib_rdma_wr *rdma_wr(const struct ib_send_wr *wr)
{
	return container_of(wr, struct ib_rdma_wr, wr);
}
struct ib_atomic_wr {
	struct ib_send_wr	wr;
	u64			compare_add_mask;
};

static inline const struct ib_atomic_wr *atomic_wr(const struct ib_send_wr *wr)
{
	return container_of(wr, struct ib_atomic_wr, wr);
}
struct ib_ud_wr {
	struct ib_send_wr	wr;
	u16			pkey_index;	/* valid for GSI only */
	u8			port_num;	/* valid for DR SMPs on switch only */
};

static inline const struct ib_ud_wr *ud_wr(const struct ib_send_wr *wr)
{
	return container_of(wr, struct ib_ud_wr, wr);
}
struct ib_reg_wr {
	struct ib_send_wr	wr;
};

static inline const struct ib_reg_wr *reg_wr(const struct ib_send_wr *wr)
{
	return container_of(wr, struct ib_reg_wr, wr);
}
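/*
 * Usage sketch (illustrative only, not part of the original header): in a
 * driver's post_send path the generic WR is downcast according to its
 * opcode:
 *
 *	if (wr->opcode == IB_WR_RDMA_WRITE) {
 *		const struct ib_rdma_wr *rwr = rdma_wr(wr);
 *		// program rwr->remote_addr / rwr->rkey into the WQE
 *	}
 */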
struct ib_recv_wr {
	struct ib_recv_wr      *next;
	struct ib_cqe	       *wr_cqe;
	struct ib_sge	       *sg_list;
};
enum ib_access_flags {
	IB_ACCESS_LOCAL_WRITE = IB_UVERBS_ACCESS_LOCAL_WRITE,
	IB_ACCESS_REMOTE_WRITE = IB_UVERBS_ACCESS_REMOTE_WRITE,
	IB_ACCESS_REMOTE_READ = IB_UVERBS_ACCESS_REMOTE_READ,
	IB_ACCESS_REMOTE_ATOMIC = IB_UVERBS_ACCESS_REMOTE_ATOMIC,
	IB_ACCESS_MW_BIND = IB_UVERBS_ACCESS_MW_BIND,
	IB_ZERO_BASED = IB_UVERBS_ACCESS_ZERO_BASED,
	IB_ACCESS_ON_DEMAND = IB_UVERBS_ACCESS_ON_DEMAND,
	IB_ACCESS_HUGETLB = IB_UVERBS_ACCESS_HUGETLB,
	IB_ACCESS_RELAXED_ORDERING = IB_UVERBS_ACCESS_RELAXED_ORDERING,

	IB_ACCESS_OPTIONAL = IB_UVERBS_ACCESS_OPTIONAL_RANGE,
	IB_ACCESS_SUPPORTED =
		((IB_ACCESS_HUGETLB << 1) - 1) | IB_ACCESS_OPTIONAL,
};
/*
 * XXX: these are apparently used for ->rereg_user_mr, no idea why they
 * are hidden here instead of a uapi header!
 */
enum ib_mr_rereg_flags {
	IB_MR_REREG_TRANS	= 1,
	IB_MR_REREG_PD		= (1<<1),
	IB_MR_REREG_ACCESS	= (1<<2),
	IB_MR_REREG_SUPPORTED	= ((IB_MR_REREG_ACCESS << 1) - 1)
};
enum rdma_remove_reason {
	/*
	 * Userspace requested uobject deletion or initial try
	 * to remove uobject via cleanup. Call could fail
	 */
	RDMA_REMOVE_DESTROY,
	/* Context deletion. This call should delete the actual object itself */
	RDMA_REMOVE_CLOSE,
	/* Driver is being hot-unplugged. This call should delete the actual object itself */
	RDMA_REMOVE_DRIVER_REMOVE,
	/* uobj is being cleaned-up before being committed */
	RDMA_REMOVE_ABORT,
};
struct ib_rdmacg_object {
#ifdef CONFIG_CGROUP_RDMA
	struct rdma_cgroup	*cg;		/* owner rdma cgroup */
#endif
};
struct ib_ucontext {
	struct ib_device       *device;
	struct ib_uverbs_file  *ufile;

	bool cleanup_retryable;

	struct ib_rdmacg_object	cg_obj;
	/*
	 * Implementation details of the RDMA core, don't use in drivers:
	 */
	struct rdma_restrack_entry res;
	struct xarray mmap_xa;
};
struct ib_uobject {
	u64			user_handle;	/* handle given to us by userspace */
	/* ufile & ucontext owning this object */
	struct ib_uverbs_file  *ufile;
	/* FIXME, save memory: ufile->context == context */
	struct ib_ucontext     *context;	/* associated user context */
	void		       *object;		/* containing object */
	struct list_head	list;		/* link to context's list */
	struct ib_rdmacg_object	cg_obj;		/* rdmacg object */
	int			id;		/* index into kernel idr */
	atomic_t		usecnt;		/* protects exclusive access */
	struct rcu_head		rcu;		/* kfree_rcu() overhead */

	const struct uverbs_api_object *uapi_object;
};
struct ib_udata {
	const void __user *inbuf;
	void __user *outbuf;
};
struct ib_pd {
	struct ib_device       *device;
	struct ib_uobject      *uobject;
	atomic_t		usecnt; /* count all resources */

	u32			unsafe_global_rkey;

	/*
	 * Implementation details of the RDMA core, don't use in drivers:
	 */
	struct ib_mr	       *__internal_mr;
	struct rdma_restrack_entry res;
};
struct ib_xrcd {
	struct ib_device       *device;
	atomic_t		usecnt; /* count all exposed resources */
	struct inode	       *inode;
	struct rw_semaphore	tgt_qps_rwsem;
	struct xarray		tgt_qps;
};
struct ib_ah {
	struct ib_device	*device;
	struct ib_uobject	*uobject;
	const struct ib_gid_attr *sgid_attr;
	enum rdma_ah_attr_type	type;
};
typedef void (*ib_comp_handler)(struct ib_cq *cq, void *cq_context);
enum ib_poll_context {
	IB_POLL_SOFTIRQ,	   /* poll from softirq context */
	IB_POLL_WORKQUEUE,	   /* poll from workqueue */
	IB_POLL_UNBOUND_WORKQUEUE, /* poll from unbound workqueue */
	IB_POLL_LAST_POOL_TYPE = IB_POLL_UNBOUND_WORKQUEUE,

	IB_POLL_DIRECT,		   /* caller context, no hw completions */
};
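/*
 * Usage sketch (illustrative only, not part of the original header): the
 * poll context is fixed when the CQ is allocated, e.g. with the
 * ib_alloc_cq() helper:
 *
 *	cq = ib_alloc_cq(dev, NULL, 128, 0, IB_POLL_SOFTIRQ);
 */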
struct ib_cq {
	struct ib_device       *device;
	struct ib_ucq_object   *uobject;
	ib_comp_handler		comp_handler;
	void		      (*event_handler)(struct ib_event *, void *);
	unsigned int		cqe_used;
	atomic_t		usecnt; /* count number of work queues */
	enum ib_poll_context	poll_ctx;
	struct list_head	pool_entry;
	union {
		struct irq_poll		iop;
		struct work_struct	work;
	};
	struct workqueue_struct *comp_wq;

	/* updated only by trace points */
	ktime_t timestamp;

	unsigned int comp_vector;

	/*
	 * Implementation details of the RDMA core, don't use in drivers:
	 */
	struct rdma_restrack_entry res;
};
struct ib_srq {
	struct ib_device       *device;
	struct ib_usrq_object  *uobject;
	void		      (*event_handler)(struct ib_event *, void *);
	enum ib_srq_type	srq_type;

	struct {
		union {
			struct {
				struct ib_xrcd *xrcd;
			} xrc;
		};
	} ext;
};
enum ib_raw_packet_caps {
	/* Stripping the cvlan from an incoming packet and reporting it in
	 * the matching work completion is supported.
	 */
	IB_RAW_PACKET_CAP_CVLAN_STRIPPING	= (1 << 0),
	/* Scattering the FCS field of an incoming packet to host memory is
	 * supported.
	 */
	IB_RAW_PACKET_CAP_SCATTER_FCS		= (1 << 1),
	/* Checksum offloads are supported (for both send and receive). */
	IB_RAW_PACKET_CAP_IP_CSUM		= (1 << 2),
	/* When a packet is received for an RQ with no receive WQEs, the
	 * packet processing is delayed.
	 */
	IB_RAW_PACKET_CAP_DELAY_DROP		= (1 << 3),
};
enum ib_wq_type {
	IB_WQT_RQ = IB_UVERBS_WQT_RQ,
};
struct ib_wq {
	struct ib_device       *device;
	struct ib_uwq_object   *uobject;
	void		      (*event_handler)(struct ib_event *, void *);
	enum ib_wq_state	state;
	enum ib_wq_type		wq_type;
};
enum ib_wq_flags {
	IB_WQ_FLAGS_CVLAN_STRIPPING	= IB_UVERBS_WQ_FLAGS_CVLAN_STRIPPING,
	IB_WQ_FLAGS_SCATTER_FCS		= IB_UVERBS_WQ_FLAGS_SCATTER_FCS,
	IB_WQ_FLAGS_DELAY_DROP		= IB_UVERBS_WQ_FLAGS_DELAY_DROP,
	IB_WQ_FLAGS_PCI_WRITE_END_PADDING =
			IB_UVERBS_WQ_FLAGS_PCI_WRITE_END_PADDING,
};
struct ib_wq_init_attr {
	enum ib_wq_type	wq_type;
	void	      (*event_handler)(struct ib_event *, void *);
	u32		create_flags; /* Use enum ib_wq_flags */
};
enum ib_wq_attr_mask {
	IB_WQ_STATE		= 1 << 0,
	IB_WQ_CUR_STATE		= 1 << 1,
	IB_WQ_FLAGS		= 1 << 2,
};
struct ib_wq_attr {
	enum ib_wq_state	wq_state;
	enum ib_wq_state	curr_wq_state;
	u32			flags; /* Use enum ib_wq_flags */
	u32			flags_mask; /* Use enum ib_wq_flags */
};
struct ib_rwq_ind_table {
	struct ib_device	*device;
	struct ib_uobject	*uobject;
	u32			log_ind_tbl_size;
	struct ib_wq	       **ind_tbl;
};
struct ib_rwq_ind_table_init_attr {
	u32		log_ind_tbl_size;
	/* Each entry is a pointer to Receive Work Queue */
	struct ib_wq   **ind_tbl;
};
enum port_pkey_state {
	IB_PORT_PKEY_NOT_VALID = 0,
	IB_PORT_PKEY_VALID = 1,
	IB_PORT_PKEY_LISTED = 2,
};
struct ib_qp_security;
struct ib_port_pkey {
	enum port_pkey_state	state;
	struct list_head	qp_list;
	struct list_head	to_error_list;
	struct ib_qp_security  *sec;
};
struct ib_ports_pkeys {
	struct ib_port_pkey	main;
	struct ib_port_pkey	alt;
};
struct ib_qp_security {
	struct ib_device       *dev;
	/* Hold this mutex when changing port and pkey settings. */
	struct mutex		mutex;
	struct ib_ports_pkeys  *ports_pkeys;
	/* A list of all open shared QP handles.  Required to enforce security
	 * properly for all users of a shared QP.
	 */
	struct list_head        shared_qp_list;
	atomic_t		error_list_count;
	struct completion	error_complete;
	int			error_comps_pending;
};
/*
 * @max_write_sge: Maximum SGE elements per RDMA WRITE request.
 * @max_read_sge:  Maximum SGE elements per RDMA READ request.
 */
struct ib_qp {
	struct ib_device       *device;
	struct ib_cq	       *send_cq;
	struct ib_cq	       *recv_cq;
	struct list_head	rdma_mrs;
	struct list_head	sig_mrs;
	struct ib_xrcd	       *xrcd; /* XRC TGT QPs only */
	struct list_head	xrcd_list;

	/* count times opened, mcast attaches, flow attaches */
	atomic_t		usecnt;
	struct list_head	open_list;
	struct ib_qp           *real_qp;
	struct ib_uqp_object   *uobject;
	void                  (*event_handler)(struct ib_event *, void *);
	/* sgid_attrs associated with the AV's */
	const struct ib_gid_attr *av_sgid_attr;
	const struct ib_gid_attr *alt_path_sgid_attr;
	enum ib_qp_type		qp_type;
	struct ib_rwq_ind_table *rwq_ind_tbl;
	struct ib_qp_security  *qp_sec;

	/*
	 * Implementation details of the RDMA core, don't use in drivers:
	 */
	struct rdma_restrack_entry     res;

	/* The counter the qp is bound to */
	struct rdma_counter    *counter;
};
struct ib_dm {
	struct ib_device  *device;
	struct ib_uobject *uobject;
};
struct ib_mr {
	struct ib_device  *device;
	unsigned int	   page_size;
	enum ib_mr_type	   type;
	union {
		struct ib_uobject	*uobject;	/* user */
		struct list_head	qp_entry;	/* FR */
	};

	struct ib_sig_attrs *sig_attrs; /* only for IB_MR_TYPE_INTEGRITY MRs */
	/*
	 * Implementation details of the RDMA core, don't use in drivers:
	 */
	struct rdma_restrack_entry res;
};
*device
;
1815 struct ib_uobject
*uobject
;
1817 enum ib_mw_type type
;
/* Supported steering options */
enum ib_flow_attr_type {
	/* steering according to rule specifications */
	IB_FLOW_ATTR_NORMAL		= 0x0,
	/* default unicast and multicast rule -
	 * receive all Eth traffic which isn't steered to any QP
	 */
	IB_FLOW_ATTR_ALL_DEFAULT	= 0x1,
	/* default multicast rule -
	 * receive all Eth multicast traffic which isn't steered to any QP
	 */
	IB_FLOW_ATTR_MC_DEFAULT		= 0x2,
	/* sniffer rule - receive all port traffic */
	IB_FLOW_ATTR_SNIFFER		= 0x3
};
/* Supported steering header types */
enum ib_flow_spec_type {
	IB_FLOW_SPEC_ETH		= 0x20,
	IB_FLOW_SPEC_IB			= 0x22,
	IB_FLOW_SPEC_IPV4		= 0x30,
	IB_FLOW_SPEC_IPV6		= 0x31,
	IB_FLOW_SPEC_ESP		= 0x34,
	IB_FLOW_SPEC_TCP		= 0x40,
	IB_FLOW_SPEC_UDP		= 0x41,
	IB_FLOW_SPEC_VXLAN_TUNNEL	= 0x50,
	IB_FLOW_SPEC_GRE		= 0x51,
	IB_FLOW_SPEC_MPLS		= 0x60,
	IB_FLOW_SPEC_INNER		= 0x100,
	IB_FLOW_SPEC_ACTION_TAG		= 0x1000,
	IB_FLOW_SPEC_ACTION_DROP	= 0x1001,
	IB_FLOW_SPEC_ACTION_HANDLE	= 0x1002,
	IB_FLOW_SPEC_ACTION_COUNT	= 0x1003,
};
#define IB_FLOW_SPEC_LAYER_MASK		0xF0
#define IB_FLOW_SPEC_SUPPORT_LAYERS	10
enum ib_flow_flags {
	IB_FLOW_ATTR_FLAGS_DONT_TRAP = 1UL << 1, /* Continue match, no steal */
	IB_FLOW_ATTR_FLAGS_EGRESS    = 1UL << 2, /* Egress flow */
	IB_FLOW_ATTR_FLAGS_RESERVED  = 1UL << 3  /* Must be last */
};
struct ib_flow_eth_filter {
	/* ... */
};

struct ib_flow_spec_eth {
	struct ib_flow_eth_filter val;
	struct ib_flow_eth_filter mask;
};
struct ib_flow_ib_filter {
	/* ... */
};

struct ib_flow_spec_ib {
	struct ib_flow_ib_filter val;
	struct ib_flow_ib_filter mask;
};
/* IPv4 header flags */
enum ib_ipv4_flags {
	IB_IPV4_DONT_FRAG = 0x2, /* Don't enable packet fragmentation */
	IB_IPV4_MORE_FRAG = 0X4  /* All fragmented packets except the
				    last have this flag set */
};
struct ib_flow_ipv4_filter {
	/* ... */
};

struct ib_flow_spec_ipv4 {
	struct ib_flow_ipv4_filter val;
	struct ib_flow_ipv4_filter mask;
};
struct ib_flow_ipv6_filter {
	/* ... */
};

struct ib_flow_spec_ipv6 {
	struct ib_flow_ipv6_filter val;
	struct ib_flow_ipv6_filter mask;
};
struct ib_flow_tcp_udp_filter {
	/* ... */
};

struct ib_flow_spec_tcp_udp {
	struct ib_flow_tcp_udp_filter val;
	struct ib_flow_tcp_udp_filter mask;
};
struct ib_flow_tunnel_filter {
	/* ... */
};

/* ib_flow_spec_tunnel describes the Vxlan tunnel
 * the tunnel_id from val has the vni value
 */
struct ib_flow_spec_tunnel {
	struct ib_flow_tunnel_filter  val;
	struct ib_flow_tunnel_filter  mask;
};
struct ib_flow_esp_filter {
	/* ... */
};

struct ib_flow_spec_esp {
	struct ib_flow_esp_filter val;
	struct ib_flow_esp_filter mask;
};
struct ib_flow_gre_filter {
	__be16 c_ks_res0_ver;
	/* ... */
};

struct ib_flow_spec_gre {
	struct ib_flow_gre_filter val;
	struct ib_flow_gre_filter mask;
};
struct ib_flow_mpls_filter {
	/* ... */
};

struct ib_flow_spec_mpls {
	struct ib_flow_mpls_filter val;
	struct ib_flow_mpls_filter mask;
};
struct ib_flow_spec_action_tag {
	enum ib_flow_spec_type	      type;
};
struct ib_flow_spec_action_drop {
	enum ib_flow_spec_type	      type;
};
struct ib_flow_spec_action_handle {
	enum ib_flow_spec_type	      type;
	struct ib_flow_action	     *act;
};
enum ib_counters_description {
	/* ... */
};
struct ib_flow_spec_action_count {
	enum ib_flow_spec_type	type;
	struct ib_counters     *counters;
};
union ib_flow_spec {
	struct ib_flow_spec_eth		eth;
	struct ib_flow_spec_ib		ib;
	struct ib_flow_spec_ipv4	ipv4;
	struct ib_flow_spec_tcp_udp	tcp_udp;
	struct ib_flow_spec_ipv6	ipv6;
	struct ib_flow_spec_tunnel	tunnel;
	struct ib_flow_spec_esp		esp;
	struct ib_flow_spec_gre		gre;
	struct ib_flow_spec_mpls	mpls;
	struct ib_flow_spec_action_tag	flow_tag;
	struct ib_flow_spec_action_drop	drop;
	struct ib_flow_spec_action_handle action;
	struct ib_flow_spec_action_count flow_count;
};
struct ib_flow_attr {
	enum ib_flow_attr_type	type;
	union ib_flow_spec	flows[];
};
struct ib_flow {
	struct ib_device	*device;
	struct ib_uobject	*uobject;
};
enum ib_flow_action_type {
	IB_FLOW_ACTION_UNSPECIFIED,
	IB_FLOW_ACTION_ESP = 1,
};
struct ib_flow_action_attrs_esp_keymats {
	enum ib_uverbs_flow_action_esp_keymat			protocol;
	union {
		struct ib_uverbs_flow_action_esp_keymat_aes_gcm	aes_gcm;
	} keymat;
};
struct ib_flow_action_attrs_esp_replays {
	enum ib_uverbs_flow_action_esp_replay		protocol;
	union {
		struct ib_uverbs_flow_action_esp_replay_bmp	bmp;
	} replay;
};
enum ib_flow_action_attrs_esp_flags {
	/* All user-space flags at the top: Use enum ib_uverbs_flow_action_esp_flags
	 * This is done in order to share the same flags between user-space and
	 * kernel and spare an unnecessary translation.
	 */

	IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED	= 1ULL << 32,
	IB_FLOW_ACTION_ESP_FLAGS_MOD_ESP_ATTRS	= 1ULL << 33,
};
struct ib_flow_spec_list {
	struct ib_flow_spec_list	*next;
	union ib_flow_spec		spec;
};
struct ib_flow_action_attrs_esp {
	struct ib_flow_action_attrs_esp_keymats *keymat;
	struct ib_flow_action_attrs_esp_replays *replay;
	struct ib_flow_spec_list		*encap;
	/* Used only if IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED is enabled.
	 * Value of 0 is a valid value.
	 */
	u32	esn;
	/* Use enum ib_flow_action_attrs_esp_flags */
	u64	flags;
	u64	hard_limit_pkts;
};
struct ib_flow_action {
	struct ib_device		*device;
	struct ib_uobject		*uobject;
	enum ib_flow_action_type	type;
};
enum ib_process_mad_flags {
	IB_MAD_IGNORE_MKEY	= 1,
	IB_MAD_IGNORE_BKEY	= 2,
	IB_MAD_IGNORE_ALL	= IB_MAD_IGNORE_MKEY | IB_MAD_IGNORE_BKEY
};
enum ib_mad_result {
	IB_MAD_RESULT_FAILURE  = 0,      /* (!SUCCESS is the important flag) */
	IB_MAD_RESULT_SUCCESS  = 1 << 0, /* MAD was successfully processed   */
	IB_MAD_RESULT_REPLY    = 1 << 1, /* Reply packet needs to be sent    */
	IB_MAD_RESULT_CONSUMED = 1 << 2  /* Packet consumed: stop processing */
};
struct ib_port_cache {
	struct ib_pkey_cache  *pkey;
	struct ib_gid_table   *gid;
	enum ib_port_state     port_state;
};
struct ib_port_immutable {
	/* ... */
};
struct ib_port_data {
	struct ib_device *ib_dev;

	struct ib_port_immutable immutable;

	spinlock_t pkey_list_lock;
	struct list_head pkey_list;

	struct ib_port_cache cache;

	spinlock_t netdev_lock;
	struct net_device __rcu *netdev;
	struct hlist_node ndev_hash_link;
	struct rdma_port_counter port_counter;
	struct rdma_hw_stats *hw_stats;
};
/* rdma netdev type - specifies protocol type */
enum rdma_netdev_t {
	RDMA_NETDEV_OPA_VNIC,
};
/**
 * struct rdma_netdev - rdma netdev
 * For cases where netstack interfacing is required.
 */
struct rdma_netdev {
	struct ib_device  *hca;

	/*
	 * cleanup function must be specified.
	 * FIXME: This is only used for OPA_VNIC and that usage should be
	 * removed.
	 */
	void (*free_rdma_netdev)(struct net_device *netdev);

	/* control functions */
	void (*set_id)(struct net_device *netdev, int id);
	int (*send)(struct net_device *dev, struct sk_buff *skb,
		    struct ib_ah *address, u32 dqpn);
	int (*attach_mcast)(struct net_device *dev, struct ib_device *hca,
			    union ib_gid *gid, u16 mlid,
			    int set_qkey, u32 qkey);
	int (*detach_mcast)(struct net_device *dev, struct ib_device *hca,
			    union ib_gid *gid, u16 mlid);
};
struct rdma_netdev_alloc_params {
	int (*initialize_rdma_netdev)(struct ib_device *device, u8 port_num,
				      struct net_device *netdev, void *param);
};
struct ib_odp_counters {
	atomic64_t invalidations;
	atomic64_t prefetch;
};
struct ib_counters {
	struct ib_device	*device;
	struct ib_uobject	*uobject;
	/* num of objects attached */
	atomic_t	usecnt;
};
struct ib_counters_read_attr {
	u32	flags; /* use enum ib_read_counters_flags */
};
struct uverbs_attr_bundle;
struct iw_cm_conn_param;
#define INIT_RDMA_OBJ_SIZE(ib_struct, drv_struct, member)                      \
	.size_##ib_struct =                                                    \
		(sizeof(struct drv_struct) +                                   \
		 BUILD_BUG_ON_ZERO(offsetof(struct drv_struct, member)) +      \
		 BUILD_BUG_ON_ZERO(                                            \
			 !__same_type(((struct drv_struct *)NULL)->member,     \
				      struct ib_struct)))

#define rdma_zalloc_drv_obj_gfp(ib_dev, ib_type, gfp)                          \
	((struct ib_type *)kzalloc(ib_dev->ops.size_##ib_type, gfp))

#define rdma_zalloc_drv_obj(ib_dev, ib_type)                                   \
	rdma_zalloc_drv_obj_gfp(ib_dev, ib_type, GFP_KERNEL)

#define DECLARE_RDMA_OBJ_SIZE(ib_struct) size_t size_##ib_struct
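/*
 * Usage sketch (illustrative only, not part of the original header): a
 * driver embeds the core object as the first member of its own structure
 * and reports the combined size through INIT_RDMA_OBJ_SIZE; "mydrv_pd"
 * and "pdn" are hypothetical.
 *
 *	struct mydrv_pd {
 *		struct ib_pd	ibpd;	// must be at offset 0
 *		u32		pdn;
 *	};
 *
 *	static const struct ib_device_ops mydrv_dev_ops = {
 *		...
 *		INIT_RDMA_OBJ_SIZE(ib_pd, mydrv_pd, ibpd),
 *	};
 *
 * The core then uses rdma_zalloc_drv_obj(ib_dev, ib_pd) to allocate the
 * driver-sized object while only knowing the ib_pd type.
 */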
struct rdma_user_mmap_entry {
	struct ib_ucontext *ucontext;
	unsigned long start_pgoff;
	bool driver_removed;
};

/* Return the offset (in bytes) the user should pass to libc's mmap() */
static inline u64
rdma_user_mmap_get_offset(const struct rdma_user_mmap_entry *entry)
{
	return (u64)entry->start_pgoff << PAGE_SHIFT;
}
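/*
 * Usage sketch (illustrative only, not part of the original header): a
 * driver returns this offset to userspace (e.g. in a create-context
 * response), the user library passes it straight to mmap(2), and the core
 * can then locate the entry again in mmap_xa by page offset.
 */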
/**
 * struct ib_device_ops - InfiniBand device operations
 * This structure defines all the InfiniBand device operations, providers will
 * need to define the supported operations, otherwise they will be set to null.
 */
struct ib_device_ops {
	struct module *owner;
	enum rdma_driver_id driver_id;
	unsigned int uverbs_no_driver_id_binding:1;
	int (*post_send)(struct ib_qp *qp, const struct ib_send_wr *send_wr,
			 const struct ib_send_wr **bad_send_wr);
	int (*post_recv)(struct ib_qp *qp, const struct ib_recv_wr *recv_wr,
			 const struct ib_recv_wr **bad_recv_wr);
	void (*drain_rq)(struct ib_qp *qp);
	void (*drain_sq)(struct ib_qp *qp);
	int (*poll_cq)(struct ib_cq *cq, int num_entries, struct ib_wc *wc);
	int (*peek_cq)(struct ib_cq *cq, int wc_cnt);
	int (*req_notify_cq)(struct ib_cq *cq, enum ib_cq_notify_flags flags);
	int (*req_ncomp_notif)(struct ib_cq *cq, int wc_cnt);
	int (*post_srq_recv)(struct ib_srq *srq,
			     const struct ib_recv_wr *recv_wr,
			     const struct ib_recv_wr **bad_recv_wr);
	int (*process_mad)(struct ib_device *device, int process_mad_flags,
			   u8 port_num, const struct ib_wc *in_wc,
			   const struct ib_grh *in_grh,
			   const struct ib_mad *in_mad, struct ib_mad *out_mad,
			   size_t *out_mad_size, u16 *out_mad_pkey_index);
	int (*query_device)(struct ib_device *device,
			    struct ib_device_attr *device_attr,
			    struct ib_udata *udata);
	int (*modify_device)(struct ib_device *device, int device_modify_mask,
			     struct ib_device_modify *device_modify);
	void (*get_dev_fw_str)(struct ib_device *device, char *str);
	const struct cpumask *(*get_vector_affinity)(struct ib_device *ibdev,
						     int comp_vector);
	int (*query_port)(struct ib_device *device, u8 port_num,
			  struct ib_port_attr *port_attr);
	int (*modify_port)(struct ib_device *device, u8 port_num,
			   int port_modify_mask,
			   struct ib_port_modify *port_modify);
	/*
	 * The following mandatory functions are used only at device
	 * registration.  Keep functions such as these at the end of this
	 * structure to avoid cache line misses when accessing struct ib_device
	 * in fast paths.
	 */
	int (*get_port_immutable)(struct ib_device *device, u8 port_num,
				  struct ib_port_immutable *immutable);
	enum rdma_link_layer (*get_link_layer)(struct ib_device *device,
					       u8 port_num);
	/*
	 * When calling get_netdev, the HW vendor's driver should return the
	 * net device of device @device at port @port_num or NULL if such
	 * a net device doesn't exist. The vendor driver should call dev_hold
	 * on this net device. The HW vendor's device driver must guarantee
	 * that this function returns NULL before the net device has finished
	 * NETDEV_UNREGISTER state.
	 */
	struct net_device *(*get_netdev)(struct ib_device *device, u8 port_num);
	/*
	 * rdma netdev operation
	 *
	 * Driver implementing alloc_rdma_netdev or rdma_netdev_get_params
	 * must return -EOPNOTSUPP if it doesn't support the specified type.
	 */
	struct net_device *(*alloc_rdma_netdev)(
		struct ib_device *device, u8 port_num, enum rdma_netdev_t type,
		const char *name, unsigned char name_assign_type,
		void (*setup)(struct net_device *));

	int (*rdma_netdev_get_params)(struct ib_device *device, u8 port_num,
				      enum rdma_netdev_t type,
				      struct rdma_netdev_alloc_params *params);
	/*
	 * query_gid should return the GID value for @device, when @port_num's
	 * link layer is either IB or iWarp. It is a no-op if @port_num port
	 * is RoCE link layer.
	 */
	int (*query_gid)(struct ib_device *device, u8 port_num, int index,
			 union ib_gid *gid);
	/*
	 * When calling add_gid, the HW vendor's driver should add the gid
	 * of device of port at gid index available at @attr. Meta-info of
	 * that gid (for example, the network device related to this gid) is
	 * available at @attr. @context allows the HW vendor driver to store
	 * extra information together with a GID entry. The HW vendor driver may
	 * allocate memory to contain this information and store it in @context
	 * when a new GID entry is written to. Params are consistent until the
	 * next call of add_gid or delete_gid. The function should return 0 on
	 * success or error otherwise. The function could be called
	 * concurrently for different ports. This function is only called when
	 * roce_gid_table is used.
	 */
	int (*add_gid)(const struct ib_gid_attr *attr, void **context);
	/*
	 * When calling del_gid, the HW vendor's driver should delete the
	 * gid of device @device at gid index gid_index of port port_num
	 * available in @attr.
	 * Upon the deletion of a GID entry, the HW vendor must free any
	 * allocated memory. The caller will clear @context afterwards.
	 * This function is only called when roce_gid_table is used.
	 */
	int (*del_gid)(const struct ib_gid_attr *attr, void **context);
	int (*query_pkey)(struct ib_device *device, u8 port_num, u16 index,
			  u16 *pkey);
	int (*alloc_ucontext)(struct ib_ucontext *context,
			      struct ib_udata *udata);
	void (*dealloc_ucontext)(struct ib_ucontext *context);
	int (*mmap)(struct ib_ucontext *context, struct vm_area_struct *vma);
	/*
	 * This will be called once refcount of an entry in mmap_xa reaches
	 * zero. The type of the memory that was mapped may differ between
	 * entries and is opaque to the rdma_user_mmap interface.
	 * Therefore needs to be implemented by the driver in mmap_free.
	 */
	void (*mmap_free)(struct rdma_user_mmap_entry *entry);
	void (*disassociate_ucontext)(struct ib_ucontext *ibcontext);
	int (*alloc_pd)(struct ib_pd *pd, struct ib_udata *udata);
	int (*dealloc_pd)(struct ib_pd *pd, struct ib_udata *udata);
	int (*create_ah)(struct ib_ah *ah, struct rdma_ah_init_attr *attr,
			 struct ib_udata *udata);
	int (*modify_ah)(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
	int (*query_ah)(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
	int (*destroy_ah)(struct ib_ah *ah, u32 flags);
	int (*create_srq)(struct ib_srq *srq,
			  struct ib_srq_init_attr *srq_init_attr,
			  struct ib_udata *udata);
	int (*modify_srq)(struct ib_srq *srq, struct ib_srq_attr *srq_attr,
			  enum ib_srq_attr_mask srq_attr_mask,
			  struct ib_udata *udata);
	int (*query_srq)(struct ib_srq *srq, struct ib_srq_attr *srq_attr);
	int (*destroy_srq)(struct ib_srq *srq, struct ib_udata *udata);
	struct ib_qp *(*create_qp)(struct ib_pd *pd,
				   struct ib_qp_init_attr *qp_init_attr,
				   struct ib_udata *udata);
	int (*modify_qp)(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
			 int qp_attr_mask, struct ib_udata *udata);
	int (*query_qp)(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
			int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr);
	int (*destroy_qp)(struct ib_qp *qp, struct ib_udata *udata);
	int (*create_cq)(struct ib_cq *cq, const struct ib_cq_init_attr *attr,
			 struct ib_udata *udata);
	int (*modify_cq)(struct ib_cq *cq, u16 cq_count, u16 cq_period);
	int (*destroy_cq)(struct ib_cq *cq, struct ib_udata *udata);
	int (*resize_cq)(struct ib_cq *cq, int cqe, struct ib_udata *udata);
	struct ib_mr *(*get_dma_mr)(struct ib_pd *pd, int mr_access_flags);
	struct ib_mr *(*reg_user_mr)(struct ib_pd *pd, u64 start, u64 length,
				     u64 virt_addr, int mr_access_flags,
				     struct ib_udata *udata);
	int (*rereg_user_mr)(struct ib_mr *mr, int flags, u64 start, u64 length,
			     u64 virt_addr, int mr_access_flags,
			     struct ib_pd *pd, struct ib_udata *udata);
	int (*dereg_mr)(struct ib_mr *mr, struct ib_udata *udata);
	struct ib_mr *(*alloc_mr)(struct ib_pd *pd, enum ib_mr_type mr_type,
				  u32 max_num_sg);
	struct ib_mr *(*alloc_mr_integrity)(struct ib_pd *pd,
					    u32 max_num_data_sg,
					    u32 max_num_meta_sg);
	int (*advise_mr)(struct ib_pd *pd,
			 enum ib_uverbs_advise_mr_advice advice, u32 flags,
			 struct ib_sge *sg_list, u32 num_sge,
			 struct uverbs_attr_bundle *attrs);
	int (*map_mr_sg)(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
			 unsigned int *sg_offset);
	int (*check_mr_status)(struct ib_mr *mr, u32 check_mask,
			       struct ib_mr_status *mr_status);
	int (*alloc_mw)(struct ib_mw *mw, struct ib_udata *udata);
	int (*dealloc_mw)(struct ib_mw *mw);
	int (*attach_mcast)(struct ib_qp *qp, union ib_gid *gid, u16 lid);
	int (*detach_mcast)(struct ib_qp *qp, union ib_gid *gid, u16 lid);
	int (*alloc_xrcd)(struct ib_xrcd *xrcd, struct ib_udata *udata);
	int (*dealloc_xrcd)(struct ib_xrcd *xrcd, struct ib_udata *udata);
	struct ib_flow *(*create_flow)(struct ib_qp *qp,
				       struct ib_flow_attr *flow_attr,
				       struct ib_udata *udata);
	int (*destroy_flow)(struct ib_flow *flow_id);
	struct ib_flow_action *(*create_flow_action_esp)(
		struct ib_device *device,
		const struct ib_flow_action_attrs_esp *attr,
		struct uverbs_attr_bundle *attrs);
	int (*destroy_flow_action)(struct ib_flow_action *action);
	int (*modify_flow_action_esp)(
		struct ib_flow_action *action,
		const struct ib_flow_action_attrs_esp *attr,
		struct uverbs_attr_bundle *attrs);
	int (*set_vf_link_state)(struct ib_device *device, int vf, u8 port,
				 int state);
	int (*get_vf_config)(struct ib_device *device, int vf, u8 port,
			     struct ifla_vf_info *ivf);
	int (*get_vf_stats)(struct ib_device *device, int vf, u8 port,
			    struct ifla_vf_stats *stats);
	int (*get_vf_guid)(struct ib_device *device, int vf, u8 port,
			   struct ifla_vf_guid *node_guid,
			   struct ifla_vf_guid *port_guid);
	int (*set_vf_guid)(struct ib_device *device, int vf, u8 port, u64 guid,
			   int type);
	struct ib_wq *(*create_wq)(struct ib_pd *pd,
				   struct ib_wq_init_attr *init_attr,
				   struct ib_udata *udata);
	int (*destroy_wq)(struct ib_wq *wq, struct ib_udata *udata);
	int (*modify_wq)(struct ib_wq *wq, struct ib_wq_attr *attr,
			 u32 wq_attr_mask, struct ib_udata *udata);
	int (*create_rwq_ind_table)(struct ib_rwq_ind_table *ib_rwq_ind_table,
				    struct ib_rwq_ind_table_init_attr *init_attr,
				    struct ib_udata *udata);
	int (*destroy_rwq_ind_table)(struct ib_rwq_ind_table *wq_ind_table);
	struct ib_dm *(*alloc_dm)(struct ib_device *device,
				  struct ib_ucontext *context,
				  struct ib_dm_alloc_attr *attr,
				  struct uverbs_attr_bundle *attrs);
	int (*dealloc_dm)(struct ib_dm *dm, struct uverbs_attr_bundle *attrs);
	struct ib_mr *(*reg_dm_mr)(struct ib_pd *pd, struct ib_dm *dm,
				   struct ib_dm_mr_attr *attr,
				   struct uverbs_attr_bundle *attrs);
	int (*create_counters)(struct ib_counters *counters,
			       struct uverbs_attr_bundle *attrs);
	int (*destroy_counters)(struct ib_counters *counters);
	int (*read_counters)(struct ib_counters *counters,
			     struct ib_counters_read_attr *counters_read_attr,
			     struct uverbs_attr_bundle *attrs);
	int (*map_mr_sg_pi)(struct ib_mr *mr, struct scatterlist *data_sg,
			    int data_sg_nents, unsigned int *data_sg_offset,
			    struct scatterlist *meta_sg, int meta_sg_nents,
			    unsigned int *meta_sg_offset);
	/**
	 * alloc_hw_stats - Allocate a struct rdma_hw_stats and fill in the
	 *   driver initialized data. The struct is kfree()'ed by the sysfs
	 *   core when the device is removed. A lifespan of -1 in the return
	 *   struct tells the core to set a default lifespan.
	 */
	struct rdma_hw_stats *(*alloc_hw_stats)(struct ib_device *device,
						u8 port_num);
	/**
	 * get_hw_stats - Fill in the counter value(s) in the stats struct.
	 * @index - The index in the value array we wish to have updated, or
	 *   num_counters if we want all stats updated
	 * Return codes -
	 *   < 0 - Error, no counters updated
	 *   index - Updated the single counter pointed to by index
	 *   num_counters - Updated all counters (will reset the timestamp
	 *     and prevent further calls for lifespan milliseconds)
	 * Drivers are allowed to update all counters in lieu of just the
	 *   one given in index at their option
	 */
	int (*get_hw_stats)(struct ib_device *device,
			    struct rdma_hw_stats *stats, u8 port, int index);
	/*
	 * This function is called once for each port when an ib device is
	 * registered.
	 */
	int (*init_port)(struct ib_device *device, u8 port_num,
			 struct kobject *port_sysfs);
	/**
	 * Allows rdma drivers to add their own restrack attributes.
	 */
	int (*fill_res_mr_entry)(struct sk_buff *msg, struct ib_mr *ibmr);
	int (*fill_res_mr_entry_raw)(struct sk_buff *msg, struct ib_mr *ibmr);
	int (*fill_res_cq_entry)(struct sk_buff *msg, struct ib_cq *ibcq);
	int (*fill_res_cq_entry_raw)(struct sk_buff *msg, struct ib_cq *ibcq);
	int (*fill_res_qp_entry)(struct sk_buff *msg, struct ib_qp *ibqp);
	int (*fill_res_qp_entry_raw)(struct sk_buff *msg, struct ib_qp *ibqp);
	int (*fill_res_cm_id_entry)(struct sk_buff *msg, struct rdma_cm_id *id);
	/* Device lifecycle callbacks */

	/*
	 * Called after the device becomes registered, before clients are
	 * attached.
	 */
	int (*enable_driver)(struct ib_device *dev);
	/*
	 * This is called as part of ib_dealloc_device().
	 */
	void (*dealloc_driver)(struct ib_device *dev);
	/* iWarp CM callbacks */
	void (*iw_add_ref)(struct ib_qp *qp);
	void (*iw_rem_ref)(struct ib_qp *qp);
	struct ib_qp *(*iw_get_qp)(struct ib_device *device, int qpn);
	int (*iw_connect)(struct iw_cm_id *cm_id,
			  struct iw_cm_conn_param *conn_param);
	int (*iw_accept)(struct iw_cm_id *cm_id,
			 struct iw_cm_conn_param *conn_param);
	int (*iw_reject)(struct iw_cm_id *cm_id, const void *pdata,
			 u8 pdata_len);
	int (*iw_create_listen)(struct iw_cm_id *cm_id, int backlog);
	int (*iw_destroy_listen)(struct iw_cm_id *cm_id);
	/**
	 * counter_bind_qp - Bind a QP to a counter.
	 * @counter - The counter to be bound. If counter->id is zero then
	 *   the driver needs to allocate a new counter and set counter->id
	 */
	int (*counter_bind_qp)(struct rdma_counter *counter, struct ib_qp *qp);
	/**
	 * counter_unbind_qp - Unbind the qp from the dynamically-allocated
	 *   counter and bind it onto the default one
	 */
	int (*counter_unbind_qp)(struct ib_qp *qp);
	/**
	 * counter_dealloc - De-allocate the hw counter
	 */
	int (*counter_dealloc)(struct rdma_counter *counter);
	/**
	 * counter_alloc_stats - Allocate a struct rdma_hw_stats and fill in
	 *   the driver initialized data.
	 */
	struct rdma_hw_stats *(*counter_alloc_stats)(
		struct rdma_counter *counter);
	/**
	 * counter_update_stats - Query the stats value of this counter
	 */
	int (*counter_update_stats)(struct rdma_counter *counter);
	/**
	 * Allows rdma drivers to add their own restrack attributes
	 * dumped via 'rdma stat' iproute2 command.
	 */
	int (*fill_stat_mr_entry)(struct sk_buff *msg, struct ib_mr *ibmr);

	/* query driver for its ucontext properties */
	int (*query_ucontext)(struct ib_ucontext *context,
			      struct uverbs_attr_bundle *attrs);
	DECLARE_RDMA_OBJ_SIZE(ib_ah);
	DECLARE_RDMA_OBJ_SIZE(ib_counters);
	DECLARE_RDMA_OBJ_SIZE(ib_cq);
	DECLARE_RDMA_OBJ_SIZE(ib_mw);
	DECLARE_RDMA_OBJ_SIZE(ib_pd);
	DECLARE_RDMA_OBJ_SIZE(ib_rwq_ind_table);
	DECLARE_RDMA_OBJ_SIZE(ib_srq);
	DECLARE_RDMA_OBJ_SIZE(ib_ucontext);
	DECLARE_RDMA_OBJ_SIZE(ib_xrcd);
};
struct ib_core_device {
	/* device must be the first element in structure until,
	 * union of ib_core_device and device exists in ib_device.
	 */
	struct device dev;
	possible_net_t rdma_net;
	struct kobject *ports_kobj;
	struct list_head port_list;
	struct ib_device *owner; /* reach back to owner ib_device */
};

struct rdma_restrack_root;
struct ib_device {
	/* Do not access @dma_device directly from ULP nor from HW drivers. */
	struct device *dma_device;
	struct ib_device_ops ops;
	char name[IB_DEVICE_NAME_MAX];
	struct rcu_head rcu_head;

	struct list_head event_handler_list;
	/* Protects event_handler_list */
	struct rw_semaphore event_handler_rwsem;

	/* Protects QP's event_handler calls and open_qp list */
	spinlock_t qp_open_list_lock;

	struct rw_semaphore client_data_rwsem;
	struct xarray client_data;
	struct mutex unregistration_lock;

	/* Synchronize GID, Pkey cache entries, subnet prefix, LMC */
	rwlock_t cache_lock;
	/**
	 * port_data is indexed by port number
	 */
	struct ib_port_data *port_data;

	int num_comp_vectors;

	union {
		struct device dev;
		struct ib_core_device coredev;
	};

	/* First group for device attributes,
	 * Second group for driver provided attributes (optional).
	 * It is NULL terminated array.
	 */
	const struct attribute_group *groups[3];

	u64 uverbs_cmd_mask;
	u64 uverbs_ex_cmd_mask;

	char node_desc[IB_DEVICE_NODE_DESC_MAX];

	/* Indicates kernel verbs support, should not be used in drivers */
	u16 kverbs_provider:1;
	/* CQ adaptive moderation (RDMA DIM) */
	u16 use_cq_dim:1;

	struct ib_device_attr attrs;
	struct attribute_group *hw_stats_ag;
	struct rdma_hw_stats *hw_stats;

#ifdef CONFIG_CGROUP_RDMA
	struct rdmacg_device cg_device;
#endif

	spinlock_t cq_pools_lock;
	struct list_head cq_pools[IB_POLL_LAST_POOL_TYPE + 1];

	struct rdma_restrack_root *res;

	const struct uapi_definition *driver_def;

	/*
	 * Positive refcount indicates that the device is currently
	 * registered and cannot be unregistered.
	 */
	refcount_t refcount;
	struct completion unreg_completion;
	struct work_struct unregistration_work;

	const struct rdma_link_ops *link_ops;

	/* Protects compat_devs xarray modifications */
	struct mutex compat_devs_mutex;
	/* Maintains compat devices for each net namespace */
	struct xarray compat_devs;

	/* Used by iWarp CM */
	char iw_ifname[IFNAMSIZ];
	u32 iw_driver_flags;
};
struct ib_client_nl_info;
struct ib_client {
	const char *name;
	int (*add)(struct ib_device *ibdev);
	void (*remove)(struct ib_device *, void *client_data);
	void (*rename)(struct ib_device *dev, void *client_data);
	int (*get_nl_info)(struct ib_device *ibdev, void *client_data,
			   struct ib_client_nl_info *res);
	int (*get_global_nl_info)(struct ib_client_nl_info *res);
	/* Returns the net_dev belonging to this ib_client and matching the
	 * given parameters.
	 * @dev:	 An RDMA device that the net_dev use for communication.
	 * @port:	 A physical port number on the RDMA device.
	 * @pkey:	 P_Key that the net_dev uses if applicable.
	 * @gid:	 A GID that the net_dev uses to communicate.
	 * @addr:	 An IP address the net_dev is configured with.
	 * @client_data: The device's client data set by ib_set_client_data().
	 *
	 * An ib_client that implements a net_dev on top of RDMA devices
	 * (such as IP over IB) should implement this callback, allowing the
	 * rdma_cm module to find the right net_dev for a given request.
	 *
	 * The caller is responsible for calling dev_put on the returned
	 * net_dev. */
	struct net_device *(*get_net_dev_by_params)(
			struct ib_device *dev,
			u8 port,
			u16 pkey,
			const union ib_gid *gid,
			const struct sockaddr *addr,
			void *client_data);

	refcount_t uses;
	struct completion uses_zero;
	u32 client_id;

	/* kverbs are not required by the client */
	u8 no_kverbs_req:1;
};
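
/*
 * Example: a minimal ib_client that keeps per-device state. This is an
 * illustrative sketch, not part of this header; "mydrv_state" is a
 * hypothetical structure, and ib_set_client_data()/ib_register_client()
 * are declared further below. The client would be registered from module
 * init with ib_register_client(&mydrv_client).
 */
struct mydrv_state {
	int dummy;	/* hypothetical per-device data */
};

static struct ib_client mydrv_client;

static int mydrv_add_one(struct ib_device *ibdev)
{
	struct mydrv_state *state = kzalloc(sizeof(*state), GFP_KERNEL);

	if (!state)
		return -ENOMEM;
	/* Publish the state so later callbacks can find it. */
	ib_set_client_data(ibdev, &mydrv_client, state);
	return 0;
}

static void mydrv_remove_one(struct ib_device *ibdev, void *client_data)
{
	kfree(client_data);
}

static struct ib_client mydrv_client = {
	.name	= "mydrv",
	.add	= mydrv_add_one,
	.remove	= mydrv_remove_one,
};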
/*
 * IB block DMA iterator
 *
 * Iterates the DMA-mapped SGL in contiguous memory blocks aligned
 * to a HW supported page size.
 */
struct ib_block_iter {
	/* internal states */
	struct scatterlist *__sg;	/* sg holding the current aligned block */
	dma_addr_t __dma_addr;		/* unaligned DMA address of this block */
	unsigned int __sg_nents;	/* number of SG entries */
	unsigned int __sg_advance;	/* number of bytes to advance in sg in next step */
	unsigned int __pg_bit;		/* alignment of current block */
};

struct ib_device *_ib_alloc_device(size_t size);
#define ib_alloc_device(drv_struct, member)                                    \
	container_of(_ib_alloc_device(sizeof(struct drv_struct) +             \
				      BUILD_BUG_ON_ZERO(offsetof(              \
					      struct drv_struct, member))),    \
		     struct drv_struct, member)

void ib_dealloc_device(struct ib_device *device);

void ib_get_device_fw_str(struct ib_device *device, char *str);

int ib_register_device(struct ib_device *device, const char *name,
		       struct device *dma_device);
void ib_unregister_device(struct ib_device *device);
void ib_unregister_driver(enum rdma_driver_id driver_id);
void ib_unregister_device_and_put(struct ib_device *device);
void ib_unregister_device_queued(struct ib_device *ib_dev);

int ib_register_client(struct ib_client *client);
void ib_unregister_client(struct ib_client *client);
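
/*
 * Example: how a driver embeds ib_device in its own structure, allocates
 * it with ib_alloc_device(), and registers it. Illustrative sketch;
 * "mydrv_device", "mydrv_probe" and the "mydrv%d" name pattern are
 * hypothetical, and mydrv_dev_ops refers to the hypothetical ops table
 * sketched after struct ib_device_ops above.
 */
struct mydrv_device {
	struct ib_device ibdev;	/* must be the ib_alloc_device() member */
	int private_state;
};

static int mydrv_probe(struct device *dma_device)
{
	struct mydrv_device *mdev;
	int ret;

	mdev = ib_alloc_device(mydrv_device, ibdev);
	if (!mdev)
		return -ENOMEM;

	ib_set_device_ops(&mdev->ibdev, &mydrv_dev_ops);
	ret = ib_register_device(&mdev->ibdev, "mydrv%d", dma_device);
	if (ret)
		ib_dealloc_device(&mdev->ibdev);
	return ret;
}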
void __rdma_block_iter_start(struct ib_block_iter *biter,
			     struct scatterlist *sglist,
			     unsigned int nents,
			     unsigned long pgsz);
bool __rdma_block_iter_next(struct ib_block_iter *biter);
/**
 * rdma_block_iter_dma_address - get the aligned dma address of the current
 * block held by the block iterator.
 * @biter: block iterator holding the memory block
 */
static inline dma_addr_t
rdma_block_iter_dma_address(struct ib_block_iter *biter)
{
	return biter->__dma_addr & ~(BIT_ULL(biter->__pg_bit) - 1);
}

/**
 * rdma_for_each_block - iterate over contiguous memory blocks of the sg list
 * @sglist: sglist to iterate over
 * @biter: block iterator holding the memory block
 * @nents: maximum number of sg entries to iterate over
 * @pgsz: best HW supported page size to use
 *
 * Callers may use rdma_block_iter_dma_address() to get each block's aligned
 * DMA address.
 */
#define rdma_for_each_block(sglist, biter, nents, pgsz)		\
	for (__rdma_block_iter_start(biter, sglist, nents,	\
				     pgsz);			\
	     __rdma_block_iter_next(biter);)
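
/*
 * Example: walking a DMA-mapped SGL in HW-page-sized blocks, e.g. while
 * filling a page table for an MR. Illustrative sketch; the caller is
 * assumed to have sized the hypothetical "pas" array appropriately.
 */
static void mydrv_fill_page_list(struct scatterlist *sglist, int nents,
				 unsigned long pgsz, u64 *pas)
{
	struct ib_block_iter biter;
	int i = 0;

	/* Each iteration yields one aligned block of size pgsz. */
	rdma_for_each_block(sglist, &biter, nents, pgsz)
		pas[i++] = rdma_block_iter_dma_address(&biter);
}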
/**
 * ib_get_client_data - Get IB client context
 * @device: Device to get context for
 * @client: Client to get context for
 *
 * ib_get_client_data() returns the client context data set with
 * ib_set_client_data(). This can only be called while the client is
 * registered to the device, once the ib_client remove() callback returns this
 * cannot be called.
 */
static inline void *ib_get_client_data(struct ib_device *device,
				       struct ib_client *client)
{
	return xa_load(&device->client_data, client->client_id);
}
void ib_set_client_data(struct ib_device *device, struct ib_client *client,
			void *data);
void ib_set_device_ops(struct ib_device *device,
		       const struct ib_device_ops *ops);
int rdma_user_mmap_io(struct ib_ucontext *ucontext, struct vm_area_struct *vma,
		      unsigned long pfn, unsigned long size, pgprot_t prot,
		      struct rdma_user_mmap_entry *entry);
int rdma_user_mmap_entry_insert(struct ib_ucontext *ucontext,
				struct rdma_user_mmap_entry *entry,
				size_t length);
int rdma_user_mmap_entry_insert_range(struct ib_ucontext *ucontext,
				      struct rdma_user_mmap_entry *entry,
				      size_t length, u32 min_pgoff,
				      u32 max_pgoff);

struct rdma_user_mmap_entry *
rdma_user_mmap_entry_get_pgoff(struct ib_ucontext *ucontext,
			       unsigned long pgoff);
struct rdma_user_mmap_entry *
rdma_user_mmap_entry_get(struct ib_ucontext *ucontext,
			 struct vm_area_struct *vma);
void rdma_user_mmap_entry_put(struct rdma_user_mmap_entry *entry);

void rdma_user_mmap_entry_remove(struct rdma_user_mmap_entry *entry);
static inline int ib_copy_from_udata(void *dest, struct ib_udata *udata, size_t len)
{
	return copy_from_user(dest, udata->inbuf, len) ? -EFAULT : 0;
}

static inline int ib_copy_to_udata(struct ib_udata *udata, void *src, size_t len)
{
	return copy_to_user(udata->outbuf, src, len) ? -EFAULT : 0;
}

static inline bool ib_is_buffer_cleared(const void __user *p,
					size_t len)
{
	bool ret;
	u8 *buf;

	if (len > USHRT_MAX)
		return false;

	buf = memdup_user(p, len);
	if (IS_ERR(buf))
		return false;

	ret = !memchr_inv(buf, 0, len);
	kfree(buf);
	return ret;
}

static inline bool ib_is_udata_cleared(struct ib_udata *udata,
				       size_t offset,
				       size_t len)
{
	return ib_is_buffer_cleared(udata->inbuf + offset, len);
}
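
/*
 * Example: a driver verb handler exchanging a command/response pair with
 * userspace through ib_udata. Illustrative sketch; the
 * "mydrv_create_cq_cmd"/"mydrv_create_cq_resp" ABI structures and the
 * helper itself are hypothetical.
 */
struct mydrv_create_cq_cmd {
	__u32 flags;
	__u32 reserved;
};

struct mydrv_create_cq_resp {
	__u32 cqn;
	__u32 reserved;
};

static int mydrv_exchange_udata(struct ib_udata *udata, __u32 *flags, __u32 cqn)
{
	struct mydrv_create_cq_cmd cmd = {};	/* zeroed: short copies are safe */
	struct mydrv_create_cq_resp resp = {};
	int ret;

	ret = ib_copy_from_udata(&cmd, udata, min(udata->inlen, sizeof(cmd)));
	if (ret)
		return ret;
	*flags = cmd.flags;

	resp.cqn = cqn;
	return ib_copy_to_udata(udata, &resp, min(udata->outlen, sizeof(resp)));
}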
/**
 * ib_is_destroy_retryable - Check whether the uobject destruction
 * is retryable.
 * @ret: The initial destruction return code
 * @why: remove reason
 * @uobj: The uobject that is destroyed
 *
 * This function is a helper function that IB layer and low-level drivers
 * can use to consider whether the destruction of the given uobject is
 * retryable.
 * It checks the original return code, if it wasn't success the destruction
 * is retryable according to the ucontext state (i.e. cleanup_retryable) and
 * the remove reason (i.e. why).
 * Must be called with the object locked for destroy.
 */
static inline bool ib_is_destroy_retryable(int ret, enum rdma_remove_reason why,
					   struct ib_uobject *uobj)
{
	return ret && (why == RDMA_REMOVE_DESTROY ||
		       uobj->context->cleanup_retryable);
}

/**
 * ib_destroy_usecnt - Called during destruction to check the usecnt
 * @usecnt: The usecnt atomic
 * @why: remove reason
 * @uobj: The uobject that is destroyed
 *
 * Non-zero usecnts will block destruction unless destruction was triggered by
 * a ucontext cleanup.
 */
static inline int ib_destroy_usecnt(atomic_t *usecnt,
				    enum rdma_remove_reason why,
				    struct ib_uobject *uobj)
{
	if (atomic_read(usecnt) && ib_is_destroy_retryable(-EBUSY, why, uobj))
		return -EBUSY;
	return 0;
}
/**
 * ib_modify_qp_is_ok - Check that the supplied attribute mask
 * contains all required attributes and no attributes not allowed for
 * the given QP state transition.
 * @cur_state: Current QP state
 * @next_state: Next QP state
 * @type: QP type
 * @mask: Mask of supplied QP attributes
 *
 * This function is a helper function that a low-level driver's
 * modify_qp method can use to validate the consumer's input.  It
 * checks that cur_state and next_state are valid QP states, that a
 * transition from cur_state to next_state is allowed by the IB spec,
 * and that the attribute mask supplied is allowed for the transition.
 */
bool ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
			enum ib_qp_type type, enum ib_qp_attr_mask mask);

void ib_register_event_handler(struct ib_event_handler *event_handler);
void ib_unregister_event_handler(struct ib_event_handler *event_handler);
void ib_dispatch_event(const struct ib_event *event);
int ib_query_port(struct ib_device *device,
		  u8 port_num, struct ib_port_attr *port_attr);

enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device,
					      u8 port_num);
/**
 * rdma_cap_ib_switch - Check if the device is IB switch
 * @device: Device to check
 *
 * Device driver is responsible for setting is_switch bit on
 * in ib_device structure at init time.
 *
 * Return: true if the device is IB switch.
 */
static inline bool rdma_cap_ib_switch(const struct ib_device *device)
{
	return device->is_switch;
}

/**
 * rdma_start_port - Return the first valid port number for the device
 * specified
 *
 * @device: Device to be checked
 *
 * Return start port number
 */
static inline u8 rdma_start_port(const struct ib_device *device)
{
	return rdma_cap_ib_switch(device) ? 0 : 1;
}

/**
 * rdma_for_each_port - Iterate over all valid port numbers of the IB device
 * @device - The struct ib_device * to iterate over
 * @iter - The unsigned int to store the port number
 */
#define rdma_for_each_port(device, iter)                                       \
	for (iter = rdma_start_port(device +                                   \
				    BUILD_BUG_ON_ZERO(!__same_type(            \
					    unsigned int, iter)));             \
	     iter <= rdma_end_port(device); (iter)++)
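
/*
 * Example: probing the protocol of every valid port on a device with
 * rdma_for_each_port() and the capability helpers declared just below.
 * Illustrative sketch.
 */
static void mydrv_scan_ports(struct ib_device *device)
{
	unsigned int port;

	rdma_for_each_port(device, port) {
		if (rdma_protocol_iwarp(device, port))
			pr_info("port %u runs iWarp\n", port);
		else if (rdma_ib_or_roce(device, port))
			pr_info("port %u runs IB or RoCE\n", port);
	}
}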
/**
 * rdma_end_port - Return the last valid port number for the device
 * specified
 *
 * @device: Device to be checked
 *
 * Return last port number
 */
static inline u8 rdma_end_port(const struct ib_device *device)
{
	return rdma_cap_ib_switch(device) ? 0 : device->phys_port_cnt;
}

static inline int rdma_is_port_valid(const struct ib_device *device,
				     unsigned int port)
{
	return (port >= rdma_start_port(device) &&
		port <= rdma_end_port(device));
}

static inline bool rdma_is_grh_required(const struct ib_device *device,
					u8 port_num)
{
	return device->port_data[port_num].immutable.core_cap_flags &
	       RDMA_CORE_PORT_IB_GRH_REQUIRED;
}
static inline bool rdma_protocol_ib(const struct ib_device *device, u8 port_num)
{
	return device->port_data[port_num].immutable.core_cap_flags &
	       RDMA_CORE_CAP_PROT_IB;
}

static inline bool rdma_protocol_roce(const struct ib_device *device, u8 port_num)
{
	return device->port_data[port_num].immutable.core_cap_flags &
	       (RDMA_CORE_CAP_PROT_ROCE | RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP);
}

static inline bool rdma_protocol_roce_udp_encap(const struct ib_device *device, u8 port_num)
{
	return device->port_data[port_num].immutable.core_cap_flags &
	       RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP;
}

static inline bool rdma_protocol_roce_eth_encap(const struct ib_device *device, u8 port_num)
{
	return device->port_data[port_num].immutable.core_cap_flags &
	       RDMA_CORE_CAP_PROT_ROCE;
}

static inline bool rdma_protocol_iwarp(const struct ib_device *device, u8 port_num)
{
	return device->port_data[port_num].immutable.core_cap_flags &
	       RDMA_CORE_CAP_PROT_IWARP;
}

static inline bool rdma_ib_or_roce(const struct ib_device *device, u8 port_num)
{
	return rdma_protocol_ib(device, port_num) ||
	       rdma_protocol_roce(device, port_num);
}

static inline bool rdma_protocol_raw_packet(const struct ib_device *device, u8 port_num)
{
	return device->port_data[port_num].immutable.core_cap_flags &
	       RDMA_CORE_CAP_PROT_RAW_PACKET;
}

static inline bool rdma_protocol_usnic(const struct ib_device *device, u8 port_num)
{
	return device->port_data[port_num].immutable.core_cap_flags &
	       RDMA_CORE_CAP_PROT_USNIC;
}
/**
 * rdma_cap_ib_mad - Check if the port of a device supports Infiniband
 * Management Datagrams.
 * @device: Device to check
 * @port_num: Port number to check
 *
 * Management Datagrams (MAD) are a required part of the InfiniBand
 * specification and are supported on all InfiniBand devices.  A slightly
 * extended version is also supported on OPA interfaces.
 *
 * Return: true if the port supports sending/receiving of MAD packets.
 */
static inline bool rdma_cap_ib_mad(const struct ib_device *device, u8 port_num)
{
	return device->port_data[port_num].immutable.core_cap_flags &
	       RDMA_CORE_CAP_IB_MAD;
}

/**
 * rdma_cap_opa_mad - Check if the port of device provides support for OPA
 * Management Datagrams.
 * @device: Device to check
 * @port_num: Port number to check
 *
 * Intel OmniPath devices extend and/or replace the InfiniBand Management
 * datagrams with their own versions.  These OPA MADs share many but not all
 * of the characteristics of InfiniBand MADs.
 *
 * OPA MADs differ in the following ways:
 *
 *    1) MADs are variable size up to 2K
 *       IBTA defined MADs remain fixed at 256 bytes
 *    2) OPA SMPs must carry valid PKeys
 *    3) OPA SMP packets are a different format
 *
 * Return: true if the port supports OPA MAD packet formats.
 */
static inline bool rdma_cap_opa_mad(struct ib_device *device, u8 port_num)
{
	return device->port_data[port_num].immutable.core_cap_flags &
	       RDMA_CORE_CAP_OPA_MAD;
}
/**
 * rdma_cap_ib_smi - Check if the port of a device provides an Infiniband
 * Subnet Management Agent (SMA) on the Subnet Management Interface (SMI).
 * @device: Device to check
 * @port_num: Port number to check
 *
 * Each InfiniBand node is required to provide a Subnet Management Agent
 * that the subnet manager can access.  Prior to the fabric being fully
 * configured by the subnet manager, the SMA is accessed via a well known
 * interface called the Subnet Management Interface (SMI).  This interface
 * uses directed route packets to communicate with the SM to get around the
 * chicken and egg problem of the SM needing to know what's on the fabric
 * in order to configure the fabric, and needing to configure the fabric in
 * order to send packets to the devices on the fabric.  These directed
 * route packets do not need the fabric fully configured in order to reach
 * their destination.  The SMI is the only method allowed to send
 * directed route packets on an InfiniBand fabric.
 *
 * Return: true if the port provides an SMI.
 */
static inline bool rdma_cap_ib_smi(const struct ib_device *device, u8 port_num)
{
	return device->port_data[port_num].immutable.core_cap_flags &
	       RDMA_CORE_CAP_IB_SMI;
}
/**
 * rdma_cap_ib_cm - Check if the port of device has the capability Infiniband
 * Communication Manager.
 * @device: Device to check
 * @port_num: Port number to check
 *
 * The InfiniBand Communication Manager is one of many pre-defined General
 * Service Agents (GSA) that are accessed via the General Service
 * Interface (GSI).  Its role is to facilitate establishment of connections
 * between nodes as well as other management related tasks for established
 * connections.
 *
 * Return: true if the port supports an IB CM (this does not guarantee that
 * a CM is actually running however).
 */
static inline bool rdma_cap_ib_cm(const struct ib_device *device, u8 port_num)
{
	return device->port_data[port_num].immutable.core_cap_flags &
	       RDMA_CORE_CAP_IB_CM;
}

/**
 * rdma_cap_iw_cm - Check if the port of device has the capability IWARP
 * Communication Manager.
 * @device: Device to check
 * @port_num: Port number to check
 *
 * Similar to above, but specific to iWARP connections which have a different
 * management protocol than InfiniBand.
 *
 * Return: true if the port supports an iWARP CM (this does not guarantee that
 * a CM is actually running however).
 */
static inline bool rdma_cap_iw_cm(const struct ib_device *device, u8 port_num)
{
	return device->port_data[port_num].immutable.core_cap_flags &
	       RDMA_CORE_CAP_IW_CM;
}
/**
 * rdma_cap_ib_sa - Check if the port of device has the capability Infiniband
 * Subnet Administration.
 * @device: Device to check
 * @port_num: Port number to check
 *
 * An InfiniBand Subnet Administration (SA) service is a pre-defined General
 * Service Agent (GSA) provided by the Subnet Manager (SM).  On InfiniBand
 * fabrics, devices should resolve routes to other hosts by contacting the
 * SA to query the proper route.
 *
 * Return: true if the port should act as a client to the fabric Subnet
 * Administration interface.  This does not imply that the SA service is
 * running locally.
 */
static inline bool rdma_cap_ib_sa(const struct ib_device *device, u8 port_num)
{
	return device->port_data[port_num].immutable.core_cap_flags &
	       RDMA_CORE_CAP_IB_SA;
}

/**
 * rdma_cap_ib_mcast - Check if the port of device has the capability Infiniband
 * Multicast.
 * @device: Device to check
 * @port_num: Port number to check
 *
 * InfiniBand multicast registration is more complex than normal IPv4 or
 * IPv6 multicast registration.  Each Host Channel Adapter must register
 * with the Subnet Manager when it wishes to join a multicast group.  It
 * should do so only once regardless of how many queue pairs it subscribes
 * to this group.  And it should leave the group only after all queue pairs
 * attached to the group have been detached.
 *
 * Return: true if the port must undertake the additional administrative
 * overhead of registering/unregistering with the SM and tracking of the
 * total number of queue pairs attached to the multicast group.
 */
static inline bool rdma_cap_ib_mcast(const struct ib_device *device, u8 port_num)
{
	return rdma_cap_ib_sa(device, port_num);
}
/**
 * rdma_cap_af_ib - Check if the port of device has the capability
 * Native Infiniband Address.
 * @device: Device to check
 * @port_num: Port number to check
 *
 * InfiniBand addressing uses a port's GUID + Subnet Prefix to make a default
 * GID.  RoCE uses a different mechanism, but still generates a GID via
 * a prescribed mechanism and port specific data.
 *
 * Return: true if the port uses a GID address to identify devices on the
 * network.
 */
static inline bool rdma_cap_af_ib(const struct ib_device *device, u8 port_num)
{
	return device->port_data[port_num].immutable.core_cap_flags &
	       RDMA_CORE_CAP_AF_IB;
}

/**
 * rdma_cap_eth_ah - Check if the port of device has the capability
 * Ethernet Address Handle.
 * @device: Device to check
 * @port_num: Port number to check
 *
 * RoCE is InfiniBand over Ethernet, and it uses a well defined technique
 * to fabricate GIDs over Ethernet/IP specific addresses native to the
 * port.  Normally, packet headers are generated by the sending host
 * adapter, but when sending connectionless datagrams, we must manually
 * inject the proper headers for the fabric we are communicating over.
 *
 * Return: true if we are running as a RoCE port and must force the
 * addition of a Global Route Header built from our Ethernet Address
 * Handle into our header list for connectionless packets.
 */
static inline bool rdma_cap_eth_ah(const struct ib_device *device, u8 port_num)
{
	return device->port_data[port_num].immutable.core_cap_flags &
	       RDMA_CORE_CAP_ETH_AH;
}

/**
 * rdma_cap_opa_ah - Check if the port of device supports
 * OPA Address handles
 * @device: Device to check
 * @port_num: Port number to check
 *
 * Return: true if we are running on an OPA device which supports
 * the extended OPA addressing.
 */
static inline bool rdma_cap_opa_ah(struct ib_device *device, u8 port_num)
{
	return (device->port_data[port_num].immutable.core_cap_flags &
		RDMA_CORE_CAP_OPA_AH) == RDMA_CORE_CAP_OPA_AH;
}
/**
 * rdma_max_mad_size - Return the max MAD size required by this RDMA Port.
 * @device: Device
 * @port_num: Port number
 *
 * This MAD size includes the MAD headers and MAD payload.  No other headers
 * are included.
 *
 * Return the max MAD size required by the Port.  Will return 0 if the port
 * does not support MADs
 */
static inline size_t rdma_max_mad_size(const struct ib_device *device, u8 port_num)
{
	return device->port_data[port_num].immutable.max_mad_size;
}
/**
 * rdma_cap_roce_gid_table - Check if the port of device uses roce_gid_table
 * @device: Device to check
 * @port_num: Port number to check
 *
 * RoCE GID table mechanism manages the various GIDs for a device.
 *
 * NOTE: if allocating the port's GID table has failed, this call will still
 * return true, but any RoCE GID table API will fail.
 *
 * Return: true if the port uses RoCE GID table mechanism in order to manage
 * its GIDs.
 */
static inline bool rdma_cap_roce_gid_table(const struct ib_device *device,
					   u8 port_num)
{
	return rdma_protocol_roce(device, port_num) &&
	       device->ops.add_gid && device->ops.del_gid;
}

/*
 * Check if the device supports READ W/ INVALIDATE.
 */
static inline bool rdma_cap_read_inv(struct ib_device *dev, u32 port_num)
{
	/*
	 * iWarp drivers must support READ W/ INVALIDATE.  No other protocol
	 * has support for it yet.
	 */
	return rdma_protocol_iwarp(dev, port_num);
}
/**
 * rdma_core_cap_opa_port - Return whether the RDMA Port is OPA or not.
 * @device: Device
 * @port_num: 1 based Port number
 *
 * Return true if port is an Intel OPA port, false if not.
 */
static inline bool rdma_core_cap_opa_port(struct ib_device *device,
					  u32 port_num)
{
	return (device->port_data[port_num].immutable.core_cap_flags &
		RDMA_CORE_PORT_INTEL_OPA) == RDMA_CORE_PORT_INTEL_OPA;
}
/**
 * rdma_mtu_enum_to_int - Return the mtu of the port as an integer value.
 * @device: Device
 * @port_num: Port number
 * @mtu: enum value of MTU
 *
 * Return the MTU size supported by the port as an integer value.  Will return
 * -1 if enum value of mtu is not supported.
 */
static inline int rdma_mtu_enum_to_int(struct ib_device *device, u8 port,
				       int mtu)
{
	if (rdma_core_cap_opa_port(device, port))
		return opa_mtu_enum_to_int((enum opa_mtu)mtu);

	return ib_mtu_enum_to_int((enum ib_mtu)mtu);
}

/**
 * rdma_mtu_from_attr - Return the mtu of the port from the port attribute.
 * @device: Device
 * @port_num: Port number
 * @attr: port attribute
 *
 * Return the MTU size supported by the port as an integer value.
 */
static inline int rdma_mtu_from_attr(struct ib_device *device, u8 port,
				     struct ib_port_attr *attr)
{
	if (rdma_core_cap_opa_port(device, port))
		return attr->phys_mtu;

	return ib_mtu_enum_to_int(attr->max_mtu);
}
int ib_set_vf_link_state(struct ib_device *device, int vf, u8 port,
			 int state);
int ib_get_vf_config(struct ib_device *device, int vf, u8 port,
		     struct ifla_vf_info *info);
int ib_get_vf_stats(struct ib_device *device, int vf, u8 port,
		    struct ifla_vf_stats *stats);
int ib_get_vf_guid(struct ib_device *device, int vf, u8 port,
		   struct ifla_vf_guid *node_guid,
		   struct ifla_vf_guid *port_guid);
int ib_set_vf_guid(struct ib_device *device, int vf, u8 port, u64 guid,
		   int type);

int ib_query_pkey(struct ib_device *device,
		  u8 port_num, u16 index, u16 *pkey);

int ib_modify_device(struct ib_device *device,
		     int device_modify_mask,
		     struct ib_device_modify *device_modify);

int ib_modify_port(struct ib_device *device,
		   u8 port_num, int port_modify_mask,
		   struct ib_port_modify *port_modify);

int ib_find_gid(struct ib_device *device, union ib_gid *gid,
		u8 *port_num, u16 *index);

int ib_find_pkey(struct ib_device *device,
		 u8 port_num, u16 pkey, u16 *index);
enum ib_pd_flags {
	/*
	 * Create a memory registration for all memory in the system and place
	 * the rkey for it into pd->unsafe_global_rkey.  This can be used by
	 * ULPs to avoid the overhead of dynamic MRs.
	 *
	 * This flag is generally considered unsafe and must only be used in
	 * extremely trusted environments.  Every use of it will log a warning
	 * in the kernel log.
	 */
	IB_PD_UNSAFE_GLOBAL_RKEY	= 0x01,
};

struct ib_pd *__ib_alloc_pd(struct ib_device *device, unsigned int flags,
			    const char *caller);

#define ib_alloc_pd(device, flags) \
	__ib_alloc_pd((device), (flags), KBUILD_MODNAME)
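
/*
 * Example: allocating and releasing a kernel PD. Illustrative sketch;
 * "mydrv_setup_pd" is hypothetical, and most kernel ULPs pass zero flags
 * rather than IB_PD_UNSAFE_GLOBAL_RKEY. ib_dealloc_pd() is declared just
 * below.
 */
static int mydrv_setup_pd(struct ib_device *device)
{
	struct ib_pd *pd = ib_alloc_pd(device, 0);

	if (IS_ERR(pd))
		return PTR_ERR(pd);

	/* ... create QPs/MRs against pd ... */
	ib_dealloc_pd(pd);
	return 0;
}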
int ib_dealloc_pd_user(struct ib_pd *pd, struct ib_udata *udata);

/**
 * ib_dealloc_pd - Deallocate kernel PD
 * @pd: The protection domain
 *
 * NOTE: for user PD use ib_dealloc_pd_user with valid udata!
 */
static inline void ib_dealloc_pd(struct ib_pd *pd)
{
	int ret = ib_dealloc_pd_user(pd, NULL);

	WARN_ONCE(ret, "Destroy of kernel PD shouldn't fail");
}
enum rdma_create_ah_flags {
	/* In a sleepable context */
	RDMA_CREATE_AH_SLEEPABLE = BIT(0),
};

/**
 * rdma_create_ah - Creates an address handle for the given address vector.
 * @pd: The protection domain associated with the address handle.
 * @ah_attr: The attributes of the address vector.
 * @flags: Create address handle flags (see enum rdma_create_ah_flags).
 *
 * The address handle is used to reference a local or global destination
 * in all UD QP post sends.
 */
struct ib_ah *rdma_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr,
			     u32 flags);

/**
 * rdma_create_user_ah - Creates an address handle for the given address vector.
 * It resolves destination mac address for ah attribute of RoCE type.
 * @pd: The protection domain associated with the address handle.
 * @ah_attr: The attributes of the address vector.
 * @udata: pointer to user's input output buffer information need by
 *         provider driver.
 *
 * It returns 0 on success and returns appropriate error code on error.
 * The address handle is used to reference a local or global destination
 * in all UD QP post sends.
 */
struct ib_ah *rdma_create_user_ah(struct ib_pd *pd,
				  struct rdma_ah_attr *ah_attr,
				  struct ib_udata *udata);
/**
 * ib_get_gids_from_rdma_hdr - Get sgid and dgid from GRH or IPv4 header
 * @hdr: the L3 header to parse
 * @net_type: type of header to parse
 * @sgid: place to store source gid
 * @dgid: place to store destination gid
 */
int ib_get_gids_from_rdma_hdr(const union rdma_network_hdr *hdr,
			      enum rdma_network_type net_type,
			      union ib_gid *sgid, union ib_gid *dgid);
/**
 * ib_get_rdma_header_version - Get the header version
 * @hdr: the L3 header to parse
 */
int ib_get_rdma_header_version(const union rdma_network_hdr *hdr);

/**
 * ib_init_ah_attr_from_wc - Initializes address handle attributes from a
 *   work completion.
 * @device: Device on which the received message arrived.
 * @port_num: Port on which the received message arrived.
 * @wc: Work completion associated with the received message.
 * @grh: References the received global route header.  This parameter is
 *   ignored unless the work completion indicates that the GRH is valid.
 * @ah_attr: Returned attributes that can be used when creating an address
 *   handle for replying to the message.
 * When ib_init_ah_attr_from_wc() returns success,
 * (a) for IB link layer it optionally contains a reference to SGID attribute
 * when GRH is present for IB link layer.
 * (b) for RoCE link layer it contains a reference to SGID attribute.
 * User must invoke rdma_cleanup_ah_attr_gid_attr() to release reference to SGID
 * attributes which are initialized using ib_init_ah_attr_from_wc().
 */
int ib_init_ah_attr_from_wc(struct ib_device *device, u8 port_num,
			    const struct ib_wc *wc, const struct ib_grh *grh,
			    struct rdma_ah_attr *ah_attr);
/**
 * ib_create_ah_from_wc - Creates an address handle associated with the
 *   sender of the specified work completion.
 * @pd: The protection domain associated with the address handle.
 * @wc: Work completion information associated with a received message.
 * @grh: References the received global route header.  This parameter is
 *   ignored unless the work completion indicates that the GRH is valid.
 * @port_num: The outbound port number to associate with the address.
 *
 * The address handle is used to reference a local or global destination
 * in all UD QP post sends.
 */
struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, const struct ib_wc *wc,
				   const struct ib_grh *grh, u8 port_num);
/**
 * rdma_modify_ah - Modifies the address vector associated with an address
 *   handle.
 * @ah: The address handle to modify.
 * @ah_attr: The new address vector attributes to associate with the
 *   address handle.
 */
int rdma_modify_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);

/**
 * rdma_query_ah - Queries the address vector associated with an address
 *   handle.
 * @ah: The address handle to query.
 * @ah_attr: The address vector attributes associated with the address
 *   handle.
 */
int rdma_query_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
enum rdma_destroy_ah_flags {
	/* In a sleepable context */
	RDMA_DESTROY_AH_SLEEPABLE = BIT(0),
};

/**
 * rdma_destroy_ah_user - Destroys an address handle.
 * @ah: The address handle to destroy.
 * @flags: Destroy address handle flags (see enum rdma_destroy_ah_flags).
 * @udata: Valid user data or NULL for kernel objects
 */
int rdma_destroy_ah_user(struct ib_ah *ah, u32 flags, struct ib_udata *udata);

/**
 * rdma_destroy_ah - Destroys a kernel address handle.
 * @ah: The address handle to destroy.
 * @flags: Destroy address handle flags (see enum rdma_destroy_ah_flags).
 *
 * NOTE: for user ah use rdma_destroy_ah_user with valid udata!
 */
static inline void rdma_destroy_ah(struct ib_ah *ah, u32 flags)
{
	int ret = rdma_destroy_ah_user(ah, flags, NULL);

	WARN_ONCE(ret, "Destroy of kernel AH shouldn't fail");
}
struct ib_srq *ib_create_srq_user(struct ib_pd *pd,
				  struct ib_srq_init_attr *srq_init_attr,
				  struct ib_usrq_object *uobject,
				  struct ib_udata *udata);
static inline struct ib_srq *
ib_create_srq(struct ib_pd *pd, struct ib_srq_init_attr *srq_init_attr)
{
	if (!pd->device->ops.create_srq)
		return ERR_PTR(-EOPNOTSUPP);

	return ib_create_srq_user(pd, srq_init_attr, NULL, NULL);
}
/**
 * ib_modify_srq - Modifies the attributes for the specified SRQ.
 * @srq: The SRQ to modify.
 * @srq_attr: On input, specifies the SRQ attributes to modify.  On output,
 *   the current values of selected SRQ attributes are returned.
 * @srq_attr_mask: A bit-mask used to specify which attributes of the SRQ
 *   are being modified.
 *
 * The mask may contain IB_SRQ_MAX_WR to resize the SRQ and/or
 * IB_SRQ_LIMIT to set the SRQ's limit and request notification when
 * the number of receives queued drops below the limit.
 */
int ib_modify_srq(struct ib_srq *srq,
		  struct ib_srq_attr *srq_attr,
		  enum ib_srq_attr_mask srq_attr_mask);

/**
 * ib_query_srq - Returns the attribute list and current values for the
 *   specified SRQ.
 * @srq: The SRQ to query.
 * @srq_attr: The attributes of the specified SRQ.
 */
int ib_query_srq(struct ib_srq *srq,
		 struct ib_srq_attr *srq_attr);

/**
 * ib_destroy_srq_user - Destroys the specified SRQ.
 * @srq: The SRQ to destroy.
 * @udata: Valid user data or NULL for kernel objects
 */
int ib_destroy_srq_user(struct ib_srq *srq, struct ib_udata *udata);

/**
 * ib_destroy_srq - Destroys the specified kernel SRQ.
 * @srq: The SRQ to destroy.
 *
 * NOTE: for user srq use ib_destroy_srq_user with valid udata!
 */
static inline void ib_destroy_srq(struct ib_srq *srq)
{
	int ret = ib_destroy_srq_user(srq, NULL);

	WARN_ONCE(ret, "Destroy of kernel SRQ shouldn't fail");
}
/**
 * ib_post_srq_recv - Posts a list of work requests to the specified SRQ.
 * @srq: The SRQ to post the work request on.
 * @recv_wr: A list of work requests to post on the receive queue.
 * @bad_recv_wr: On an immediate failure, this parameter will reference
 *   the work request that failed to be posted on the QP.
 */
static inline int ib_post_srq_recv(struct ib_srq *srq,
				   const struct ib_recv_wr *recv_wr,
				   const struct ib_recv_wr **bad_recv_wr)
{
	const struct ib_recv_wr *dummy;

	return srq->device->ops.post_srq_recv(srq, recv_wr,
					      bad_recv_wr ? : &dummy);
}
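
/*
 * Example: posting a single receive buffer to an SRQ. Illustrative
 * sketch; the sge is assumed to describe an already DMA-mapped buffer,
 * and "mydrv_post_one_srq_recv" is a hypothetical helper.
 */
static int mydrv_post_one_srq_recv(struct ib_srq *srq, u64 dma_addr,
				   u32 len, u32 lkey)
{
	struct ib_sge sge = {
		.addr	= dma_addr,
		.length	= len,
		.lkey	= lkey,
	};
	struct ib_recv_wr wr = {
		.wr_id	 = dma_addr,	/* cookie returned in the completion */
		.sg_list = &sge,
		.num_sge = 1,
	};

	return ib_post_srq_recv(srq, &wr, NULL);
}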
struct ib_qp *ib_create_qp(struct ib_pd *pd,
			   struct ib_qp_init_attr *qp_init_attr);
/**
 * ib_modify_qp_with_udata - Modifies the attributes for the specified QP.
 * @qp: The QP to modify.
 * @attr: On input, specifies the QP attributes to modify.  On output,
 *   the current values of selected QP attributes are returned.
 * @attr_mask: A bit-mask used to specify which attributes of the QP
 *   are being modified.
 * @udata: pointer to user's input output buffer information
 *
 * It returns 0 on success and returns appropriate error code on error.
 */
int ib_modify_qp_with_udata(struct ib_qp *qp,
			    struct ib_qp_attr *attr,
			    int attr_mask,
			    struct ib_udata *udata);

/**
 * ib_modify_qp - Modifies the attributes for the specified QP and then
 *   transitions the QP to the given state.
 * @qp: The QP to modify.
 * @qp_attr: On input, specifies the QP attributes to modify.  On output,
 *   the current values of selected QP attributes are returned.
 * @qp_attr_mask: A bit-mask used to specify which attributes of the QP
 *   are being modified.
 */
int ib_modify_qp(struct ib_qp *qp,
		 struct ib_qp_attr *qp_attr,
		 int qp_attr_mask);

/**
 * ib_query_qp - Returns the attribute list and current values for the
 *   specified QP.
 * @qp: The QP to query.
 * @qp_attr: The attributes of the specified QP.
 * @qp_attr_mask: A bit-mask used to select specific attributes to query.
 * @qp_init_attr: Additional attributes of the selected QP.
 *
 * The qp_attr_mask may be used to limit the query to gathering only the
 * selected attributes.
 */
int ib_query_qp(struct ib_qp *qp,
		struct ib_qp_attr *qp_attr,
		int qp_attr_mask,
		struct ib_qp_init_attr *qp_init_attr);
/**
 * ib_destroy_qp_user - Destroys the specified QP.
 * @qp: The QP to destroy.
 * @udata: Valid udata or NULL for kernel objects
 */
int ib_destroy_qp_user(struct ib_qp *qp, struct ib_udata *udata);

/**
 * ib_destroy_qp - Destroys the specified kernel QP.
 * @qp: The QP to destroy.
 *
 * NOTE: for user qp use ib_destroy_qp_user with valid udata!
 */
static inline int ib_destroy_qp(struct ib_qp *qp)
{
	return ib_destroy_qp_user(qp, NULL);
}
/**
 * ib_open_qp - Obtain a reference to an existing sharable QP.
 * @xrcd - XRC domain
 * @qp_open_attr: Attributes identifying the QP to open.
 *
 * Returns a reference to a sharable QP.
 */
struct ib_qp *ib_open_qp(struct ib_xrcd *xrcd,
			 struct ib_qp_open_attr *qp_open_attr);

/**
 * ib_close_qp - Release an external reference to a QP.
 * @qp: The QP handle to release
 *
 * The opened QP handle is released by the caller.  The underlying
 * shared QP is not destroyed until all internal references are released.
 */
int ib_close_qp(struct ib_qp *qp);
/**
 * ib_post_send - Posts a list of work requests to the send queue of
 *   the specified QP.
 * @qp: The QP to post the work request on.
 * @send_wr: A list of work requests to post on the send queue.
 * @bad_send_wr: On an immediate failure, this parameter will reference
 *   the work request that failed to be posted on the QP.
 *
 * While IBA Vol. 1 section 11.4.1.1 specifies that if an immediate
 * error is returned, the QP state shall not be affected,
 * ib_post_send() will return an immediate error after queueing any
 * earlier work requests in the list.
 */
static inline int ib_post_send(struct ib_qp *qp,
			       const struct ib_send_wr *send_wr,
			       const struct ib_send_wr **bad_send_wr)
{
	const struct ib_send_wr *dummy;

	return qp->device->ops.post_send(qp, send_wr, bad_send_wr ? : &dummy);
}
/**
 * ib_post_recv - Posts a list of work requests to the receive queue of
 *   the specified QP.
 * @qp: The QP to post the work request on.
 * @recv_wr: A list of work requests to post on the receive queue.
 * @bad_recv_wr: On an immediate failure, this parameter will reference
 *   the work request that failed to be posted on the QP.
 */
static inline int ib_post_recv(struct ib_qp *qp,
			       const struct ib_recv_wr *recv_wr,
			       const struct ib_recv_wr **bad_recv_wr)
{
	const struct ib_recv_wr *dummy;

	return qp->device->ops.post_recv(qp, recv_wr, bad_recv_wr ? : &dummy);
}
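
/*
 * Example: posting a single signaled send with one sge. Illustrative
 * sketch; "mydrv_post_one_send" is hypothetical and error handling of
 * the failed work request list is elided.
 */
static int mydrv_post_one_send(struct ib_qp *qp, struct ib_sge *sge)
{
	struct ib_send_wr wr = {
		.wr_id	    = 1,	/* cookie returned in the completion */
		.sg_list    = sge,
		.num_sge    = 1,
		.opcode	    = IB_WR_SEND,
		.send_flags = IB_SEND_SIGNALED,
	};

	return ib_post_send(qp, &wr, NULL);
}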
struct ib_cq *__ib_alloc_cq(struct ib_device *dev, void *private, int nr_cqe,
			    int comp_vector, enum ib_poll_context poll_ctx,
			    const char *caller);
static inline struct ib_cq *ib_alloc_cq(struct ib_device *dev, void *private,
					int nr_cqe, int comp_vector,
					enum ib_poll_context poll_ctx)
{
	return __ib_alloc_cq(dev, private, nr_cqe, comp_vector, poll_ctx,
			     KBUILD_MODNAME);
}

struct ib_cq *__ib_alloc_cq_any(struct ib_device *dev, void *private,
				int nr_cqe, enum ib_poll_context poll_ctx,
				const char *caller);
/**
 * ib_alloc_cq_any: Allocate kernel CQ
 * @dev: The IB device
 * @private: Private data attached to the CQE
 * @nr_cqe: Number of CQEs in the CQ
 * @poll_ctx: Context used for polling the CQ
 */
static inline struct ib_cq *ib_alloc_cq_any(struct ib_device *dev,
					    void *private, int nr_cqe,
					    enum ib_poll_context poll_ctx)
{
	return __ib_alloc_cq_any(dev, private, nr_cqe, poll_ctx,
				 KBUILD_MODNAME);
}

void ib_free_cq(struct ib_cq *cq);
int ib_process_cq_direct(struct ib_cq *cq, int budget);
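
/*
 * Example: allocating a kernel CQ without binding it to a particular
 * completion vector, then releasing it. Illustrative sketch;
 * "mydrv_setup_cq" and the CQ depth of 128 are hypothetical.
 */
static int mydrv_setup_cq(struct ib_device *dev, void *ctx)
{
	struct ib_cq *cq = ib_alloc_cq_any(dev, ctx, 128, IB_POLL_SOFTIRQ);

	if (IS_ERR(cq))
		return PTR_ERR(cq);

	/* ... post work that completes on cq ... */
	ib_free_cq(cq);
	return 0;
}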
/**
 * ib_create_cq - Creates a CQ on the specified device.
 * @device: The device on which to create the CQ.
 * @comp_handler: A user-specified callback that is invoked when a
 *   completion event occurs on the CQ.
 * @event_handler: A user-specified callback that is invoked when an
 *   asynchronous event not associated with a completion occurs on the CQ.
 * @cq_context: Context associated with the CQ returned to the user via
 *   the associated completion and event handlers.
 * @cq_attr: The attributes the CQ should be created upon.
 *
 * Users can examine the cq structure to determine the actual CQ size.
 */
struct ib_cq *__ib_create_cq(struct ib_device *device,
			     ib_comp_handler comp_handler,
			     void (*event_handler)(struct ib_event *, void *),
			     void *cq_context,
			     const struct ib_cq_init_attr *cq_attr,
			     const char *caller);
#define ib_create_cq(device, cmp_hndlr, evt_hndlr, cq_ctxt, cq_attr) \
	__ib_create_cq((device), (cmp_hndlr), (evt_hndlr), (cq_ctxt), (cq_attr), KBUILD_MODNAME)

/**
 * ib_resize_cq - Modifies the capacity of the CQ.
 * @cq: The CQ to resize.
 * @cqe: The minimum size of the CQ.
 *
 * Users can examine the cq structure to determine the actual CQ size.
 */
int ib_resize_cq(struct ib_cq *cq, int cqe);
/**
 * rdma_set_cq_moderation - Modifies moderation params of the CQ
 * @cq: The CQ to modify.
 * @cq_count: number of CQEs that will trigger an event
 * @cq_period: max period of time in usec before triggering an event
 */
int rdma_set_cq_moderation(struct ib_cq *cq, u16 cq_count, u16 cq_period);

/**
 * ib_destroy_cq_user - Destroys the specified CQ.
 * @cq: The CQ to destroy.
 * @udata: Valid user data or NULL for kernel objects
 */
int ib_destroy_cq_user(struct ib_cq *cq, struct ib_udata *udata);

/**
 * ib_destroy_cq - Destroys the specified kernel CQ.
 * @cq: The CQ to destroy.
 *
 * NOTE: for user cq use ib_destroy_cq_user with valid udata!
 */
static inline void ib_destroy_cq(struct ib_cq *cq)
{
	int ret = ib_destroy_cq_user(cq, NULL);

	WARN_ONCE(ret, "Destroy of kernel CQ shouldn't fail");
}
/**
 * ib_poll_cq - poll a CQ for completion(s)
 * @cq: the CQ being polled
 * @num_entries: maximum number of completions to return
 * @wc: array of at least @num_entries &struct ib_wc where completions
 *   will be returned
 *
 * Poll a CQ for (possibly multiple) completions.  If the return value
 * is < 0, an error occurred.  If the return value is >= 0, it is the
 * number of completions returned.  If the return value is
 * non-negative and < num_entries, then the CQ was emptied.
 */
static inline int ib_poll_cq(struct ib_cq *cq, int num_entries,
			     struct ib_wc *wc)
{
	return cq->device->ops.poll_cq(cq, num_entries, wc);
}
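
/*
 * Example: draining a CQ with ib_poll_cq(). Illustrative sketch;
 * "mydrv_handle_wc" is a hypothetical completion handler.
 */
static void mydrv_handle_wc(struct ib_wc *wc);

static void mydrv_drain_cq(struct ib_cq *cq)
{
	struct ib_wc wc[16];
	int n, i;

	/* Keep polling until the CQ returns fewer entries than asked. */
	while ((n = ib_poll_cq(cq, ARRAY_SIZE(wc), wc)) > 0)
		for (i = 0; i < n; i++)
			mydrv_handle_wc(&wc[i]);
}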
/**
 * ib_req_notify_cq - Request completion notification on a CQ.
 * @cq: The CQ to generate an event for.
 * @flags:
 *   Must contain exactly one of %IB_CQ_SOLICITED or %IB_CQ_NEXT_COMP
 *   to request an event on the next solicited event or next work
 *   completion at any type, respectively. %IB_CQ_REPORT_MISSED_EVENTS
 *   may also be |ed in to request a hint about missed events, as
 *   described below.
 *
 * Return Value:
 *    < 0 means an error occurred while requesting notification
 *   == 0 means notification was requested successfully, and if
 *        IB_CQ_REPORT_MISSED_EVENTS was passed in, then no events
 *        were missed and it is safe to wait for another event.  In
 *        this case is it guaranteed that any work completions added
 *        to the CQ since the last CQ poll will trigger a completion
 *        notification event.
 *    > 0 is only returned if IB_CQ_REPORT_MISSED_EVENTS was passed
 *        in.  It means that the consumer must poll the CQ again to
 *        make sure it is empty to avoid missing an event because of a
 *        race between requesting notification and an entry being
 *        added to the CQ.  This return value means it is possible
 *        (but not guaranteed) that a work completion has been added
 *        to the CQ since the last poll without triggering a
 *        completion notification event.
 */
static inline int ib_req_notify_cq(struct ib_cq *cq,
				   enum ib_cq_notify_flags flags)
{
	return cq->device->ops.req_notify_cq(cq, flags);
}
struct ib_cq *ib_cq_pool_get(struct ib_device *dev, unsigned int nr_cqe,
			     int comp_vector_hint,
			     enum ib_poll_context poll_ctx);

void ib_cq_pool_put(struct ib_cq *cq, unsigned int nr_cqe);

/**
 * ib_req_ncomp_notif - Request completion notification when there are
 *   at least the specified number of unreaped completions on the CQ.
 * @cq: The CQ to generate an event for.
 * @wc_cnt: The number of unreaped completions that should be on the
 *   CQ before an event is generated.
 */
static inline int ib_req_ncomp_notif(struct ib_cq *cq, int wc_cnt)
{
	return cq->device->ops.req_ncomp_notif ?
		cq->device->ops.req_ncomp_notif(cq, wc_cnt) :
		-ENOSYS;
}
/*
 * Drivers that don't need a DMA mapping at the RDMA layer, set dma_device to
 * NULL. This causes the ib_dma* helpers to just stash the kernel virtual
 * address into the dma address.
 */
static inline bool ib_uses_virt_dma(struct ib_device *dev)
{
	return IS_ENABLED(CONFIG_INFINIBAND_VIRT_DMA) && !dev->dma_device;
}

/**
 * ib_dma_mapping_error - check a DMA addr for error
 * @dev: The device for which the dma_addr was created
 * @dma_addr: The DMA address to check
 */
static inline int ib_dma_mapping_error(struct ib_device *dev, u64 dma_addr)
{
	if (ib_uses_virt_dma(dev))
		return 0;
	return dma_mapping_error(dev->dma_device, dma_addr);
}
/**
 * ib_dma_map_single - Map a kernel virtual address to DMA address
 * @dev: The device for which the dma_addr is to be created
 * @cpu_addr: The kernel virtual address
 * @size: The size of the region in bytes
 * @direction: The direction of the DMA
 */
static inline u64 ib_dma_map_single(struct ib_device *dev,
				    void *cpu_addr, size_t size,
				    enum dma_data_direction direction)
{
	if (ib_uses_virt_dma(dev))
		return (uintptr_t)cpu_addr;
	return dma_map_single(dev->dma_device, cpu_addr, size, direction);
}

/**
 * ib_dma_unmap_single - Destroy a mapping created by ib_dma_map_single()
 * @dev: The device for which the DMA address was created
 * @addr: The DMA address
 * @size: The size of the region in bytes
 * @direction: The direction of the DMA
 */
static inline void ib_dma_unmap_single(struct ib_device *dev,
				       u64 addr, size_t size,
				       enum dma_data_direction direction)
{
	if (!ib_uses_virt_dma(dev))
		dma_unmap_single(dev->dma_device, addr, size, direction);
}

/**
 * ib_dma_map_page - Map a physical page to DMA address
 * @dev: The device for which the dma_addr is to be created
 * @page: The page to be mapped
 * @offset: The offset within the page
 * @size: The size of the region in bytes
 * @direction: The direction of the DMA
 */
static inline u64 ib_dma_map_page(struct ib_device *dev,
				  struct page *page,
				  unsigned long offset,
				  size_t size,
				  enum dma_data_direction direction)
{
	if (ib_uses_virt_dma(dev))
		return (uintptr_t)(page_address(page) + offset);
	return dma_map_page(dev->dma_device, page, offset, size, direction);
}

/**
 * ib_dma_unmap_page - Destroy a mapping created by ib_dma_map_page()
 * @dev: The device for which the DMA address was created
 * @addr: The DMA address
 * @size: The size of the region in bytes
 * @direction: The direction of the DMA
 */
static inline void ib_dma_unmap_page(struct ib_device *dev,
				     u64 addr, size_t size,
				     enum dma_data_direction direction)
{
	if (!ib_uses_virt_dma(dev))
		dma_unmap_page(dev->dma_device, addr, size, direction);
}
int ib_dma_virt_map_sg(struct ib_device *dev, struct scatterlist *sg, int nents);
static inline int ib_dma_map_sg_attrs(struct ib_device *dev,
				      struct scatterlist *sg, int nents,
				      enum dma_data_direction direction,
				      unsigned long dma_attrs)
{
	if (ib_uses_virt_dma(dev))
		return ib_dma_virt_map_sg(dev, sg, nents);
	return dma_map_sg_attrs(dev->dma_device, sg, nents, direction,
				dma_attrs);
}

static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev,
					 struct scatterlist *sg, int nents,
					 enum dma_data_direction direction,
					 unsigned long dma_attrs)
{
	if (!ib_uses_virt_dma(dev))
		dma_unmap_sg_attrs(dev->dma_device, sg, nents, direction,
				   dma_attrs);
}

/**
 * ib_dma_map_sg - Map a scatter/gather list to DMA addresses
 * @dev: The device for which the DMA addresses are to be created
 * @sg: The array of scatter/gather entries
 * @nents: The number of scatter/gather entries
 * @direction: The direction of the DMA
 */
static inline int ib_dma_map_sg(struct ib_device *dev,
				struct scatterlist *sg, int nents,
				enum dma_data_direction direction)
{
	return ib_dma_map_sg_attrs(dev, sg, nents, direction, 0);
}

/**
 * ib_dma_unmap_sg - Unmap a scatter/gather list of DMA addresses
 * @dev: The device for which the DMA addresses were created
 * @sg: The array of scatter/gather entries
 * @nents: The number of scatter/gather entries
 * @direction: The direction of the DMA
 */
static inline void ib_dma_unmap_sg(struct ib_device *dev,
				   struct scatterlist *sg, int nents,
				   enum dma_data_direction direction)
{
	ib_dma_unmap_sg_attrs(dev, sg, nents, direction, 0);
}
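
/*
 * Example: mapping a kernel buffer for DMA before posting it, and
 * checking the mapping for errors. Illustrative sketch; "mydrv_map_buf"
 * is hypothetical and returns 0 on mapping failure.
 */
static u64 mydrv_map_buf(struct ib_device *dev, void *buf, size_t len)
{
	u64 addr = ib_dma_map_single(dev, buf, len, DMA_TO_DEVICE);

	if (ib_dma_mapping_error(dev, addr))
		return 0;
	return addr;
}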
/**
 * ib_dma_max_seg_size - Return the size limit of a single DMA transfer
 * @dev: The device to query
 *
 * The returned value represents a size in bytes.
 */
static inline unsigned int ib_dma_max_seg_size(struct ib_device *dev)
{
	if (ib_uses_virt_dma(dev))
		return UINT_MAX;
	return dma_get_max_seg_size(dev->dma_device);
}
/**
 * ib_dma_sync_single_for_cpu - Prepare DMA region to be accessed by CPU
 * @dev: The device for which the DMA address was created
 * @addr: The DMA address
 * @size: The size of the region in bytes
 * @dir: The direction of the DMA
 */
static inline void ib_dma_sync_single_for_cpu(struct ib_device *dev,
					      u64 addr,
					      size_t size,
					      enum dma_data_direction dir)
{
	if (!ib_uses_virt_dma(dev))
		dma_sync_single_for_cpu(dev->dma_device, addr, size, dir);
}

/**
 * ib_dma_sync_single_for_device - Prepare DMA region to be accessed by device
 * @dev: The device for which the DMA address was created
 * @addr: The DMA address
 * @size: The size of the region in bytes
 * @dir: The direction of the DMA
 */
static inline void ib_dma_sync_single_for_device(struct ib_device *dev,
						 u64 addr,
						 size_t size,
						 enum dma_data_direction dir)
{
	if (!ib_uses_virt_dma(dev))
		dma_sync_single_for_device(dev->dma_device, addr, size, dir);
}
4129 * ib_dma_alloc_coherent - Allocate memory and map it for DMA
4130 * @dev: The device for which the DMA address is requested
4131 * @size: The size of the region to allocate in bytes
4132 * @dma_handle: A pointer for returning the DMA address of the region
4133 * @flag: memory allocator flags
4135 static inline void *ib_dma_alloc_coherent(struct ib_device
*dev
,
4137 dma_addr_t
*dma_handle
,
4140 return dma_alloc_coherent(dev
->dma_device
, size
, dma_handle
, flag
);
4144 * ib_dma_free_coherent - Free memory allocated by ib_dma_alloc_coherent()
4145 * @dev: The device for which the DMA addresses were allocated
4146 * @size: The size of the region
4147 * @cpu_addr: the address returned by ib_dma_alloc_coherent()
4148 * @dma_handle: the DMA address returned by ib_dma_alloc_coherent()
4150 static inline void ib_dma_free_coherent(struct ib_device
*dev
,
4151 size_t size
, void *cpu_addr
,
4152 dma_addr_t dma_handle
)
4154 dma_free_coherent(dev
->dma_device
, size
, cpu_addr
, dma_handle
);
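
/*
 * Illustrative sketch (not part of this header): allocating a small
 * coherent buffer that both the CPU and the device can access without
 * explicit sync calls, then freeing it. "MY_BUF_SIZE" and the function
 * name are hypothetical.
 */
#if 0
#define MY_BUF_SIZE 256

static int my_use_coherent_buf(struct ib_device *dev)
{
	dma_addr_t dma_handle;
	void *buf;

	buf = ib_dma_alloc_coherent(dev, MY_BUF_SIZE, &dma_handle, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* buf is the CPU address, dma_handle the address for the device. */

	ib_dma_free_coherent(dev, MY_BUF_SIZE, buf, dma_handle);
	return 0;
}
#endif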
/* ib_reg_user_mr - register a memory region for virtual addresses from kernel
 * space. This function should be called when 'current' is the owning MM.
 */
struct ib_mr *ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
			     u64 virt_addr, int mr_access_flags);

/* ib_advise_mr - give an advice about an address range in a memory region */
int ib_advise_mr(struct ib_pd *pd, enum ib_uverbs_advise_mr_advice advice,
		 u32 flags, struct ib_sge *sg_list, u32 num_sge);

/**
 * ib_dereg_mr_user - Deregisters a memory region and removes it from the
 *   HCA translation table.
 * @mr: The memory region to deregister.
 * @udata: Valid user data or NULL for kernel object
 *
 * This function can fail, if the memory region has memory windows bound to it.
 */
int ib_dereg_mr_user(struct ib_mr *mr, struct ib_udata *udata);

/**
 * ib_dereg_mr - Deregisters a kernel memory region and removes it from the
 *   HCA translation table.
 * @mr: The memory region to deregister.
 *
 * This function can fail, if the memory region has memory windows bound to it.
 *
 * NOTE: for user mr use ib_dereg_mr_user with valid udata!
 */
static inline int ib_dereg_mr(struct ib_mr *mr)
{
	return ib_dereg_mr_user(mr, NULL);
}

struct ib_mr *ib_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
			  u32 max_num_sg);

struct ib_mr *ib_alloc_mr_integrity(struct ib_pd *pd,
				    u32 max_num_data_sg,
				    u32 max_num_meta_sg);
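
/*
 * Illustrative sketch (not part of this header): allocating a fast
 * registration MR with room for 16 pages and releasing it again via
 * ib_dereg_mr(). The function name is hypothetical.
 */
#if 0
static int my_alloc_fr_mr(struct ib_pd *pd)
{
	struct ib_mr *mr;

	mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, 16);
	if (IS_ERR(mr))
		return PTR_ERR(mr);

	/* ... use the MR with ib_map_mr_sg() and an IB_WR_REG_MR WR ... */

	return ib_dereg_mr(mr);
}
#endif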
/**
 * ib_update_fast_reg_key - updates the key portion of the fast_reg MR
 * @mr - struct ib_mr pointer to be updated.
 * @newkey - new key to be used.
 */
static inline void ib_update_fast_reg_key(struct ib_mr *mr, u8 newkey)
{
	mr->lkey = (mr->lkey & 0xffffff00) | newkey;
	mr->rkey = (mr->rkey & 0xffffff00) | newkey;
}

/**
 * ib_inc_rkey - increments the key portion of the given rkey. Can be used
 * for calculating a new rkey for type 2 memory windows.
 * @rkey - the rkey to increment.
 */
static inline u32 ib_inc_rkey(u32 rkey)
{
	const u32 mask = 0x000000ff;

	return ((rkey + 1) & mask) | (rkey & ~mask);
}
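
/*
 * Illustrative sketch (not part of this header): bumping the key portion
 * before re-registering a fast-reg MR so that stale remote references to
 * the old rkey fault instead of hitting new mappings. This mirrors a
 * common ULP pattern; the function name is hypothetical.
 */
#if 0
static void my_refresh_mr_key(struct ib_mr *mr)
{
	u8 key = mr->rkey & 0x000000ff;

	/* Only the low 8 "key" bits change; the MR index bits are kept. */
	ib_update_fast_reg_key(mr, key + 1);
}
#endif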
/**
 * ib_attach_mcast - Attaches the specified QP to a multicast group.
 * @qp: QP to attach to the multicast group.  The QP must be type
 *   IB_QPT_UD.
 * @gid: Multicast group GID.
 * @lid: Multicast group LID in host byte order.
 *
 * In order to send and receive multicast packets, subnet
 * administration must have created the multicast group and configured
 * the fabric appropriately.  The port associated with the specified
 * QP must also be a member of the multicast group.
 */
int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);

/**
 * ib_detach_mcast - Detaches the specified QP from a multicast group.
 * @qp: QP to detach from the multicast group.
 * @gid: Multicast group GID.
 * @lid: Multicast group LID in host byte order.
 */
int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);

struct ib_xrcd *ib_alloc_xrcd_user(struct ib_device *device,
				   struct inode *inode, struct ib_udata *udata);
int ib_dealloc_xrcd_user(struct ib_xrcd *xrcd, struct ib_udata *udata);

static inline int ib_check_mr_access(int flags)
{
	/*
	 * Local write permission is required if remote write or
	 * remote atomic permission is also requested.
	 */
	if (flags & (IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_REMOTE_WRITE) &&
	    !(flags & IB_ACCESS_LOCAL_WRITE))
		return -EINVAL;

	if (flags & ~IB_ACCESS_SUPPORTED)
		return -EINVAL;

	return 0;
}

static inline bool ib_access_writable(int access_flags)
{
	/*
	 * We have writable memory backing the MR if any of the following
	 * access flags are set.  "Local write" and "remote write" obviously
	 * require write access.  "Remote atomic" can do things like fetch and
	 * add, which will modify memory, and "MW bind" can change permissions
	 * by binding a window.
	 */
	return access_flags &
		(IB_ACCESS_LOCAL_WRITE   | IB_ACCESS_REMOTE_WRITE |
		 IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_MW_BIND);
}
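
/*
 * Illustrative sketch (not part of this header): how the check above
 * behaves for two flag combinations. Remote write without local write is
 * rejected, as the comment in ib_check_mr_access() explains.
 */
#if 0
static void my_flag_check_demo(void)
{
	int ok  = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE;
	int bad = IB_ACCESS_REMOTE_WRITE;	/* missing local write */

	WARN_ON(ib_check_mr_access(ok) != 0);
	WARN_ON(ib_check_mr_access(bad) != -EINVAL);
}
#endif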
/**
 * ib_check_mr_status: lightweight check of MR status.
 *     This routine may provide status checks on a selected
 *     ib_mr. first use is for signature status check.
 *
 * @mr: A memory region.
 * @check_mask: Bitmask of which checks to perform from
 *     ib_mr_status_check enumeration.
 * @mr_status: The container of relevant status checks.
 *     failed checks will be indicated in the status bitmask
 *     and the relevant info shall be in the error item.
 */
int ib_check_mr_status(struct ib_mr *mr, u32 check_mask,
		       struct ib_mr_status *mr_status);

/**
 * ib_device_try_get: Hold a registration lock
 * device: The device to lock
 *
 * A device under an active registration lock cannot become unregistered. It
 * is only possible to obtain a registration lock on a device that is fully
 * registered, otherwise this function returns false.
 *
 * The registration lock is only necessary for actions which require the
 * device to still be registered. Uses that only require the device pointer to
 * be valid should use get_device(&ibdev->dev) to hold the memory.
 */
static inline bool ib_device_try_get(struct ib_device *dev)
{
	return refcount_inc_not_zero(&dev->refcount);
}
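
/*
 * Illustrative sketch (not part of this header): the canonical
 * try-get/put pattern around work that requires the device to stay
 * registered. The function name is hypothetical.
 */
#if 0
static int my_registered_only_work(struct ib_device *dev)
{
	if (!ib_device_try_get(dev))
		return -ENODEV;	/* device already unregistered */

	/* ... actions that rely on the registration lock ... */

	ib_device_put(dev);
	return 0;
}
#endif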
void ib_device_put(struct ib_device *device);
struct ib_device *ib_device_get_by_netdev(struct net_device *ndev,
					  enum rdma_driver_id driver_id);
struct ib_device *ib_device_get_by_name(const char *name,
					enum rdma_driver_id driver_id);
struct net_device *ib_get_net_dev_by_params(struct ib_device *dev, u8 port,
					    u16 pkey, const union ib_gid *gid,
					    const struct sockaddr *addr);
int ib_device_set_netdev(struct ib_device *ib_dev, struct net_device *ndev,
			 unsigned int port);
struct net_device *ib_device_netdev(struct ib_device *dev, u8 port);

struct ib_wq *ib_create_wq(struct ib_pd *pd,
			   struct ib_wq_init_attr *init_attr);
int ib_destroy_wq_user(struct ib_wq *wq, struct ib_udata *udata);
int ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *attr,
		 u32 wq_attr_mask);

int ib_map_mr_sg(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
		 unsigned int *sg_offset, unsigned int page_size);
int ib_map_mr_sg_pi(struct ib_mr *mr, struct scatterlist *data_sg,
		    int data_sg_nents, unsigned int *data_sg_offset,
		    struct scatterlist *meta_sg, int meta_sg_nents,
		    unsigned int *meta_sg_offset, unsigned int page_size);

static inline int
ib_map_mr_sg_zbva(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
		  unsigned int *sg_offset, unsigned int page_size)
{
	int n;

	n = ib_map_mr_sg(mr, sg, sg_nents, sg_offset, page_size);
	mr->iova = 0;

	return n;
}

int ib_sg_to_pages(struct ib_mr *mr, struct scatterlist *sgl, int sg_nents,
		   unsigned int *sg_offset, int (*set_page)(struct ib_mr *, u64));
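
/*
 * Illustrative sketch (not part of this header): mapping an already
 * DMA-mapped scatterlist onto a fast-reg MR. PAGE_SIZE is used as the HW
 * page size for simplicity; the function name is hypothetical.
 */
#if 0
static int my_map_mr(struct ib_mr *mr, struct scatterlist *sg, int nents)
{
	int n = ib_map_mr_sg(mr, sg, nents, NULL, PAGE_SIZE);

	if (n != nents)
		return n < 0 ? n : -EINVAL;	/* partial mapping */

	/* mr->iova, mr->length and mr->rkey now describe the region and
	 * can be used in an IB_WR_REG_MR work request. */
	return 0;
}
#endif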
void ib_drain_rq(struct ib_qp *qp);
void ib_drain_sq(struct ib_qp *qp);
void ib_drain_qp(struct ib_qp *qp);

int ib_get_eth_speed(struct ib_device *dev, u8 port_num, u16 *speed, u8 *width);

static inline u8 *rdma_ah_retrieve_dmac(struct rdma_ah_attr *attr)
{
	if (attr->type == RDMA_AH_ATTR_TYPE_ROCE)
		return attr->roce.dmac;
	return NULL;
}

static inline void rdma_ah_set_dlid(struct rdma_ah_attr *attr, u32 dlid)
{
	if (attr->type == RDMA_AH_ATTR_TYPE_IB)
		attr->ib.dlid = (u16)dlid;
	else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
		attr->opa.dlid = dlid;
}

static inline u32 rdma_ah_get_dlid(const struct rdma_ah_attr *attr)
{
	if (attr->type == RDMA_AH_ATTR_TYPE_IB)
		return attr->ib.dlid;
	else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
		return attr->opa.dlid;
	return 0;
}

static inline void rdma_ah_set_sl(struct rdma_ah_attr *attr, u8 sl)
{
	attr->sl = sl;
}

static inline u8 rdma_ah_get_sl(const struct rdma_ah_attr *attr)
{
	return attr->sl;
}

static inline void rdma_ah_set_path_bits(struct rdma_ah_attr *attr,
					 u8 src_path_bits)
{
	if (attr->type == RDMA_AH_ATTR_TYPE_IB)
		attr->ib.src_path_bits = src_path_bits;
	else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
		attr->opa.src_path_bits = src_path_bits;
}

static inline u8 rdma_ah_get_path_bits(const struct rdma_ah_attr *attr)
{
	if (attr->type == RDMA_AH_ATTR_TYPE_IB)
		return attr->ib.src_path_bits;
	else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
		return attr->opa.src_path_bits;
	return 0;
}

static inline void rdma_ah_set_make_grd(struct rdma_ah_attr *attr,
					bool make_grd)
{
	if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
		attr->opa.make_grd = make_grd;
}

static inline bool rdma_ah_get_make_grd(const struct rdma_ah_attr *attr)
{
	if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
		return attr->opa.make_grd;
	return false;
}

static inline void rdma_ah_set_port_num(struct rdma_ah_attr *attr, u8 port_num)
{
	attr->port_num = port_num;
}

static inline u8 rdma_ah_get_port_num(const struct rdma_ah_attr *attr)
{
	return attr->port_num;
}

static inline void rdma_ah_set_static_rate(struct rdma_ah_attr *attr,
					   u8 static_rate)
{
	attr->static_rate = static_rate;
}

static inline u8 rdma_ah_get_static_rate(const struct rdma_ah_attr *attr)
{
	return attr->static_rate;
}

static inline void rdma_ah_set_ah_flags(struct rdma_ah_attr *attr,
					enum ib_ah_flags flag)
{
	attr->ah_flags = flag;
}

static inline enum ib_ah_flags
rdma_ah_get_ah_flags(const struct rdma_ah_attr *attr)
{
	return attr->ah_flags;
}

static inline const struct ib_global_route
*rdma_ah_read_grh(const struct rdma_ah_attr *attr)
{
	return &attr->grh;
}

/* To retrieve and modify the grh */
static inline struct ib_global_route
*rdma_ah_retrieve_grh(struct rdma_ah_attr *attr)
{
	return &attr->grh;
}

static inline void rdma_ah_set_dgid_raw(struct rdma_ah_attr *attr, void *dgid)
{
	struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);

	memcpy(grh->dgid.raw, dgid, sizeof(grh->dgid));
}

static inline void rdma_ah_set_subnet_prefix(struct rdma_ah_attr *attr,
					     __be64 prefix)
{
	struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);

	grh->dgid.global.subnet_prefix = prefix;
}

static inline void rdma_ah_set_interface_id(struct rdma_ah_attr *attr,
					    __be64 if_id)
{
	struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);

	grh->dgid.global.interface_id = if_id;
}

static inline void rdma_ah_set_grh(struct rdma_ah_attr *attr,
				   union ib_gid *dgid, u32 flow_label,
				   u8 sgid_index, u8 hop_limit,
				   u8 traffic_class)
{
	struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);

	attr->ah_flags = IB_AH_GRH;
	if (dgid)
		grh->dgid = *dgid;
	grh->flow_label = flow_label;
	grh->sgid_index = sgid_index;
	grh->hop_limit = hop_limit;
	grh->traffic_class = traffic_class;
	grh->sgid_attr = NULL;
}
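
/*
 * Illustrative sketch (not part of this header): filling an rdma_ah_attr
 * with a GRH using the accessors above. All parameter values are made up
 * and the function name is hypothetical.
 */
#if 0
static void my_fill_ah_attr(struct rdma_ah_attr *attr, union ib_gid *dgid)
{
	rdma_ah_set_grh(attr, dgid, /*flow_label*/ 0, /*sgid_index*/ 0,
			/*hop_limit*/ 64, /*traffic_class*/ 0);
	rdma_ah_set_sl(attr, 0);
	rdma_ah_set_port_num(attr, 1);
}
#endif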
void rdma_destroy_ah_attr(struct rdma_ah_attr *ah_attr);

void rdma_move_grh_sgid_attr(struct rdma_ah_attr *attr, union ib_gid *dgid,
			     u32 flow_label, u8 hop_limit, u8 traffic_class,
			     const struct ib_gid_attr *sgid_attr);
void rdma_copy_ah_attr(struct rdma_ah_attr *dest,
		       const struct rdma_ah_attr *src);
void rdma_replace_ah_attr(struct rdma_ah_attr *old,
			  const struct rdma_ah_attr *new);
void rdma_move_ah_attr(struct rdma_ah_attr *dest, struct rdma_ah_attr *src);

/**
 * rdma_ah_find_type - Return address handle type.
 *
 * @dev: Device to be checked
 * @port_num: Port number
 */
static inline enum rdma_ah_attr_type rdma_ah_find_type(struct ib_device *dev,
						       u8 port_num)
{
	if (rdma_protocol_roce(dev, port_num))
		return RDMA_AH_ATTR_TYPE_ROCE;
	if (rdma_protocol_ib(dev, port_num)) {
		if (rdma_cap_opa_ah(dev, port_num))
			return RDMA_AH_ATTR_TYPE_OPA;
		return RDMA_AH_ATTR_TYPE_IB;
	}

	return RDMA_AH_ATTR_TYPE_UNDEFINED;
}
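
/*
 * Illustrative sketch (not part of this header): resolving the attribute
 * type first, so the union-based setters above write the correct member.
 * The function name is hypothetical.
 */
#if 0
static void my_init_ah_attr(struct ib_device *dev, u8 port_num,
			    struct rdma_ah_attr *attr, u32 dlid)
{
	attr->type = rdma_ah_find_type(dev, port_num);

	/* rdma_ah_set_dlid() is a no-op for RDMA_AH_ATTR_TYPE_ROCE. */
	rdma_ah_set_dlid(attr, dlid);
	rdma_ah_set_port_num(attr, port_num);
}
#endif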
/**
 * ib_lid_cpu16 - Return lid in 16bit CPU encoding.
 * In the current implementation the only way to get
 * the 32bit lid is from other sources for OPA.
 * For IB, lids will always be 16bits so cast the
 * value accordingly.
 *
 * @lid: A 32bit LID
 */
static inline u16 ib_lid_cpu16(u32 lid)
{
	WARN_ON_ONCE(lid & 0xFFFF0000);
	return (u16)lid;
}

/**
 * ib_lid_be16 - Return lid in 16bit BE encoding.
 *
 * @lid: A 32bit LID
 */
static inline __be16 ib_lid_be16(u32 lid)
{
	WARN_ON_ONCE(lid & 0xFFFF0000);
	return cpu_to_be16((u16)lid);
}

/**
 * ib_get_vector_affinity - Get the affinity mappings of a given completion
 *   vector
 * @device: the rdma device
 * @comp_vector: index of completion vector
 *
 * Returns NULL on failure, otherwise a corresponding cpu map of the
 * completion vector (returns all-cpus map if the device driver doesn't
 * implement get_vector_affinity).
 */
static inline const struct cpumask *
ib_get_vector_affinity(struct ib_device *device, int comp_vector)
{
	if (comp_vector < 0 || comp_vector >= device->num_comp_vectors ||
	    !device->ops.get_vector_affinity)
		return NULL;

	return device->ops.get_vector_affinity(device, comp_vector);
}
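
/*
 * Illustrative sketch (not part of this header): picking the completion
 * vector whose affinity mask covers a given CPU, falling back to vector
 * 0. The function name is hypothetical.
 */
#if 0
static int my_pick_comp_vector(struct ib_device *device, int cpu)
{
	int vec;

	for (vec = 0; vec < device->num_comp_vectors; vec++) {
		const struct cpumask *mask =
			ib_get_vector_affinity(device, vec);

		if (mask && cpumask_test_cpu(cpu, mask))
			return vec;
	}
	return 0;
}
#endif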
/**
 * rdma_roce_rescan_device - Rescan all of the network devices in the system
 * and add their gids, as needed, to the relevant RoCE devices.
 *
 * @device: the rdma device
 */
void rdma_roce_rescan_device(struct ib_device *ibdev);

struct ib_ucontext *ib_uverbs_get_ucontext_file(struct ib_uverbs_file *ufile);

int uverbs_destroy_def_handler(struct uverbs_attr_bundle *attrs);

struct net_device *rdma_alloc_netdev(struct ib_device *device, u8 port_num,
				     enum rdma_netdev_t type, const char *name,
				     unsigned char name_assign_type,
				     void (*setup)(struct net_device *));

int rdma_init_netdev(struct ib_device *device, u8 port_num,
		     enum rdma_netdev_t type, const char *name,
		     unsigned char name_assign_type,
		     void (*setup)(struct net_device *),
		     struct net_device *netdev);

/**
 * rdma_set_device_sysfs_group - Set device attributes group to have
 *				 driver-specific sysfs entries for the
 *				 infiniband class.
 *
 * @device: device pointer for which attributes to be created
 * @group: Pointer to group which should be added when device
 * is registered with sysfs.
 * rdma_set_device_sysfs_group() allows existing drivers to expose one
 * group per device to have sysfs attributes.
 *
 * NOTE: New drivers should not make use of this API; instead new device
 * parameters should be exposed via the netlink command. This API and
 * mechanism exist only for existing drivers.
 */
static inline void
rdma_set_device_sysfs_group(struct ib_device *dev,
			    const struct attribute_group *group)
{
	dev->groups[1] = group;
}

/**
 * rdma_device_to_ibdev - Get ib_device pointer from device pointer
 *
 * @device: device pointer for which ib_device pointer to retrieve
 *
 * rdma_device_to_ibdev() retrieves ib_device pointer from device.
 */
static inline struct ib_device *rdma_device_to_ibdev(struct device *device)
{
	struct ib_core_device *coredev =
		container_of(device, struct ib_core_device, dev);

	return coredev->owner;
}

/**
 * rdma_device_to_drv_device - Helper macro to reach back to driver's
 *	ib_device holder structure from device pointer.
 *
 * NOTE: New drivers should not make use of this API; this API is only for
 * existing drivers who have exposed sysfs entries using
 * rdma_set_device_sysfs_group().
 */
#define rdma_device_to_drv_device(dev, drv_dev_struct, ibdev_member)           \
	container_of(rdma_device_to_ibdev(dev), drv_dev_struct, ibdev_member)
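
/*
 * Illustrative sketch (not part of this header): a driver sysfs show
 * callback reaching back to its private structure. "struct my_dev", its
 * "ibdev" member and "fw_rev" field are hypothetical.
 */
#if 0
struct my_dev {
	struct ib_device ibdev;
	u32 fw_rev;
};

static ssize_t fw_rev_show(struct device *device,
			   struct device_attribute *attr, char *buf)
{
	struct my_dev *mdev =
		rdma_device_to_drv_device(device, struct my_dev, ibdev);

	return scnprintf(buf, PAGE_SIZE, "%u\n", mdev->fw_rev);
}
#endif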
bool rdma_dev_access_netns(const struct ib_device *device,
			   const struct net *net);

#define IB_ROCE_UDP_ENCAP_VALID_PORT_MIN (0xC000)
#define IB_ROCE_UDP_ENCAP_VALID_PORT_MAX (0xFFFF)
#define IB_GRH_FLOWLABEL_MASK		 (0x000FFFFF)

/**
 * rdma_flow_label_to_udp_sport - generate a RoCE v2 UDP src port value based
 *                               on the flow label
 *
 * This function will convert the 20 bit flow_label input to a valid RoCE v2
 * UDP src port 14 bit value. All RoCE V2 drivers should use this same
 * mechanism.
 */
static inline u16 rdma_flow_label_to_udp_sport(u32 fl)
{
	u32 fl_low = fl & 0x03fff, fl_high = fl & 0xFC000;

	fl_low ^= fl_high >> 14;
	return (u16)(fl_low | IB_ROCE_UDP_ENCAP_VALID_PORT_MIN);
}
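
/*
 * Illustrative sketch (not part of this header): a worked example of the
 * fold above. For fl = 0xABCDE the low 14 bits are 0x3CDE and the high 6
 * bits are 0x2A (0xA8000 >> 14), giving (0x3CDE ^ 0x2A) | 0xC000 = 0xFCF4.
 */
#if 0
static void my_sport_demo(void)
{
	u16 sport = rdma_flow_label_to_udp_sport(0xABCDE);

	WARN_ON(sport != 0xFCF4);
}
#endif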
/**
 * rdma_calc_flow_label - generate an RDMA symmetric flow label value based on
 *                        local and remote qpn values
 *
 * This function folds the multiplication result of the two 24-bit qpn
 * fields into a 20 bit result.
 *
 * This function will create a symmetric flow_label value based on the local
 * and remote qpn values. This allows both the requester and responder
 * to calculate the same flow_label for a given connection.
 *
 * This helper function should be used by drivers in case the upper layer
 * provides a zero flow_label value. This is to improve entropy of RDMA
 * traffic in the network.
 */
static inline u32 rdma_calc_flow_label(u32 lqpn, u32 rqpn)
{
	u64 v = (u64)lqpn * rqpn;

	v ^= v >> 20;
	v ^= v >> 40;

	return (u32)(v & IB_GRH_FLOWLABEL_MASK);
}
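
/*
 * Illustrative sketch (not part of this header): deriving the RoCE v2 UDP
 * source port from the QPNs when the upper layer passed flow_label == 0.
 * Symmetry holds because lqpn * rqpn == rqpn * lqpn, so both ends compute
 * the same value. The function name is hypothetical.
 */
#if 0
static u16 my_udp_sport(u32 fl, u32 lqpn, u32 rqpn)
{
	if (!fl)
		fl = rdma_calc_flow_label(lqpn, rqpn);
	return rdma_flow_label_to_udp_sport(fl);
}
#endif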
#endif /* IB_VERBS_H */