/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
 * Copyright (c) 2004 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2004 Infinicon Corporation. All rights reserved.
 * Copyright (c) 2004 Intel Corporation. All rights reserved.
 * Copyright (c) 2004 Topspin Corporation. All rights reserved.
 * Copyright (c) 2004 Voltaire Corporation. All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems. All rights reserved.
 */

#include <linux/ethtool.h>
#include <linux/types.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/rwsem.h>
#include <linux/workqueue.h>
#include <linux/irq_poll.h>
#include <uapi/linux/if_ether.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#include <linux/refcount.h>
#include <linux/if_link.h>
#include <linux/atomic.h>
#include <linux/mmu_notifier.h>
#include <linux/uaccess.h>
#include <linux/cgroup_rdma.h>
#include <linux/irqflags.h>
#include <linux/preempt.h>
#include <linux/dim.h>
#include <uapi/rdma/ib_user_verbs.h>
#include <rdma/rdma_counter.h>
#include <rdma/restrack.h>
#include <rdma/signature.h>
#include <uapi/rdma/rdma_user_ioctl.h>
#include <uapi/rdma/ib_user_ioctl_verbs.h>

#define IB_FW_VERSION_NAME_MAX	ETHTOOL_FWVERS_LEN

struct ib_usrq_object;

extern struct workqueue_struct *ib_wq;
extern struct workqueue_struct *ib_comp_wq;
extern struct workqueue_struct *ib_comp_unbound_wq;

void ibdev_printk(const char *level, const struct ib_device *ibdev,
		  const char *format, ...);
void ibdev_emerg(const struct ib_device *ibdev, const char *format, ...);
void ibdev_alert(const struct ib_device *ibdev, const char *format, ...);
void ibdev_crit(const struct ib_device *ibdev, const char *format, ...);
void ibdev_err(const struct ib_device *ibdev, const char *format, ...);
void ibdev_warn(const struct ib_device *ibdev, const char *format, ...);
void ibdev_notice(const struct ib_device *ibdev, const char *format, ...);
void ibdev_info(const struct ib_device *ibdev, const char *format, ...);

#if defined(CONFIG_DYNAMIC_DEBUG) || \
	(defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE))
#define ibdev_dbg(__dev, format, args...)			\
	dynamic_ibdev_dbg(__dev, format, ##args)
#else
static inline
void ibdev_dbg(const struct ib_device *ibdev, const char *format, ...) {}
#endif
#define ibdev_level_ratelimited(ibdev_level, ibdev, fmt, ...)		\
do {									\
	static DEFINE_RATELIMIT_STATE(_rs,				\
				      DEFAULT_RATELIMIT_INTERVAL,	\
				      DEFAULT_RATELIMIT_BURST);		\
	if (__ratelimit(&_rs))						\
		ibdev_level(ibdev, fmt, ##__VA_ARGS__);			\
} while (0)

#define ibdev_emerg_ratelimited(ibdev, fmt, ...) \
	ibdev_level_ratelimited(ibdev_emerg, ibdev, fmt, ##__VA_ARGS__)
#define ibdev_alert_ratelimited(ibdev, fmt, ...) \
	ibdev_level_ratelimited(ibdev_alert, ibdev, fmt, ##__VA_ARGS__)
#define ibdev_crit_ratelimited(ibdev, fmt, ...) \
	ibdev_level_ratelimited(ibdev_crit, ibdev, fmt, ##__VA_ARGS__)
#define ibdev_err_ratelimited(ibdev, fmt, ...) \
	ibdev_level_ratelimited(ibdev_err, ibdev, fmt, ##__VA_ARGS__)
#define ibdev_warn_ratelimited(ibdev, fmt, ...) \
	ibdev_level_ratelimited(ibdev_warn, ibdev, fmt, ##__VA_ARGS__)
#define ibdev_notice_ratelimited(ibdev, fmt, ...) \
	ibdev_level_ratelimited(ibdev_notice, ibdev, fmt, ##__VA_ARGS__)
#define ibdev_info_ratelimited(ibdev, fmt, ...) \
	ibdev_level_ratelimited(ibdev_info, ibdev, fmt, ##__VA_ARGS__)
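/*
 * Example (illustrative sketch, not part of this header): emitting a
 * rate-limited warning from a driver that holds an ib_device pointer.
 * The qp_num/err variables are hypothetical.
 */
#if 0
	if (err)
		ibdev_warn_ratelimited(ibdev,
				       "dropping MAD, QP%u not ready (err %d)\n",
				       qp_num, err);
#endif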
#if defined(CONFIG_DYNAMIC_DEBUG) || \
	(defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE))
/* descriptor check is first to prevent flooding with "callbacks suppressed" */
#define ibdev_dbg_ratelimited(ibdev, fmt, ...)				\
do {									\
	static DEFINE_RATELIMIT_STATE(_rs,				\
				      DEFAULT_RATELIMIT_INTERVAL,	\
				      DEFAULT_RATELIMIT_BURST);		\
	DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt);			\
	if (DYNAMIC_DEBUG_BRANCH(descriptor) && __ratelimit(&_rs))	\
		__dynamic_ibdev_dbg(&descriptor, ibdev, fmt,		\
				    ##__VA_ARGS__);			\
} while (0)
#else
__printf(2, 3) __cold
static inline
void ibdev_dbg_ratelimited(const struct ib_device *ibdev,
			   const char *format, ...) {}
#endif

union ib_gid {
	u8	raw[16];
	struct {
		__be64	subnet_prefix;
		__be64	interface_id;
	} global;
};

extern union ib_gid zgid;

enum ib_gid_type {
	IB_GID_TYPE_IB = IB_UVERBS_GID_TYPE_IB,
	IB_GID_TYPE_ROCE = IB_UVERBS_GID_TYPE_ROCE_V1,
	IB_GID_TYPE_ROCE_UDP_ENCAP = IB_UVERBS_GID_TYPE_ROCE_V2,
};

#define ROCE_V2_UDP_DPORT	4791

struct ib_gid_attr {
	struct net_device __rcu	*ndev;
	struct ib_device	*device;
	union ib_gid		gid;
	enum ib_gid_type	gid_type;
	/* ... */
};

enum {
	/* set the local administered indication */
	IB_SA_WELL_KNOWN_GUID	= BIT_ULL(57) | 2,
};

enum rdma_transport_type {
	RDMA_TRANSPORT_IB,
	RDMA_TRANSPORT_IWARP,
	RDMA_TRANSPORT_USNIC,
	RDMA_TRANSPORT_USNIC_UDP,
	RDMA_TRANSPORT_UNSPECIFIED,
};

enum rdma_protocol_type {
	/* ... */
	RDMA_PROTOCOL_USNIC_UDP
};

__attribute_const__ enum rdma_transport_type
rdma_node_get_transport(unsigned int node_type);

enum rdma_network_type {
	RDMA_NETWORK_IB,
	RDMA_NETWORK_ROCE_V1,
	RDMA_NETWORK_IPV4,
	RDMA_NETWORK_IPV6
};

static inline enum ib_gid_type ib_network_to_gid_type(enum rdma_network_type network_type)
{
	if (network_type == RDMA_NETWORK_IPV4 ||
	    network_type == RDMA_NETWORK_IPV6)
		return IB_GID_TYPE_ROCE_UDP_ENCAP;
	else if (network_type == RDMA_NETWORK_ROCE_V1)
		return IB_GID_TYPE_ROCE;
	else
		return IB_GID_TYPE_IB;
}

static inline enum rdma_network_type
rdma_gid_attr_network_type(const struct ib_gid_attr *attr)
{
	if (attr->gid_type == IB_GID_TYPE_IB)
		return RDMA_NETWORK_IB;

	if (attr->gid_type == IB_GID_TYPE_ROCE)
		return RDMA_NETWORK_ROCE_V1;

	if (ipv6_addr_v4mapped((struct in6_addr *)&attr->gid))
		return RDMA_NETWORK_IPV4;
	else
		return RDMA_NETWORK_IPV6;
}

enum rdma_link_layer {
	IB_LINK_LAYER_UNSPECIFIED,
	IB_LINK_LAYER_INFINIBAND,
	IB_LINK_LAYER_ETHERNET,
};
enum ib_device_cap_flags {
	IB_DEVICE_RESIZE_MAX_WR			= (1 << 0),
	IB_DEVICE_BAD_PKEY_CNTR			= (1 << 1),
	IB_DEVICE_BAD_QKEY_CNTR			= (1 << 2),
	IB_DEVICE_RAW_MULTI			= (1 << 3),
	IB_DEVICE_AUTO_PATH_MIG			= (1 << 4),
	IB_DEVICE_CHANGE_PHY_PORT		= (1 << 5),
	IB_DEVICE_UD_AV_PORT_ENFORCE		= (1 << 6),
	IB_DEVICE_CURR_QP_STATE_MOD		= (1 << 7),
	IB_DEVICE_SHUTDOWN_PORT			= (1 << 8),
	/* Not in use, former INIT_TYPE		= (1 << 9),*/
	IB_DEVICE_PORT_ACTIVE_EVENT		= (1 << 10),
	IB_DEVICE_SYS_IMAGE_GUID		= (1 << 11),
	IB_DEVICE_RC_RNR_NAK_GEN		= (1 << 12),
	IB_DEVICE_SRQ_RESIZE			= (1 << 13),
	IB_DEVICE_N_NOTIFY_CQ			= (1 << 14),

	/*
	 * This device supports a per-device lkey or stag that can be
	 * used without performing a memory registration for the local
	 * memory.  Note that ULPs should never check this flag, but
	 * should instead use the local_dma_lkey flag in the ib_pd structure,
	 * which will always contain a usable lkey.
	 */
	IB_DEVICE_LOCAL_DMA_LKEY		= (1 << 15),
	/* Reserved, old SEND_W_INV		= (1 << 16),*/
	IB_DEVICE_MEM_WINDOW			= (1 << 17),
	/*
	 * Devices should set IB_DEVICE_UD_IP_SUM if they support
	 * insertion of UDP and TCP checksum on outgoing UD IPoIB
	 * messages and can verify the validity of checksum for
	 * incoming messages.  Setting this flag implies that the
	 * IPoIB driver may set NETIF_F_IP_CSUM for datagram mode.
	 */
	IB_DEVICE_UD_IP_CSUM			= (1 << 18),
	IB_DEVICE_UD_TSO			= (1 << 19),
	IB_DEVICE_XRC				= (1 << 20),

	/*
	 * This device supports the IB "base memory management extension",
	 * which includes support for fast registrations (IB_WR_REG_MR,
	 * IB_WR_LOCAL_INV and IB_WR_SEND_WITH_INV verbs).  This flag should
	 * also be set by any iWarp device which must support FRs to comply
	 * to the iWarp verbs spec.  iWarp devices also support the
	 * IB_WR_RDMA_READ_WITH_INV verb for RDMA READs that invalidate the
	 * stag.
	 */
	IB_DEVICE_MEM_MGT_EXTENSIONS		= (1 << 21),
	IB_DEVICE_BLOCK_MULTICAST_LOOPBACK	= (1 << 22),
	IB_DEVICE_MEM_WINDOW_TYPE_2A		= (1 << 23),
	IB_DEVICE_MEM_WINDOW_TYPE_2B		= (1 << 24),
	IB_DEVICE_RC_IP_CSUM			= (1 << 25),
	/* Deprecated. Please use IB_RAW_PACKET_CAP_IP_CSUM. */
	IB_DEVICE_RAW_IP_CSUM			= (1 << 26),
	/*
	 * Devices should set IB_DEVICE_CROSS_CHANNEL if they
	 * support execution of WQEs that involve synchronization
	 * of I/O operations with single completion queue managed
	 * by hardware.
	 */
	IB_DEVICE_CROSS_CHANNEL			= (1 << 27),
	IB_DEVICE_MANAGED_FLOW_STEERING		= (1 << 29),
	IB_DEVICE_INTEGRITY_HANDOVER		= (1 << 30),
	IB_DEVICE_ON_DEMAND_PAGING		= (1ULL << 31),
	IB_DEVICE_SG_GAPS_REG			= (1ULL << 32),
	IB_DEVICE_VIRTUAL_FUNCTION		= (1ULL << 33),
	/* Deprecated. Please use IB_RAW_PACKET_CAP_SCATTER_FCS. */
	IB_DEVICE_RAW_SCATTER_FCS		= (1ULL << 34),
	IB_DEVICE_RDMA_NETDEV_OPA		= (1ULL << 35),
	/* The device supports padding incoming writes to cacheline. */
	IB_DEVICE_PCI_WRITE_END_PADDING		= (1ULL << 36),
	IB_DEVICE_ALLOW_USER_UNREG		= (1ULL << 37),
};
enum ib_odp_general_cap_bits {
	IB_ODP_SUPPORT		= 1 << 0,
	IB_ODP_SUPPORT_IMPLICIT	= 1 << 1,
};

enum ib_odp_transport_cap_bits {
	IB_ODP_SUPPORT_SEND	= 1 << 0,
	IB_ODP_SUPPORT_RECV	= 1 << 1,
	IB_ODP_SUPPORT_WRITE	= 1 << 2,
	IB_ODP_SUPPORT_READ	= 1 << 3,
	IB_ODP_SUPPORT_ATOMIC	= 1 << 4,
	IB_ODP_SUPPORT_SRQ_RECV	= 1 << 5,
};

struct ib_odp_caps {
	uint64_t general_caps;
	struct {
		uint32_t rc_odp_caps;
		uint32_t uc_odp_caps;
		uint32_t ud_odp_caps;
		uint32_t xrc_odp_caps;
	} per_transport_caps;
};

struct ib_rss_caps {
	/* Corresponding bit will be set if qp type from
	 * 'enum ib_qp_type' is supported, e.g.
	 * supported_qpts |= 1 << IB_QPT_UD
	 */
	u32 supported_qpts;
	u32 max_rwq_indirection_tables;
	u32 max_rwq_indirection_table_size;
};

enum ib_tm_cap_flags {
	/* Support tag matching with rendezvous offload for RC transport */
	IB_TM_CAP_RNDV_RC = 1 << 0,
};

struct ib_tm_caps {
	/* Max size of RNDV header */
	u32 max_rndv_hdr_size;
	/* Max number of entries in tag matching list */
	u32 max_num_tags;
	/* From enum ib_tm_cap_flags */
	u32 flags;
	/* Max number of outstanding list operations */
	u32 max_ops;
	/* Max number of SGE in tag matching entry */
	u32 max_sge;
};

struct ib_cq_init_attr {
	/* ... */
};

enum ib_cq_attr_mask {
	IB_CQ_MODERATE = 1 << 0,
};

struct ib_cq_caps {
	u16 max_cq_moderation_count;
	u16 max_cq_moderation_period;
};

struct ib_dm_mr_attr {
	/* ... */
};

struct ib_dm_alloc_attr {
	/* ... */
};

struct ib_device_attr {
	__be64			sys_image_guid;
	/* ... */
	u64			device_cap_flags;
	/* ... */
	int			max_qp_init_rd_atom;
	int			max_ee_init_rd_atom;
	enum ib_atomic_cap	atomic_cap;
	enum ib_atomic_cap	masked_atomic_cap;
	/* ... */
	int			max_mcast_qp_attach;
	int			max_total_mcast_qp_attach;
	/* ... */
	unsigned int		max_fast_reg_page_list_len;
	unsigned int		max_pi_fast_reg_page_list_len;
	/* ... */
	u8			local_ca_ack_delay;
	/* ... */
	struct ib_odp_caps	odp_caps;
	uint64_t		timestamp_mask;
	uint64_t		hca_core_clock; /* in KHZ */
	struct ib_rss_caps	rss_caps;
	/* ... */
	u32			raw_packet_caps; /* Use ib_raw_packet_caps enum */
	struct ib_tm_caps	tm_caps;
	struct ib_cq_caps	cq_caps;
	/* ... */
	/* Max entries for sgl for optimized performance per READ */
	u32			max_sgl_rd;
};
static inline int ib_mtu_enum_to_int(enum ib_mtu mtu)
{
	switch (mtu) {
	case IB_MTU_256:  return  256;
	case IB_MTU_512:  return  512;
	case IB_MTU_1024: return 1024;
	case IB_MTU_2048: return 2048;
	case IB_MTU_4096: return 4096;
	default:	  return -1;
	}
}

static inline enum ib_mtu ib_mtu_int_to_enum(int mtu)
{
	if (mtu >= 4096)
		return IB_MTU_4096;
	else if (mtu >= 2048)
		return IB_MTU_2048;
	else if (mtu >= 1024)
		return IB_MTU_1024;
	else if (mtu >= 512)
		return IB_MTU_512;
	else
		return IB_MTU_256;
}

static inline int opa_mtu_enum_to_int(enum opa_mtu mtu)
{
	switch (mtu) {
	case OPA_MTU_8192:
		return 8192;
	case OPA_MTU_10240:
		return 10240;
	default:
		return ib_mtu_enum_to_int((enum ib_mtu)mtu);
	}
}

static inline enum opa_mtu opa_mtu_int_to_enum(int mtu)
{
	if (mtu >= 10240)
		return OPA_MTU_10240;
	else if (mtu >= 8192)
		return OPA_MTU_8192;
	else
		return ((enum opa_mtu)ib_mtu_int_to_enum(mtu));
}

enum ib_port_state {
	/* ... */
	IB_PORT_ACTIVE_DEFER = 5
};

enum ib_port_phys_state {
	IB_PORT_PHYS_STATE_SLEEP = 1,
	IB_PORT_PHYS_STATE_POLLING = 2,
	IB_PORT_PHYS_STATE_DISABLED = 3,
	IB_PORT_PHYS_STATE_PORT_CONFIGURATION_TRAINING = 4,
	IB_PORT_PHYS_STATE_LINK_UP = 5,
	IB_PORT_PHYS_STATE_LINK_ERROR_RECOVERY = 6,
	IB_PORT_PHYS_STATE_PHY_TEST = 7,
};

static inline int ib_width_enum_to_int(enum ib_port_width width)
{
	switch (width) {
	case IB_WIDTH_1X:  return  1;
	case IB_WIDTH_2X:  return  2;
	case IB_WIDTH_4X:  return  4;
	case IB_WIDTH_8X:  return  8;
	case IB_WIDTH_12X: return 12;
	default:	   return -1;
	}
}
/**
 * struct rdma_hw_stats
 * @lock - Mutex to protect parallel write access to lifespan and values
 *   of counters, which are 64bits and not guaranteed to be written
 *   atomically on 32bits systems.
 * @timestamp - Used by the core code to track when the last update was
 * @lifespan - Used by the core code to determine how old the counters
 *   should be before being updated again.  Stored in jiffies, defaults
 *   to 10 milliseconds, drivers can override the default by specifying
 *   their own value during their allocation routine.
 * @name - Array of pointers to static names used for the counters in
 *   directory.
 * @num_counters - How many hardware counters there are.  If name is
 *   shorter than this number, a kernel oops will result.  Driver authors
 *   are encouraged to leave BUILD_BUG_ON(ARRAY_SIZE(@name) < num_counters)
 *   in their code to prevent this.
 * @value - Array of u64 counters that are accessed by the sysfs code and
 *   filled in by the drivers get_stats routine
 */
struct rdma_hw_stats {
	struct mutex	lock; /* Protect lifespan and values[] */
	unsigned long	timestamp;
	unsigned long	lifespan;
	const char * const *names;
	int		num_counters;
	u64		value[];
};
#define RDMA_HW_STATS_DEFAULT_LIFESPAN 10

/**
 * rdma_alloc_hw_stats_struct - Helper function to allocate dynamic struct
 *   for drivers.
 * @names - Array of static const char *
 * @num_counters - How many elements in array
 * @lifespan - How many milliseconds between updates
 */
static inline struct rdma_hw_stats *rdma_alloc_hw_stats_struct(
		const char * const *names, int num_counters,
		unsigned long lifespan)
{
	struct rdma_hw_stats *stats;

	stats = kzalloc(sizeof(*stats) + num_counters * sizeof(u64),
			GFP_KERNEL);
	if (!stats)
		return NULL;

	stats->names = names;
	stats->num_counters = num_counters;
	stats->lifespan = msecs_to_jiffies(lifespan);

	return stats;
}
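/*
 * Example (illustrative sketch): a driver's alloc_hw_stats callback built
 * on rdma_alloc_hw_stats_struct(). The foo_ names and counter list are
 * hypothetical; ARRAY_SIZE(names) must cover num_counters, as noted above.
 */
#if 0
static const char * const foo_counter_names[] = {
	"rx_packets",
	"tx_packets",
};

static struct rdma_hw_stats *foo_alloc_hw_stats(struct ib_device *ibdev,
						u8 port_num)
{
	return rdma_alloc_hw_stats_struct(foo_counter_names,
					  ARRAY_SIZE(foo_counter_names),
					  RDMA_HW_STATS_DEFAULT_LIFESPAN);
}
#endif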
/* Define bits for the various functionality this port needs to be
 * supported by the core.
 */
/* Management                           0x00000FFF */
#define RDMA_CORE_CAP_IB_MAD            0x00000001
#define RDMA_CORE_CAP_IB_SMI            0x00000002
#define RDMA_CORE_CAP_IB_CM             0x00000004
#define RDMA_CORE_CAP_IW_CM             0x00000008
#define RDMA_CORE_CAP_IB_SA             0x00000010
#define RDMA_CORE_CAP_OPA_MAD           0x00000020

/* Address format                       0x000FF000 */
#define RDMA_CORE_CAP_AF_IB             0x00001000
#define RDMA_CORE_CAP_ETH_AH            0x00002000
#define RDMA_CORE_CAP_OPA_AH            0x00004000
#define RDMA_CORE_CAP_IB_GRH_REQUIRED   0x00008000

/* Protocol                             0xFFF00000 */
#define RDMA_CORE_CAP_PROT_IB           0x00100000
#define RDMA_CORE_CAP_PROT_ROCE         0x00200000
#define RDMA_CORE_CAP_PROT_IWARP        0x00400000
#define RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP 0x00800000
#define RDMA_CORE_CAP_PROT_RAW_PACKET   0x01000000
#define RDMA_CORE_CAP_PROT_USNIC        0x02000000

#define RDMA_CORE_PORT_IB_GRH_REQUIRED (RDMA_CORE_CAP_IB_GRH_REQUIRED \
					| RDMA_CORE_CAP_PROT_ROCE     \
					| RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP)

#define RDMA_CORE_PORT_IBA_IB          (RDMA_CORE_CAP_PROT_IB  \
					| RDMA_CORE_CAP_IB_MAD \
					| RDMA_CORE_CAP_IB_SMI \
					| RDMA_CORE_CAP_IB_CM  \
					| RDMA_CORE_CAP_IB_SA  \
					| RDMA_CORE_CAP_AF_IB)
#define RDMA_CORE_PORT_IBA_ROCE        (RDMA_CORE_CAP_PROT_ROCE \
					| RDMA_CORE_CAP_IB_MAD  \
					| RDMA_CORE_CAP_IB_CM   \
					| RDMA_CORE_CAP_AF_IB   \
					| RDMA_CORE_CAP_ETH_AH)
#define RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP			\
					(RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP \
					| RDMA_CORE_CAP_IB_MAD  \
					| RDMA_CORE_CAP_IB_CM   \
					| RDMA_CORE_CAP_AF_IB   \
					| RDMA_CORE_CAP_ETH_AH)
#define RDMA_CORE_PORT_IWARP           (RDMA_CORE_CAP_PROT_IWARP \
					| RDMA_CORE_CAP_IW_CM)
#define RDMA_CORE_PORT_INTEL_OPA       (RDMA_CORE_PORT_IBA_IB  \
					| RDMA_CORE_CAP_OPA_MAD)

#define RDMA_CORE_PORT_RAW_PACKET	(RDMA_CORE_CAP_PROT_RAW_PACKET)

#define RDMA_CORE_PORT_USNIC		(RDMA_CORE_CAP_PROT_USNIC)
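/*
 * Example (illustrative sketch): a RoCEv2 driver's get_port_immutable
 * callback advertising the port's protocol and management capabilities
 * through the RDMA_CORE_* masks above. foo_get_port_immutable is
 * hypothetical; ib_query_port() and IB_MGMT_MAD_SIZE come from elsewhere
 * in the RDMA headers.
 */
#if 0
static int foo_get_port_immutable(struct ib_device *ibdev, u8 port_num,
				  struct ib_port_immutable *immutable)
{
	struct ib_port_attr attr;
	int err;

	err = ib_query_port(ibdev, port_num, &attr);
	if (err)
		return err;

	immutable->pkey_tbl_len = attr.pkey_tbl_len;
	immutable->gid_tbl_len = attr.gid_tbl_len;
	immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
	immutable->max_mad_size = IB_MGMT_MAD_SIZE;
	return 0;
}
#endif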
struct ib_port_attr {
	/* ... */
	enum ib_port_state	state;
	/* ... */
	enum ib_mtu		active_mtu;
	/* ... */
	unsigned int		ip_gids:1;
	/* This is the value from PortInfo CapabilityMask, defined by IBA */
	u32			port_cap_flags;
	/* ... */
};

enum ib_device_modify_flags {
	IB_DEVICE_MODIFY_SYS_IMAGE_GUID	= 1 << 0,
	IB_DEVICE_MODIFY_NODE_DESC	= 1 << 1
};

#define IB_DEVICE_NODE_DESC_MAX 64

struct ib_device_modify {
	u64	sys_image_guid;
	char	node_desc[IB_DEVICE_NODE_DESC_MAX];
};

enum ib_port_modify_flags {
	IB_PORT_SHUTDOWN	= 1,
	IB_PORT_INIT_TYPE	= (1<<2),
	IB_PORT_RESET_QKEY_CNTR	= (1<<3),
	IB_PORT_OPA_MASK_CHG	= (1<<4)
};

struct ib_port_modify {
	u32	set_port_cap_mask;
	u32	clr_port_cap_mask;
	u8	init_type;
};

enum ib_event_type {
	IB_EVENT_CQ_ERR,
	IB_EVENT_QP_FATAL,
	IB_EVENT_QP_REQ_ERR,
	IB_EVENT_QP_ACCESS_ERR,
	IB_EVENT_COMM_EST,
	IB_EVENT_SQ_DRAINED,
	IB_EVENT_PATH_MIG,
	IB_EVENT_PATH_MIG_ERR,
	IB_EVENT_DEVICE_FATAL,
	IB_EVENT_PORT_ACTIVE,
	IB_EVENT_PORT_ERR,
	IB_EVENT_LID_CHANGE,
	IB_EVENT_PKEY_CHANGE,
	IB_EVENT_SM_CHANGE,
	IB_EVENT_SRQ_ERR,
	IB_EVENT_SRQ_LIMIT_REACHED,
	IB_EVENT_QP_LAST_WQE_REACHED,
	IB_EVENT_CLIENT_REREGISTER,
	IB_EVENT_GID_CHANGE,
	IB_EVENT_WQ_FATAL
};

const char *__attribute_const__ ib_event_msg(enum ib_event_type event);
struct ib_event {
	struct ib_device	*device;
	union {
		struct ib_cq	*cq;
		struct ib_qp	*qp;
		struct ib_srq	*srq;
		struct ib_wq	*wq;
		u8		port_num;
	} element;
	enum ib_event_type	event;
};

struct ib_event_handler {
	struct ib_device *device;
	void            (*handler)(struct ib_event_handler *, struct ib_event *);
	struct list_head  list;
};

#define INIT_IB_EVENT_HANDLER(_ptr, _device, _handler)		\
	do {							\
		(_ptr)->device  = _device;			\
		(_ptr)->handler = _handler;			\
		INIT_LIST_HEAD(&(_ptr)->list);			\
	} while (0)
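/*
 * Example (illustrative sketch): registering an asynchronous event
 * handler with INIT_IB_EVENT_HANDLER. The my_* names are hypothetical;
 * ib_register_event_handler() is declared further down in this header.
 */
#if 0
static void my_event_handler_fn(struct ib_event_handler *handler,
				struct ib_event *event)
{
	ibdev_info(event->device, "async event: %s\n",
		   ib_event_msg(event->event));
}

static struct ib_event_handler my_event_handler;

static void my_register(struct ib_device *device)
{
	INIT_IB_EVENT_HANDLER(&my_event_handler, device, my_event_handler_fn);
	ib_register_event_handler(&my_event_handler);
}
#endif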
struct ib_global_route {
	const struct ib_gid_attr *sgid_attr;
	/* ... */
};

struct ib_grh {
	__be32		version_tclass_flow;
	/* ... */
};

union rdma_network_hdr {
	struct ib_grh ibgrh;
	struct {
		/* The IB spec states that if it's IPv4, the header
		 * is located in the last 20 bytes of the header.
		 */
		u8		reserved[20];
		struct iphdr	roce4grh;
	};
};

#define IB_QPN_MASK	0xFFFFFF

enum {
	IB_MULTICAST_QPN = 0xffffff
};

#define IB_LID_PERMISSIVE	cpu_to_be16(0xFFFF)
#define IB_MULTICAST_LID_BASE	cpu_to_be16(0xC000)

enum ib_rate {
	IB_RATE_PORT_CURRENT = 0,
	IB_RATE_2_5_GBPS = 2,
	/* ... */
	IB_RATE_120_GBPS = 10,
	IB_RATE_14_GBPS	 = 11,
	IB_RATE_56_GBPS	 = 12,
	IB_RATE_112_GBPS = 13,
	IB_RATE_168_GBPS = 14,
	IB_RATE_25_GBPS	 = 15,
	IB_RATE_100_GBPS = 16,
	IB_RATE_200_GBPS = 17,
	IB_RATE_300_GBPS = 18,
	IB_RATE_28_GBPS	 = 19,
	IB_RATE_50_GBPS	 = 20,
	IB_RATE_400_GBPS = 21,
	IB_RATE_600_GBPS = 22,
};

/**
 * ib_rate_to_mult - Convert the IB rate enum to a multiple of the
 * base rate of 2.5 Gbit/sec.  For example, IB_RATE_5_GBPS will be
 * converted to 2, since 5 Gbit/sec is 2 * 2.5 Gbit/sec.
 * @rate: rate to convert.
 */
__attribute_const__ int ib_rate_to_mult(enum ib_rate rate);

/**
 * ib_rate_to_mbps - Convert the IB rate enum to Mbps.
 * For example, IB_RATE_2_5_GBPS will be converted to 2500.
 * @rate: rate to convert.
 */
__attribute_const__ int ib_rate_to_mbps(enum ib_rate rate);

/**
 * enum ib_mr_type - memory region type
 * @IB_MR_TYPE_MEM_REG:       memory region that is used for
 *                            normal registration
 * @IB_MR_TYPE_SG_GAPS:       memory region that is capable to
 *                            register any arbitrary sg lists (without
 *                            the normal mr constraints - see
 *                            ib_map_mr_sg)
 * @IB_MR_TYPE_DM:            memory region that is used for device
 *                            memory registration
 * @IB_MR_TYPE_USER:          memory region that is used for the user-space
 *                            application
 * @IB_MR_TYPE_DMA:           memory region that is used for DMA operations
 *                            without address translations (VA=PA)
 * @IB_MR_TYPE_INTEGRITY:     memory region that is used for
 *                            data integrity operations
 */
enum ib_mr_type {
	IB_MR_TYPE_MEM_REG,
	IB_MR_TYPE_SG_GAPS,
	IB_MR_TYPE_DM,
	IB_MR_TYPE_USER,
	IB_MR_TYPE_DMA,
	IB_MR_TYPE_INTEGRITY,
};

enum ib_mr_status_check {
	IB_MR_CHECK_SIG_STATUS = 1,
};

/**
 * struct ib_mr_status - Memory region status container
 *
 * @fail_status: Bitmask of MR checks status. For each
 *     failed check a corresponding status bit is set.
 * @sig_err: Additional info for IB_MR_CHECK_SIG_STATUS
 *     failure.
 */
struct ib_mr_status {
	u32		    fail_status;
	struct ib_sig_err   sig_err;
};

/**
 * mult_to_ib_rate - Convert a multiple of 2.5 Gbit/sec to an IB rate
 * enum.
 * @mult: multiple to convert.
 */
__attribute_const__ enum ib_rate mult_to_ib_rate(int mult);
struct rdma_ah_init_attr {
	struct rdma_ah_attr *ah_attr;
	u32 flags;
	struct net_device *xmit_slave;
};

enum rdma_ah_attr_type {
	RDMA_AH_ATTR_TYPE_UNDEFINED,
	RDMA_AH_ATTR_TYPE_IB,
	RDMA_AH_ATTR_TYPE_ROCE,
	RDMA_AH_ATTR_TYPE_OPA,
};

struct roce_ah_attr {
	/* ... */
};

struct rdma_ah_attr {
	struct ib_global_route	grh;
	/* ... */
	enum rdma_ah_attr_type	type;
	union {
		struct ib_ah_attr	ib;
		struct roce_ah_attr	roce;
		struct opa_ah_attr	opa;
	};
};

enum ib_wc_status {
	IB_WC_SUCCESS,
	IB_WC_LOC_LEN_ERR,
	IB_WC_LOC_QP_OP_ERR,
	IB_WC_LOC_EEC_OP_ERR,
	IB_WC_LOC_PROT_ERR,
	IB_WC_WR_FLUSH_ERR,
	IB_WC_MW_BIND_ERR,
	IB_WC_BAD_RESP_ERR,
	IB_WC_LOC_ACCESS_ERR,
	IB_WC_REM_INV_REQ_ERR,
	IB_WC_REM_ACCESS_ERR,
	IB_WC_REM_OP_ERR,
	IB_WC_RETRY_EXC_ERR,
	IB_WC_RNR_RETRY_EXC_ERR,
	IB_WC_LOC_RDD_VIOL_ERR,
	IB_WC_REM_INV_RD_REQ_ERR,
	IB_WC_REM_ABORT_ERR,
	IB_WC_INV_EECN_ERR,
	IB_WC_INV_EEC_STATE_ERR,
	IB_WC_FATAL_ERR,
	IB_WC_RESP_TIMEOUT_ERR,
	IB_WC_GENERAL_ERR
};

const char *__attribute_const__ ib_wc_status_msg(enum ib_wc_status status);

enum ib_wc_opcode {
	IB_WC_SEND = IB_UVERBS_WC_SEND,
	IB_WC_RDMA_WRITE = IB_UVERBS_WC_RDMA_WRITE,
	IB_WC_RDMA_READ = IB_UVERBS_WC_RDMA_READ,
	IB_WC_COMP_SWAP = IB_UVERBS_WC_COMP_SWAP,
	IB_WC_FETCH_ADD = IB_UVERBS_WC_FETCH_ADD,
	IB_WC_BIND_MW = IB_UVERBS_WC_BIND_MW,
	IB_WC_LOCAL_INV = IB_UVERBS_WC_LOCAL_INV,
	IB_WC_LSO = IB_UVERBS_WC_TSO,
	/* ... */
	IB_WC_MASKED_COMP_SWAP,
	IB_WC_MASKED_FETCH_ADD,
/*
 * Set value of IB_WC_RECV so consumers can test if a completion is a
 * receive by testing (opcode & IB_WC_RECV).
 */
	IB_WC_RECV = 1 << 7,
	IB_WC_RECV_RDMA_WITH_IMM
};

enum ib_wc_flags {
	/* ... */
	IB_WC_WITH_IMM		= (1<<1),
	IB_WC_WITH_INVALIDATE	= (1<<2),
	IB_WC_IP_CSUM_OK	= (1<<3),
	IB_WC_WITH_SMAC		= (1<<4),
	IB_WC_WITH_VLAN		= (1<<5),
	IB_WC_WITH_NETWORK_HDR_TYPE	= (1<<6),
};

struct ib_wc {
	union {
		u64		wr_id;
		struct ib_cqe	*wr_cqe;
	};
	enum ib_wc_status	status;
	enum ib_wc_opcode	opcode;
	/* ... */
	union {
		__be32		imm_data;
		u32		invalidate_rkey;
	} ex;
	/* ... */
	u8			port_num;	/* valid only for DR SMPs on switches */
	/* ... */
	u8			network_hdr_type;
};
enum ib_cq_notify_flags {
	IB_CQ_SOLICITED			= 1 << 0,
	IB_CQ_NEXT_COMP			= 1 << 1,
	IB_CQ_SOLICITED_MASK		= IB_CQ_SOLICITED | IB_CQ_NEXT_COMP,
	IB_CQ_REPORT_MISSED_EVENTS	= 1 << 2,
};

enum ib_srq_type {
	IB_SRQT_BASIC = IB_UVERBS_SRQT_BASIC,
	IB_SRQT_XRC = IB_UVERBS_SRQT_XRC,
	IB_SRQT_TM = IB_UVERBS_SRQT_TM,
};

static inline bool ib_srq_has_cq(enum ib_srq_type srq_type)
{
	return srq_type == IB_SRQT_XRC ||
	       srq_type == IB_SRQT_TM;
}

enum ib_srq_attr_mask {
	IB_SRQ_MAX_WR	= 1 << 0,
	IB_SRQ_LIMIT	= 1 << 1,
};

struct ib_srq_attr {
	/* ... */
};

struct ib_srq_init_attr {
	void		      (*event_handler)(struct ib_event *, void *);
	/* ... */
	struct ib_srq_attr	attr;
	enum ib_srq_type	srq_type;

	struct {
		struct {
			struct ib_xrcd *xrcd;
		} xrc;
		/* ... */
	} ext;
};

struct ib_qp_cap {
	/* ... */
	u32	max_inline_data;

	/*
	 * Maximum number of rdma_rw_ctx structures in flight at a time.
	 * ib_create_qp() will calculate the right amount of needed WRs
	 * and MRs based on this.
	 */
	u32	max_rdma_ctxs;
};
enum ib_qp_type {
	/*
	 * IB_QPT_SMI and IB_QPT_GSI have to be the first two entries
	 * here (and in that order) since the MAD layer uses them as
	 * indices into a 2-entry table.
	 */
	IB_QPT_SMI,
	IB_QPT_GSI,
	IB_QPT_RC = IB_UVERBS_QPT_RC,
	IB_QPT_UC = IB_UVERBS_QPT_UC,
	IB_QPT_UD = IB_UVERBS_QPT_UD,
	/* ... */
	IB_QPT_RAW_ETHERTYPE,
	IB_QPT_RAW_PACKET = IB_UVERBS_QPT_RAW_PACKET,
	IB_QPT_XRC_INI = IB_UVERBS_QPT_XRC_INI,
	IB_QPT_XRC_TGT = IB_UVERBS_QPT_XRC_TGT,
	IB_QPT_MAX,
	IB_QPT_DRIVER = IB_UVERBS_QPT_DRIVER,
	/* Reserve a range for qp types internal to the low level driver.
	 * These qp types will not be visible at the IB core layer, so the
	 * IB_QPT_MAX usages should not be affected in the core layer
	 */
	IB_QPT_RESERVED1 = 0x1000,
	/* ... */
};

enum ib_qp_create_flags {
	IB_QP_CREATE_IPOIB_UD_LSO		= 1 << 0,
	IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK	=
		IB_UVERBS_QP_CREATE_BLOCK_MULTICAST_LOOPBACK,
	IB_QP_CREATE_CROSS_CHANNEL		= 1 << 2,
	IB_QP_CREATE_MANAGED_SEND		= 1 << 3,
	IB_QP_CREATE_MANAGED_RECV		= 1 << 4,
	IB_QP_CREATE_NETIF_QP			= 1 << 5,
	IB_QP_CREATE_INTEGRITY_EN		= 1 << 6,
	IB_QP_CREATE_NETDEV_USE			= 1 << 7,
	IB_QP_CREATE_SCATTER_FCS		=
		IB_UVERBS_QP_CREATE_SCATTER_FCS,
	IB_QP_CREATE_CVLAN_STRIPPING		=
		IB_UVERBS_QP_CREATE_CVLAN_STRIPPING,
	IB_QP_CREATE_SOURCE_QPN			= 1 << 10,
	IB_QP_CREATE_PCI_WRITE_END_PADDING	=
		IB_UVERBS_QP_CREATE_PCI_WRITE_END_PADDING,
	/* reserve bits 26-31 for low level drivers' internal use */
	IB_QP_CREATE_RESERVED_START		= 1 << 26,
	IB_QP_CREATE_RESERVED_END		= 1 << 31,
};

/*
 * Note: users may not call ib_close_qp or ib_destroy_qp from the event_handler
 * callback to destroy the passed in QP.
 */
struct ib_qp_init_attr {
	/* Consumer's event_handler callback must not block */
	void                  (*event_handler)(struct ib_event *, void *);
	/* ... */
	struct ib_cq	       *send_cq;
	struct ib_cq	       *recv_cq;
	/* ... */
	struct ib_xrcd	       *xrcd;	/* XRC TGT QPs only */
	struct ib_qp_cap	cap;
	enum ib_sig_type	sq_sig_type;
	enum ib_qp_type		qp_type;
	/* ... */
	/*
	 * Only needed for special QP types, or when using the RW API.
	 */
	u32			port_num;
	struct ib_rwq_ind_table *rwq_ind_tbl;
	/* ... */
};

struct ib_qp_open_attr {
	void                  (*event_handler)(struct ib_event *, void *);
	/* ... */
	enum ib_qp_type		qp_type;
};

enum ib_rnr_timeout {
	IB_RNR_TIMER_655_36 =  0,
	IB_RNR_TIMER_000_01 =  1,
	IB_RNR_TIMER_000_02 =  2,
	IB_RNR_TIMER_000_03 =  3,
	IB_RNR_TIMER_000_04 =  4,
	IB_RNR_TIMER_000_06 =  5,
	IB_RNR_TIMER_000_08 =  6,
	IB_RNR_TIMER_000_12 =  7,
	IB_RNR_TIMER_000_16 =  8,
	IB_RNR_TIMER_000_24 =  9,
	IB_RNR_TIMER_000_32 = 10,
	IB_RNR_TIMER_000_48 = 11,
	IB_RNR_TIMER_000_64 = 12,
	IB_RNR_TIMER_000_96 = 13,
	IB_RNR_TIMER_001_28 = 14,
	IB_RNR_TIMER_001_92 = 15,
	IB_RNR_TIMER_002_56 = 16,
	IB_RNR_TIMER_003_84 = 17,
	IB_RNR_TIMER_005_12 = 18,
	IB_RNR_TIMER_007_68 = 19,
	IB_RNR_TIMER_010_24 = 20,
	IB_RNR_TIMER_015_36 = 21,
	IB_RNR_TIMER_020_48 = 22,
	IB_RNR_TIMER_030_72 = 23,
	IB_RNR_TIMER_040_96 = 24,
	IB_RNR_TIMER_061_44 = 25,
	IB_RNR_TIMER_081_92 = 26,
	IB_RNR_TIMER_122_88 = 27,
	IB_RNR_TIMER_163_84 = 28,
	IB_RNR_TIMER_245_76 = 29,
	IB_RNR_TIMER_327_68 = 30,
	IB_RNR_TIMER_491_52 = 31
};
enum ib_qp_attr_mask {
	IB_QP_STATE			= 1,
	IB_QP_CUR_STATE			= (1<<1),
	IB_QP_EN_SQD_ASYNC_NOTIFY	= (1<<2),
	IB_QP_ACCESS_FLAGS		= (1<<3),
	IB_QP_PKEY_INDEX		= (1<<4),
	IB_QP_PORT			= (1<<5),
	IB_QP_QKEY			= (1<<6),
	IB_QP_AV			= (1<<7),
	IB_QP_PATH_MTU			= (1<<8),
	IB_QP_TIMEOUT			= (1<<9),
	IB_QP_RETRY_CNT			= (1<<10),
	IB_QP_RNR_RETRY			= (1<<11),
	IB_QP_RQ_PSN			= (1<<12),
	IB_QP_MAX_QP_RD_ATOMIC		= (1<<13),
	IB_QP_ALT_PATH			= (1<<14),
	IB_QP_MIN_RNR_TIMER		= (1<<15),
	IB_QP_SQ_PSN			= (1<<16),
	IB_QP_MAX_DEST_RD_ATOMIC	= (1<<17),
	IB_QP_PATH_MIG_STATE		= (1<<18),
	IB_QP_CAP			= (1<<19),
	IB_QP_DEST_QPN			= (1<<20),
	IB_QP_RESERVED1			= (1<<21),
	IB_QP_RESERVED2			= (1<<22),
	IB_QP_RESERVED3			= (1<<23),
	IB_QP_RESERVED4			= (1<<24),
	IB_QP_RATE_LIMIT		= (1<<25),

	IB_QP_ATTR_STANDARD_BITS = GENMASK(20, 0),
};
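/*
 * Example (illustrative sketch): moving a freshly created RC QP to the
 * INIT state with ib_modify_qp(), which is declared later in this header.
 * The attribute values are examples only; IB_QPS_INIT and the ib_qp_attr
 * members used here (pkey_index, port_num) are defined in this header.
 */
#if 0
static int my_qp_to_init(struct ib_qp *qp)
{
	struct ib_qp_attr attr = {
		.qp_state	 = IB_QPS_INIT,
		.pkey_index	 = 0,
		.port_num	 = 1,
		.qp_access_flags = IB_ACCESS_REMOTE_WRITE,
	};

	return ib_modify_qp(qp, &attr,
			    IB_QP_STATE | IB_QP_PKEY_INDEX |
			    IB_QP_PORT | IB_QP_ACCESS_FLAGS);
}
#endif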
struct ib_qp_attr {
	enum ib_qp_state	qp_state;
	enum ib_qp_state	cur_qp_state;
	enum ib_mtu		path_mtu;
	enum ib_mig_state	path_mig_state;
	/* ... */
	int			qp_access_flags;
	struct ib_qp_cap	cap;
	struct rdma_ah_attr	ah_attr;
	struct rdma_ah_attr	alt_ah_attr;
	/* ... */
	u8			en_sqd_async_notify;
	/* ... */
	u8			max_dest_rd_atomic;
	/* ... */
	struct net_device	*xmit_slave;
};

enum ib_wr_opcode {
	/* These are shared with userspace */
	IB_WR_RDMA_WRITE = IB_UVERBS_WR_RDMA_WRITE,
	IB_WR_RDMA_WRITE_WITH_IMM = IB_UVERBS_WR_RDMA_WRITE_WITH_IMM,
	IB_WR_SEND = IB_UVERBS_WR_SEND,
	IB_WR_SEND_WITH_IMM = IB_UVERBS_WR_SEND_WITH_IMM,
	IB_WR_RDMA_READ = IB_UVERBS_WR_RDMA_READ,
	IB_WR_ATOMIC_CMP_AND_SWP = IB_UVERBS_WR_ATOMIC_CMP_AND_SWP,
	IB_WR_ATOMIC_FETCH_AND_ADD = IB_UVERBS_WR_ATOMIC_FETCH_AND_ADD,
	IB_WR_BIND_MW = IB_UVERBS_WR_BIND_MW,
	IB_WR_LSO = IB_UVERBS_WR_TSO,
	IB_WR_SEND_WITH_INV = IB_UVERBS_WR_SEND_WITH_INV,
	IB_WR_RDMA_READ_WITH_INV = IB_UVERBS_WR_RDMA_READ_WITH_INV,
	IB_WR_LOCAL_INV = IB_UVERBS_WR_LOCAL_INV,
	IB_WR_MASKED_ATOMIC_CMP_AND_SWP =
		IB_UVERBS_WR_MASKED_ATOMIC_CMP_AND_SWP,
	IB_WR_MASKED_ATOMIC_FETCH_AND_ADD =
		IB_UVERBS_WR_MASKED_ATOMIC_FETCH_AND_ADD,

	/* These are kernel only and can not be issued by userspace */
	IB_WR_REG_MR = 0x20,
	IB_WR_REG_MR_INTEGRITY,

	/* reserve values for low level drivers' internal use.
	 * These values will not be used at all in the ib core layer.
	 */
	IB_WR_RESERVED1 = 0xf0,
	/* ... */
};

enum ib_send_flags {
	IB_SEND_FENCE		= 1,
	IB_SEND_SIGNALED	= (1<<1),
	IB_SEND_SOLICITED	= (1<<2),
	IB_SEND_INLINE		= (1<<3),
	IB_SEND_IP_CSUM		= (1<<4),

	/* reserve bits 26-31 for low level drivers' internal use */
	IB_SEND_RESERVED_START	= (1 << 26),
	IB_SEND_RESERVED_END	= (1 << 31),
};
struct ib_cqe {
	void (*done)(struct ib_cq *cq, struct ib_wc *wc);
};
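/*
 * Example (illustrative sketch): the ib_cqe-based completion model.
 * Embed an ib_cqe in the request, point wr_cqe at it when posting, and
 * recover the request with container_of() in the done callback.
 * struct my_request and my_done are hypothetical.
 */
#if 0
struct my_request {
	struct ib_cqe cqe;
	/* request state ... */
};

static void my_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct my_request *req = container_of(wc->wr_cqe,
					      struct my_request, cqe);

	if (wc->status != IB_WC_SUCCESS)
		ibdev_err(cq->device, "request failed: %s\n",
			  ib_wc_status_msg(wc->status));
	/* complete req ... */
}
#endif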
struct ib_send_wr {
	struct ib_send_wr      *next;
	union {
		u64		wr_id;
		struct ib_cqe	*wr_cqe;
	};
	struct ib_sge	       *sg_list;
	int			num_sge;
	enum ib_wr_opcode	opcode;
	int			send_flags;
	union {
		__be32		imm_data;
		u32		invalidate_rkey;
	} ex;
};

struct ib_rdma_wr {
	struct ib_send_wr	wr;
	u64			remote_addr;
	u32			rkey;
};

static inline const struct ib_rdma_wr *rdma_wr(const struct ib_send_wr *wr)
{
	return container_of(wr, struct ib_rdma_wr, wr);
}

struct ib_atomic_wr {
	struct ib_send_wr	wr;
	u64			remote_addr;
	u64			compare_add;
	u64			swap;
	u64			compare_add_mask;
	u64			swap_mask;
	u32			rkey;
};

static inline const struct ib_atomic_wr *atomic_wr(const struct ib_send_wr *wr)
{
	return container_of(wr, struct ib_atomic_wr, wr);
}

struct ib_ud_wr {
	struct ib_send_wr	wr;
	struct ib_ah		*ah;
	/* ... */
	u32			remote_qpn;
	u32			remote_qkey;
	u16			pkey_index;	/* valid for GSI only */
	u8			port_num;	/* valid for DR SMPs on switch only */
};

static inline const struct ib_ud_wr *ud_wr(const struct ib_send_wr *wr)
{
	return container_of(wr, struct ib_ud_wr, wr);
}

struct ib_reg_wr {
	struct ib_send_wr	wr;
	struct ib_mr		*mr;
	u32			key;
	int			access;
};

static inline const struct ib_reg_wr *reg_wr(const struct ib_send_wr *wr)
{
	return container_of(wr, struct ib_reg_wr, wr);
}
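/*
 * Example (illustrative sketch): posting an RDMA WRITE by embedding the
 * generic ib_send_wr inside an ib_rdma_wr. ib_post_send() is declared
 * later in this header; the sge/raddr/rkey values come from the caller
 * and my_post_write is hypothetical.
 */
#if 0
static int my_post_write(struct ib_qp *qp, struct ib_sge *sge,
			 u64 raddr, u32 rkey, struct ib_cqe *cqe)
{
	struct ib_rdma_wr wr = {
		.wr = {
			.wr_cqe		= cqe,
			.sg_list	= sge,
			.num_sge	= 1,
			.opcode		= IB_WR_RDMA_WRITE,
			.send_flags	= IB_SEND_SIGNALED,
		},
		.remote_addr	= raddr,
		.rkey		= rkey,
	};
	const struct ib_send_wr *bad_wr;

	return ib_post_send(qp, &wr.wr, &bad_wr);
}
#endif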
struct ib_recv_wr {
	struct ib_recv_wr      *next;
	union {
		u64		wr_id;
		struct ib_cqe	*wr_cqe;
	};
	struct ib_sge	       *sg_list;
	int			num_sge;
};

enum ib_access_flags {
	IB_ACCESS_LOCAL_WRITE = IB_UVERBS_ACCESS_LOCAL_WRITE,
	IB_ACCESS_REMOTE_WRITE = IB_UVERBS_ACCESS_REMOTE_WRITE,
	IB_ACCESS_REMOTE_READ = IB_UVERBS_ACCESS_REMOTE_READ,
	IB_ACCESS_REMOTE_ATOMIC = IB_UVERBS_ACCESS_REMOTE_ATOMIC,
	IB_ACCESS_MW_BIND = IB_UVERBS_ACCESS_MW_BIND,
	IB_ZERO_BASED = IB_UVERBS_ACCESS_ZERO_BASED,
	IB_ACCESS_ON_DEMAND = IB_UVERBS_ACCESS_ON_DEMAND,
	IB_ACCESS_HUGETLB = IB_UVERBS_ACCESS_HUGETLB,
	IB_ACCESS_RELAXED_ORDERING = IB_UVERBS_ACCESS_RELAXED_ORDERING,

	IB_ACCESS_OPTIONAL = IB_UVERBS_ACCESS_OPTIONAL_RANGE,
	IB_ACCESS_SUPPORTED =
		((IB_ACCESS_HUGETLB << 1) - 1) | IB_ACCESS_OPTIONAL,
};

/*
 * XXX: these are apparently used for ->rereg_user_mr, no idea why they
 * are hidden here instead of a uapi header!
 */
enum ib_mr_rereg_flags {
	IB_MR_REREG_TRANS	= 1,
	IB_MR_REREG_PD		= (1<<1),
	IB_MR_REREG_ACCESS	= (1<<2),
	IB_MR_REREG_SUPPORTED	= ((IB_MR_REREG_ACCESS << 1) - 1)
};

enum rdma_remove_reason {
	/*
	 * Userspace requested uobject deletion or initial try
	 * to remove uobject via cleanup. Call could fail
	 */
	RDMA_REMOVE_DESTROY,
	/* Context deletion. This call should delete the actual object itself */
	RDMA_REMOVE_CLOSE,
	/* Driver is being hot-unplugged. This call should delete the actual object itself */
	RDMA_REMOVE_DRIVER_REMOVE,
	/* uobj is being cleaned-up before being committed */
	RDMA_REMOVE_ABORT,
	/* The driver failed to destroy the uobject and is being disconnected */
	RDMA_REMOVE_DRIVER_FAILURE,
};
struct ib_rdmacg_object {
#ifdef CONFIG_CGROUP_RDMA
	struct rdma_cgroup	*cg;		/* owner rdma cgroup */
#endif
};

struct ib_ucontext {
	struct ib_device       *device;
	struct ib_uverbs_file  *ufile;

	struct ib_rdmacg_object	cg_obj;
	/*
	 * Implementation details of the RDMA core, don't use in drivers:
	 */
	struct rdma_restrack_entry res;
	struct xarray mmap_xa;
};

struct ib_uobject {
	u64			user_handle;	/* handle given to us by userspace */
	/* ufile & ucontext owning this object */
	struct ib_uverbs_file  *ufile;
	/* FIXME, save memory: ufile->context == context */
	struct ib_ucontext     *context;	/* associated user context */
	void		       *object;		/* containing object */
	struct list_head	list;		/* link to context's list */
	struct ib_rdmacg_object	cg_obj;		/* rdmacg object */
	int			id;		/* index into kernel idr */
	/* ... */
	atomic_t		usecnt;		/* protects exclusive access */
	struct rcu_head		rcu;		/* kfree_rcu() overhead */

	const struct uverbs_api_object *uapi_object;
};

struct ib_udata {
	const void __user *inbuf;
	void __user *outbuf;
	size_t       inlen;
	size_t       outlen;
};

struct ib_pd {
	/* ... */
	struct ib_device       *device;
	struct ib_uobject      *uobject;
	atomic_t		usecnt; /* count all resources */

	u32			unsafe_global_rkey;

	/*
	 * Implementation details of the RDMA core, don't use in drivers:
	 */
	struct ib_mr	       *__internal_mr;
	struct rdma_restrack_entry res;
};

struct ib_xrcd {
	struct ib_device       *device;
	atomic_t		usecnt; /* count all exposed resources */
	struct inode	       *inode;
	struct rw_semaphore	tgt_qps_rwsem;
	struct xarray		tgt_qps;
};

struct ib_ah {
	struct ib_device	*device;
	struct ib_pd		*pd;
	struct ib_uobject	*uobject;
	const struct ib_gid_attr *sgid_attr;
	enum rdma_ah_attr_type	type;
};

typedef void (*ib_comp_handler)(struct ib_cq *cq, void *cq_context);

enum ib_poll_context {
	IB_POLL_SOFTIRQ,	   /* poll from softirq context */
	IB_POLL_WORKQUEUE,	   /* poll from workqueue */
	IB_POLL_UNBOUND_WORKQUEUE, /* poll from unbound workqueue */
	IB_POLL_LAST_POOL_TYPE = IB_POLL_UNBOUND_WORKQUEUE,

	IB_POLL_DIRECT,		   /* caller context, no hw completions */
};
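/*
 * Example (illustrative sketch): allocating a completion queue that is
 * polled from softirq context. ib_alloc_cq() is provided later in this
 * header; the CQ depth and comp_vector values are examples only.
 */
#if 0
	struct ib_cq *cq;

	cq = ib_alloc_cq(device, NULL, 128 /* nr_cqe */,
			 0 /* comp_vector */, IB_POLL_SOFTIRQ);
	if (IS_ERR(cq))
		return PTR_ERR(cq);
#endif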
struct ib_cq {
	struct ib_device       *device;
	struct ib_ucq_object   *uobject;
	ib_comp_handler		comp_handler;
	void                  (*event_handler)(struct ib_event *, void *);
	/* ... */
	unsigned int		cqe_used;
	atomic_t		usecnt; /* count number of work queues */
	enum ib_poll_context	poll_ctx;
	/* ... */
	struct list_head	pool_entry;
	union {
		struct irq_poll		iop;
		struct work_struct	work;
	};
	struct workqueue_struct *comp_wq;
	/* ... */
	/* updated only by trace points */
	/* ... */
	unsigned int		comp_vector;

	/*
	 * Implementation details of the RDMA core, don't use in drivers:
	 */
	struct rdma_restrack_entry res;
};

struct ib_srq {
	struct ib_device       *device;
	/* ... */
	struct ib_usrq_object  *uobject;
	void		      (*event_handler)(struct ib_event *, void *);
	/* ... */
	enum ib_srq_type	srq_type;
	/* ... */
	struct {
		struct {
			struct ib_xrcd *xrcd;
			/* ... */
		} xrc;
		/* ... */
	} ext;
};

enum ib_raw_packet_caps {
	/* Strip cvlan from incoming packet and report it in the matching work
	 * completion is supported.
	 */
	IB_RAW_PACKET_CAP_CVLAN_STRIPPING	= (1 << 0),
	/* Scatter FCS field of an incoming packet to host memory is supported.
	 */
	IB_RAW_PACKET_CAP_SCATTER_FCS		= (1 << 1),
	/* Checksum offloads are supported (for both send and receive). */
	IB_RAW_PACKET_CAP_IP_CSUM		= (1 << 2),
	/* When a packet is received for an RQ with no receive WQEs, the
	 * packet processing is delayed.
	 */
	IB_RAW_PACKET_CAP_DELAY_DROP		= (1 << 3),
};

enum ib_wq_type {
	IB_WQT_RQ = IB_UVERBS_WQT_RQ,
};

struct ib_wq {
	struct ib_device       *device;
	struct ib_uwq_object   *uobject;
	/* ... */
	void		      (*event_handler)(struct ib_event *, void *);
	/* ... */
	enum ib_wq_state	state;
	enum ib_wq_type		wq_type;
	/* ... */
};

enum ib_wq_flags {
	IB_WQ_FLAGS_CVLAN_STRIPPING	= IB_UVERBS_WQ_FLAGS_CVLAN_STRIPPING,
	IB_WQ_FLAGS_SCATTER_FCS		= IB_UVERBS_WQ_FLAGS_SCATTER_FCS,
	IB_WQ_FLAGS_DELAY_DROP		= IB_UVERBS_WQ_FLAGS_DELAY_DROP,
	IB_WQ_FLAGS_PCI_WRITE_END_PADDING =
		IB_UVERBS_WQ_FLAGS_PCI_WRITE_END_PADDING,
};

struct ib_wq_init_attr {
	/* ... */
	enum ib_wq_type	wq_type;
	/* ... */
	void		      (*event_handler)(struct ib_event *, void *);
	u32		create_flags; /* Use enum ib_wq_flags */
};

enum ib_wq_attr_mask {
	IB_WQ_STATE	= 1 << 0,
	IB_WQ_CUR_STATE	= 1 << 1,
	IB_WQ_FLAGS	= 1 << 2,
};

struct ib_wq_attr {
	enum ib_wq_state	wq_state;
	enum ib_wq_state	curr_wq_state;
	u32			flags;      /* Use enum ib_wq_flags */
	u32			flags_mask; /* Use enum ib_wq_flags */
};

struct ib_rwq_ind_table {
	struct ib_device	*device;
	struct ib_uobject      *uobject;
	/* ... */
	u32			log_ind_tbl_size;
	struct ib_wq	      **ind_tbl;
};

struct ib_rwq_ind_table_init_attr {
	u32		log_ind_tbl_size;
	/* Each entry is a pointer to Receive Work Queue */
	struct ib_wq	**ind_tbl;
};

enum port_pkey_state {
	IB_PORT_PKEY_NOT_VALID = 0,
	IB_PORT_PKEY_VALID = 1,
	IB_PORT_PKEY_LISTED = 2,
};

struct ib_qp_security;

struct ib_port_pkey {
	enum port_pkey_state	state;
	/* ... */
	struct list_head	qp_list;
	struct list_head	to_error_list;
	struct ib_qp_security  *sec;
};

struct ib_ports_pkeys {
	struct ib_port_pkey	main;
	struct ib_port_pkey	alt;
};

struct ib_qp_security {
	/* ... */
	struct ib_device       *dev;
	/* Hold this mutex when changing port and pkey settings. */
	struct mutex		mutex;
	struct ib_ports_pkeys  *ports_pkeys;
	/* A list of all open shared QP handles.  Required to enforce security
	 * properly for all users of a shared QP.
	 */
	struct list_head	shared_qp_list;
	/* ... */
	atomic_t		error_list_count;
	struct completion	error_complete;
	int			error_comps_pending;
};
/*
 * @max_write_sge: Maximum SGE elements per RDMA WRITE request.
 * @max_read_sge:  Maximum SGE elements per RDMA READ request.
 */
struct ib_qp {
	struct ib_device       *device;
	/* ... */
	struct ib_cq	       *send_cq;
	struct ib_cq	       *recv_cq;
	/* ... */
	struct list_head	rdma_mrs;
	struct list_head	sig_mrs;
	/* ... */
	struct ib_xrcd	       *xrcd; /* XRC TGT QPs only */
	struct list_head	xrcd_list;

	/* count times opened, mcast attaches, flow attaches */
	atomic_t		usecnt;
	struct list_head	open_list;
	struct ib_qp	       *real_qp;
	struct ib_uqp_object   *uobject;
	void                  (*event_handler)(struct ib_event *, void *);
	/* ... */
	/* sgid_attrs associated with the AV's */
	const struct ib_gid_attr *av_sgid_attr;
	const struct ib_gid_attr *alt_path_sgid_attr;
	/* ... */
	enum ib_qp_type		qp_type;
	struct ib_rwq_ind_table *rwq_ind_tbl;
	struct ib_qp_security  *qp_sec;
	/* ... */
	/*
	 * Implementation details of the RDMA core, don't use in drivers:
	 */
	struct rdma_restrack_entry res;

	/* The counter the qp is bound to */
	struct rdma_counter    *counter;
};
struct ib_dm {
	struct ib_device  *device;
	/* ... */
	struct ib_uobject *uobject;
	/* ... */
};

struct ib_mr {
	struct ib_device  *device;
	/* ... */
	unsigned int	   page_size;
	enum ib_mr_type	   type;
	/* ... */
	union {
		struct ib_uobject	*uobject;	/* user */
		struct list_head	qp_entry;	/* FR */
	};

	/* ... */
	struct ib_sig_attrs *sig_attrs; /* only for IB_MR_TYPE_INTEGRITY MRs */
	/*
	 * Implementation details of the RDMA core, don't use in drivers:
	 */
	struct rdma_restrack_entry res;
};

struct ib_mw {
	struct ib_device	*device;
	struct ib_pd		*pd;
	struct ib_uobject	*uobject;
	u32			rkey;
	enum ib_mw_type		type;
};

/* Supported steering options */
enum ib_flow_attr_type {
	/* steering according to rule specifications */
	IB_FLOW_ATTR_NORMAL		= 0x0,
	/* default unicast and multicast rule -
	 * receive all Eth traffic which isn't steered to any QP
	 */
	IB_FLOW_ATTR_ALL_DEFAULT	= 0x1,
	/* default multicast rule -
	 * receive all Eth multicast traffic which isn't steered to any QP
	 */
	IB_FLOW_ATTR_MC_DEFAULT		= 0x2,
	/* sniffer rule - receive all port traffic */
	IB_FLOW_ATTR_SNIFFER		= 0x3
};

/* Supported steering header types */
enum ib_flow_spec_type {
	IB_FLOW_SPEC_ETH		= 0x20,
	IB_FLOW_SPEC_IB			= 0x22,
	IB_FLOW_SPEC_IPV4		= 0x30,
	IB_FLOW_SPEC_IPV6		= 0x31,
	IB_FLOW_SPEC_ESP		= 0x34,
	IB_FLOW_SPEC_TCP		= 0x40,
	IB_FLOW_SPEC_UDP		= 0x41,
	IB_FLOW_SPEC_VXLAN_TUNNEL	= 0x50,
	IB_FLOW_SPEC_GRE		= 0x51,
	IB_FLOW_SPEC_MPLS		= 0x60,
	IB_FLOW_SPEC_INNER		= 0x100,
	IB_FLOW_SPEC_ACTION_TAG		= 0x1000,
	IB_FLOW_SPEC_ACTION_DROP	= 0x1001,
	IB_FLOW_SPEC_ACTION_HANDLE	= 0x1002,
	IB_FLOW_SPEC_ACTION_COUNT	= 0x1003,
};
#define IB_FLOW_SPEC_LAYER_MASK		0xF0
#define IB_FLOW_SPEC_SUPPORT_LAYERS	10

enum ib_flow_flags {
	IB_FLOW_ATTR_FLAGS_DONT_TRAP	= 1UL << 1, /* Continue match, no steal */
	IB_FLOW_ATTR_FLAGS_EGRESS	= 1UL << 2, /* Egress flow */
	IB_FLOW_ATTR_FLAGS_RESERVED	= 1UL << 3  /* Must be last */
};

struct ib_flow_eth_filter {
	/* ... */
};

struct ib_flow_spec_eth {
	u32			  type;
	u16			  size;
	struct ib_flow_eth_filter val;
	struct ib_flow_eth_filter mask;
};

struct ib_flow_ib_filter {
	/* ... */
};

struct ib_flow_spec_ib {
	u32			 type;
	u16			 size;
	struct ib_flow_ib_filter val;
	struct ib_flow_ib_filter mask;
};

/* IPv4 header flags */
enum ib_ipv4_flags {
	IB_IPV4_DONT_FRAG = 0x2, /* Don't enable packet fragmentation */
	IB_IPV4_MORE_FRAG = 0x4  /* All fragmented packets except the
				    last have this flag set */
};

struct ib_flow_ipv4_filter {
	/* ... */
};

struct ib_flow_spec_ipv4 {
	u32			   type;
	u16			   size;
	struct ib_flow_ipv4_filter val;
	struct ib_flow_ipv4_filter mask;
};

struct ib_flow_ipv6_filter {
	/* ... */
};

struct ib_flow_spec_ipv6 {
	u32			   type;
	u16			   size;
	struct ib_flow_ipv6_filter val;
	struct ib_flow_ipv6_filter mask;
};

struct ib_flow_tcp_udp_filter {
	/* ... */
};

struct ib_flow_spec_tcp_udp {
	u32			      type;
	u16			      size;
	struct ib_flow_tcp_udp_filter val;
	struct ib_flow_tcp_udp_filter mask;
};

struct ib_flow_tunnel_filter {
	/* ... */
};

/* ib_flow_spec_tunnel describes the Vxlan tunnel
 * the tunnel_id from val has the vni value
 */
struct ib_flow_spec_tunnel {
	u32			     type;
	u16			     size;
	struct ib_flow_tunnel_filter val;
	struct ib_flow_tunnel_filter mask;
};

struct ib_flow_esp_filter {
	/* ... */
};

struct ib_flow_spec_esp {
	u32			  type;
	u16			  size;
	struct ib_flow_esp_filter val;
	struct ib_flow_esp_filter mask;
};

struct ib_flow_gre_filter {
	__be16 c_ks_res0_ver;
	/* ... */
};

struct ib_flow_spec_gre {
	u32			  type;
	u16			  size;
	struct ib_flow_gre_filter val;
	struct ib_flow_gre_filter mask;
};

struct ib_flow_mpls_filter {
	/* ... */
};

struct ib_flow_spec_mpls {
	u32			   type;
	u16			   size;
	struct ib_flow_mpls_filter val;
	struct ib_flow_mpls_filter mask;
};

struct ib_flow_spec_action_tag {
	enum ib_flow_spec_type	type;
	u16			size;
	u32			tag_id;
};

struct ib_flow_spec_action_drop {
	enum ib_flow_spec_type	type;
	u16			size;
};

struct ib_flow_spec_action_handle {
	enum ib_flow_spec_type	type;
	u16			size;
	struct ib_flow_action	*act;
};

enum ib_counters_description {
	IB_COUNTER_PACKETS,
	IB_COUNTER_BYTES,
};

struct ib_flow_spec_action_count {
	enum ib_flow_spec_type	type;
	u16			size;
	struct ib_counters	*counters;
};

union ib_flow_spec {
	struct {
		u32	type;
		u16	size;
	};
	struct ib_flow_spec_eth		  eth;
	struct ib_flow_spec_ib		  ib;
	struct ib_flow_spec_ipv4	  ipv4;
	struct ib_flow_spec_tcp_udp	  tcp_udp;
	struct ib_flow_spec_ipv6	  ipv6;
	struct ib_flow_spec_tunnel	  tunnel;
	struct ib_flow_spec_esp		  esp;
	struct ib_flow_spec_gre		  gre;
	struct ib_flow_spec_mpls	  mpls;
	struct ib_flow_spec_action_tag	  flow_tag;
	struct ib_flow_spec_action_drop	  drop;
	struct ib_flow_spec_action_handle action;
	struct ib_flow_spec_action_count  flow_count;
};

struct ib_flow_attr {
	enum ib_flow_attr_type type;
	/* ... */
	union ib_flow_spec     flows[];
};

struct ib_flow {
	/* ... */
	struct ib_device	*device;
	struct ib_uobject	*uobject;
};

enum ib_flow_action_type {
	IB_FLOW_ACTION_UNSPECIFIED,
	IB_FLOW_ACTION_ESP = 1,
};

struct ib_flow_action_attrs_esp_keymats {
	enum ib_uverbs_flow_action_esp_keymat			protocol;
	union {
		struct ib_uverbs_flow_action_esp_keymat_aes_gcm aes_gcm;
	} keymat;
};

struct ib_flow_action_attrs_esp_replays {
	enum ib_uverbs_flow_action_esp_replay		protocol;
	union {
		struct ib_uverbs_flow_action_esp_replay_bmp bmp;
	} replay;
};

enum ib_flow_action_attrs_esp_flags {
	/* All user-space flags at the top: Use enum ib_uverbs_flow_action_esp_flags
	 * This is done in order to share the same flags between user-space and
	 * kernel and spare an unnecessary translation.
	 */

	/* Kernel flags */
	IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED	= 1ULL << 32,
	IB_FLOW_ACTION_ESP_FLAGS_MOD_ESP_ATTRS	= 1ULL << 33,
};

struct ib_flow_spec_list {
	struct ib_flow_spec_list	*next;
	union ib_flow_spec		spec;
};

struct ib_flow_action_attrs_esp {
	struct ib_flow_action_attrs_esp_keymats	*keymat;
	struct ib_flow_action_attrs_esp_replays	*replay;
	struct ib_flow_spec_list		*encap;
	/* Used only if IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED is enabled.
	 * Value of 0 is a valid value.
	 */
	u32					esn;
	/* ... */
	/* Use enum ib_flow_action_attrs_esp_flags */
	u64					flags;
	u64					hard_limit_pkts;
};

struct ib_flow_action {
	struct ib_device		*device;
	struct ib_uobject		*uobject;
	enum ib_flow_action_type	type;
	atomic_t			usecnt;
};

enum ib_process_mad_flags {
	IB_MAD_IGNORE_MKEY	= 1,
	IB_MAD_IGNORE_BKEY	= 2,
	IB_MAD_IGNORE_ALL	= IB_MAD_IGNORE_MKEY | IB_MAD_IGNORE_BKEY
};

enum ib_mad_result {
	IB_MAD_RESULT_FAILURE  = 0,      /* (!SUCCESS is the important flag) */
	IB_MAD_RESULT_SUCCESS  = 1 << 0, /* MAD was successfully processed   */
	IB_MAD_RESULT_REPLY    = 1 << 1, /* Reply packet needs to be sent    */
	IB_MAD_RESULT_CONSUMED = 1 << 2  /* Packet consumed: stop processing */
};

struct ib_port_cache {
	/* ... */
	struct ib_pkey_cache  *pkey;
	struct ib_gid_table   *gid;
	/* ... */
	enum ib_port_state     port_state;
};
struct ib_port_immutable {
	int			pkey_tbl_len;
	int			gid_tbl_len;
	u32			core_cap_flags;
	u32			max_mad_size;
};

struct ib_port_data {
	struct ib_device *ib_dev;

	struct ib_port_immutable immutable;

	spinlock_t pkey_list_lock;
	struct list_head pkey_list;

	struct ib_port_cache cache;

	spinlock_t netdev_lock;
	struct net_device __rcu *netdev;
	struct hlist_node ndev_hash_link;
	struct rdma_port_counter port_counter;
	struct rdma_hw_stats *hw_stats;
};

/* rdma netdev type - specifies protocol type */
enum rdma_netdev_t {
	RDMA_NETDEV_OPA_VNIC,
	RDMA_NETDEV_IPOIB,
};

/**
 * struct rdma_netdev - rdma netdev
 * For cases where netstack interfacing is required.
 */
struct rdma_netdev {
	/* ... */
	struct ib_device  *hca;
	/* ... */

	/*
	 * cleanup function must be specified.
	 * FIXME: This is only used for OPA_VNIC and that usage should be
	 * moved to virtual device
	 */
	void (*free_rdma_netdev)(struct net_device *netdev);

	/* control functions */
	void (*set_id)(struct net_device *netdev, int id);
	/* send packet */
	int (*send)(struct net_device *dev, struct sk_buff *skb,
		    struct ib_ah *address, u32 dqpn);
	/* multicast */
	int (*attach_mcast)(struct net_device *dev, struct ib_device *hca,
			    union ib_gid *gid, u16 mlid,
			    int set_qkey, u32 qkey);
	int (*detach_mcast)(struct net_device *dev, struct ib_device *hca,
			    union ib_gid *gid, u16 mlid);
};

struct rdma_netdev_alloc_params {
	/* ... */
	int (*initialize_rdma_netdev)(struct ib_device *device, u8 port_num,
				      struct net_device *netdev, void *param);
};

struct ib_odp_counters {
	/* ... */
	atomic64_t invalidations;
	atomic64_t prefetch;
};

struct ib_counters {
	struct ib_device	*device;
	struct ib_uobject	*uobject;
	/* num of objects attached */
	atomic_t		usecnt;
};

struct ib_counters_read_attr {
	/* ... */
	u32	flags; /* use enum ib_read_counters_flags */
};

struct uverbs_attr_bundle;
struct iw_cm_id;
struct iw_cm_conn_param;
#define INIT_RDMA_OBJ_SIZE(ib_struct, drv_struct, member)                      \
	.size_##ib_struct =                                                    \
		(sizeof(struct drv_struct) +                                   \
		 BUILD_BUG_ON_ZERO(offsetof(struct drv_struct, member)) +      \
		 BUILD_BUG_ON_ZERO(                                            \
			 !__same_type(((struct drv_struct *)NULL)->member,     \
				      struct ib_struct)))

#define rdma_zalloc_drv_obj_gfp(ib_dev, ib_type, gfp)                          \
	((struct ib_type *)kzalloc(ib_dev->ops.size_##ib_type, gfp))

#define rdma_zalloc_drv_obj(ib_dev, ib_type)                                   \
	rdma_zalloc_drv_obj_gfp(ib_dev, ib_type, GFP_KERNEL)

#define DECLARE_RDMA_OBJ_SIZE(ib_struct) size_t size_##ib_struct
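/*
 * Example (illustrative sketch): how a driver wraps a core object and
 * advertises its size via INIT_RDMA_OBJ_SIZE so the core can allocate it
 * with rdma_zalloc_drv_obj(). struct foo_pd and foo_dev_ops are
 * hypothetical; the wrapped member must sit at offset zero, which the
 * BUILD_BUG_ON_ZERO() checks above enforce.
 */
#if 0
struct foo_pd {
	struct ib_pd ibpd;	/* must be the first member */
	u32 pdn;
};

static const struct ib_device_ops foo_dev_ops = {
	/* ... other callbacks ... */
	INIT_RDMA_OBJ_SIZE(ib_pd, foo_pd, ibpd),
};
#endif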
struct rdma_user_mmap_entry {
	struct kref ref;
	struct ib_ucontext *ucontext;
	unsigned long start_pgoff;
	size_t npages;
	bool driver_removed;
};

/* Return the offset (in bytes) the user should pass to libc's mmap() */
static inline u64
rdma_user_mmap_get_offset(const struct rdma_user_mmap_entry *entry)
{
	return (u64)entry->start_pgoff << PAGE_SHIFT;
}
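/*
 * Example (illustrative sketch): handing a mmap cookie back to userspace
 * after inserting an entry. rdma_user_mmap_entry_insert() is declared
 * elsewhere in this header; struct my_mmap_entry and foo_mmap_offset are
 * hypothetical.
 */
#if 0
struct my_mmap_entry {
	struct rdma_user_mmap_entry rdma_entry;
	/* driver-private mapping state ... */
};

static int foo_mmap_offset(struct ib_ucontext *ucontext,
			   struct my_mmap_entry *my_entry, u64 *offset)
{
	int ret;

	ret = rdma_user_mmap_entry_insert(ucontext, &my_entry->rdma_entry,
					  PAGE_SIZE);
	if (ret)
		return ret;

	*offset = rdma_user_mmap_get_offset(&my_entry->rdma_entry);
	return 0;
}
#endif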
2285 * struct ib_device_ops - InfiniBand device operations
2286 * This structure defines all the InfiniBand device operations, providers will
2287 * need to define the supported operations, otherwise they will be set to null.
2289 struct ib_device_ops
{
2290 struct module
*owner
;
2291 enum rdma_driver_id driver_id
;
2293 unsigned int uverbs_no_driver_id_binding
:1;
2295 int (*post_send
)(struct ib_qp
*qp
, const struct ib_send_wr
*send_wr
,
2296 const struct ib_send_wr
**bad_send_wr
);
2297 int (*post_recv
)(struct ib_qp
*qp
, const struct ib_recv_wr
*recv_wr
,
2298 const struct ib_recv_wr
**bad_recv_wr
);
2299 void (*drain_rq
)(struct ib_qp
*qp
);
2300 void (*drain_sq
)(struct ib_qp
*qp
);
2301 int (*poll_cq
)(struct ib_cq
*cq
, int num_entries
, struct ib_wc
*wc
);
2302 int (*peek_cq
)(struct ib_cq
*cq
, int wc_cnt
);
2303 int (*req_notify_cq
)(struct ib_cq
*cq
, enum ib_cq_notify_flags flags
);
2304 int (*req_ncomp_notif
)(struct ib_cq
*cq
, int wc_cnt
);
2305 int (*post_srq_recv
)(struct ib_srq
*srq
,
2306 const struct ib_recv_wr
*recv_wr
,
2307 const struct ib_recv_wr
**bad_recv_wr
);
2308 int (*process_mad
)(struct ib_device
*device
, int process_mad_flags
,
2309 u8 port_num
, const struct ib_wc
*in_wc
,
2310 const struct ib_grh
*in_grh
,
2311 const struct ib_mad
*in_mad
, struct ib_mad
*out_mad
,
2312 size_t *out_mad_size
, u16
*out_mad_pkey_index
);
2313 int (*query_device
)(struct ib_device
*device
,
2314 struct ib_device_attr
*device_attr
,
2315 struct ib_udata
*udata
);
2316 int (*modify_device
)(struct ib_device
*device
, int device_modify_mask
,
2317 struct ib_device_modify
*device_modify
);
2318 void (*get_dev_fw_str
)(struct ib_device
*device
, char *str
);
2319 const struct cpumask
*(*get_vector_affinity
)(struct ib_device
*ibdev
,
2321 int (*query_port
)(struct ib_device
*device
, u8 port_num
,
2322 struct ib_port_attr
*port_attr
);
2323 int (*modify_port
)(struct ib_device
*device
, u8 port_num
,
2324 int port_modify_mask
,
2325 struct ib_port_modify
*port_modify
);
2327 * The following mandatory functions are used only at device
2328 * registration. Keep functions such as these at the end of this
2329 * structure to avoid cache line misses when accessing struct ib_device
2332 int (*get_port_immutable
)(struct ib_device
*device
, u8 port_num
,
2333 struct ib_port_immutable
*immutable
);
2334 enum rdma_link_layer (*get_link_layer
)(struct ib_device
*device
,
2337 * When calling get_netdev, the HW vendor's driver should return the
2338 * net device of device @device at port @port_num or NULL if such
2339 * a net device doesn't exist. The vendor driver should call dev_hold
2340 * on this net device. The HW vendor's device driver must guarantee
2341 * that this function returns NULL before the net device has finished
2342 * NETDEV_UNREGISTER state.
2344 struct net_device
*(*get_netdev
)(struct ib_device
*device
, u8 port_num
);
2346 * rdma netdev operation
2348 * Driver implementing alloc_rdma_netdev or rdma_netdev_get_params
2349 * must return -EOPNOTSUPP if it doesn't support the specified type.
2351 struct net_device
*(*alloc_rdma_netdev
)(
2352 struct ib_device
*device
, u8 port_num
, enum rdma_netdev_t type
,
2353 const char *name
, unsigned char name_assign_type
,
2354 void (*setup
)(struct net_device
*));
2356 int (*rdma_netdev_get_params
)(struct ib_device
*device
, u8 port_num
,
2357 enum rdma_netdev_t type
,
2358 struct rdma_netdev_alloc_params
*params
);
2360 * query_gid should be return GID value for @device, when @port_num
2361 * link layer is either IB or iWarp. It is no-op if @port_num port
2362 * is RoCE link layer.
2364 int (*query_gid
)(struct ib_device
*device
, u8 port_num
, int index
,
2367 * When calling add_gid, the HW vendor's driver should add the gid
2368 * of device of port at gid index available at @attr. Meta-info of
2369 * that gid (for example, the network device related to this gid) is
2370 * available at @attr. @context allows the HW vendor driver to store
2371 * extra information together with a GID entry. The HW vendor driver may
2372 * allocate memory to contain this information and store it in @context
2373 * when a new GID entry is written to. Params are consistent until the
2374 * next call of add_gid or delete_gid. The function should return 0 on
2375 * success or error otherwise. The function could be called
2376 * concurrently for different ports. This function is only called when
2377 * roce_gid_table is used.
2379 int (*add_gid
)(const struct ib_gid_attr
*attr
, void **context
);
2381 * When calling del_gid, the HW vendor's driver should delete the
2382 * gid of device @device at gid index gid_index of port port_num
2383 * available in @attr.
2384 * Upon the deletion of a GID entry, the HW vendor must free any
2385 * allocated memory. The caller will clear @context afterwards.
2386 * This function is only called when roce_gid_table is used.
2388 int (*del_gid
)(const struct ib_gid_attr
*attr
, void **context
);
2389 int (*query_pkey
)(struct ib_device
*device
, u8 port_num
, u16 index
,
2391 int (*alloc_ucontext
)(struct ib_ucontext
*context
,
2392 struct ib_udata
*udata
);
2393 void (*dealloc_ucontext
)(struct ib_ucontext
*context
);
2394 int (*mmap
)(struct ib_ucontext
*context
, struct vm_area_struct
*vma
);
2396 * This will be called once refcount of an entry in mmap_xa reaches
2397 * zero. The type of the memory that was mapped may differ between
2398 * entries and is opaque to the rdma_user_mmap interface.
2399 * Therefore needs to be implemented by the driver in mmap_free.
2401 void (*mmap_free
)(struct rdma_user_mmap_entry
*entry
);
2402 void (*disassociate_ucontext
)(struct ib_ucontext
*ibcontext
);
2403 int (*alloc_pd
)(struct ib_pd
*pd
, struct ib_udata
*udata
);
2404 int (*dealloc_pd
)(struct ib_pd
*pd
, struct ib_udata
*udata
);
2405 int (*create_ah
)(struct ib_ah
*ah
, struct rdma_ah_init_attr
*attr
,
2406 struct ib_udata
*udata
);
2407 int (*create_user_ah
)(struct ib_ah
*ah
, struct rdma_ah_init_attr
*attr
,
2408 struct ib_udata
*udata
);
	int (*modify_ah)(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
	int (*query_ah)(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
	int (*destroy_ah)(struct ib_ah *ah, u32 flags);
	int (*create_srq)(struct ib_srq *srq,
			  struct ib_srq_init_attr *srq_init_attr,
			  struct ib_udata *udata);
	int (*modify_srq)(struct ib_srq *srq, struct ib_srq_attr *srq_attr,
			  enum ib_srq_attr_mask srq_attr_mask,
			  struct ib_udata *udata);
	int (*query_srq)(struct ib_srq *srq, struct ib_srq_attr *srq_attr);
	int (*destroy_srq)(struct ib_srq *srq, struct ib_udata *udata);
	struct ib_qp *(*create_qp)(struct ib_pd *pd,
				   struct ib_qp_init_attr *qp_init_attr,
				   struct ib_udata *udata);
	int (*modify_qp)(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
			 int qp_attr_mask, struct ib_udata *udata);
	int (*query_qp)(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
			int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr);
	int (*destroy_qp)(struct ib_qp *qp, struct ib_udata *udata);
	int (*create_cq)(struct ib_cq *cq, const struct ib_cq_init_attr *attr,
			 struct ib_udata *udata);
	int (*modify_cq)(struct ib_cq *cq, u16 cq_count, u16 cq_period);
	int (*destroy_cq)(struct ib_cq *cq, struct ib_udata *udata);
	int (*resize_cq)(struct ib_cq *cq, int cqe, struct ib_udata *udata);
	struct ib_mr *(*get_dma_mr)(struct ib_pd *pd, int mr_access_flags);
	struct ib_mr *(*reg_user_mr)(struct ib_pd *pd, u64 start, u64 length,
				     u64 virt_addr, int mr_access_flags,
				     struct ib_udata *udata);
	struct ib_mr *(*rereg_user_mr)(struct ib_mr *mr, int flags, u64 start,
				       u64 length, u64 virt_addr,
				       int mr_access_flags, struct ib_pd *pd,
				       struct ib_udata *udata);
	int (*dereg_mr)(struct ib_mr *mr, struct ib_udata *udata);
	struct ib_mr *(*alloc_mr)(struct ib_pd *pd, enum ib_mr_type mr_type,
				  u32 max_num_sg);
	struct ib_mr *(*alloc_mr_integrity)(struct ib_pd *pd,
					    u32 max_num_data_sg,
					    u32 max_num_meta_sg);
	int (*advise_mr)(struct ib_pd *pd,
			 enum ib_uverbs_advise_mr_advice advice, u32 flags,
			 struct ib_sge *sg_list, u32 num_sge,
			 struct uverbs_attr_bundle *attrs);
	int (*map_mr_sg)(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
			 unsigned int *sg_offset);
	int (*check_mr_status)(struct ib_mr *mr, u32 check_mask,
			       struct ib_mr_status *mr_status);
	int (*alloc_mw)(struct ib_mw *mw, struct ib_udata *udata);
	int (*dealloc_mw)(struct ib_mw *mw);
	int (*attach_mcast)(struct ib_qp *qp, union ib_gid *gid, u16 lid);
	int (*detach_mcast)(struct ib_qp *qp, union ib_gid *gid, u16 lid);
	int (*alloc_xrcd)(struct ib_xrcd *xrcd, struct ib_udata *udata);
	int (*dealloc_xrcd)(struct ib_xrcd *xrcd, struct ib_udata *udata);
	struct ib_flow *(*create_flow)(struct ib_qp *qp,
				       struct ib_flow_attr *flow_attr,
				       struct ib_udata *udata);
	int (*destroy_flow)(struct ib_flow *flow_id);
	struct ib_flow_action *(*create_flow_action_esp)(
		struct ib_device *device,
		const struct ib_flow_action_attrs_esp *attr,
		struct uverbs_attr_bundle *attrs);
	int (*destroy_flow_action)(struct ib_flow_action *action);
	int (*modify_flow_action_esp)(
		struct ib_flow_action *action,
		const struct ib_flow_action_attrs_esp *attr,
		struct uverbs_attr_bundle *attrs);
	int (*set_vf_link_state)(struct ib_device *device, int vf, u8 port,
				 int state);
	int (*get_vf_config)(struct ib_device *device, int vf, u8 port,
			     struct ifla_vf_info *ivf);
	int (*get_vf_stats)(struct ib_device *device, int vf, u8 port,
			    struct ifla_vf_stats *stats);
	int (*get_vf_guid)(struct ib_device *device, int vf, u8 port,
			   struct ifla_vf_guid *node_guid,
			   struct ifla_vf_guid *port_guid);
	int (*set_vf_guid)(struct ib_device *device, int vf, u8 port, u64 guid,
			   int type);
	struct ib_wq *(*create_wq)(struct ib_pd *pd,
				   struct ib_wq_init_attr *init_attr,
				   struct ib_udata *udata);
	int (*destroy_wq)(struct ib_wq *wq, struct ib_udata *udata);
	int (*modify_wq)(struct ib_wq *wq, struct ib_wq_attr *attr,
			 u32 wq_attr_mask, struct ib_udata *udata);
	int (*create_rwq_ind_table)(struct ib_rwq_ind_table *ib_rwq_ind_table,
				    struct ib_rwq_ind_table_init_attr *init_attr,
				    struct ib_udata *udata);
	int (*destroy_rwq_ind_table)(struct ib_rwq_ind_table *wq_ind_table);
	struct ib_dm *(*alloc_dm)(struct ib_device *device,
				  struct ib_ucontext *context,
				  struct ib_dm_alloc_attr *attr,
				  struct uverbs_attr_bundle *attrs);
	int (*dealloc_dm)(struct ib_dm *dm, struct uverbs_attr_bundle *attrs);
	struct ib_mr *(*reg_dm_mr)(struct ib_pd *pd, struct ib_dm *dm,
				   struct ib_dm_mr_attr *attr,
				   struct uverbs_attr_bundle *attrs);
	int (*create_counters)(struct ib_counters *counters,
			       struct uverbs_attr_bundle *attrs);
	int (*destroy_counters)(struct ib_counters *counters);
	int (*read_counters)(struct ib_counters *counters,
			     struct ib_counters_read_attr *counters_read_attr,
			     struct uverbs_attr_bundle *attrs);
	int (*map_mr_sg_pi)(struct ib_mr *mr, struct scatterlist *data_sg,
			    int data_sg_nents, unsigned int *data_sg_offset,
			    struct scatterlist *meta_sg, int meta_sg_nents,
			    unsigned int *meta_sg_offset);
	/**
	 * alloc_hw_stats - Allocate a struct rdma_hw_stats and fill in the
	 *   driver initialized data.  The struct is kfree()'ed by the sysfs
	 *   core when the device is removed.  A lifespan of -1 in the return
	 *   struct tells the core to set a default lifespan.
	 */
	struct rdma_hw_stats *(*alloc_hw_stats)(struct ib_device *device,
						u8 port_num);
	/**
	 * get_hw_stats - Fill in the counter value(s) in the stats struct.
	 * @index - The index in the value array we wish to have updated, or
	 *   num_counters if we want all stats updated
	 * Return codes -
	 *   < 0 - Error, no counters updated
	 *   index - Updated the single counter pointed to by index
	 *   num_counters - Updated all counters (will reset the timestamp
	 *     and prevent further calls for lifespan milliseconds)
	 * Drivers are allowed to update all counters in lieu of just the
	 *   one given in index at their option
	 */
	int (*get_hw_stats)(struct ib_device *device,
			    struct rdma_hw_stats *stats, u8 port, int index);
	/*
	 * This function is called once for each port when an ib device is
	 * registered.
	 */
	int (*init_port)(struct ib_device *device, u8 port_num,
			 struct kobject *port_sysfs);
	/**
	 * Allows rdma drivers to add their own restrack attributes.
	 */
	int (*fill_res_mr_entry)(struct sk_buff *msg, struct ib_mr *ibmr);
	int (*fill_res_mr_entry_raw)(struct sk_buff *msg, struct ib_mr *ibmr);
	int (*fill_res_cq_entry)(struct sk_buff *msg, struct ib_cq *ibcq);
	int (*fill_res_cq_entry_raw)(struct sk_buff *msg, struct ib_cq *ibcq);
	int (*fill_res_qp_entry)(struct sk_buff *msg, struct ib_qp *ibqp);
	int (*fill_res_qp_entry_raw)(struct sk_buff *msg, struct ib_qp *ibqp);
	int (*fill_res_cm_id_entry)(struct sk_buff *msg, struct rdma_cm_id *id);

	/* Device lifecycle callbacks */
	/*
	 * Called after the device becomes registered, before clients are
	 * attached.
	 */
	int (*enable_driver)(struct ib_device *dev);
	/*
	 * This is called as part of ib_dealloc_device().
	 */
	void (*dealloc_driver)(struct ib_device *dev);
	/* iWarp CM callbacks */
	void (*iw_add_ref)(struct ib_qp *qp);
	void (*iw_rem_ref)(struct ib_qp *qp);
	struct ib_qp *(*iw_get_qp)(struct ib_device *device, int qpn);
	int (*iw_connect)(struct iw_cm_id *cm_id,
			  struct iw_cm_conn_param *conn_param);
	int (*iw_accept)(struct iw_cm_id *cm_id,
			 struct iw_cm_conn_param *conn_param);
	int (*iw_reject)(struct iw_cm_id *cm_id, const void *pdata,
			 u8 pdata_len);
	int (*iw_create_listen)(struct iw_cm_id *cm_id, int backlog);
	int (*iw_destroy_listen)(struct iw_cm_id *cm_id);
	/**
	 * counter_bind_qp - Bind a QP to a counter.
	 * @counter - The counter to be bound. If counter->id is zero then
	 *   the driver needs to allocate a new counter and set counter->id
	 */
	int (*counter_bind_qp)(struct rdma_counter *counter, struct ib_qp *qp);
	/**
	 * counter_unbind_qp - Unbind the qp from the dynamically-allocated
	 *   counter and bind it onto the default one
	 */
	int (*counter_unbind_qp)(struct ib_qp *qp);
	/**
	 * counter_dealloc - De-allocate the hw counter
	 */
	int (*counter_dealloc)(struct rdma_counter *counter);
	/**
	 * counter_alloc_stats - Allocate a struct rdma_hw_stats and fill in
	 *   the driver initialized data.
	 */
	struct rdma_hw_stats *(*counter_alloc_stats)(
		struct rdma_counter *counter);
	/**
	 * counter_update_stats - Query the stats value of this counter
	 */
	int (*counter_update_stats)(struct rdma_counter *counter);

	/*
	 * Allows rdma drivers to add their own restrack attributes
	 * dumped via 'rdma stat' iproute2 command.
	 */
	int (*fill_stat_mr_entry)(struct sk_buff *msg, struct ib_mr *ibmr);

	/* query driver for its ucontext properties */
	int (*query_ucontext)(struct ib_ucontext *context,
			      struct uverbs_attr_bundle *attrs);
	DECLARE_RDMA_OBJ_SIZE(ib_ah);
	DECLARE_RDMA_OBJ_SIZE(ib_counters);
	DECLARE_RDMA_OBJ_SIZE(ib_cq);
	DECLARE_RDMA_OBJ_SIZE(ib_mw);
	DECLARE_RDMA_OBJ_SIZE(ib_pd);
	DECLARE_RDMA_OBJ_SIZE(ib_rwq_ind_table);
	DECLARE_RDMA_OBJ_SIZE(ib_srq);
	DECLARE_RDMA_OBJ_SIZE(ib_ucontext);
	DECLARE_RDMA_OBJ_SIZE(ib_xrcd);
};
struct ib_core_device {
	/* device must be the first element in structure until,
	 * union of ib_core_device and device exists in ib_device.
	 */
	struct device dev;
	possible_net_t rdma_net;
	struct kobject *ports_kobj;
	struct list_head port_list;
	struct ib_device *owner; /* reach back to owner ib_device */
};

struct rdma_restrack_root;
struct ib_device {
	/* Do not access @dma_device directly from ULP nor from HW drivers. */
	struct device *dma_device;
	struct ib_device_ops ops;
	char name[IB_DEVICE_NAME_MAX];
	struct rcu_head rcu_head;

	struct list_head event_handler_list;
	/* Protects event_handler_list */
	struct rw_semaphore event_handler_rwsem;

	/* Protects QP's event_handler calls and open_qp list */
	spinlock_t qp_open_list_lock;

	struct rw_semaphore client_data_rwsem;
	struct xarray client_data;
	struct mutex unregistration_lock;

	/* Synchronize GID, Pkey cache entries, subnet prefix, LMC */
	rwlock_t cache_lock;
	/**
	 * port_data is indexed by port number
	 */
	struct ib_port_data *port_data;

	int num_comp_vectors;

	union {
		struct device dev;
		struct ib_core_device coredev;
	};

	/* First group for device attributes,
	 * Second group for driver provided attributes (optional).
	 * It is NULL terminated array.
	 */
	const struct attribute_group *groups[3];

	u64 uverbs_cmd_mask;

	char node_desc[IB_DEVICE_NODE_DESC_MAX];
	__be64 node_guid;
	u32 local_dma_lkey;
	u16 is_switch:1;
	/* Indicates kernel verbs support, should not be used in drivers */
	u16 kverbs_provider:1;
	/* CQ adaptive moderation (RDMA DIM) */
	u16 use_cq_dim:1;
	u8 node_type;
	u32 phys_port_cnt;
	struct ib_device_attr attrs;
	struct attribute_group *hw_stats_ag;
	struct rdma_hw_stats *hw_stats;

#ifdef CONFIG_CGROUP_RDMA
	struct rdmacg_device cg_device;
#endif

	u32 index;

	spinlock_t cq_pools_lock;
	struct list_head cq_pools[IB_POLL_LAST_POOL_TYPE + 1];

	struct rdma_restrack_root *res;

	const struct uapi_definition *driver_def;

	/*
	 * Positive refcount indicates that the device is currently
	 * registered and cannot be unregistered.
	 */
	refcount_t refcount;
	struct completion unreg_completion;
	struct work_struct unregistration_work;

	const struct rdma_link_ops *link_ops;

	/* Protects compat_devs xarray modifications */
	struct mutex compat_devs_mutex;
	/* Maintains compat devices for each net namespace */
	struct xarray compat_devs;

	/* Used by iWarp CM */
	char iw_ifname[IFNAMSIZ];
	u32 iw_driver_flags;
};
struct ib_client_nl_info;
struct ib_client {
	const char *name;
	int (*add)(struct ib_device *ibdev);
	void (*remove)(struct ib_device *, void *client_data);
	void (*rename)(struct ib_device *dev, void *client_data);
	int (*get_nl_info)(struct ib_device *ibdev, void *client_data,
			   struct ib_client_nl_info *res);
	int (*get_global_nl_info)(struct ib_client_nl_info *res);

	/* Returns the net_dev belonging to this ib_client and matching the
	 * given parameters.
	 * @dev: An RDMA device that the net_dev use for communication.
	 * @port: A physical port number on the RDMA device.
	 * @pkey: P_Key that the net_dev uses if applicable.
	 * @gid: A GID that the net_dev uses to communicate.
	 * @addr: An IP address the net_dev is configured with.
	 * @client_data: The device's client data set by ib_set_client_data().
	 *
	 * An ib_client that implements a net_dev on top of RDMA devices
	 * (such as IP over IB) should implement this callback, allowing the
	 * rdma_cm module to find the right net_dev for a given request.
	 *
	 * The caller is responsible for calling dev_put on the returned
	 * netdev. */
	struct net_device *(*get_net_dev_by_params)(
			struct ib_device *dev,
			u8 port,
			u16 pkey,
			const union ib_gid *gid,
			const struct sockaddr *addr,
			void *client_data);

	refcount_t uses;
	struct completion uses_zero;
	u32 client_id;

	/* kverbs are not required by the client */
	u8 no_kverbs_req:1;
};
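
/*
 * Example: a minimal client sketch ("myclient" and its data are
 * hypothetical).  The core invokes add() for every existing and future
 * device; data stored with ib_set_client_data() can be fetched with
 * ib_get_client_data() until remove() returns.
 *
 *	static int myclient_add(struct ib_device *ibdev)
 *	{
 *		struct myclient_data *data = kzalloc(sizeof(*data), GFP_KERNEL);
 *
 *		if (!data)
 *			return -ENOMEM;
 *		ib_set_client_data(ibdev, &myclient, data);
 *		return 0;
 *	}
 *
 *	static struct ib_client myclient = {
 *		.name = "myclient",
 *		.add = myclient_add,
 *		.remove = myclient_remove,
 *	};
 *
 *	err = ib_register_client(&myclient);
 */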
/*
 * IB block DMA iterator
 *
 * Iterates the DMA-mapped SGL in contiguous memory blocks aligned
 * to a HW supported page size.
 */
struct ib_block_iter {
	/* internal states */
	struct scatterlist *__sg;	/* sg holding the current aligned block */
	dma_addr_t __dma_addr;		/* unaligned DMA address of this block */
	unsigned int __sg_nents;	/* number of SG entries */
	unsigned int __sg_advance;	/* number of bytes to advance in sg in next step */
	unsigned int __pg_bit;		/* alignment of current block */
};

struct ib_device *_ib_alloc_device(size_t size);

#define ib_alloc_device(drv_struct, member)                                    \
	container_of(_ib_alloc_device(sizeof(struct drv_struct) +             \
				      BUILD_BUG_ON_ZERO(offsetof(              \
					      struct drv_struct, member))),    \
		     struct drv_struct, member)
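
/*
 * Example: typical use in a provider's probe path.  "mydrv_dev" is a
 * hypothetical driver structure; the member passed to the macro must
 * be its first field, which BUILD_BUG_ON_ZERO() enforces at compile
 * time.  NULL is returned on allocation failure:
 *
 *	struct mydrv_dev {
 *		struct ib_device ibdev;
 *		u32 private_state;
 *	};
 *
 *	struct mydrv_dev *dev = ib_alloc_device(mydrv_dev, ibdev);
 *	if (!dev)
 *		return -ENOMEM;
 */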
void ib_dealloc_device(struct ib_device *device);

void ib_get_device_fw_str(struct ib_device *device, char *str);

int ib_register_device(struct ib_device *device, const char *name,
		       struct device *dma_device);
void ib_unregister_device(struct ib_device *device);
void ib_unregister_driver(enum rdma_driver_id driver_id);
void ib_unregister_device_and_put(struct ib_device *device);
void ib_unregister_device_queued(struct ib_device *ib_dev);

int ib_register_client(struct ib_client *client);
void ib_unregister_client(struct ib_client *client);

void __rdma_block_iter_start(struct ib_block_iter *biter,
			     struct scatterlist *sglist,
			     unsigned int nents,
			     unsigned long pgsz);
bool __rdma_block_iter_next(struct ib_block_iter *biter);
/**
 * rdma_block_iter_dma_address - get the aligned dma address of the current
 * block held by the block iterator.
 * @biter: block iterator holding the memory block
 */
static inline dma_addr_t
rdma_block_iter_dma_address(struct ib_block_iter *biter)
{
	return biter->__dma_addr & ~(BIT_ULL(biter->__pg_bit) - 1);
}

/**
 * rdma_for_each_block - iterate over contiguous memory blocks of the sg list
 * @sglist: sglist to iterate over
 * @biter: block iterator holding the memory block
 * @nents: maximum number of sg entries to iterate over
 * @pgsz: best HW supported page size to use
 *
 * Callers may use rdma_block_iter_dma_address() to get each block's aligned
 * DMA address.
 */
#define rdma_for_each_block(sglist, biter, nents, pgsz)		\
	for (__rdma_block_iter_start(biter, sglist, nents,	\
				     pgsz);			\
	     __rdma_block_iter_next(biter);)
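
/*
 * Example: collecting the aligned block addresses of a mapped SGL,
 * e.g. to build a page list for an MR ("pas" is a hypothetical
 * destination array sized for the SGL):
 *
 *	struct ib_block_iter biter;
 *	int i = 0;
 *
 *	rdma_for_each_block(sglist, &biter, nents, PAGE_SIZE)
 *		pas[i++] = rdma_block_iter_dma_address(&biter);
 */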
/**
 * ib_get_client_data - Get IB client context
 * @device: Device to get context for
 * @client: Client to get context for
 *
 * ib_get_client_data() returns the client context data set with
 * ib_set_client_data().  This can only be called while the client is
 * registered to the device; once the ib_client remove() callback returns,
 * this call will fail.
 */
static inline void *ib_get_client_data(struct ib_device *device,
				       struct ib_client *client)
{
	return xa_load(&device->client_data, client->client_id);
}
void ib_set_client_data(struct ib_device *device, struct ib_client *client,
			void *data);
void ib_set_device_ops(struct ib_device *device,
		       const struct ib_device_ops *ops);
int rdma_user_mmap_io(struct ib_ucontext *ucontext, struct vm_area_struct *vma,
		      unsigned long pfn, unsigned long size, pgprot_t prot,
		      struct rdma_user_mmap_entry *entry);
int rdma_user_mmap_entry_insert(struct ib_ucontext *ucontext,
				struct rdma_user_mmap_entry *entry,
				size_t length);
int rdma_user_mmap_entry_insert_range(struct ib_ucontext *ucontext,
				      struct rdma_user_mmap_entry *entry,
				      size_t length, u32 min_pgoff,
				      u32 max_pgoff);

struct rdma_user_mmap_entry *
rdma_user_mmap_entry_get_pgoff(struct ib_ucontext *ucontext,
			       unsigned long pgoff);
struct rdma_user_mmap_entry *
rdma_user_mmap_entry_get(struct ib_ucontext *ucontext,
			 struct vm_area_struct *vma);
void rdma_user_mmap_entry_put(struct rdma_user_mmap_entry *entry);

void rdma_user_mmap_entry_remove(struct rdma_user_mmap_entry *entry);
static inline int ib_copy_from_udata(void *dest, struct ib_udata *udata, size_t len)
{
	return copy_from_user(dest, udata->inbuf, len) ? -EFAULT : 0;
}

static inline int ib_copy_to_udata(struct ib_udata *udata, void *src, size_t len)
{
	return copy_to_user(udata->outbuf, src, len) ? -EFAULT : 0;
}

static inline bool ib_is_buffer_cleared(const void __user *p,
					size_t len)
{
	bool ret;
	u8 *buf;

	if (len > USHRT_MAX)
		return false;

	buf = memdup_user(p, len);
	if (IS_ERR(buf))
		return false;

	ret = !memchr_inv(buf, 0, len);
	kfree(buf);
	return ret;
}

static inline bool ib_is_udata_cleared(struct ib_udata *udata,
				       size_t offset,
				       size_t len)
{
	return ib_is_buffer_cleared(udata->inbuf + offset, len);
}
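
/*
 * Example: parsing a user command in a driver verb.  "struct
 * mydrv_create_cq_cmd" is a hypothetical driver ABI structure; input
 * beyond what the kernel understands should be verified as zeroed so
 * the space can be claimed by future ABI extensions:
 *
 *	struct mydrv_create_cq_cmd cmd;
 *
 *	if (udata->inlen < sizeof(cmd))
 *		return -EINVAL;
 *	if (ib_copy_from_udata(&cmd, udata, sizeof(cmd)))
 *		return -EFAULT;
 *	if (!ib_is_udata_cleared(udata, sizeof(cmd),
 *				 udata->inlen - sizeof(cmd)))
 *		return -EOPNOTSUPP;
 */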
/**
 * ib_modify_qp_is_ok - Check that the supplied attribute mask
 * contains all required attributes and no attributes not allowed for
 * the given QP state transition.
 * @cur_state: Current QP state
 * @next_state: Next QP state
 * @type: QP type
 * @mask: Mask of supplied QP attributes
 *
 * This function is a helper function that a low-level driver's
 * modify_qp method can use to validate the consumer's input.  It
 * checks that cur_state and next_state are valid QP states, that a
 * transition from cur_state to next_state is allowed by the IB spec,
 * and that the attribute mask supplied is allowed for the transition.
 */
bool ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
			enum ib_qp_type type, enum ib_qp_attr_mask mask);

void ib_register_event_handler(struct ib_event_handler *event_handler);
void ib_unregister_event_handler(struct ib_event_handler *event_handler);
void ib_dispatch_event(const struct ib_event *event);

int ib_query_port(struct ib_device *device,
		  u8 port_num, struct ib_port_attr *port_attr);

enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device,
					      u8 port_num);
/**
 * rdma_cap_ib_switch - Check if the device is IB switch
 * @device: Device to check
 *
 * Device driver is responsible for setting is_switch bit on
 * in ib_device structure at init time.
 *
 * Return: true if the device is IB switch.
 */
static inline bool rdma_cap_ib_switch(const struct ib_device *device)
{
	return device->is_switch;
}

/**
 * rdma_start_port - Return the first valid port number for the device
 * specified
 *
 * @device: Device to be checked
 *
 * Return start port number
 */
static inline u8 rdma_start_port(const struct ib_device *device)
{
	return rdma_cap_ib_switch(device) ? 0 : 1;
}

/**
 * rdma_for_each_port - Iterate over all valid port numbers of the IB device
 * @device - The struct ib_device * to iterate over
 * @iter - The unsigned int to store the port number
 */
#define rdma_for_each_port(device, iter)                                       \
	for (iter = rdma_start_port(device +                                   \
				    BUILD_BUG_ON_ZERO(!__same_type(            \
					    unsigned int, iter)));             \
	     iter <= rdma_end_port(device); (iter)++)
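
/*
 * Example: visiting every valid port; the iterator hides the fact that
 * port numbering is 1-based on HCAs and 0-based on switches
 * ("handle_roce_port" is a hypothetical helper):
 *
 *	unsigned int port;
 *
 *	rdma_for_each_port(ibdev, port)
 *		if (rdma_protocol_roce(ibdev, port))
 *			handle_roce_port(ibdev, port);
 */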
/**
 * rdma_end_port - Return the last valid port number for the device
 * specified
 *
 * @device: Device to be checked
 *
 * Return last port number
 */
static inline u8 rdma_end_port(const struct ib_device *device)
{
	return rdma_cap_ib_switch(device) ? 0 : device->phys_port_cnt;
}

static inline int rdma_is_port_valid(const struct ib_device *device,
				     unsigned int port)
{
	return (port >= rdma_start_port(device) &&
		port <= rdma_end_port(device));
}

static inline bool rdma_is_grh_required(const struct ib_device *device,
					u8 port_num)
{
	return device->port_data[port_num].immutable.core_cap_flags &
	       RDMA_CORE_PORT_IB_GRH_REQUIRED;
}

static inline bool rdma_protocol_ib(const struct ib_device *device, u8 port_num)
{
	return device->port_data[port_num].immutable.core_cap_flags &
	       RDMA_CORE_CAP_PROT_IB;
}

static inline bool rdma_protocol_roce(const struct ib_device *device, u8 port_num)
{
	return device->port_data[port_num].immutable.core_cap_flags &
	       (RDMA_CORE_CAP_PROT_ROCE | RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP);
}

static inline bool rdma_protocol_roce_udp_encap(const struct ib_device *device, u8 port_num)
{
	return device->port_data[port_num].immutable.core_cap_flags &
	       RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP;
}

static inline bool rdma_protocol_roce_eth_encap(const struct ib_device *device, u8 port_num)
{
	return device->port_data[port_num].immutable.core_cap_flags &
	       RDMA_CORE_CAP_PROT_ROCE;
}

static inline bool rdma_protocol_iwarp(const struct ib_device *device, u8 port_num)
{
	return device->port_data[port_num].immutable.core_cap_flags &
	       RDMA_CORE_CAP_PROT_IWARP;
}

static inline bool rdma_ib_or_roce(const struct ib_device *device, u8 port_num)
{
	return rdma_protocol_ib(device, port_num) ||
		rdma_protocol_roce(device, port_num);
}

static inline bool rdma_protocol_raw_packet(const struct ib_device *device, u8 port_num)
{
	return device->port_data[port_num].immutable.core_cap_flags &
	       RDMA_CORE_CAP_PROT_RAW_PACKET;
}

static inline bool rdma_protocol_usnic(const struct ib_device *device, u8 port_num)
{
	return device->port_data[port_num].immutable.core_cap_flags &
	       RDMA_CORE_CAP_PROT_USNIC;
}
/**
 * rdma_cap_ib_mad - Check if the port of a device supports Infiniband
 * Management Datagrams.
 * @device: Device to check
 * @port_num: Port number to check
 *
 * Management Datagrams (MAD) are a required part of the InfiniBand
 * specification and are supported on all InfiniBand devices.  A slightly
 * extended version is also supported on OPA interfaces.
 *
 * Return: true if the port supports sending/receiving of MAD packets.
 */
static inline bool rdma_cap_ib_mad(const struct ib_device *device, u8 port_num)
{
	return device->port_data[port_num].immutable.core_cap_flags &
	       RDMA_CORE_CAP_IB_MAD;
}

/**
 * rdma_cap_opa_mad - Check if the port of device provides support for OPA
 * Management Datagrams.
 * @device: Device to check
 * @port_num: Port number to check
 *
 * Intel OmniPath devices extend and/or replace the InfiniBand Management
 * datagrams with their own versions.  These OPA MADs share many but not all of
 * the characteristics of InfiniBand MADs.
 *
 * OPA MADs differ in the following ways:
 *
 *    1) MADs are variable size up to 2K
 *       IBTA defined MADs remain fixed at 256 bytes
 *    2) OPA SMPs must carry valid PKeys
 *    3) OPA SMP packets are a different format
 *
 * Return: true if the port supports OPA MAD packet formats.
 */
static inline bool rdma_cap_opa_mad(struct ib_device *device, u8 port_num)
{
	return device->port_data[port_num].immutable.core_cap_flags &
	       RDMA_CORE_CAP_OPA_MAD;
}
/**
 * rdma_cap_ib_smi - Check if the port of a device provides an Infiniband
 * Subnet Management Agent (SMA) on the Subnet Management Interface (SMI).
 * @device: Device to check
 * @port_num: Port number to check
 *
 * Each InfiniBand node is required to provide a Subnet Management Agent
 * that the subnet manager can access.  Prior to the fabric being fully
 * configured by the subnet manager, the SMA is accessed via a well known
 * interface called the Subnet Management Interface (SMI).  This interface
 * uses directed route packets to communicate with the SM to get around the
 * chicken and egg problem of the SM needing to know what's on the fabric
 * in order to configure the fabric, and needing to configure the fabric in
 * order to send packets to the devices on the fabric.  These directed
 * route packets do not need the fabric fully configured in order to reach
 * their destination.  The SMI is the only method allowed to send
 * directed route packets on an InfiniBand fabric.
 *
 * Return: true if the port provides an SMI.
 */
static inline bool rdma_cap_ib_smi(const struct ib_device *device, u8 port_num)
{
	return device->port_data[port_num].immutable.core_cap_flags &
	       RDMA_CORE_CAP_IB_SMI;
}

/**
 * rdma_cap_ib_cm - Check if the port of device has the capability Infiniband
 * Communication Manager.
 * @device: Device to check
 * @port_num: Port number to check
 *
 * The InfiniBand Communication Manager is one of many pre-defined General
 * Service Agents (GSA) that are accessed via the General Service
 * Interface (GSI).  Its role is to facilitate establishment of connections
 * between nodes as well as other management related tasks for established
 * connections.
 *
 * Return: true if the port supports an IB CM (this does not guarantee that
 * a CM is actually running however).
 */
static inline bool rdma_cap_ib_cm(const struct ib_device *device, u8 port_num)
{
	return device->port_data[port_num].immutable.core_cap_flags &
	       RDMA_CORE_CAP_IB_CM;
}

/**
 * rdma_cap_iw_cm - Check if the port of device has the capability IWARP
 * Communication Manager.
 * @device: Device to check
 * @port_num: Port number to check
 *
 * Similar to above, but specific to iWARP connections which have a different
 * management protocol than InfiniBand.
 *
 * Return: true if the port supports an iWARP CM (this does not guarantee that
 * a CM is actually running however).
 */
static inline bool rdma_cap_iw_cm(const struct ib_device *device, u8 port_num)
{
	return device->port_data[port_num].immutable.core_cap_flags &
	       RDMA_CORE_CAP_IW_CM;
}

/**
 * rdma_cap_ib_sa - Check if the port of device has the capability Infiniband
 * Subnet Administration.
 * @device: Device to check
 * @port_num: Port number to check
 *
 * An InfiniBand Subnet Administration (SA) service is a pre-defined General
 * Service Agent (GSA) provided by the Subnet Manager (SM).  On InfiniBand
 * fabrics, devices should resolve routes to other hosts by contacting the
 * SA to query the proper route.
 *
 * Return: true if the port should act as a client to the fabric Subnet
 * Administration interface.  This does not imply that the SA service is
 * running locally.
 */
static inline bool rdma_cap_ib_sa(const struct ib_device *device, u8 port_num)
{
	return device->port_data[port_num].immutable.core_cap_flags &
	       RDMA_CORE_CAP_IB_SA;
}
/**
 * rdma_cap_ib_mcast - Check if the port of device has the capability Infiniband
 * Multicast.
 * @device: Device to check
 * @port_num: Port number to check
 *
 * InfiniBand multicast registration is more complex than normal IPv4 or
 * IPv6 multicast registration.  Each Host Channel Adapter must register
 * with the Subnet Manager when it wishes to join a multicast group.  It
 * should do so only once regardless of how many queue pairs it subscribes
 * to this group.  And it should leave the group only after all queue pairs
 * attached to the group have been detached.
 *
 * Return: true if the port must undertake the additional administrative
 * overhead of registering/unregistering with the SM and tracking of the
 * total number of queue pairs attached to the multicast group.
 */
static inline bool rdma_cap_ib_mcast(const struct ib_device *device, u8 port_num)
{
	return rdma_cap_ib_sa(device, port_num);
}

/**
 * rdma_cap_af_ib - Check if the port of device has the capability
 * Native Infiniband Address.
 * @device: Device to check
 * @port_num: Port number to check
 *
 * InfiniBand addressing uses a port's GUID + Subnet Prefix to make a default
 * GID.  RoCE uses a different mechanism, but still generates a GID via
 * a prescribed mechanism and port specific data.
 *
 * Return: true if the port uses a GID address to identify devices on the
 * network.
 */
static inline bool rdma_cap_af_ib(const struct ib_device *device, u8 port_num)
{
	return device->port_data[port_num].immutable.core_cap_flags &
	       RDMA_CORE_CAP_AF_IB;
}

/**
 * rdma_cap_eth_ah - Check if the port of device has the capability
 * Ethernet Address Handle.
 * @device: Device to check
 * @port_num: Port number to check
 *
 * RoCE is InfiniBand over Ethernet, and it uses a well defined technique
 * to fabricate GIDs over Ethernet/IP specific addresses native to the
 * port.  Normally, packet headers are generated by the sending host
 * adapter, but when sending connectionless datagrams, we must manually
 * inject the proper headers for the fabric we are communicating over.
 *
 * Return: true if we are running as a RoCE port and must force the
 * addition of a Global Route Header built from our Ethernet Address
 * Handle into our header list for connectionless packets.
 */
static inline bool rdma_cap_eth_ah(const struct ib_device *device, u8 port_num)
{
	return device->port_data[port_num].immutable.core_cap_flags &
	       RDMA_CORE_CAP_ETH_AH;
}

/**
 * rdma_cap_opa_ah - Check if the port of device supports
 * OPA Address handles
 * @device: Device to check
 * @port_num: Port number to check
 *
 * Return: true if we are running on an OPA device which supports
 * the extended OPA addressing.
 */
static inline bool rdma_cap_opa_ah(struct ib_device *device, u8 port_num)
{
	return (device->port_data[port_num].immutable.core_cap_flags &
		RDMA_CORE_CAP_OPA_AH) == RDMA_CORE_CAP_OPA_AH;
}
/**
 * rdma_max_mad_size - Return the max MAD size required by this RDMA Port.
 *
 * @device: Device
 * @port_num: Port number
 *
 * This MAD size includes the MAD headers and MAD payload.  No other headers
 * are included.
 *
 * Return the max MAD size required by the Port.  Will return 0 if the port
 * does not support MADs
 */
static inline size_t rdma_max_mad_size(const struct ib_device *device, u8 port_num)
{
	return device->port_data[port_num].immutable.max_mad_size;
}

/**
 * rdma_cap_roce_gid_table - Check if the port of device uses roce_gid_table
 * @device: Device to check
 * @port_num: Port number to check
 *
 * RoCE GID table mechanism manages the various GIDs for a device.
 *
 * NOTE: if allocating the port's GID table has failed, this call will still
 * return true, but any RoCE GID table API will fail.
 *
 * Return: true if the port uses RoCE GID table mechanism in order to manage
 * its GIDs.
 */
static inline bool rdma_cap_roce_gid_table(const struct ib_device *device,
					   u8 port_num)
{
	return rdma_protocol_roce(device, port_num) &&
		device->ops.add_gid && device->ops.del_gid;
}

/*
 * Check if the device supports READ W/ INVALIDATE.
 */
static inline bool rdma_cap_read_inv(struct ib_device *dev, u32 port_num)
{
	/*
	 * iWarp drivers must support READ W/ INVALIDATE.  No other protocol
	 * has support for it yet.
	 */
	return rdma_protocol_iwarp(dev, port_num);
}

/**
 * rdma_core_cap_opa_port - Return whether the RDMA Port is OPA or not.
 * @device: Device
 * @port_num: 1 based Port number
 *
 * Return true if port is an Intel OPA port, false if not
 */
static inline bool rdma_core_cap_opa_port(struct ib_device *device,
					  u32 port_num)
{
	return (device->port_data[port_num].immutable.core_cap_flags &
		RDMA_CORE_PORT_INTEL_OPA) == RDMA_CORE_PORT_INTEL_OPA;
}
/**
 * rdma_mtu_enum_to_int - Return the mtu of the port as an integer value.
 * @device: Device
 * @port_num: Port number
 * @mtu: enum value of MTU
 *
 * Return the MTU size supported by the port as an integer value.  Will return
 * -1 if enum value of mtu is not supported.
 */
static inline int rdma_mtu_enum_to_int(struct ib_device *device, u8 port,
				       int mtu)
{
	if (rdma_core_cap_opa_port(device, port))
		return opa_mtu_enum_to_int((enum opa_mtu)mtu);
	else
		return ib_mtu_enum_to_int((enum ib_mtu)mtu);
}

/**
 * rdma_mtu_from_attr - Return the mtu of the port from the port attribute.
 * @device: Device
 * @port_num: Port number
 * @attr: port attribute
 *
 * Return the MTU size supported by the port as an integer value.
 */
static inline int rdma_mtu_from_attr(struct ib_device *device, u8 port,
				     struct ib_port_attr *attr)
{
	if (rdma_core_cap_opa_port(device, port))
		return attr->phys_mtu;
	else
		return ib_mtu_enum_to_int(attr->max_mtu);
}
int ib_set_vf_link_state(struct ib_device *device, int vf, u8 port,
			 int state);
int ib_get_vf_config(struct ib_device *device, int vf, u8 port,
		     struct ifla_vf_info *info);
int ib_get_vf_stats(struct ib_device *device, int vf, u8 port,
		    struct ifla_vf_stats *stats);
int ib_get_vf_guid(struct ib_device *device, int vf, u8 port,
		   struct ifla_vf_guid *node_guid,
		   struct ifla_vf_guid *port_guid);
int ib_set_vf_guid(struct ib_device *device, int vf, u8 port, u64 guid,
		   int type);

int ib_query_pkey(struct ib_device *device,
		  u8 port_num, u16 index, u16 *pkey);

int ib_modify_device(struct ib_device *device,
		     int device_modify_mask,
		     struct ib_device_modify *device_modify);

int ib_modify_port(struct ib_device *device,
		   u8 port_num, int port_modify_mask,
		   struct ib_port_modify *port_modify);

int ib_find_gid(struct ib_device *device, union ib_gid *gid,
		u8 *port_num, u16 *index);

int ib_find_pkey(struct ib_device *device,
		 u8 port_num, u16 pkey, u16 *index);

enum ib_pd_flags {
	/*
	 * Create a memory registration for all memory in the system and place
	 * the rkey for it into pd->unsafe_global_rkey.  This can be used by
	 * ULPs to avoid the overhead of dynamic MRs.
	 *
	 * This flag is generally considered unsafe and must only be used in
	 * extremely trusted environments.  Every use of it will log a warning
	 * in the kernel log.
	 */
	IB_PD_UNSAFE_GLOBAL_RKEY	= 0x01,
};
struct ib_pd *__ib_alloc_pd(struct ib_device *device, unsigned int flags,
			    const char *caller);

/**
 * ib_alloc_pd - Allocates an unused protection domain.
 * @device: The device on which to allocate the protection domain.
 * @flags: protection domain flags
 *
 * A protection domain object provides an association between QPs, shared
 * receive queues, address handles, memory regions, and memory windows.
 *
 * Every PD has a local_dma_lkey which can be used as the lkey value for local
 * memory operations.
 */
#define ib_alloc_pd(device, flags) \
	__ib_alloc_pd((device), (flags), KBUILD_MODNAME)

int ib_dealloc_pd_user(struct ib_pd *pd, struct ib_udata *udata);

/**
 * ib_dealloc_pd - Deallocate kernel PD
 * @pd: The protection domain
 *
 * NOTE: for user PD use ib_dealloc_pd_user with valid udata!
 */
static inline void ib_dealloc_pd(struct ib_pd *pd)
{
	int ret = ib_dealloc_pd_user(pd, NULL);

	WARN_ONCE(ret, "Destroy of kernel PD shouldn't fail");
}
enum rdma_create_ah_flags {
	/* In a sleepable context */
	RDMA_CREATE_AH_SLEEPABLE = BIT(0),
};

/**
 * rdma_create_ah - Creates an address handle for the given address vector.
 * @pd: The protection domain associated with the address handle.
 * @ah_attr: The attributes of the address vector.
 * @flags: Create address handle flags (see enum rdma_create_ah_flags).
 *
 * The address handle is used to reference a local or global destination
 * in all UD QP post sends.
 */
struct ib_ah *rdma_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr,
			     u32 flags);

/**
 * rdma_create_user_ah - Creates an address handle for the given address vector.
 * It resolves destination mac address for ah attribute of RoCE type.
 * @pd: The protection domain associated with the address handle.
 * @ah_attr: The attributes of the address vector.
 * @udata: pointer to user's input output buffer information needed by
 *         provider driver.
 *
 * It returns 0 on success and returns appropriate error code on error.
 * The address handle is used to reference a local or global destination
 * in all UD QP post sends.
 */
struct ib_ah *rdma_create_user_ah(struct ib_pd *pd,
				  struct rdma_ah_attr *ah_attr,
				  struct ib_udata *udata);
/**
 * ib_get_gids_from_rdma_hdr - Get sgid and dgid from GRH or IPv4 header
 * @hdr: the L3 header to parse
 * @net_type: type of header to parse
 * @sgid: place to store source gid
 * @dgid: place to store destination gid
 */
int ib_get_gids_from_rdma_hdr(const union rdma_network_hdr *hdr,
			      enum rdma_network_type net_type,
			      union ib_gid *sgid, union ib_gid *dgid);

/**
 * ib_get_rdma_header_version - Get the header version
 * @hdr: the L3 header to parse
 */
int ib_get_rdma_header_version(const union rdma_network_hdr *hdr);

/**
 * ib_init_ah_attr_from_wc - Initializes address handle attributes from a
 *   work completion.
 * @device: Device on which the received message arrived.
 * @port_num: Port on which the received message arrived.
 * @wc: Work completion associated with the received message.
 * @grh: References the received global route header.  This parameter is
 *   ignored unless the work completion indicates that the GRH is valid.
 * @ah_attr: Returned attributes that can be used when creating an address
 *   handle for replying to the message.
 * When ib_init_ah_attr_from_wc() returns success,
 * (a) for IB link layer it optionally contains a reference to SGID attribute
 * when GRH is present for IB link layer.
 * (b) for RoCE link layer it contains a reference to SGID attribute.
 * User must invoke rdma_cleanup_ah_attr_gid_attr() to release reference to SGID
 * attributes which are initialized using ib_init_ah_attr_from_wc().
 */
int ib_init_ah_attr_from_wc(struct ib_device *device, u8 port_num,
			    const struct ib_wc *wc, const struct ib_grh *grh,
			    struct rdma_ah_attr *ah_attr);

/**
 * ib_create_ah_from_wc - Creates an address handle associated with the
 *   sender of the specified work completion.
 * @pd: The protection domain associated with the address handle.
 * @wc: Work completion information associated with a received message.
 * @grh: References the received global route header.  This parameter is
 *   ignored unless the work completion indicates that the GRH is valid.
 * @port_num: The outbound port number to associate with the address.
 *
 * The address handle is used to reference a local or global destination
 * in all UD QP post sends.
 */
struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, const struct ib_wc *wc,
				   const struct ib_grh *grh, u8 port_num);
/**
 * rdma_modify_ah - Modifies the address vector associated with an address
 *   handle.
 * @ah: The address handle to modify.
 * @ah_attr: The new address vector attributes to associate with the
 *   address handle.
 */
int rdma_modify_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);

/**
 * rdma_query_ah - Queries the address vector associated with an address
 *   handle.
 * @ah: The address handle to query.
 * @ah_attr: The address vector attributes associated with the address
 *   handle.
 */
int rdma_query_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);

enum rdma_destroy_ah_flags {
	/* In a sleepable context */
	RDMA_DESTROY_AH_SLEEPABLE = BIT(0),
};

/**
 * rdma_destroy_ah_user - Destroys an address handle.
 * @ah: The address handle to destroy.
 * @flags: Destroy address handle flags (see enum rdma_destroy_ah_flags).
 * @udata: Valid user data or NULL for kernel objects
 */
int rdma_destroy_ah_user(struct ib_ah *ah, u32 flags, struct ib_udata *udata);

/**
 * rdma_destroy_ah - Destroys a kernel address handle.
 * @ah: The address handle to destroy.
 * @flags: Destroy address handle flags (see enum rdma_destroy_ah_flags).
 *
 * NOTE: for user ah use rdma_destroy_ah_user with valid udata!
 */
static inline void rdma_destroy_ah(struct ib_ah *ah, u32 flags)
{
	int ret = rdma_destroy_ah_user(ah, flags, NULL);

	WARN_ONCE(ret, "Destroy of kernel AH shouldn't fail");
}
struct ib_srq *ib_create_srq_user(struct ib_pd *pd,
				  struct ib_srq_init_attr *srq_init_attr,
				  struct ib_usrq_object *uobject,
				  struct ib_udata *udata);
static inline struct ib_srq *
ib_create_srq(struct ib_pd *pd, struct ib_srq_init_attr *srq_init_attr)
{
	if (!pd->device->ops.create_srq)
		return ERR_PTR(-EOPNOTSUPP);

	return ib_create_srq_user(pd, srq_init_attr, NULL, NULL);
}
/**
 * ib_modify_srq - Modifies the attributes for the specified SRQ.
 * @srq: The SRQ to modify.
 * @srq_attr: On input, specifies the SRQ attributes to modify.  On output,
 *   the current values of selected SRQ attributes are returned.
 * @srq_attr_mask: A bit-mask used to specify which attributes of the SRQ
 *   are being modified.
 *
 * The mask may contain IB_SRQ_MAX_WR to resize the SRQ and/or
 * IB_SRQ_LIMIT to set the SRQ's limit and request notification when
 * the number of receives queued drops below the limit.
 */
int ib_modify_srq(struct ib_srq *srq,
		  struct ib_srq_attr *srq_attr,
		  enum ib_srq_attr_mask srq_attr_mask);

/**
 * ib_query_srq - Returns the attribute list and current values for the
 *   specified SRQ.
 * @srq: The SRQ to query.
 * @srq_attr: The attributes of the specified SRQ.
 */
int ib_query_srq(struct ib_srq *srq,
		 struct ib_srq_attr *srq_attr);

/**
 * ib_destroy_srq_user - Destroys the specified SRQ.
 * @srq: The SRQ to destroy.
 * @udata: Valid user data or NULL for kernel objects
 */
int ib_destroy_srq_user(struct ib_srq *srq, struct ib_udata *udata);

/**
 * ib_destroy_srq - Destroys the specified kernel SRQ.
 * @srq: The SRQ to destroy.
 *
 * NOTE: for user srq use ib_destroy_srq_user with valid udata!
 */
static inline void ib_destroy_srq(struct ib_srq *srq)
{
	int ret = ib_destroy_srq_user(srq, NULL);

	WARN_ONCE(ret, "Destroy of kernel SRQ shouldn't fail");
}

/**
 * ib_post_srq_recv - Posts a list of work requests to the specified SRQ.
 * @srq: The SRQ to post the work request on.
 * @recv_wr: A list of work requests to post on the receive queue.
 * @bad_recv_wr: On an immediate failure, this parameter will reference
 *   the work request that failed to be posted on the QP.
 */
static inline int ib_post_srq_recv(struct ib_srq *srq,
				   const struct ib_recv_wr *recv_wr,
				   const struct ib_recv_wr **bad_recv_wr)
{
	const struct ib_recv_wr *dummy;

	return srq->device->ops.post_srq_recv(srq, recv_wr,
					      bad_recv_wr ? : &dummy);
}
struct ib_qp *ib_create_named_qp(struct ib_pd *pd,
				 struct ib_qp_init_attr *qp_init_attr,
				 const char *caller);
static inline struct ib_qp *ib_create_qp(struct ib_pd *pd,
					 struct ib_qp_init_attr *init_attr)
{
	return ib_create_named_qp(pd, init_attr, KBUILD_MODNAME);
}
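
/*
 * Example: creating a kernel RC QP against an existing PD and CQ
 * (sizing values are illustrative only):
 *
 *	struct ib_qp_init_attr init_attr = {
 *		.send_cq = cq,
 *		.recv_cq = cq,
 *		.cap = {
 *			.max_send_wr  = 16,
 *			.max_recv_wr  = 16,
 *			.max_send_sge = 1,
 *			.max_recv_sge = 1,
 *		},
 *		.sq_sig_type = IB_SIGNAL_REQ_WR,
 *		.qp_type = IB_QPT_RC,
 *	};
 *	struct ib_qp *qp = ib_create_qp(pd, &init_attr);
 *
 *	if (IS_ERR(qp))
 *		return PTR_ERR(qp);
 */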
/**
 * ib_modify_qp_with_udata - Modifies the attributes for the specified QP.
 * @qp: The QP to modify.
 * @attr: On input, specifies the QP attributes to modify.  On output,
 *   the current values of selected QP attributes are returned.
 * @attr_mask: A bit-mask used to specify which attributes of the QP
 *   are being modified.
 * @udata: pointer to user's input output buffer information
 *
 * It returns 0 on success and returns appropriate error code on error.
 */
int ib_modify_qp_with_udata(struct ib_qp *qp,
			    struct ib_qp_attr *attr,
			    int attr_mask,
			    struct ib_udata *udata);

/**
 * ib_modify_qp - Modifies the attributes for the specified QP and then
 *   transitions the QP to the given state.
 * @qp: The QP to modify.
 * @qp_attr: On input, specifies the QP attributes to modify.  On output,
 *   the current values of selected QP attributes are returned.
 * @qp_attr_mask: A bit-mask used to specify which attributes of the QP
 *   are being modified.
 */
int ib_modify_qp(struct ib_qp *qp,
		 struct ib_qp_attr *qp_attr,
		 int qp_attr_mask);

/**
 * ib_query_qp - Returns the attribute list and current values for the
 *   specified QP.
 * @qp: The QP to query.
 * @qp_attr: The attributes of the specified QP.
 * @qp_attr_mask: A bit-mask used to select specific attributes to query.
 * @qp_init_attr: Additional attributes of the selected QP.
 *
 * The qp_attr_mask may be used to limit the query to gathering only the
 * selected attributes.
 */
int ib_query_qp(struct ib_qp *qp,
		struct ib_qp_attr *qp_attr,
		int qp_attr_mask,
		struct ib_qp_init_attr *qp_init_attr);

/**
 * ib_destroy_qp_user - Destroys the specified QP.
 * @qp: The QP to destroy.
 * @udata: Valid udata or NULL for kernel objects
 */
int ib_destroy_qp_user(struct ib_qp *qp, struct ib_udata *udata);

/**
 * ib_destroy_qp - Destroys the specified kernel QP.
 * @qp: The QP to destroy.
 *
 * NOTE: for user qp use ib_destroy_qp_user with valid udata!
 */
static inline int ib_destroy_qp(struct ib_qp *qp)
{
	return ib_destroy_qp_user(qp, NULL);
}

/**
 * ib_open_qp - Obtain a reference to an existing sharable QP.
 * @xrcd - XRC domain
 * @qp_open_attr: Attributes identifying the QP to open.
 *
 * Returns a reference to a sharable QP.
 */
struct ib_qp *ib_open_qp(struct ib_xrcd *xrcd,
			 struct ib_qp_open_attr *qp_open_attr);

/**
 * ib_close_qp - Release an external reference to a QP.
 * @qp: The QP handle to release
 *
 * The opened QP handle is released by the caller.  The underlying
 * shared QP is not destroyed until all internal references are released.
 */
int ib_close_qp(struct ib_qp *qp);
/**
 * ib_post_send - Posts a list of work requests to the send queue of
 *   the specified QP.
 * @qp: The QP to post the work request on.
 * @send_wr: A list of work requests to post on the send queue.
 * @bad_send_wr: On an immediate failure, this parameter will reference
 *   the work request that failed to be posted on the QP.
 *
 * While IBA Vol. 1 section 11.4.1.1 specifies that if an immediate
 * error is returned, the QP state shall not be affected,
 * ib_post_send() will return an immediate error after queueing any
 * earlier work requests in the list.
 */
static inline int ib_post_send(struct ib_qp *qp,
			       const struct ib_send_wr *send_wr,
			       const struct ib_send_wr **bad_send_wr)
{
	const struct ib_send_wr *dummy;

	return qp->device->ops.post_send(qp, send_wr, bad_send_wr ? : &dummy);
}
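
/*
 * Example: posting one signaled SEND of a single SGE.  "dma_addr" and
 * "len" describe a buffer previously mapped with the ib_dma_* helpers,
 * and the PD's local_dma_lkey is used as the lkey:
 *
 *	struct ib_sge sge = {
 *		.addr   = dma_addr,
 *		.length = len,
 *		.lkey   = qp->pd->local_dma_lkey,
 *	};
 *	struct ib_send_wr wr = {
 *		.opcode     = IB_WR_SEND,
 *		.send_flags = IB_SEND_SIGNALED,
 *		.sg_list    = &sge,
 *		.num_sge    = 1,
 *	};
 *
 *	ret = ib_post_send(qp, &wr, NULL);
 */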
/**
 * ib_post_recv - Posts a list of work requests to the receive queue of
 *   the specified QP.
 * @qp: The QP to post the work request on.
 * @recv_wr: A list of work requests to post on the receive queue.
 * @bad_recv_wr: On an immediate failure, this parameter will reference
 *   the work request that failed to be posted on the QP.
 */
static inline int ib_post_recv(struct ib_qp *qp,
			       const struct ib_recv_wr *recv_wr,
			       const struct ib_recv_wr **bad_recv_wr)
{
	const struct ib_recv_wr *dummy;

	return qp->device->ops.post_recv(qp, recv_wr, bad_recv_wr ? : &dummy);
}
struct ib_cq *__ib_alloc_cq(struct ib_device *dev, void *private, int nr_cqe,
			    int comp_vector, enum ib_poll_context poll_ctx,
			    const char *caller);
static inline struct ib_cq *ib_alloc_cq(struct ib_device *dev, void *private,
					int nr_cqe, int comp_vector,
					enum ib_poll_context poll_ctx)
{
	return __ib_alloc_cq(dev, private, nr_cqe, comp_vector, poll_ctx,
			     KBUILD_MODNAME);
}

struct ib_cq *__ib_alloc_cq_any(struct ib_device *dev, void *private,
				int nr_cqe, enum ib_poll_context poll_ctx,
				const char *caller);

/**
 * ib_alloc_cq_any: Allocate kernel CQ
 * @dev: The IB device
 * @private: Private data attached to the CQE
 * @nr_cqe: Number of CQEs in the CQ
 * @poll_ctx: Context used for polling the CQ
 */
static inline struct ib_cq *ib_alloc_cq_any(struct ib_device *dev,
					    void *private, int nr_cqe,
					    enum ib_poll_context poll_ctx)
{
	return __ib_alloc_cq_any(dev, private, nr_cqe, poll_ctx,
				 KBUILD_MODNAME);
}

void ib_free_cq(struct ib_cq *cq);
int ib_process_cq_direct(struct ib_cq *cq, int budget);
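
/*
 * Example: allocating a kernel CQ when any completion vector will do,
 * and releasing it again ("ctx" is an opaque consumer pointer):
 *
 *	struct ib_cq *cq = ib_alloc_cq_any(ibdev, ctx, 128, IB_POLL_SOFTIRQ);
 *
 *	if (IS_ERR(cq))
 *		return PTR_ERR(cq);
 *	...
 *	ib_free_cq(cq);
 */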
/**
 * ib_create_cq - Creates a CQ on the specified device.
 * @device: The device on which to create the CQ.
 * @comp_handler: A user-specified callback that is invoked when a
 *   completion event occurs on the CQ.
 * @event_handler: A user-specified callback that is invoked when an
 *   asynchronous event not associated with a completion occurs on the CQ.
 * @cq_context: Context associated with the CQ returned to the user via
 *   the associated completion and event handlers.
 * @cq_attr: The attributes the CQ should be created upon.
 *
 * Users can examine the cq structure to determine the actual CQ size.
 */
struct ib_cq *__ib_create_cq(struct ib_device *device,
			     ib_comp_handler comp_handler,
			     void (*event_handler)(struct ib_event *, void *),
			     void *cq_context,
			     const struct ib_cq_init_attr *cq_attr,
			     const char *caller);
#define ib_create_cq(device, cmp_hndlr, evt_hndlr, cq_ctxt, cq_attr) \
	__ib_create_cq((device), (cmp_hndlr), (evt_hndlr), (cq_ctxt), (cq_attr), KBUILD_MODNAME)

/**
 * ib_resize_cq - Modifies the capacity of the CQ.
 * @cq: The CQ to resize.
 * @cqe: The minimum size of the CQ.
 *
 * Users can examine the cq structure to determine the actual CQ size.
 */
int ib_resize_cq(struct ib_cq *cq, int cqe);

/**
 * rdma_set_cq_moderation - Modifies moderation params of the CQ
 * @cq: The CQ to modify.
 * @cq_count: number of CQEs that will trigger an event
 * @cq_period: max period of time in usec before triggering an event
 */
int rdma_set_cq_moderation(struct ib_cq *cq, u16 cq_count, u16 cq_period);

/**
 * ib_destroy_cq_user - Destroys the specified CQ.
 * @cq: The CQ to destroy.
 * @udata: Valid user data or NULL for kernel objects
 */
int ib_destroy_cq_user(struct ib_cq *cq, struct ib_udata *udata);

/**
 * ib_destroy_cq - Destroys the specified kernel CQ.
 * @cq: The CQ to destroy.
 *
 * NOTE: for user cq use ib_destroy_cq_user with valid udata!
 */
static inline void ib_destroy_cq(struct ib_cq *cq)
{
	int ret = ib_destroy_cq_user(cq, NULL);

	WARN_ONCE(ret, "Destroy of kernel CQ shouldn't fail");
}
/**
 * ib_poll_cq - poll a CQ for completion(s)
 * @cq: the CQ being polled
 * @num_entries: maximum number of completions to return
 * @wc: array of at least @num_entries &struct ib_wc where completions
 *   will be returned
 *
 * Poll a CQ for (possibly multiple) completions.  If the return value
 * is < 0, an error occurred.  If the return value is >= 0, it is the
 * number of completions returned.  If the return value is
 * non-negative and < num_entries, then the CQ was emptied.
 */
static inline int ib_poll_cq(struct ib_cq *cq, int num_entries,
			     struct ib_wc *wc)
{
	return cq->device->ops.poll_cq(cq, num_entries, wc);
}

/**
 * ib_req_notify_cq - Request completion notification on a CQ.
 * @cq: The CQ to generate an event for.
 * @flags:
 *   Must contain exactly one of %IB_CQ_SOLICITED or %IB_CQ_NEXT_COMP
 *   to request an event on the next solicited event or next work
 *   completion at any type, respectively.  %IB_CQ_REPORT_MISSED_EVENTS
 *   may also be |ed in to request a hint about missed events, as
 *   described below.
 *
 * Return Value:
 *    < 0 means an error occurred while requesting notification
 *   == 0 means notification was requested successfully, and if
 *        IB_CQ_REPORT_MISSED_EVENTS was passed in, then no events
 *        were missed and it is safe to wait for another event.  In
 *        this case is it guaranteed that any work completions added
 *        to the CQ since the last CQ poll will trigger a completion
 *        notification event.
 *    > 0 is only returned if IB_CQ_REPORT_MISSED_EVENTS was passed
 *        in.  It means that the consumer must poll the CQ again to
 *        make sure it is empty to avoid missing an event because of a
 *        race between requesting notification and an entry being
 *        added to the CQ.  This return value means it is possible
 *        (but not guaranteed) that a work completion has been added
 *        to the CQ since the last poll without triggering a
 *        completion notification event.
 */
static inline int ib_req_notify_cq(struct ib_cq *cq,
				   enum ib_cq_notify_flags flags)
{
	return cq->device->ops.req_notify_cq(cq, flags);
}
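
/*
 * Example: the canonical drain-then-rearm loop built from the two
 * helpers above ("handle_wc" is a hypothetical consumer handler); a
 * positive return from ib_req_notify_cq() forces another drain pass:
 *
 *	struct ib_wc wc;
 *
 *	do {
 *		while (ib_poll_cq(cq, 1, &wc) > 0)
 *			handle_wc(&wc);
 *	} while (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
 *				      IB_CQ_REPORT_MISSED_EVENTS) > 0);
 */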
struct ib_cq *ib_cq_pool_get(struct ib_device *dev, unsigned int nr_cqe,
			     int comp_vector_hint,
			     enum ib_poll_context poll_ctx);

void ib_cq_pool_put(struct ib_cq *cq, unsigned int nr_cqe);

/**
 * ib_req_ncomp_notif - Request completion notification when there are
 *   at least the specified number of unreaped completions on the CQ.
 * @cq: The CQ to generate an event for.
 * @wc_cnt: The number of unreaped completions that should be on the
 *   CQ before an event is generated.
 */
static inline int ib_req_ncomp_notif(struct ib_cq *cq, int wc_cnt)
{
	return cq->device->ops.req_ncomp_notif ?
		cq->device->ops.req_ncomp_notif(cq, wc_cnt) :
		-ENOSYS;
}
/*
 * Drivers that don't need a DMA mapping at the RDMA layer, set dma_device to
 * NULL. This causes the ib_dma* helpers to just stash the kernel virtual
 * address into the dma address.
 */
static inline bool ib_uses_virt_dma(struct ib_device *dev)
{
	return IS_ENABLED(CONFIG_INFINIBAND_VIRT_DMA) && !dev->dma_device;
}

/**
 * ib_dma_mapping_error - check a DMA addr for error
 * @dev: The device for which the dma_addr was created
 * @dma_addr: The DMA address to check
 */
static inline int ib_dma_mapping_error(struct ib_device *dev, u64 dma_addr)
{
	if (ib_uses_virt_dma(dev))
		return 0;
	return dma_mapping_error(dev->dma_device, dma_addr);
}

/**
 * ib_dma_map_single - Map a kernel virtual address to DMA address
 * @dev: The device for which the dma_addr is to be created
 * @cpu_addr: The kernel virtual address
 * @size: The size of the region in bytes
 * @direction: The direction of the DMA
 */
static inline u64 ib_dma_map_single(struct ib_device *dev,
				    void *cpu_addr, size_t size,
				    enum dma_data_direction direction)
{
	if (ib_uses_virt_dma(dev))
		return (uintptr_t)cpu_addr;
	return dma_map_single(dev->dma_device, cpu_addr, size, direction);
}

/**
 * ib_dma_unmap_single - Destroy a mapping created by ib_dma_map_single()
 * @dev: The device for which the DMA address was created
 * @addr: The DMA address
 * @size: The size of the region in bytes
 * @direction: The direction of the DMA
 */
static inline void ib_dma_unmap_single(struct ib_device *dev,
				       u64 addr, size_t size,
				       enum dma_data_direction direction)
{
	if (!ib_uses_virt_dma(dev))
		dma_unmap_single(dev->dma_device, addr, size, direction);
}
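
/*
 * Example: mapping a kernel buffer for transmission; on virt-DMA
 * devices the returned value is simply the kernel virtual address, so
 * callers must not interpret it:
 *
 *	u64 dma = ib_dma_map_single(ibdev, buf, len, DMA_TO_DEVICE);
 *
 *	if (ib_dma_mapping_error(ibdev, dma))
 *		return -ENOMEM;
 *	...post a WR referencing dma...
 *	ib_dma_unmap_single(ibdev, dma, len, DMA_TO_DEVICE);
 */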
/**
 * ib_dma_map_page - Map a physical page to DMA address
 * @dev: The device for which the dma_addr is to be created
 * @page: The page to be mapped
 * @offset: The offset within the page
 * @size: The size of the region in bytes
 * @direction: The direction of the DMA
 */
static inline u64 ib_dma_map_page(struct ib_device *dev,
				  struct page *page,
				  unsigned long offset,
				  size_t size,
				  enum dma_data_direction direction)
{
	if (ib_uses_virt_dma(dev))
		return (uintptr_t)(page_address(page) + offset);
	return dma_map_page(dev->dma_device, page, offset, size, direction);
}

/**
 * ib_dma_unmap_page - Destroy a mapping created by ib_dma_map_page()
 * @dev: The device for which the DMA address was created
 * @addr: The DMA address
 * @size: The size of the region in bytes
 * @direction: The direction of the DMA
 */
static inline void ib_dma_unmap_page(struct ib_device *dev,
				     u64 addr, size_t size,
				     enum dma_data_direction direction)
{
	if (!ib_uses_virt_dma(dev))
		dma_unmap_page(dev->dma_device, addr, size, direction);
}

int ib_dma_virt_map_sg(struct ib_device *dev, struct scatterlist *sg, int nents);
static inline int ib_dma_map_sg_attrs(struct ib_device *dev,
				      struct scatterlist *sg, int nents,
				      enum dma_data_direction direction,
				      unsigned long dma_attrs)
{
	if (ib_uses_virt_dma(dev))
		return ib_dma_virt_map_sg(dev, sg, nents);
	return dma_map_sg_attrs(dev->dma_device, sg, nents, direction,
				dma_attrs);
}

static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev,
					 struct scatterlist *sg, int nents,
					 enum dma_data_direction direction,
					 unsigned long dma_attrs)
{
	if (!ib_uses_virt_dma(dev))
		dma_unmap_sg_attrs(dev->dma_device, sg, nents, direction,
				   dma_attrs);
}

/**
 * ib_dma_map_sg - Map a scatter/gather list to DMA addresses
 * @dev: The device for which the DMA addresses are to be created
 * @sg: The array of scatter/gather entries
 * @nents: The number of scatter/gather entries
 * @direction: The direction of the DMA
 */
static inline int ib_dma_map_sg(struct ib_device *dev,
				struct scatterlist *sg, int nents,
				enum dma_data_direction direction)
{
	return ib_dma_map_sg_attrs(dev, sg, nents, direction, 0);
}

/**
 * ib_dma_unmap_sg - Unmap a scatter/gather list of DMA addresses
 * @dev: The device for which the DMA addresses were created
 * @sg: The array of scatter/gather entries
 * @nents: The number of scatter/gather entries
 * @direction: The direction of the DMA
 */
static inline void ib_dma_unmap_sg(struct ib_device *dev,
				   struct scatterlist *sg, int nents,
				   enum dma_data_direction direction)
{
	ib_dma_unmap_sg_attrs(dev, sg, nents, direction, 0);
}
/**
 * ib_dma_max_seg_size - Return the size limit of a single DMA transfer
 * @dev: The device to query
 *
 * The returned value represents a size in bytes.
 */
static inline unsigned int ib_dma_max_seg_size(struct ib_device *dev)
{
	if (ib_uses_virt_dma(dev))
		return UINT_MAX;
	return dma_get_max_seg_size(dev->dma_device);
}

/**
 * ib_dma_sync_single_for_cpu - Prepare DMA region to be accessed by CPU
 * @dev: The device for which the DMA address was created
 * @addr: The DMA address
 * @size: The size of the region in bytes
 * @dir: The direction of the DMA
 */
static inline void ib_dma_sync_single_for_cpu(struct ib_device *dev,
					      u64 addr,
					      size_t size,
					      enum dma_data_direction dir)
{
	if (!ib_uses_virt_dma(dev))
		dma_sync_single_for_cpu(dev->dma_device, addr, size, dir);
}

/**
 * ib_dma_sync_single_for_device - Prepare DMA region to be accessed by device
 * @dev: The device for which the DMA address was created
 * @addr: The DMA address
 * @size: The size of the region in bytes
 * @dir: The direction of the DMA
 */
static inline void ib_dma_sync_single_for_device(struct ib_device *dev,
						 u64 addr,
						 size_t size,
						 enum dma_data_direction dir)
{
	if (!ib_uses_virt_dma(dev))
		dma_sync_single_for_device(dev->dma_device, addr, size, dir);
}

/* ib_reg_user_mr - register a memory region for virtual addresses from kernel
 * space. This function should be called when 'current' is the owning MM.
 */
struct ib_mr *ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
			     u64 virt_addr, int mr_access_flags);

/* ib_advise_mr - give an advice about an address range in a memory region */
int ib_advise_mr(struct ib_pd *pd, enum ib_uverbs_advise_mr_advice advice,
		 u32 flags, struct ib_sge *sg_list, u32 num_sge);
/**
 * ib_dereg_mr_user - Deregisters a memory region and removes it from the
 *   HCA translation table.
 * @mr: The memory region to deregister.
 * @udata: Valid user data or NULL for kernel object
 *
 * This function can fail if the memory region has memory windows bound to it.
 */
int ib_dereg_mr_user(struct ib_mr *mr, struct ib_udata *udata);

/**
 * ib_dereg_mr - Deregisters a kernel memory region and removes it from the
 *   HCA translation table.
 * @mr: The memory region to deregister.
 *
 * This function can fail if the memory region has memory windows bound to it.
 *
 * NOTE: for user mr use ib_dereg_mr_user with valid udata!
 */
static inline int ib_dereg_mr(struct ib_mr *mr)
{
	return ib_dereg_mr_user(mr, NULL);
}

struct ib_mr *ib_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
			  u32 max_num_sg);

struct ib_mr *ib_alloc_mr_integrity(struct ib_pd *pd,
				    u32 max_num_data_sg,
				    u32 max_num_meta_sg);

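/*
 * Example (illustrative sketch only): typical lifetime of a kernel MR
 * used for fast registration.  pd and max_sg are hypothetical.
 *
 *	struct ib_mr *mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, max_sg);
 *
 *	if (IS_ERR(mr))
 *		return PTR_ERR(mr);
 *	// ... map pages into the MR and post an IB_WR_REG_MR request ...
 *	ib_dereg_mr(mr);
 */
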
/**
 * ib_update_fast_reg_key - updates the key portion of the fast_reg MR
 * @mr - struct ib_mr pointer to be updated.
 * @newkey - new key to be used.
 */
static inline void ib_update_fast_reg_key(struct ib_mr *mr, u8 newkey)
{
	mr->lkey = (mr->lkey & 0xffffff00) | newkey;
	mr->rkey = (mr->rkey & 0xffffff00) | newkey;
}

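/*
 * Worked example: with mr->lkey == 0x12345678 and newkey == 0x01,
 * (0x12345678 & 0xffffff00) | 0x01 yields 0x12345601 - only the low
 * "key" byte changes, the upper 24 "index" bits are preserved.
 */
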
/**
 * ib_inc_rkey - increments the key portion of the given rkey. Can be used
 * for calculating a new rkey for type 2 memory windows.
 * @rkey - the rkey to increment.
 */
static inline u32 ib_inc_rkey(u32 rkey)
{
	const u32 mask = 0x000000ff;

	return ((rkey + 1) & mask) | (rkey & ~mask);
}

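/*
 * Worked example: ib_inc_rkey(0x000011ff) computes
 * ((0x1200 & 0xff) | (0x11ff & ~0xff)) == 0x00001100 - the low key byte
 * wraps from 0xff to 0x00 while the 24 bit index stays 0x000011.
 */
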
/**
 * ib_attach_mcast - Attaches the specified QP to a multicast group.
 * @qp: QP to attach to the multicast group.  The QP must be type
 *   IB_QPT_UD.
 * @gid: Multicast group GID.
 * @lid: Multicast group LID in host byte order.
 *
 * In order to send and receive multicast packets, subnet
 * administration must have created the multicast group and configured
 * the fabric appropriately.  The port associated with the specified
 * QP must also be a member of the multicast group.
 */
int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);

/**
 * ib_detach_mcast - Detaches the specified QP from a multicast group.
 * @qp: QP to detach from the multicast group.
 * @gid: Multicast group GID.
 * @lid: Multicast group LID in host byte order.
 */
int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);

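/*
 * Example (illustrative sketch only): joining and leaving a multicast
 * group on a UD QP.  qp, mgid and mlid are hypothetical and would
 * normally come from an SA join response.
 *
 *	ret = ib_attach_mcast(qp, &mgid, mlid);
 *	if (ret)
 *		return ret;
 *	// ... send/receive multicast traffic ...
 *	ib_detach_mcast(qp, &mgid, mlid);
 */
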
struct ib_xrcd *ib_alloc_xrcd_user(struct ib_device *device,
				   struct inode *inode, struct ib_udata *udata);
int ib_dealloc_xrcd_user(struct ib_xrcd *xrcd, struct ib_udata *udata);

static inline int ib_check_mr_access(struct ib_device *ib_dev,
				     unsigned int flags)
{
	/*
	 * Local write permission is required if remote write or
	 * remote atomic permission is also requested.
	 */
	if (flags & (IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_REMOTE_WRITE) &&
	    !(flags & IB_ACCESS_LOCAL_WRITE))
		return -EINVAL;

	if (flags & ~IB_ACCESS_SUPPORTED)
		return -EINVAL;

	if (flags & IB_ACCESS_ON_DEMAND &&
	    !(ib_dev->attrs.device_cap_flags & IB_DEVICE_ON_DEMAND_PAGING))
		return -EINVAL;
	return 0;
}

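/*
 * Example: IB_ACCESS_REMOTE_WRITE without IB_ACCESS_LOCAL_WRITE is
 * rejected, so a caller validating user-supplied flags might do:
 *
 *	if (ib_check_mr_access(ib_dev, IB_ACCESS_REMOTE_WRITE))
 *		return -EINVAL;	// fails: local write not also set
 */
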
static inline bool ib_access_writable(int access_flags)
{
	/*
	 * We have writable memory backing the MR if any of the following
	 * access flags are set.  "Local write" and "remote write" obviously
	 * require write access.  "Remote atomic" can do things like fetch and
	 * add, which will modify memory, and "MW bind" can change permissions
	 * by binding a window.
	 */
	return access_flags &
		(IB_ACCESS_LOCAL_WRITE   | IB_ACCESS_REMOTE_WRITE |
		 IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_MW_BIND);
}

/**
 * ib_check_mr_status: lightweight check of MR status.
 *     This routine may provide status checks on a selected
 *     ib_mr. Its first use is for signature status checks.
 *
 * @mr: A memory region.
 * @check_mask: Bitmask of which checks to perform from
 *     ib_mr_status_check enumeration.
 * @mr_status: The container of relevant status checks.
 *     Failed checks will be indicated in the status bitmask
 *     and the relevant info shall be in the error item.
 */
int ib_check_mr_status(struct ib_mr *mr, u32 check_mask,
		       struct ib_mr_status *mr_status);

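/*
 * Example (illustrative sketch only): checking the signature status of
 * an integrity MR after a transfer completes.  mr is hypothetical.
 *
 *	struct ib_mr_status mr_status;
 *
 *	ret = ib_check_mr_status(mr, IB_MR_CHECK_SIG_STATUS, &mr_status);
 *	if (!ret && (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS))
 *		// signature error details are in mr_status.sig_err
 */
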
/**
 * ib_device_try_get: Hold a registration lock
 * device: The device to lock
 *
 * A device under an active registration lock cannot become unregistered. It
 * is only possible to obtain a registration lock on a device that is fully
 * registered, otherwise this function returns false.
 *
 * The registration lock is only necessary for actions which require the
 * device to still be registered. Uses that only require the device pointer to
 * be valid should use get_device(&ibdev->dev) to hold the memory.
 */
static inline bool ib_device_try_get(struct ib_device *dev)
{
	return refcount_inc_not_zero(&dev->refcount);
}

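/*
 * Example: the canonical registration-lock pattern.  The body only runs
 * while the device is guaranteed to remain registered.
 *
 *	if (ib_device_try_get(dev)) {
 *		// ... act on the still-registered device ...
 *		ib_device_put(dev);
 *	}
 */
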
void ib_device_put(struct ib_device *device);
struct ib_device *ib_device_get_by_netdev(struct net_device *ndev,
					  enum rdma_driver_id driver_id);
struct ib_device *ib_device_get_by_name(const char *name,
					enum rdma_driver_id driver_id);
struct net_device *ib_get_net_dev_by_params(struct ib_device *dev, u8 port,
					    u16 pkey, const union ib_gid *gid,
					    const struct sockaddr *addr);
int ib_device_set_netdev(struct ib_device *ib_dev, struct net_device *ndev,
			 unsigned int port);
struct net_device *ib_device_netdev(struct ib_device *dev, u8 port);

struct ib_wq *ib_create_wq(struct ib_pd *pd,
			   struct ib_wq_init_attr *init_attr);
int ib_destroy_wq_user(struct ib_wq *wq, struct ib_udata *udata);
int ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *attr,
		 u32 wq_attr_mask);

int ib_map_mr_sg(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
		 unsigned int *sg_offset, unsigned int page_size);
int ib_map_mr_sg_pi(struct ib_mr *mr, struct scatterlist *data_sg,
		    int data_sg_nents, unsigned int *data_sg_offset,
		    struct scatterlist *meta_sg, int meta_sg_nents,
		    unsigned int *meta_sg_offset, unsigned int page_size);

static inline int
ib_map_mr_sg_zbva(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
		  unsigned int *sg_offset, unsigned int page_size)
{
	int n;

	n = ib_map_mr_sg(mr, sg, sg_nents, sg_offset, page_size);
	mr->iova = 0;

	return n;
}

int ib_sg_to_pages(struct ib_mr *mr, struct scatterlist *sgl, int sg_nents,
		   unsigned int *sg_offset,
		   int (*set_page)(struct ib_mr *, u64));

void ib_drain_rq(struct ib_qp *qp);
void ib_drain_sq(struct ib_qp *qp);
void ib_drain_qp(struct ib_qp *qp);

int ib_get_eth_speed(struct ib_device *dev, u8 port_num, u16 *speed,
		     u8 *width);

static inline u8 *rdma_ah_retrieve_dmac(struct rdma_ah_attr *attr)
{
	if (attr->type == RDMA_AH_ATTR_TYPE_ROCE)
		return attr->roce.dmac;
	return NULL;
}

static inline void rdma_ah_set_dlid(struct rdma_ah_attr *attr, u32 dlid)
{
	if (attr->type == RDMA_AH_ATTR_TYPE_IB)
		attr->ib.dlid = (u16)dlid;
	else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
		attr->opa.dlid = dlid;
}

static inline u32 rdma_ah_get_dlid(const struct rdma_ah_attr *attr)
{
	if (attr->type == RDMA_AH_ATTR_TYPE_IB)
		return attr->ib.dlid;
	else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
		return attr->opa.dlid;
	return 0;
}

static inline void rdma_ah_set_sl(struct rdma_ah_attr *attr, u8 sl)
{
	attr->sl = sl;
}

static inline u8 rdma_ah_get_sl(const struct rdma_ah_attr *attr)
{
	return attr->sl;
}

static inline void rdma_ah_set_path_bits(struct rdma_ah_attr *attr,
					 u8 src_path_bits)
{
	if (attr->type == RDMA_AH_ATTR_TYPE_IB)
		attr->ib.src_path_bits = src_path_bits;
	else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
		attr->opa.src_path_bits = src_path_bits;
}

static inline u8 rdma_ah_get_path_bits(const struct rdma_ah_attr *attr)
{
	if (attr->type == RDMA_AH_ATTR_TYPE_IB)
		return attr->ib.src_path_bits;
	else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
		return attr->opa.src_path_bits;
	return 0;
}

static inline void rdma_ah_set_make_grd(struct rdma_ah_attr *attr,
					bool make_grd)
{
	if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
		attr->opa.make_grd = make_grd;
}

static inline bool rdma_ah_get_make_grd(const struct rdma_ah_attr *attr)
{
	if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
		return attr->opa.make_grd;
	return false;
}

static inline void rdma_ah_set_port_num(struct rdma_ah_attr *attr, u8 port_num)
{
	attr->port_num = port_num;
}

static inline u8 rdma_ah_get_port_num(const struct rdma_ah_attr *attr)
{
	return attr->port_num;
}

static inline void rdma_ah_set_static_rate(struct rdma_ah_attr *attr,
					   u8 static_rate)
{
	attr->static_rate = static_rate;
}

static inline u8 rdma_ah_get_static_rate(const struct rdma_ah_attr *attr)
{
	return attr->static_rate;
}

static inline void rdma_ah_set_ah_flags(struct rdma_ah_attr *attr,
					enum ib_ah_flags flag)
{
	attr->ah_flags = flag;
}

static inline enum ib_ah_flags
		rdma_ah_get_ah_flags(const struct rdma_ah_attr *attr)
{
	return attr->ah_flags;
}

static inline const struct ib_global_route
		*rdma_ah_read_grh(const struct rdma_ah_attr *attr)
{
	return &attr->grh;
}

/* To retrieve and modify the grh */
static inline struct ib_global_route
		*rdma_ah_retrieve_grh(struct rdma_ah_attr *attr)
{
	return &attr->grh;
}

static inline void rdma_ah_set_dgid_raw(struct rdma_ah_attr *attr, void *dgid)
{
	struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);

	memcpy(grh->dgid.raw, dgid, sizeof(grh->dgid));
}

static inline void rdma_ah_set_subnet_prefix(struct rdma_ah_attr *attr,
					     __be64 prefix)
{
	struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);

	grh->dgid.global.subnet_prefix = prefix;
}

static inline void rdma_ah_set_interface_id(struct rdma_ah_attr *attr,
					    __be64 if_id)
{
	struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);

	grh->dgid.global.interface_id = if_id;
}

static inline void rdma_ah_set_grh(struct rdma_ah_attr *attr,
				   union ib_gid *dgid, u32 flow_label,
				   u8 sgid_index, u8 hop_limit,
				   u8 traffic_class)
{
	struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);

	attr->ah_flags = IB_AH_GRH;
	if (dgid)
		grh->dgid = *dgid;

	grh->flow_label = flow_label;
	grh->sgid_index = sgid_index;
	grh->hop_limit = hop_limit;
	grh->traffic_class = traffic_class;
	grh->sgid_attr = NULL;
}

void rdma_destroy_ah_attr(struct rdma_ah_attr *ah_attr);
void rdma_move_grh_sgid_attr(struct rdma_ah_attr *attr, union ib_gid *dgid,
			     u32 flow_label, u8 hop_limit, u8 traffic_class,
			     const struct ib_gid_attr *sgid_attr);
void rdma_copy_ah_attr(struct rdma_ah_attr *dest,
		       const struct rdma_ah_attr *src);
void rdma_replace_ah_attr(struct rdma_ah_attr *old,
			  const struct rdma_ah_attr *new);
void rdma_move_ah_attr(struct rdma_ah_attr *dest, struct rdma_ah_attr *src);

/**
 * rdma_ah_find_type - Return address handle type.
 *
 * @dev: Device to be checked
 * @port_num: Port number
 */
static inline enum rdma_ah_attr_type rdma_ah_find_type(struct ib_device *dev,
						       u8 port_num)
{
	if (rdma_protocol_roce(dev, port_num))
		return RDMA_AH_ATTR_TYPE_ROCE;
	if (rdma_protocol_ib(dev, port_num)) {
		if (rdma_cap_opa_ah(dev, port_num))
			return RDMA_AH_ATTR_TYPE_OPA;
		return RDMA_AH_ATTR_TYPE_IB;
	}

	return RDMA_AH_ATTR_TYPE_UNDEFINED;
}

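/*
 * Example (illustrative sketch only): initializing an AH attribute with
 * a GRH, choosing the type from the port's protocol.  dev, port_num,
 * dgid and sgid_index are hypothetical.
 *
 *	struct rdma_ah_attr attr = {};
 *
 *	attr.type = rdma_ah_find_type(dev, port_num);
 *	rdma_ah_set_port_num(&attr, port_num);
 *	rdma_ah_set_grh(&attr, &dgid, 0, sgid_index, 64, 0);
 */
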
/**
 * ib_lid_cpu16 - Return lid in 16bit CPU encoding.
 *     In the current implementation the only way to
 *     get the 32bit lid is from other sources for OPA.
 *     For IB, lids will always be 16bits so cast the
 *     value accordingly.
 *
 * @lid: A 32bit LID
 */
static inline u16 ib_lid_cpu16(u32 lid)
{
	WARN_ON_ONCE(lid & 0xFFFF0000);
	return (u16)lid;
}

/**
 * ib_lid_be16 - Return lid in 16bit BE encoding.
 *
 * @lid: A 32bit LID
 */
static inline __be16 ib_lid_be16(u32 lid)
{
	WARN_ON_ONCE(lid & 0xFFFF0000);
	return cpu_to_be16((u16)lid);
}

/**
 * ib_get_vector_affinity - Get the affinity mappings of a given completion
 *   vector
 * @device: the rdma device
 * @comp_vector: index of completion vector
 *
 * Returns NULL on failure, otherwise a corresponding cpu map of the
 * completion vector (returns all-cpus map if the device driver doesn't
 * implement get_vector_affinity).
 */
static inline const struct cpumask *
ib_get_vector_affinity(struct ib_device *device, int comp_vector)
{
	if (comp_vector < 0 || comp_vector >= device->num_comp_vectors ||
	    !device->ops.get_vector_affinity)
		return NULL;

	return device->ops.get_vector_affinity(device, comp_vector);
}

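/*
 * Example (illustrative sketch only): spreading work across completion
 * vectors and inspecting a vector's CPU affinity.  device and i are
 * hypothetical.
 *
 *	const struct cpumask *mask;
 *
 *	mask = ib_get_vector_affinity(device, i % device->num_comp_vectors);
 *	if (mask)
 *		// e.g. prefer vectors whose mask includes the local CPU
 */
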
/**
 * rdma_roce_rescan_device - Rescan all of the network devices in the system
 * and add their gids, as needed, to the relevant RoCE devices.
 *
 * @device: the rdma device
 */
void rdma_roce_rescan_device(struct ib_device *ibdev);

struct ib_ucontext *ib_uverbs_get_ucontext_file(struct ib_uverbs_file *ufile);

int uverbs_destroy_def_handler(struct uverbs_attr_bundle *attrs);

struct net_device *rdma_alloc_netdev(struct ib_device *device, u8 port_num,
				     enum rdma_netdev_t type, const char *name,
				     unsigned char name_assign_type,
				     void (*setup)(struct net_device *));

int rdma_init_netdev(struct ib_device *device, u8 port_num,
		     enum rdma_netdev_t type, const char *name,
		     unsigned char name_assign_type,
		     void (*setup)(struct net_device *),
		     struct net_device *netdev);

/**
 * rdma_set_device_sysfs_group - Set the device attributes group to have
 *				 driver specific sysfs entries for the
 *				 infiniband class.
 *
 * @device: device pointer for which the attributes are to be created
 * @group: Pointer to the group which should be added when the device
 * is registered with sysfs.
 * rdma_set_device_sysfs_group() allows existing drivers to expose one
 * attribute group per device.
 *
 * NOTE: New drivers should not make use of this API; instead, new device
 * parameters should be exposed via the netlink command interface. This API
 * and mechanism exist only for existing drivers.
 */
static inline void
rdma_set_device_sysfs_group(struct ib_device *dev,
			    const struct attribute_group *group)
{
	dev->groups[1] = group;
}

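/*
 * Example (illustrative sketch only, existing drivers only): exposing a
 * driver attribute group.  my_attrs and my_attr_group are hypothetical,
 * and the group must be set before the device is registered.
 *
 *	static const struct attribute_group my_attr_group = {
 *		.attrs = my_attrs,
 *	};
 *
 *	rdma_set_device_sysfs_group(ibdev, &my_attr_group);
 */
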
/**
 * rdma_device_to_ibdev - Get ib_device pointer from device pointer
 *
 * @device: device pointer for which the ib_device pointer is to be retrieved
 *
 * rdma_device_to_ibdev() retrieves the ib_device pointer from a device.
 */
static inline struct ib_device *rdma_device_to_ibdev(struct device *device)
{
	struct ib_core_device *coredev =
		container_of(device, struct ib_core_device, dev);

	return coredev->owner;
}

/**
 * ibdev_to_node - return the NUMA node for a given ib_device
 * @dev: device to get the NUMA node for.
 */
static inline int ibdev_to_node(struct ib_device *ibdev)
{
	struct device *parent = ibdev->dev.parent;

	if (!parent)
		return NUMA_NO_NODE;
	return dev_to_node(parent);
}

/**
 * rdma_device_to_drv_device - Helper macro to reach back to driver's
 *	ib_device holder structure from device pointer.
 *
 * NOTE: New drivers should not make use of this API; This API is only for
 * existing drivers who have exposed sysfs entries using
 * rdma_set_device_sysfs_group().
 */
#define rdma_device_to_drv_device(dev, drv_dev_struct, ibdev_member)           \
	container_of(rdma_device_to_ibdev(dev), drv_dev_struct, ibdev_member)

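/*
 * Example (illustrative sketch only): a sysfs show() callback in an
 * existing driver reaching back to its private device structure.
 * struct my_dev and its ibdev member are hypothetical.
 *
 *	struct my_dev *mdev =
 *		rdma_device_to_drv_device(device, struct my_dev, ibdev);
 */
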
bool rdma_dev_access_netns(const struct ib_device *device,
			   const struct net *net);

#define IB_ROCE_UDP_ENCAP_VALID_PORT_MIN (0xC000)
#define IB_ROCE_UDP_ENCAP_VALID_PORT_MAX (0xFFFF)
#define IB_GRH_FLOWLABEL_MASK		 (0x000FFFFF)

/**
 * rdma_flow_label_to_udp_sport - generate a RoCE v2 UDP src port value based
 *                               on the flow label
 *
 * @fl - flow label
 *
 * This function will convert the 20 bit flow_label input to a valid RoCE v2
 * UDP src port 14 bit value. All RoCE V2 drivers should use this same
 * algorithm.
 */
static inline u16 rdma_flow_label_to_udp_sport(u32 fl)
{
	u32 fl_low = fl & 0x03fff, fl_high = fl & 0xFC000;

	fl_low ^= fl_high >> 14;
	return (u16)(fl_low | IB_ROCE_UDP_ENCAP_VALID_PORT_MIN);
}

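/*
 * Worked example: for fl == 0x12345, fl_low == 0x2345 and
 * fl_high >> 14 == 0x4, so the result is
 * (0x2345 ^ 0x4) | 0xc000 == 0xe341.
 */
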
/**
 * rdma_calc_flow_label - generate an RDMA symmetric flow label value based on
 *                        local and remote qpn values
 *
 * @lqpn - Local qpn
 * @rqpn - Remote qpn
 *
 * This function folds the multiplication of the two 24 bit qpn fields and
 * converts the result to a 20 bit value.
 *
 * This function will create a symmetric flow_label value based on the local
 * and remote qpn values. This will allow both the requester and responder
 * to calculate the same flow_label for a given connection.
 *
 * This helper function should be used by drivers in case the upper layer
 * provides a zero flow_label value. This is to improve the entropy of RDMA
 * traffic in the network.
 */
static inline u32 rdma_calc_flow_label(u32 lqpn, u32 rqpn)
{
	u64 v = (u64)lqpn * rqpn;

	v ^= v >> 20;
	v ^= v >> 40;

	return (u32)(v & IB_GRH_FLOWLABEL_MASK);
}

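/*
 * Worked example: rdma_calc_flow_label(0x12, 0x34) computes
 * v == 0x12 * 0x34 == 0x3a8; the folding shifts leave such a small
 * value unchanged, so the flow label is 0x3a8.  Swapping lqpn and
 * rqpn gives the same result, which is what makes the label symmetric.
 */
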
#endif /* IB_VERBS_H */