1 /* SPDX-License-Identifier: GPL-2.0-only */
3 * Copyright (c) 2022 Microsoft Corporation. All rights reserved.
9 #include <rdma/ib_verbs.h>
10 #include <rdma/ib_mad.h>
11 #include <rdma/ib_umem.h>
12 #include <rdma/mana-abi.h>
13 #include <rdma/uverbs_ioctl.h>
15 #include <net/mana/mana.h>
18 (SZ_4K | SZ_8K | SZ_16K | SZ_32K | SZ_64K | SZ_128K | SZ_256K | \
19 SZ_512K | SZ_1M | SZ_2M)
21 /* MANA doesn't have any limit for MR size */
22 #define MANA_IB_MAX_MR_SIZE U64_MAX
 * The hardware limit of number of MRs is greater than the maximum number of
 * MRs that can possibly be represented in 24 bits
28 #define MANA_IB_MAX_MR 0xFFFFFFu
31 * The CA timeout is approx. 260ms (4us * 2^(DELAY))
33 #define MANA_CA_ACK_DELAY 16
35 struct mana_ib_adapter_caps
{
43 u32 max_inbound_read_limit
;
44 u32 max_outbound_read_limit
;
48 u32 max_send_sge_count
;
49 u32 max_recv_sge_count
;
50 u32 max_inline_data_size
;
53 struct mana_ib_queue
{
60 struct ib_device ib_dev
;
61 struct gdma_dev
*gdma_dev
;
62 mana_handle_t adapter_handle
;
63 struct gdma_queue
*fatal_err_eq
;
64 struct gdma_queue
**eqs
;
65 struct xarray qp_table_wq
;
66 struct mana_ib_adapter_caps adapter_caps
;
71 struct mana_ib_queue queue
;
74 mana_handle_t rx_object
;
80 mana_handle_t pd_handle
;
82 /* Mutex for sharing access to vport_use_count */
83 struct mutex vport_mutex
;
86 bool tx_shortform_allowed
;
93 mana_handle_t mr_handle
;
98 struct mana_ib_queue queue
;
101 mana_handle_t cq_handle
;
/* Queues composing an RC QP, in the order the hardware expects them. */
enum mana_rc_queue_type {
	MANA_RC_SEND_QUEUE_REQUESTER = 0,
	MANA_RC_SEND_QUEUE_RESPONDER,
	MANA_RC_SEND_QUEUE_FMR,
	MANA_RC_RECV_QUEUE_REQUESTER,
	MANA_RC_RECV_QUEUE_RESPONDER,
	MANA_RC_QUEUE_TYPE_MAX,
};
113 struct mana_ib_rc_qp
{
114 struct mana_ib_queue queues
[MANA_RC_QUEUE_TYPE_MAX
];
120 mana_handle_t qp_handle
;
122 struct mana_ib_queue raw_sq
;
123 struct mana_ib_rc_qp rc_qp
;
126 /* The port on the IB device, starting with 1 */
130 struct completion free
;
133 struct mana_ib_ucontext
{
134 struct ib_ucontext ibucontext
;
138 struct mana_ib_rwq_ind_table
{
139 struct ib_rwq_ind_table ib_ind_table
;
/* GDMA command codes used by the RNIC management path. */
enum mana_ib_command_code {
	MANA_IB_GET_ADAPTER_CAP	= 0x30001,
	MANA_IB_CREATE_ADAPTER	= 0x30002,
	MANA_IB_DESTROY_ADAPTER	= 0x30003,
	MANA_IB_CONFIG_IP_ADDR	= 0x30004,
	MANA_IB_CONFIG_MAC_ADDR	= 0x30005,
	MANA_IB_CREATE_CQ	= 0x30008,
	MANA_IB_DESTROY_CQ	= 0x30009,
	MANA_IB_CREATE_RC_QP	= 0x3000a,
	MANA_IB_DESTROY_RC_QP	= 0x3000b,
	MANA_IB_SET_QP_STATE	= 0x3000d,
};
155 struct mana_ib_query_adapter_caps_req
{
156 struct gdma_req_hdr hdr
;
159 struct mana_ib_query_adapter_caps_resp
{
160 struct gdma_resp_hdr hdr
;
168 u32 max_inbound_read_limit
;
169 u32 max_outbound_read_limit
;
172 u32 max_requester_sq_size
;
173 u32 max_responder_sq_size
;
174 u32 max_requester_rq_size
;
175 u32 max_responder_rq_size
;
176 u32 max_send_sge_count
;
177 u32 max_recv_sge_count
;
178 u32 max_inline_data_size
;
181 struct mana_rnic_create_adapter_req
{
182 struct gdma_req_hdr hdr
;
188 struct mana_rnic_create_adapter_resp
{
189 struct gdma_resp_hdr hdr
;
190 mana_handle_t adapter
;
193 struct mana_rnic_destroy_adapter_req
{
194 struct gdma_req_hdr hdr
;
195 mana_handle_t adapter
;
198 struct mana_rnic_destroy_adapter_resp
{
199 struct gdma_resp_hdr hdr
;
202 enum mana_ib_addr_op
{
207 enum sgid_entry_type
{
212 struct mana_rnic_config_addr_req
{
213 struct gdma_req_hdr hdr
;
214 mana_handle_t adapter
;
215 enum mana_ib_addr_op op
;
216 enum sgid_entry_type sgid_type
;
220 struct mana_rnic_config_addr_resp
{
221 struct gdma_resp_hdr hdr
;
224 struct mana_rnic_config_mac_addr_req
{
225 struct gdma_req_hdr hdr
;
226 mana_handle_t adapter
;
227 enum mana_ib_addr_op op
;
228 u8 mac_addr
[ETH_ALEN
];
232 struct mana_rnic_config_mac_addr_resp
{
233 struct gdma_resp_hdr hdr
;
236 struct mana_rnic_create_cq_req
{
237 struct gdma_req_hdr hdr
;
238 mana_handle_t adapter
;
244 struct mana_rnic_create_cq_resp
{
245 struct gdma_resp_hdr hdr
;
246 mana_handle_t cq_handle
;
251 struct mana_rnic_destroy_cq_req
{
252 struct gdma_req_hdr hdr
;
253 mana_handle_t adapter
;
254 mana_handle_t cq_handle
;
257 struct mana_rnic_destroy_cq_resp
{
258 struct gdma_resp_hdr hdr
;
/* Flags for the RC QP create request. */
enum mana_rnic_create_rc_flags {
	MANA_RC_FLAG_NO_FMR = 2,	/* do not allocate the FMR send queue */
};
265 struct mana_rnic_create_qp_req
{
266 struct gdma_req_hdr hdr
;
267 mana_handle_t adapter
;
268 mana_handle_t pd_handle
;
269 mana_handle_t send_cq_handle
;
270 mana_handle_t recv_cq_handle
;
271 u64 dma_region
[MANA_RC_QUEUE_TYPE_MAX
];
282 struct mana_rnic_create_qp_resp
{
283 struct gdma_resp_hdr hdr
;
284 mana_handle_t rc_qp_handle
;
285 u32 queue_ids
[MANA_RC_QUEUE_TYPE_MAX
];
289 struct mana_rnic_destroy_rc_qp_req
{
290 struct gdma_req_hdr hdr
;
291 mana_handle_t adapter
;
292 mana_handle_t rc_qp_handle
;
295 struct mana_rnic_destroy_rc_qp_resp
{
296 struct gdma_resp_hdr hdr
;
299 struct mana_ib_ah_attr
{
302 u8 src_mac
[ETH_ALEN
];
303 u8 dest_mac
[ETH_ALEN
];
313 struct mana_rnic_set_qp_state_req
{
314 struct gdma_req_hdr hdr
;
315 mana_handle_t adapter
;
316 mana_handle_t qp_handle
;
323 u32 max_dest_rd_atomic
;
328 struct mana_ib_ah_attr ah_attr
;
331 struct mana_rnic_set_qp_state_resp
{
332 struct gdma_resp_hdr hdr
;
335 static inline struct gdma_context
*mdev_to_gc(struct mana_ib_dev
*mdev
)
337 return mdev
->gdma_dev
->gdma_context
;
340 static inline struct mana_ib_qp
*mana_get_qp_ref(struct mana_ib_dev
*mdev
,
343 struct mana_ib_qp
*qp
;
346 xa_lock_irqsave(&mdev
->qp_table_wq
, flag
);
347 qp
= xa_load(&mdev
->qp_table_wq
, qid
);
349 refcount_inc(&qp
->refcount
);
350 xa_unlock_irqrestore(&mdev
->qp_table_wq
, flag
);
354 static inline void mana_put_qp_ref(struct mana_ib_qp
*qp
)
356 if (refcount_dec_and_test(&qp
->refcount
))
360 static inline struct net_device
*mana_ib_get_netdev(struct ib_device
*ibdev
, u32 port
)
362 struct mana_ib_dev
*mdev
= container_of(ibdev
, struct mana_ib_dev
, ib_dev
);
363 struct gdma_context
*gc
= mdev_to_gc(mdev
);
364 struct mana_context
*mc
= gc
->mana
.driver_data
;
366 if (port
< 1 || port
> mc
->num_ports
)
368 return mc
->ports
[port
- 1];
371 static inline void copy_in_reverse(u8
*dst
, const u8
*src
, u32 size
)
375 for (i
= 0; i
< size
; i
++)
376 dst
[size
- 1 - i
] = src
[i
];
379 int mana_ib_install_cq_cb(struct mana_ib_dev
*mdev
, struct mana_ib_cq
*cq
);
380 void mana_ib_remove_cq_cb(struct mana_ib_dev
*mdev
, struct mana_ib_cq
*cq
);
382 int mana_ib_create_zero_offset_dma_region(struct mana_ib_dev
*dev
, struct ib_umem
*umem
,
383 mana_handle_t
*gdma_region
);
385 int mana_ib_create_dma_region(struct mana_ib_dev
*dev
, struct ib_umem
*umem
,
386 mana_handle_t
*gdma_region
, u64 virt
);
388 int mana_ib_gd_destroy_dma_region(struct mana_ib_dev
*dev
,
389 mana_handle_t gdma_region
);
391 int mana_ib_create_queue(struct mana_ib_dev
*mdev
, u64 addr
, u32 size
,
392 struct mana_ib_queue
*queue
);
393 void mana_ib_destroy_queue(struct mana_ib_dev
*mdev
, struct mana_ib_queue
*queue
);
395 struct ib_wq
*mana_ib_create_wq(struct ib_pd
*pd
,
396 struct ib_wq_init_attr
*init_attr
,
397 struct ib_udata
*udata
);
399 int mana_ib_modify_wq(struct ib_wq
*wq
, struct ib_wq_attr
*wq_attr
,
400 u32 wq_attr_mask
, struct ib_udata
*udata
);
402 int mana_ib_destroy_wq(struct ib_wq
*ibwq
, struct ib_udata
*udata
);
404 int mana_ib_create_rwq_ind_table(struct ib_rwq_ind_table
*ib_rwq_ind_table
,
405 struct ib_rwq_ind_table_init_attr
*init_attr
,
406 struct ib_udata
*udata
);
408 int mana_ib_destroy_rwq_ind_table(struct ib_rwq_ind_table
*ib_rwq_ind_tbl
);
410 struct ib_mr
*mana_ib_get_dma_mr(struct ib_pd
*ibpd
, int access_flags
);
412 struct ib_mr
*mana_ib_reg_user_mr(struct ib_pd
*pd
, u64 start
, u64 length
,
413 u64 iova
, int access_flags
,
414 struct ib_udata
*udata
);
416 int mana_ib_dereg_mr(struct ib_mr
*ibmr
, struct ib_udata
*udata
);
418 int mana_ib_create_qp(struct ib_qp
*qp
, struct ib_qp_init_attr
*qp_init_attr
,
419 struct ib_udata
*udata
);
421 int mana_ib_modify_qp(struct ib_qp
*ibqp
, struct ib_qp_attr
*attr
,
422 int attr_mask
, struct ib_udata
*udata
);
424 int mana_ib_destroy_qp(struct ib_qp
*ibqp
, struct ib_udata
*udata
);
426 int mana_ib_cfg_vport(struct mana_ib_dev
*dev
, u32 port_id
,
427 struct mana_ib_pd
*pd
, u32 doorbell_id
);
428 void mana_ib_uncfg_vport(struct mana_ib_dev
*dev
, struct mana_ib_pd
*pd
,
431 int mana_ib_create_cq(struct ib_cq
*ibcq
, const struct ib_cq_init_attr
*attr
,
432 struct uverbs_attr_bundle
*attrs
);
434 int mana_ib_destroy_cq(struct ib_cq
*ibcq
, struct ib_udata
*udata
);
436 int mana_ib_alloc_pd(struct ib_pd
*ibpd
, struct ib_udata
*udata
);
437 int mana_ib_dealloc_pd(struct ib_pd
*ibpd
, struct ib_udata
*udata
);
439 int mana_ib_alloc_ucontext(struct ib_ucontext
*ibcontext
,
440 struct ib_udata
*udata
);
441 void mana_ib_dealloc_ucontext(struct ib_ucontext
*ibcontext
);
443 int mana_ib_mmap(struct ib_ucontext
*ibcontext
, struct vm_area_struct
*vma
);
445 int mana_ib_get_port_immutable(struct ib_device
*ibdev
, u32 port_num
,
446 struct ib_port_immutable
*immutable
);
447 int mana_ib_query_device(struct ib_device
*ibdev
, struct ib_device_attr
*props
,
448 struct ib_udata
*uhw
);
449 int mana_ib_query_port(struct ib_device
*ibdev
, u32 port
,
450 struct ib_port_attr
*props
);
451 int mana_ib_query_gid(struct ib_device
*ibdev
, u32 port
, int index
,
454 void mana_ib_disassociate_ucontext(struct ib_ucontext
*ibcontext
);
456 int mana_ib_gd_query_adapter_caps(struct mana_ib_dev
*mdev
);
458 int mana_ib_create_eqs(struct mana_ib_dev
*mdev
);
460 void mana_ib_destroy_eqs(struct mana_ib_dev
*mdev
);
462 int mana_ib_gd_create_rnic_adapter(struct mana_ib_dev
*mdev
);
464 int mana_ib_gd_destroy_rnic_adapter(struct mana_ib_dev
*mdev
);
466 int mana_ib_query_pkey(struct ib_device
*ibdev
, u32 port
, u16 index
, u16
*pkey
);
468 enum rdma_link_layer
mana_ib_get_link_layer(struct ib_device
*device
, u32 port_num
);
470 int mana_ib_gd_add_gid(const struct ib_gid_attr
*attr
, void **context
);
472 int mana_ib_gd_del_gid(const struct ib_gid_attr
*attr
, void **context
);
474 int mana_ib_gd_config_mac(struct mana_ib_dev
*mdev
, enum mana_ib_addr_op op
, u8
*mac
);
476 int mana_ib_gd_create_cq(struct mana_ib_dev
*mdev
, struct mana_ib_cq
*cq
, u32 doorbell
);
478 int mana_ib_gd_destroy_cq(struct mana_ib_dev
*mdev
, struct mana_ib_cq
*cq
);
480 int mana_ib_gd_create_rc_qp(struct mana_ib_dev
*mdev
, struct mana_ib_qp
*qp
,
481 struct ib_qp_init_attr
*attr
, u32 doorbell
, u64 flags
);
482 int mana_ib_gd_destroy_rc_qp(struct mana_ib_dev
*mdev
, struct mana_ib_qp
*qp
);