/*
 * Copyright (c) 2013, Mellanox Technologies inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <asm-generic/kmap_types.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/io-mapping.h>
#include <linux/sched.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_umem.h>
/* driver-local headers, assumed to provide the uverbs ABI structs and mlx5_ib types */
#include "user.h"
#include "mlx5_ib.h"
#define DRIVER_NAME		"mlx5_ib"
#define DRIVER_VERSION		"1.0"
#define DRIVER_RELDATE		"June 2013"

MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>");
MODULE_DESCRIPTION("Mellanox Connect-IB HCA IB driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRIVER_VERSION);

static int prof_sel = 2;
module_param_named(prof_sel, prof_sel, int, 0444);
MODULE_PARM_DESC(prof_sel, "profile selector. Valid range 0 - 2");

static char mlx5_version[] =
	DRIVER_NAME ": Mellanox Connect-IB Infiniband driver v"
	DRIVER_VERSION " (" DRIVER_RELDATE ")\n";
static struct mlx5_profile profile[] = {
	[1] = {
		.mask		= MLX5_PROF_MASK_QP_SIZE,
	},
	[2] = {
		.mask		= MLX5_PROF_MASK_QP_SIZE |
				  MLX5_PROF_MASK_MR_CACHE,
	},
};
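
/* Translate a completion vector index into its EQ number and IRQ number. */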
int mlx5_vector2eqn(struct mlx5_ib_dev *dev, int vector, int *eqn, int *irqn)
{
	struct mlx5_eq_table *table = &dev->mdev.priv.eq_table;
	struct mlx5_eq *eq, *n;
	int err = -ENOENT;

	spin_lock(&table->lock);
	list_for_each_entry_safe(eq, n, &dev->eqs_list, list) {
		if (eq->index == vector) {
			*eqn = eq->eqn;
			*irqn = eq->irqn;
			err = 0;
			break;
		}
	}
	spin_unlock(&table->lock);

	return err;
}
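
/*
 * Create and map one completion EQ per completion vector and keep them on
 * dev->eqs_list.
 */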
static int alloc_comp_eqs(struct mlx5_ib_dev *dev)
{
	struct mlx5_eq_table *table = &dev->mdev.priv.eq_table;
	char name[MLX5_MAX_EQ_NAME];
	struct mlx5_eq *eq, *n;
	int ncomp_vec;
	int nent;
	int err;
	int i;

	INIT_LIST_HEAD(&dev->eqs_list);
	ncomp_vec = table->num_comp_vectors;
	nent = MLX5_COMP_EQ_SIZE;
	for (i = 0; i < ncomp_vec; i++) {
		eq = kzalloc(sizeof(*eq), GFP_KERNEL);
		if (!eq) {
			err = -ENOMEM;
			goto clean;
		}

		snprintf(name, MLX5_MAX_EQ_NAME, "mlx5_comp%d", i);
		err = mlx5_create_map_eq(&dev->mdev, eq,
					 i + MLX5_EQ_VEC_COMP_BASE, nent, 0,
					 name, &dev->mdev.priv.uuari.uars[0]);
		if (err) {
			kfree(eq);
			goto clean;
		}

		mlx5_ib_dbg(dev, "allocated completion EQN %d\n", eq->eqn);
		eq->index = i;
		spin_lock(&table->lock);
		list_add_tail(&eq->list, &dev->eqs_list);
		spin_unlock(&table->lock);
	}

	dev->num_comp_vectors = ncomp_vec;
	return 0;

clean:
	spin_lock(&table->lock);
	list_for_each_entry_safe(eq, n, &dev->eqs_list, list) {
		list_del(&eq->list);
		spin_unlock(&table->lock);
		if (mlx5_destroy_unmap_eq(&dev->mdev, eq))
			mlx5_ib_warn(dev, "failed to destroy EQ 0x%x\n", eq->eqn);
		kfree(eq);
		spin_lock(&table->lock);
	}
	spin_unlock(&table->lock);
	return err;
}
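
/* Tear down all completion EQs created by alloc_comp_eqs(). */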
static void free_comp_eqs(struct mlx5_ib_dev *dev)
{
	struct mlx5_eq_table *table = &dev->mdev.priv.eq_table;
	struct mlx5_eq *eq, *n;

	spin_lock(&table->lock);
	list_for_each_entry_safe(eq, n, &dev->eqs_list, list) {
		list_del(&eq->list);
		spin_unlock(&table->lock);
		if (mlx5_destroy_unmap_eq(&dev->mdev, eq))
			mlx5_ib_warn(dev, "failed to destroy EQ 0x%x\n", eq->eqn);
		kfree(eq);
		spin_lock(&table->lock);
	}
	spin_unlock(&table->lock);
}
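
/*
 * Query device attributes: firmware version and capability flags come from
 * the HCA caps, vendor/part/sys-image information from a NodeInfo MAD.
 */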
static int mlx5_ib_query_device(struct ib_device *ibdev,
				struct ib_device_attr *props)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct ib_smp *in_mad  = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;
	int max_rq_sg;
	int max_sq_sg;
	u64 flags;

	in_mad  = kzalloc(sizeof(*in_mad), GFP_KERNEL);
	out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;

	err = mlx5_MAD_IFC(to_mdev(ibdev), 1, 1, 1, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	memset(props, 0, sizeof(*props));

	props->fw_ver = ((u64)fw_rev_maj(&dev->mdev) << 32) |
		(fw_rev_min(&dev->mdev) << 16) |
		fw_rev_sub(&dev->mdev);
	props->device_cap_flags    = IB_DEVICE_CHANGE_PHY_PORT |
		IB_DEVICE_PORT_ACTIVE_EVENT |
		IB_DEVICE_SYS_IMAGE_GUID |
		IB_DEVICE_RC_RNR_NAK_GEN;
	flags = dev->mdev.caps.flags;
	if (flags & MLX5_DEV_CAP_FLAG_BAD_PKEY_CNTR)
		props->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR;
	if (flags & MLX5_DEV_CAP_FLAG_BAD_QKEY_CNTR)
		props->device_cap_flags |= IB_DEVICE_BAD_QKEY_CNTR;
	if (flags & MLX5_DEV_CAP_FLAG_APM)
		props->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG;
	props->device_cap_flags |= IB_DEVICE_LOCAL_DMA_LKEY;
	if (flags & MLX5_DEV_CAP_FLAG_XRC)
		props->device_cap_flags |= IB_DEVICE_XRC;
	props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;

	props->vendor_id	   = be32_to_cpup((__be32 *)(out_mad->data + 36)) &
		0xffffff;
	props->vendor_part_id	   = be16_to_cpup((__be16 *)(out_mad->data + 30));
	props->hw_ver		   = be32_to_cpup((__be32 *)(out_mad->data + 32));
	memcpy(&props->sys_image_guid, out_mad->data + 4, 8);

	props->max_mr_size	   = ~0ull;
	props->page_size_cap	   = dev->mdev.caps.min_page_sz;
	props->max_qp		   = 1 << dev->mdev.caps.log_max_qp;
	props->max_qp_wr	   = dev->mdev.caps.max_wqes;
	max_rq_sg = dev->mdev.caps.max_rq_desc_sz / sizeof(struct mlx5_wqe_data_seg);
	max_sq_sg = (dev->mdev.caps.max_sq_desc_sz - sizeof(struct mlx5_wqe_ctrl_seg)) /
		sizeof(struct mlx5_wqe_data_seg);
	props->max_sge = min(max_rq_sg, max_sq_sg);
	props->max_cq		   = 1 << dev->mdev.caps.log_max_cq;
	props->max_cqe		   = dev->mdev.caps.max_cqes - 1;
	props->max_mr		   = 1 << dev->mdev.caps.log_max_mkey;
	props->max_pd		   = 1 << dev->mdev.caps.log_max_pd;
	props->max_qp_rd_atom	   = dev->mdev.caps.max_ra_req_qp;
	props->max_qp_init_rd_atom = dev->mdev.caps.max_ra_res_qp;
	props->max_res_rd_atom	   = props->max_qp_rd_atom * props->max_qp;
	props->max_srq		   = 1 << dev->mdev.caps.log_max_srq;
	props->max_srq_wr	   = dev->mdev.caps.max_srq_wqes - 1;
	props->max_srq_sge	   = max_rq_sg - 1;
	props->max_fast_reg_page_list_len = (unsigned int)-1;
	props->local_ca_ack_delay  = dev->mdev.caps.local_ca_ack_delay;
	props->atomic_cap	   = IB_ATOMIC_NONE;
	props->masked_atomic_cap   = IB_ATOMIC_NONE;
	props->max_pkeys	   = be16_to_cpup((__be16 *)(out_mad->data + 28));
	props->max_mcast_grp	   = 1 << dev->mdev.caps.log_max_mcg;
	props->max_mcast_qp_attach = dev->mdev.caps.max_qp_mcg;
	props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
					   props->max_mcast_grp;
	props->max_map_per_fmr = INT_MAX; /* no limit in ConnectIB */

out:
	kfree(in_mad);
	kfree(out_mad);

	return err;
}
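
/*
 * Query port attributes via a PortInfo MAD; extended (FDR/EDR) and FDR-10
 * speeds are fixed up afterwards.
 */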
int mlx5_ib_query_port(struct ib_device *ibdev, u8 port,
		       struct ib_port_attr *props)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct ib_smp *in_mad  = NULL;
	struct ib_smp *out_mad = NULL;
	int ext_active_speed;
	int err = -ENOMEM;

	if (port < 1 || port > dev->mdev.caps.num_ports) {
		mlx5_ib_warn(dev, "invalid port number %d\n", port);
		return -EINVAL;
	}

	in_mad  = kzalloc(sizeof(*in_mad), GFP_KERNEL);
	out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	memset(props, 0, sizeof(*props));

	init_query_mad(in_mad);
	in_mad->attr_id  = IB_SMP_ATTR_PORT_INFO;
	in_mad->attr_mod = cpu_to_be32(port);

	err = mlx5_MAD_IFC(dev, 1, 1, port, NULL, NULL, in_mad, out_mad);
	if (err) {
		mlx5_ib_warn(dev, "err %d\n", err);
		goto out;
	}

	props->lid		= be16_to_cpup((__be16 *)(out_mad->data + 16));
	props->lmc		= out_mad->data[34] & 0x7;
	props->sm_lid		= be16_to_cpup((__be16 *)(out_mad->data + 18));
	props->sm_sl		= out_mad->data[36] & 0xf;
	props->state		= out_mad->data[32] & 0xf;
	props->phys_state	= out_mad->data[33] >> 4;
	props->port_cap_flags	= be32_to_cpup((__be32 *)(out_mad->data + 20));
	props->gid_tbl_len	= out_mad->data[50];
	props->max_msg_sz	= 1 << to_mdev(ibdev)->mdev.caps.log_max_msg;
	props->pkey_tbl_len	= to_mdev(ibdev)->mdev.caps.port[port - 1].pkey_table_len;
	props->bad_pkey_cntr	= be16_to_cpup((__be16 *)(out_mad->data + 46));
	props->qkey_viol_cntr	= be16_to_cpup((__be16 *)(out_mad->data + 48));
	props->active_width	= out_mad->data[31] & 0xf;
	props->active_speed	= out_mad->data[35] >> 4;
	props->max_mtu		= out_mad->data[41] & 0xf;
	props->active_mtu	= out_mad->data[36] >> 4;
	props->subnet_timeout	= out_mad->data[51] & 0x1f;
	props->max_vl_num	= out_mad->data[37] >> 4;
	props->init_type_reply	= out_mad->data[41] >> 4;

	/* Check if extended speeds (EDR/FDR/...) are supported */
	if (props->port_cap_flags & IB_PORT_EXTENDED_SPEEDS_SUP) {
		ext_active_speed = out_mad->data[62] >> 4;

		switch (ext_active_speed) {
		case 1:
			props->active_speed = 16; /* FDR */
			break;
		case 2:
			props->active_speed = 32; /* EDR */
			break;
		}
	}

	/* If reported active speed is QDR, check if is FDR-10 */
	if (props->active_speed == 4) {
		if (dev->mdev.caps.ext_port_cap[port - 1] &
		    MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO) {
			init_query_mad(in_mad);
			in_mad->attr_id = MLX5_ATTR_EXTENDED_PORT_INFO;
			in_mad->attr_mod = cpu_to_be32(port);

			err = mlx5_MAD_IFC(dev, 1, 1, port,
					   NULL, NULL, in_mad, out_mad);
			if (err)
				goto out;

			/* Checking LinkSpeedActive for FDR-10 */
			if (out_mad->data[15] & 0x1)
				props->active_speed = 8;
		}
	}

out:
	kfree(in_mad);
	kfree(out_mad);

	return err;
}
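
/*
 * Build a GID from the port GID prefix (PortInfo) and the indexed GUID
 * (GuidInfo).
 */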
static int mlx5_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
			     union ib_gid *gid)
{
	struct ib_smp *in_mad  = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;

	in_mad  = kzalloc(sizeof(*in_mad), GFP_KERNEL);
	out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id  = IB_SMP_ATTR_PORT_INFO;
	in_mad->attr_mod = cpu_to_be32(port);

	err = mlx5_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	memcpy(gid->raw, out_mad->data + 8, 8);

	init_query_mad(in_mad);
	in_mad->attr_id  = IB_SMP_ATTR_GUID_INFO;
	in_mad->attr_mod = cpu_to_be32(index / 8);

	err = mlx5_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	memcpy(gid->raw + 8, out_mad->data + (index % 8) * 8, 8);

out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}
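
/* Read one P_Key from the port's P_Key table via a PkeyTable MAD. */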
static int mlx5_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
			      u16 *pkey)
{
	struct ib_smp *in_mad  = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;

	in_mad  = kzalloc(sizeof(*in_mad), GFP_KERNEL);
	out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id  = IB_SMP_ATTR_PKEY_TABLE;
	in_mad->attr_mod = cpu_to_be32(index / 32);

	err = mlx5_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	*pkey = be16_to_cpu(((__be16 *)out_mad->data)[index % 32]);

out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}

struct mlx5_reg_node_desc {
	u8	desc[64];
};
static int mlx5_ib_modify_device(struct ib_device *ibdev, int mask,
				 struct ib_device_modify *props)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_reg_node_desc in;
	struct mlx5_reg_node_desc out;
	int err;

	if (mask & ~IB_DEVICE_MODIFY_NODE_DESC)
		return -EOPNOTSUPP;

	if (!(mask & IB_DEVICE_MODIFY_NODE_DESC))
		return 0;

	/*
	 * If possible, pass node desc to FW, so it can generate
	 * a 144 trap.  If cmd fails, just ignore.
	 */
	memcpy(&in, props->node_desc, 64);
	err = mlx5_core_access_reg(&dev->mdev, &in, sizeof(in), &out,
				   sizeof(out), MLX5_REG_NODE_DESC, 0, 1);
	if (err)
		return err;

	memcpy(ibdev->node_desc, props->node_desc, 64);

	return err;
}
static int mlx5_ib_modify_port(struct ib_device *ibdev, u8 port, int mask,
			       struct ib_port_modify *props)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct ib_port_attr attr;
	u32 tmp;
	int err;

	mutex_lock(&dev->cap_mask_mutex);

	err = mlx5_ib_query_port(ibdev, port, &attr);
	if (err)
		goto out;

	tmp = (attr.port_cap_flags | props->set_port_cap_mask) &
		~props->clr_port_cap_mask;

	err = mlx5_set_port_caps(&dev->mdev, port, tmp);

out:
	mutex_unlock(&dev->cap_mask_mutex);
	return err;
}
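
/*
 * Allocate a user context: validate the (v1 or v2) request, size the
 * UUAR/UAR resources, allocate UARs from firmware and return the mapping
 * parameters to user space.
 */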
static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
						  struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_ib_alloc_ucontext_req_v2 req;
	struct mlx5_ib_alloc_ucontext_resp resp;
	struct mlx5_ib_ucontext *context;
	struct mlx5_uuar_info *uuari;
	struct mlx5_uar *uars;
	int gross_uuars;
	int num_uars;
	int uuarn;
	int reqlen;
	int err;
	int i;

	if (!dev->ib_active)
		return ERR_PTR(-EAGAIN);

	memset(&req, 0, sizeof(req));
	reqlen = udata->inlen - sizeof(struct ib_uverbs_cmd_hdr);
	if (reqlen != sizeof(struct mlx5_ib_alloc_ucontext_req) &&
	    reqlen != sizeof(struct mlx5_ib_alloc_ucontext_req_v2))
		return ERR_PTR(-EINVAL);

	err = ib_copy_from_udata(&req, udata, reqlen);
	if (err)
		return ERR_PTR(err);

	if (req.flags || req.reserved)
		return ERR_PTR(-EINVAL);

	if (req.total_num_uuars > MLX5_MAX_UUARS)
		return ERR_PTR(-ENOMEM);

	if (req.total_num_uuars == 0)
		return ERR_PTR(-EINVAL);

	req.total_num_uuars = ALIGN(req.total_num_uuars,
				    MLX5_NON_FP_BF_REGS_PER_PAGE);
	if (req.num_low_latency_uuars > req.total_num_uuars - 1)
		return ERR_PTR(-EINVAL);

	num_uars = req.total_num_uuars / MLX5_NON_FP_BF_REGS_PER_PAGE;
	gross_uuars = num_uars * MLX5_BF_REGS_PER_PAGE;
	resp.qp_tab_size     = 1 << dev->mdev.caps.log_max_qp;
	resp.bf_reg_size     = dev->mdev.caps.bf_reg_size;
	resp.cache_line_size = L1_CACHE_BYTES;
	resp.max_sq_desc_sz  = dev->mdev.caps.max_sq_desc_sz;
	resp.max_rq_desc_sz  = dev->mdev.caps.max_rq_desc_sz;
	resp.max_send_wqebb  = dev->mdev.caps.max_wqes;
	resp.max_recv_wr     = dev->mdev.caps.max_wqes;
	resp.max_srq_recv_wr = dev->mdev.caps.max_srq_wqes;

	context = kzalloc(sizeof(*context), GFP_KERNEL);
	if (!context)
		return ERR_PTR(-ENOMEM);

	uuari = &context->uuari;
	mutex_init(&uuari->lock);
	uars = kcalloc(num_uars, sizeof(*uars), GFP_KERNEL);
	if (!uars) {
		err = -ENOMEM;
		goto out_ctx;
	}

	uuari->bitmap = kcalloc(BITS_TO_LONGS(gross_uuars),
				sizeof(*uuari->bitmap),
				GFP_KERNEL);
	if (!uuari->bitmap) {
		err = -ENOMEM;
		goto out_uar_ctx;
	}
	/*
	 * clear all fast path uuars
	 */
	for (i = 0; i < gross_uuars; i++) {
		uuarn = i & 3;
		if (uuarn == 2 || uuarn == 3)
			set_bit(i, uuari->bitmap);
	}

	uuari->count = kcalloc(gross_uuars, sizeof(*uuari->count), GFP_KERNEL);
	if (!uuari->count) {
		err = -ENOMEM;
		goto out_bitmap;
	}

	for (i = 0; i < num_uars; i++) {
		err = mlx5_cmd_alloc_uar(&dev->mdev, &uars[i].index);
		if (err)
			goto out_uars;
	}

	INIT_LIST_HEAD(&context->db_page_list);
	mutex_init(&context->db_page_mutex);

	resp.tot_uuars = req.total_num_uuars;
	resp.num_ports = dev->mdev.caps.num_ports;
	err = ib_copy_to_udata(udata, &resp,
			       sizeof(resp) - sizeof(resp.reserved));
	if (err)
		goto out_uars;

	uuari->num_low_latency_uuars = req.num_low_latency_uuars;
	uuari->uars = uars;
	uuari->num_uars = num_uars;
	return &context->ibucontext;

out_uars:
	for (i--; i >= 0; i--)
		mlx5_cmd_free_uar(&dev->mdev, uars[i].index);
	kfree(uuari->count);

out_bitmap:
	kfree(uuari->bitmap);

out_uar_ctx:
	kfree(uars);

out_ctx:
	kfree(context);
	return ERR_PTR(err);
}
static int mlx5_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
{
	struct mlx5_ib_ucontext *context = to_mucontext(ibcontext);
	struct mlx5_ib_dev *dev = to_mdev(ibcontext->device);
	struct mlx5_uuar_info *uuari = &context->uuari;
	int i;

	for (i = 0; i < uuari->num_uars; i++) {
		if (mlx5_cmd_free_uar(&dev->mdev, uuari->uars[i].index))
			mlx5_ib_warn(dev, "failed to free UAR 0x%x\n", uuari->uars[i].index);
	}

	kfree(uuari->count);
	kfree(uuari->bitmap);
	kfree(uuari->uars);
	kfree(context);

	return 0;
}
static phys_addr_t uar_index2pfn(struct mlx5_ib_dev *dev, int index)
{
	return (pci_resource_start(dev->mdev.pdev, 0) >> PAGE_SHIFT) + index;
}

static int get_command(unsigned long offset)
{
	return (offset >> MLX5_IB_MMAP_CMD_SHIFT) & MLX5_IB_MMAP_CMD_MASK;
}

static int get_arg(unsigned long offset)
{
	return offset & ((1 << MLX5_IB_MMAP_CMD_SHIFT) - 1);
}

static int get_index(unsigned long offset)
{
	return get_arg(offset);
}
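
/* Map a UAR page into user space as write-combining memory. */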
static int mlx5_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vma)
{
	struct mlx5_ib_ucontext *context = to_mucontext(ibcontext);
	struct mlx5_ib_dev *dev = to_mdev(ibcontext->device);
	struct mlx5_uuar_info *uuari = &context->uuari;
	unsigned long command;
	unsigned long idx;
	phys_addr_t pfn;

	command = get_command(vma->vm_pgoff);
	switch (command) {
	case MLX5_IB_MMAP_REGULAR_PAGE:
		if (vma->vm_end - vma->vm_start != PAGE_SIZE)
			return -EINVAL;

		/* validate the index before dereferencing the UAR table */
		idx = get_index(vma->vm_pgoff);
		if (idx >= uuari->num_uars)
			return -EINVAL;

		pfn = uar_index2pfn(dev, uuari->uars[idx].index);
		mlx5_ib_dbg(dev, "uar idx 0x%lx, pfn 0x%llx\n", idx,
			    (unsigned long long)pfn);

		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
		if (io_remap_pfn_range(vma, vma->vm_start, pfn,
				       PAGE_SIZE, vma->vm_page_prot))
			return -EAGAIN;

		mlx5_ib_dbg(dev, "mapped WC at 0x%lx, PA 0x%llx\n",
			    vma->vm_start,
			    (unsigned long long)pfn << PAGE_SHIFT);
		break;

	case MLX5_IB_MMAP_GET_CONTIGUOUS_PAGES:
		return -ENOSYS;

	default:
		return -EINVAL;
	}

	return 0;
}
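
/*
 * Create a physical-address mkey, used for kernel PDs that need a local
 * lkey covering the whole address space.
 */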
static int alloc_pa_mkey(struct mlx5_ib_dev *dev, u32 *key, u32 pdn)
{
	struct mlx5_create_mkey_mbox_in *in;
	struct mlx5_mkey_seg *seg;
	struct mlx5_core_mr mr;
	int err;

	in = kzalloc(sizeof(*in), GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	seg = &in->seg;
	seg->flags = MLX5_PERM_LOCAL_READ | MLX5_ACCESS_MODE_PA;
	seg->flags_pd = cpu_to_be32(pdn | MLX5_MKEY_LEN64);
	seg->qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
	seg->start_addr = 0;

	/* no async callback/context and no out mailbox */
	err = mlx5_core_create_mkey(&dev->mdev, &mr, in, sizeof(*in),
				    NULL, NULL, NULL);
	if (err) {
		mlx5_ib_warn(dev, "failed to create mkey, %d\n", err);
		goto err_in;
	}

	kfree(in);
	*key = mr.key;

	return 0;

err_in:
	kfree(in);

	return err;
}

static void free_pa_mkey(struct mlx5_ib_dev *dev, u32 key)
{
	struct mlx5_core_mr mr;
	int err;

	memset(&mr, 0, sizeof(mr));
	mr.key = key;
	err = mlx5_core_destroy_mkey(&dev->mdev, &mr);
	if (err)
		mlx5_ib_warn(dev, "failed to destroy mkey 0x%x\n", key);
}
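
/*
 * Allocate a protection domain; user PDs return the PDN via udata, kernel
 * PDs also get a PA mkey.
 */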
static struct ib_pd *mlx5_ib_alloc_pd(struct ib_device *ibdev,
				      struct ib_ucontext *context,
				      struct ib_udata *udata)
{
	struct mlx5_ib_alloc_pd_resp resp;
	struct mlx5_ib_pd *pd;
	int err;

	pd = kmalloc(sizeof(*pd), GFP_KERNEL);
	if (!pd)
		return ERR_PTR(-ENOMEM);

	err = mlx5_core_alloc_pd(&to_mdev(ibdev)->mdev, &pd->pdn);
	if (err) {
		kfree(pd);
		return ERR_PTR(err);
	}

	if (context) {
		resp.pdn = pd->pdn;
		if (ib_copy_to_udata(udata, &resp, sizeof(resp))) {
			mlx5_core_dealloc_pd(&to_mdev(ibdev)->mdev, pd->pdn);
			kfree(pd);
			return ERR_PTR(-EFAULT);
		}
	} else {
		err = alloc_pa_mkey(to_mdev(ibdev), &pd->pa_lkey, pd->pdn);
		if (err) {
			mlx5_core_dealloc_pd(&to_mdev(ibdev)->mdev, pd->pdn);
			kfree(pd);
			return ERR_PTR(err);
		}
	}

	return &pd->ibpd;
}

static int mlx5_ib_dealloc_pd(struct ib_pd *pd)
{
	struct mlx5_ib_dev *mdev = to_mdev(pd->device);
	struct mlx5_ib_pd *mpd = to_mpd(pd);

	if (!pd->uobject)
		free_pa_mkey(mdev, mpd->pa_lkey);

	mlx5_core_dealloc_pd(&mdev->mdev, mpd->pdn);
	kfree(mpd);

	return 0;
}
static int mlx5_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
	struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
	int err;

	err = mlx5_core_attach_mcg(&dev->mdev, gid, ibqp->qp_num);
	if (err)
		mlx5_ib_warn(dev, "failed attaching QPN 0x%x, MGID %pI6\n",
			     ibqp->qp_num, gid->raw);

	return err;
}

static int mlx5_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
	struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
	int err;

	err = mlx5_core_detach_mcg(&dev->mdev, gid, ibqp->qp_num);
	if (err)
		mlx5_ib_warn(dev, "failed detaching QPN 0x%x, MGID %pI6\n",
			     ibqp->qp_num, gid->raw);

	return err;
}
static int init_node_data(struct mlx5_ib_dev *dev)
{
	struct ib_smp *in_mad  = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;

	in_mad  = kzalloc(sizeof(*in_mad), GFP_KERNEL);
	out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_NODE_DESC;

	err = mlx5_MAD_IFC(dev, 1, 1, 1, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	memcpy(dev->ib_dev.node_desc, out_mad->data, 64);

	in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;

	err = mlx5_MAD_IFC(dev, 1, 1, 1, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	dev->mdev.rev_id = be32_to_cpup((__be32 *)(out_mad->data + 32));
	memcpy(&dev->ib_dev.node_guid, out_mad->data + 12, 8);

out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}
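
/* sysfs attributes exposed under the IB device's class device */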
static ssize_t show_fw_pages(struct device *device, struct device_attribute *attr,
			     char *buf)
{
	struct mlx5_ib_dev *dev =
		container_of(device, struct mlx5_ib_dev, ib_dev.dev);

	return sprintf(buf, "%d\n", dev->mdev.priv.fw_pages);
}

static ssize_t show_reg_pages(struct device *device,
			      struct device_attribute *attr, char *buf)
{
	struct mlx5_ib_dev *dev =
		container_of(device, struct mlx5_ib_dev, ib_dev.dev);

	return sprintf(buf, "%d\n", dev->mdev.priv.reg_pages);
}

static ssize_t show_hca(struct device *device, struct device_attribute *attr,
			char *buf)
{
	struct mlx5_ib_dev *dev =
		container_of(device, struct mlx5_ib_dev, ib_dev.dev);

	return sprintf(buf, "MT%d\n", dev->mdev.pdev->device);
}

static ssize_t show_fw_ver(struct device *device, struct device_attribute *attr,
			   char *buf)
{
	struct mlx5_ib_dev *dev =
		container_of(device, struct mlx5_ib_dev, ib_dev.dev);

	return sprintf(buf, "%d.%d.%d\n", fw_rev_maj(&dev->mdev),
		       fw_rev_min(&dev->mdev), fw_rev_sub(&dev->mdev));
}

static ssize_t show_rev(struct device *device, struct device_attribute *attr,
			char *buf)
{
	struct mlx5_ib_dev *dev =
		container_of(device, struct mlx5_ib_dev, ib_dev.dev);

	return sprintf(buf, "%x\n", dev->mdev.rev_id);
}

static ssize_t show_board(struct device *device, struct device_attribute *attr,
			  char *buf)
{
	struct mlx5_ib_dev *dev =
		container_of(device, struct mlx5_ib_dev, ib_dev.dev);

	return sprintf(buf, "%.*s\n", MLX5_BOARD_ID_LEN,
		       dev->mdev.board_id);
}

static DEVICE_ATTR(hw_rev,    S_IRUGO, show_rev,       NULL);
static DEVICE_ATTR(fw_ver,    S_IRUGO, show_fw_ver,    NULL);
static DEVICE_ATTR(hca_type,  S_IRUGO, show_hca,       NULL);
static DEVICE_ATTR(board_id,  S_IRUGO, show_board,     NULL);
static DEVICE_ATTR(fw_pages,  S_IRUGO, show_fw_pages,  NULL);
static DEVICE_ATTR(reg_pages, S_IRUGO, show_reg_pages, NULL);

static struct device_attribute *mlx5_class_attributes[] = {
	&dev_attr_hw_rev,
	&dev_attr_fw_ver,
	&dev_attr_hca_type,
	&dev_attr_board_id,
	&dev_attr_fw_pages,
	&dev_attr_reg_pages,
};
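
/*
 * Translate mlx5 core events into IB events and dispatch them to
 * registered clients.
 */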
static void mlx5_ib_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event,
			  void *data)
{
	struct mlx5_ib_dev *ibdev = container_of(dev, struct mlx5_ib_dev, mdev);
	struct ib_event ibev;
	u8 port = 0;

	switch (event) {
	case MLX5_DEV_EVENT_SYS_ERROR:
		ibdev->ib_active = false;
		ibev.event = IB_EVENT_DEVICE_FATAL;
		break;

	case MLX5_DEV_EVENT_PORT_UP:
		ibev.event = IB_EVENT_PORT_ACTIVE;
		port = *(u8 *)data;	/* port number carried in the event payload */
		break;

	case MLX5_DEV_EVENT_PORT_DOWN:
		ibev.event = IB_EVENT_PORT_ERR;
		port = *(u8 *)data;
		break;

	case MLX5_DEV_EVENT_PORT_INITIALIZED:
		/* not used by ULPs */
		return;

	case MLX5_DEV_EVENT_LID_CHANGE:
		ibev.event = IB_EVENT_LID_CHANGE;
		port = *(u8 *)data;
		break;

	case MLX5_DEV_EVENT_PKEY_CHANGE:
		ibev.event = IB_EVENT_PKEY_CHANGE;
		port = *(u8 *)data;
		break;

	case MLX5_DEV_EVENT_GUID_CHANGE:
		ibev.event = IB_EVENT_GID_CHANGE;
		port = *(u8 *)data;
		break;

	case MLX5_DEV_EVENT_CLIENT_REREG:
		ibev.event = IB_EVENT_CLIENT_REREGISTER;
		port = *(u8 *)data;
		break;
	}

	ibev.device	      = &ibdev->ib_dev;
	ibev.element.port_num = port;

	if (port < 1 || port > ibdev->num_ports) {
		mlx5_ib_warn(ibdev, "warning: event on port %d\n", port);
		return;
	}

	if (ibdev->ib_active)
		ib_dispatch_event(&ibev);
}
static void get_ext_port_caps(struct mlx5_ib_dev *dev)
{
	int port;

	for (port = 1; port <= dev->mdev.caps.num_ports; port++)
		mlx5_query_ext_port_caps(dev, port);
}
static int get_port_caps(struct mlx5_ib_dev *dev)
{
	struct ib_device_attr *dprops = NULL;
	struct ib_port_attr *pprops = NULL;
	int err = -ENOMEM;
	int port;

	pprops = kmalloc(sizeof(*pprops), GFP_KERNEL);
	if (!pprops)
		goto out;

	dprops = kmalloc(sizeof(*dprops), GFP_KERNEL);
	if (!dprops)
		goto out;

	err = mlx5_ib_query_device(&dev->ib_dev, dprops);
	if (err) {
		mlx5_ib_warn(dev, "query_device failed %d\n", err);
		goto out;
	}

	for (port = 1; port <= dev->mdev.caps.num_ports; port++) {
		err = mlx5_ib_query_port(&dev->ib_dev, port, pprops);
		if (err) {
			mlx5_ib_warn(dev, "query_port %d failed %d\n", port, err);
			break;
		}
		dev->mdev.caps.port[port - 1].pkey_table_len = dprops->max_pkeys;
		dev->mdev.caps.port[port - 1].gid_table_len = pprops->gid_tbl_len;
		mlx5_ib_dbg(dev, "pkey_table_len %d, gid_table_len %d\n",
			    dprops->max_pkeys, pprops->gid_tbl_len);
	}

out:
	kfree(pprops);
	kfree(dprops);

	return err;
}
static void destroy_umrc_res(struct mlx5_ib_dev *dev)
{
	int err;

	err = mlx5_mr_cache_cleanup(dev);
	if (err)
		mlx5_ib_warn(dev, "mr cache cleanup failed\n");

	mlx5_ib_destroy_qp(dev->umrc.qp);
	ib_destroy_cq(dev->umrc.cq);
	ib_dereg_mr(dev->umrc.mr);
	ib_dealloc_pd(dev->umrc.pd);
}
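
/*
 * Create the PD, CQ and UMR QP used for user memory registration and move
 * the QP to RTS.
 */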
static int create_umr_res(struct mlx5_ib_dev *dev)
{
	struct ib_qp_init_attr *init_attr = NULL;
	struct ib_qp_attr *attr = NULL;
	struct ib_pd *pd;
	struct ib_cq *cq;
	struct ib_qp *qp;
	struct ib_mr *mr;
	int ret;

	attr = kzalloc(sizeof(*attr), GFP_KERNEL);
	init_attr = kzalloc(sizeof(*init_attr), GFP_KERNEL);
	if (!attr || !init_attr) {
		ret = -ENOMEM;
		goto error_0;
	}

	pd = ib_alloc_pd(&dev->ib_dev);
	if (IS_ERR(pd)) {
		mlx5_ib_dbg(dev, "Couldn't create PD for sync UMR QP\n");
		ret = PTR_ERR(pd);
		goto error_0;
	}

	mr = ib_get_dma_mr(pd, IB_ACCESS_LOCAL_WRITE);
	if (IS_ERR(mr)) {
		mlx5_ib_dbg(dev, "Couldn't create DMA MR for sync UMR QP\n");
		ret = PTR_ERR(mr);
		goto error_1;
	}

	cq = ib_create_cq(&dev->ib_dev, mlx5_umr_cq_handler, NULL, NULL, 128,
			  0);
	if (IS_ERR(cq)) {
		mlx5_ib_dbg(dev, "Couldn't create CQ for sync UMR QP\n");
		ret = PTR_ERR(cq);
		goto error_2;
	}
	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);

	init_attr->send_cq = cq;
	init_attr->recv_cq = cq;
	init_attr->sq_sig_type = IB_SIGNAL_ALL_WR;
	init_attr->cap.max_send_wr = MAX_UMR_WR;
	init_attr->cap.max_send_sge = 1;
	init_attr->qp_type = MLX5_IB_QPT_REG_UMR;
	init_attr->port_num = 1;
	qp = mlx5_ib_create_qp(pd, init_attr, NULL);
	if (IS_ERR(qp)) {
		mlx5_ib_dbg(dev, "Couldn't create sync UMR QP\n");
		ret = PTR_ERR(qp);
		goto error_3;
	}
	qp->device  = &dev->ib_dev;
	qp->qp_type = MLX5_IB_QPT_REG_UMR;

	attr->qp_state = IB_QPS_INIT;
	attr->port_num = 1;
	ret = mlx5_ib_modify_qp(qp, attr, IB_QP_STATE | IB_QP_PKEY_INDEX |
				IB_QP_PORT, NULL);
	if (ret) {
		mlx5_ib_dbg(dev, "Couldn't modify UMR QP\n");
		goto error_4;
	}

	memset(attr, 0, sizeof(*attr));
	attr->qp_state = IB_QPS_RTR;
	attr->path_mtu = IB_MTU_256;

	ret = mlx5_ib_modify_qp(qp, attr, IB_QP_STATE, NULL);
	if (ret) {
		mlx5_ib_dbg(dev, "Couldn't modify umr QP to rtr\n");
		goto error_4;
	}

	memset(attr, 0, sizeof(*attr));
	attr->qp_state = IB_QPS_RTS;
	ret = mlx5_ib_modify_qp(qp, attr, IB_QP_STATE, NULL);
	if (ret) {
		mlx5_ib_dbg(dev, "Couldn't modify umr QP to rts\n");
		goto error_4;
	}

	dev->umrc.qp = qp;
	dev->umrc.cq = cq;
	dev->umrc.mr = mr;
	dev->umrc.pd = pd;

	sema_init(&dev->umrc.sem, MAX_UMR_WR);
	ret = mlx5_mr_cache_init(dev);
	if (ret) {
		mlx5_ib_warn(dev, "mr cache init failed %d\n", ret);
		goto error_4;
	}

	kfree(attr);
	kfree(init_attr);
	return 0;

error_4:
	mlx5_ib_destroy_qp(qp);
error_3:
	ib_destroy_cq(cq);
error_2:
	ib_dereg_mr(mr);
error_1:
	ib_dealloc_pd(pd);
error_0:
	kfree(attr);
	kfree(init_attr);
	return ret;
}
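
/*
 * Create device-global verbs resources (PD, CQ, two XRCDs and an XRC SRQ)
 * used internally by the driver.
 */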
static int create_dev_resources(struct mlx5_ib_resources *devr)
{
	struct ib_srq_init_attr attr;
	struct mlx5_ib_dev *dev;
	int ret = 0;

	dev = container_of(devr, struct mlx5_ib_dev, devr);

	devr->p0 = mlx5_ib_alloc_pd(&dev->ib_dev, NULL, NULL);
	if (IS_ERR(devr->p0)) {
		ret = PTR_ERR(devr->p0);
		goto error0;
	}
	devr->p0->device  = &dev->ib_dev;
	devr->p0->uobject = NULL;
	atomic_set(&devr->p0->usecnt, 0);

	devr->c0 = mlx5_ib_create_cq(&dev->ib_dev, 1, 0, NULL, NULL);
	if (IS_ERR(devr->c0)) {
		ret = PTR_ERR(devr->c0);
		goto error1;
	}
	devr->c0->device        = &dev->ib_dev;
	devr->c0->uobject       = NULL;
	devr->c0->comp_handler  = NULL;
	devr->c0->event_handler = NULL;
	devr->c0->cq_context    = NULL;
	atomic_set(&devr->c0->usecnt, 0);

	devr->x0 = mlx5_ib_alloc_xrcd(&dev->ib_dev, NULL, NULL);
	if (IS_ERR(devr->x0)) {
		ret = PTR_ERR(devr->x0);
		goto error2;
	}
	devr->x0->device = &dev->ib_dev;
	devr->x0->inode = NULL;
	atomic_set(&devr->x0->usecnt, 0);
	mutex_init(&devr->x0->tgt_qp_mutex);
	INIT_LIST_HEAD(&devr->x0->tgt_qp_list);

	devr->x1 = mlx5_ib_alloc_xrcd(&dev->ib_dev, NULL, NULL);
	if (IS_ERR(devr->x1)) {
		ret = PTR_ERR(devr->x1);
		goto error3;
	}
	devr->x1->device = &dev->ib_dev;
	devr->x1->inode = NULL;
	atomic_set(&devr->x1->usecnt, 0);
	mutex_init(&devr->x1->tgt_qp_mutex);
	INIT_LIST_HEAD(&devr->x1->tgt_qp_list);

	memset(&attr, 0, sizeof(attr));
	attr.attr.max_sge = 1;
	attr.attr.max_wr = 1;
	attr.srq_type = IB_SRQT_XRC;
	attr.ext.xrc.cq = devr->c0;
	attr.ext.xrc.xrcd = devr->x0;

	devr->s0 = mlx5_ib_create_srq(devr->p0, &attr, NULL);
	if (IS_ERR(devr->s0)) {
		ret = PTR_ERR(devr->s0);
		goto error4;
	}
	devr->s0->device	= &dev->ib_dev;
	devr->s0->pd		= devr->p0;
	devr->s0->uobject	= NULL;
	devr->s0->event_handler = NULL;
	devr->s0->srq_context	= NULL;
	devr->s0->srq_type	= IB_SRQT_XRC;
	devr->s0->ext.xrc.xrcd	= devr->x0;
	devr->s0->ext.xrc.cq	= devr->c0;
	atomic_inc(&devr->s0->ext.xrc.xrcd->usecnt);
	atomic_inc(&devr->s0->ext.xrc.cq->usecnt);
	atomic_inc(&devr->p0->usecnt);
	atomic_set(&devr->s0->usecnt, 0);

	return 0;

error4:
	mlx5_ib_dealloc_xrcd(devr->x1);
error3:
	mlx5_ib_dealloc_xrcd(devr->x0);
error2:
	mlx5_ib_destroy_cq(devr->c0);
error1:
	mlx5_ib_dealloc_pd(devr->p0);
error0:
	return ret;
}

static void destroy_dev_resources(struct mlx5_ib_resources *devr)
{
	mlx5_ib_destroy_srq(devr->s0);
	mlx5_ib_dealloc_xrcd(devr->x0);
	mlx5_ib_dealloc_xrcd(devr->x1);
	mlx5_ib_destroy_cq(devr->c0);
	mlx5_ib_dealloc_pd(devr->p0);
}
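
/*
 * PCI probe: initialize the mlx5 core device, set up completion EQs and
 * verbs resources, and register the IB device.
 */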
static int init_one(struct pci_dev *pdev,
		    const struct pci_device_id *id)
{
	struct mlx5_core_dev *mdev;
	struct mlx5_ib_dev *dev;
	int err;
	int i;

	printk_once(KERN_INFO "%s", mlx5_version);

	dev = (struct mlx5_ib_dev *)ib_alloc_device(sizeof(*dev));
	if (!dev)
		return -ENOMEM;

	mdev = &dev->mdev;
	mdev->event = mlx5_ib_event;
	if (prof_sel >= ARRAY_SIZE(profile)) {
		pr_warn("selected profile out of range, selecting default\n");
		prof_sel = 2;
	}
	mdev->profile = &profile[prof_sel];
	err = mlx5_dev_init(mdev, pdev);
	if (err)
		goto err_free;

	err = get_port_caps(dev);
	if (err)
		goto err_cleanup;

	get_ext_port_caps(dev);

	err = alloc_comp_eqs(dev);
	if (err)
		goto err_cleanup;

	MLX5_INIT_DOORBELL_LOCK(&dev->uar_lock);

	strlcpy(dev->ib_dev.name, "mlx5_%d", IB_DEVICE_NAME_MAX);
	dev->ib_dev.owner		= THIS_MODULE;
	dev->ib_dev.node_type		= RDMA_NODE_IB_CA;
	dev->ib_dev.local_dma_lkey	= mdev->caps.reserved_lkey;
	dev->num_ports			= mdev->caps.num_ports;
	dev->ib_dev.phys_port_cnt	= dev->num_ports;
	dev->ib_dev.num_comp_vectors	= dev->num_comp_vectors;
	dev->ib_dev.dma_device		= &mdev->pdev->dev;

	dev->ib_dev.uverbs_abi_ver	= MLX5_IB_UVERBS_ABI_VERSION;
	dev->ib_dev.uverbs_cmd_mask	=
		(1ull << IB_USER_VERBS_CMD_GET_CONTEXT)		|
		(1ull << IB_USER_VERBS_CMD_QUERY_DEVICE)	|
		(1ull << IB_USER_VERBS_CMD_QUERY_PORT)		|
		(1ull << IB_USER_VERBS_CMD_ALLOC_PD)		|
		(1ull << IB_USER_VERBS_CMD_DEALLOC_PD)		|
		(1ull << IB_USER_VERBS_CMD_REG_MR)		|
		(1ull << IB_USER_VERBS_CMD_DEREG_MR)		|
		(1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL)	|
		(1ull << IB_USER_VERBS_CMD_CREATE_CQ)		|
		(1ull << IB_USER_VERBS_CMD_RESIZE_CQ)		|
		(1ull << IB_USER_VERBS_CMD_DESTROY_CQ)		|
		(1ull << IB_USER_VERBS_CMD_CREATE_QP)		|
		(1ull << IB_USER_VERBS_CMD_MODIFY_QP)		|
		(1ull << IB_USER_VERBS_CMD_QUERY_QP)		|
		(1ull << IB_USER_VERBS_CMD_DESTROY_QP)		|
		(1ull << IB_USER_VERBS_CMD_ATTACH_MCAST)	|
		(1ull << IB_USER_VERBS_CMD_DETACH_MCAST)	|
		(1ull << IB_USER_VERBS_CMD_CREATE_SRQ)		|
		(1ull << IB_USER_VERBS_CMD_MODIFY_SRQ)		|
		(1ull << IB_USER_VERBS_CMD_QUERY_SRQ)		|
		(1ull << IB_USER_VERBS_CMD_DESTROY_SRQ)		|
		(1ull << IB_USER_VERBS_CMD_CREATE_XSRQ)		|
		(1ull << IB_USER_VERBS_CMD_OPEN_QP);

	dev->ib_dev.query_device	= mlx5_ib_query_device;
	dev->ib_dev.query_port		= mlx5_ib_query_port;
	dev->ib_dev.query_gid		= mlx5_ib_query_gid;
	dev->ib_dev.query_pkey		= mlx5_ib_query_pkey;
	dev->ib_dev.modify_device	= mlx5_ib_modify_device;
	dev->ib_dev.modify_port		= mlx5_ib_modify_port;
	dev->ib_dev.alloc_ucontext	= mlx5_ib_alloc_ucontext;
	dev->ib_dev.dealloc_ucontext	= mlx5_ib_dealloc_ucontext;
	dev->ib_dev.mmap		= mlx5_ib_mmap;
	dev->ib_dev.alloc_pd		= mlx5_ib_alloc_pd;
	dev->ib_dev.dealloc_pd		= mlx5_ib_dealloc_pd;
	dev->ib_dev.create_ah		= mlx5_ib_create_ah;
	dev->ib_dev.query_ah		= mlx5_ib_query_ah;
	dev->ib_dev.destroy_ah		= mlx5_ib_destroy_ah;
	dev->ib_dev.create_srq		= mlx5_ib_create_srq;
	dev->ib_dev.modify_srq		= mlx5_ib_modify_srq;
	dev->ib_dev.query_srq		= mlx5_ib_query_srq;
	dev->ib_dev.destroy_srq		= mlx5_ib_destroy_srq;
	dev->ib_dev.post_srq_recv	= mlx5_ib_post_srq_recv;
	dev->ib_dev.create_qp		= mlx5_ib_create_qp;
	dev->ib_dev.modify_qp		= mlx5_ib_modify_qp;
	dev->ib_dev.query_qp		= mlx5_ib_query_qp;
	dev->ib_dev.destroy_qp		= mlx5_ib_destroy_qp;
	dev->ib_dev.post_send		= mlx5_ib_post_send;
	dev->ib_dev.post_recv		= mlx5_ib_post_recv;
	dev->ib_dev.create_cq		= mlx5_ib_create_cq;
	dev->ib_dev.modify_cq		= mlx5_ib_modify_cq;
	dev->ib_dev.resize_cq		= mlx5_ib_resize_cq;
	dev->ib_dev.destroy_cq		= mlx5_ib_destroy_cq;
	dev->ib_dev.poll_cq		= mlx5_ib_poll_cq;
	dev->ib_dev.req_notify_cq	= mlx5_ib_arm_cq;
	dev->ib_dev.get_dma_mr		= mlx5_ib_get_dma_mr;
	dev->ib_dev.reg_user_mr		= mlx5_ib_reg_user_mr;
	dev->ib_dev.dereg_mr		= mlx5_ib_dereg_mr;
	dev->ib_dev.attach_mcast	= mlx5_ib_mcg_attach;
	dev->ib_dev.detach_mcast	= mlx5_ib_mcg_detach;
	dev->ib_dev.process_mad		= mlx5_ib_process_mad;
	dev->ib_dev.alloc_fast_reg_mr	= mlx5_ib_alloc_fast_reg_mr;
	dev->ib_dev.alloc_fast_reg_page_list = mlx5_ib_alloc_fast_reg_page_list;
	dev->ib_dev.free_fast_reg_page_list  = mlx5_ib_free_fast_reg_page_list;

	if (mdev->caps.flags & MLX5_DEV_CAP_FLAG_XRC) {
		dev->ib_dev.alloc_xrcd = mlx5_ib_alloc_xrcd;
		dev->ib_dev.dealloc_xrcd = mlx5_ib_dealloc_xrcd;
		dev->ib_dev.uverbs_cmd_mask |=
			(1ull << IB_USER_VERBS_CMD_OPEN_XRCD) |
			(1ull << IB_USER_VERBS_CMD_CLOSE_XRCD);
	}

	err = init_node_data(dev);
	if (err)
		goto err_eqs;

	mutex_init(&dev->cap_mask_mutex);
	spin_lock_init(&dev->mr_lock);

	err = create_dev_resources(&dev->devr);
	if (err)
		goto err_eqs;

	err = ib_register_device(&dev->ib_dev, NULL);
	if (err)
		goto err_rsrc;

	err = create_umr_res(dev);
	if (err)
		goto err_dev;

	for (i = 0; i < ARRAY_SIZE(mlx5_class_attributes); i++) {
		err = device_create_file(&dev->ib_dev.dev,
					 mlx5_class_attributes[i]);
		if (err)
			goto err_umrc;
	}

	dev->ib_active = true;

	return 0;

err_umrc:
	destroy_umrc_res(dev);

err_dev:
	ib_unregister_device(&dev->ib_dev);

err_rsrc:
	destroy_dev_resources(&dev->devr);

err_eqs:
	free_comp_eqs(dev);

err_cleanup:
	mlx5_dev_cleanup(mdev);

err_free:
	ib_dealloc_device((struct ib_device *)dev);

	return err;
}
static void remove_one(struct pci_dev *pdev)
{
	struct mlx5_ib_dev *dev = mlx5_pci2ibdev(pdev);

	destroy_umrc_res(dev);
	ib_unregister_device(&dev->ib_dev);
	destroy_dev_resources(&dev->devr);
	free_comp_eqs(dev);
	mlx5_dev_cleanup(&dev->mdev);
	ib_dealloc_device(&dev->ib_dev);
}

static DEFINE_PCI_DEVICE_TABLE(mlx5_ib_pci_table) = {
	{ PCI_VDEVICE(MELLANOX, 4113) }, /* MT4113 Connect-IB */
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, mlx5_ib_pci_table);
static struct pci_driver mlx5_ib_driver = {
	.name		= DRIVER_NAME,
	.id_table	= mlx5_ib_pci_table,
	.probe		= init_one,
	.remove		= remove_one
};

static int __init mlx5_ib_init(void)
{
	return pci_register_driver(&mlx5_ib_driver);
}

static void __exit mlx5_ib_cleanup(void)
{
	pci_unregister_driver(&mlx5_ib_driver);
}

module_init(mlx5_ib_init);
module_exit(mlx5_ib_cleanup);