/*
 * Copyright (c) 2013, Mellanox Technologies inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <asm-generic/kmap_types.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/io-mapping.h>
#include <linux/sched.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_umem.h>
#include "mlx5_ib.h"

#define DRIVER_NAME "mlx5_ib"
#define DRIVER_VERSION "1.0"
#define DRIVER_RELDATE "June 2013"
MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>");
MODULE_DESCRIPTION("Mellanox Connect-IB HCA IB driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRIVER_VERSION);
static int prof_sel = 2;
module_param_named(prof_sel, prof_sel, int, 0444);
MODULE_PARM_DESC(prof_sel, "profile selector. Valid range 0 - 2");
static char mlx5_version[] =
        DRIVER_NAME ": Mellanox Connect-IB Infiniband driver v"
        DRIVER_VERSION " (" DRIVER_RELDATE ")\n";
static struct mlx5_profile profile[] = {
        [0] = {
                .mask   = 0,
        },
        [1] = {
                .mask   = MLX5_PROF_MASK_QP_SIZE,
        },
        [2] = {
                .mask   = MLX5_PROF_MASK_QP_SIZE |
                          MLX5_PROF_MASK_MR_CACHE,
        },
};
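/*
 * Translate a completion vector number into the EQN and IRQ number of
 * the completion EQ that alloc_comp_eqs() created for it.
 */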
int mlx5_vector2eqn(struct mlx5_ib_dev *dev, int vector, int *eqn, int *irqn)
{
        struct mlx5_eq_table *table = &dev->mdev.priv.eq_table;
        struct mlx5_eq *eq, *n;
        int err = -ENOENT;

        spin_lock(&table->lock);
        list_for_each_entry_safe(eq, n, &dev->eqs_list, list) {
                if (eq->index == vector) {
                        *eqn = eq->eqn;
                        *irqn = eq->irqn;
                        err = 0;
                        break;
                }
        }
        spin_unlock(&table->lock);

        return err;
}
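/*
 * Create one completion EQ per completion vector reported by the core
 * driver and keep them on dev->eqs_list; on any failure, tear down the
 * EQs created so far.
 */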
static int alloc_comp_eqs(struct mlx5_ib_dev *dev)
{
        struct mlx5_eq_table *table = &dev->mdev.priv.eq_table;
        struct mlx5_eq *eq, *n;
        int ncomp_vec;
        int nent;
        int err;
        int i;

        INIT_LIST_HEAD(&dev->eqs_list);
        ncomp_vec = table->num_comp_vectors;
        nent = MLX5_COMP_EQ_SIZE;
        for (i = 0; i < ncomp_vec; i++) {
                eq = kzalloc(sizeof(*eq), GFP_KERNEL);
                if (!eq) {
                        err = -ENOMEM;
                        goto clean;
                }

                snprintf(eq->name, MLX5_MAX_EQ_NAME, "mlx5_comp%d", i);
                err = mlx5_create_map_eq(&dev->mdev, eq,
                                         i + MLX5_EQ_VEC_COMP_BASE, nent, 0,
                                         eq->name,
                                         &dev->mdev.priv.uuari.uars[0]);
                if (err) {
                        kfree(eq);
                        goto clean;
                }

                mlx5_ib_dbg(dev, "allocated completion EQN %d\n", eq->eqn);
                eq->index = i;
                spin_lock(&table->lock);
                list_add_tail(&eq->list, &dev->eqs_list);
                spin_unlock(&table->lock);
        }

        dev->num_comp_vectors = ncomp_vec;
        return 0;

clean:
        spin_lock(&table->lock);
        list_for_each_entry_safe(eq, n, &dev->eqs_list, list) {
                list_del(&eq->list);
                spin_unlock(&table->lock);
                if (mlx5_destroy_unmap_eq(&dev->mdev, eq))
                        mlx5_ib_warn(dev, "failed to destroy EQ 0x%x\n", eq->eqn);
                kfree(eq);
                spin_lock(&table->lock);
        }
        spin_unlock(&table->lock);
        return err;
}
static void free_comp_eqs(struct mlx5_ib_dev *dev)
{
        struct mlx5_eq_table *table = &dev->mdev.priv.eq_table;
        struct mlx5_eq *eq, *n;

        spin_lock(&table->lock);
        list_for_each_entry_safe(eq, n, &dev->eqs_list, list) {
                list_del(&eq->list);
                spin_unlock(&table->lock);
                if (mlx5_destroy_unmap_eq(&dev->mdev, eq))
                        mlx5_ib_warn(dev, "failed to destroy EQ 0x%x\n", eq->eqn);
                kfree(eq);
                spin_lock(&table->lock);
        }
        spin_unlock(&table->lock);
}
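/*
 * Fill in ib_device_attr from a NodeInfo MAD query combined with the
 * firmware capability limits cached in dev->mdev.caps.
 */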
static int mlx5_ib_query_device(struct ib_device *ibdev,
                                struct ib_device_attr *props)
{
        struct mlx5_ib_dev *dev = to_mdev(ibdev);
        struct ib_smp *in_mad  = NULL;
        struct ib_smp *out_mad = NULL;
        int err = -ENOMEM;
        int max_rq_sg;
        int max_sq_sg;
        u64 flags;

        in_mad  = kzalloc(sizeof(*in_mad), GFP_KERNEL);
        out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
        if (!in_mad || !out_mad)
                goto out;

        init_query_mad(in_mad);
        in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;

        err = mlx5_MAD_IFC(to_mdev(ibdev), 1, 1, 1, NULL, NULL, in_mad, out_mad);
        if (err)
                goto out;

        memset(props, 0, sizeof(*props));

        props->fw_ver = ((u64)fw_rev_maj(&dev->mdev) << 32) |
                (fw_rev_min(&dev->mdev) << 16) |
                fw_rev_sub(&dev->mdev);
        props->device_cap_flags    = IB_DEVICE_CHANGE_PHY_PORT |
                IB_DEVICE_PORT_ACTIVE_EVENT |
                IB_DEVICE_SYS_IMAGE_GUID |
                IB_DEVICE_RC_RNR_NAK_GEN |
                IB_DEVICE_BLOCK_MULTICAST_LOOPBACK;
        flags = dev->mdev.caps.flags;
        if (flags & MLX5_DEV_CAP_FLAG_BAD_PKEY_CNTR)
                props->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR;
        if (flags & MLX5_DEV_CAP_FLAG_BAD_QKEY_CNTR)
                props->device_cap_flags |= IB_DEVICE_BAD_QKEY_CNTR;
        if (flags & MLX5_DEV_CAP_FLAG_APM)
                props->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG;
        props->device_cap_flags |= IB_DEVICE_LOCAL_DMA_LKEY;
        if (flags & MLX5_DEV_CAP_FLAG_XRC)
                props->device_cap_flags |= IB_DEVICE_XRC;
        props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;

        props->vendor_id           = be32_to_cpup((__be32 *)(out_mad->data + 36)) &
                0xffffff;
        props->vendor_part_id      = be16_to_cpup((__be16 *)(out_mad->data + 30));
        props->hw_ver              = be32_to_cpup((__be32 *)(out_mad->data + 32));
        memcpy(&props->sys_image_guid, out_mad->data + 4, 8);

        props->max_mr_size         = ~0ull;
        props->page_size_cap       = dev->mdev.caps.min_page_sz;
        props->max_qp              = 1 << dev->mdev.caps.log_max_qp;
        props->max_qp_wr           = dev->mdev.caps.max_wqes;
        max_rq_sg = dev->mdev.caps.max_rq_desc_sz / sizeof(struct mlx5_wqe_data_seg);
        max_sq_sg = (dev->mdev.caps.max_sq_desc_sz - sizeof(struct mlx5_wqe_ctrl_seg)) /
                sizeof(struct mlx5_wqe_data_seg);
        props->max_sge = min(max_rq_sg, max_sq_sg);
        props->max_cq              = 1 << dev->mdev.caps.log_max_cq;
        props->max_cqe             = dev->mdev.caps.max_cqes - 1;
        props->max_mr              = 1 << dev->mdev.caps.log_max_mkey;
        props->max_pd              = 1 << dev->mdev.caps.log_max_pd;
        props->max_qp_rd_atom      = dev->mdev.caps.max_ra_req_qp;
        props->max_qp_init_rd_atom = dev->mdev.caps.max_ra_res_qp;
        props->max_res_rd_atom     = props->max_qp_rd_atom * props->max_qp;
        props->max_srq             = 1 << dev->mdev.caps.log_max_srq;
        props->max_srq_wr          = dev->mdev.caps.max_srq_wqes - 1;
        props->max_srq_sge         = max_rq_sg - 1;
        props->max_fast_reg_page_list_len = (unsigned int)-1;
        props->local_ca_ack_delay  = dev->mdev.caps.local_ca_ack_delay;
        props->atomic_cap          = dev->mdev.caps.flags & MLX5_DEV_CAP_FLAG_ATOMIC ?
                IB_ATOMIC_HCA : IB_ATOMIC_NONE;
        props->masked_atomic_cap   = IB_ATOMIC_HCA;
        props->max_pkeys           = be16_to_cpup((__be16 *)(out_mad->data + 28));
        props->max_mcast_grp       = 1 << dev->mdev.caps.log_max_mcg;
        props->max_mcast_qp_attach = dev->mdev.caps.max_qp_mcg;
        props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
                                           props->max_mcast_grp;
        props->max_map_per_fmr = INT_MAX; /* no limit in ConnectIB */

out:
        kfree(in_mad);
        kfree(out_mad);

        return err;
}
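/*
 * Query PortInfo via a MAD and translate it into ib_port_attr.
 * Extended link speeds (FDR/EDR) and FDR-10 are not covered by the
 * base PortInfo fields and need the extra decoding below.
 */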
int mlx5_ib_query_port(struct ib_device *ibdev, u8 port,
                       struct ib_port_attr *props)
{
        struct mlx5_ib_dev *dev = to_mdev(ibdev);
        struct ib_smp *in_mad  = NULL;
        struct ib_smp *out_mad = NULL;
        int ext_active_speed;
        int err = -ENOMEM;

        if (port < 1 || port > dev->mdev.caps.num_ports) {
                mlx5_ib_warn(dev, "invalid port number %d\n", port);
                return -EINVAL;
        }

        in_mad  = kzalloc(sizeof(*in_mad), GFP_KERNEL);
        out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
        if (!in_mad || !out_mad)
                goto out;

        memset(props, 0, sizeof(*props));

        init_query_mad(in_mad);
        in_mad->attr_id  = IB_SMP_ATTR_PORT_INFO;
        in_mad->attr_mod = cpu_to_be32(port);

        err = mlx5_MAD_IFC(dev, 1, 1, port, NULL, NULL, in_mad, out_mad);
        if (err) {
                mlx5_ib_warn(dev, "err %d\n", err);
                goto out;
        }

        props->lid              = be16_to_cpup((__be16 *)(out_mad->data + 16));
        props->lmc              = out_mad->data[34] & 0x7;
        props->sm_lid           = be16_to_cpup((__be16 *)(out_mad->data + 18));
        props->sm_sl            = out_mad->data[36] & 0xf;
        props->state            = out_mad->data[32] & 0xf;
        props->phys_state       = out_mad->data[33] >> 4;
        props->port_cap_flags   = be32_to_cpup((__be32 *)(out_mad->data + 20));
        props->gid_tbl_len      = out_mad->data[50];
        props->max_msg_sz       = 1 << to_mdev(ibdev)->mdev.caps.log_max_msg;
        props->pkey_tbl_len     = to_mdev(ibdev)->mdev.caps.port[port - 1].pkey_table_len;
        props->bad_pkey_cntr    = be16_to_cpup((__be16 *)(out_mad->data + 46));
        props->qkey_viol_cntr   = be16_to_cpup((__be16 *)(out_mad->data + 48));
        props->active_width     = out_mad->data[31] & 0xf;
        props->active_speed     = out_mad->data[35] >> 4;
        props->max_mtu          = out_mad->data[41] & 0xf;
        props->active_mtu       = out_mad->data[36] >> 4;
        props->subnet_timeout   = out_mad->data[51] & 0x1f;
        props->max_vl_num       = out_mad->data[37] >> 4;
        props->init_type_reply  = out_mad->data[41] >> 4;

        /* Check if extended speeds (EDR/FDR/...) are supported */
        if (props->port_cap_flags & IB_PORT_EXTENDED_SPEEDS_SUP) {
                ext_active_speed = out_mad->data[62] >> 4;

                switch (ext_active_speed) {
                case 1:
                        props->active_speed = 16; /* FDR */
                        break;
                case 2:
                        props->active_speed = 32; /* EDR */
                        break;
                }
        }

        /* If reported active speed is QDR, check if is FDR-10 */
        if (props->active_speed == 4) {
                if (dev->mdev.caps.ext_port_cap[port - 1] &
                    MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO) {
                        init_query_mad(in_mad);
                        in_mad->attr_id = MLX5_ATTR_EXTENDED_PORT_INFO;
                        in_mad->attr_mod = cpu_to_be32(port);

                        err = mlx5_MAD_IFC(dev, 1, 1, port,
                                           NULL, NULL, in_mad, out_mad);
                        if (err)
                                goto out;

                        /* Checking LinkSpeedActive for FDR-10 */
                        if (out_mad->data[15] & 0x1)
                                props->active_speed = 8;
                }
        }

out:
        kfree(in_mad);
        kfree(out_mad);

        return err;
}
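/*
 * A GID is assembled from two MADs: bytes 8..15 of PortInfo supply the
 * subnet prefix, and the GUIDInfo entry selected by the index supplies
 * the interface GUID.
 */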
static int mlx5_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
                             union ib_gid *gid)
{
        struct ib_smp *in_mad  = NULL;
        struct ib_smp *out_mad = NULL;
        int err = -ENOMEM;

        in_mad  = kzalloc(sizeof(*in_mad), GFP_KERNEL);
        out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
        if (!in_mad || !out_mad)
                goto out;

        init_query_mad(in_mad);
        in_mad->attr_id  = IB_SMP_ATTR_PORT_INFO;
        in_mad->attr_mod = cpu_to_be32(port);

        err = mlx5_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad, out_mad);
        if (err)
                goto out;

        memcpy(gid->raw, out_mad->data + 8, 8);

        init_query_mad(in_mad);
        in_mad->attr_id  = IB_SMP_ATTR_GUID_INFO;
        in_mad->attr_mod = cpu_to_be32(index / 8);

        err = mlx5_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad, out_mad);
        if (err)
                goto out;

        memcpy(gid->raw + 8, out_mad->data + (index % 8) * 8, 8);

out:
        kfree(in_mad);
        kfree(out_mad);
        return err;
}
static int mlx5_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
                              u16 *pkey)
{
        struct ib_smp *in_mad  = NULL;
        struct ib_smp *out_mad = NULL;
        int err = -ENOMEM;

        in_mad  = kzalloc(sizeof(*in_mad), GFP_KERNEL);
        out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
        if (!in_mad || !out_mad)
                goto out;

        init_query_mad(in_mad);
        in_mad->attr_id  = IB_SMP_ATTR_PKEY_TABLE;
        in_mad->attr_mod = cpu_to_be32(index / 32);

        err = mlx5_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad, out_mad);
        if (err)
                goto out;

        *pkey = be16_to_cpu(((__be16 *)out_mad->data)[index % 32]);

out:
        kfree(in_mad);
        kfree(out_mad);
        return err;
}
struct mlx5_reg_node_desc {
        u8      desc[64];
};
static int mlx5_ib_modify_device(struct ib_device *ibdev, int mask,
                                 struct ib_device_modify *props)
{
        struct mlx5_ib_dev *dev = to_mdev(ibdev);
        struct mlx5_reg_node_desc in;
        struct mlx5_reg_node_desc out;
        int err;

        if (mask & ~IB_DEVICE_MODIFY_NODE_DESC)
                return -EOPNOTSUPP;

        if (!(mask & IB_DEVICE_MODIFY_NODE_DESC))
                return 0;

        /*
         * If possible, pass node desc to FW, so it can generate
         * a 144 trap.  If cmd fails, just ignore.
         */
        memcpy(&in, props->node_desc, 64);
        err = mlx5_core_access_reg(&dev->mdev, &in, sizeof(in), &out,
                                   sizeof(out), MLX5_REG_NODE_DESC, 0, 1);
        if (err)
                return err;

        memcpy(ibdev->node_desc, props->node_desc, 64);

        return err;
}
static int mlx5_ib_modify_port(struct ib_device *ibdev, u8 port, int mask,
                               struct ib_port_modify *props)
{
        struct mlx5_ib_dev *dev = to_mdev(ibdev);
        struct ib_port_attr attr;
        u32 tmp;
        int err;

        mutex_lock(&dev->cap_mask_mutex);

        err = mlx5_ib_query_port(ibdev, port, &attr);
        if (err)
                goto out;

        tmp = (attr.port_cap_flags | props->set_port_cap_mask) &
                ~props->clr_port_cap_mask;

        err = mlx5_set_port_caps(&dev->mdev, port, tmp);

out:
        mutex_unlock(&dev->cap_mask_mutex);
        return err;
}
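/*
 * User context creation: validate the requested number of uuars (micro
 * UARs), allocate the backing UAR pages from firmware and report the
 * device limits back to userspace.  Each UAR page carries
 * MLX5_BF_REGS_PER_PAGE uuars; uuars 2 and 3 of every group of four
 * are reserved up front in the bitmap as fast path uuars.
 */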
static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
                                                  struct ib_udata *udata)
{
        struct mlx5_ib_dev *dev = to_mdev(ibdev);
        struct mlx5_ib_alloc_ucontext_req req;
        struct mlx5_ib_alloc_ucontext_resp resp;
        struct mlx5_ib_ucontext *context;
        struct mlx5_uuar_info *uuari;
        struct mlx5_uar *uars;
        int num_uars;
        int uuarn;
        int err;
        int i;

        if (!dev->ib_active)
                return ERR_PTR(-EAGAIN);

        err = ib_copy_from_udata(&req, udata, sizeof(req));
        if (err)
                return ERR_PTR(err);

        if (req.total_num_uuars > MLX5_MAX_UUARS)
                return ERR_PTR(-ENOMEM);

        if (req.total_num_uuars == 0)
                return ERR_PTR(-EINVAL);

        req.total_num_uuars = ALIGN(req.total_num_uuars, MLX5_BF_REGS_PER_PAGE);
        if (req.num_low_latency_uuars > req.total_num_uuars - 1)
                return ERR_PTR(-EINVAL);

        num_uars = req.total_num_uuars / MLX5_BF_REGS_PER_PAGE;
        resp.qp_tab_size     = 1 << dev->mdev.caps.log_max_qp;
        resp.bf_reg_size     = dev->mdev.caps.bf_reg_size;
        resp.cache_line_size = L1_CACHE_BYTES;
        resp.max_sq_desc_sz  = dev->mdev.caps.max_sq_desc_sz;
        resp.max_rq_desc_sz  = dev->mdev.caps.max_rq_desc_sz;
        resp.max_send_wqebb  = dev->mdev.caps.max_wqes;
        resp.max_recv_wr     = dev->mdev.caps.max_wqes;
        resp.max_srq_recv_wr = dev->mdev.caps.max_srq_wqes;

        context = kzalloc(sizeof(*context), GFP_KERNEL);
        if (!context)
                return ERR_PTR(-ENOMEM);

        uuari = &context->uuari;
        mutex_init(&uuari->lock);
        uars = kcalloc(num_uars, sizeof(*uars), GFP_KERNEL);
        if (!uars) {
                err = -ENOMEM;
                goto out_ctx;
        }

        uuari->bitmap = kcalloc(BITS_TO_LONGS(req.total_num_uuars),
                                sizeof(*uuari->bitmap),
                                GFP_KERNEL);
        if (!uuari->bitmap) {
                err = -ENOMEM;
                goto out_uar_ctx;
        }
        /*
         * clear all fast path uuars
         */
        for (i = 0; i < req.total_num_uuars; i++) {
                uuarn = i & 3;
                if (uuarn == 2 || uuarn == 3)
                        set_bit(i, uuari->bitmap);
        }

        uuari->count = kcalloc(req.total_num_uuars, sizeof(*uuari->count), GFP_KERNEL);
        if (!uuari->count) {
                err = -ENOMEM;
                goto out_bitmap;
        }

        for (i = 0; i < num_uars; i++) {
                err = mlx5_cmd_alloc_uar(&dev->mdev, &uars[i].index);
                if (err)
                        goto out_count;
        }

        INIT_LIST_HEAD(&context->db_page_list);
        mutex_init(&context->db_page_mutex);

        resp.tot_uuars = req.total_num_uuars;
        resp.num_ports = dev->mdev.caps.num_ports;
        err = ib_copy_to_udata(udata, &resp, sizeof(resp));
        if (err)
                goto out_uars;

        uuari->num_low_latency_uuars = req.num_low_latency_uuars;
        uuari->uars = uars;
        uuari->num_uars = num_uars;
        return &context->ibucontext;

out_uars:
        for (i--; i >= 0; i--)
                mlx5_cmd_free_uar(&dev->mdev, uars[i].index);
out_count:
        kfree(uuari->count);

out_bitmap:
        kfree(uuari->bitmap);

out_uar_ctx:
        kfree(uars);

out_ctx:
        kfree(context);
        return ERR_PTR(err);
}
static int mlx5_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
{
        struct mlx5_ib_ucontext *context = to_mucontext(ibcontext);
        struct mlx5_ib_dev *dev = to_mdev(ibcontext->device);
        struct mlx5_uuar_info *uuari = &context->uuari;
        int i;

        for (i = 0; i < uuari->num_uars; i++) {
                if (mlx5_cmd_free_uar(&dev->mdev, uuari->uars[i].index))
                        mlx5_ib_warn(dev, "failed to free UAR 0x%x\n",
                                     uuari->uars[i].index);
        }

        kfree(uuari->count);
        kfree(uuari->bitmap);
        kfree(uuari->uars);
        kfree(context);

        return 0;
}
static phys_addr_t uar_index2pfn(struct mlx5_ib_dev *dev, int index)
{
        return (pci_resource_start(dev->mdev.pdev, 0) >> PAGE_SHIFT) + index;
}
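/*
 * The mmap offset (vm_pgoff) encodes a command in its high bits and a
 * command-specific argument, e.g. the UAR index, in the low
 * MLX5_IB_MMAP_CMD_SHIFT bits.
 */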
static int get_command(unsigned long offset)
{
        return (offset >> MLX5_IB_MMAP_CMD_SHIFT) & MLX5_IB_MMAP_CMD_MASK;
}

static int get_arg(unsigned long offset)
{
        return offset & ((1 << MLX5_IB_MMAP_CMD_SHIFT) - 1);
}

static int get_index(unsigned long offset)
{
        return get_arg(offset);
}
static int mlx5_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vma)
{
        struct mlx5_ib_ucontext *context = to_mucontext(ibcontext);
        struct mlx5_ib_dev *dev = to_mdev(ibcontext->device);
        struct mlx5_uuar_info *uuari = &context->uuari;
        unsigned long command;
        unsigned long idx;
        phys_addr_t pfn;

        command = get_command(vma->vm_pgoff);
        switch (command) {
        case MLX5_IB_MMAP_REGULAR_PAGE:
                if (vma->vm_end - vma->vm_start != PAGE_SIZE)
                        return -EINVAL;

                idx = get_index(vma->vm_pgoff);
                if (idx >= uuari->num_uars)
                        return -EINVAL;

                pfn = uar_index2pfn(dev, uuari->uars[idx].index);
                mlx5_ib_dbg(dev, "uar idx 0x%lx, pfn 0x%llx\n", idx,
                            (unsigned long long)pfn);

                vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
                if (io_remap_pfn_range(vma, vma->vm_start, pfn,
                                       PAGE_SIZE, vma->vm_page_prot))
                        return -EAGAIN;

                mlx5_ib_dbg(dev, "mapped WC at 0x%lx, PA 0x%llx\n",
                            vma->vm_start,
                            (unsigned long long)pfn << PAGE_SHIFT);
                break;

        case MLX5_IB_MMAP_GET_CONTIGUOUS_PAGES:
                return -ENOSYS;

        default:
                return -EINVAL;
        }

        return 0;
}
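/*
 * Create a physical-address mkey spanning the whole address space
 * (MLX5_MKEY_LEN64) for a kernel PD; the caller stores it as the PD's
 * pa_lkey.
 */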
static int alloc_pa_mkey(struct mlx5_ib_dev *dev, u32 *key, u32 pdn)
{
        struct mlx5_create_mkey_mbox_in *in;
        struct mlx5_mkey_seg *seg;
        struct mlx5_core_mr mr;
        int err;

        in = kzalloc(sizeof(*in), GFP_KERNEL);
        if (!in)
                return -ENOMEM;

        seg = &in->seg;
        seg->flags = MLX5_PERM_LOCAL_READ | MLX5_ACCESS_MODE_PA;
        seg->flags_pd = cpu_to_be32(pdn | MLX5_MKEY_LEN64);
        seg->qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
        seg->start_addr = 0;

        err = mlx5_core_create_mkey(&dev->mdev, &mr, in, sizeof(*in));
        if (err) {
                mlx5_ib_warn(dev, "failed to create mkey, %d\n", err);
                goto err_in;
        }

        kfree(in);
        *key = mr.key;

        return 0;

err_in:
        kfree(in);

        return err;
}
static void free_pa_mkey(struct mlx5_ib_dev *dev, u32 key)
{
        struct mlx5_core_mr mr;
        int err;

        memset(&mr, 0, sizeof(mr));
        mr.key = key;
        err = mlx5_core_destroy_mkey(&dev->mdev, &mr);
        if (err)
                mlx5_ib_warn(dev, "failed to destroy mkey 0x%x\n", key);
}
static struct ib_pd *mlx5_ib_alloc_pd(struct ib_device *ibdev,
                                      struct ib_ucontext *context,
                                      struct ib_udata *udata)
{
        struct mlx5_ib_alloc_pd_resp resp;
        struct mlx5_ib_pd *pd;
        int err;

        pd = kmalloc(sizeof(*pd), GFP_KERNEL);
        if (!pd)
                return ERR_PTR(-ENOMEM);

        err = mlx5_core_alloc_pd(&to_mdev(ibdev)->mdev, &pd->pdn);
        if (err) {
                kfree(pd);
                return ERR_PTR(err);
        }

        if (context) {
                resp.pdn = pd->pdn;
                if (ib_copy_to_udata(udata, &resp, sizeof(resp))) {
                        mlx5_core_dealloc_pd(&to_mdev(ibdev)->mdev, pd->pdn);
                        kfree(pd);
                        return ERR_PTR(-EFAULT);
                }
        } else {
                err = alloc_pa_mkey(to_mdev(ibdev), &pd->pa_lkey, pd->pdn);
                if (err) {
                        mlx5_core_dealloc_pd(&to_mdev(ibdev)->mdev, pd->pdn);
                        kfree(pd);
                        return ERR_PTR(err);
                }
        }

        return &pd->ibpd;
}
static int mlx5_ib_dealloc_pd(struct ib_pd *pd)
{
        struct mlx5_ib_dev *mdev = to_mdev(pd->device);
        struct mlx5_ib_pd *mpd = to_mpd(pd);

        if (!pd->uobject)
                free_pa_mkey(mdev, mpd->pa_lkey);

        mlx5_core_dealloc_pd(&mdev->mdev, mpd->pdn);
        kfree(mpd);

        return 0;
}
static int mlx5_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
        struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
        int err;

        err = mlx5_core_attach_mcg(&dev->mdev, gid, ibqp->qp_num);
        if (err)
                mlx5_ib_warn(dev, "failed attaching QPN 0x%x, MGID %pI6\n",
                             ibqp->qp_num, gid->raw);

        return err;
}

static int mlx5_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
        struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
        int err;

        err = mlx5_core_detach_mcg(&dev->mdev, gid, ibqp->qp_num);
        if (err)
                mlx5_ib_warn(dev, "failed detaching QPN 0x%x, MGID %pI6\n",
                             ibqp->qp_num, gid->raw);

        return err;
}
static int init_node_data(struct mlx5_ib_dev *dev)
{
        struct ib_smp *in_mad  = NULL;
        struct ib_smp *out_mad = NULL;
        int err = -ENOMEM;

        in_mad  = kzalloc(sizeof(*in_mad), GFP_KERNEL);
        out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
        if (!in_mad || !out_mad)
                goto out;

        init_query_mad(in_mad);
        in_mad->attr_id = IB_SMP_ATTR_NODE_DESC;

        err = mlx5_MAD_IFC(dev, 1, 1, 1, NULL, NULL, in_mad, out_mad);
        if (err)
                goto out;

        memcpy(dev->ib_dev.node_desc, out_mad->data, 64);

        in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;

        err = mlx5_MAD_IFC(dev, 1, 1, 1, NULL, NULL, in_mad, out_mad);
        if (err)
                goto out;

        dev->mdev.rev_id = be32_to_cpup((__be32 *)(out_mad->data + 32));
        memcpy(&dev->ib_dev.node_guid, out_mad->data + 12, 8);

out:
        kfree(in_mad);
        kfree(out_mad);
        return err;
}
static ssize_t show_fw_pages(struct device *device, struct device_attribute *attr,
                             char *buf)
{
        struct mlx5_ib_dev *dev =
                container_of(device, struct mlx5_ib_dev, ib_dev.dev);

        return sprintf(buf, "%d\n", dev->mdev.priv.fw_pages);
}

static ssize_t show_reg_pages(struct device *device,
                              struct device_attribute *attr, char *buf)
{
        struct mlx5_ib_dev *dev =
                container_of(device, struct mlx5_ib_dev, ib_dev.dev);

        return sprintf(buf, "%d\n", dev->mdev.priv.reg_pages);
}

static ssize_t show_hca(struct device *device, struct device_attribute *attr,
                        char *buf)
{
        struct mlx5_ib_dev *dev =
                container_of(device, struct mlx5_ib_dev, ib_dev.dev);
        return sprintf(buf, "MT%d\n", dev->mdev.pdev->device);
}

static ssize_t show_fw_ver(struct device *device, struct device_attribute *attr,
                           char *buf)
{
        struct mlx5_ib_dev *dev =
                container_of(device, struct mlx5_ib_dev, ib_dev.dev);
        return sprintf(buf, "%d.%d.%d\n", fw_rev_maj(&dev->mdev),
                       fw_rev_min(&dev->mdev), fw_rev_sub(&dev->mdev));
}

static ssize_t show_rev(struct device *device, struct device_attribute *attr,
                        char *buf)
{
        struct mlx5_ib_dev *dev =
                container_of(device, struct mlx5_ib_dev, ib_dev.dev);
        return sprintf(buf, "%x\n", dev->mdev.rev_id);
}

static ssize_t show_board(struct device *device, struct device_attribute *attr,
                          char *buf)
{
        struct mlx5_ib_dev *dev =
                container_of(device, struct mlx5_ib_dev, ib_dev.dev);
        return sprintf(buf, "%.*s\n", MLX5_BOARD_ID_LEN,
                       dev->mdev.board_id);
}
static DEVICE_ATTR(hw_rev,    S_IRUGO, show_rev,       NULL);
static DEVICE_ATTR(fw_ver,    S_IRUGO, show_fw_ver,    NULL);
static DEVICE_ATTR(hca_type,  S_IRUGO, show_hca,       NULL);
static DEVICE_ATTR(board_id,  S_IRUGO, show_board,     NULL);
static DEVICE_ATTR(fw_pages,  S_IRUGO, show_fw_pages,  NULL);
static DEVICE_ATTR(reg_pages, S_IRUGO, show_reg_pages, NULL);

static struct device_attribute *mlx5_class_attributes[] = {
        &dev_attr_hw_rev,
        &dev_attr_fw_ver,
        &dev_attr_hca_type,
        &dev_attr_board_id,
        &dev_attr_fw_pages,
        &dev_attr_reg_pages,
};
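/*
 * Dispatch core device events to the IB event consumers, translating
 * mlx5 event codes to their ib_event_type equivalents.
 */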
static void mlx5_ib_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event,
                          unsigned long param)
{
        struct mlx5_ib_dev *ibdev = container_of(dev, struct mlx5_ib_dev, mdev);
        struct ib_event ibev;
        u8 port = 0;

        switch (event) {
        case MLX5_DEV_EVENT_SYS_ERROR:
                ibdev->ib_active = false;
                ibev.event = IB_EVENT_DEVICE_FATAL;
                break;

        case MLX5_DEV_EVENT_PORT_UP:
                ibev.event = IB_EVENT_PORT_ACTIVE;
                port = (u8)param;
                break;

        case MLX5_DEV_EVENT_PORT_DOWN:
                ibev.event = IB_EVENT_PORT_ERR;
                port = (u8)param;
                break;

        case MLX5_DEV_EVENT_PORT_INITIALIZED:
                /* not used by ULPs */
                return;

        case MLX5_DEV_EVENT_LID_CHANGE:
                ibev.event = IB_EVENT_LID_CHANGE;
                port = (u8)param;
                break;

        case MLX5_DEV_EVENT_PKEY_CHANGE:
                ibev.event = IB_EVENT_PKEY_CHANGE;
                port = (u8)param;
                break;

        case MLX5_DEV_EVENT_GUID_CHANGE:
                ibev.event = IB_EVENT_GID_CHANGE;
                port = (u8)param;
                break;

        case MLX5_DEV_EVENT_CLIENT_REREG:
                ibev.event = IB_EVENT_CLIENT_REREGISTER;
                port = (u8)param;
                break;
        }

        ibev.device           = &ibdev->ib_dev;
        ibev.element.port_num = port;

        if (ibdev->ib_active)
                ib_dispatch_event(&ibev);
}
static void get_ext_port_caps(struct mlx5_ib_dev *dev)
{
        int port;

        for (port = 1; port <= dev->mdev.caps.num_ports; port++)
                mlx5_query_ext_port_caps(dev, port);
}
static int get_port_caps(struct mlx5_ib_dev *dev)
{
        struct ib_device_attr *dprops = NULL;
        struct ib_port_attr *pprops = NULL;
        int err = -ENOMEM;
        int port;

        pprops = kmalloc(sizeof(*pprops), GFP_KERNEL);
        if (!pprops)
                goto out;

        dprops = kmalloc(sizeof(*dprops), GFP_KERNEL);
        if (!dprops)
                goto out;

        err = mlx5_ib_query_device(&dev->ib_dev, dprops);
        if (err) {
                mlx5_ib_warn(dev, "query_device failed %d\n", err);
                goto out;
        }

        for (port = 1; port <= dev->mdev.caps.num_ports; port++) {
                err = mlx5_ib_query_port(&dev->ib_dev, port, pprops);
                if (err) {
                        mlx5_ib_warn(dev, "query_port %d failed %d\n", port, err);
                        break;
                }
                dev->mdev.caps.port[port - 1].pkey_table_len = dprops->max_pkeys;
                dev->mdev.caps.port[port - 1].gid_table_len = pprops->gid_tbl_len;
                mlx5_ib_dbg(dev, "pkey_table_len %d, gid_table_len %d\n",
                            dprops->max_pkeys, pprops->gid_tbl_len);
        }

out:
        kfree(pprops);
        kfree(dprops);

        return err;
}
static void destroy_umrc_res(struct mlx5_ib_dev *dev)
{
        int err;

        err = mlx5_mr_cache_cleanup(dev);
        if (err)
                mlx5_ib_warn(dev, "mr cache cleanup failed\n");

        mlx5_ib_destroy_qp(dev->umrc.qp);
        ib_destroy_cq(dev->umrc.cq);
        ib_dereg_mr(dev->umrc.mr);
        ib_dealloc_pd(dev->umrc.pd);
}
static int create_umr_res(struct mlx5_ib_dev *dev)
{
        struct ib_qp_init_attr *init_attr = NULL;
        struct ib_qp_attr *attr = NULL;
        struct ib_pd *pd;
        struct ib_cq *cq;
        struct ib_qp *qp;
        struct ib_mr *mr;
        int ret;

        attr = kzalloc(sizeof(*attr), GFP_KERNEL);
        init_attr = kzalloc(sizeof(*init_attr), GFP_KERNEL);
        if (!attr || !init_attr) {
                ret = -ENOMEM;
                goto error_0;
        }

        pd = ib_alloc_pd(&dev->ib_dev);
        if (IS_ERR(pd)) {
                mlx5_ib_dbg(dev, "Couldn't create PD for sync UMR QP\n");
                ret = PTR_ERR(pd);
                goto error_0;
        }

        mr = ib_get_dma_mr(pd, IB_ACCESS_LOCAL_WRITE);
        if (IS_ERR(mr)) {
                mlx5_ib_dbg(dev, "Couldn't create DMA MR for sync UMR QP\n");
                ret = PTR_ERR(mr);
                goto error_1;
        }

        cq = ib_create_cq(&dev->ib_dev, mlx5_umr_cq_handler, NULL, NULL, 128,
                          0);
        if (IS_ERR(cq)) {
                mlx5_ib_dbg(dev, "Couldn't create CQ for sync UMR QP\n");
                ret = PTR_ERR(cq);
                goto error_2;
        }
        ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);

        init_attr->send_cq = cq;
        init_attr->recv_cq = cq;
        init_attr->sq_sig_type = IB_SIGNAL_ALL_WR;
        init_attr->cap.max_send_wr = MAX_UMR_WR;
        init_attr->cap.max_send_sge = 1;
        init_attr->qp_type = MLX5_IB_QPT_REG_UMR;
        init_attr->port_num = 1;
        qp = mlx5_ib_create_qp(pd, init_attr, NULL);
        if (IS_ERR(qp)) {
                mlx5_ib_dbg(dev, "Couldn't create sync UMR QP\n");
                ret = PTR_ERR(qp);
                goto error_3;
        }
        qp->device  = &dev->ib_dev;
        qp->real_qp = qp;
        qp->uobject = NULL;
        qp->qp_type = MLX5_IB_QPT_REG_UMR;

        attr->qp_state = IB_QPS_INIT;
        attr->port_num = 1;
        ret = mlx5_ib_modify_qp(qp, attr, IB_QP_STATE | IB_QP_PKEY_INDEX |
                                IB_QP_PORT, NULL);
        if (ret) {
                mlx5_ib_dbg(dev, "Couldn't modify UMR QP\n");
                goto error_4;
        }

        memset(attr, 0, sizeof(*attr));
        attr->qp_state = IB_QPS_RTR;
        attr->path_mtu = IB_MTU_256;

        ret = mlx5_ib_modify_qp(qp, attr, IB_QP_STATE, NULL);
        if (ret) {
                mlx5_ib_dbg(dev, "Couldn't modify umr QP to rtr\n");
                goto error_4;
        }

        memset(attr, 0, sizeof(*attr));
        attr->qp_state = IB_QPS_RTS;
        ret = mlx5_ib_modify_qp(qp, attr, IB_QP_STATE, NULL);
        if (ret) {
                mlx5_ib_dbg(dev, "Couldn't modify umr QP to rts\n");
                goto error_4;
        }

        dev->umrc.qp = qp;
        dev->umrc.cq = cq;
        dev->umrc.mr = mr;
        dev->umrc.pd = pd;

        sema_init(&dev->umrc.sem, MAX_UMR_WR);
        ret = mlx5_mr_cache_init(dev);
        if (ret) {
                mlx5_ib_warn(dev, "mr cache init failed %d\n", ret);
                goto error_4;
        }

        return 0;

error_4:
        mlx5_ib_destroy_qp(qp);

error_3:
        ib_destroy_cq(cq);

error_2:
        ib_dereg_mr(mr);

error_1:
        ib_dealloc_pd(pd);

error_0:
        kfree(attr);
        kfree(init_attr);
        return ret;
}
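/*
 * Create the driver-internal verbs objects (PD p0, CQ c0, XRC domains
 * x0/x1 and XRC SRQ s0).  They bypass the uverbs object path, so their
 * bookkeeping fields are initialized by hand here.
 */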
static int create_dev_resources(struct mlx5_ib_resources *devr)
{
        struct ib_srq_init_attr attr;
        struct mlx5_ib_dev *dev;
        int ret = 0;

        dev = container_of(devr, struct mlx5_ib_dev, devr);

        devr->p0 = mlx5_ib_alloc_pd(&dev->ib_dev, NULL, NULL);
        if (IS_ERR(devr->p0)) {
                ret = PTR_ERR(devr->p0);
                goto error0;
        }
        devr->p0->device  = &dev->ib_dev;
        devr->p0->uobject = NULL;
        atomic_set(&devr->p0->usecnt, 0);

        devr->c0 = mlx5_ib_create_cq(&dev->ib_dev, 1, 0, NULL, NULL);
        if (IS_ERR(devr->c0)) {
                ret = PTR_ERR(devr->c0);
                goto error1;
        }
        devr->c0->device        = &dev->ib_dev;
        devr->c0->uobject       = NULL;
        devr->c0->comp_handler  = NULL;
        devr->c0->event_handler = NULL;
        devr->c0->cq_context    = NULL;
        atomic_set(&devr->c0->usecnt, 0);

        devr->x0 = mlx5_ib_alloc_xrcd(&dev->ib_dev, NULL, NULL);
        if (IS_ERR(devr->x0)) {
                ret = PTR_ERR(devr->x0);
                goto error2;
        }
        devr->x0->device = &dev->ib_dev;
        devr->x0->inode = NULL;
        atomic_set(&devr->x0->usecnt, 0);
        mutex_init(&devr->x0->tgt_qp_mutex);
        INIT_LIST_HEAD(&devr->x0->tgt_qp_list);

        devr->x1 = mlx5_ib_alloc_xrcd(&dev->ib_dev, NULL, NULL);
        if (IS_ERR(devr->x1)) {
                ret = PTR_ERR(devr->x1);
                goto error3;
        }
        devr->x1->device = &dev->ib_dev;
        devr->x1->inode = NULL;
        atomic_set(&devr->x1->usecnt, 0);
        mutex_init(&devr->x1->tgt_qp_mutex);
        INIT_LIST_HEAD(&devr->x1->tgt_qp_list);

        memset(&attr, 0, sizeof(attr));
        attr.attr.max_sge = 1;
        attr.attr.max_wr = 1;
        attr.srq_type = IB_SRQT_XRC;
        attr.ext.xrc.cq = devr->c0;
        attr.ext.xrc.xrcd = devr->x0;

        devr->s0 = mlx5_ib_create_srq(devr->p0, &attr, NULL);
        if (IS_ERR(devr->s0)) {
                ret = PTR_ERR(devr->s0);
                goto error4;
        }
        devr->s0->device        = &dev->ib_dev;
        devr->s0->pd            = devr->p0;
        devr->s0->uobject       = NULL;
        devr->s0->event_handler = NULL;
        devr->s0->srq_context   = NULL;
        devr->s0->srq_type      = IB_SRQT_XRC;
        devr->s0->ext.xrc.xrcd  = devr->x0;
        devr->s0->ext.xrc.cq    = devr->c0;
        atomic_inc(&devr->s0->ext.xrc.xrcd->usecnt);
        atomic_inc(&devr->s0->ext.xrc.cq->usecnt);
        atomic_inc(&devr->p0->usecnt);
        atomic_set(&devr->s0->usecnt, 0);

        return 0;

error4:
        mlx5_ib_dealloc_xrcd(devr->x1);
error3:
        mlx5_ib_dealloc_xrcd(devr->x0);
error2:
        mlx5_ib_destroy_cq(devr->c0);
error1:
        mlx5_ib_dealloc_pd(devr->p0);
error0:
        return ret;
}
static void destroy_dev_resources(struct mlx5_ib_resources *devr)
{
        mlx5_ib_destroy_srq(devr->s0);
        mlx5_ib_dealloc_xrcd(devr->x0);
        mlx5_ib_dealloc_xrcd(devr->x1);
        mlx5_ib_destroy_cq(devr->c0);
        mlx5_ib_dealloc_pd(devr->p0);
}
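/*
 * PCI probe: initialize the core device with the selected profile,
 * discover port capabilities and completion EQs, wire up the verbs
 * callbacks and register the device with the IB core.
 */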
static int init_one(struct pci_dev *pdev,
                    const struct pci_device_id *id)
{
        struct mlx5_core_dev *mdev;
        struct mlx5_ib_dev *dev;
        int err;
        int i;

        printk_once(KERN_INFO "%s", mlx5_version);

        dev = (struct mlx5_ib_dev *)ib_alloc_device(sizeof(*dev));
        if (!dev)
                return -ENOMEM;

        mdev = &dev->mdev;
        mdev->event = mlx5_ib_event;
        if (prof_sel >= ARRAY_SIZE(profile)) {
                pr_warn("selected profile out of range, selecting default\n");
                prof_sel = 2;
        }
        mdev->profile = &profile[prof_sel];
        err = mlx5_dev_init(mdev, pdev);
        if (err)
                goto err_free;

        err = get_port_caps(dev);
        if (err)
                goto err_cleanup;

        get_ext_port_caps(dev);

        err = alloc_comp_eqs(dev);
        if (err)
                goto err_cleanup;

        MLX5_INIT_DOORBELL_LOCK(&dev->uar_lock);

        strlcpy(dev->ib_dev.name, "mlx5_%d", IB_DEVICE_NAME_MAX);
        dev->ib_dev.owner               = THIS_MODULE;
        dev->ib_dev.node_type           = RDMA_NODE_IB_CA;
        dev->ib_dev.local_dma_lkey      = mdev->caps.reserved_lkey;
        dev->num_ports                  = mdev->caps.num_ports;
        dev->ib_dev.phys_port_cnt       = dev->num_ports;
        dev->ib_dev.num_comp_vectors    = dev->num_comp_vectors;
        dev->ib_dev.dma_device          = &mdev->pdev->dev;

        dev->ib_dev.uverbs_abi_ver      = MLX5_IB_UVERBS_ABI_VERSION;
        dev->ib_dev.uverbs_cmd_mask     =
                (1ull << IB_USER_VERBS_CMD_GET_CONTEXT)         |
                (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE)        |
                (1ull << IB_USER_VERBS_CMD_QUERY_PORT)          |
                (1ull << IB_USER_VERBS_CMD_ALLOC_PD)            |
                (1ull << IB_USER_VERBS_CMD_DEALLOC_PD)          |
                (1ull << IB_USER_VERBS_CMD_REG_MR)              |
                (1ull << IB_USER_VERBS_CMD_DEREG_MR)            |
                (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
                (1ull << IB_USER_VERBS_CMD_CREATE_CQ)           |
                (1ull << IB_USER_VERBS_CMD_RESIZE_CQ)           |
                (1ull << IB_USER_VERBS_CMD_DESTROY_CQ)          |
                (1ull << IB_USER_VERBS_CMD_CREATE_QP)           |
                (1ull << IB_USER_VERBS_CMD_MODIFY_QP)           |
                (1ull << IB_USER_VERBS_CMD_QUERY_QP)            |
                (1ull << IB_USER_VERBS_CMD_DESTROY_QP)          |
                (1ull << IB_USER_VERBS_CMD_ATTACH_MCAST)        |
                (1ull << IB_USER_VERBS_CMD_DETACH_MCAST)        |
                (1ull << IB_USER_VERBS_CMD_CREATE_SRQ)          |
                (1ull << IB_USER_VERBS_CMD_MODIFY_SRQ)          |
                (1ull << IB_USER_VERBS_CMD_QUERY_SRQ)           |
                (1ull << IB_USER_VERBS_CMD_DESTROY_SRQ)         |
                (1ull << IB_USER_VERBS_CMD_CREATE_XSRQ)         |
                (1ull << IB_USER_VERBS_CMD_OPEN_QP);

        dev->ib_dev.query_device        = mlx5_ib_query_device;
        dev->ib_dev.query_port          = mlx5_ib_query_port;
        dev->ib_dev.query_gid           = mlx5_ib_query_gid;
        dev->ib_dev.query_pkey          = mlx5_ib_query_pkey;
        dev->ib_dev.modify_device       = mlx5_ib_modify_device;
        dev->ib_dev.modify_port         = mlx5_ib_modify_port;
        dev->ib_dev.alloc_ucontext      = mlx5_ib_alloc_ucontext;
        dev->ib_dev.dealloc_ucontext    = mlx5_ib_dealloc_ucontext;
        dev->ib_dev.mmap                = mlx5_ib_mmap;
        dev->ib_dev.alloc_pd            = mlx5_ib_alloc_pd;
        dev->ib_dev.dealloc_pd          = mlx5_ib_dealloc_pd;
        dev->ib_dev.create_ah           = mlx5_ib_create_ah;
        dev->ib_dev.query_ah            = mlx5_ib_query_ah;
        dev->ib_dev.destroy_ah          = mlx5_ib_destroy_ah;
        dev->ib_dev.create_srq          = mlx5_ib_create_srq;
        dev->ib_dev.modify_srq          = mlx5_ib_modify_srq;
        dev->ib_dev.query_srq           = mlx5_ib_query_srq;
        dev->ib_dev.destroy_srq         = mlx5_ib_destroy_srq;
        dev->ib_dev.post_srq_recv       = mlx5_ib_post_srq_recv;
        dev->ib_dev.create_qp           = mlx5_ib_create_qp;
        dev->ib_dev.modify_qp           = mlx5_ib_modify_qp;
        dev->ib_dev.query_qp            = mlx5_ib_query_qp;
        dev->ib_dev.destroy_qp          = mlx5_ib_destroy_qp;
        dev->ib_dev.post_send           = mlx5_ib_post_send;
        dev->ib_dev.post_recv           = mlx5_ib_post_recv;
        dev->ib_dev.create_cq           = mlx5_ib_create_cq;
        dev->ib_dev.modify_cq           = mlx5_ib_modify_cq;
        dev->ib_dev.resize_cq           = mlx5_ib_resize_cq;
        dev->ib_dev.destroy_cq          = mlx5_ib_destroy_cq;
        dev->ib_dev.poll_cq             = mlx5_ib_poll_cq;
        dev->ib_dev.req_notify_cq       = mlx5_ib_arm_cq;
        dev->ib_dev.get_dma_mr          = mlx5_ib_get_dma_mr;
        dev->ib_dev.reg_user_mr         = mlx5_ib_reg_user_mr;
        dev->ib_dev.dereg_mr            = mlx5_ib_dereg_mr;
        dev->ib_dev.attach_mcast        = mlx5_ib_mcg_attach;
        dev->ib_dev.detach_mcast        = mlx5_ib_mcg_detach;
        dev->ib_dev.process_mad         = mlx5_ib_process_mad;
        dev->ib_dev.alloc_fast_reg_mr   = mlx5_ib_alloc_fast_reg_mr;
        dev->ib_dev.alloc_fast_reg_page_list = mlx5_ib_alloc_fast_reg_page_list;
        dev->ib_dev.free_fast_reg_page_list  = mlx5_ib_free_fast_reg_page_list;

        if (mdev->caps.flags & MLX5_DEV_CAP_FLAG_XRC) {
                dev->ib_dev.alloc_xrcd = mlx5_ib_alloc_xrcd;
                dev->ib_dev.dealloc_xrcd = mlx5_ib_dealloc_xrcd;
                dev->ib_dev.uverbs_cmd_mask |=
                        (1ull << IB_USER_VERBS_CMD_OPEN_XRCD) |
                        (1ull << IB_USER_VERBS_CMD_CLOSE_XRCD);
        }

        err = init_node_data(dev);
        if (err)
                goto err_eqs;

        mutex_init(&dev->cap_mask_mutex);
        spin_lock_init(&dev->mr_lock);

        err = create_dev_resources(&dev->devr);
        if (err)
                goto err_eqs;

        if (ib_register_device(&dev->ib_dev, NULL))
                goto err_rsrc;

        err = create_umr_res(dev);
        if (err)
                goto err_dev;

        for (i = 0; i < ARRAY_SIZE(mlx5_class_attributes); i++) {
                if (device_create_file(&dev->ib_dev.dev,
                                       mlx5_class_attributes[i]))
                        goto err_umrc;
        }

        dev->ib_active = true;

        return 0;

err_umrc:
        destroy_umrc_res(dev);

err_dev:
        ib_unregister_device(&dev->ib_dev);

err_rsrc:
        destroy_dev_resources(&dev->devr);

err_eqs:
        free_comp_eqs(dev);

err_cleanup:
        mlx5_dev_cleanup(mdev);

err_free:
        ib_dealloc_device((struct ib_device *)dev);

        return err;
}
static void remove_one(struct pci_dev *pdev)
{
        struct mlx5_ib_dev *dev = mlx5_pci2ibdev(pdev);

        destroy_umrc_res(dev);
        ib_unregister_device(&dev->ib_dev);
        destroy_dev_resources(&dev->devr);
        free_comp_eqs(dev);
        mlx5_dev_cleanup(&dev->mdev);
        ib_dealloc_device(&dev->ib_dev);
}
static DEFINE_PCI_DEVICE_TABLE(mlx5_ib_pci_table) = {
        { PCI_VDEVICE(MELLANOX, 4113) }, /* MT4113 Connect-IB */
        { 0, }
};

MODULE_DEVICE_TABLE(pci, mlx5_ib_pci_table);
static struct pci_driver mlx5_ib_driver = {
        .name           = DRIVER_NAME,
        .id_table       = mlx5_ib_pci_table,
        .probe          = init_one,
        .remove         = remove_one
};
mlx5_ib_init(void)
1495 return pci_register_driver(&mlx5_ib_driver
);
1498 static void __exit
mlx5_ib_cleanup(void)
1500 pci_unregister_driver(&mlx5_ib_driver
);
1503 module_init(mlx5_ib_init
);
1504 module_exit(mlx5_ib_cleanup
);