/*
 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/rtnetlink.h>
#include <linux/if_vlan.h>

#include <rdma/ib_smi.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_addr.h>

#include <linux/mlx4/driver.h>
#include <linux/mlx4/cmd.h>

#include "mlx4_ib.h"
#include "user.h"
#define DRV_NAME	MLX4_IB_DRV_NAME
#define DRV_VERSION	"1.0"
#define DRV_RELDATE	"April 4, 2008"
MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("Mellanox ConnectX HCA InfiniBand driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
static const char mlx4_ib_version[] =
	DRV_NAME ": Mellanox ConnectX InfiniBand driver v"
	DRV_VERSION " (" DRV_RELDATE ")\n";
struct update_gid_work {
	struct work_struct	work;
	union ib_gid		gids[128];
	struct mlx4_ib_dev     *dev;
	int			port;
};
static struct workqueue_struct *wq;
static void init_query_mad(struct ib_smp *mad)
{
	mad->base_version  = 1;
	mad->mgmt_class    = IB_MGMT_CLASS_SUBN_LID_ROUTED;
	mad->class_version = 1;
	mad->method	   = IB_MGMT_METHOD_GET;
}
static union ib_gid zgid;
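/*
 * Device-wide attributes are answered from a mix of cached firmware
 * capabilities (dev->dev->caps) and a NodeInfo MAD issued to the HCA's
 * own subnet management agent through mlx4_MAD_IFC().
 */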
static int mlx4_ib_query_device(struct ib_device *ibdev,
				struct ib_device_attr *props)
{
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	struct ib_smp *in_mad  = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;

	in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;

	err = mlx4_MAD_IFC(to_mdev(ibdev), 1, 1, 1, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	memset(props, 0, sizeof *props);

	props->fw_ver = dev->dev->caps.fw_ver;
	props->device_cap_flags    = IB_DEVICE_CHANGE_PHY_PORT |
		IB_DEVICE_PORT_ACTIVE_EVENT		|
		IB_DEVICE_SYS_IMAGE_GUID		|
		IB_DEVICE_RC_RNR_NAK_GEN		|
		IB_DEVICE_BLOCK_MULTICAST_LOOPBACK;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BAD_PKEY_CNTR)
		props->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BAD_QKEY_CNTR)
		props->device_cap_flags |= IB_DEVICE_BAD_QKEY_CNTR;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_APM)
		props->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_UD_AV_PORT)
		props->device_cap_flags |= IB_DEVICE_UD_AV_PORT_ENFORCE;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_IPOIB_CSUM)
		props->device_cap_flags |= IB_DEVICE_UD_IP_CSUM;
	if (dev->dev->caps.max_gso_sz && dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BLH)
		props->device_cap_flags |= IB_DEVICE_UD_TSO;
	if (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_RESERVED_LKEY)
		props->device_cap_flags |= IB_DEVICE_LOCAL_DMA_LKEY;
	if ((dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_LOCAL_INV) &&
	    (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_REMOTE_INV) &&
	    (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_FAST_REG_WR))
		props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC)
		props->device_cap_flags |= IB_DEVICE_XRC;

	props->vendor_id	   = be32_to_cpup((__be32 *) (out_mad->data + 36)) &
		0xffffff;
	props->vendor_part_id	   = be16_to_cpup((__be16 *) (out_mad->data + 30));
	props->hw_ver		   = be32_to_cpup((__be32 *) (out_mad->data + 32));
	memcpy(&props->sys_image_guid, out_mad->data + 4, 8);

	props->max_mr_size	   = ~0ull;
	props->page_size_cap	   = dev->dev->caps.page_size_cap;
	props->max_qp		   = dev->dev->caps.num_qps - dev->dev->caps.reserved_qps;
	props->max_qp_wr	   = dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE;
	props->max_sge		   = min(dev->dev->caps.max_sq_sg,
					 dev->dev->caps.max_rq_sg);
	props->max_cq		   = dev->dev->caps.num_cqs - dev->dev->caps.reserved_cqs;
	props->max_cqe		   = dev->dev->caps.max_cqes;
	props->max_mr		   = dev->dev->caps.num_mpts - dev->dev->caps.reserved_mrws;
	props->max_pd		   = dev->dev->caps.num_pds - dev->dev->caps.reserved_pds;
	props->max_qp_rd_atom	   = dev->dev->caps.max_qp_dest_rdma;
	props->max_qp_init_rd_atom = dev->dev->caps.max_qp_init_rdma;
	props->max_res_rd_atom	   = props->max_qp_rd_atom * props->max_qp;
	props->max_srq		   = dev->dev->caps.num_srqs - dev->dev->caps.reserved_srqs;
	props->max_srq_wr	   = dev->dev->caps.max_srq_wqes - 1;
	props->max_srq_sge	   = dev->dev->caps.max_srq_sge;
	props->max_fast_reg_page_list_len = MLX4_MAX_FAST_REG_PAGES;
	props->local_ca_ack_delay  = dev->dev->caps.local_ca_ack_delay;
	props->atomic_cap	   = dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_ATOMIC ?
		IB_ATOMIC_HCA : IB_ATOMIC_NONE;
	props->masked_atomic_cap   = props->atomic_cap;
	props->max_pkeys	   = dev->dev->caps.pkey_table_len[1];
	props->max_mcast_grp	   = dev->dev->caps.num_mgms + dev->dev->caps.num_amgms;
	props->max_mcast_qp_attach = dev->dev->caps.num_qp_per_mgm;
	props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
					   props->max_mcast_grp;
	props->max_map_per_fmr = dev->dev->caps.max_fmr_maps;

out:
	kfree(in_mad);
	kfree(out_mad);

	return err;
}
static enum rdma_link_layer
mlx4_ib_port_link_layer(struct ib_device *device, u8 port_num)
{
	struct mlx4_dev *dev = to_mdev(device)->dev;

	return dev->caps.port_mask[port_num] == MLX4_PORT_TYPE_IB ?
		IB_LINK_LAYER_INFINIBAND : IB_LINK_LAYER_ETHERNET;
}
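/*
 * IB ports: parse the port attributes out of a PortInfo MAD, with an
 * ExtendedPortInfo follow-up query to tell FDR-10 apart from plain QDR.
 */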
static int ib_link_query_port(struct ib_device *ibdev, u8 port,
			      struct ib_port_attr *props)
{
	struct ib_smp *in_mad  = NULL;
	struct ib_smp *out_mad = NULL;
	int ext_active_speed;
	int err = -ENOMEM;

	in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id  = IB_SMP_ATTR_PORT_INFO;
	in_mad->attr_mod = cpu_to_be32(port);

	err = mlx4_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL,
			   in_mad, out_mad);
	if (err)
		goto out;

	props->lid		= be16_to_cpup((__be16 *) (out_mad->data + 16));
	props->lmc		= out_mad->data[34] & 0x7;
	props->sm_lid		= be16_to_cpup((__be16 *) (out_mad->data + 18));
	props->sm_sl		= out_mad->data[36] & 0xf;
	props->state		= out_mad->data[32] & 0xf;
	props->phys_state	= out_mad->data[33] >> 4;
	props->port_cap_flags	= be32_to_cpup((__be32 *) (out_mad->data + 20));
	props->gid_tbl_len	= to_mdev(ibdev)->dev->caps.gid_table_len[port];
	props->max_msg_sz	= to_mdev(ibdev)->dev->caps.max_msg_sz;
	props->pkey_tbl_len	= to_mdev(ibdev)->dev->caps.pkey_table_len[port];
	props->bad_pkey_cntr	= be16_to_cpup((__be16 *) (out_mad->data + 46));
	props->qkey_viol_cntr	= be16_to_cpup((__be16 *) (out_mad->data + 48));
	props->active_width	= out_mad->data[31] & 0xf;
	props->active_speed	= out_mad->data[35] >> 4;
	props->max_mtu		= out_mad->data[41] & 0xf;
	props->active_mtu	= out_mad->data[36] >> 4;
	props->subnet_timeout	= out_mad->data[51] & 0x1f;
	props->max_vl_num	= out_mad->data[37] >> 4;
	props->init_type_reply	= out_mad->data[41] >> 4;

	/* Check if extended speeds (EDR/FDR/...) are supported */
	if (props->port_cap_flags & IB_PORT_EXTENDED_SPEEDS_SUP) {
		ext_active_speed = out_mad->data[62] >> 4;

		switch (ext_active_speed) {
		case 1:
			props->active_speed = IB_SPEED_FDR;
			break;
		case 2:
			props->active_speed = IB_SPEED_EDR;
			break;
		}
	}

	/* If reported active speed is QDR, check if it is FDR-10 */
	if (props->active_speed == IB_SPEED_QDR) {
		init_query_mad(in_mad);
		in_mad->attr_id  = MLX4_ATTR_EXTENDED_PORT_INFO;
		in_mad->attr_mod = cpu_to_be32(port);

		err = mlx4_MAD_IFC(to_mdev(ibdev), 1, 1, port,
				   NULL, NULL, in_mad, out_mad);
		if (err)
			goto out;

		/* Checking LinkSpeedActive for FDR-10 */
		if (out_mad->data[15] & 0x1)
			props->active_speed = IB_SPEED_FDR10;
	}

	/* Avoid wrong speed value returned by FW if the IB link is down. */
	if (props->state == IB_PORT_DOWN)
		props->active_speed = IB_SPEED_SDR;

out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}
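/* Map the logical port state to a physical state: LinkUp (5) when active, Disabled (3) otherwise. */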
static u8 state_to_phys_state(enum ib_port_state state)
{
	return state == IB_PORT_ACTIVE ? 5 : 3;
}
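/*
 * Ethernet (IBoE) ports: link attributes come from a QUERY_PORT firmware
 * command combined with the carrier state of the paired net_device.
 */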
static int eth_link_query_port(struct ib_device *ibdev, u8 port,
			       struct ib_port_attr *props)
{
	struct mlx4_ib_dev *mdev = to_mdev(ibdev);
	struct mlx4_ib_iboe *iboe = &mdev->iboe;
	struct net_device *ndev;
	enum ib_mtu tmp;
	struct mlx4_cmd_mailbox *mailbox;
	int err = 0;

	mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma, port, 0,
			   MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B,
			   MLX4_CMD_WRAPPED);
	if (err)
		goto out;

	props->active_width	= (((u8 *)mailbox->buf)[5] == 0x40) ?
				  IB_WIDTH_4X : IB_WIDTH_1X;
	props->active_speed	= IB_SPEED_QDR;
	props->port_cap_flags	= IB_PORT_CM_SUP;
	props->gid_tbl_len	= mdev->dev->caps.gid_table_len[port];
	props->max_msg_sz	= mdev->dev->caps.max_msg_sz;
	props->pkey_tbl_len	= 1;
	props->max_mtu		= IB_MTU_4096;
	props->max_vl_num	= 2;
	props->state		= IB_PORT_DOWN;
	props->phys_state	= state_to_phys_state(props->state);
	props->active_mtu	= IB_MTU_256;
	spin_lock(&iboe->lock);
	ndev = iboe->netdevs[port - 1];
	if (!ndev)
		goto out_unlock;

	tmp = iboe_get_mtu(ndev->mtu);
	props->active_mtu = tmp ? min(props->max_mtu, tmp) : IB_MTU_256;

	props->state		= (netif_running(ndev) && netif_carrier_ok(ndev)) ?
				  IB_PORT_ACTIVE : IB_PORT_DOWN;
	props->phys_state	= state_to_phys_state(props->state);
out_unlock:
	spin_unlock(&iboe->lock);
out:
	mlx4_free_cmd_mailbox(mdev->dev, mailbox);
	return err;
}
static int mlx4_ib_query_port(struct ib_device *ibdev, u8 port,
			      struct ib_port_attr *props)
{
	int err;

	memset(props, 0, sizeof *props);

	err = mlx4_ib_port_link_layer(ibdev, port) == IB_LINK_LAYER_INFINIBAND ?
		ib_link_query_port(ibdev, port, props) :
		eth_link_query_port(ibdev, port, props);

	return err;
}
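/*
 * An IB GID is assembled from two MADs: the 8-byte subnet prefix from
 * PortInfo and the 8-byte GUID from the matching GuidInfo block.
 */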
static int __mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
			       union ib_gid *gid)
{
	struct ib_smp *in_mad  = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;

	in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id  = IB_SMP_ATTR_PORT_INFO;
	in_mad->attr_mod = cpu_to_be32(port);

	err = mlx4_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	memcpy(gid->raw, out_mad->data + 8, 8);

	init_query_mad(in_mad);
	in_mad->attr_id  = IB_SMP_ATTR_GUID_INFO;
	in_mad->attr_mod = cpu_to_be32(index / 8);

	err = mlx4_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	memcpy(gid->raw + 8, out_mad->data + (index % 8) * 8, 8);

out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}
static int iboe_query_gid(struct ib_device *ibdev, u8 port, int index,
			  union ib_gid *gid)
{
	struct mlx4_ib_dev *dev = to_mdev(ibdev);

	*gid = dev->iboe.gid_table[port - 1][index];

	return 0;
}
static int mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
			     union ib_gid *gid)
{
	if (rdma_port_get_link_layer(ibdev, port) == IB_LINK_LAYER_INFINIBAND)
		return __mlx4_ib_query_gid(ibdev, port, index, gid);

	return iboe_query_gid(ibdev, port, index, gid);
}
static int mlx4_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
			      u16 *pkey)
{
	struct ib_smp *in_mad  = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;

	in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id  = IB_SMP_ATTR_PKEY_TABLE;
	in_mad->attr_mod = cpu_to_be32(index / 32);

	err = mlx4_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	*pkey = be16_to_cpu(((__be16 *) out_mad->data)[index % 32]);

out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}
static int mlx4_ib_modify_device(struct ib_device *ibdev, int mask,
				 struct ib_device_modify *props)
{
	struct mlx4_cmd_mailbox *mailbox;
	unsigned long flags;

	if (mask & ~IB_DEVICE_MODIFY_NODE_DESC)
		return -EOPNOTSUPP;

	if (!(mask & IB_DEVICE_MODIFY_NODE_DESC))
		return 0;

	spin_lock_irqsave(&to_mdev(ibdev)->sm_lock, flags);
	memcpy(ibdev->node_desc, props->node_desc, 64);
	spin_unlock_irqrestore(&to_mdev(ibdev)->sm_lock, flags);

	/*
	 * If possible, pass node desc to FW, so it can generate
	 * a 144 trap.  If cmd fails, just ignore.
	 */
	mailbox = mlx4_alloc_cmd_mailbox(to_mdev(ibdev)->dev);
	if (IS_ERR(mailbox))
		return 0;

	memset(mailbox->buf, 0, 256);
	memcpy(mailbox->buf, props->node_desc, 64);
	mlx4_cmd(to_mdev(ibdev)->dev, mailbox->dma, 1, 0,
		 MLX4_CMD_SET_NODE, MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);

	mlx4_free_cmd_mailbox(to_mdev(ibdev)->dev, mailbox);

	return 0;
}
static int mlx4_SET_PORT(struct mlx4_ib_dev *dev, u8 port, int reset_qkey_viols,
			 u32 cap_mask)
{
	struct mlx4_cmd_mailbox *mailbox;
	int err;
	u8 is_eth = dev->dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH;

	mailbox = mlx4_alloc_cmd_mailbox(dev->dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	memset(mailbox->buf, 0, 256);

	if (dev->dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
		*(u8 *) mailbox->buf	     = !!reset_qkey_viols << 6;
		((__be32 *) mailbox->buf)[2] = cpu_to_be32(cap_mask);
	} else {
		((u8 *) mailbox->buf)[3]     = !!reset_qkey_viols;
		((__be32 *) mailbox->buf)[1] = cpu_to_be32(cap_mask);
	}

	err = mlx4_cmd(dev->dev, mailbox->dma, port, is_eth, MLX4_CMD_SET_PORT,
		       MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);

	mlx4_free_cmd_mailbox(dev->dev, mailbox);
	return err;
}
static int mlx4_ib_modify_port(struct ib_device *ibdev, u8 port, int mask,
			       struct ib_port_modify *props)
{
	struct ib_port_attr attr;
	u32 cap_mask;
	int err;

	mutex_lock(&to_mdev(ibdev)->cap_mask_mutex);

	err = mlx4_ib_query_port(ibdev, port, &attr);
	if (err)
		goto out;

	cap_mask = (attr.port_cap_flags | props->set_port_cap_mask) &
		~props->clr_port_cap_mask;

	err = mlx4_SET_PORT(to_mdev(ibdev), port,
			    !!(mask & IB_PORT_RESET_QKEY_CNTR),
			    cap_mask);

out:
	mutex_unlock(&to_mdev(ibdev)->cap_mask_mutex);
	return err;
}
static struct ib_ucontext *mlx4_ib_alloc_ucontext(struct ib_device *ibdev,
						  struct ib_udata *udata)
{
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	struct mlx4_ib_ucontext *context;
	struct mlx4_ib_alloc_ucontext_resp resp;
	int err;

	if (!dev->ib_active)
		return ERR_PTR(-EAGAIN);

	resp.qp_tab_size      = dev->dev->caps.num_qps;
	resp.bf_reg_size      = dev->dev->caps.bf_reg_size;
	resp.bf_regs_per_page = dev->dev->caps.bf_regs_per_page;

	context = kmalloc(sizeof *context, GFP_KERNEL);
	if (!context)
		return ERR_PTR(-ENOMEM);

	err = mlx4_uar_alloc(to_mdev(ibdev)->dev, &context->uar);
	if (err) {
		kfree(context);
		return ERR_PTR(err);
	}

	INIT_LIST_HEAD(&context->db_page_list);
	mutex_init(&context->db_page_mutex);

	err = ib_copy_to_udata(udata, &resp, sizeof resp);
	if (err) {
		mlx4_uar_free(to_mdev(ibdev)->dev, &context->uar);
		kfree(context);
		return ERR_PTR(-EFAULT);
	}

	return &context->ibucontext;
}
static int mlx4_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
{
	struct mlx4_ib_ucontext *context = to_mucontext(ibcontext);

	mlx4_uar_free(to_mdev(ibcontext->device)->dev, &context->uar);
	kfree(context);

	return 0;
}
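/*
 * Userspace maps one page at a time: offset 0 is the context's UAR
 * doorbell page (uncached), offset 1 the matching BlueFlame page
 * (write-combining) when BlueFlame is supported.
 */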
static int mlx4_ib_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
	struct mlx4_ib_dev *dev = to_mdev(context->device);

	if (vma->vm_end - vma->vm_start != PAGE_SIZE)
		return -EINVAL;

	if (vma->vm_pgoff == 0) {
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

		if (io_remap_pfn_range(vma, vma->vm_start,
				       to_mucontext(context)->uar.pfn,
				       PAGE_SIZE, vma->vm_page_prot))
			return -EAGAIN;
	} else if (vma->vm_pgoff == 1 && dev->dev->caps.bf_reg_size != 0) {
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

		if (io_remap_pfn_range(vma, vma->vm_start,
				       to_mucontext(context)->uar.pfn +
				       dev->dev->caps.num_uars,
				       PAGE_SIZE, vma->vm_page_prot))
			return -EAGAIN;
	} else
		return -EINVAL;

	return 0;
}
static struct ib_pd *mlx4_ib_alloc_pd(struct ib_device *ibdev,
				      struct ib_ucontext *context,
				      struct ib_udata *udata)
{
	struct mlx4_ib_pd *pd;
	int err;

	pd = kmalloc(sizeof *pd, GFP_KERNEL);
	if (!pd)
		return ERR_PTR(-ENOMEM);

	err = mlx4_pd_alloc(to_mdev(ibdev)->dev, &pd->pdn);
	if (err) {
		kfree(pd);
		return ERR_PTR(err);
	}

	if (context)
		if (ib_copy_to_udata(udata, &pd->pdn, sizeof (__u32))) {
			mlx4_pd_free(to_mdev(ibdev)->dev, pd->pdn);
			kfree(pd);
			return ERR_PTR(-EFAULT);
		}

	return &pd->ibpd;
}
static int mlx4_ib_dealloc_pd(struct ib_pd *pd)
{
	mlx4_pd_free(to_mdev(pd->device)->dev, to_mpd(pd)->pdn);
	kfree(pd);

	return 0;
}
static struct ib_xrcd *mlx4_ib_alloc_xrcd(struct ib_device *ibdev,
					  struct ib_ucontext *context,
					  struct ib_udata *udata)
{
	struct mlx4_ib_xrcd *xrcd;
	int err;

	if (!(to_mdev(ibdev)->dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC))
		return ERR_PTR(-ENOSYS);

	xrcd = kmalloc(sizeof *xrcd, GFP_KERNEL);
	if (!xrcd)
		return ERR_PTR(-ENOMEM);

	err = mlx4_xrcd_alloc(to_mdev(ibdev)->dev, &xrcd->xrcdn);
	if (err)
		goto err1;

	xrcd->pd = ib_alloc_pd(ibdev);
	if (IS_ERR(xrcd->pd)) {
		err = PTR_ERR(xrcd->pd);
		goto err2;
	}

	xrcd->cq = ib_create_cq(ibdev, NULL, NULL, xrcd, 1, 0);
	if (IS_ERR(xrcd->cq)) {
		err = PTR_ERR(xrcd->cq);
		goto err3;
	}

	return &xrcd->ibxrcd;

err3:
	ib_dealloc_pd(xrcd->pd);
err2:
	mlx4_xrcd_free(to_mdev(ibdev)->dev, xrcd->xrcdn);
err1:
	kfree(xrcd);
	return ERR_PTR(err);
}
static int mlx4_ib_dealloc_xrcd(struct ib_xrcd *xrcd)
{
	ib_destroy_cq(to_mxrcd(xrcd)->cq);
	ib_dealloc_pd(to_mxrcd(xrcd)->pd);
	mlx4_xrcd_free(to_mdev(xrcd->device)->dev, to_mxrcd(xrcd)->xrcdn);
	kfree(xrcd);

	return 0;
}
static int add_gid_entry(struct ib_qp *ibqp, union ib_gid *gid)
{
	struct mlx4_ib_qp *mqp = to_mqp(ibqp);
	struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
	struct mlx4_ib_gid_entry *ge;

	ge = kzalloc(sizeof *ge, GFP_KERNEL);
	if (!ge)
		return -ENOMEM;

	ge->gid = *gid;
	if (mlx4_ib_add_mc(mdev, mqp, gid)) {
		ge->port = mqp->port;
		ge->added = 1;
	}

	mutex_lock(&mqp->mutex);
	list_add_tail(&ge->list, &mqp->gid_list);
	mutex_unlock(&mqp->mutex);

	return 0;
}
int mlx4_ib_add_mc(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
		   union ib_gid *gid)
{
	u8 mac[6];
	struct net_device *ndev;
	int ret = 0;

	if (!mqp->port)
		return 0;

	spin_lock(&mdev->iboe.lock);
	ndev = mdev->iboe.netdevs[mqp->port - 1];
	if (ndev)
		dev_hold(ndev);
	spin_unlock(&mdev->iboe.lock);

	if (ndev) {
		rdma_get_mcast_mac((struct in6_addr *)gid, mac);
		rtnl_lock();
		dev_mc_add(mdev->iboe.netdevs[mqp->port - 1], mac);
		ret = 1;
		rtnl_unlock();
		dev_put(ndev);
	}

	return ret;
}
struct mlx4_ib_steering {
	struct list_head list;
	u64 reg_id;
	union ib_gid gid;
};
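/*
 * With device-managed flow steering, mlx4_multicast_attach() returns a
 * registration id that must be remembered per rule so that the detach
 * path can find and remove the same rule later.
 */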
static int mlx4_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
	int err;
	struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
	struct mlx4_ib_qp *mqp = to_mqp(ibqp);
	u64 reg_id;
	struct mlx4_ib_steering *ib_steering = NULL;

	if (mdev->dev->caps.steering_mode ==
	    MLX4_STEERING_MODE_DEVICE_MANAGED) {
		ib_steering = kmalloc(sizeof(*ib_steering), GFP_KERNEL);
		if (!ib_steering)
			return -ENOMEM;
	}

	err = mlx4_multicast_attach(mdev->dev, &mqp->mqp, gid->raw, mqp->port,
				    !!(mqp->flags &
				       MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK),
				    MLX4_PROT_IB_IPV6, &reg_id);
	if (err)
		goto err_malloc;

	err = add_gid_entry(ibqp, gid);
	if (err)
		goto err_add;

	if (ib_steering) {
		memcpy(ib_steering->gid.raw, gid->raw, 16);
		ib_steering->reg_id = reg_id;
		mutex_lock(&mqp->mutex);
		list_add(&ib_steering->list, &mqp->steering_rules);
		mutex_unlock(&mqp->mutex);
	}
	return 0;

err_add:
	mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
			      MLX4_PROT_IB_IPV6, reg_id);
err_malloc:
	kfree(ib_steering);

	return err;
}
static struct mlx4_ib_gid_entry *find_gid_entry(struct mlx4_ib_qp *qp, u8 *raw)
{
	struct mlx4_ib_gid_entry *ge;
	struct mlx4_ib_gid_entry *tmp;
	struct mlx4_ib_gid_entry *ret = NULL;

	list_for_each_entry_safe(ge, tmp, &qp->gid_list, list) {
		if (!memcmp(raw, ge->gid.raw, 16)) {
			ret = ge;
			break;
		}
	}

	return ret;
}
static int mlx4_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
	int err;
	struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
	struct mlx4_ib_qp *mqp = to_mqp(ibqp);
	u8 mac[6];
	struct net_device *ndev;
	struct mlx4_ib_gid_entry *ge;
	u64 reg_id = 0;

	if (mdev->dev->caps.steering_mode ==
	    MLX4_STEERING_MODE_DEVICE_MANAGED) {
		struct mlx4_ib_steering *ib_steering;

		mutex_lock(&mqp->mutex);
		list_for_each_entry(ib_steering, &mqp->steering_rules, list) {
			if (!memcmp(ib_steering->gid.raw, gid->raw, 16)) {
				list_del(&ib_steering->list);
				break;
			}
		}
		mutex_unlock(&mqp->mutex);
		if (&ib_steering->list == &mqp->steering_rules) {
			pr_err("Couldn't find reg_id for mgid. Steering rule is left attached\n");
			return -EINVAL;
		}
		reg_id = ib_steering->reg_id;
		kfree(ib_steering);
	}

	err = mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
				    MLX4_PROT_IB_IPV6, reg_id);
	if (err)
		return err;

	mutex_lock(&mqp->mutex);
	ge = find_gid_entry(mqp, gid->raw);
	if (ge) {
		spin_lock(&mdev->iboe.lock);
		ndev = ge->added ? mdev->iboe.netdevs[ge->port - 1] : NULL;
		if (ndev)
			dev_hold(ndev);
		spin_unlock(&mdev->iboe.lock);
		rdma_get_mcast_mac((struct in6_addr *)gid, mac);
		if (ndev) {
			rtnl_lock();
			dev_mc_del(mdev->iboe.netdevs[ge->port - 1], mac);
			rtnl_unlock();
			dev_put(ndev);
		}
		list_del(&ge->list);
		kfree(ge);
	} else
		pr_warn("could not find mgid entry\n");

	mutex_unlock(&mqp->mutex);

	return 0;
}
static int init_node_data(struct mlx4_ib_dev *dev)
{
	struct ib_smp *in_mad  = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;

	in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_NODE_DESC;

	err = mlx4_MAD_IFC(dev, 1, 1, 1, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	memcpy(dev->ib_dev.node_desc, out_mad->data, 64);

	in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;

	err = mlx4_MAD_IFC(dev, 1, 1, 1, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	memcpy(&dev->ib_dev.node_guid, out_mad->data + 12, 8);

out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}
static ssize_t show_hca(struct device *device, struct device_attribute *attr,
			char *buf)
{
	struct mlx4_ib_dev *dev =
		container_of(device, struct mlx4_ib_dev, ib_dev.dev);
	return sprintf(buf, "MT%d\n", dev->dev->pdev->device);
}

static ssize_t show_fw_ver(struct device *device, struct device_attribute *attr,
			   char *buf)
{
	struct mlx4_ib_dev *dev =
		container_of(device, struct mlx4_ib_dev, ib_dev.dev);
	return sprintf(buf, "%d.%d.%d\n", (int) (dev->dev->caps.fw_ver >> 32),
		       (int) (dev->dev->caps.fw_ver >> 16) & 0xffff,
		       (int) dev->dev->caps.fw_ver & 0xffff);
}

static ssize_t show_rev(struct device *device, struct device_attribute *attr,
			char *buf)
{
	struct mlx4_ib_dev *dev =
		container_of(device, struct mlx4_ib_dev, ib_dev.dev);
	return sprintf(buf, "%x\n", dev->dev->rev_id);
}

static ssize_t show_board(struct device *device, struct device_attribute *attr,
			  char *buf)
{
	struct mlx4_ib_dev *dev =
		container_of(device, struct mlx4_ib_dev, ib_dev.dev);
	return sprintf(buf, "%.*s\n", MLX4_BOARD_ID_LEN,
		       dev->dev->board_id);
}

static DEVICE_ATTR(hw_rev,   S_IRUGO, show_rev,    NULL);
static DEVICE_ATTR(fw_ver,   S_IRUGO, show_fw_ver, NULL);
static DEVICE_ATTR(hca_type, S_IRUGO, show_hca,    NULL);
static DEVICE_ATTR(board_id, S_IRUGO, show_board,  NULL);
static struct device_attribute *mlx4_class_attributes[] = {
	&dev_attr_hw_rev,
	&dev_attr_fw_ver,
	&dev_attr_hca_type,
	&dev_attr_board_id
};
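/*
 * Build the modified EUI-64 interface id used in IBoE GIDs: the MAC is
 * split around the VLAN id (or the 0xfffe filler for untagged traffic)
 * and the universal/local bit is flipped.
 */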
static void mlx4_addrconf_ifid_eui48(u8 *eui, u16 vlan_id, struct net_device *dev)
{
	memcpy(eui, dev->dev_addr, 3);
	memcpy(eui + 5, dev->dev_addr + 3, 3);
	if (vlan_id < 0x1000) {
		eui[3] = vlan_id >> 8;
		eui[4] = vlan_id & 0xff;
	} else {
		eui[3] = 0xff;
		eui[4] = 0xfe;
	}
	eui[0] ^= 2;
}
static void update_gids_task(struct work_struct *work)
{
	struct update_gid_work *gw = container_of(work, struct update_gid_work, work);
	struct mlx4_cmd_mailbox *mailbox;
	union ib_gid *gids;
	int err;
	struct mlx4_dev	*dev = gw->dev->dev;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox)) {
		pr_warn("update gid table failed %ld\n", PTR_ERR(mailbox));
		return;
	}

	gids = mailbox->buf;
	memcpy(gids, gw->gids, sizeof gw->gids);

	err = mlx4_cmd(dev, mailbox->dma, MLX4_SET_PORT_GID_TABLE << 8 | gw->port,
		       1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
		       MLX4_CMD_NATIVE);
	if (err)
		pr_warn("set port command failed\n");
	else {
		memcpy(gw->dev->iboe.gid_table[gw->port - 1], gw->gids, sizeof gw->gids);
		mlx4_ib_dispatch_event(gw->dev, gw->port, IB_EVENT_GID_CHANGE);
	}

	mlx4_free_cmd_mailbox(dev, mailbox);
	kfree(gw);
}
static int update_ipv6_gids(struct mlx4_ib_dev *dev, int port, int clear)
{
	struct net_device *ndev = dev->iboe.netdevs[port - 1];
	struct update_gid_work *work;
	struct net_device *tmp;
	int i;
	u8 *hits;
	int ret;
	union ib_gid gid;
	int free;
	int found;
	int need_update = 0;
	u16 vid;

	work = kzalloc(sizeof *work, GFP_ATOMIC);
	if (!work)
		return -ENOMEM;

	hits = kzalloc(128, GFP_ATOMIC);
	if (!hits) {
		ret = -ENOMEM;
		goto out;
	}

	rcu_read_lock();
	for_each_netdev_rcu(&init_net, tmp) {
		if (ndev && (tmp == ndev || rdma_vlan_dev_real_dev(tmp) == ndev)) {
			gid.global.subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
			vid = rdma_vlan_dev_vlan_id(tmp);
			mlx4_addrconf_ifid_eui48(&gid.raw[8], vid, ndev);
			found = 0;
			free = -1;
			for (i = 0; i < 128; ++i) {
				if (free < 0 &&
				    !memcmp(&dev->iboe.gid_table[port - 1][i], &zgid, sizeof zgid))
					free = i;
				if (!memcmp(&dev->iboe.gid_table[port - 1][i], &gid, sizeof gid)) {
					hits[i] = 1;
					found = 1;
					break;
				}
			}

			if (!found) {
				if (tmp == ndev &&
				    (memcmp(&dev->iboe.gid_table[port - 1][0],
					    &gid, sizeof gid) ||
				     !memcmp(&dev->iboe.gid_table[port - 1][0],
					     &zgid, sizeof gid))) {
					dev->iboe.gid_table[port - 1][0] = gid;
					++need_update;
					hits[0] = 1;
				} else if (free >= 0) {
					dev->iboe.gid_table[port - 1][free] = gid;
					hits[free] = 1;
					++need_update;
				}
			}
		}
	}
	rcu_read_unlock();

	for (i = 0; i < 128; ++i)
		if (!hits[i]) {
			if (memcmp(&dev->iboe.gid_table[port - 1][i], &zgid, sizeof zgid))
				++need_update;
			dev->iboe.gid_table[port - 1][i] = zgid;
		}

	if (need_update) {
		memcpy(work->gids, dev->iboe.gid_table[port - 1], sizeof work->gids);
		INIT_WORK(&work->work, update_gids_task);
		work->port = port;
		work->dev = dev;
		queue_work(wq, &work->work);
	} else
		kfree(work);

	kfree(hits);
	return 0;

out:
	kfree(work);
	return ret;
}
static void handle_en_event(struct mlx4_ib_dev *dev, int port, unsigned long event)
{
	switch (event) {
	case NETDEV_UP:
	case NETDEV_CHANGEADDR:
		update_ipv6_gids(dev, port, 0);
		break;

	case NETDEV_DOWN:
		update_ipv6_gids(dev, port, 1);
		dev->iboe.netdevs[port - 1] = NULL;
	}
}

static void netdev_added(struct mlx4_ib_dev *dev, int port)
{
	update_ipv6_gids(dev, port, 0);
}

static void netdev_removed(struct mlx4_ib_dev *dev, int port)
{
	update_ipv6_gids(dev, port, 1);
}
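/*
 * A single netdevice notifier covers both ports: refresh the cached
 * net_device pointers, then route UP/DOWN/CHANGEADDR events so the GID
 * table follows the state of the Ethernet interfaces.
 */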
static int mlx4_ib_netdev_event(struct notifier_block *this, unsigned long event,
				void *ptr)
{
	struct net_device *dev = ptr;
	struct mlx4_ib_dev *ibdev;
	struct net_device *oldnd;
	struct mlx4_ib_iboe *iboe;
	int port;

	if (!net_eq(dev_net(dev), &init_net))
		return NOTIFY_DONE;

	ibdev = container_of(this, struct mlx4_ib_dev, iboe.nb);
	iboe = &ibdev->iboe;

	spin_lock(&iboe->lock);
	mlx4_foreach_ib_transport_port(port, ibdev->dev) {
		oldnd = iboe->netdevs[port - 1];
		iboe->netdevs[port - 1] =
			mlx4_get_protocol_dev(ibdev->dev, MLX4_PROT_ETH, port);
		if (oldnd != iboe->netdevs[port - 1]) {
			if (iboe->netdevs[port - 1])
				netdev_added(ibdev, port);
			else
				netdev_removed(ibdev, port);
		}
	}

	if (dev == iboe->netdevs[0] ||
	    (iboe->netdevs[0] && rdma_vlan_dev_real_dev(dev) == iboe->netdevs[0]))
		handle_en_event(ibdev, 1, event);
	else if (dev == iboe->netdevs[1]
		 || (iboe->netdevs[1] && rdma_vlan_dev_real_dev(dev) == iboe->netdevs[1]))
		handle_en_event(ibdev, 2, event);

	spin_unlock(&iboe->lock);

	return NOTIFY_DONE;
}
static void mlx4_ib_alloc_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev)
{
	char name[32];
	int eq_per_port = 0;
	int added_eqs = 0;
	int total_eqs = 0;
	int i, j, eq;

	/* Legacy mode or comp_pool is not large enough */
	if (dev->caps.comp_pool == 0 ||
	    dev->caps.num_ports > dev->caps.comp_pool)
		return;

	eq_per_port = rounddown_pow_of_two(dev->caps.comp_pool /
					   dev->caps.num_ports);

	/* Init eq table */
	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
		added_eqs += eq_per_port;

	total_eqs = dev->caps.num_comp_vectors + added_eqs;

	ibdev->eq_table = kzalloc(total_eqs * sizeof(int), GFP_KERNEL);
	if (!ibdev->eq_table)
		return;

	ibdev->eq_added = added_eqs;

	eq = 0;
	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB) {
		for (j = 0; j < eq_per_port; j++) {
			sprintf(name, "mlx4-ib-%d-%d@%s",
				i, j, dev->pdev->bus->name);
			/* Set IRQ for specific name (per ring) */
			if (mlx4_assign_eq(dev, name, NULL,
					   &ibdev->eq_table[eq])) {
				/* Use legacy (same as mlx4_en driver) */
				pr_warn("Can't allocate EQ %d; reverting to legacy\n", eq);
				ibdev->eq_table[eq] =
					(eq % dev->caps.num_comp_vectors);
			}
			eq++;
		}
	}

	/* Fill the rest of the vector with legacy EQs */
	for (i = 0, eq = added_eqs; i < dev->caps.num_comp_vectors; i++)
		ibdev->eq_table[eq++] = i;

	/* Advertise the new number of EQs to clients */
	ibdev->ib_dev.num_comp_vectors = total_eqs;
}
static void mlx4_ib_free_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev)
{
	int i;

	/* no additional eqs were added */
	if (!ibdev->eq_table)
		return;

	/* Reset the advertised EQ number */
	ibdev->ib_dev.num_comp_vectors = dev->caps.num_comp_vectors;

	/* Free only the added eqs */
	for (i = 0; i < ibdev->eq_added; i++) {
		/* Don't free legacy eqs if used */
		if (ibdev->eq_table[i] <= dev->caps.num_comp_vectors)
			continue;
		mlx4_release_eq(dev, ibdev->eq_table[i]);
	}

	kfree(ibdev->eq_table);
}
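/*
 * Probe path: allocate the ib_device, wire up every verbs entry point,
 * register with the IB core and hook the netdevice notifier for IBoE.
 * The error labels unwind the setup steps in reverse order.
 */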
static void *mlx4_ib_add(struct mlx4_dev *dev)
{
	struct mlx4_ib_dev *ibdev;
	int num_ports = 0;
	int i, j;
	int err;
	struct mlx4_ib_iboe *iboe;

	pr_info_once("%s", mlx4_ib_version);

	if (mlx4_is_mfunc(dev)) {
		pr_warn("IB not yet supported in SRIOV\n");
		return NULL;
	}

	mlx4_foreach_ib_transport_port(i, dev)
		num_ports++;

	/* No point in registering a device with no ports... */
	if (num_ports == 0)
		return NULL;

	ibdev = (struct mlx4_ib_dev *) ib_alloc_device(sizeof *ibdev);
	if (!ibdev) {
		dev_err(&dev->pdev->dev, "Device struct alloc failed\n");
		return NULL;
	}

	iboe = &ibdev->iboe;

	if (mlx4_pd_alloc(dev, &ibdev->priv_pdn))
		goto err_dealloc;

	if (mlx4_uar_alloc(dev, &ibdev->priv_uar))
		goto err_pd;

	ibdev->uar_map = ioremap((phys_addr_t) ibdev->priv_uar.pfn << PAGE_SHIFT,
				 PAGE_SIZE);
	if (!ibdev->uar_map)
		goto err_uar;
	MLX4_INIT_DOORBELL_LOCK(&ibdev->uar_lock);

	ibdev->dev = dev;

	strlcpy(ibdev->ib_dev.name, "mlx4_%d", IB_DEVICE_NAME_MAX);
	ibdev->ib_dev.owner		= THIS_MODULE;
	ibdev->ib_dev.node_type		= RDMA_NODE_IB_CA;
	ibdev->ib_dev.local_dma_lkey	= dev->caps.reserved_lkey;
	ibdev->num_ports		= num_ports;
	ibdev->ib_dev.phys_port_cnt     = ibdev->num_ports;
	ibdev->ib_dev.num_comp_vectors	= dev->caps.num_comp_vectors;
	ibdev->ib_dev.dma_device	= &dev->pdev->dev;

	ibdev->ib_dev.uverbs_abi_ver	= MLX4_IB_UVERBS_ABI_VERSION;
	ibdev->ib_dev.uverbs_cmd_mask	=
		(1ull << IB_USER_VERBS_CMD_GET_CONTEXT)		|
		(1ull << IB_USER_VERBS_CMD_QUERY_DEVICE)	|
		(1ull << IB_USER_VERBS_CMD_QUERY_PORT)		|
		(1ull << IB_USER_VERBS_CMD_ALLOC_PD)		|
		(1ull << IB_USER_VERBS_CMD_DEALLOC_PD)		|
		(1ull << IB_USER_VERBS_CMD_REG_MR)		|
		(1ull << IB_USER_VERBS_CMD_DEREG_MR)		|
		(1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL)	|
		(1ull << IB_USER_VERBS_CMD_CREATE_CQ)		|
		(1ull << IB_USER_VERBS_CMD_RESIZE_CQ)		|
		(1ull << IB_USER_VERBS_CMD_DESTROY_CQ)		|
		(1ull << IB_USER_VERBS_CMD_CREATE_QP)		|
		(1ull << IB_USER_VERBS_CMD_MODIFY_QP)		|
		(1ull << IB_USER_VERBS_CMD_QUERY_QP)		|
		(1ull << IB_USER_VERBS_CMD_DESTROY_QP)		|
		(1ull << IB_USER_VERBS_CMD_ATTACH_MCAST)	|
		(1ull << IB_USER_VERBS_CMD_DETACH_MCAST)	|
		(1ull << IB_USER_VERBS_CMD_CREATE_SRQ)		|
		(1ull << IB_USER_VERBS_CMD_MODIFY_SRQ)		|
		(1ull << IB_USER_VERBS_CMD_QUERY_SRQ)		|
		(1ull << IB_USER_VERBS_CMD_DESTROY_SRQ)		|
		(1ull << IB_USER_VERBS_CMD_CREATE_XSRQ)		|
		(1ull << IB_USER_VERBS_CMD_OPEN_QP);

	ibdev->ib_dev.query_device	= mlx4_ib_query_device;
	ibdev->ib_dev.query_port	= mlx4_ib_query_port;
	ibdev->ib_dev.get_link_layer	= mlx4_ib_port_link_layer;
	ibdev->ib_dev.query_gid		= mlx4_ib_query_gid;
	ibdev->ib_dev.query_pkey	= mlx4_ib_query_pkey;
	ibdev->ib_dev.modify_device	= mlx4_ib_modify_device;
	ibdev->ib_dev.modify_port	= mlx4_ib_modify_port;
	ibdev->ib_dev.alloc_ucontext	= mlx4_ib_alloc_ucontext;
	ibdev->ib_dev.dealloc_ucontext	= mlx4_ib_dealloc_ucontext;
	ibdev->ib_dev.mmap		= mlx4_ib_mmap;
	ibdev->ib_dev.alloc_pd		= mlx4_ib_alloc_pd;
	ibdev->ib_dev.dealloc_pd	= mlx4_ib_dealloc_pd;
	ibdev->ib_dev.create_ah		= mlx4_ib_create_ah;
	ibdev->ib_dev.query_ah		= mlx4_ib_query_ah;
	ibdev->ib_dev.destroy_ah	= mlx4_ib_destroy_ah;
	ibdev->ib_dev.create_srq	= mlx4_ib_create_srq;
	ibdev->ib_dev.modify_srq	= mlx4_ib_modify_srq;
	ibdev->ib_dev.query_srq		= mlx4_ib_query_srq;
	ibdev->ib_dev.destroy_srq	= mlx4_ib_destroy_srq;
	ibdev->ib_dev.post_srq_recv	= mlx4_ib_post_srq_recv;
	ibdev->ib_dev.create_qp		= mlx4_ib_create_qp;
	ibdev->ib_dev.modify_qp		= mlx4_ib_modify_qp;
	ibdev->ib_dev.query_qp		= mlx4_ib_query_qp;
	ibdev->ib_dev.destroy_qp	= mlx4_ib_destroy_qp;
	ibdev->ib_dev.post_send		= mlx4_ib_post_send;
	ibdev->ib_dev.post_recv		= mlx4_ib_post_recv;
	ibdev->ib_dev.create_cq		= mlx4_ib_create_cq;
	ibdev->ib_dev.modify_cq		= mlx4_ib_modify_cq;
	ibdev->ib_dev.resize_cq		= mlx4_ib_resize_cq;
	ibdev->ib_dev.destroy_cq	= mlx4_ib_destroy_cq;
	ibdev->ib_dev.poll_cq		= mlx4_ib_poll_cq;
	ibdev->ib_dev.req_notify_cq	= mlx4_ib_arm_cq;
	ibdev->ib_dev.get_dma_mr	= mlx4_ib_get_dma_mr;
	ibdev->ib_dev.reg_user_mr	= mlx4_ib_reg_user_mr;
	ibdev->ib_dev.dereg_mr		= mlx4_ib_dereg_mr;
	ibdev->ib_dev.alloc_fast_reg_mr = mlx4_ib_alloc_fast_reg_mr;
	ibdev->ib_dev.alloc_fast_reg_page_list = mlx4_ib_alloc_fast_reg_page_list;
	ibdev->ib_dev.free_fast_reg_page_list  = mlx4_ib_free_fast_reg_page_list;
	ibdev->ib_dev.attach_mcast	= mlx4_ib_mcg_attach;
	ibdev->ib_dev.detach_mcast	= mlx4_ib_mcg_detach;
	ibdev->ib_dev.process_mad	= mlx4_ib_process_mad;

	ibdev->ib_dev.alloc_fmr		= mlx4_ib_fmr_alloc;
	ibdev->ib_dev.map_phys_fmr	= mlx4_ib_map_phys_fmr;
	ibdev->ib_dev.unmap_fmr		= mlx4_ib_unmap_fmr;
	ibdev->ib_dev.dealloc_fmr	= mlx4_ib_fmr_dealloc;

	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) {
		ibdev->ib_dev.alloc_xrcd = mlx4_ib_alloc_xrcd;
		ibdev->ib_dev.dealloc_xrcd = mlx4_ib_dealloc_xrcd;
		ibdev->ib_dev.uverbs_cmd_mask |=
			(1ull << IB_USER_VERBS_CMD_OPEN_XRCD) |
			(1ull << IB_USER_VERBS_CMD_CLOSE_XRCD);
	}

	mlx4_ib_alloc_eqs(dev, ibdev);

	spin_lock_init(&iboe->lock);

	if (init_node_data(ibdev))
		goto err_map;

	for (i = 0; i < ibdev->num_ports; ++i) {
		if (mlx4_ib_port_link_layer(&ibdev->ib_dev, i + 1) ==
						IB_LINK_LAYER_ETHERNET) {
			err = mlx4_counter_alloc(ibdev->dev, &ibdev->counters[i]);
			if (err)
				ibdev->counters[i] = -1;
		} else
			ibdev->counters[i] = -1;
	}

	spin_lock_init(&ibdev->sm_lock);
	mutex_init(&ibdev->cap_mask_mutex);

	if (ib_register_device(&ibdev->ib_dev, NULL))
		goto err_counter;

	if (mlx4_ib_mad_init(ibdev))
		goto err_reg;

	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_IBOE && !iboe->nb.notifier_call) {
		iboe->nb.notifier_call = mlx4_ib_netdev_event;
		err = register_netdevice_notifier(&iboe->nb);
		if (err)
			goto err_reg;
	}

	for (j = 0; j < ARRAY_SIZE(mlx4_class_attributes); ++j) {
		if (device_create_file(&ibdev->ib_dev.dev,
				       mlx4_class_attributes[j]))
			goto err_notif;
	}

	ibdev->ib_active = true;

	return ibdev;

err_notif:
	if (unregister_netdevice_notifier(&ibdev->iboe.nb))
		pr_warn("failure unregistering notifier\n");
	flush_workqueue(wq);

err_reg:
	ib_unregister_device(&ibdev->ib_dev);

err_counter:
	for (; i; --i)
		if (ibdev->counters[i - 1] != -1)
			mlx4_counter_free(ibdev->dev, ibdev->counters[i - 1]);

err_map:
	iounmap(ibdev->uar_map);

err_uar:
	mlx4_uar_free(dev, &ibdev->priv_uar);

err_pd:
	mlx4_pd_free(dev, ibdev->priv_pdn);

err_dealloc:
	ib_dealloc_device(&ibdev->ib_dev);

	return NULL;
}
static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr)
{
	struct mlx4_ib_dev *ibdev = ibdev_ptr;
	int p;

	mlx4_ib_mad_cleanup(ibdev);
	ib_unregister_device(&ibdev->ib_dev);
	if (ibdev->iboe.nb.notifier_call) {
		if (unregister_netdevice_notifier(&ibdev->iboe.nb))
			pr_warn("failure unregistering notifier\n");
		ibdev->iboe.nb.notifier_call = NULL;
	}
	iounmap(ibdev->uar_map);
	for (p = 0; p < ibdev->num_ports; ++p)
		if (ibdev->counters[p] != -1)
			mlx4_counter_free(ibdev->dev, ibdev->counters[p]);
	mlx4_foreach_port(p, dev, MLX4_PORT_TYPE_IB)
		mlx4_CLOSE_PORT(dev, p);

	mlx4_ib_free_eqs(dev, ibdev);

	mlx4_uar_free(dev, &ibdev->priv_uar);
	mlx4_pd_free(dev, ibdev->priv_pdn);
	ib_dealloc_device(&ibdev->ib_dev);
}
static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr,
			  enum mlx4_dev_event event, unsigned long param)
{
	struct ib_event ibev;
	struct mlx4_ib_dev *ibdev = to_mdev((struct ib_device *) ibdev_ptr);
	struct mlx4_eqe *eqe = NULL;
	struct ib_event_work *ew;
	int port = 0;

	if (event == MLX4_DEV_EVENT_PORT_MGMT_CHANGE)
		eqe = (struct mlx4_eqe *)param;
	else
		port = (int) param;

	if (port > ibdev->num_ports)
		return;

	switch (event) {
	case MLX4_DEV_EVENT_PORT_UP:
		ibev.event = IB_EVENT_PORT_ACTIVE;
		break;

	case MLX4_DEV_EVENT_PORT_DOWN:
		ibev.event = IB_EVENT_PORT_ERR;
		break;

	case MLX4_DEV_EVENT_CATASTROPHIC_ERROR:
		ibdev->ib_active = false;
		ibev.event = IB_EVENT_DEVICE_FATAL;
		break;

	case MLX4_DEV_EVENT_PORT_MGMT_CHANGE:
		ew = kmalloc(sizeof *ew, GFP_ATOMIC);
		if (!ew) {
			pr_err("failed to allocate memory for events work\n");
			break;
		}

		INIT_WORK(&ew->work, handle_port_mgmt_change_event);
		memcpy(&ew->ib_eqe, eqe, sizeof *eqe);
		ew->ib_dev = ibdev;
		handle_port_mgmt_change_event(&ew->work);
		return;

	default:
		return;
	}

	ibev.device	      = ibdev_ptr;
	ibev.element.port_num = port;

	ib_dispatch_event(&ibev);
}
static struct mlx4_interface mlx4_ib_interface = {
	.add		= mlx4_ib_add,
	.remove		= mlx4_ib_remove,
	.event		= mlx4_ib_event,
	.protocol	= MLX4_PROT_IB_IPV6
};
static int __init mlx4_ib_init(void)
{
	int err;

	wq = create_singlethread_workqueue("mlx4_ib");
	if (!wq)
		return -ENOMEM;

	err = mlx4_register_interface(&mlx4_ib_interface);
	if (err) {
		destroy_workqueue(wq);
		return err;
	}

	return 0;
}
static void __exit mlx4_ib_cleanup(void)
{
	mlx4_unregister_interface(&mlx4_ib_interface);
	destroy_workqueue(wq);
}
module_init(mlx4_ib_init);
module_exit(mlx4_ib_cleanup);