/*
 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/device.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/ethtool.h>
#include <linux/rtnetlink.h>
#include <linux/inetdevice.h>
#include <linux/io.h>

#include <asm/byteorder.h>

#include <rdma/iw_cm.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_user_verbs.h>

/* Driver-private header: c4iw_* types, to_c4iw_*() helpers, and PDBG. */
#include "iw_cxgb4.h"
static int fastreg_support = 1;
module_param(fastreg_support, int, 0644);
MODULE_PARM_DESC(fastreg_support, "Advertise fastreg support (default=1)");

/*
 * iWARP devices have no address handles, multicast groups, or MADs,
 * so these verbs simply report that they are unsupported.
 */
static struct ib_ah *c4iw_ah_create(struct ib_pd *pd,
				    struct ib_ah_attr *ah_attr)
{
	return ERR_PTR(-ENOSYS);
}

static int c4iw_ah_destroy(struct ib_ah *ah)
{
	return -ENOSYS;
}

static int c4iw_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid,
				 u16 lid)
{
	return -ENOSYS;
}

static int c4iw_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid,
				 u16 lid)
{
	return -ENOSYS;
}

static int c4iw_process_mad(struct ib_device *ibdev, int mad_flags,
			    u8 port_num, struct ib_wc *in_wc,
			    struct ib_grh *in_grh, struct ib_mad *in_mad,
			    struct ib_mad *out_mad)
{
	return -ENOSYS;
}

static int c4iw_dealloc_ucontext(struct ib_ucontext *context)
{
	struct c4iw_dev *rhp = to_c4iw_dev(context->device);
	struct c4iw_ucontext *ucontext = to_c4iw_ucontext(context);
	struct c4iw_mm_entry *mm, *tmp;

	PDBG("%s context %p\n", __func__, context);

	/* Free any mmap entries that userspace never claimed. */
	list_for_each_entry_safe(mm, tmp, &ucontext->mmaps, entry)
		kfree(mm);
	c4iw_release_dev_ucontext(&rhp->rdev, &ucontext->uctx);
	kfree(ucontext);
	return 0;
}

static struct ib_ucontext *c4iw_alloc_ucontext(struct ib_device *ibdev,
					       struct ib_udata *udata)
{
	struct c4iw_ucontext *context;
	struct c4iw_dev *rhp = to_c4iw_dev(ibdev);

	PDBG("%s ibdev %p\n", __func__, ibdev);
	context = kzalloc(sizeof(*context), GFP_KERNEL);
	if (!context)
		return ERR_PTR(-ENOMEM);
	c4iw_init_dev_ucontext(&rhp->rdev, &context->uctx);
	INIT_LIST_HEAD(&context->mmaps);
	spin_lock_init(&context->mmap_lock);
	return &context->ibucontext;
}
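
/*
 * Userspace maps queue and doorbell memory by encoding a key in the
 * mmap() file offset.  c4iw_mmap() recovers the key as
 * vm_pgoff << PAGE_SHIFT and looks it up in the per-ucontext mmaps
 * list (populated when the corresponding object was created) to find
 * the physical address to map.
 */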
static int c4iw_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
	int len = vma->vm_end - vma->vm_start;
	u32 key = vma->vm_pgoff << PAGE_SHIFT;
	struct c4iw_rdev *rdev;
	int ret = 0;
	struct c4iw_mm_entry *mm;
	struct c4iw_ucontext *ucontext;
	u64 addr;

	PDBG("%s pgoff 0x%lx key 0x%x len %d\n", __func__, vma->vm_pgoff,
	     key, len);

	if (vma->vm_start & (PAGE_SIZE - 1))
		return -EINVAL;

	rdev = &(to_c4iw_dev(context->device)->rdev);
	ucontext = to_c4iw_ucontext(context);

	mm = remove_mmap(ucontext, key, len);
	if (!mm)
		return -EINVAL;
	addr = mm->addr;
	kfree(mm);

	if ((addr >= pci_resource_start(rdev->lldi.pdev, 0)) &&
	    (addr < (pci_resource_start(rdev->lldi.pdev, 0) +
		     pci_resource_len(rdev->lldi.pdev, 0)))) {

		/*
		 * MA_SYNC register...
		 */
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
		ret = io_remap_pfn_range(vma, vma->vm_start,
					 addr >> PAGE_SHIFT,
					 len, vma->vm_page_prot);
	} else if ((addr >= pci_resource_start(rdev->lldi.pdev, 2)) &&
		   (addr < (pci_resource_start(rdev->lldi.pdev, 2) +
			    pci_resource_len(rdev->lldi.pdev, 2)))) {

		/*
		 * Map user DB or OCQP memory...
		 */
		if (addr >= rdev->oc_mw_pa)
			vma->vm_page_prot = t4_pgprot_wc(vma->vm_page_prot);
		else
			vma->vm_page_prot =
				pgprot_noncached(vma->vm_page_prot);
		ret = io_remap_pfn_range(vma, vma->vm_start,
					 addr >> PAGE_SHIFT,
					 len, vma->vm_page_prot);
	} else {

		/*
		 * Map WQ or CQ contig dma memory...
		 */
		ret = remap_pfn_range(vma, vma->vm_start,
				      addr >> PAGE_SHIFT,
				      len, vma->vm_page_prot);
	}

	return ret;
}

static int c4iw_deallocate_pd(struct ib_pd *pd)
{
	struct c4iw_dev *rhp;
	struct c4iw_pd *php;

	php = to_c4iw_pd(pd);
	rhp = php->rhp;
	PDBG("%s ibpd %p pdid 0x%x\n", __func__, pd, php->pdid);
	c4iw_put_resource(&rhp->rdev.resource.pdid_fifo, php->pdid,
			  &rhp->rdev.resource.pdid_fifo_lock);
	kfree(php);
	return 0;
}
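
/*
 * PD ids are drawn from a driver-managed fifo of hardware PD numbers.
 * For a userspace PD, the id is copied back through udata so the user
 * library can refer to the same hardware PD.
 */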
static struct ib_pd *c4iw_allocate_pd(struct ib_device *ibdev,
				      struct ib_ucontext *context,
				      struct ib_udata *udata)
{
	struct c4iw_pd *php;
	u32 pdid;
	struct c4iw_dev *rhp;

	PDBG("%s ibdev %p\n", __func__, ibdev);
	rhp = (struct c4iw_dev *) ibdev;
	pdid = c4iw_get_resource(&rhp->rdev.resource.pdid_fifo,
				 &rhp->rdev.resource.pdid_fifo_lock);
	if (!pdid)
		return ERR_PTR(-EINVAL);
	php = kzalloc(sizeof(*php), GFP_KERNEL);
	if (!php) {
		c4iw_put_resource(&rhp->rdev.resource.pdid_fifo, pdid,
				  &rhp->rdev.resource.pdid_fifo_lock);
		return ERR_PTR(-ENOMEM);
	}
	php->pdid = pdid;
	php->rhp = rhp;
	if (context) {
		if (ib_copy_to_udata(udata, &php->pdid, sizeof(u32))) {
			c4iw_deallocate_pd(&php->ibpd);
			return ERR_PTR(-EFAULT);
		}
	}
	PDBG("%s pdid 0x%0x ptr 0x%p\n", __func__, pdid, php);
	return &php->ibpd;
}

static int c4iw_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
			   u16 *pkey)
{
	PDBG("%s ibdev %p\n", __func__, ibdev);
	*pkey = 0;
	return 0;
}

static int c4iw_query_gid(struct ib_device *ibdev, u8 port, int index,
			  union ib_gid *gid)
{
	struct c4iw_dev *dev;

	PDBG("%s ibdev %p, port %d, index %d, gid %p\n",
	     __func__, ibdev, port, index, gid);
	dev = to_c4iw_dev(ibdev);
	BUG_ON(port == 0);
	memset(&(gid->raw[0]), 0, sizeof(gid->raw));
	memcpy(&(gid->raw[0]), dev->rdev.lldi.ports[port - 1]->dev_addr, 6);
	return 0;
}

static int c4iw_query_device(struct ib_device *ibdev,
			     struct ib_device_attr *props)
{
	struct c4iw_dev *dev;

	PDBG("%s ibdev %p\n", __func__, ibdev);

	dev = to_c4iw_dev(ibdev);
	memset(props, 0, sizeof *props);
	memcpy(&props->sys_image_guid, dev->rdev.lldi.ports[0]->dev_addr, 6);
	props->hw_ver = dev->rdev.lldi.adapter_type;
	props->fw_ver = dev->rdev.lldi.fw_vers;
	props->device_cap_flags = dev->device_cap_flags;
	props->page_size_cap = T4_PAGESIZE_MASK;
	props->vendor_id = (u32)dev->rdev.lldi.pdev->vendor;
	props->vendor_part_id = (u32)dev->rdev.lldi.pdev->device;
	props->max_mr_size = T4_MAX_MR_SIZE;
	props->max_qp = T4_MAX_NUM_QP;
	props->max_qp_wr = T4_MAX_QP_DEPTH;
	props->max_sge = T4_MAX_RECV_SGE;
	props->max_sge_rd = 1;
	props->max_qp_rd_atom = c4iw_max_read_depth;
	props->max_qp_init_rd_atom = c4iw_max_read_depth;
	props->max_cq = T4_MAX_NUM_CQ;
	props->max_cqe = T4_MAX_CQ_DEPTH;
	props->max_mr = c4iw_num_stags(&dev->rdev);
	props->max_pd = T4_MAX_NUM_PD;
	props->local_ca_ack_delay = 0;
	props->max_fast_reg_page_list_len = T4_MAX_FR_DEPTH;

	return 0;
}
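
/*
 * iWARP has no port state machine of its own, so c4iw_query_port()
 * derives a state from the Ethernet interface: no carrier means DOWN,
 * carrier with an IPv4 address means ACTIVE, and carrier without an
 * address means INIT.
 */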
static int c4iw_query_port(struct ib_device *ibdev, u8 port,
			   struct ib_port_attr *props)
{
	struct c4iw_dev *dev;
	struct net_device *netdev;
	struct in_device *inetdev;

	PDBG("%s ibdev %p\n", __func__, ibdev);

	dev = to_c4iw_dev(ibdev);
	netdev = dev->rdev.lldi.ports[port - 1];

	memset(props, 0, sizeof(struct ib_port_attr));
	props->max_mtu = IB_MTU_4096;
	if (netdev->mtu >= 4096)
		props->active_mtu = IB_MTU_4096;
	else if (netdev->mtu >= 2048)
		props->active_mtu = IB_MTU_2048;
	else if (netdev->mtu >= 1024)
		props->active_mtu = IB_MTU_1024;
	else if (netdev->mtu >= 512)
		props->active_mtu = IB_MTU_512;
	else
		props->active_mtu = IB_MTU_256;

	if (!netif_carrier_ok(netdev))
		props->state = IB_PORT_DOWN;
	else {
		inetdev = in_dev_get(netdev);
		if (inetdev) {
			if (inetdev->ifa_list)
				props->state = IB_PORT_ACTIVE;
			else
				props->state = IB_PORT_INIT;
			in_dev_put(inetdev);
		} else
			props->state = IB_PORT_INIT;
	}

	props->port_cap_flags =
	    IB_PORT_CM_SUP |
	    IB_PORT_SNMP_TUNNEL_SUP |
	    IB_PORT_REINIT_SUP |
	    IB_PORT_DEVICE_MGMT_SUP |
	    IB_PORT_VENDOR_CLASS_SUP | IB_PORT_BOOT_MGMT_SUP;
	props->gid_tbl_len = 1;
	props->pkey_tbl_len = 1;
	props->active_width = 2;
	props->active_speed = 2;
	props->max_msg_sz = -1;

	return 0;
}

static ssize_t show_rev(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	struct c4iw_dev *c4iw_dev = container_of(dev, struct c4iw_dev,
						 ibdev.dev);
	PDBG("%s dev 0x%p\n", __func__, dev);
	return sprintf(buf, "%d\n", c4iw_dev->rdev.lldi.adapter_type);
}

static ssize_t show_fw_ver(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct c4iw_dev *c4iw_dev = container_of(dev, struct c4iw_dev,
						 ibdev.dev);
	PDBG("%s dev 0x%p\n", __func__, dev);

	return sprintf(buf, "%u.%u.%u.%u\n",
		       FW_HDR_FW_VER_MAJOR_GET(c4iw_dev->rdev.lldi.fw_vers),
		       FW_HDR_FW_VER_MINOR_GET(c4iw_dev->rdev.lldi.fw_vers),
		       FW_HDR_FW_VER_MICRO_GET(c4iw_dev->rdev.lldi.fw_vers),
		       FW_HDR_FW_VER_BUILD_GET(c4iw_dev->rdev.lldi.fw_vers));
}

static ssize_t show_hca(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	struct c4iw_dev *c4iw_dev = container_of(dev, struct c4iw_dev,
						 ibdev.dev);
	struct ethtool_drvinfo info;
	struct net_device *lldev = c4iw_dev->rdev.lldi.ports[0];

	PDBG("%s dev 0x%p\n", __func__, dev);
	lldev->ethtool_ops->get_drvinfo(lldev, &info);
	return sprintf(buf, "%s\n", info.driver);
}

static ssize_t show_board(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct c4iw_dev *c4iw_dev = container_of(dev, struct c4iw_dev,
						 ibdev.dev);
	PDBG("%s dev 0x%p\n", __func__, dev);
	return sprintf(buf, "%x.%x\n", c4iw_dev->rdev.lldi.pdev->vendor,
		       c4iw_dev->rdev.lldi.pdev->device);
}
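
/*
 * The iWARP protocol MIB is simply the adapter's TCP MIB: the cxgb4
 * lower-level driver supplies separate IPv4 and IPv6 TCP counters,
 * and c4iw_get_mib() reports their sums.
 */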
static int c4iw_get_mib(struct ib_device *ibdev,
			union rdma_protocol_stats *stats)
{
	struct tp_tcp_stats v4, v6;
	struct c4iw_dev *c4iw_dev = to_c4iw_dev(ibdev);

	cxgb4_get_tcp_stats(c4iw_dev->rdev.lldi.pdev, &v4, &v6);
	memset(stats, 0, sizeof *stats);
	stats->iw.tcpInSegs = v4.tcpInSegs + v6.tcpInSegs;
	stats->iw.tcpOutSegs = v4.tcpOutSegs + v6.tcpOutSegs;
	stats->iw.tcpRetransSegs = v4.tcpRetransSegs + v6.tcpRetransSegs;
	stats->iw.tcpOutRsts = v4.tcpOutRsts + v6.tcpOutRsts;

	return 0;
}

static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
static DEVICE_ATTR(fw_ver, S_IRUGO, show_fw_ver, NULL);
static DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL);
static DEVICE_ATTR(board_id, S_IRUGO, show_board, NULL);

static struct device_attribute *c4iw_class_attributes[] = {
	&dev_attr_hw_rev,
	&dev_attr_fw_ver,
	&dev_attr_hca_type,
	&dev_attr_board_id,
};
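
/*
 * c4iw_register_device() fills in the ib_device verb table, allocates
 * the iWARP connection manager ops, registers with the IB core, and
 * finally creates the sysfs attributes declared above.  Failures
 * unwind in reverse order.
 */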
int c4iw_register_device(struct c4iw_dev *dev)
{
	int ret;
	int i;

	PDBG("%s c4iw_dev %p\n", __func__, dev);
	BUG_ON(!dev->rdev.lldi.ports[0]);
	strlcpy(dev->ibdev.name, "cxgb4_%d", IB_DEVICE_NAME_MAX);
	memset(&dev->ibdev.node_guid, 0, sizeof(dev->ibdev.node_guid));
	memcpy(&dev->ibdev.node_guid, dev->rdev.lldi.ports[0]->dev_addr, 6);
	dev->ibdev.owner = THIS_MODULE;
	dev->device_cap_flags = IB_DEVICE_LOCAL_DMA_LKEY | IB_DEVICE_MEM_WINDOW;
	if (fastreg_support)
		dev->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
	dev->ibdev.local_dma_lkey = 0;
	dev->ibdev.uverbs_cmd_mask =
	    (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
	    (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
	    (1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
	    (1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
	    (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
	    (1ull << IB_USER_VERBS_CMD_REG_MR) |
	    (1ull << IB_USER_VERBS_CMD_DEREG_MR) |
	    (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
	    (1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
	    (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
	    (1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) |
	    (1ull << IB_USER_VERBS_CMD_CREATE_QP) |
	    (1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
	    (1ull << IB_USER_VERBS_CMD_POLL_CQ) |
	    (1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
	    (1ull << IB_USER_VERBS_CMD_POST_SEND) |
	    (1ull << IB_USER_VERBS_CMD_POST_RECV);
	dev->ibdev.node_type = RDMA_NODE_RNIC;
	memcpy(dev->ibdev.node_desc, C4IW_NODE_DESC, sizeof(C4IW_NODE_DESC));
	dev->ibdev.phys_port_cnt = dev->rdev.lldi.nports;
	dev->ibdev.num_comp_vectors = 1;
	dev->ibdev.dma_device = &(dev->rdev.lldi.pdev->dev);
	dev->ibdev.query_device = c4iw_query_device;
	dev->ibdev.query_port = c4iw_query_port;
	dev->ibdev.query_pkey = c4iw_query_pkey;
	dev->ibdev.query_gid = c4iw_query_gid;
	dev->ibdev.alloc_ucontext = c4iw_alloc_ucontext;
	dev->ibdev.dealloc_ucontext = c4iw_dealloc_ucontext;
	dev->ibdev.mmap = c4iw_mmap;
	dev->ibdev.alloc_pd = c4iw_allocate_pd;
	dev->ibdev.dealloc_pd = c4iw_deallocate_pd;
	dev->ibdev.create_ah = c4iw_ah_create;
	dev->ibdev.destroy_ah = c4iw_ah_destroy;
	dev->ibdev.create_qp = c4iw_create_qp;
	dev->ibdev.modify_qp = c4iw_ib_modify_qp;
	dev->ibdev.destroy_qp = c4iw_destroy_qp;
	dev->ibdev.create_cq = c4iw_create_cq;
	dev->ibdev.destroy_cq = c4iw_destroy_cq;
	dev->ibdev.resize_cq = c4iw_resize_cq;
	dev->ibdev.poll_cq = c4iw_poll_cq;
	dev->ibdev.get_dma_mr = c4iw_get_dma_mr;
	dev->ibdev.reg_phys_mr = c4iw_register_phys_mem;
	dev->ibdev.rereg_phys_mr = c4iw_reregister_phys_mem;
	dev->ibdev.reg_user_mr = c4iw_reg_user_mr;
	dev->ibdev.dereg_mr = c4iw_dereg_mr;
	dev->ibdev.alloc_mw = c4iw_alloc_mw;
	dev->ibdev.bind_mw = c4iw_bind_mw;
	dev->ibdev.dealloc_mw = c4iw_dealloc_mw;
	dev->ibdev.alloc_fast_reg_mr = c4iw_alloc_fast_reg_mr;
	dev->ibdev.alloc_fast_reg_page_list = c4iw_alloc_fastreg_pbl;
	dev->ibdev.free_fast_reg_page_list = c4iw_free_fastreg_pbl;
	dev->ibdev.attach_mcast = c4iw_multicast_attach;
	dev->ibdev.detach_mcast = c4iw_multicast_detach;
	dev->ibdev.process_mad = c4iw_process_mad;
	dev->ibdev.req_notify_cq = c4iw_arm_cq;
	dev->ibdev.post_send = c4iw_post_send;
	dev->ibdev.post_recv = c4iw_post_receive;
	dev->ibdev.get_protocol_stats = c4iw_get_mib;
	dev->ibdev.uverbs_abi_ver = C4IW_UVERBS_ABI_VERSION;

	dev->ibdev.iwcm = kmalloc(sizeof(struct iw_cm_verbs), GFP_KERNEL);
	if (!dev->ibdev.iwcm)
		return -ENOMEM;

	dev->ibdev.iwcm->connect = c4iw_connect;
	dev->ibdev.iwcm->accept = c4iw_accept_cr;
	dev->ibdev.iwcm->reject = c4iw_reject_cr;
	dev->ibdev.iwcm->create_listen = c4iw_create_listen;
	dev->ibdev.iwcm->destroy_listen = c4iw_destroy_listen;
	dev->ibdev.iwcm->add_ref = c4iw_qp_add_ref;
	dev->ibdev.iwcm->rem_ref = c4iw_qp_rem_ref;
	dev->ibdev.iwcm->get_qp = c4iw_get_qp;

	ret = ib_register_device(&dev->ibdev, NULL);
	if (ret)
		goto bail1;

	for (i = 0; i < ARRAY_SIZE(c4iw_class_attributes); ++i) {
		ret = device_create_file(&dev->ibdev.dev,
					 c4iw_class_attributes[i]);
		if (ret)
			goto bail2;
	}
	return 0;
bail2:
	ib_unregister_device(&dev->ibdev);
bail1:
	kfree(dev->ibdev.iwcm);
	return ret;
}

void c4iw_unregister_device(struct c4iw_dev *dev)
{
	int i;

	PDBG("%s c4iw_dev %p\n", __func__, dev);
	for (i = 0; i < ARRAY_SIZE(c4iw_class_attributes); ++i)
		device_remove_file(&dev->ibdev.dev,
				   c4iw_class_attributes[i]);
	ib_unregister_device(&dev->ibdev);
	kfree(dev->ibdev.iwcm);
	return;
}
);