/*
 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/device.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/ethtool.h>
#include <linux/rtnetlink.h>
#include <linux/inetdevice.h>
#include <linux/io.h>

#include <asm/byteorder.h>

#include <rdma/iw_cm.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_user_verbs.h>

#include "iw_cxgb4.h"
static int fastreg_support = 1;
module_param(fastreg_support, int, 0644);
MODULE_PARM_DESC(fastreg_support, "Advertise fastreg support (default=1)");
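
/*
 * The following verbs are not supported by iWARP devices; each stub
 * returns -ENOSYS so the IB core reports the operation as unimplemented.
 */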
static int c4iw_modify_port(struct ib_device *ibdev,
			    u8 port, int port_modify_mask,
			    struct ib_port_modify *props)
{
	return -ENOSYS;
}
static struct ib_ah *c4iw_ah_create(struct ib_pd *pd,
				    struct ib_ah_attr *ah_attr)
{
	return ERR_PTR(-ENOSYS);
}
static int c4iw_ah_destroy(struct ib_ah *ah)
{
	return -ENOSYS;
}
static int c4iw_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
	return -ENOSYS;
}
static int c4iw_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
	return -ENOSYS;
}
static int c4iw_process_mad(struct ib_device *ibdev, int mad_flags,
			    u8 port_num, struct ib_wc *in_wc,
			    struct ib_grh *in_grh, struct ib_mad *in_mad,
			    struct ib_mad *out_mad)
{
	return -ENOSYS;
}
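
/*
 * Tear down a user context: free any mmap entries still queued on the
 * context, then release the per-context resources held in the rdev.
 */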
static int c4iw_dealloc_ucontext(struct ib_ucontext *context)
{
	struct c4iw_dev *rhp = to_c4iw_dev(context->device);
	struct c4iw_ucontext *ucontext = to_c4iw_ucontext(context);
	struct c4iw_mm_entry *mm, *tmp;

	PDBG("%s context %p\n", __func__, context);
	list_for_each_entry_safe(mm, tmp, &ucontext->mmaps, entry)
		kfree(mm);
	c4iw_release_dev_ucontext(&rhp->rdev, &ucontext->uctx);
	kfree(ucontext);
	return 0;
}
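
/*
 * Allocate and initialize a user context, including the mmap list that
 * c4iw_mmap() consumes when userspace maps doorbells and queue memory.
 */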
static struct ib_ucontext *c4iw_alloc_ucontext(struct ib_device *ibdev,
					       struct ib_udata *udata)
{
	struct c4iw_ucontext *context;
	struct c4iw_dev *rhp = to_c4iw_dev(ibdev);

	PDBG("%s ibdev %p\n", __func__, ibdev);
	context = kzalloc(sizeof(*context), GFP_KERNEL);
	if (!context)
		return ERR_PTR(-ENOMEM);
	c4iw_init_dev_ucontext(&rhp->rdev, &context->uctx);
	INIT_LIST_HEAD(&context->mmaps);
	spin_lock_init(&context->mmap_lock);
	return &context->ibucontext;
}
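
/*
 * Map device memory into a user process.  The pgoff-derived key selects
 * a previously registered mmap entry; the entry's physical address then
 * decides whether this is the MA_SYNC register (BAR0), user doorbell or
 * OCQP space (BAR2), or contiguous WQ/CQ DMA memory.
 */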
static int c4iw_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
	int len = vma->vm_end - vma->vm_start;
	u32 key = vma->vm_pgoff << PAGE_SHIFT;
	struct c4iw_rdev *rdev;
	int ret = 0;
	struct c4iw_mm_entry *mm;
	struct c4iw_ucontext *ucontext;
	u64 addr;

	PDBG("%s pgoff 0x%lx key 0x%x len %d\n", __func__, vma->vm_pgoff,
	     key, len);

	if (vma->vm_start & (PAGE_SIZE-1))
		return -EINVAL;

	rdev = &(to_c4iw_dev(context->device)->rdev);
	ucontext = to_c4iw_ucontext(context);

	mm = remove_mmap(ucontext, key, len);
	if (!mm)
		return -EINVAL;
	addr = mm->addr;
	kfree(mm);

	if ((addr >= pci_resource_start(rdev->lldi.pdev, 0)) &&
	    (addr < (pci_resource_start(rdev->lldi.pdev, 0) +
		    pci_resource_len(rdev->lldi.pdev, 0)))) {

		/*
		 * MA_SYNC register...
		 */
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
		ret = io_remap_pfn_range(vma, vma->vm_start,
					 addr >> PAGE_SHIFT,
					 len, vma->vm_page_prot);
	} else if ((addr >= pci_resource_start(rdev->lldi.pdev, 2)) &&
		   (addr < (pci_resource_start(rdev->lldi.pdev, 2) +
			   pci_resource_len(rdev->lldi.pdev, 2)))) {

		/*
		 * Map user DB or OCQP memory...
		 */
		if (addr >= rdev->oc_mw_pa)
			vma->vm_page_prot = t4_pgprot_wc(vma->vm_page_prot);
		else
			vma->vm_page_prot =
				pgprot_noncached(vma->vm_page_prot);
		ret = io_remap_pfn_range(vma, vma->vm_start,
					 addr >> PAGE_SHIFT,
					 len, vma->vm_page_prot);
	} else {

		/*
		 * Map WQ or CQ contig dma memory...
		 */
		ret = remap_pfn_range(vma, vma->vm_start,
				      addr >> PAGE_SHIFT,
				      len, vma->vm_page_prot);
	}

	return ret;
}
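
/*
 * Protection domain ids are managed as a fifo of free ids in the rdev
 * resource tables, guarded by a per-fifo lock.
 */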
static int c4iw_deallocate_pd(struct ib_pd *pd)
{
	struct c4iw_dev *rhp;
	struct c4iw_pd *php;

	php = to_c4iw_pd(pd);
	rhp = php->rhp;
	PDBG("%s ibpd %p pdid 0x%x\n", __func__, pd, php->pdid);
	c4iw_put_resource(&rhp->rdev.resource.pdid_fifo, php->pdid,
			  &rhp->rdev.resource.pdid_fifo_lock);
	kfree(php);
	return 0;
}
static struct ib_pd *c4iw_allocate_pd(struct ib_device *ibdev,
				      struct ib_ucontext *context,
				      struct ib_udata *udata)
{
	struct c4iw_pd *php;
	u32 pdid;
	struct c4iw_dev *rhp;

	PDBG("%s ibdev %p\n", __func__, ibdev);
	rhp = (struct c4iw_dev *) ibdev;
	pdid = c4iw_get_resource(&rhp->rdev.resource.pdid_fifo,
				 &rhp->rdev.resource.pdid_fifo_lock);
	if (!pdid)
		return ERR_PTR(-EINVAL);
	php = kzalloc(sizeof(*php), GFP_KERNEL);
	if (!php) {
		c4iw_put_resource(&rhp->rdev.resource.pdid_fifo, pdid,
				  &rhp->rdev.resource.pdid_fifo_lock);
		return ERR_PTR(-ENOMEM);
	}
	php->pdid = pdid;
	php->rhp = rhp;
	if (context) {
		if (ib_copy_to_udata(udata, &php->pdid, sizeof(u32))) {
			c4iw_deallocate_pd(&php->ibpd);
			return ERR_PTR(-EFAULT);
		}
	}
	PDBG("%s pdid 0x%0x ptr 0x%p\n", __func__, pdid, php);
	return &php->ibpd;
}
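
/*
 * iWARP has no partition keys and no GID table; report a single pkey of
 * zero and build the GID from the port's MAC address.
 */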
static int c4iw_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
			   u16 *pkey)
{
	PDBG("%s ibdev %p\n", __func__, ibdev);
	*pkey = 0;
	return 0;
}
static int c4iw_query_gid(struct ib_device *ibdev, u8 port, int index,
			  union ib_gid *gid)
{
	struct c4iw_dev *dev;

	PDBG("%s ibdev %p, port %d, index %d, gid %p\n",
	     __func__, ibdev, port, index, gid);
	dev = to_c4iw_dev(ibdev);
	BUG_ON(port == 0);
	memset(&(gid->raw[0]), 0, sizeof(gid->raw));
	memcpy(&(gid->raw[0]), dev->rdev.lldi.ports[port-1]->dev_addr, 6);
	return 0;
}
static int c4iw_query_device(struct ib_device *ibdev,
			     struct ib_device_attr *props)
{
	struct c4iw_dev *dev;

	PDBG("%s ibdev %p\n", __func__, ibdev);

	dev = to_c4iw_dev(ibdev);
	memset(props, 0, sizeof *props);
	memcpy(&props->sys_image_guid, dev->rdev.lldi.ports[0]->dev_addr, 6);
	props->hw_ver = dev->rdev.lldi.adapter_type;
	props->fw_ver = dev->rdev.lldi.fw_vers;
	props->device_cap_flags = dev->device_cap_flags;
	props->page_size_cap = T4_PAGESIZE_MASK;
	props->vendor_id = (u32)dev->rdev.lldi.pdev->vendor;
	props->vendor_part_id = (u32)dev->rdev.lldi.pdev->device;
	props->max_mr_size = T4_MAX_MR_SIZE;
	props->max_qp = T4_MAX_NUM_QP;
	props->max_qp_wr = T4_MAX_QP_DEPTH;
	props->max_sge = T4_MAX_RECV_SGE;
	props->max_sge_rd = 1;
	props->max_qp_rd_atom = c4iw_max_read_depth;
	props->max_qp_init_rd_atom = c4iw_max_read_depth;
	props->max_cq = T4_MAX_NUM_CQ;
	props->max_cqe = T4_MAX_CQ_DEPTH;
	props->max_mr = c4iw_num_stags(&dev->rdev);
	props->max_pd = T4_MAX_NUM_PD;
	props->local_ca_ack_delay = 0;
	props->max_fast_reg_page_list_len = T4_MAX_FR_DEPTH;

	return 0;
}
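
/*
 * Port state follows the underlying net_device: carrier down maps to
 * IB_PORT_DOWN, carrier up with an IP address to IB_PORT_ACTIVE, and
 * carrier up without one to IB_PORT_INIT.
 */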
static int c4iw_query_port(struct ib_device *ibdev, u8 port,
			   struct ib_port_attr *props)
{
	struct c4iw_dev *dev;
	struct net_device *netdev;
	struct in_device *inetdev;

	PDBG("%s ibdev %p\n", __func__, ibdev);

	dev = to_c4iw_dev(ibdev);
	netdev = dev->rdev.lldi.ports[port-1];

	memset(props, 0, sizeof(struct ib_port_attr));
	props->max_mtu = IB_MTU_4096;
	if (netdev->mtu >= 4096)
		props->active_mtu = IB_MTU_4096;
	else if (netdev->mtu >= 2048)
		props->active_mtu = IB_MTU_2048;
	else if (netdev->mtu >= 1024)
		props->active_mtu = IB_MTU_1024;
	else if (netdev->mtu >= 512)
		props->active_mtu = IB_MTU_512;
	else
		props->active_mtu = IB_MTU_256;

	if (!netif_carrier_ok(netdev))
		props->state = IB_PORT_DOWN;
	else {
		inetdev = in_dev_get(netdev);
		if (inetdev) {
			if (inetdev->ifa_list)
				props->state = IB_PORT_ACTIVE;
			else
				props->state = IB_PORT_INIT;
			in_dev_put(inetdev);
		} else
			props->state = IB_PORT_INIT;
	}

	props->port_cap_flags =
	    IB_PORT_CM_SUP |
	    IB_PORT_SNMP_TUNNEL_SUP |
	    IB_PORT_REINIT_SUP |
	    IB_PORT_DEVICE_MGMT_SUP |
	    IB_PORT_VENDOR_CLASS_SUP | IB_PORT_BOOT_MGMT_SUP;
	props->gid_tbl_len = 1;
	props->pkey_tbl_len = 1;
	props->active_width = 2;
	props->active_speed = 2;
	props->max_msg_sz = -1;

	return 0;
}
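
/*
 * sysfs attributes exposing the adapter revision, firmware version, HCA
 * type, and board id, all read from the lower-level driver info.
 */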
static ssize_t show_rev(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	struct c4iw_dev *c4iw_dev = container_of(dev, struct c4iw_dev,
						 ibdev.dev);
	PDBG("%s dev 0x%p\n", __func__, dev);
	return sprintf(buf, "%d\n", c4iw_dev->rdev.lldi.adapter_type);
}

static ssize_t show_fw_ver(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct c4iw_dev *c4iw_dev = container_of(dev, struct c4iw_dev,
						 ibdev.dev);
	PDBG("%s dev 0x%p\n", __func__, dev);

	return sprintf(buf, "%u.%u.%u.%u\n",
		       FW_HDR_FW_VER_MAJOR_GET(c4iw_dev->rdev.lldi.fw_vers),
		       FW_HDR_FW_VER_MINOR_GET(c4iw_dev->rdev.lldi.fw_vers),
		       FW_HDR_FW_VER_MICRO_GET(c4iw_dev->rdev.lldi.fw_vers),
		       FW_HDR_FW_VER_BUILD_GET(c4iw_dev->rdev.lldi.fw_vers));
}

static ssize_t show_hca(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	struct c4iw_dev *c4iw_dev = container_of(dev, struct c4iw_dev,
						 ibdev.dev);
	struct ethtool_drvinfo info;
	struct net_device *lldev = c4iw_dev->rdev.lldi.ports[0];

	PDBG("%s dev 0x%p\n", __func__, dev);
	lldev->ethtool_ops->get_drvinfo(lldev, &info);
	return sprintf(buf, "%s\n", info.driver);
}

static ssize_t show_board(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct c4iw_dev *c4iw_dev = container_of(dev, struct c4iw_dev,
						 ibdev.dev);
	PDBG("%s dev 0x%p\n", __func__, dev);
	return sprintf(buf, "%x.%x\n", c4iw_dev->rdev.lldi.pdev->vendor,
		       c4iw_dev->rdev.lldi.pdev->device);
}
static int c4iw_get_mib(struct ib_device *ibdev,
			union rdma_protocol_stats *stats)
{
	struct tp_tcp_stats v4, v6;
	struct c4iw_dev *c4iw_dev = to_c4iw_dev(ibdev);

	cxgb4_get_tcp_stats(c4iw_dev->rdev.lldi.pdev, &v4, &v6);
	memset(stats, 0, sizeof *stats);
	stats->iw.tcpInSegs = v4.tcpInSegs + v6.tcpInSegs;
	stats->iw.tcpOutSegs = v4.tcpOutSegs + v6.tcpOutSegs;
	stats->iw.tcpRetransSegs = v4.tcpRetransSegs + v6.tcpRetransSegs;
	stats->iw.tcpOutRsts = v4.tcpOutRsts + v6.tcpOutRsts;

	return 0;
}
static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
static DEVICE_ATTR(fw_ver, S_IRUGO, show_fw_ver, NULL);
static DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL);
static DEVICE_ATTR(board_id, S_IRUGO, show_board, NULL);
static struct device_attribute *c4iw_class_attributes[] = {
	&dev_attr_hw_rev,
	&dev_attr_fw_ver,
	&dev_attr_hca_type,
	&dev_attr_board_id,
};
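
/*
 * Register the device with the IB core: fill in the ib_device ops and
 * capability masks, hook up the iWARP connection manager callbacks,
 * then create the sysfs attribute files.
 */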
int c4iw_register_device(struct c4iw_dev *dev)
{
	int ret;
	int i;

	PDBG("%s c4iw_dev %p\n", __func__, dev);
	BUG_ON(!dev->rdev.lldi.ports[0]);
	strlcpy(dev->ibdev.name, "cxgb4_%d", IB_DEVICE_NAME_MAX);
	memset(&dev->ibdev.node_guid, 0, sizeof(dev->ibdev.node_guid));
	memcpy(&dev->ibdev.node_guid, dev->rdev.lldi.ports[0]->dev_addr, 6);
	dev->ibdev.owner = THIS_MODULE;
	dev->device_cap_flags = IB_DEVICE_LOCAL_DMA_LKEY | IB_DEVICE_MEM_WINDOW;
	if (fastreg_support)
		dev->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
	dev->ibdev.local_dma_lkey = 0;
	dev->ibdev.uverbs_cmd_mask =
	    (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
	    (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
	    (1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
	    (1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
	    (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
	    (1ull << IB_USER_VERBS_CMD_REG_MR) |
	    (1ull << IB_USER_VERBS_CMD_DEREG_MR) |
	    (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
	    (1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
	    (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
	    (1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) |
	    (1ull << IB_USER_VERBS_CMD_CREATE_QP) |
	    (1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
	    (1ull << IB_USER_VERBS_CMD_POLL_CQ) |
	    (1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
	    (1ull << IB_USER_VERBS_CMD_POST_SEND) |
	    (1ull << IB_USER_VERBS_CMD_POST_RECV);
	dev->ibdev.node_type = RDMA_NODE_RNIC;
	memcpy(dev->ibdev.node_desc, C4IW_NODE_DESC, sizeof(C4IW_NODE_DESC));
	dev->ibdev.phys_port_cnt = dev->rdev.lldi.nports;
	dev->ibdev.num_comp_vectors = 1;
	dev->ibdev.dma_device = &(dev->rdev.lldi.pdev->dev);
	dev->ibdev.query_device = c4iw_query_device;
	dev->ibdev.query_port = c4iw_query_port;
	dev->ibdev.modify_port = c4iw_modify_port;
	dev->ibdev.query_pkey = c4iw_query_pkey;
	dev->ibdev.query_gid = c4iw_query_gid;
	dev->ibdev.alloc_ucontext = c4iw_alloc_ucontext;
	dev->ibdev.dealloc_ucontext = c4iw_dealloc_ucontext;
	dev->ibdev.mmap = c4iw_mmap;
	dev->ibdev.alloc_pd = c4iw_allocate_pd;
	dev->ibdev.dealloc_pd = c4iw_deallocate_pd;
	dev->ibdev.create_ah = c4iw_ah_create;
	dev->ibdev.destroy_ah = c4iw_ah_destroy;
	dev->ibdev.create_qp = c4iw_create_qp;
	dev->ibdev.modify_qp = c4iw_ib_modify_qp;
	dev->ibdev.destroy_qp = c4iw_destroy_qp;
	dev->ibdev.create_cq = c4iw_create_cq;
	dev->ibdev.destroy_cq = c4iw_destroy_cq;
	dev->ibdev.resize_cq = c4iw_resize_cq;
	dev->ibdev.poll_cq = c4iw_poll_cq;
	dev->ibdev.get_dma_mr = c4iw_get_dma_mr;
	dev->ibdev.reg_phys_mr = c4iw_register_phys_mem;
	dev->ibdev.rereg_phys_mr = c4iw_reregister_phys_mem;
	dev->ibdev.reg_user_mr = c4iw_reg_user_mr;
	dev->ibdev.dereg_mr = c4iw_dereg_mr;
	dev->ibdev.alloc_mw = c4iw_alloc_mw;
	dev->ibdev.bind_mw = c4iw_bind_mw;
	dev->ibdev.dealloc_mw = c4iw_dealloc_mw;
	dev->ibdev.alloc_fast_reg_mr = c4iw_alloc_fast_reg_mr;
	dev->ibdev.alloc_fast_reg_page_list = c4iw_alloc_fastreg_pbl;
	dev->ibdev.free_fast_reg_page_list = c4iw_free_fastreg_pbl;
	dev->ibdev.attach_mcast = c4iw_multicast_attach;
	dev->ibdev.detach_mcast = c4iw_multicast_detach;
	dev->ibdev.process_mad = c4iw_process_mad;
	dev->ibdev.req_notify_cq = c4iw_arm_cq;
	dev->ibdev.post_send = c4iw_post_send;
	dev->ibdev.post_recv = c4iw_post_receive;
	dev->ibdev.get_protocol_stats = c4iw_get_mib;
	dev->ibdev.uverbs_abi_ver = C4IW_UVERBS_ABI_VERSION;

	dev->ibdev.iwcm = kmalloc(sizeof(struct iw_cm_verbs), GFP_KERNEL);
	if (!dev->ibdev.iwcm)
		return -ENOMEM;

	dev->ibdev.iwcm->connect = c4iw_connect;
	dev->ibdev.iwcm->accept = c4iw_accept_cr;
	dev->ibdev.iwcm->reject = c4iw_reject_cr;
	dev->ibdev.iwcm->create_listen = c4iw_create_listen;
	dev->ibdev.iwcm->destroy_listen = c4iw_destroy_listen;
	dev->ibdev.iwcm->add_ref = c4iw_qp_add_ref;
	dev->ibdev.iwcm->rem_ref = c4iw_qp_rem_ref;
	dev->ibdev.iwcm->get_qp = c4iw_get_qp;

	ret = ib_register_device(&dev->ibdev, NULL);
	if (ret)
		goto bail1;

	for (i = 0; i < ARRAY_SIZE(c4iw_class_attributes); ++i) {
		ret = device_create_file(&dev->ibdev.dev,
					 c4iw_class_attributes[i]);
		if (ret)
			goto bail2;
	}
	return 0;
bail2:
	ib_unregister_device(&dev->ibdev);
bail1:
	kfree(dev->ibdev.iwcm);
	return ret;
}
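
/*
 * Undo c4iw_register_device(): remove the sysfs files, unregister from
 * the IB core, and free the iwcm callback table.
 */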
void c4iw_unregister_device(struct c4iw_dev *dev)
{
	int i;

	PDBG("%s c4iw_dev %p\n", __func__, dev);
	for (i = 0; i < ARRAY_SIZE(c4iw_class_attributes); ++i)
		device_remove_file(&dev->ibdev.dev,
				   c4iw_class_attributes[i]);
	ib_unregister_device(&dev->ibdev);
	kfree(dev->ibdev.iwcm);
	return;
}