/*
 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/device.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/ethtool.h>
#include <linux/rtnetlink.h>
#include <linux/inetdevice.h>
#include <linux/io.h>

#include <asm/irq.h>
#include <asm/byteorder.h>

#include <rdma/iw_cm.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_user_verbs.h>

#include "iw_cxgb4.h"

static int fastreg_support = 1;
module_param(fastreg_support, int, 0644);
MODULE_PARM_DESC(fastreg_support, "Advertise fastreg support (default=1)");
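
/*
 * iWARP has no notion of InfiniBand address handles, multicast groups,
 * or MAD processing, so the verbs below are stubs that report -ENOSYS.
 */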
static struct ib_ah *c4iw_ah_create(struct ib_pd *pd,
                                    struct ib_ah_attr *ah_attr)
{
        return ERR_PTR(-ENOSYS);
}

static int c4iw_ah_destroy(struct ib_ah *ah)
{
        return -ENOSYS;
}

static int c4iw_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
        return -ENOSYS;
}

static int c4iw_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
        return -ENOSYS;
}

static int c4iw_process_mad(struct ib_device *ibdev, int mad_flags,
                            u8 port_num, const struct ib_wc *in_wc,
                            const struct ib_grh *in_grh,
                            const struct ib_mad_hdr *in_mad,
                            size_t in_mad_size,
                            struct ib_mad_hdr *out_mad,
                            size_t *out_mad_size,
                            u16 *out_mad_pkey_index)
{
        return -ENOSYS;
}

static int c4iw_dealloc_ucontext(struct ib_ucontext *context)
{
        struct c4iw_dev *rhp = to_c4iw_dev(context->device);
        struct c4iw_ucontext *ucontext = to_c4iw_ucontext(context);
        struct c4iw_mm_entry *mm, *tmp;

        PDBG("%s context %p\n", __func__, context);
        list_for_each_entry_safe(mm, tmp, &ucontext->mmaps, entry)
                kfree(mm);
        c4iw_release_dev_ucontext(&rhp->rdev, &ucontext->uctx);
        kfree(ucontext);
        return 0;
}
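
/*
 * Allocate a per-process ucontext.  If the user library's udata buffer is
 * large enough, the adapter status page is exported to it: a fresh mmap
 * key is minted under mmap_lock, returned in uresp.status_page_key, and
 * the (key, address, length) triple is stashed on the ucontext's mmap
 * list for c4iw_mmap() to resolve later.
 */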
static struct ib_ucontext *c4iw_alloc_ucontext(struct ib_device *ibdev,
                                               struct ib_udata *udata)
{
        struct c4iw_ucontext *context;
        struct c4iw_dev *rhp = to_c4iw_dev(ibdev);
        struct c4iw_alloc_ucontext_resp uresp;
        int ret = 0;
        struct c4iw_mm_entry *mm = NULL;

        PDBG("%s ibdev %p\n", __func__, ibdev);
        context = kzalloc(sizeof(*context), GFP_KERNEL);
        if (!context) {
                ret = -ENOMEM;
                goto err;
        }

        c4iw_init_dev_ucontext(&rhp->rdev, &context->uctx);
        INIT_LIST_HEAD(&context->mmaps);
        spin_lock_init(&context->mmap_lock);

        if (udata->outlen < sizeof(uresp) - sizeof(uresp.reserved)) {
                pr_err(MOD "Warning - downlevel libcxgb4 (non-fatal), device status page disabled.");
                rhp->rdev.flags |= T4_STATUS_PAGE_DISABLED;
        } else {
                mm = kmalloc(sizeof(*mm), GFP_KERNEL);
                if (!mm) {
                        ret = -ENOMEM;
                        goto err_free;
                }

                uresp.status_page_size = PAGE_SIZE;

                spin_lock(&context->mmap_lock);
                uresp.status_page_key = context->key;
                context->key += PAGE_SIZE;
                spin_unlock(&context->mmap_lock);

                ret = ib_copy_to_udata(udata, &uresp,
                                       sizeof(uresp) - sizeof(uresp.reserved));
                if (ret)
                        goto err_mm;

                mm->key = uresp.status_page_key;
                mm->addr = virt_to_phys(rhp->rdev.status_page);
                mm->len = PAGE_SIZE;
                insert_mmap(context, mm);
        }
        return &context->ibucontext;
err_mm:
        kfree(mm);
err_free:
        kfree(context);
err:
        return ERR_PTR(ret);
}
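
/*
 * mmap() against the ucontext: the page offset encodes an mmap key that
 * was handed to userspace earlier (status page, user doorbells, or queue
 * memory).  The key is looked up and consumed, and the backing address
 * selects the mapping type: BAR0 (MA_SYNC) is mapped uncached, BAR2
 * doorbell/OCQP space is mapped write-combined where the chip allows it,
 * and WQ/CQ contiguous DMA memory gets a plain remap_pfn_range().
 */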
static int c4iw_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
        int len = vma->vm_end - vma->vm_start;
        u32 key = vma->vm_pgoff << PAGE_SHIFT;
        struct c4iw_rdev *rdev;
        int ret = 0;
        struct c4iw_mm_entry *mm;
        struct c4iw_ucontext *ucontext;
        u64 addr;

        PDBG("%s pgoff 0x%lx key 0x%x len %d\n", __func__, vma->vm_pgoff,
             key, len);

        if (vma->vm_start & (PAGE_SIZE-1))
                return -EINVAL;

        rdev = &(to_c4iw_dev(context->device)->rdev);
        ucontext = to_c4iw_ucontext(context);

        mm = remove_mmap(ucontext, key, len);
        if (!mm)
                return -EINVAL;
        addr = mm->addr;
        kfree(mm);

        if ((addr >= pci_resource_start(rdev->lldi.pdev, 0)) &&
            (addr < (pci_resource_start(rdev->lldi.pdev, 0) +
                     pci_resource_len(rdev->lldi.pdev, 0)))) {

                /*
                 * MA_SYNC register...
                 */
                vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
                ret = io_remap_pfn_range(vma, vma->vm_start,
                                         addr >> PAGE_SHIFT,
                                         len, vma->vm_page_prot);
        } else if ((addr >= pci_resource_start(rdev->lldi.pdev, 2)) &&
                   (addr < (pci_resource_start(rdev->lldi.pdev, 2) +
                            pci_resource_len(rdev->lldi.pdev, 2)))) {

                /*
                 * Map user DB or OCQP memory...
                 */
                if (addr >= rdev->oc_mw_pa)
                        vma->vm_page_prot = t4_pgprot_wc(vma->vm_page_prot);
                else {
                        if (!is_t4(rdev->lldi.adapter_type))
                                vma->vm_page_prot =
                                        t4_pgprot_wc(vma->vm_page_prot);
                        else
                                vma->vm_page_prot =
                                        pgprot_noncached(vma->vm_page_prot);
                }
                ret = io_remap_pfn_range(vma, vma->vm_start,
                                         addr >> PAGE_SHIFT,
                                         len, vma->vm_page_prot);
        } else {

                /*
                 * Map WQ or CQ contig dma memory...
                 */
                ret = remap_pfn_range(vma, vma->vm_start,
                                      addr >> PAGE_SHIFT,
                                      len, vma->vm_page_prot);
        }

        return ret;
}

static int c4iw_deallocate_pd(struct ib_pd *pd)
{
        struct c4iw_dev *rhp;
        struct c4iw_pd *php;

        php = to_c4iw_pd(pd);
        rhp = php->rhp;
        PDBG("%s ibpd %p pdid 0x%x\n", __func__, pd, php->pdid);
        c4iw_put_resource(&rhp->rdev.resource.pdid_table, php->pdid);
        mutex_lock(&rhp->rdev.stats.lock);
        rhp->rdev.stats.pd.cur--;
        mutex_unlock(&rhp->rdev.stats.lock);
        kfree(php);
        return 0;
}
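
/*
 * PD allocation pulls a free pdid out of the rdev's pdid resource table;
 * the pdid is copied back to userspace for user PDs, and the stats block
 * tracks the current and high-water PD counts.
 */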
static struct ib_pd *c4iw_allocate_pd(struct ib_device *ibdev,
                                      struct ib_ucontext *context,
                                      struct ib_udata *udata)
{
        struct c4iw_pd *php;
        u32 pdid;
        struct c4iw_dev *rhp;

        PDBG("%s ibdev %p\n", __func__, ibdev);
        rhp = (struct c4iw_dev *) ibdev;
        pdid = c4iw_get_resource(&rhp->rdev.resource.pdid_table);
        if (!pdid)
                return ERR_PTR(-EINVAL);
        php = kzalloc(sizeof(*php), GFP_KERNEL);
        if (!php) {
                c4iw_put_resource(&rhp->rdev.resource.pdid_table, pdid);
                return ERR_PTR(-ENOMEM);
        }
        php->pdid = pdid;
        php->rhp = rhp;
        if (context) {
                if (ib_copy_to_udata(udata, &php->pdid, sizeof(u32))) {
                        c4iw_deallocate_pd(&php->ibpd);
                        return ERR_PTR(-EFAULT);
                }
        }
        mutex_lock(&rhp->rdev.stats.lock);
        rhp->rdev.stats.pd.cur++;
        if (rhp->rdev.stats.pd.cur > rhp->rdev.stats.pd.max)
                rhp->rdev.stats.pd.max = rhp->rdev.stats.pd.cur;
        mutex_unlock(&rhp->rdev.stats.lock);
        PDBG("%s pdid 0x%0x ptr 0x%p\n", __func__, pdid, php);
        return &php->ibpd;
}

static int c4iw_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
                           u16 *pkey)
{
        PDBG("%s ibdev %p\n", __func__, ibdev);
        *pkey = 0;
        return 0;
}

static int c4iw_query_gid(struct ib_device *ibdev, u8 port, int index,
                          union ib_gid *gid)
{
        struct c4iw_dev *dev;

        PDBG("%s ibdev %p, port %d, index %d, gid %p\n",
             __func__, ibdev, port, index, gid);
        dev = to_c4iw_dev(ibdev);
        BUG_ON(port == 0);
        memset(&(gid->raw[0]), 0, sizeof(gid->raw));
        memcpy(&(gid->raw[0]), dev->rdev.lldi.ports[port-1]->dev_addr, 6);
        return 0;
}
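
/*
 * Device attributes come from the lower-level cxgb4 driver's view of the
 * adapter: the system image GUID is derived from the port 0 MAC address,
 * and the QP, CQ, MR, and read-depth limits reflect the hardware
 * resource sizing handed up in the lld_info.
 */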
static int c4iw_query_device(struct ib_device *ibdev, struct ib_device_attr *props,
                             struct ib_udata *uhw)
{
        struct c4iw_dev *dev;

        PDBG("%s ibdev %p\n", __func__, ibdev);

        if (uhw->inlen || uhw->outlen)
                return -EINVAL;

        dev = to_c4iw_dev(ibdev);
        memset(props, 0, sizeof *props);
        memcpy(&props->sys_image_guid, dev->rdev.lldi.ports[0]->dev_addr, 6);
        props->hw_ver = CHELSIO_CHIP_RELEASE(dev->rdev.lldi.adapter_type);
        props->fw_ver = dev->rdev.lldi.fw_vers;
        props->device_cap_flags = dev->device_cap_flags;
        props->page_size_cap = T4_PAGESIZE_MASK;
        props->vendor_id = (u32)dev->rdev.lldi.pdev->vendor;
        props->vendor_part_id = (u32)dev->rdev.lldi.pdev->device;
        props->max_mr_size = T4_MAX_MR_SIZE;
        props->max_qp = dev->rdev.lldi.vr->qp.size / 2;
        props->max_qp_wr = dev->rdev.hw_queue.t4_max_qp_depth;
        props->max_sge = T4_MAX_RECV_SGE;
        props->max_sge_rd = 1;
        props->max_res_rd_atom = dev->rdev.lldi.max_ird_adapter;
        props->max_qp_rd_atom = min(dev->rdev.lldi.max_ordird_qp,
                                    c4iw_max_read_depth);
        props->max_qp_init_rd_atom = props->max_qp_rd_atom;
        props->max_cq = dev->rdev.lldi.vr->qp.size;
        props->max_cqe = dev->rdev.hw_queue.t4_max_cq_depth;
        props->max_mr = c4iw_num_stags(&dev->rdev);
        props->max_pd = T4_MAX_NUM_PD;
        props->local_ca_ack_delay = 0;
        props->max_fast_reg_page_list_len = t4_max_fr_depth(use_dsgl);

        return 0;
}

static int c4iw_query_port(struct ib_device *ibdev, u8 port,
                           struct ib_port_attr *props)
{
        struct c4iw_dev *dev;
        struct net_device *netdev;
        struct in_device *inetdev;

        PDBG("%s ibdev %p\n", __func__, ibdev);

        dev = to_c4iw_dev(ibdev);
        netdev = dev->rdev.lldi.ports[port-1];

        memset(props, 0, sizeof(struct ib_port_attr));
        props->max_mtu = IB_MTU_4096;
        if (netdev->mtu >= 4096)
                props->active_mtu = IB_MTU_4096;
        else if (netdev->mtu >= 2048)
                props->active_mtu = IB_MTU_2048;
        else if (netdev->mtu >= 1024)
                props->active_mtu = IB_MTU_1024;
        else if (netdev->mtu >= 512)
                props->active_mtu = IB_MTU_512;
        else
                props->active_mtu = IB_MTU_256;

        if (!netif_carrier_ok(netdev))
                props->state = IB_PORT_DOWN;
        else {
                inetdev = in_dev_get(netdev);
                if (inetdev) {
                        if (inetdev->ifa_list)
                                props->state = IB_PORT_ACTIVE;
                        else
                                props->state = IB_PORT_INIT;
                        in_dev_put(inetdev);
                } else
                        props->state = IB_PORT_INIT;
        }

        props->port_cap_flags =
            IB_PORT_CM_SUP |
            IB_PORT_SNMP_TUNNEL_SUP |
            IB_PORT_REINIT_SUP |
            IB_PORT_DEVICE_MGMT_SUP |
            IB_PORT_VENDOR_CLASS_SUP | IB_PORT_BOOT_MGMT_SUP;
        props->gid_tbl_len = 1;
        props->pkey_tbl_len = 1;
        props->active_width = 2;
        props->active_speed = IB_SPEED_DDR;
        props->max_msg_sz = -1;

        return 0;
}
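
/*
 * sysfs attributes exported under the ib_device: hardware revision,
 * firmware version, HCA type (the lower-level driver name), and board id.
 */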
static ssize_t show_rev(struct device *dev, struct device_attribute *attr,
                        char *buf)
{
        struct c4iw_dev *c4iw_dev = container_of(dev, struct c4iw_dev,
                                                 ibdev.dev);
        PDBG("%s dev 0x%p\n", __func__, dev);
        return sprintf(buf, "%d\n",
                       CHELSIO_CHIP_RELEASE(c4iw_dev->rdev.lldi.adapter_type));
}

static ssize_t show_fw_ver(struct device *dev, struct device_attribute *attr,
                           char *buf)
{
        struct c4iw_dev *c4iw_dev = container_of(dev, struct c4iw_dev,
                                                 ibdev.dev);
        PDBG("%s dev 0x%p\n", __func__, dev);

        return sprintf(buf, "%u.%u.%u.%u\n",
                       FW_HDR_FW_VER_MAJOR_G(c4iw_dev->rdev.lldi.fw_vers),
                       FW_HDR_FW_VER_MINOR_G(c4iw_dev->rdev.lldi.fw_vers),
                       FW_HDR_FW_VER_MICRO_G(c4iw_dev->rdev.lldi.fw_vers),
                       FW_HDR_FW_VER_BUILD_G(c4iw_dev->rdev.lldi.fw_vers));
}

static ssize_t show_hca(struct device *dev, struct device_attribute *attr,
                        char *buf)
{
        struct c4iw_dev *c4iw_dev = container_of(dev, struct c4iw_dev,
                                                 ibdev.dev);
        struct ethtool_drvinfo info;
        struct net_device *lldev = c4iw_dev->rdev.lldi.ports[0];

        PDBG("%s dev 0x%p\n", __func__, dev);
        lldev->ethtool_ops->get_drvinfo(lldev, &info);
        return sprintf(buf, "%s\n", info.driver);
}

static ssize_t show_board(struct device *dev, struct device_attribute *attr,
                          char *buf)
{
        struct c4iw_dev *c4iw_dev = container_of(dev, struct c4iw_dev,
                                                 ibdev.dev);
        PDBG("%s dev 0x%p\n", __func__, dev);
        return sprintf(buf, "%x.%x\n", c4iw_dev->rdev.lldi.pdev->vendor,
                       c4iw_dev->rdev.lldi.pdev->device);
}

static int c4iw_get_mib(struct ib_device *ibdev,
                        union rdma_protocol_stats *stats)
{
        struct tp_tcp_stats v4, v6;
        struct c4iw_dev *c4iw_dev = to_c4iw_dev(ibdev);

        cxgb4_get_tcp_stats(c4iw_dev->rdev.lldi.pdev, &v4, &v6);
        memset(stats, 0, sizeof *stats);
        stats->iw.tcpInSegs = v4.tcp_in_segs + v6.tcp_in_segs;
        stats->iw.tcpOutSegs = v4.tcp_out_segs + v6.tcp_out_segs;
        stats->iw.tcpRetransSegs = v4.tcp_retrans_segs + v6.tcp_retrans_segs;
        stats->iw.tcpOutRsts = v4.tcp_out_rsts + v6.tcp_out_rsts;

        return 0;
}

static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
static DEVICE_ATTR(fw_ver, S_IRUGO, show_fw_ver, NULL);
static DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL);
static DEVICE_ATTR(board_id, S_IRUGO, show_board, NULL);

static struct device_attribute *c4iw_class_attributes[] = {
        &dev_attr_hw_rev,
        &dev_attr_fw_ver,
        &dev_attr_hca_type,
        &dev_attr_board_id,
};

static int c4iw_port_immutable(struct ib_device *ibdev, u8 port_num,
                               struct ib_port_immutable *immutable)
{
        struct ib_port_attr attr;
        int err;

        err = c4iw_query_port(ibdev, port_num, &attr);
        if (err)
                return err;

        immutable->pkey_tbl_len = attr.pkey_tbl_len;
        immutable->gid_tbl_len = attr.gid_tbl_len;
        immutable->core_cap_flags = RDMA_CORE_PORT_IWARP;

        return 0;
}
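
/*
 * Register the device with the RDMA core: fill in the ib_device name,
 * node GUID, capabilities, uverbs command mask, and verb entry points,
 * hook up the iWARP connection-manager callbacks, then create the sysfs
 * attribute files.  Everything is unwound on failure.
 */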
int c4iw_register_device(struct c4iw_dev *dev)
{
        int ret;
        int i;

        PDBG("%s c4iw_dev %p\n", __func__, dev);
        BUG_ON(!dev->rdev.lldi.ports[0]);
        strlcpy(dev->ibdev.name, "cxgb4_%d", IB_DEVICE_NAME_MAX);
        memset(&dev->ibdev.node_guid, 0, sizeof(dev->ibdev.node_guid));
        memcpy(&dev->ibdev.node_guid, dev->rdev.lldi.ports[0]->dev_addr, 6);
        dev->ibdev.owner = THIS_MODULE;
        dev->device_cap_flags = IB_DEVICE_LOCAL_DMA_LKEY | IB_DEVICE_MEM_WINDOW;
        if (fastreg_support)
                dev->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
        dev->ibdev.local_dma_lkey = 0;
        dev->ibdev.uverbs_cmd_mask =
            (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
            (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
            (1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
            (1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
            (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
            (1ull << IB_USER_VERBS_CMD_REG_MR) |
            (1ull << IB_USER_VERBS_CMD_DEREG_MR) |
            (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
            (1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
            (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
            (1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) |
            (1ull << IB_USER_VERBS_CMD_CREATE_QP) |
            (1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
            (1ull << IB_USER_VERBS_CMD_QUERY_QP) |
            (1ull << IB_USER_VERBS_CMD_POLL_CQ) |
            (1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
            (1ull << IB_USER_VERBS_CMD_POST_SEND) |
            (1ull << IB_USER_VERBS_CMD_POST_RECV);
        dev->ibdev.node_type = RDMA_NODE_RNIC;
        memcpy(dev->ibdev.node_desc, C4IW_NODE_DESC, sizeof(C4IW_NODE_DESC));
        dev->ibdev.phys_port_cnt = dev->rdev.lldi.nports;
        dev->ibdev.num_comp_vectors = dev->rdev.lldi.nciq;
        dev->ibdev.dma_device = &(dev->rdev.lldi.pdev->dev);
        dev->ibdev.query_device = c4iw_query_device;
        dev->ibdev.query_port = c4iw_query_port;
        dev->ibdev.query_pkey = c4iw_query_pkey;
        dev->ibdev.query_gid = c4iw_query_gid;
        dev->ibdev.alloc_ucontext = c4iw_alloc_ucontext;
        dev->ibdev.dealloc_ucontext = c4iw_dealloc_ucontext;
        dev->ibdev.mmap = c4iw_mmap;
        dev->ibdev.alloc_pd = c4iw_allocate_pd;
        dev->ibdev.dealloc_pd = c4iw_deallocate_pd;
        dev->ibdev.create_ah = c4iw_ah_create;
        dev->ibdev.destroy_ah = c4iw_ah_destroy;
        dev->ibdev.create_qp = c4iw_create_qp;
        dev->ibdev.modify_qp = c4iw_ib_modify_qp;
        dev->ibdev.query_qp = c4iw_ib_query_qp;
        dev->ibdev.destroy_qp = c4iw_destroy_qp;
        dev->ibdev.create_cq = c4iw_create_cq;
        dev->ibdev.destroy_cq = c4iw_destroy_cq;
        dev->ibdev.resize_cq = c4iw_resize_cq;
        dev->ibdev.poll_cq = c4iw_poll_cq;
        dev->ibdev.get_dma_mr = c4iw_get_dma_mr;
        dev->ibdev.reg_phys_mr = c4iw_register_phys_mem;
        dev->ibdev.rereg_phys_mr = c4iw_reregister_phys_mem;
        dev->ibdev.reg_user_mr = c4iw_reg_user_mr;
        dev->ibdev.dereg_mr = c4iw_dereg_mr;
        dev->ibdev.alloc_mw = c4iw_alloc_mw;
        dev->ibdev.bind_mw = c4iw_bind_mw;
        dev->ibdev.dealloc_mw = c4iw_dealloc_mw;
        dev->ibdev.alloc_mr = c4iw_alloc_mr;
        dev->ibdev.map_mr_sg = c4iw_map_mr_sg;
        dev->ibdev.attach_mcast = c4iw_multicast_attach;
        dev->ibdev.detach_mcast = c4iw_multicast_detach;
        dev->ibdev.process_mad = c4iw_process_mad;
        dev->ibdev.req_notify_cq = c4iw_arm_cq;
        dev->ibdev.post_send = c4iw_post_send;
        dev->ibdev.post_recv = c4iw_post_receive;
        dev->ibdev.get_protocol_stats = c4iw_get_mib;
        dev->ibdev.uverbs_abi_ver = C4IW_UVERBS_ABI_VERSION;
        dev->ibdev.get_port_immutable = c4iw_port_immutable;

        dev->ibdev.iwcm = kmalloc(sizeof(struct iw_cm_verbs), GFP_KERNEL);
        if (!dev->ibdev.iwcm)
                return -ENOMEM;

        dev->ibdev.iwcm->connect = c4iw_connect;
        dev->ibdev.iwcm->accept = c4iw_accept_cr;
        dev->ibdev.iwcm->reject = c4iw_reject_cr;
        dev->ibdev.iwcm->create_listen = c4iw_create_listen;
        dev->ibdev.iwcm->destroy_listen = c4iw_destroy_listen;
        dev->ibdev.iwcm->add_ref = c4iw_qp_add_ref;
        dev->ibdev.iwcm->rem_ref = c4iw_qp_rem_ref;
        dev->ibdev.iwcm->get_qp = c4iw_get_qp;

        ret = ib_register_device(&dev->ibdev, NULL);
        if (ret)
                goto bail1;

        for (i = 0; i < ARRAY_SIZE(c4iw_class_attributes); ++i) {
                ret = device_create_file(&dev->ibdev.dev,
                                         c4iw_class_attributes[i]);
                if (ret)
                        goto bail2;
        }
        return 0;
bail2:
        ib_unregister_device(&dev->ibdev);
bail1:
        kfree(dev->ibdev.iwcm);
        return ret;
}

void c4iw_unregister_device(struct c4iw_dev *dev)
{
        int i;

        PDBG("%s c4iw_dev %p\n", __func__, dev);
        for (i = 0; i < ARRAY_SIZE(c4iw_class_attributes); ++i)
                device_remove_file(&dev->ibdev.dev,
                                   c4iw_class_attributes[i]);
        ib_unregister_device(&dev->ibdev);
        kfree(dev->ibdev.iwcm);
        return;
}