/*
 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/device.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/ethtool.h>
#include <linux/rtnetlink.h>
#include <linux/inetdevice.h>
#include <linux/io.h>

#include <asm/byteorder.h>

#include <rdma/iw_cm.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_user_verbs.h>

#include "iw_cxgb4.h"

static int fastreg_support = 1;
module_param(fastreg_support, int, 0644);
MODULE_PARM_DESC(fastreg_support, "Advertise fastreg support (default=1)");

static struct ib_ah *c4iw_ah_create(struct ib_pd *pd,
                                    struct ib_ah_attr *ah_attr)
{
        return ERR_PTR(-ENOSYS);
}

static int c4iw_ah_destroy(struct ib_ah *ah)
{
        return -ENOSYS;
}

static int c4iw_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
        return -ENOSYS;
}

static int c4iw_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
        return -ENOSYS;
}

static int c4iw_process_mad(struct ib_device *ibdev, int mad_flags,
                            u8 port_num, struct ib_wc *in_wc,
                            struct ib_grh *in_grh, struct ib_mad *in_mad,
                            struct ib_mad *out_mad)
{
        return -ENOSYS;
}

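/*
 * Tear down a user context: free any mmap entries that were never consumed
 * by c4iw_mmap() and release the per-context rdev resources.
 */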
static int c4iw_dealloc_ucontext(struct ib_ucontext *context)
{
        struct c4iw_dev *rhp = to_c4iw_dev(context->device);
        struct c4iw_ucontext *ucontext = to_c4iw_ucontext(context);
        struct c4iw_mm_entry *mm, *tmp;

        PDBG("%s context %p\n", __func__, context);
        list_for_each_entry_safe(mm, tmp, &ucontext->mmaps, entry)
                kfree(mm);
        c4iw_release_dev_ucontext(&rhp->rdev, &ucontext->uctx);
        kfree(ucontext);
        return 0;
}

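/*
 * Allocate a user context and initialise its mmap-entry list and lock; other
 * verbs record (key, length, address) entries here for c4iw_mmap() to resolve.
 */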
static struct ib_ucontext *c4iw_alloc_ucontext(struct ib_device *ibdev,
                                               struct ib_udata *udata)
{
        struct c4iw_ucontext *context;
        struct c4iw_dev *rhp = to_c4iw_dev(ibdev);

        PDBG("%s ibdev %p\n", __func__, ibdev);
        context = kzalloc(sizeof(*context), GFP_KERNEL);
        if (!context)
                return ERR_PTR(-ENOMEM);
        c4iw_init_dev_ucontext(&rhp->rdev, &context->uctx);
        INIT_LIST_HEAD(&context->mmaps);
        spin_lock_init(&context->mmap_lock);
        return &context->ibucontext;
}

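/*
 * Map device memory into user space.  The (pgoff << PAGE_SHIFT, length) pair
 * is looked up in the context's mmap list to recover the address: BAR0
 * addresses (MA_SYNC register) and BAR2 addresses (user doorbells / OCQP
 * memory) are mapped as I/O, anything else is WQ/CQ contiguous DMA memory.
 */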
static int c4iw_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
        int len = vma->vm_end - vma->vm_start;
        u32 key = vma->vm_pgoff << PAGE_SHIFT;
        struct c4iw_rdev *rdev;
        int ret = 0;
        struct c4iw_mm_entry *mm;
        struct c4iw_ucontext *ucontext;
        u64 addr;

        PDBG("%s pgoff 0x%lx key 0x%x len %d\n", __func__, vma->vm_pgoff,
             key, len);

        if (vma->vm_start & (PAGE_SIZE-1))
                return -EINVAL;

        rdev = &(to_c4iw_dev(context->device)->rdev);
        ucontext = to_c4iw_ucontext(context);

        mm = remove_mmap(ucontext, key, len);
        if (!mm)
                return -EINVAL;
        addr = mm->addr;
        kfree(mm);

        if ((addr >= pci_resource_start(rdev->lldi.pdev, 0)) &&
            (addr < (pci_resource_start(rdev->lldi.pdev, 0) +
                     pci_resource_len(rdev->lldi.pdev, 0)))) {

                /*
                 * MA_SYNC register...
                 */
                vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
                ret = io_remap_pfn_range(vma, vma->vm_start,
                                         addr >> PAGE_SHIFT,
                                         len, vma->vm_page_prot);
        } else if ((addr >= pci_resource_start(rdev->lldi.pdev, 2)) &&
                   (addr < (pci_resource_start(rdev->lldi.pdev, 2) +
                            pci_resource_len(rdev->lldi.pdev, 2)))) {

                /*
                 * Map user DB or OCQP memory...
                 */
                if (addr >= rdev->oc_mw_pa)
                        vma->vm_page_prot = t4_pgprot_wc(vma->vm_page_prot);
                else {
                        if (is_t5(rdev->lldi.adapter_type))
                                vma->vm_page_prot =
                                        t4_pgprot_wc(vma->vm_page_prot);
                        else
                                vma->vm_page_prot =
                                        pgprot_noncached(vma->vm_page_prot);
                }
                ret = io_remap_pfn_range(vma, vma->vm_start,
                                         addr >> PAGE_SHIFT,
                                         len, vma->vm_page_prot);
        } else {

                /*
                 * Map WQ or CQ contig dma memory...
                 */
                ret = remap_pfn_range(vma, vma->vm_start,
                                      addr >> PAGE_SHIFT,
                                      len, vma->vm_page_prot);
        }

        return ret;
}

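/*
 * PD allocation/deallocation: protection domain IDs come from the rdev's
 * pdid resource table, and the stats mutex guards the cur/max PD counters.
 */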
static int c4iw_deallocate_pd(struct ib_pd *pd)
{
        struct c4iw_dev *rhp;
        struct c4iw_pd *php;

        php = to_c4iw_pd(pd);
        rhp = php->rhp;
        PDBG("%s ibpd %p pdid 0x%x\n", __func__, pd, php->pdid);
        c4iw_put_resource(&rhp->rdev.resource.pdid_table, php->pdid);
        mutex_lock(&rhp->rdev.stats.lock);
        rhp->rdev.stats.pd.cur--;
        mutex_unlock(&rhp->rdev.stats.lock);
        kfree(php);
        return 0;
}

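/*
 * For user PDs the new pdid is copied back through udata so the userspace
 * library can reference the domain; kernel callers pass a NULL context.
 */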
static struct ib_pd *c4iw_allocate_pd(struct ib_device *ibdev,
                                      struct ib_ucontext *context,
                                      struct ib_udata *udata)
{
        struct c4iw_pd *php;
        u32 pdid;
        struct c4iw_dev *rhp;

        PDBG("%s ibdev %p\n", __func__, ibdev);
        rhp = (struct c4iw_dev *) ibdev;
        pdid = c4iw_get_resource(&rhp->rdev.resource.pdid_table);
        if (!pdid)
                return ERR_PTR(-EINVAL);
        php = kzalloc(sizeof(*php), GFP_KERNEL);
        if (!php) {
                c4iw_put_resource(&rhp->rdev.resource.pdid_table, pdid);
                return ERR_PTR(-ENOMEM);
        }
        php->pdid = pdid;
        php->rhp = rhp;
        if (context) {
                if (ib_copy_to_udata(udata, &php->pdid, sizeof(u32))) {
                        c4iw_deallocate_pd(&php->ibpd);
                        return ERR_PTR(-EFAULT);
                }
        }
        mutex_lock(&rhp->rdev.stats.lock);
        rhp->rdev.stats.pd.cur++;
        if (rhp->rdev.stats.pd.cur > rhp->rdev.stats.pd.max)
                rhp->rdev.stats.pd.max = rhp->rdev.stats.pd.cur;
        mutex_unlock(&rhp->rdev.stats.lock);
        PDBG("%s pdid 0x%0x ptr 0x%p\n", __func__, pdid, php);
        return &php->ibpd;
}

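/*
 * iWARP devices expose a single pkey (0) and derive the per-port GID from
 * the MAC address of the underlying network interface.
 */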
static int c4iw_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
                           u16 *pkey)
{
        PDBG("%s ibdev %p\n", __func__, ibdev);
        *pkey = 0;
        return 0;
}

static int c4iw_query_gid(struct ib_device *ibdev, u8 port, int index,
                          union ib_gid *gid)
{
        struct c4iw_dev *dev;

        PDBG("%s ibdev %p, port %d, index %d, gid %p\n",
             __func__, ibdev, port, index, gid);
        dev = to_c4iw_dev(ibdev);
        memset(&(gid->raw[0]), 0, sizeof(gid->raw));
        memcpy(&(gid->raw[0]), dev->rdev.lldi.ports[port-1]->dev_addr, 6);
        return 0;
}

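/*
 * Report device capabilities: the GUID is taken from port 0's MAC address
 * and the limits come from the T4 hardware constants and the lower-level
 * driver (lldi) information.
 */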
static int c4iw_query_device(struct ib_device *ibdev,
                             struct ib_device_attr *props)
{
        struct c4iw_dev *dev;

        PDBG("%s ibdev %p\n", __func__, ibdev);
        dev = to_c4iw_dev(ibdev);
        memset(props, 0, sizeof *props);
        memcpy(&props->sys_image_guid, dev->rdev.lldi.ports[0]->dev_addr, 6);
        props->hw_ver = CHELSIO_CHIP_RELEASE(dev->rdev.lldi.adapter_type);
        props->fw_ver = dev->rdev.lldi.fw_vers;
        props->device_cap_flags = dev->device_cap_flags;
        props->page_size_cap = T4_PAGESIZE_MASK;
        props->vendor_id = (u32)dev->rdev.lldi.pdev->vendor;
        props->vendor_part_id = (u32)dev->rdev.lldi.pdev->device;
        props->max_mr_size = T4_MAX_MR_SIZE;
        props->max_qp = T4_MAX_NUM_QP;
        props->max_qp_wr = T4_MAX_QP_DEPTH;
        props->max_sge = T4_MAX_RECV_SGE;
        props->max_sge_rd = 1;
        props->max_qp_rd_atom = c4iw_max_read_depth;
        props->max_qp_init_rd_atom = c4iw_max_read_depth;
        props->max_cq = T4_MAX_NUM_CQ;
        props->max_cqe = T4_MAX_CQ_DEPTH;
        props->max_mr = c4iw_num_stags(&dev->rdev);
        props->max_pd = T4_MAX_NUM_PD;
        props->local_ca_ack_delay = 0;
        props->max_fast_reg_page_list_len = T4_MAX_FR_DEPTH;

        return 0;
}

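/*
 * Port attributes are derived from the underlying net_device: active MTU
 * tracks the netdev MTU, and port state reflects carrier and whether the
 * interface has an IPv4 address configured.
 */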
static int c4iw_query_port(struct ib_device *ibdev, u8 port,
                           struct ib_port_attr *props)
{
        struct c4iw_dev *dev;
        struct net_device *netdev;
        struct in_device *inetdev;

        PDBG("%s ibdev %p\n", __func__, ibdev);

        dev = to_c4iw_dev(ibdev);
        netdev = dev->rdev.lldi.ports[port-1];

        memset(props, 0, sizeof(struct ib_port_attr));
        props->max_mtu = IB_MTU_4096;
        if (netdev->mtu >= 4096)
                props->active_mtu = IB_MTU_4096;
        else if (netdev->mtu >= 2048)
                props->active_mtu = IB_MTU_2048;
        else if (netdev->mtu >= 1024)
                props->active_mtu = IB_MTU_1024;
        else if (netdev->mtu >= 512)
                props->active_mtu = IB_MTU_512;
        else
                props->active_mtu = IB_MTU_256;

        if (!netif_carrier_ok(netdev))
                props->state = IB_PORT_DOWN;
        else {
                inetdev = in_dev_get(netdev);
                if (inetdev) {
                        if (inetdev->ifa_list)
                                props->state = IB_PORT_ACTIVE;
                        else
                                props->state = IB_PORT_INIT;
                        in_dev_put(inetdev);
                } else
                        props->state = IB_PORT_INIT;
        }

        props->port_cap_flags =
            IB_PORT_CM_SUP |
            IB_PORT_SNMP_TUNNEL_SUP |
            IB_PORT_REINIT_SUP |
            IB_PORT_DEVICE_MGMT_SUP |
            IB_PORT_VENDOR_CLASS_SUP | IB_PORT_BOOT_MGMT_SUP;
        props->gid_tbl_len = 1;
        props->pkey_tbl_len = 1;
        props->active_width = 2;
        props->active_speed = IB_SPEED_DDR;
        props->max_msg_sz = -1;

        return 0;
}

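/*
 * sysfs attribute show routines.  Once the device is registered these are
 * exposed under its infiniband class directory as hw_rev, fw_ver, hca_type
 * and board_id.
 */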
static ssize_t show_rev(struct device *dev, struct device_attribute *attr,
                        char *buf)
{
        struct c4iw_dev *c4iw_dev = container_of(dev, struct c4iw_dev,
                                                 ibdev.dev);
        PDBG("%s dev 0x%p\n", __func__, dev);
        return sprintf(buf, "%d\n",
                       CHELSIO_CHIP_RELEASE(c4iw_dev->rdev.lldi.adapter_type));
}

static ssize_t show_fw_ver(struct device *dev, struct device_attribute *attr,
                           char *buf)
{
        struct c4iw_dev *c4iw_dev = container_of(dev, struct c4iw_dev,
                                                 ibdev.dev);
        PDBG("%s dev 0x%p\n", __func__, dev);

        return sprintf(buf, "%u.%u.%u.%u\n",
                       FW_HDR_FW_VER_MAJOR_GET(c4iw_dev->rdev.lldi.fw_vers),
                       FW_HDR_FW_VER_MINOR_GET(c4iw_dev->rdev.lldi.fw_vers),
                       FW_HDR_FW_VER_MICRO_GET(c4iw_dev->rdev.lldi.fw_vers),
                       FW_HDR_FW_VER_BUILD_GET(c4iw_dev->rdev.lldi.fw_vers));
}

static ssize_t show_hca(struct device *dev, struct device_attribute *attr,
                        char *buf)
{
        struct c4iw_dev *c4iw_dev = container_of(dev, struct c4iw_dev,
                                                 ibdev.dev);
        struct ethtool_drvinfo info;
        struct net_device *lldev = c4iw_dev->rdev.lldi.ports[0];

        PDBG("%s dev 0x%p\n", __func__, dev);
        lldev->ethtool_ops->get_drvinfo(lldev, &info);
        return sprintf(buf, "%s\n", info.driver);
}

static ssize_t show_board(struct device *dev, struct device_attribute *attr,
                          char *buf)
{
        struct c4iw_dev *c4iw_dev = container_of(dev, struct c4iw_dev,
                                                 ibdev.dev);
        PDBG("%s dev 0x%p\n", __func__, dev);
        return sprintf(buf, "%x.%x\n", c4iw_dev->rdev.lldi.pdev->vendor,
                       c4iw_dev->rdev.lldi.pdev->device);
}

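/*
 * Protocol statistics: sum the IPv4 and IPv6 TCP MIB counters reported by
 * the lower-level cxgb4 driver into the iWARP protocol stats.
 */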
static int c4iw_get_mib(struct ib_device *ibdev,
                        union rdma_protocol_stats *stats)
{
        struct tp_tcp_stats v4, v6;
        struct c4iw_dev *c4iw_dev = to_c4iw_dev(ibdev);

        cxgb4_get_tcp_stats(c4iw_dev->rdev.lldi.pdev, &v4, &v6);
        memset(stats, 0, sizeof *stats);
        stats->iw.tcpInSegs = v4.tcpInSegs + v6.tcpInSegs;
        stats->iw.tcpOutSegs = v4.tcpOutSegs + v6.tcpOutSegs;
        stats->iw.tcpRetransSegs = v4.tcpRetransSegs + v6.tcpRetransSegs;
        stats->iw.tcpOutRsts = v4.tcpOutRsts + v6.tcpOutRsts;

        return 0;
}

static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
static DEVICE_ATTR(fw_ver, S_IRUGO, show_fw_ver, NULL);
static DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL);
static DEVICE_ATTR(board_id, S_IRUGO, show_board, NULL);

static struct device_attribute *c4iw_class_attributes[] = {
        &dev_attr_hw_rev,
        &dev_attr_fw_ver,
        &dev_attr_hca_type,
        &dev_attr_board_id,
};

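/*
 * Register the iWARP device with the IB core: fill in the ib_device fields,
 * the verbs function table and the iw_cm_verbs connection-manager callbacks,
 * then create the sysfs attribute files.
 */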
int c4iw_register_device(struct c4iw_dev *dev)
{
        int ret;
        int i;

        PDBG("%s c4iw_dev %p\n", __func__, dev);
        BUG_ON(!dev->rdev.lldi.ports[0]);
        strlcpy(dev->ibdev.name, "cxgb4_%d", IB_DEVICE_NAME_MAX);
        memset(&dev->ibdev.node_guid, 0, sizeof(dev->ibdev.node_guid));
        memcpy(&dev->ibdev.node_guid, dev->rdev.lldi.ports[0]->dev_addr, 6);
        dev->ibdev.owner = THIS_MODULE;
        dev->device_cap_flags = IB_DEVICE_LOCAL_DMA_LKEY | IB_DEVICE_MEM_WINDOW;
        if (fastreg_support)
                dev->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
        dev->ibdev.local_dma_lkey = 0;
        dev->ibdev.uverbs_cmd_mask =
            (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
            (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
            (1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
            (1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
            (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
            (1ull << IB_USER_VERBS_CMD_REG_MR) |
            (1ull << IB_USER_VERBS_CMD_DEREG_MR) |
            (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
            (1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
            (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
            (1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) |
            (1ull << IB_USER_VERBS_CMD_CREATE_QP) |
            (1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
            (1ull << IB_USER_VERBS_CMD_QUERY_QP) |
            (1ull << IB_USER_VERBS_CMD_POLL_CQ) |
            (1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
            (1ull << IB_USER_VERBS_CMD_POST_SEND) |
            (1ull << IB_USER_VERBS_CMD_POST_RECV);
        dev->ibdev.node_type = RDMA_NODE_RNIC;
        memcpy(dev->ibdev.node_desc, C4IW_NODE_DESC, sizeof(C4IW_NODE_DESC));
        dev->ibdev.phys_port_cnt = dev->rdev.lldi.nports;
        dev->ibdev.num_comp_vectors = 1;
        dev->ibdev.dma_device = &(dev->rdev.lldi.pdev->dev);
        dev->ibdev.query_device = c4iw_query_device;
        dev->ibdev.query_port = c4iw_query_port;
        dev->ibdev.query_pkey = c4iw_query_pkey;
        dev->ibdev.query_gid = c4iw_query_gid;
        dev->ibdev.alloc_ucontext = c4iw_alloc_ucontext;
        dev->ibdev.dealloc_ucontext = c4iw_dealloc_ucontext;
        dev->ibdev.mmap = c4iw_mmap;
        dev->ibdev.alloc_pd = c4iw_allocate_pd;
        dev->ibdev.dealloc_pd = c4iw_deallocate_pd;
        dev->ibdev.create_ah = c4iw_ah_create;
        dev->ibdev.destroy_ah = c4iw_ah_destroy;
        dev->ibdev.create_qp = c4iw_create_qp;
        dev->ibdev.modify_qp = c4iw_ib_modify_qp;
        dev->ibdev.query_qp = c4iw_ib_query_qp;
        dev->ibdev.destroy_qp = c4iw_destroy_qp;
        dev->ibdev.create_cq = c4iw_create_cq;
        dev->ibdev.destroy_cq = c4iw_destroy_cq;
        dev->ibdev.resize_cq = c4iw_resize_cq;
        dev->ibdev.poll_cq = c4iw_poll_cq;
        dev->ibdev.get_dma_mr = c4iw_get_dma_mr;
        dev->ibdev.reg_phys_mr = c4iw_register_phys_mem;
        dev->ibdev.rereg_phys_mr = c4iw_reregister_phys_mem;
        dev->ibdev.reg_user_mr = c4iw_reg_user_mr;
        dev->ibdev.dereg_mr = c4iw_dereg_mr;
        dev->ibdev.alloc_mw = c4iw_alloc_mw;
        dev->ibdev.bind_mw = c4iw_bind_mw;
        dev->ibdev.dealloc_mw = c4iw_dealloc_mw;
        dev->ibdev.alloc_fast_reg_mr = c4iw_alloc_fast_reg_mr;
        dev->ibdev.alloc_fast_reg_page_list = c4iw_alloc_fastreg_pbl;
        dev->ibdev.free_fast_reg_page_list = c4iw_free_fastreg_pbl;
        dev->ibdev.attach_mcast = c4iw_multicast_attach;
        dev->ibdev.detach_mcast = c4iw_multicast_detach;
        dev->ibdev.process_mad = c4iw_process_mad;
        dev->ibdev.req_notify_cq = c4iw_arm_cq;
        dev->ibdev.post_send = c4iw_post_send;
        dev->ibdev.post_recv = c4iw_post_receive;
        dev->ibdev.get_protocol_stats = c4iw_get_mib;
        dev->ibdev.uverbs_abi_ver = C4IW_UVERBS_ABI_VERSION;

        dev->ibdev.iwcm = kmalloc(sizeof(struct iw_cm_verbs), GFP_KERNEL);
        if (!dev->ibdev.iwcm)
                return -ENOMEM;

        dev->ibdev.iwcm->connect = c4iw_connect;
        dev->ibdev.iwcm->accept = c4iw_accept_cr;
        dev->ibdev.iwcm->reject = c4iw_reject_cr;
        dev->ibdev.iwcm->create_listen = c4iw_create_listen;
        dev->ibdev.iwcm->destroy_listen = c4iw_destroy_listen;
        dev->ibdev.iwcm->add_ref = c4iw_qp_add_ref;
        dev->ibdev.iwcm->rem_ref = c4iw_qp_rem_ref;
        dev->ibdev.iwcm->get_qp = c4iw_get_qp;

        ret = ib_register_device(&dev->ibdev, NULL);
        if (ret)
                goto bail1;

        for (i = 0; i < ARRAY_SIZE(c4iw_class_attributes); ++i) {
                ret = device_create_file(&dev->ibdev.dev,
                                         c4iw_class_attributes[i]);
                if (ret)
                        goto bail2;
        }
        return 0;
bail2:
        ib_unregister_device(&dev->ibdev);
bail1:
        kfree(dev->ibdev.iwcm);
        return ret;
}

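/*
 * Undo c4iw_register_device(): remove the sysfs files, unregister from the
 * IB core and free the iw_cm_verbs table.
 */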
void c4iw_unregister_device(struct c4iw_dev *dev)
{
        int i;

        PDBG("%s c4iw_dev %p\n", __func__, dev);
        for (i = 0; i < ARRAY_SIZE(c4iw_class_attributes); ++i)
                device_remove_file(&dev->ibdev.dev,
                                   c4iw_class_attributes[i]);
        ib_unregister_device(&dev->ibdev);
        kfree(dev->ibdev.iwcm);
        return;
}