/*
 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/device.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/ethtool.h>
#include <linux/rtnetlink.h>
#include <linux/inetdevice.h>

#include <asm/byteorder.h>

#include <rdma/iw_cm.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_user_verbs.h>

#include "iw_cxgb4.h"
static int fastreg_support = 1;
module_param(fastreg_support, int, 0644);
MODULE_PARM_DESC(fastreg_support, "Advertise fastreg support (default=1)");
static void c4iw_dealloc_ucontext(struct ib_ucontext *context)
{
	struct c4iw_ucontext *ucontext = to_c4iw_ucontext(context);
	struct c4iw_dev *rhp;
	struct c4iw_mm_entry *mm, *tmp;

	pr_debug("context %p\n", context);
	rhp = to_c4iw_dev(ucontext->ibucontext.device);

	list_for_each_entry_safe(mm, tmp, &ucontext->mmaps, entry)
		kfree(mm);
	c4iw_release_dev_ucontext(&rhp->rdev, &ucontext->uctx);
}
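
/*
 * Set up a new user context.  When the user library is new enough, hand it
 * an mmap key for the device status page so it can map that page into its
 * address space.
 */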
static int c4iw_alloc_ucontext(struct ib_ucontext *ucontext,
			       struct ib_udata *udata)
{
	struct ib_device *ibdev = ucontext->device;
	struct c4iw_ucontext *context = to_c4iw_ucontext(ucontext);
	struct c4iw_dev *rhp = to_c4iw_dev(ibdev);
	struct c4iw_alloc_ucontext_resp uresp;
	int ret = 0;
	struct c4iw_mm_entry *mm = NULL;

	pr_debug("ibdev %p\n", ibdev);
	c4iw_init_dev_ucontext(&rhp->rdev, &context->uctx);
	INIT_LIST_HEAD(&context->mmaps);
	spin_lock_init(&context->mmap_lock);

	if (udata->outlen < sizeof(uresp) - sizeof(uresp.reserved)) {
		pr_err_once("Warning - downlevel libcxgb4 (non-fatal), device status page disabled\n");
		rhp->rdev.flags |= T4_STATUS_PAGE_DISABLED;
	} else {
		mm = kmalloc(sizeof(*mm), GFP_KERNEL);
		if (!mm) {
			ret = -ENOMEM;
			goto err;
		}

		uresp.status_page_size = PAGE_SIZE;

		spin_lock(&context->mmap_lock);
		uresp.status_page_key = context->key;
		context->key += PAGE_SIZE;
		spin_unlock(&context->mmap_lock);

		ret = ib_copy_to_udata(udata, &uresp,
				       sizeof(uresp) - sizeof(uresp.reserved));
		if (ret)
			goto err_mm;

		mm->key = uresp.status_page_key;
		mm->addr = virt_to_phys(rhp->rdev.status_page);
		mm->len = PAGE_SIZE;
		insert_mmap(context, mm);
	}
	return 0;
err_mm:
	kfree(mm);
err:
	return ret;
}
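
/*
 * mmap handler: the offset (key) selects either the MA_SYNC register in
 * BAR0, user doorbell/OCQP space in BAR2, or contiguous DMA memory backing
 * a WQ/CQ, each mapped with the appropriate page protection.
 */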
static int c4iw_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
	int len = vma->vm_end - vma->vm_start;
	u32 key = vma->vm_pgoff << PAGE_SHIFT;
	struct c4iw_rdev *rdev;
	int ret = 0;
	struct c4iw_mm_entry *mm;
	struct c4iw_ucontext *ucontext;
	u64 addr;

	pr_debug("pgoff 0x%lx key 0x%x len %d\n", vma->vm_pgoff,
		 key, len);

	if (vma->vm_start & (PAGE_SIZE-1))
		return -EINVAL;

	rdev = &(to_c4iw_dev(context->device)->rdev);
	ucontext = to_c4iw_ucontext(context);

	mm = remove_mmap(ucontext, key, len);
	if (!mm)
		return -EINVAL;
	addr = mm->addr;
	kfree(mm);

	if ((addr >= pci_resource_start(rdev->lldi.pdev, 0)) &&
	    (addr < (pci_resource_start(rdev->lldi.pdev, 0) +
		    pci_resource_len(rdev->lldi.pdev, 0)))) {

		/*
		 * MA_SYNC register...
		 */
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
		ret = io_remap_pfn_range(vma, vma->vm_start,
					 addr >> PAGE_SHIFT,
					 len, vma->vm_page_prot);
	} else if ((addr >= pci_resource_start(rdev->lldi.pdev, 2)) &&
		   (addr < (pci_resource_start(rdev->lldi.pdev, 2) +
		    pci_resource_len(rdev->lldi.pdev, 2)))) {

		/*
		 * Map user DB or OCQP memory...
		 */
		if (addr >= rdev->oc_mw_pa)
			vma->vm_page_prot = t4_pgprot_wc(vma->vm_page_prot);
		else {
			if (!is_t4(rdev->lldi.adapter_type))
				vma->vm_page_prot =
					t4_pgprot_wc(vma->vm_page_prot);
			else
				vma->vm_page_prot =
					pgprot_noncached(vma->vm_page_prot);
		}
		ret = io_remap_pfn_range(vma, vma->vm_start,
					 addr >> PAGE_SHIFT,
					 len, vma->vm_page_prot);
	} else {

		/*
		 * Map WQ or CQ contig dma memory...
		 */
		ret = remap_pfn_range(vma, vma->vm_start,
				      addr >> PAGE_SHIFT,
				      len, vma->vm_page_prot);
	}

	return ret;
}
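
/*
 * PD allocation and teardown: PD IDs come from the rdev pdid_table resource
 * pool, and the current/max PD counts are tracked under the stats lock.
 */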
static void c4iw_deallocate_pd(struct ib_pd *pd, struct ib_udata *udata)
{
	struct c4iw_dev *rhp;
	struct c4iw_pd *php;

	php = to_c4iw_pd(pd);
	rhp = php->rhp;
	pr_debug("ibpd %p pdid 0x%x\n", pd, php->pdid);
	c4iw_put_resource(&rhp->rdev.resource.pdid_table, php->pdid);
	mutex_lock(&rhp->rdev.stats.lock);
	rhp->rdev.stats.pd.cur--;
	mutex_unlock(&rhp->rdev.stats.lock);
}
static int c4iw_allocate_pd(struct ib_pd *pd, struct ib_udata *udata)
{
	struct c4iw_pd *php = to_c4iw_pd(pd);
	struct ib_device *ibdev = pd->device;
	u32 pdid;
	struct c4iw_dev *rhp;

	pr_debug("ibdev %p\n", ibdev);
	rhp = (struct c4iw_dev *) ibdev;
	pdid = c4iw_get_resource(&rhp->rdev.resource.pdid_table);
	if (!pdid)
		return -EINVAL;

	php->pdid = pdid;
	php->rhp = rhp;
	if (udata) {
		struct c4iw_alloc_pd_resp uresp = {.pdid = php->pdid};

		if (ib_copy_to_udata(udata, &uresp, sizeof(uresp))) {
			c4iw_deallocate_pd(&php->ibpd, udata);
			return -EFAULT;
		}
	}
	mutex_lock(&rhp->rdev.stats.lock);
	rhp->rdev.stats.pd.cur++;
	if (rhp->rdev.stats.pd.cur > rhp->rdev.stats.pd.max)
		rhp->rdev.stats.pd.max = rhp->rdev.stats.pd.cur;
	mutex_unlock(&rhp->rdev.stats.lock);
	pr_debug("pdid 0x%0x ptr 0x%p\n", pdid, php);
	return 0;
}
static int c4iw_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
			   u16 *pkey)
{
	pr_debug("ibdev %p\n", ibdev);
	*pkey = 0;
	return 0;
}
static int c4iw_query_gid(struct ib_device *ibdev, u8 port, int index,
			  union ib_gid *gid)
{
	struct c4iw_dev *dev;

	pr_debug("ibdev %p, port %d, index %d, gid %p\n",
		 ibdev, port, index, gid);
	if (!port)
		return -EINVAL;
	dev = to_c4iw_dev(ibdev);
	memset(&(gid->raw[0]), 0, sizeof(gid->raw));
	memcpy(&(gid->raw[0]), dev->rdev.lldi.ports[port-1]->dev_addr, 6);
	return 0;
}
static int c4iw_query_device(struct ib_device *ibdev, struct ib_device_attr *props,
			     struct ib_udata *uhw)
{
	struct c4iw_dev *dev;

	pr_debug("ibdev %p\n", ibdev);

	if (uhw->inlen || uhw->outlen)
		return -EINVAL;

	dev = to_c4iw_dev(ibdev);
	memcpy(&props->sys_image_guid, dev->rdev.lldi.ports[0]->dev_addr, 6);
	props->hw_ver = CHELSIO_CHIP_RELEASE(dev->rdev.lldi.adapter_type);
	props->fw_ver = dev->rdev.lldi.fw_vers;
	props->device_cap_flags = dev->device_cap_flags;
	props->page_size_cap = T4_PAGESIZE_MASK;
	props->vendor_id = (u32)dev->rdev.lldi.pdev->vendor;
	props->vendor_part_id = (u32)dev->rdev.lldi.pdev->device;
	props->max_mr_size = T4_MAX_MR_SIZE;
	props->max_qp = dev->rdev.lldi.vr->qp.size / 2;
	props->max_srq = dev->rdev.lldi.vr->srq.size;
	props->max_qp_wr = dev->rdev.hw_queue.t4_max_qp_depth;
	props->max_srq_wr = dev->rdev.hw_queue.t4_max_qp_depth;
	props->max_send_sge = min(T4_MAX_SEND_SGE, T4_MAX_WRITE_SGE);
	props->max_recv_sge = T4_MAX_RECV_SGE;
	props->max_srq_sge = T4_MAX_RECV_SGE;
	props->max_sge_rd = 1;
	props->max_res_rd_atom = dev->rdev.lldi.max_ird_adapter;
	props->max_qp_rd_atom = min(dev->rdev.lldi.max_ordird_qp,
				    c4iw_max_read_depth);
	props->max_qp_init_rd_atom = props->max_qp_rd_atom;
	props->max_cq = dev->rdev.lldi.vr->qp.size;
	props->max_cqe = dev->rdev.hw_queue.t4_max_cq_depth;
	props->max_mr = c4iw_num_stags(&dev->rdev);
	props->max_pd = T4_MAX_NUM_PD;
	props->local_ca_ack_delay = 0;
	props->max_fast_reg_page_list_len =
		t4_max_fr_depth(dev->rdev.lldi.ulptx_memwrite_dsgl && use_dsgl);

	return 0;
}
static int c4iw_query_port(struct ib_device *ibdev, u8 port,
			   struct ib_port_attr *props)
{
	int ret = 0;

	pr_debug("ibdev %p\n", ibdev);
	ret = ib_get_eth_speed(ibdev, port, &props->active_speed,
			       &props->active_width);

	props->port_cap_flags =
	    IB_PORT_CM_SUP |
	    IB_PORT_SNMP_TUNNEL_SUP |
	    IB_PORT_REINIT_SUP |
	    IB_PORT_DEVICE_MGMT_SUP |
	    IB_PORT_VENDOR_CLASS_SUP | IB_PORT_BOOT_MGMT_SUP;
	props->gid_tbl_len = 1;
	props->pkey_tbl_len = 1;
	props->max_msg_sz = -1;

	return ret;
}
static ssize_t hw_rev_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct c4iw_dev *c4iw_dev =
			rdma_device_to_drv_device(dev, struct c4iw_dev, ibdev);

	pr_debug("dev 0x%p\n", dev);
	return sprintf(buf, "%d\n",
		       CHELSIO_CHIP_RELEASE(c4iw_dev->rdev.lldi.adapter_type));
}
static DEVICE_ATTR_RO(hw_rev);

static ssize_t hca_type_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct c4iw_dev *c4iw_dev =
			rdma_device_to_drv_device(dev, struct c4iw_dev, ibdev);
	struct ethtool_drvinfo info;
	struct net_device *lldev = c4iw_dev->rdev.lldi.ports[0];

	pr_debug("dev 0x%p\n", dev);
	lldev->ethtool_ops->get_drvinfo(lldev, &info);
	return sprintf(buf, "%s\n", info.driver);
}
static DEVICE_ATTR_RO(hca_type);

static ssize_t board_id_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct c4iw_dev *c4iw_dev =
			rdma_device_to_drv_device(dev, struct c4iw_dev, ibdev);

	pr_debug("dev 0x%p\n", dev);
	return sprintf(buf, "%x.%x\n", c4iw_dev->rdev.lldi.pdev->vendor,
		       c4iw_dev->rdev.lldi.pdev->device);
}
static DEVICE_ATTR_RO(board_id);
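
/* Per-adapter TCP MIB counters exported through the rdma_hw_stats interface. */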
enum counters {
	IP4INSEGS,
	IP4OUTSEGS,
	IP4RETRANSSEGS,
	IP4OUTRSTS,
	IP6INSEGS,
	IP6OUTSEGS,
	IP6RETRANSSEGS,
	IP6OUTRSTS,
	NR_COUNTERS
};

static const char * const names[] = {
	[IP4INSEGS] = "ip4InSegs",
	[IP4OUTSEGS] = "ip4OutSegs",
	[IP4RETRANSSEGS] = "ip4RetransSegs",
	[IP4OUTRSTS] = "ip4OutRsts",
	[IP6INSEGS] = "ip6InSegs",
	[IP6OUTSEGS] = "ip6OutSegs",
	[IP6RETRANSSEGS] = "ip6RetransSegs",
	[IP6OUTRSTS] = "ip6OutRsts"
};

static struct rdma_hw_stats *c4iw_alloc_stats(struct ib_device *ibdev,
					      u8 port_num)
{
	BUILD_BUG_ON(ARRAY_SIZE(names) != NR_COUNTERS);

	/* Only device-level stats are supported. */
	if (port_num != 0)
		return NULL;

	return rdma_alloc_hw_stats_struct(names, NR_COUNTERS,
					  RDMA_HW_STATS_DEFAULT_LIFESPAN);
}
static int c4iw_get_mib(struct ib_device *ibdev,
			struct rdma_hw_stats *stats,
			u8 port, int index)
{
	struct tp_tcp_stats v4, v6;
	struct c4iw_dev *c4iw_dev = to_c4iw_dev(ibdev);

	cxgb4_get_tcp_stats(c4iw_dev->rdev.lldi.pdev, &v4, &v6);
	stats->value[IP4INSEGS] = v4.tcp_in_segs;
	stats->value[IP4OUTSEGS] = v4.tcp_out_segs;
	stats->value[IP4RETRANSSEGS] = v4.tcp_retrans_segs;
	stats->value[IP4OUTRSTS] = v4.tcp_out_rsts;
	stats->value[IP6INSEGS] = v6.tcp_in_segs;
	stats->value[IP6OUTSEGS] = v6.tcp_out_segs;
	stats->value[IP6RETRANSSEGS] = v6.tcp_retrans_segs;
	stats->value[IP6OUTRSTS] = v6.tcp_out_rsts;

	return stats->num_counters;
}
static struct attribute *c4iw_class_attributes[] = {
	&dev_attr_hw_rev.attr,
	&dev_attr_hca_type.attr,
	&dev_attr_board_id.attr,
	NULL
};

static const struct attribute_group c4iw_attr_group = {
	.attrs = c4iw_class_attributes,
};
static int c4iw_port_immutable(struct ib_device *ibdev, u8 port_num,
			       struct ib_port_immutable *immutable)
{
	struct ib_port_attr attr;
	int err;

	immutable->core_cap_flags = RDMA_CORE_PORT_IWARP;

	err = ib_query_port(ibdev, port_num, &attr);
	if (err)
		return err;

	immutable->pkey_tbl_len = attr.pkey_tbl_len;
	immutable->gid_tbl_len = attr.gid_tbl_len;

	return 0;
}
static void get_dev_fw_str(struct ib_device *dev, char *str)
{
	struct c4iw_dev *c4iw_dev = container_of(dev, struct c4iw_dev,
						 ibdev);
	pr_debug("dev 0x%p\n", dev);

	snprintf(str, IB_FW_VERSION_NAME_MAX, "%u.%u.%u.%u",
		 FW_HDR_FW_VER_MAJOR_G(c4iw_dev->rdev.lldi.fw_vers),
		 FW_HDR_FW_VER_MINOR_G(c4iw_dev->rdev.lldi.fw_vers),
		 FW_HDR_FW_VER_MICRO_G(c4iw_dev->rdev.lldi.fw_vers),
		 FW_HDR_FW_VER_BUILD_G(c4iw_dev->rdev.lldi.fw_vers));
}
static int fill_res_entry(struct sk_buff *msg, struct rdma_restrack_entry *res)
{
	return (res->type < ARRAY_SIZE(c4iw_restrack_funcs) &&
		c4iw_restrack_funcs[res->type]) ?
		c4iw_restrack_funcs[res->type](msg, res) : 0;
}
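
/* Verbs entry points advertised to the RDMA core for this provider. */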
static const struct ib_device_ops c4iw_dev_ops = {
	.owner = THIS_MODULE,
	.driver_id = RDMA_DRIVER_CXGB4,
	.uverbs_abi_ver = C4IW_UVERBS_ABI_VERSION,

	.alloc_hw_stats = c4iw_alloc_stats,
	.alloc_mr = c4iw_alloc_mr,
	.alloc_mw = c4iw_alloc_mw,
	.alloc_pd = c4iw_allocate_pd,
	.alloc_ucontext = c4iw_alloc_ucontext,
	.create_cq = c4iw_create_cq,
	.create_qp = c4iw_create_qp,
	.create_srq = c4iw_create_srq,
	.dealloc_mw = c4iw_dealloc_mw,
	.dealloc_pd = c4iw_deallocate_pd,
	.dealloc_ucontext = c4iw_dealloc_ucontext,
	.dereg_mr = c4iw_dereg_mr,
	.destroy_cq = c4iw_destroy_cq,
	.destroy_qp = c4iw_destroy_qp,
	.destroy_srq = c4iw_destroy_srq,
	.fill_res_entry = fill_res_entry,
	.get_dev_fw_str = get_dev_fw_str,
	.get_dma_mr = c4iw_get_dma_mr,
	.get_hw_stats = c4iw_get_mib,
	.get_port_immutable = c4iw_port_immutable,
	.iw_accept = c4iw_accept_cr,
	.iw_add_ref = c4iw_qp_add_ref,
	.iw_connect = c4iw_connect,
	.iw_create_listen = c4iw_create_listen,
	.iw_destroy_listen = c4iw_destroy_listen,
	.iw_get_qp = c4iw_get_qp,
	.iw_reject = c4iw_reject_cr,
	.iw_rem_ref = c4iw_qp_rem_ref,
	.map_mr_sg = c4iw_map_mr_sg,
	.mmap = c4iw_mmap,
	.modify_qp = c4iw_ib_modify_qp,
	.modify_srq = c4iw_modify_srq,
	.poll_cq = c4iw_poll_cq,
	.post_recv = c4iw_post_receive,
	.post_send = c4iw_post_send,
	.post_srq_recv = c4iw_post_srq_recv,
	.query_device = c4iw_query_device,
	.query_gid = c4iw_query_gid,
	.query_pkey = c4iw_query_pkey,
	.query_port = c4iw_query_port,
	.query_qp = c4iw_ib_query_qp,
	.reg_user_mr = c4iw_reg_user_mr,
	.req_notify_cq = c4iw_arm_cq,

	INIT_RDMA_OBJ_SIZE(ib_pd, c4iw_pd, ibpd),
	INIT_RDMA_OBJ_SIZE(ib_cq, c4iw_cq, ibcq),
	INIT_RDMA_OBJ_SIZE(ib_srq, c4iw_srq, ibsrq),
	INIT_RDMA_OBJ_SIZE(ib_ucontext, c4iw_ucontext, ibucontext),
};
static int set_netdevs(struct ib_device *ib_dev, struct c4iw_rdev *rdev)
{
	int ret;
	int i;

	for (i = 0; i < rdev->lldi.nports; i++) {
		ret = ib_device_set_netdev(ib_dev, rdev->lldi.ports[i],
					   i + 1);
		if (ret)
			return ret;
	}
	return 0;
}
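
/* Deferred device registration, run from the uld_ctx reg_work work item. */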
void c4iw_register_device(struct work_struct *work)
{
	int ret;
	struct uld_ctx *ctx = container_of(work, struct uld_ctx, reg_work);
	struct c4iw_dev *dev = ctx->dev;

	pr_debug("c4iw_dev %p\n", dev);
	memset(&dev->ibdev.node_guid, 0, sizeof(dev->ibdev.node_guid));
	memcpy(&dev->ibdev.node_guid, dev->rdev.lldi.ports[0]->dev_addr, 6);
	dev->device_cap_flags = IB_DEVICE_LOCAL_DMA_LKEY | IB_DEVICE_MEM_WINDOW;
	if (fastreg_support)
		dev->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
	dev->ibdev.local_dma_lkey = 0;
	dev->ibdev.uverbs_cmd_mask =
	    (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
	    (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
	    (1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
	    (1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
	    (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
	    (1ull << IB_USER_VERBS_CMD_REG_MR) |
	    (1ull << IB_USER_VERBS_CMD_DEREG_MR) |
	    (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
	    (1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
	    (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
	    (1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) |
	    (1ull << IB_USER_VERBS_CMD_CREATE_QP) |
	    (1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
	    (1ull << IB_USER_VERBS_CMD_QUERY_QP) |
	    (1ull << IB_USER_VERBS_CMD_POLL_CQ) |
	    (1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
	    (1ull << IB_USER_VERBS_CMD_POST_SEND) |
	    (1ull << IB_USER_VERBS_CMD_POST_RECV) |
	    (1ull << IB_USER_VERBS_CMD_CREATE_SRQ) |
	    (1ull << IB_USER_VERBS_CMD_MODIFY_SRQ) |
	    (1ull << IB_USER_VERBS_CMD_DESTROY_SRQ);
	dev->ibdev.node_type = RDMA_NODE_RNIC;
	BUILD_BUG_ON(sizeof(C4IW_NODE_DESC) > IB_DEVICE_NODE_DESC_MAX);
	memcpy(dev->ibdev.node_desc, C4IW_NODE_DESC, sizeof(C4IW_NODE_DESC));
	dev->ibdev.phys_port_cnt = dev->rdev.lldi.nports;
	dev->ibdev.num_comp_vectors = dev->rdev.lldi.nciq;
	dev->ibdev.dev.parent = &dev->rdev.lldi.pdev->dev;

	memcpy(dev->ibdev.iw_ifname, dev->rdev.lldi.ports[0]->name,
	       sizeof(dev->ibdev.iw_ifname));

	rdma_set_device_sysfs_group(&dev->ibdev, &c4iw_attr_group);
	ib_set_device_ops(&dev->ibdev, &c4iw_dev_ops);
	ret = set_netdevs(&dev->ibdev, &dev->rdev);
	if (ret)
		goto err_dealloc_ctx;
	ret = ib_register_device(&dev->ibdev, "cxgb4_%d");
	if (ret)
		goto err_dealloc_ctx;
	return;

err_dealloc_ctx:
	pr_err("%s - Failed registering iwarp device: %d\n",
	       pci_name(ctx->lldi.pdev), ret);
	c4iw_dealloc(ctx);
	return;
}
void c4iw_unregister_device(struct c4iw_dev *dev)
{
	pr_debug("c4iw_dev %p\n", dev);
	ib_unregister_device(&dev->ibdev);
}