/*
 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/debugfs.h>

#include <rdma/ib_verbs.h>

#include "iw_cxgb4.h"

#define DRV_VERSION "0.1"
MODULE_AUTHOR("Steve Wise");
MODULE_DESCRIPTION("Chelsio T4 RDMA Driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);

static LIST_HEAD(dev_list);
static DEFINE_MUTEX(dev_mutex);
static struct dentry *c4iw_debugfs_root;
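
/*
 * Each debugfs file below ("qps", "stags") is a one-shot snapshot: open()
 * counts the idr entries under the device lock, sizes and fills a kernel
 * buffer, read() serves that buffer via simple_read_from_buffer(), and
 * release() frees it.  Assuming debugfs is mounted in the usual place, a
 * dump can be read from userspace with e.g.:
 *
 *	cat /sys/kernel/debug/iw_cxgb4/<pci-slot-name>/qps
 */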
struct c4iw_debugfs_data {
	struct c4iw_dev *devp;
	char *buf;
	int bufsize;
	int pos;
};
static int count_idrs(int id, void *p, void *data)
{
	int *countp = data;

	*countp = *countp + 1;
	return 0;
}
static ssize_t debugfs_read(struct file *file, char __user *buf, size_t count,
			    loff_t *ppos)
{
	struct c4iw_debugfs_data *d = file->private_data;

	return simple_read_from_buffer(buf, count, ppos, d->buf, d->pos);
}
static int dump_qp(int id, void *p, void *data)
{
	struct c4iw_qp *qp = p;
	struct c4iw_debugfs_data *qpd = data;
	int space;
	int cc;

	if (id != qp->wq.sq.qid)
		return 0;

	space = qpd->bufsize - qpd->pos - 1;
	if (space == 0)
		return 1;

	if (qp->ep)
		cc = snprintf(qpd->buf + qpd->pos, space, "qp id %u state %u "
			      "ep tid %u state %u %pI4:%u->%pI4:%u\n",
			      qp->wq.sq.qid, (int)qp->attr.state,
			      qp->ep->hwtid, (int)qp->ep->com.state,
			      &qp->ep->com.local_addr.sin_addr.s_addr,
			      ntohs(qp->ep->com.local_addr.sin_port),
			      &qp->ep->com.remote_addr.sin_addr.s_addr,
			      ntohs(qp->ep->com.remote_addr.sin_port));
	else
		cc = snprintf(qpd->buf + qpd->pos, space, "qp id %u state %u\n",
			      qp->wq.sq.qid, (int)qp->attr.state);
	if (cc < space)
		qpd->pos += cc;
	return 0;
}
static int qp_release(struct inode *inode, struct file *file)
{
	struct c4iw_debugfs_data *qpd = file->private_data;

	if (!qpd) {
		printk(KERN_INFO "%s null qpd?\n", __func__);
		return 0;
	}
	kfree(qpd->buf);
	kfree(qpd);
	return 0;
}
static int qp_open(struct inode *inode, struct file *file)
{
	struct c4iw_debugfs_data *qpd;
	int ret = 0;
	int count = 1;

	qpd = kmalloc(sizeof *qpd, GFP_KERNEL);
	if (!qpd) {
		ret = -ENOMEM;
		goto out;
	}
	qpd->devp = inode->i_private;
	qpd->pos = 0;

	spin_lock_irq(&qpd->devp->lock);
	idr_for_each(&qpd->devp->qpidr, count_idrs, &count);
	spin_unlock_irq(&qpd->devp->lock);

	qpd->bufsize = count * 128;
	qpd->buf = kmalloc(qpd->bufsize, GFP_KERNEL);
	if (!qpd->buf) {
		ret = -ENOMEM;
		goto err1;
	}

	spin_lock_irq(&qpd->devp->lock);
	idr_for_each(&qpd->devp->qpidr, dump_qp, qpd);
	spin_unlock_irq(&qpd->devp->lock);

	qpd->buf[qpd->pos++] = 0;
	file->private_data = qpd;
	goto out;
err1:
	kfree(qpd);
out:
	return ret;
}
static const struct file_operations qp_debugfs_fops = {
	.owner   = THIS_MODULE,
	.open    = qp_open,
	.release = qp_release,
	.read    = debugfs_read,
	.llseek  = default_llseek,
};
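
/*
 * A T4 STag is a 24-bit index with an 8-bit key in its low byte; the
 * mmidr stores only the index, so dump_stag() shifts the id left 8 bits
 * to print the base STag value.
 */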
static int dump_stag(int id, void *p, void *data)
{
	struct c4iw_debugfs_data *stagd = data;
	int space;
	int cc;

	space = stagd->bufsize - stagd->pos - 1;
	if (space == 0)
		return 1;

	cc = snprintf(stagd->buf + stagd->pos, space, "0x%x\n", id<<8);
	if (cc < space)
		stagd->pos += cc;
	return 0;
}
static int stag_release(struct inode *inode, struct file *file)
{
	struct c4iw_debugfs_data *stagd = file->private_data;

	if (!stagd) {
		printk(KERN_INFO "%s null stagd?\n", __func__);
		return 0;
	}
	kfree(stagd->buf);
	kfree(stagd);
	return 0;
}
static int stag_open(struct inode *inode, struct file *file)
{
	struct c4iw_debugfs_data *stagd;
	int ret = 0;
	int count = 1;

	stagd = kmalloc(sizeof *stagd, GFP_KERNEL);
	if (!stagd) {
		ret = -ENOMEM;
		goto out;
	}
	stagd->devp = inode->i_private;
	stagd->pos = 0;

	spin_lock_irq(&stagd->devp->lock);
	idr_for_each(&stagd->devp->mmidr, count_idrs, &count);
	spin_unlock_irq(&stagd->devp->lock);

	stagd->bufsize = count * sizeof("0x12345678\n");
	stagd->buf = kmalloc(stagd->bufsize, GFP_KERNEL);
	if (!stagd->buf) {
		ret = -ENOMEM;
		goto err1;
	}

	spin_lock_irq(&stagd->devp->lock);
	idr_for_each(&stagd->devp->mmidr, dump_stag, stagd);
	spin_unlock_irq(&stagd->devp->lock);

	stagd->buf[stagd->pos++] = 0;
	file->private_data = stagd;
	goto out;
err1:
	kfree(stagd);
out:
	return ret;
}
static const struct file_operations stag_debugfs_fops = {
	.owner   = THIS_MODULE,
	.open    = stag_open,
	.release = stag_release,
	.read    = debugfs_read,
	.llseek  = default_llseek,
};
static int setup_debugfs(struct c4iw_dev *devp)
{
	struct dentry *de;

	if (!devp->debugfs_root)
		return -1;

	de = debugfs_create_file("qps", S_IWUSR, devp->debugfs_root,
				 (void *)devp, &qp_debugfs_fops);
	if (de && de->d_inode)
		de->d_inode->i_size = 4096;

	de = debugfs_create_file("stags", S_IWUSR, devp->debugfs_root,
				 (void *)devp, &stag_debugfs_fops);
	if (de && de->d_inode)
		de->d_inode->i_size = 4096;
	return 0;
}
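
/*
 * Qids handed to a user context are cached on uctx->qpids/cqids.  On
 * release, only a qid that starts a doorbell group (qid & qpmask == 0)
 * is returned to the global qid fifo; the remaining qids of that group
 * are derived from it and are simply freed with their list entries.
 */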
void c4iw_release_dev_ucontext(struct c4iw_rdev *rdev,
			       struct c4iw_dev_ucontext *uctx)
{
	struct list_head *pos, *nxt;
	struct c4iw_qid_list *entry;

	mutex_lock(&uctx->lock);
	list_for_each_safe(pos, nxt, &uctx->qpids) {
		entry = list_entry(pos, struct c4iw_qid_list, entry);
		list_del_init(&entry->entry);
		if (!(entry->qid & rdev->qpmask))
			c4iw_put_resource(&rdev->resource.qid_fifo, entry->qid,
					  &rdev->resource.qid_fifo_lock);
		kfree(entry);
	}

	list_for_each_safe(pos, nxt, &uctx->cqids) {
		entry = list_entry(pos, struct c4iw_qid_list, entry);
		list_del_init(&entry->entry);
		kfree(entry);
	}
	mutex_unlock(&uctx->lock);
}
void c4iw_init_dev_ucontext(struct c4iw_rdev *rdev,
			    struct c4iw_dev_ucontext *uctx)
{
	INIT_LIST_HEAD(&uctx->qpids);
	INIT_LIST_HEAD(&uctx->cqids);
	mutex_init(&uctx->lock);
}
/* Caller takes care of locking if needed */
static int c4iw_rdev_open(struct c4iw_rdev *rdev)
{
	int err;

	c4iw_init_dev_ucontext(rdev, &rdev->uctx);

	/*
	 * qpshift is the number of bits to shift the qpid left in order
	 * to get the correct address of the doorbell for that qp.
	 */
	rdev->qpshift = PAGE_SHIFT - ilog2(rdev->lldi.udb_density);
	rdev->qpmask = rdev->lldi.udb_density - 1;
	rdev->cqshift = PAGE_SHIFT - ilog2(rdev->lldi.ucq_density);
	rdev->cqmask = rdev->lldi.ucq_density - 1;
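	/*
	 * Illustrative example (values assumed, not taken from this source):
	 * with PAGE_SHIFT == 12 and udb_density == 1 (one user doorbell page
	 * per qp), qpshift is 12 and the doorbell for qid N sits at N << 12
	 * within the doorbell BAR; qpmask == 0 then means each qid owns a
	 * whole page, while a density of 2 would pair two qids per page.
	 */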
	PDBG("%s dev %s stag start 0x%0x size 0x%0x num stags %d "
	     "pbl start 0x%0x size 0x%0x rq start 0x%0x size 0x%0x "
	     "qp qid start %u size %u cq qid start %u size %u\n",
	     __func__, pci_name(rdev->lldi.pdev), rdev->lldi.vr->stag.start,
	     rdev->lldi.vr->stag.size, c4iw_num_stags(rdev),
	     rdev->lldi.vr->pbl.start,
	     rdev->lldi.vr->pbl.size, rdev->lldi.vr->rq.start,
	     rdev->lldi.vr->rq.size,
	     rdev->lldi.vr->qp.start,
	     rdev->lldi.vr->qp.size,
	     rdev->lldi.vr->cq.start,
	     rdev->lldi.vr->cq.size);
	PDBG("udb len 0x%x udb base %p db_reg %p gts_reg %p qpshift %lu "
	     "qpmask 0x%x cqshift %lu cqmask 0x%x\n",
	     (unsigned)pci_resource_len(rdev->lldi.pdev, 2),
	     (void *)pci_resource_start(rdev->lldi.pdev, 2),
	     rdev->lldi.db_reg,
	     rdev->lldi.gts_reg,
	     rdev->qpshift, rdev->qpmask,
	     rdev->cqshift, rdev->cqmask);

	if (c4iw_num_stags(rdev) == 0) {
		err = -EINVAL;
		goto err1;
	}

	err = c4iw_init_resource(rdev, c4iw_num_stags(rdev), T4_MAX_NUM_PD);
	if (err) {
		printk(KERN_ERR MOD "error %d initializing resources\n", err);
		goto err1;
	}
	err = c4iw_pblpool_create(rdev);
	if (err) {
		printk(KERN_ERR MOD "error %d initializing pbl pool\n", err);
		goto err2;
	}
	err = c4iw_rqtpool_create(rdev);
	if (err) {
		printk(KERN_ERR MOD "error %d initializing rqt pool\n", err);
		goto err3;
	}
	err = c4iw_ocqp_pool_create(rdev);
	if (err) {
		printk(KERN_ERR MOD "error %d initializing ocqp pool\n", err);
		goto err4;
	}
	return 0;
err4:
	c4iw_rqtpool_destroy(rdev);
err3:
	c4iw_pblpool_destroy(rdev);
err2:
	c4iw_destroy_resource(&rdev->resource);
err1:
	return err;
}
static void c4iw_rdev_close(struct c4iw_rdev *rdev)
{
	c4iw_pblpool_destroy(rdev);
	c4iw_rqtpool_destroy(rdev);
	c4iw_destroy_resource(&rdev->resource);
}
static void c4iw_remove(struct c4iw_dev *dev)
{
	PDBG("%s c4iw_dev %p\n", __func__, dev);
	cancel_delayed_work_sync(&dev->db_drop_task);
	list_del(&dev->entry);
	if (dev->registered)
		c4iw_unregister_device(dev);
	c4iw_rdev_close(&dev->rdev);
	idr_destroy(&dev->cqidr);
	idr_destroy(&dev->qpidr);
	idr_destroy(&dev->mmidr);
	iounmap(dev->rdev.oc_mw_kva);
	ib_dealloc_device(&dev->ibdev);
}
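
/*
 * The on-chip queue (OCQP) memory window is carved from the top of PCI
 * BAR2: oc_mw_pa points at the last roundup_pow_of_two(ocq.size) bytes
 * of the BAR, and the window is mapped write-combined below.
 */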
static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)
{
	struct c4iw_dev *devp;
	int ret;

	devp = (struct c4iw_dev *)ib_alloc_device(sizeof(*devp));
	if (!devp) {
		printk(KERN_ERR MOD "Cannot allocate ib device\n");
		return NULL;
	}
	devp->rdev.lldi = *infop;

	devp->rdev.oc_mw_pa = pci_resource_start(devp->rdev.lldi.pdev, 2) +
		(pci_resource_len(devp->rdev.lldi.pdev, 2) -
		 roundup_pow_of_two(devp->rdev.lldi.vr->ocq.size));
	devp->rdev.oc_mw_kva = ioremap_wc(devp->rdev.oc_mw_pa,
					  devp->rdev.lldi.vr->ocq.size);

	printk(KERN_INFO MOD "ocq memory: "
	       "hw_start 0x%x size %u mw_pa 0x%lx mw_kva %p\n",
	       devp->rdev.lldi.vr->ocq.start, devp->rdev.lldi.vr->ocq.size,
	       devp->rdev.oc_mw_pa, devp->rdev.oc_mw_kva);

	mutex_lock(&dev_mutex);

	ret = c4iw_rdev_open(&devp->rdev);
	if (ret) {
		mutex_unlock(&dev_mutex);
		printk(KERN_ERR MOD "Unable to open CXIO rdev err %d\n", ret);
		ib_dealloc_device(&devp->ibdev);
		return NULL;
	}

	idr_init(&devp->cqidr);
	idr_init(&devp->qpidr);
	idr_init(&devp->mmidr);
	spin_lock_init(&devp->lock);
	list_add_tail(&devp->entry, &dev_list);
	mutex_unlock(&dev_mutex);

	if (c4iw_debugfs_root) {
		devp->debugfs_root = debugfs_create_dir(
					pci_name(devp->rdev.lldi.pdev),
					c4iw_debugfs_root);
		setup_debugfs(devp);
	}
	return devp;
}
static void *c4iw_uld_add(const struct cxgb4_lld_info *infop)
{
	struct c4iw_dev *dev;
	static int vers_printed;
	int i;

	if (!vers_printed++)
		printk(KERN_INFO MOD "Chelsio T4 RDMA Driver - version %s\n",
		       DRV_VERSION);

	dev = c4iw_alloc(infop);
	if (!dev)
		goto out;

	PDBG("%s found device %s nchan %u nrxq %u ntxq %u nports %u\n",
	     __func__, pci_name(dev->rdev.lldi.pdev),
	     dev->rdev.lldi.nchan, dev->rdev.lldi.nrxq,
	     dev->rdev.lldi.ntxq, dev->rdev.lldi.nports);

	for (i = 0; i < dev->rdev.lldi.nrxq; i++)
		PDBG("rxqid[%u] %u\n", i, dev->rdev.lldi.rxq_ids[i]);
out:
	return dev;
}
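
/*
 * Ingress CPL messages arrive three ways: inline in the response
 * descriptor (gl == NULL), as a pure asynchronous notification
 * (gl == CXGB4_MSG_AN), or with payload in a gather list.  The first and
 * last cases are copied into an skb and dispatched through
 * c4iw_handlers[] keyed by CPL opcode.
 */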
static int c4iw_uld_rx_handler(void *handle, const __be64 *rsp,
			       const struct pkt_gl *gl)
{
	struct c4iw_dev *dev = handle;
	struct sk_buff *skb;
	const struct cpl_act_establish *rpl;
	unsigned int opcode;

	if (gl == NULL) {
		/* omit RSS and rsp_ctrl at end of descriptor */
		unsigned int len = 64 - sizeof(struct rsp_ctrl) - 8;

		skb = alloc_skb(256, GFP_ATOMIC);
		if (!skb)
			goto nomem;
		__skb_put(skb, len);
		skb_copy_to_linear_data(skb, &rsp[1], len);
	} else if (gl == CXGB4_MSG_AN) {
		const struct rsp_ctrl *rc = (void *)rsp;

		u32 qid = be32_to_cpu(rc->pldbuflen_qid);
		c4iw_ev_handler(dev, qid);
		return 0;
	} else {
		skb = cxgb4_pktgl_to_skb(gl, 128, 128);
		if (unlikely(!skb))
			goto nomem;
	}

	rpl = cplhdr(skb);
	opcode = rpl->ot.opcode;

	if (c4iw_handlers[opcode])
		c4iw_handlers[opcode](dev, skb);
	else
		printk(KERN_INFO "%s no handler opcode 0x%x...\n", __func__,
		       opcode);

	return 0;
nomem:
	return -1;
}
static int c4iw_uld_state_change(void *handle, enum cxgb4_state new_state)
{
	struct c4iw_dev *dev = handle;

	PDBG("%s new_state %u\n", __func__, new_state);
	switch (new_state) {
	case CXGB4_STATE_UP:
		printk(KERN_INFO MOD "%s: Up\n", pci_name(dev->rdev.lldi.pdev));
		if (!dev->registered) {
			int ret;
			ret = c4iw_register_device(dev);
			if (ret)
				printk(KERN_ERR MOD
				       "%s: RDMA registration failed: %d\n",
				       pci_name(dev->rdev.lldi.pdev), ret);
		}
		break;
	case CXGB4_STATE_DOWN:
		printk(KERN_INFO MOD "%s: Down\n",
		       pci_name(dev->rdev.lldi.pdev));
		if (dev->registered)
			c4iw_unregister_device(dev);
		break;
	case CXGB4_STATE_START_RECOVERY:
		printk(KERN_INFO MOD "%s: Fatal Error\n",
		       pci_name(dev->rdev.lldi.pdev));
		if (dev->registered)
			c4iw_unregister_device(dev);
		break;
	case CXGB4_STATE_DETACH:
		printk(KERN_INFO MOD "%s: Detach\n",
		       pci_name(dev->rdev.lldi.pdev));
		mutex_lock(&dev_mutex);
		c4iw_remove(dev);
		mutex_unlock(&dev_mutex);
		break;
	}
	return 0;
}
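
/*
 * Registering with the cxgb4 ULD framework below causes .add to be
 * called once for each adapter cxgb4 has already probed, and
 * .state_change to track subsequent up/down/recovery/detach transitions.
 */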
static struct cxgb4_uld_info c4iw_uld_info = {
	.name = DRV_NAME,
	.add = c4iw_uld_add,
	.rx_handler = c4iw_uld_rx_handler,
	.state_change = c4iw_uld_state_change,
};
static int __init c4iw_init_module(void)
{
	int err;

	err = c4iw_cm_init();
	if (err)
		return err;

	c4iw_debugfs_root = debugfs_create_dir(DRV_NAME, NULL);
	if (!c4iw_debugfs_root)
		printk(KERN_WARNING MOD
		       "could not create debugfs entry, continuing\n");

	cxgb4_register_uld(CXGB4_ULD_RDMA, &c4iw_uld_info);

	return 0;
}
static void __exit c4iw_exit_module(void)
{
	struct c4iw_dev *dev, *tmp;

	mutex_lock(&dev_mutex);
	list_for_each_entry_safe(dev, tmp, &dev_list, entry) {
		c4iw_remove(dev);
	}
	mutex_unlock(&dev_mutex);
	cxgb4_unregister_uld(CXGB4_ULD_RDMA);
	c4iw_cm_term();
	debugfs_remove_recursive(c4iw_debugfs_root);
}
module_init(c4iw_init_module);
module_exit(c4iw_exit_module);