/*
 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/debugfs.h>

#include <rdma/ib_verbs.h>

#include "iw_cxgb4.h"

#define DRV_VERSION "0.1"
MODULE_AUTHOR("Steve Wise");
MODULE_DESCRIPTION("Chelsio T4 RDMA Driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);

static LIST_HEAD(uld_ctx_list);
static DEFINE_MUTEX(dev_mutex);

static struct dentry *c4iw_debugfs_root;
struct c4iw_debugfs_data {
	struct c4iw_dev *devp;
	char *buf;		/* snapshot buffer filled at open() time */
	int bufsize;
	int pos;
};
static int count_idrs(int id, void *p, void *data)
{
	int *countp = data;

	*countp = *countp + 1;
	return 0;
}
static ssize_t debugfs_read(struct file *file, char __user *buf, size_t count,
			    loff_t *ppos)
{
	struct c4iw_debugfs_data *d = file->private_data;

	return simple_read_from_buffer(buf, count, ppos, d->buf, d->pos);
}
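/*
 * Each debugfs file in this driver follows the same pattern: open()
 * snapshots the relevant idr into a private kmalloc'd buffer, read()
 * serves that snapshot via simple_read_from_buffer(), and release()
 * frees it.  From userspace the files read as plain text, e.g.
 * (assuming debugfs is mounted at the usual location and DRV_NAME
 * expands to "iw_cxgb4"):
 *
 *   cat /sys/kernel/debug/iw_cxgb4/<pci-id>/qps
 */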
static int dump_qp(int id, void *p, void *data)
{
	struct c4iw_qp *qp = p;
	struct c4iw_debugfs_data *qpd = data;
	int space;
	int cc;

	if (id != qp->wq.sq.qid)
		return 0;

	space = qpd->bufsize - qpd->pos - 1;
	if (space == 0)
		return 1;

	if (qp->ep)
		cc = snprintf(qpd->buf + qpd->pos, space,
			      "qp sq id %u rq id %u state %u onchip %u "
			      "ep tid %u state %u %pI4:%u->%pI4:%u\n",
			      qp->wq.sq.qid, qp->wq.rq.qid, (int)qp->attr.state,
			      qp->wq.sq.flags & T4_SQ_ONCHIP,
			      qp->ep->hwtid, (int)qp->ep->com.state,
			      &qp->ep->com.local_addr.sin_addr.s_addr,
			      ntohs(qp->ep->com.local_addr.sin_port),
			      &qp->ep->com.remote_addr.sin_addr.s_addr,
			      ntohs(qp->ep->com.remote_addr.sin_port));
	else
		cc = snprintf(qpd->buf + qpd->pos, space,
			      "qp sq id %u rq id %u state %u onchip %u\n",
			      qp->wq.sq.qid, qp->wq.rq.qid, (int)qp->attr.state,
			      qp->wq.sq.flags & T4_SQ_ONCHIP);
	if (cc < space)
		qpd->pos += cc;
	return 0;
}
static int qp_release(struct inode *inode, struct file *file)
{
	struct c4iw_debugfs_data *qpd = file->private_data;

	if (!qpd) {
		printk(KERN_INFO "%s null qpd?\n", __func__);
		return 0;
	}
	kfree(qpd->buf);
	kfree(qpd);
	return 0;
}
static int qp_open(struct inode *inode, struct file *file)
{
	struct c4iw_debugfs_data *qpd;
	int ret = 0;
	int count = 1;

	qpd = kmalloc(sizeof *qpd, GFP_KERNEL);
	if (!qpd) {
		ret = -ENOMEM;
		goto out;
	}
	qpd->devp = inode->i_private;
	qpd->pos = 0;

	spin_lock_irq(&qpd->devp->lock);
	idr_for_each(&qpd->devp->qpidr, count_idrs, &count);
	spin_unlock_irq(&qpd->devp->lock);

	qpd->bufsize = count * 128;
	qpd->buf = kmalloc(qpd->bufsize, GFP_KERNEL);
	if (!qpd->buf) {
		ret = -ENOMEM;
		goto err1;
	}

	spin_lock_irq(&qpd->devp->lock);
	idr_for_each(&qpd->devp->qpidr, dump_qp, qpd);
	spin_unlock_irq(&qpd->devp->lock);

	qpd->buf[qpd->pos++] = 0;
	file->private_data = qpd;
	goto out;
err1:
	kfree(qpd);
out:
	return ret;
}
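/*
 * Note the two-pass scheme above: the first idr_for_each() only counts
 * entries so the snapshot buffer can be sized (128 bytes per QP is an
 * estimate, not a guarantee), and the second pass formats into it.
 * dump_qp() returns nonzero once the buffer is nearly full, which stops
 * idr_for_each() early instead of overflowing.
 */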
static const struct file_operations qp_debugfs_fops = {
	.owner   = THIS_MODULE,
	.open    = qp_open,
	.release = qp_release,
	.read    = debugfs_read,
	.llseek  = default_llseek,
};
static int dump_stag(int id, void *p, void *data)
{
	struct c4iw_debugfs_data *stagd = data;
	int space;
	int cc;

	space = stagd->bufsize - stagd->pos - 1;
	if (space == 0)
		return 1;

	cc = snprintf(stagd->buf + stagd->pos, space, "0x%x\n", id << 8);
	if (cc < space)
		stagd->pos += cc;
	return 0;
}
static int stag_release(struct inode *inode, struct file *file)
{
	struct c4iw_debugfs_data *stagd = file->private_data;

	if (!stagd) {
		printk(KERN_INFO "%s null stagd?\n", __func__);
		return 0;
	}
	kfree(stagd->buf);
	kfree(stagd);
	return 0;
}
static int stag_open(struct inode *inode, struct file *file)
{
	struct c4iw_debugfs_data *stagd;
	int ret = 0;
	int count = 1;

	stagd = kmalloc(sizeof *stagd, GFP_KERNEL);
	if (!stagd) {
		ret = -ENOMEM;
		goto out;
	}
	stagd->devp = inode->i_private;
	stagd->pos = 0;

	spin_lock_irq(&stagd->devp->lock);
	idr_for_each(&stagd->devp->mmidr, count_idrs, &count);
	spin_unlock_irq(&stagd->devp->lock);

	/* sizeof a string literal counts its NUL, so this budgets 12 bytes
	 * per "0x%x\n" entry - enough for any 32-bit stag. */
	stagd->bufsize = count * sizeof("0x12345678\n");
	stagd->buf = kmalloc(stagd->bufsize, GFP_KERNEL);
	if (!stagd->buf) {
		ret = -ENOMEM;
		goto err1;
	}

	spin_lock_irq(&stagd->devp->lock);
	idr_for_each(&stagd->devp->mmidr, dump_stag, stagd);
	spin_unlock_irq(&stagd->devp->lock);

	stagd->buf[stagd->pos++] = 0;
	file->private_data = stagd;
	goto out;
err1:
	kfree(stagd);
out:
	return ret;
}
static const struct file_operations stag_debugfs_fops = {
	.owner   = THIS_MODULE,
	.open    = stag_open,
	.release = stag_release,
	.read    = debugfs_read,
	.llseek  = default_llseek,
};
static int setup_debugfs(struct c4iw_dev *devp)
{
	struct dentry *de;

	if (!devp->debugfs_root)
		return -1;

	de = debugfs_create_file("qps", S_IWUSR, devp->debugfs_root,
				 (void *)devp, &qp_debugfs_fops);
	if (de && de->d_inode)
		de->d_inode->i_size = 4096;

	de = debugfs_create_file("stags", S_IWUSR, devp->debugfs_root,
				 (void *)devp, &stag_debugfs_fops);
	if (de && de->d_inode)
		de->d_inode->i_size = 4096;
	return 0;
}
void c4iw_release_dev_ucontext(struct c4iw_rdev *rdev,
			       struct c4iw_dev_ucontext *uctx)
{
	struct list_head *pos, *nxt;
	struct c4iw_qid_list *entry;

	mutex_lock(&uctx->lock);
	list_for_each_safe(pos, nxt, &uctx->qpids) {
		entry = list_entry(pos, struct c4iw_qid_list, entry);
		list_del_init(&entry->entry);
		if (!(entry->qid & rdev->qpmask))
			c4iw_put_resource(&rdev->resource.qid_fifo, entry->qid,
					  &rdev->resource.qid_fifo_lock);
		kfree(entry);
	}

	list_for_each_safe(pos, nxt, &uctx->cqids) {
		entry = list_entry(pos, struct c4iw_qid_list, entry);
		list_del_init(&entry->entry);
		kfree(entry);
	}
	mutex_unlock(&uctx->lock);
}
void c4iw_init_dev_ucontext(struct c4iw_rdev *rdev,
			    struct c4iw_dev_ucontext *uctx)
{
	INIT_LIST_HEAD(&uctx->qpids);
	INIT_LIST_HEAD(&uctx->cqids);
	mutex_init(&uctx->lock);
}
/* Caller takes care of locking if needed */
static int c4iw_rdev_open(struct c4iw_rdev *rdev)
{
	int err;

	c4iw_init_dev_ucontext(rdev, &rdev->uctx);

	/*
	 * qpshift is the number of bits to shift the qpid left in order
	 * to get the correct address of the doorbell for that qp.
	 */
	rdev->qpshift = PAGE_SHIFT - ilog2(rdev->lldi.udb_density);
	rdev->qpmask = rdev->lldi.udb_density - 1;
	rdev->cqshift = PAGE_SHIFT - ilog2(rdev->lldi.ucq_density);
	rdev->cqmask = rdev->lldi.ucq_density - 1;
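	/*
	 * A worked example of the shift/mask math, assuming PAGE_SHIFT == 12
	 * (4KB pages) and udb_density == 16 doorbells per page: qpshift is
	 * 12 - ilog2(16) == 8, so the user doorbell for qid N sits at byte
	 * offset (N << 8) in the doorbell BAR, and qpmask == 15 selects the
	 * qids that share a single doorbell page.
	 */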
	PDBG("%s dev %s stag start 0x%0x size 0x%0x num stags %d "
	     "pbl start 0x%0x size 0x%0x rq start 0x%0x size 0x%0x "
	     "qp qid start %u size %u cq qid start %u size %u\n",
	     __func__, pci_name(rdev->lldi.pdev), rdev->lldi.vr->stag.start,
	     rdev->lldi.vr->stag.size, c4iw_num_stags(rdev),
	     rdev->lldi.vr->pbl.start,
	     rdev->lldi.vr->pbl.size, rdev->lldi.vr->rq.start,
	     rdev->lldi.vr->rq.size,
	     rdev->lldi.vr->qp.start,
	     rdev->lldi.vr->qp.size,
	     rdev->lldi.vr->cq.start,
	     rdev->lldi.vr->cq.size);
	PDBG("udb len 0x%x udb base %p db_reg %p gts_reg %p qpshift %lu "
	     "qpmask 0x%x cqshift %lu cqmask 0x%x\n",
	     (unsigned)pci_resource_len(rdev->lldi.pdev, 2),
	     (void *)pci_resource_start(rdev->lldi.pdev, 2),
	     rdev->lldi.db_reg,
	     rdev->lldi.gts_reg,
	     rdev->qpshift, rdev->qpmask,
	     rdev->cqshift, rdev->cqmask);

	if (c4iw_num_stags(rdev) == 0) {
		err = -EINVAL;
		goto err1;
	}

	err = c4iw_init_resource(rdev, c4iw_num_stags(rdev), T4_MAX_NUM_PD);
	if (err) {
		printk(KERN_ERR MOD "error %d initializing resources\n", err);
		goto err1;
	}
	err = c4iw_pblpool_create(rdev);
	if (err) {
		printk(KERN_ERR MOD "error %d initializing pbl pool\n", err);
		goto err2;
	}
	err = c4iw_rqtpool_create(rdev);
	if (err) {
		printk(KERN_ERR MOD "error %d initializing rqt pool\n", err);
		goto err3;
	}
	err = c4iw_ocqp_pool_create(rdev);
	if (err) {
		printk(KERN_ERR MOD "error %d initializing ocqp pool\n", err);
		goto err4;
	}
	return 0;
err4:
	c4iw_rqtpool_destroy(rdev);
err3:
	c4iw_pblpool_destroy(rdev);
err2:
	c4iw_destroy_resource(&rdev->resource);
err1:
	return err;
}
static void c4iw_rdev_close(struct c4iw_rdev *rdev)
{
	c4iw_pblpool_destroy(rdev);
	c4iw_rqtpool_destroy(rdev);
	c4iw_destroy_resource(&rdev->resource);
}
struct uld_ctx {
	struct list_head entry;
	struct cxgb4_lld_info lldi;
	struct c4iw_dev *dev;
};
static void c4iw_remove(struct uld_ctx *ctx)
{
	PDBG("%s c4iw_dev %p\n", __func__, ctx->dev);
	c4iw_unregister_device(ctx->dev);
	c4iw_rdev_close(&ctx->dev->rdev);
	idr_destroy(&ctx->dev->cqidr);
	idr_destroy(&ctx->dev->qpidr);
	idr_destroy(&ctx->dev->mmidr);
	iounmap(ctx->dev->rdev.oc_mw_kva);
	ib_dealloc_device(&ctx->dev->ibdev);
	ctx->dev = NULL;
}
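/*
 * Teardown order above matters: the ib device is unregistered first so
 * no new verbs calls arrive, then the rdev resource pools are closed,
 * and only then are the idr tables, the on-chip queue mapping, and the
 * device structure itself released.
 */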
static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)
{
	struct c4iw_dev *devp;
	int ret;

	devp = (struct c4iw_dev *)ib_alloc_device(sizeof(*devp));
	if (!devp) {
		printk(KERN_ERR MOD "Cannot allocate ib device\n");
		return ERR_PTR(-ENOMEM);
	}
	devp->rdev.lldi = *infop;

	devp->rdev.oc_mw_pa = pci_resource_start(devp->rdev.lldi.pdev, 2) +
		(pci_resource_len(devp->rdev.lldi.pdev, 2) -
		 roundup_pow_of_two(devp->rdev.lldi.vr->ocq.size));
	devp->rdev.oc_mw_kva = ioremap_wc(devp->rdev.oc_mw_pa,
					  devp->rdev.lldi.vr->ocq.size);

	PDBG(KERN_INFO MOD "ocq memory: "
	     "hw_start 0x%x size %u mw_pa 0x%lx mw_kva %p\n",
	     devp->rdev.lldi.vr->ocq.start, devp->rdev.lldi.vr->ocq.size,
	     devp->rdev.oc_mw_pa, devp->rdev.oc_mw_kva);

	ret = c4iw_rdev_open(&devp->rdev);
	if (ret) {
		mutex_unlock(&dev_mutex);
		printk(KERN_ERR MOD "Unable to open CXIO rdev err %d\n", ret);
		ib_dealloc_device(&devp->ibdev);
		return ERR_PTR(ret);
	}

	idr_init(&devp->cqidr);
	idr_init(&devp->qpidr);
	idr_init(&devp->mmidr);
	spin_lock_init(&devp->lock);

	if (c4iw_debugfs_root) {
		devp->debugfs_root = debugfs_create_dir(
					pci_name(devp->rdev.lldi.pdev),
					c4iw_debugfs_root);
		setup_debugfs(devp);
	}
	return devp;
}
static void *c4iw_uld_add(const struct cxgb4_lld_info *infop)
{
	struct uld_ctx *ctx;
	static int vers_printed;
	int i;

	if (!vers_printed++)
		printk(KERN_INFO MOD "Chelsio T4 RDMA Driver - version %s\n",
		       DRV_VERSION);

	ctx = kzalloc(sizeof *ctx, GFP_KERNEL);
	if (!ctx) {
		ctx = ERR_PTR(-ENOMEM);
		goto out;
	}
	ctx->lldi = *infop;

	PDBG("%s found device %s nchan %u nrxq %u ntxq %u nports %u\n",
	     __func__, pci_name(ctx->lldi.pdev),
	     ctx->lldi.nchan, ctx->lldi.nrxq,
	     ctx->lldi.ntxq, ctx->lldi.nports);

	mutex_lock(&dev_mutex);
	list_add_tail(&ctx->entry, &uld_ctx_list);
	mutex_unlock(&dev_mutex);

	for (i = 0; i < ctx->lldi.nrxq; i++)
		PDBG("rxqid[%u] %u\n", i, ctx->lldi.rxq_ids[i]);
out:
	return ctx;
}
static int c4iw_uld_rx_handler(void *handle, const __be64 *rsp,
			       const struct pkt_gl *gl)
{
	struct uld_ctx *ctx = handle;
	struct c4iw_dev *dev = ctx->dev;
	struct sk_buff *skb;
	const struct cpl_act_establish *rpl;
	u8 opcode;

	if (gl == NULL) {
		/* omit RSS and rsp_ctrl at end of descriptor */
		unsigned int len = 64 - sizeof(struct rsp_ctrl) - 8;

		skb = alloc_skb(256, GFP_ATOMIC);
		if (!skb)
			goto nomem;
		__skb_put(skb, len);
		skb_copy_to_linear_data(skb, &rsp[1], len);
	} else if (gl == CXGB4_MSG_AN) {
		const struct rsp_ctrl *rc = (void *)rsp;

		u32 qid = be32_to_cpu(rc->pldbuflen_qid);
		c4iw_ev_handler(dev, qid);
		return 0;
	} else {
		skb = cxgb4_pktgl_to_skb(gl, 128, 128);
		if (unlikely(!skb))
			goto nomem;
	}

	rpl = cplhdr(skb);
	opcode = rpl->ot.opcode;

	if (c4iw_handlers[opcode])
		c4iw_handlers[opcode](dev, skb);
	else
		printk(KERN_INFO "%s no handler opcode 0x%x...\n", __func__,
		       opcode);

	return 0;
nomem:
	return -1;
}
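/*
 * The rx handler above distinguishes three delivery paths from cxgb4:
 * a NULL gather list means the CPL message fits entirely in the
 * response descriptor (so it is copied straight out of rsp[]), the
 * special CXGB4_MSG_AN cookie marks an async notification that only
 * carries a queue id, and anything else is a real packet gather list
 * converted to an skb.  The CPL opcode then indexes c4iw_handlers[].
 */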
static int c4iw_uld_state_change(void *handle, enum cxgb4_state new_state)
{
	struct uld_ctx *ctx = handle;

	PDBG("%s new_state %u\n", __func__, new_state);
	switch (new_state) {
	case CXGB4_STATE_UP:
		printk(KERN_INFO MOD "%s: Up\n", pci_name(ctx->lldi.pdev));
		if (!ctx->dev) {
			int ret = 0;

			ctx->dev = c4iw_alloc(&ctx->lldi);
			if (!IS_ERR(ctx->dev))
				ret = c4iw_register_device(ctx->dev);
			if (IS_ERR(ctx->dev) || ret)
				printk(KERN_ERR MOD
				       "%s: RDMA registration failed: %d\n",
				       pci_name(ctx->lldi.pdev), ret);
		}
		break;
	case CXGB4_STATE_DOWN:
		printk(KERN_INFO MOD "%s: Down\n",
		       pci_name(ctx->lldi.pdev));
		if (ctx->dev)
			c4iw_remove(ctx);
		break;
	case CXGB4_STATE_START_RECOVERY:
		printk(KERN_INFO MOD "%s: Fatal Error\n",
		       pci_name(ctx->lldi.pdev));
		if (ctx->dev) {
			struct ib_event event;

			ctx->dev->rdev.flags |= T4_FATAL_ERROR;
			memset(&event, 0, sizeof event);
			event.event  = IB_EVENT_DEVICE_FATAL;
			event.device = &ctx->dev->ibdev;
			ib_dispatch_event(&event);
			c4iw_remove(ctx);
		}
		break;
	case CXGB4_STATE_DETACH:
		printk(KERN_INFO MOD "%s: Detach\n",
		       pci_name(ctx->lldi.pdev));
		if (ctx->dev)
			c4iw_remove(ctx);
		break;
	}
	return 0;
}
static struct cxgb4_uld_info c4iw_uld_info = {
	.name = DRV_NAME,
	.add = c4iw_uld_add,
	.rx_handler = c4iw_uld_rx_handler,
	.state_change = c4iw_uld_state_change,
};
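/*
 * Registering this structure with cxgb4 (see c4iw_init_module() below)
 * makes the LLD call .add once per T4 adapter and .state_change as
 * adapters come up, go down, recover, or detach; the RDMA device itself
 * is created lazily on the CXGB4_STATE_UP transition.
 */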
static int __init c4iw_init_module(void)
{
	int err;

	err = c4iw_cm_init();
	if (err)
		return err;

	c4iw_debugfs_root = debugfs_create_dir(DRV_NAME, NULL);
	if (!c4iw_debugfs_root)
		printk(KERN_WARNING MOD
		       "could not create debugfs entry, continuing\n");

	cxgb4_register_uld(CXGB4_ULD_RDMA, &c4iw_uld_info);

	return 0;
}
static void __exit c4iw_exit_module(void)
{
	struct uld_ctx *ctx, *tmp;

	mutex_lock(&dev_mutex);
	list_for_each_entry_safe(ctx, tmp, &uld_ctx_list, entry) {
		if (ctx->dev)
			c4iw_remove(ctx);
		kfree(ctx);
	}
	mutex_unlock(&dev_mutex);
	cxgb4_unregister_uld(CXGB4_ULD_RDMA);
	c4iw_cm_term();
	debugfs_remove_recursive(c4iw_debugfs_root);
}
module_init(c4iw_init_module);
module_exit(c4iw_exit_module);