/*
 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/debugfs.h>

#include <rdma/ib_verbs.h>

#include "iw_cxgb4.h"
#define DRV_VERSION "0.1"

MODULE_AUTHOR("Steve Wise");
MODULE_DESCRIPTION("Chelsio T4 RDMA Driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
static LIST_HEAD(uld_ctx_list);
static DEFINE_MUTEX(dev_mutex);

static struct dentry *c4iw_debugfs_root;
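/*
 * Debugfs pattern used below: each open() walks the relevant idr under
 * the device lock and renders a text snapshot into a private buffer;
 * debugfs_read() then serves reads from that snapshot, and release()
 * frees it.
 */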
struct c4iw_debugfs_data {
	struct c4iw_dev *devp;
	char *buf;
	int bufsize;
	int pos;
};
static int count_idrs(int id, void *p, void *data)
{
	int *countp = data;

	*countp = *countp + 1;
	return 0;
}
static ssize_t debugfs_read(struct file *file, char __user *buf, size_t count,
			    loff_t *ppos)
{
	struct c4iw_debugfs_data *d = file->private_data;

	return simple_read_from_buffer(buf, count, ppos, d->buf, d->pos);
}
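/*
 * simple_read_from_buffer() takes care of *ppos, short reads and the
 * copy to user space, so no extra bookkeeping is needed here.
 */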
static int dump_qp(int id, void *p, void *data)
{
	struct c4iw_qp *qp = p;
	struct c4iw_debugfs_data *qpd = data;
	int space;
	int cc;

	if (id != qp->wq.sq.qid)
		return 0;

	space = qpd->bufsize - qpd->pos - 1;
	if (space == 0)
		return 1;

	if (qp->ep)
		cc = snprintf(qpd->buf + qpd->pos, space,
			      "qp sq id %u rq id %u state %u onchip %u "
			      "ep tid %u state %u %pI4:%u->%pI4:%u\n",
			      qp->wq.sq.qid, qp->wq.rq.qid, (int)qp->attr.state,
			      qp->wq.sq.flags & T4_SQ_ONCHIP,
			      qp->ep->hwtid, (int)qp->ep->com.state,
			      &qp->ep->com.local_addr.sin_addr.s_addr,
			      ntohs(qp->ep->com.local_addr.sin_port),
			      &qp->ep->com.remote_addr.sin_addr.s_addr,
			      ntohs(qp->ep->com.remote_addr.sin_port));
	else
		cc = snprintf(qpd->buf + qpd->pos, space,
			      "qp sq id %u rq id %u state %u onchip %u\n",
			      qp->wq.sq.qid, qp->wq.rq.qid,
			      (int)qp->attr.state,
			      qp->wq.sq.flags & T4_SQ_ONCHIP);
	if (cc < space)
		qpd->pos += cc;
	return 0;
}
static int qp_release(struct inode *inode, struct file *file)
{
	struct c4iw_debugfs_data *qpd = file->private_data;
	if (!qpd) {
		printk(KERN_INFO "%s null qpd?\n", __func__);
		return 0;
	}
	kfree(qpd->buf);
	kfree(qpd);
	return 0;
}
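/*
 * qp_open() snapshots every QP in the device's qpidr into a text
 * buffer, sized at 128 bytes per entry (count starts at 1 so an empty
 * idr still gets room for the terminating NUL). dump_qp() stops the
 * walk by returning nonzero once the buffer is full.
 */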
static int qp_open(struct inode *inode, struct file *file)
{
	struct c4iw_debugfs_data *qpd;
	int ret = 0;
	int count = 1;

	qpd = kmalloc(sizeof *qpd, GFP_KERNEL);
	if (!qpd) {
		ret = -ENOMEM;
		goto out;
	}
	qpd->devp = inode->i_private;
	qpd->pos = 0;

	spin_lock_irq(&qpd->devp->lock);
	idr_for_each(&qpd->devp->qpidr, count_idrs, &count);
	spin_unlock_irq(&qpd->devp->lock);

	qpd->bufsize = count * 128;
	qpd->buf = kmalloc(qpd->bufsize, GFP_KERNEL);
	if (!qpd->buf) {
		ret = -ENOMEM;
		goto err1;
	}

	spin_lock_irq(&qpd->devp->lock);
	idr_for_each(&qpd->devp->qpidr, dump_qp, qpd);
	spin_unlock_irq(&qpd->devp->lock);

	qpd->buf[qpd->pos++] = 0;
	file->private_data = qpd;
	goto out;
err1:
	kfree(qpd);
out:
	return ret;
}
static const struct file_operations qp_debugfs_fops = {
	.owner = THIS_MODULE,
	.open = qp_open,
	.release = qp_release,
	.read = debugfs_read,
	.llseek = default_llseek,
};
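/*
 * The mmidr is keyed by stag >> 8 (the low byte of a stag is the
 * consumer-owned key), so dump_stag() shifts the id back up to print
 * the full stag with the key byte as zero.
 */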
static int dump_stag(int id, void *p, void *data)
{
	struct c4iw_debugfs_data *stagd = data;
	int space;
	int cc;

	space = stagd->bufsize - stagd->pos - 1;
	if (space == 0)
		return 1;

	cc = snprintf(stagd->buf + stagd->pos, space, "0x%x\n", id << 8);
	if (cc < space)
		stagd->pos += cc;
	return 0;
}
static int stag_release(struct inode *inode, struct file *file)
{
	struct c4iw_debugfs_data *stagd = file->private_data;
	if (!stagd) {
		printk(KERN_INFO "%s null stagd?\n", __func__);
		return 0;
	}
	kfree(stagd->buf);
	kfree(stagd);
	return 0;
}
static int stag_open(struct inode *inode, struct file *file)
{
	struct c4iw_debugfs_data *stagd;
	int ret = 0;
	int count = 1;

	stagd = kmalloc(sizeof *stagd, GFP_KERNEL);
	if (!stagd) {
		ret = -ENOMEM;
		goto out;
	}
	stagd->devp = inode->i_private;
	stagd->pos = 0;

	spin_lock_irq(&stagd->devp->lock);
	idr_for_each(&stagd->devp->mmidr, count_idrs, &count);
	spin_unlock_irq(&stagd->devp->lock);

	stagd->bufsize = count * sizeof("0x12345678\n");
	stagd->buf = kmalloc(stagd->bufsize, GFP_KERNEL);
	if (!stagd->buf) {
		ret = -ENOMEM;
		goto err1;
	}

	spin_lock_irq(&stagd->devp->lock);
	idr_for_each(&stagd->devp->mmidr, dump_stag, stagd);
	spin_unlock_irq(&stagd->devp->lock);

	stagd->buf[stagd->pos++] = 0;
	file->private_data = stagd;
	goto out;
err1:
	kfree(stagd);
out:
	return ret;
}
static const struct file_operations stag_debugfs_fops = {
	.owner = THIS_MODULE,
	.open = stag_open,
	.release = stag_release,
	.read = debugfs_read,
	.llseek = default_llseek,
};
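/*
 * With debugfs mounted at /sys/kernel/debug, the entries below end up
 * at /sys/kernel/debug/<DRV_NAME>/<pci-name>/{qps,stags}. The i_size
 * poke is only a hint so userspace tools see a non-empty file.
 */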
static int setup_debugfs(struct c4iw_dev *devp)
{
	struct dentry *de;

	if (!devp->debugfs_root)
		return -1;

	de = debugfs_create_file("qps", S_IWUSR, devp->debugfs_root,
				 (void *)devp, &qp_debugfs_fops);
	if (de && de->d_inode)
		de->d_inode->i_size = 4096;

	de = debugfs_create_file("stags", S_IWUSR, devp->debugfs_root,
				 (void *)devp, &stag_debugfs_fops);
	if (de && de->d_inode)
		de->d_inode->i_size = 4096;
	return 0;
}
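/*
 * Only the first qid of each doorbell-page-sized block comes from the
 * qid fifo (the remaining qids in the block are derived from it),
 * hence the qpmask test below before a qid is returned to the pool.
 */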
void c4iw_release_dev_ucontext(struct c4iw_rdev *rdev,
			       struct c4iw_dev_ucontext *uctx)
{
	struct list_head *pos, *nxt;
	struct c4iw_qid_list *entry;

	mutex_lock(&uctx->lock);
	list_for_each_safe(pos, nxt, &uctx->qpids) {
		entry = list_entry(pos, struct c4iw_qid_list, entry);
		list_del_init(&entry->entry);
		if (!(entry->qid & rdev->qpmask))
			c4iw_put_resource(&rdev->resource.qid_fifo, entry->qid,
					  &rdev->resource.qid_fifo_lock);
		kfree(entry);
	}

	list_for_each_safe(pos, nxt, &uctx->cqids) {
		entry = list_entry(pos, struct c4iw_qid_list, entry);
		list_del_init(&entry->entry);
		kfree(entry);
	}
	mutex_unlock(&uctx->lock);
}
void c4iw_init_dev_ucontext(struct c4iw_rdev *rdev,
			    struct c4iw_dev_ucontext *uctx)
{
	INIT_LIST_HEAD(&uctx->qpids);
	INIT_LIST_HEAD(&uctx->cqids);
	mutex_init(&uctx->lock);
}
/* Caller takes care of locking if needed */
static int c4iw_rdev_open(struct c4iw_rdev *rdev)
{
	int err;

	c4iw_init_dev_ucontext(rdev, &rdev->uctx);

	/*
	 * qpshift is the number of bits to shift the qpid left in order
	 * to get the correct address of the doorbell for that qp.
	 */
	rdev->qpshift = PAGE_SHIFT - ilog2(rdev->lldi.udb_density);
	rdev->qpmask = rdev->lldi.udb_density - 1;
	rdev->cqshift = PAGE_SHIFT - ilog2(rdev->lldi.ucq_density);
	rdev->cqmask = rdev->lldi.ucq_density - 1;
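	/*
	 * Worked example (hypothetical numbers): with 4KB pages
	 * (PAGE_SHIFT == 12) and udb_density == 4, i.e. four qids sharing
	 * one user doorbell page, qpshift is 12 - ilog2(4) = 10, so qid 7's
	 * doorbell sits at offset 7 << 10 into the BAR2 region, and
	 * qpmask == 3 identifies the qids that share a page.
	 */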
	PDBG("%s dev %s stag start 0x%0x size 0x%0x num stags %d "
	     "pbl start 0x%0x size 0x%0x rq start 0x%0x size 0x%0x "
	     "qp qid start %u size %u cq qid start %u size %u\n",
	     __func__, pci_name(rdev->lldi.pdev), rdev->lldi.vr->stag.start,
	     rdev->lldi.vr->stag.size, c4iw_num_stags(rdev),
	     rdev->lldi.vr->pbl.start,
	     rdev->lldi.vr->pbl.size, rdev->lldi.vr->rq.start,
	     rdev->lldi.vr->rq.size,
	     rdev->lldi.vr->qp.start,
	     rdev->lldi.vr->qp.size,
	     rdev->lldi.vr->cq.start,
	     rdev->lldi.vr->cq.size);
	PDBG("udb len 0x%x udb base %p db_reg %p gts_reg %p qpshift %lu "
	     "qpmask 0x%x cqshift %lu cqmask 0x%x\n",
	     (unsigned)pci_resource_len(rdev->lldi.pdev, 2),
	     (void *)pci_resource_start(rdev->lldi.pdev, 2),
	     rdev->lldi.db_reg,
	     rdev->lldi.gts_reg,
	     rdev->qpshift, rdev->qpmask,
	     rdev->cqshift, rdev->cqmask);

	if (c4iw_num_stags(rdev) == 0) {
		err = -EINVAL;
		goto err1;
	}

	err = c4iw_init_resource(rdev, c4iw_num_stags(rdev), T4_MAX_NUM_PD);
	if (err) {
		printk(KERN_ERR MOD "error %d initializing resources\n", err);
		goto err1;
	}
	err = c4iw_pblpool_create(rdev);
	if (err) {
		printk(KERN_ERR MOD "error %d initializing pbl pool\n", err);
		goto err2;
	}
	err = c4iw_rqtpool_create(rdev);
	if (err) {
		printk(KERN_ERR MOD "error %d initializing rqt pool\n", err);
		goto err3;
	}
	err = c4iw_ocqp_pool_create(rdev);
	if (err) {
		printk(KERN_ERR MOD "error %d initializing ocqp pool\n", err);
		goto err4;
	}
	return 0;
err4:
	c4iw_rqtpool_destroy(rdev);
err3:
	c4iw_pblpool_destroy(rdev);
err2:
	c4iw_destroy_resource(&rdev->resource);
err1:
	return err;
}
static void c4iw_rdev_close(struct c4iw_rdev *rdev)
{
	c4iw_pblpool_destroy(rdev);
	c4iw_rqtpool_destroy(rdev);
	c4iw_destroy_resource(&rdev->resource);
}
struct uld_ctx {
	struct list_head entry;
	struct cxgb4_lld_info lldi;
	struct c4iw_dev *dev;
};
static void c4iw_dealloc(struct uld_ctx *ctx)
{
	c4iw_rdev_close(&ctx->dev->rdev);
	idr_destroy(&ctx->dev->cqidr);
	idr_destroy(&ctx->dev->qpidr);
	idr_destroy(&ctx->dev->mmidr);
	iounmap(ctx->dev->rdev.oc_mw_kva);
	ib_dealloc_device(&ctx->dev->ibdev);
	ctx->dev = NULL;
}
static void c4iw_remove(struct uld_ctx *ctx)
{
	PDBG("%s c4iw_dev %p\n", __func__, ctx->dev);
	c4iw_unregister_device(ctx->dev);
	c4iw_dealloc(ctx);
}
static int rdma_supported(const struct cxgb4_lld_info *infop)
{
	return infop->vr->stag.size > 0 && infop->vr->pbl.size > 0 &&
	       infop->vr->rq.size > 0 && infop->vr->qp.size > 0 &&
	       infop->vr->cq.size > 0 && infop->vr->ocq.size > 0;
}
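/*
 * The last roundup_pow_of_two(ocq.size) bytes of BAR2 back the on-chip
 * queue (OCQP) memory; c4iw_alloc() below maps that window
 * write-combined so on-chip queues can be written through oc_mw_kva.
 */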
static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)
{
	struct c4iw_dev *devp;
	int ret;

	if (!rdma_supported(infop)) {
		printk(KERN_INFO MOD "%s: RDMA not supported on this device.\n",
		       pci_name(infop->pdev));
		return ERR_PTR(-ENOSYS);
	}
	devp = (struct c4iw_dev *)ib_alloc_device(sizeof(*devp));
	if (!devp) {
		printk(KERN_ERR MOD "Cannot allocate ib device\n");
		return ERR_PTR(-ENOMEM);
	}
	devp->rdev.lldi = *infop;

	devp->rdev.oc_mw_pa = pci_resource_start(devp->rdev.lldi.pdev, 2) +
		(pci_resource_len(devp->rdev.lldi.pdev, 2) -
		 roundup_pow_of_two(devp->rdev.lldi.vr->ocq.size));
	devp->rdev.oc_mw_kva = ioremap_wc(devp->rdev.oc_mw_pa,
					  devp->rdev.lldi.vr->ocq.size);

	PDBG(KERN_INFO MOD "ocq memory: "
	     "hw_start 0x%x size %u mw_pa 0x%lx mw_kva %p\n",
	     devp->rdev.lldi.vr->ocq.start, devp->rdev.lldi.vr->ocq.size,
	     devp->rdev.oc_mw_pa, devp->rdev.oc_mw_kva);

	ret = c4iw_rdev_open(&devp->rdev);
	if (ret) {
		printk(KERN_ERR MOD "Unable to open CXIO rdev err %d\n", ret);
		ib_dealloc_device(&devp->ibdev);
		return ERR_PTR(ret);
	}

	idr_init(&devp->cqidr);
	idr_init(&devp->qpidr);
	idr_init(&devp->mmidr);
	spin_lock_init(&devp->lock);

	if (c4iw_debugfs_root) {
		devp->debugfs_root = debugfs_create_dir(
					pci_name(devp->rdev.lldi.pdev),
					c4iw_debugfs_root);
		setup_debugfs(devp);
	}
	return devp;
}
static void *c4iw_uld_add(const struct cxgb4_lld_info *infop)
{
	struct uld_ctx *ctx;
	static int vers_printed;
	int i;

	if (!vers_printed++)
		printk(KERN_INFO MOD "Chelsio T4 RDMA Driver - version %s\n",
		       DRV_VERSION);

	ctx = kzalloc(sizeof *ctx, GFP_KERNEL);
	if (!ctx) {
		ctx = ERR_PTR(-ENOMEM);
		goto out;
	}
	ctx->lldi = *infop;

	PDBG("%s found device %s nchan %u nrxq %u ntxq %u nports %u\n",
	     __func__, pci_name(ctx->lldi.pdev),
	     ctx->lldi.nchan, ctx->lldi.nrxq,
	     ctx->lldi.ntxq, ctx->lldi.nports);

	mutex_lock(&dev_mutex);
	list_add_tail(&ctx->entry, &uld_ctx_list);
	mutex_unlock(&dev_mutex);

	for (i = 0; i < ctx->lldi.nrxq; i++)
		PDBG("rxqid[%u] %u\n", i, ctx->lldi.rxq_ids[i]);
out:
	return ctx;
}
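/*
 * ULD receive path: the LLD hands up either an inline 64-byte response
 * descriptor (gl == NULL), an async notification (gl == CXGB4_MSG_AN),
 * or a packet gather list; the CPL opcode then selects a handler from
 * c4iw_handlers[].
 */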
static int c4iw_uld_rx_handler(void *handle, const __be64 *rsp,
			       const struct pkt_gl *gl)
{
	struct uld_ctx *ctx = handle;
	struct c4iw_dev *dev = ctx->dev;
	struct sk_buff *skb;
	const struct cpl_act_establish *rpl;
	unsigned int opcode;

	if (gl == NULL) {
		/* omit RSS and rsp_ctrl at end of descriptor */
		unsigned int len = 64 - sizeof(struct rsp_ctrl) - 8;

		skb = alloc_skb(256, GFP_ATOMIC);
		if (!skb)
			goto nomem;
		__skb_put(skb, len);
		skb_copy_to_linear_data(skb, &rsp[1], len);
	} else if (gl == CXGB4_MSG_AN) {
		const struct rsp_ctrl *rc = (void *)rsp;

		u32 qid = be32_to_cpu(rc->pldbuflen_qid);
		c4iw_ev_handler(dev, qid);
		return 0;
	} else {
		skb = cxgb4_pktgl_to_skb(gl, 128, 128);
		if (unlikely(!skb))
			goto nomem;
	}

	rpl = cplhdr(skb);
	opcode = rpl->ot.opcode;

	if (c4iw_handlers[opcode])
		c4iw_handlers[opcode](dev, skb);
	else
		printk(KERN_INFO "%s no handler opcode 0x%x...\n", __func__,
		       opcode);

	return 0;
nomem:
	return -1;
}
static int c4iw_uld_state_change(void *handle, enum cxgb4_state new_state)
{
	struct uld_ctx *ctx = handle;

	PDBG("%s new_state %u\n", __func__, new_state);
	switch (new_state) {
	case CXGB4_STATE_UP:
		printk(KERN_INFO MOD "%s: Up\n", pci_name(ctx->lldi.pdev));
		if (!ctx->dev) {
			int ret;

			ctx->dev = c4iw_alloc(&ctx->lldi);
			if (IS_ERR(ctx->dev)) {
				printk(KERN_ERR MOD
				       "%s: initialization failed: %ld\n",
				       pci_name(ctx->lldi.pdev),
				       PTR_ERR(ctx->dev));
				ctx->dev = NULL;
				break;
			}
			ret = c4iw_register_device(ctx->dev);
			if (ret) {
				printk(KERN_ERR MOD
				       "%s: RDMA registration failed: %d\n",
				       pci_name(ctx->lldi.pdev), ret);
				c4iw_dealloc(ctx);
			}
		}
		break;
	case CXGB4_STATE_DOWN:
		printk(KERN_INFO MOD "%s: Down\n",
		       pci_name(ctx->lldi.pdev));
		if (ctx->dev)
			c4iw_remove(ctx);
		break;
	case CXGB4_STATE_START_RECOVERY:
		printk(KERN_INFO MOD "%s: Fatal Error\n",
		       pci_name(ctx->lldi.pdev));
		if (ctx->dev) {
			struct ib_event event;

			ctx->dev->rdev.flags |= T4_FATAL_ERROR;
			memset(&event, 0, sizeof event);
			event.event = IB_EVENT_DEVICE_FATAL;
			event.device = &ctx->dev->ibdev;
			ib_dispatch_event(&event);
			c4iw_remove(ctx);
		}
		break;
	case CXGB4_STATE_DETACH:
		printk(KERN_INFO MOD "%s: Detach\n",
		       pci_name(ctx->lldi.pdev));
		if (ctx->dev)
			c4iw_remove(ctx);
		break;
	}
	return 0;
}
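/*
 * Note: c4iw_uld_add() only allocates the ULD context; the ib_device
 * itself is created and registered when the LLD reports CXGB4_STATE_UP
 * via the state_change callback above.
 */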
static struct cxgb4_uld_info c4iw_uld_info = {
	.name = DRV_NAME,
	.add = c4iw_uld_add,
	.rx_handler = c4iw_uld_rx_handler,
	.state_change = c4iw_uld_state_change,
};
static int __init c4iw_init_module(void)
{
	int err;

	err = c4iw_cm_init();
	if (err)
		return err;

	c4iw_debugfs_root = debugfs_create_dir(DRV_NAME, NULL);
	if (!c4iw_debugfs_root)
		printk(KERN_WARNING MOD
		       "could not create debugfs entry, continuing\n");

	cxgb4_register_uld(CXGB4_ULD_RDMA, &c4iw_uld_info);

	return 0;
}
static void __exit c4iw_exit_module(void)
{
	struct uld_ctx *ctx, *tmp;

	mutex_lock(&dev_mutex);
	list_for_each_entry_safe(ctx, tmp, &uld_ctx_list, entry) {
		if (ctx->dev)
			c4iw_remove(ctx);
		kfree(ctx);
	}
	mutex_unlock(&dev_mutex);
	cxgb4_unregister_uld(CXGB4_ULD_RDMA);
	c4iw_cm_term();
	debugfs_remove_recursive(c4iw_debugfs_root);
}
module_init(c4iw_init_module);
module_exit(c4iw_exit_module);