/*
 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/debugfs.h>
#include <linux/vmalloc.h>

#include <rdma/ib_verbs.h>

#include "iw_cxgb4.h"

#define DRV_VERSION "0.1"

MODULE_AUTHOR("Steve Wise");
MODULE_DESCRIPTION("Chelsio T4/T5 RDMA Driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);

static int allow_db_fc_on_t5;
module_param(allow_db_fc_on_t5, int, 0644);
MODULE_PARM_DESC(allow_db_fc_on_t5,
		 "Allow DB Flow Control on T5 (default = 0)");

static int allow_db_coalescing_on_t5;
module_param(allow_db_coalescing_on_t5, int, 0644);
MODULE_PARM_DESC(allow_db_coalescing_on_t5,
		 "Allow DB Coalescing on T5 (default = 0)");

struct uld_ctx {
	struct list_head entry;
	struct cxgb4_lld_info lldi;
	struct c4iw_dev *dev;
};

static LIST_HEAD(uld_ctx_list);
static DEFINE_MUTEX(dev_mutex);

static struct dentry *c4iw_debugfs_root;

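/*
 * Per-open state for the debugfs dump files below.  Each open() allocates a
 * snapshot buffer, walks the relevant idr under the device lock to fill it,
 * and debugfs_read() then serves reads out of that snapshot.
 */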
struct c4iw_debugfs_data {
	struct c4iw_dev *devp;
	char *buf;
	int bufsize;
	int pos;
};

static int count_idrs(int id, void *p, void *data)
{
	int *countp = data;

	*countp = *countp + 1;
	return 0;
}

static ssize_t debugfs_read(struct file *file, char __user *buf, size_t count,
			    loff_t *ppos)
{
	struct c4iw_debugfs_data *d = file->private_data;

	return simple_read_from_buffer(buf, count, ppos, d->buf, d->pos);
}

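/*
 * idr_for_each() callback: append a one-line summary of a QP (including the
 * endpoint's local/remote addresses when it is connected) to the snapshot
 * buffer.  Returning 1 when the buffer is full terminates the idr walk.
 */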
static int dump_qp(int id, void *p, void *data)
{
	struct c4iw_qp *qp = p;
	struct c4iw_debugfs_data *qpd = data;
	int space;
	int cc;

	if (id != qp->wq.sq.qid)
		return 0;

	space = qpd->bufsize - qpd->pos - 1;
	if (space == 0)
		return 1;

	if (qp->ep) {
		if (qp->ep->com.local_addr.ss_family == AF_INET) {
			struct sockaddr_in *lsin = (struct sockaddr_in *)
				&qp->ep->com.local_addr;
			struct sockaddr_in *rsin = (struct sockaddr_in *)
				&qp->ep->com.remote_addr;

			cc = snprintf(qpd->buf + qpd->pos, space,
				      "rc qp sq id %u rq id %u state %u "
				      "onchip %u ep tid %u state %u "
				      "%pI4:%u->%pI4:%u\n",
				      qp->wq.sq.qid, qp->wq.rq.qid,
				      (int)qp->attr.state,
				      qp->wq.sq.flags & T4_SQ_ONCHIP,
				      qp->ep->hwtid, (int)qp->ep->com.state,
				      &lsin->sin_addr, ntohs(lsin->sin_port),
				      &rsin->sin_addr, ntohs(rsin->sin_port));
		} else {
			struct sockaddr_in6 *lsin6 = (struct sockaddr_in6 *)
				&qp->ep->com.local_addr;
			struct sockaddr_in6 *rsin6 = (struct sockaddr_in6 *)
				&qp->ep->com.remote_addr;

			cc = snprintf(qpd->buf + qpd->pos, space,
				      "rc qp sq id %u rq id %u state %u "
				      "onchip %u ep tid %u state %u "
				      "%pI6:%u->%pI6:%u\n",
				      qp->wq.sq.qid, qp->wq.rq.qid,
				      (int)qp->attr.state,
				      qp->wq.sq.flags & T4_SQ_ONCHIP,
				      qp->ep->hwtid, (int)qp->ep->com.state,
				      &lsin6->sin6_addr,
				      ntohs(lsin6->sin6_port),
				      &rsin6->sin6_addr,
				      ntohs(rsin6->sin6_port));
		}
	} else
		cc = snprintf(qpd->buf + qpd->pos, space,
			      "qp sq id %u rq id %u state %u onchip %u\n",
			      qp->wq.sq.qid, qp->wq.rq.qid,
			      (int)qp->attr.state,
			      qp->wq.sq.flags & T4_SQ_ONCHIP);
	if (cc < space)
		qpd->pos += cc;
	return 0;
}

static int qp_release(struct inode *inode, struct file *file)
{
	struct c4iw_debugfs_data *qpd = file->private_data;
	if (!qpd) {
		printk(KERN_INFO "%s null qpd?\n", __func__);
		return 0;
	}
	vfree(qpd->buf);
	kfree(qpd);
	return 0;
}

static int qp_open(struct inode *inode, struct file *file)
{
	struct c4iw_debugfs_data *qpd;
	int ret = 0;
	int count = 1;

	qpd = kmalloc(sizeof *qpd, GFP_KERNEL);
	if (!qpd) {
		ret = -ENOMEM;
		goto out;
	}
	qpd->devp = inode->i_private;
	qpd->pos = 0;

	spin_lock_irq(&qpd->devp->lock);
	idr_for_each(&qpd->devp->qpidr, count_idrs, &count);
	spin_unlock_irq(&qpd->devp->lock);

	qpd->bufsize = count * 128;
	qpd->buf = vmalloc(qpd->bufsize);
	if (!qpd->buf) {
		ret = -ENOMEM;
		goto err1;
	}

	spin_lock_irq(&qpd->devp->lock);
	idr_for_each(&qpd->devp->qpidr, dump_qp, qpd);
	spin_unlock_irq(&qpd->devp->lock);

	qpd->buf[qpd->pos++] = 0;
	file->private_data = qpd;
	goto out;
err1:
	kfree(qpd);
out:
	return ret;
}

static const struct file_operations qp_debugfs_fops = {
	.owner   = THIS_MODULE,
	.open    = qp_open,
	.release = qp_release,
	.read    = debugfs_read,
	.llseek  = default_llseek,
};

static int dump_stag(int id, void *p, void *data)
{
	struct c4iw_debugfs_data *stagd = data;
	int space;
	int cc;

	space = stagd->bufsize - stagd->pos - 1;
	if (space == 0)
		return 1;

	cc = snprintf(stagd->buf + stagd->pos, space, "0x%x\n", id << 8);
	if (cc < space)
		stagd->pos += cc;
	return 0;
}

static int stag_release(struct inode *inode, struct file *file)
{
	struct c4iw_debugfs_data *stagd = file->private_data;
	if (!stagd) {
		printk(KERN_INFO "%s null stagd?\n", __func__);
		return 0;
	}
	kfree(stagd->buf);
	kfree(stagd);
	return 0;
}

static int stag_open(struct inode *inode, struct file *file)
{
	struct c4iw_debugfs_data *stagd;
	int ret = 0;
	int count = 1;

	stagd = kmalloc(sizeof *stagd, GFP_KERNEL);
	if (!stagd) {
		ret = -ENOMEM;
		goto out;
	}
	stagd->devp = inode->i_private;
	stagd->pos = 0;

	spin_lock_irq(&stagd->devp->lock);
	idr_for_each(&stagd->devp->mmidr, count_idrs, &count);
	spin_unlock_irq(&stagd->devp->lock);

	stagd->bufsize = count * sizeof("0x12345678\n");
	stagd->buf = kmalloc(stagd->bufsize, GFP_KERNEL);
	if (!stagd->buf) {
		ret = -ENOMEM;
		goto err1;
	}

	spin_lock_irq(&stagd->devp->lock);
	idr_for_each(&stagd->devp->mmidr, dump_stag, stagd);
	spin_unlock_irq(&stagd->devp->lock);

	stagd->buf[stagd->pos++] = 0;
	file->private_data = stagd;
	goto out;
err1:
	kfree(stagd);
out:
	return ret;
}

static const struct file_operations stag_debugfs_fops = {
	.owner   = THIS_MODULE,
	.open    = stag_open,
	.release = stag_release,
	.read    = debugfs_read,
	.llseek  = default_llseek,
};

static char *db_state_str[] = {"NORMAL", "FLOW_CONTROL", "RECOVERY"};

static int stats_show(struct seq_file *seq, void *v)
{
	struct c4iw_dev *dev = seq->private;

	seq_printf(seq, " Object: %10s %10s %10s %10s\n", "Total", "Current",
		   "Max", "Fail");
	seq_printf(seq, "     PDID: %10llu %10llu %10llu %10llu\n",
			dev->rdev.stats.pd.total, dev->rdev.stats.pd.cur,
			dev->rdev.stats.pd.max, dev->rdev.stats.pd.fail);
	seq_printf(seq, "      QID: %10llu %10llu %10llu %10llu\n",
			dev->rdev.stats.qid.total, dev->rdev.stats.qid.cur,
			dev->rdev.stats.qid.max, dev->rdev.stats.qid.fail);
	seq_printf(seq, "   TPTMEM: %10llu %10llu %10llu %10llu\n",
			dev->rdev.stats.stag.total, dev->rdev.stats.stag.cur,
			dev->rdev.stats.stag.max, dev->rdev.stats.stag.fail);
	seq_printf(seq, "   PBLMEM: %10llu %10llu %10llu %10llu\n",
			dev->rdev.stats.pbl.total, dev->rdev.stats.pbl.cur,
			dev->rdev.stats.pbl.max, dev->rdev.stats.pbl.fail);
	seq_printf(seq, "   RQTMEM: %10llu %10llu %10llu %10llu\n",
			dev->rdev.stats.rqt.total, dev->rdev.stats.rqt.cur,
			dev->rdev.stats.rqt.max, dev->rdev.stats.rqt.fail);
	seq_printf(seq, "  OCQPMEM: %10llu %10llu %10llu %10llu\n",
			dev->rdev.stats.ocqp.total, dev->rdev.stats.ocqp.cur,
			dev->rdev.stats.ocqp.max, dev->rdev.stats.ocqp.fail);
	seq_printf(seq, "  DB FULL: %10llu\n", dev->rdev.stats.db_full);
	seq_printf(seq, " DB EMPTY: %10llu\n", dev->rdev.stats.db_empty);
	seq_printf(seq, "  DB DROP: %10llu\n", dev->rdev.stats.db_drop);
	seq_printf(seq, " DB State: %s Transitions %llu\n",
		   db_state_str[dev->db_state],
		   dev->rdev.stats.db_state_transitions);
	seq_printf(seq, "TCAM_FULL: %10llu\n", dev->rdev.stats.tcam_full);
	seq_printf(seq, "ACT_OFLD_CONN_FAILS: %10llu\n",
		   dev->rdev.stats.act_ofld_conn_fails);
	seq_printf(seq, "PAS_OFLD_CONN_FAILS: %10llu\n",
		   dev->rdev.stats.pas_ofld_conn_fails);

	return 0;
}

static int stats_open(struct inode *inode, struct file *file)
{
	return single_open(file, stats_show, inode->i_private);
}

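/* Any write to the "stats" file resets the high-water and failure counters. */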
static ssize_t stats_clear(struct file *file, const char __user *buf,
		size_t count, loff_t *pos)
{
	struct c4iw_dev *dev = ((struct seq_file *)file->private_data)->private;

	mutex_lock(&dev->rdev.stats.lock);
	dev->rdev.stats.pd.max = 0;
	dev->rdev.stats.pd.fail = 0;
	dev->rdev.stats.qid.max = 0;
	dev->rdev.stats.qid.fail = 0;
	dev->rdev.stats.stag.max = 0;
	dev->rdev.stats.stag.fail = 0;
	dev->rdev.stats.pbl.max = 0;
	dev->rdev.stats.pbl.fail = 0;
	dev->rdev.stats.rqt.max = 0;
	dev->rdev.stats.rqt.fail = 0;
	dev->rdev.stats.ocqp.max = 0;
	dev->rdev.stats.ocqp.fail = 0;
	dev->rdev.stats.db_full = 0;
	dev->rdev.stats.db_empty = 0;
	dev->rdev.stats.db_drop = 0;
	dev->rdev.stats.db_state_transitions = 0;
	dev->rdev.stats.tcam_full = 0;
	dev->rdev.stats.act_ofld_conn_fails = 0;
	dev->rdev.stats.pas_ofld_conn_fails = 0;
	mutex_unlock(&dev->rdev.stats.lock);
	return count;
}

static const struct file_operations stats_debugfs_fops = {
	.owner   = THIS_MODULE,
	.open    = stats_open,
	.release = single_release,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.write   = stats_clear,
};

static int dump_ep(int id, void *p, void *data)
{
	struct c4iw_ep *ep = p;
	struct c4iw_debugfs_data *epd = data;
	int space;
	int cc;

	space = epd->bufsize - epd->pos - 1;
	if (space == 0)
		return 1;

	if (ep->com.local_addr.ss_family == AF_INET) {
		struct sockaddr_in *lsin = (struct sockaddr_in *)
			&ep->com.local_addr;
		struct sockaddr_in *rsin = (struct sockaddr_in *)
			&ep->com.remote_addr;

		cc = snprintf(epd->buf + epd->pos, space,
			      "ep %p cm_id %p qp %p state %d flags 0x%lx "
			      "history 0x%lx hwtid %d atid %d "
			      "%pI4:%d <-> %pI4:%d\n",
			      ep, ep->com.cm_id, ep->com.qp,
			      (int)ep->com.state, ep->com.flags,
			      ep->com.history, ep->hwtid, ep->atid,
			      &lsin->sin_addr, ntohs(lsin->sin_port),
			      &rsin->sin_addr, ntohs(rsin->sin_port));
	} else {
		struct sockaddr_in6 *lsin6 = (struct sockaddr_in6 *)
			&ep->com.local_addr;
		struct sockaddr_in6 *rsin6 = (struct sockaddr_in6 *)
			&ep->com.remote_addr;

		cc = snprintf(epd->buf + epd->pos, space,
			      "ep %p cm_id %p qp %p state %d flags 0x%lx "
			      "history 0x%lx hwtid %d atid %d "
			      "%pI6:%d <-> %pI6:%d\n",
			      ep, ep->com.cm_id, ep->com.qp,
			      (int)ep->com.state, ep->com.flags,
			      ep->com.history, ep->hwtid, ep->atid,
			      &lsin6->sin6_addr, ntohs(lsin6->sin6_port),
			      &rsin6->sin6_addr, ntohs(rsin6->sin6_port));
	}
	if (cc < space)
		epd->pos += cc;
	return 0;
}

static int dump_listen_ep(int id, void *p, void *data)
{
	struct c4iw_listen_ep *ep = p;
	struct c4iw_debugfs_data *epd = data;
	int space;
	int cc;

	space = epd->bufsize - epd->pos - 1;
	if (space == 0)
		return 1;

	if (ep->com.local_addr.ss_family == AF_INET) {
		struct sockaddr_in *lsin = (struct sockaddr_in *)
			&ep->com.local_addr;

		cc = snprintf(epd->buf + epd->pos, space,
			      "ep %p cm_id %p state %d flags 0x%lx stid %d "
			      "backlog %d %pI4:%d\n",
			      ep, ep->com.cm_id, (int)ep->com.state,
			      ep->com.flags, ep->stid, ep->backlog,
			      &lsin->sin_addr, ntohs(lsin->sin_port));
	} else {
		struct sockaddr_in6 *lsin6 = (struct sockaddr_in6 *)
			&ep->com.local_addr;

		cc = snprintf(epd->buf + epd->pos, space,
			      "ep %p cm_id %p state %d flags 0x%lx stid %d "
			      "backlog %d %pI6:%d\n",
			      ep, ep->com.cm_id, (int)ep->com.state,
			      ep->com.flags, ep->stid, ep->backlog,
			      &lsin6->sin6_addr, ntohs(lsin6->sin6_port));
	}
	if (cc < space)
		epd->pos += cc;
	return 0;
}

static int ep_release(struct inode *inode, struct file *file)
{
	struct c4iw_debugfs_data *epd = file->private_data;
	if (!epd) {
		pr_info("%s null qpd?\n", __func__);
		return 0;
	}
	vfree(epd->buf);
	kfree(epd);
	return 0;
}

static int ep_open(struct inode *inode, struct file *file)
{
	struct c4iw_debugfs_data *epd;
	int ret = 0;
	int count = 1;

	epd = kmalloc(sizeof(*epd), GFP_KERNEL);
	if (!epd) {
		ret = -ENOMEM;
		goto out;
	}
	epd->devp = inode->i_private;
	epd->pos = 0;

	spin_lock_irq(&epd->devp->lock);
	idr_for_each(&epd->devp->hwtid_idr, count_idrs, &count);
	idr_for_each(&epd->devp->atid_idr, count_idrs, &count);
	idr_for_each(&epd->devp->stid_idr, count_idrs, &count);
	spin_unlock_irq(&epd->devp->lock);

	epd->bufsize = count * 160;
	epd->buf = vmalloc(epd->bufsize);
	if (!epd->buf) {
		ret = -ENOMEM;
		goto err1;
	}

	spin_lock_irq(&epd->devp->lock);
	idr_for_each(&epd->devp->hwtid_idr, dump_ep, epd);
	idr_for_each(&epd->devp->atid_idr, dump_ep, epd);
	idr_for_each(&epd->devp->stid_idr, dump_listen_ep, epd);
	spin_unlock_irq(&epd->devp->lock);

	file->private_data = epd;
	goto out;
err1:
	kfree(epd);
out:
	return ret;
}

static const struct file_operations ep_debugfs_fops = {
	.owner   = THIS_MODULE,
	.open    = ep_open,
	.release = ep_release,
	.read    = debugfs_read,
};

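/*
 * Create the per-device debugfs files ("qps", "stags", "stats", "eps") under
 * this device's directory in the driver's debugfs root.
 */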
static int setup_debugfs(struct c4iw_dev *devp)
{
	struct dentry *de;

	if (!devp->debugfs_root)
		return -1;

	de = debugfs_create_file("qps", S_IWUSR, devp->debugfs_root,
				 (void *)devp, &qp_debugfs_fops);
	if (de && de->d_inode)
		de->d_inode->i_size = 4096;

	de = debugfs_create_file("stags", S_IWUSR, devp->debugfs_root,
				 (void *)devp, &stag_debugfs_fops);
	if (de && de->d_inode)
		de->d_inode->i_size = 4096;

	de = debugfs_create_file("stats", S_IWUSR, devp->debugfs_root,
				 (void *)devp, &stats_debugfs_fops);
	if (de && de->d_inode)
		de->d_inode->i_size = 4096;

	de = debugfs_create_file("eps", S_IWUSR, devp->debugfs_root,
				 (void *)devp, &ep_debugfs_fops);
	if (de && de->d_inode)
		de->d_inode->i_size = 4096;

	return 0;
}

void c4iw_release_dev_ucontext(struct c4iw_rdev *rdev,
			       struct c4iw_dev_ucontext *uctx)
{
	struct list_head *pos, *nxt;
	struct c4iw_qid_list *entry;

	mutex_lock(&uctx->lock);
	list_for_each_safe(pos, nxt, &uctx->qpids) {
		entry = list_entry(pos, struct c4iw_qid_list, entry);
		list_del_init(&entry->entry);
		if (!(entry->qid & rdev->qpmask)) {
			c4iw_put_resource(&rdev->resource.qid_table,
					  entry->qid);
			mutex_lock(&rdev->stats.lock);
			rdev->stats.qid.cur -= rdev->qpmask + 1;
			mutex_unlock(&rdev->stats.lock);
		}
		kfree(entry);
	}

	list_for_each_safe(pos, nxt, &uctx->qpids) {
		entry = list_entry(pos, struct c4iw_qid_list, entry);
		list_del_init(&entry->entry);
		kfree(entry);
	}
	mutex_unlock(&uctx->lock);
}

void c4iw_init_dev_ucontext(struct c4iw_rdev *rdev,
			    struct c4iw_dev_ucontext *uctx)
{
	INIT_LIST_HEAD(&uctx->qpids);
	INIT_LIST_HEAD(&uctx->cqids);
	mutex_init(&uctx->lock);
}

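/*
 * Example of the doorbell address math set up in c4iw_rdev_open() below
 * (values are illustrative, not taken from real hardware): with
 * PAGE_SHIFT = 12 and udb_density = 1 (one user doorbell region per page),
 * qpshift = 12 and qpmask = 0, so the doorbell for qid N lives at user
 * doorbell BAR offset (N << 12).  A larger udb_density packs more qids per
 * page and shrinks the shift accordingly.
 */
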
/* Caller takes care of locking if needed */
static int c4iw_rdev_open(struct c4iw_rdev *rdev)
{
	int err;

	c4iw_init_dev_ucontext(rdev, &rdev->uctx);

	/*
	 * qpshift is the number of bits to shift the qpid left in order
	 * to get the correct address of the doorbell for that qp.
	 */
	rdev->qpshift = PAGE_SHIFT - ilog2(rdev->lldi.udb_density);
	rdev->qpmask = rdev->lldi.udb_density - 1;
	rdev->cqshift = PAGE_SHIFT - ilog2(rdev->lldi.ucq_density);
	rdev->cqmask = rdev->lldi.ucq_density - 1;
	PDBG("%s dev %s stag start 0x%0x size 0x%0x num stags %d "
	     "pbl start 0x%0x size 0x%0x rq start 0x%0x size 0x%0x "
	     "qp qid start %u size %u cq qid start %u size %u\n",
	     __func__, pci_name(rdev->lldi.pdev), rdev->lldi.vr->stag.start,
	     rdev->lldi.vr->stag.size, c4iw_num_stags(rdev),
	     rdev->lldi.vr->pbl.start,
	     rdev->lldi.vr->pbl.size, rdev->lldi.vr->rq.start,
	     rdev->lldi.vr->rq.size,
	     rdev->lldi.vr->qp.start,
	     rdev->lldi.vr->qp.size,
	     rdev->lldi.vr->cq.start,
	     rdev->lldi.vr->cq.size);
	PDBG("udb len 0x%x udb base %llx db_reg %p gts_reg %p qpshift %lu "
	     "qpmask 0x%x cqshift %lu cqmask 0x%x\n",
	     (unsigned)pci_resource_len(rdev->lldi.pdev, 2),
	     (u64)pci_resource_start(rdev->lldi.pdev, 2),
	     rdev->lldi.db_reg,
	     rdev->lldi.gts_reg,
	     rdev->qpshift, rdev->qpmask,
	     rdev->cqshift, rdev->cqmask);

	if (c4iw_num_stags(rdev) == 0) {
		err = -EINVAL;
		goto err1;
	}

	rdev->stats.pd.total = T4_MAX_NUM_PD;
	rdev->stats.stag.total = rdev->lldi.vr->stag.size;
	rdev->stats.pbl.total = rdev->lldi.vr->pbl.size;
	rdev->stats.rqt.total = rdev->lldi.vr->rq.size;
	rdev->stats.ocqp.total = rdev->lldi.vr->ocq.size;
	rdev->stats.qid.total = rdev->lldi.vr->qp.size;

	err = c4iw_init_resource(rdev, c4iw_num_stags(rdev), T4_MAX_NUM_PD);
	if (err) {
		printk(KERN_ERR MOD "error %d initializing resources\n", err);
		goto err1;
	}
	err = c4iw_pblpool_create(rdev);
	if (err) {
		printk(KERN_ERR MOD "error %d initializing pbl pool\n", err);
		goto err2;
	}
	err = c4iw_rqtpool_create(rdev);
	if (err) {
		printk(KERN_ERR MOD "error %d initializing rqt pool\n", err);
		goto err3;
	}
	err = c4iw_ocqp_pool_create(rdev);
	if (err) {
		printk(KERN_ERR MOD "error %d initializing ocqp pool\n", err);
		goto err4;
	}
	return 0;
err4:
	c4iw_rqtpool_destroy(rdev);
err3:
	c4iw_pblpool_destroy(rdev);
err2:
	c4iw_destroy_resource(&rdev->resource);
err1:
	return err;
}

static void c4iw_rdev_close(struct c4iw_rdev *rdev)
{
	c4iw_pblpool_destroy(rdev);
	c4iw_rqtpool_destroy(rdev);
	c4iw_destroy_resource(&rdev->resource);
}

static void c4iw_dealloc(struct uld_ctx *ctx)
{
	c4iw_rdev_close(&ctx->dev->rdev);
	idr_destroy(&ctx->dev->cqidr);
	idr_destroy(&ctx->dev->qpidr);
	idr_destroy(&ctx->dev->mmidr);
	idr_destroy(&ctx->dev->hwtid_idr);
	idr_destroy(&ctx->dev->stid_idr);
	idr_destroy(&ctx->dev->atid_idr);
	iounmap(ctx->dev->rdev.oc_mw_kva);
	ib_dealloc_device(&ctx->dev->ibdev);
	ctx->dev = NULL;
}

static void c4iw_remove(struct uld_ctx *ctx)
{
	PDBG("%s c4iw_dev %p\n", __func__, ctx->dev);
	c4iw_unregister_device(ctx->dev);
	c4iw_dealloc(ctx);
}

static int rdma_supported(const struct cxgb4_lld_info *infop)
{
	return infop->vr->stag.size > 0 && infop->vr->pbl.size > 0 &&
	       infop->vr->rq.size > 0 && infop->vr->qp.size > 0 &&
	       infop->vr->cq.size > 0;
}

static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)
{
	struct c4iw_dev *devp;
	int ret;

	if (!rdma_supported(infop)) {
		printk(KERN_INFO MOD "%s: RDMA not supported on this device.\n",
		       pci_name(infop->pdev));
		return ERR_PTR(-ENOSYS);
	}
	if (!ocqp_supported(infop))
		pr_info("%s: On-Chip Queues not supported on this device.\n",
			pci_name(infop->pdev));

	if (!is_t4(infop->adapter_type)) {
		if (!allow_db_fc_on_t5) {
			db_fc_threshold = 100000;
			pr_info("DB Flow Control Disabled.\n");
		}

		if (!allow_db_coalescing_on_t5) {
			db_coalescing_threshold = -1;
			pr_info("DB Coalescing Disabled.\n");
		}
	}

	devp = (struct c4iw_dev *)ib_alloc_device(sizeof(*devp));
	if (!devp) {
		printk(KERN_ERR MOD "Cannot allocate ib device\n");
		return ERR_PTR(-ENOMEM);
	}
	devp->rdev.lldi = *infop;

	devp->rdev.oc_mw_pa = pci_resource_start(devp->rdev.lldi.pdev, 2) +
		(pci_resource_len(devp->rdev.lldi.pdev, 2) -
		 roundup_pow_of_two(devp->rdev.lldi.vr->ocq.size));
	devp->rdev.oc_mw_kva = ioremap_wc(devp->rdev.oc_mw_pa,
					  devp->rdev.lldi.vr->ocq.size);

	PDBG(KERN_INFO MOD "ocq memory: "
	     "hw_start 0x%x size %u mw_pa 0x%lx mw_kva %p\n",
	     devp->rdev.lldi.vr->ocq.start, devp->rdev.lldi.vr->ocq.size,
	     devp->rdev.oc_mw_pa, devp->rdev.oc_mw_kva);

	ret = c4iw_rdev_open(&devp->rdev);
	if (ret) {
		printk(KERN_ERR MOD "Unable to open CXIO rdev err %d\n", ret);
		ib_dealloc_device(&devp->ibdev);
		return ERR_PTR(ret);
	}

	idr_init(&devp->cqidr);
	idr_init(&devp->qpidr);
	idr_init(&devp->mmidr);
	idr_init(&devp->hwtid_idr);
	idr_init(&devp->stid_idr);
	idr_init(&devp->atid_idr);
	spin_lock_init(&devp->lock);
	mutex_init(&devp->rdev.stats.lock);
	mutex_init(&devp->db_mutex);

	if (c4iw_debugfs_root) {
		devp->debugfs_root = debugfs_create_dir(
					pci_name(devp->rdev.lldi.pdev),
					c4iw_debugfs_root);
		setup_debugfs(devp);
	}
	return devp;
}

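/*
 * ULD "add" callback from cxgb4: allocate a uld_ctx for this adapter and
 * remember its lld_info.  The c4iw_dev itself is not created until the
 * CXGB4_STATE_UP state change arrives (see c4iw_uld_state_change()).
 */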
static void *c4iw_uld_add(const struct cxgb4_lld_info *infop)
{
	struct uld_ctx *ctx;
	static int vers_printed;
	int i;

	if (!vers_printed++)
		pr_info("Chelsio T4/T5 RDMA Driver - version %s\n",
			DRV_VERSION);

	ctx = kzalloc(sizeof *ctx, GFP_KERNEL);
	if (!ctx) {
		ctx = ERR_PTR(-ENOMEM);
		goto out;
	}
	ctx->lldi = *infop;

	PDBG("%s found device %s nchan %u nrxq %u ntxq %u nports %u\n",
	     __func__, pci_name(ctx->lldi.pdev),
	     ctx->lldi.nchan, ctx->lldi.nrxq,
	     ctx->lldi.ntxq, ctx->lldi.nports);

	mutex_lock(&dev_mutex);
	list_add_tail(&ctx->entry, &uld_ctx_list);
	mutex_unlock(&dev_mutex);

	for (i = 0; i < ctx->lldi.nrxq; i++)
		PDBG("rxqid[%u] %u\n", i, ctx->lldi.rxq_ids[i]);
out:
	return ctx;
}

static inline struct sk_buff *copy_gl_to_skb_pkt(const struct pkt_gl *gl,
						 const __be64 *rsp,
						 u32 pktshift)
{
	struct sk_buff *skb;

	/*
	 * Allocate space for cpl_pass_accept_req which will be synthesized by
	 * driver. Once the driver synthesizes the request the skb will go
	 * through the regular cpl_pass_accept_req processing.
	 * The math here assumes sizeof cpl_pass_accept_req >= sizeof
	 * cpl_rx_pkt.
	 */
	skb = alloc_skb(gl->tot_len + sizeof(struct cpl_pass_accept_req) +
			sizeof(struct rss_header) - pktshift, GFP_ATOMIC);
	if (unlikely(!skb))
		return NULL;

	__skb_put(skb, gl->tot_len + sizeof(struct cpl_pass_accept_req) +
		   sizeof(struct rss_header) - pktshift);

	/*
	 * This skb will contain:
	 *   rss_header from the rspq descriptor (1 flit)
	 *   cpl_rx_pkt struct from the rspq descriptor (2 flits)
	 *   space for the difference between the size of an
	 *      rx_pkt and pass_accept_req cpl (1 flit)
	 *   the packet data from the gl
	 */
	skb_copy_to_linear_data(skb, rsp, sizeof(struct cpl_pass_accept_req) +
				sizeof(struct rss_header));
	skb_copy_to_linear_data_offset(skb, sizeof(struct rss_header) +
				       sizeof(struct cpl_pass_accept_req),
				       gl->va + pktshift,
				       gl->tot_len - pktshift);
	return skb;
}

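/*
 * Handle a CPL_RX_PKT delivered to the RDMA queues: copy the gather list
 * into an skb shaped like a cpl_pass_accept_req (see copy_gl_to_skb_pkt()
 * above) and hand it to the registered CPL handler.  Returns 1 if consumed.
 */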
static inline int recv_rx_pkt(struct c4iw_dev *dev, const struct pkt_gl *gl,
			      const __be64 *rsp)
{
	unsigned int opcode = *(u8 *)rsp;
	struct sk_buff *skb;

	if (opcode != CPL_RX_PKT)
		goto out;

	skb = copy_gl_to_skb_pkt(gl, rsp, dev->rdev.lldi.sge_pktshift);
	if (skb == NULL)
		goto out;

	if (c4iw_handlers[opcode] == NULL) {
		pr_info("%s no handler opcode 0x%x...\n", __func__,
			opcode);
		kfree_skb(skb);
		goto out;
	}
	c4iw_handlers[opcode](dev, skb);
	return 1;
out:
	return 0;
}

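/*
 * ULD rx handler.  Four cases: a bare response descriptor (gl == NULL), an
 * asynchronous notification (gl == CXGB4_MSG_AN), a free-list buffer whose
 * first byte does not match the response opcode (possibly a CPL_RX_PKT to be
 * rewritten by recv_rx_pkt() above), or a normal packet gather list.
 */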
static int c4iw_uld_rx_handler(void *handle, const __be64 *rsp,
			       const struct pkt_gl *gl)
{
	struct uld_ctx *ctx = handle;
	struct c4iw_dev *dev = ctx->dev;
	struct sk_buff *skb;
	u8 opcode;

	if (gl == NULL) {
		/* omit RSS and rsp_ctrl at end of descriptor */
		unsigned int len = 64 - sizeof(struct rsp_ctrl) - 8;

		skb = alloc_skb(256, GFP_ATOMIC);
		if (!skb)
			goto nomem;
		__skb_put(skb, len);
		skb_copy_to_linear_data(skb, &rsp[1], len);
	} else if (gl == CXGB4_MSG_AN) {
		const struct rsp_ctrl *rc = (void *)rsp;

		u32 qid = be32_to_cpu(rc->pldbuflen_qid);
		c4iw_ev_handler(dev, qid);
		return 0;
	} else if (unlikely(*(u8 *)rsp != *(u8 *)gl->va)) {
		if (recv_rx_pkt(dev, gl, rsp))
			return 0;

		pr_info("%s: unexpected FL contents at %p, " \
			"RSS %#llx, FL %#llx, len %u\n",
			pci_name(ctx->lldi.pdev), gl->va,
			(unsigned long long)be64_to_cpu(*rsp),
			(unsigned long long)be64_to_cpu(
				*(__force __be64 *)gl->va),
			gl->tot_len);

		goto nomem;
	} else {
		skb = cxgb4_pktgl_to_skb(gl, 128, 128);
		if (unlikely(!skb))
			goto nomem;
	}

	opcode = *(u8 *)rsp;
	if (c4iw_handlers[opcode])
		c4iw_handlers[opcode](dev, skb);
	else
		pr_info("%s no handler opcode 0x%x...\n", __func__,
			opcode);

	return 0;
nomem:
	return -1;
}

static int c4iw_uld_state_change(void *handle, enum cxgb4_state new_state)
{
	struct uld_ctx *ctx = handle;

	PDBG("%s new_state %u\n", __func__, new_state);
	switch (new_state) {
	case CXGB4_STATE_UP:
		printk(KERN_INFO MOD "%s: Up\n", pci_name(ctx->lldi.pdev));
		if (!ctx->dev) {
			int ret;

			ctx->dev = c4iw_alloc(&ctx->lldi);
			if (IS_ERR(ctx->dev)) {
				printk(KERN_ERR MOD
				       "%s: initialization failed: %ld\n",
				       pci_name(ctx->lldi.pdev),
				       PTR_ERR(ctx->dev));
				ctx->dev = NULL;
				break;
			}
			ret = c4iw_register_device(ctx->dev);
			if (ret) {
				printk(KERN_ERR MOD
				       "%s: RDMA registration failed: %d\n",
				       pci_name(ctx->lldi.pdev), ret);
				c4iw_dealloc(ctx);
			}
		}
		break;
	case CXGB4_STATE_DOWN:
		printk(KERN_INFO MOD "%s: Down\n",
		       pci_name(ctx->lldi.pdev));
		if (ctx->dev)
			c4iw_remove(ctx);
		break;
	case CXGB4_STATE_START_RECOVERY:
		printk(KERN_INFO MOD "%s: Fatal Error\n",
		       pci_name(ctx->lldi.pdev));
		if (ctx->dev) {
			struct ib_event event;

			ctx->dev->rdev.flags |= T4_FATAL_ERROR;
			memset(&event, 0, sizeof event);
			event.event  = IB_EVENT_DEVICE_FATAL;
			event.device = &ctx->dev->ibdev;
			ib_dispatch_event(&event);
			c4iw_remove(ctx);
		}
		break;
	case CXGB4_STATE_DETACH:
		printk(KERN_INFO MOD "%s: Detach\n",
		       pci_name(ctx->lldi.pdev));
		if (ctx->dev)
			c4iw_remove(ctx);
		break;
	}
	return 0;
}

static int disable_qp_db(int id, void *p, void *data)
{
	struct c4iw_qp *qp = p;

	t4_disable_wq_db(&qp->wq);
	return 0;
}

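/*
 * Doorbell flow control: on CXGB4_CONTROL_DB_FULL the user doorbells of all
 * QPs are disabled and the device moves to FLOW_CONTROL; resume_queues()
 * re-enables them once the QP count is back under db_fc_threshold.
 */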
static void stop_queues(struct uld_ctx *ctx)
{
	spin_lock_irq(&ctx->dev->lock);
	if (ctx->dev->db_state == NORMAL) {
		ctx->dev->rdev.stats.db_state_transitions++;
		ctx->dev->db_state = FLOW_CONTROL;
		idr_for_each(&ctx->dev->qpidr, disable_qp_db, NULL);
	}
	spin_unlock_irq(&ctx->dev->lock);
}

static int enable_qp_db(int id, void *p, void *data)
{
	struct c4iw_qp *qp = p;

	t4_enable_wq_db(&qp->wq);
	return 0;
}

static void resume_queues(struct uld_ctx *ctx)
{
	spin_lock_irq(&ctx->dev->lock);
	if (ctx->dev->qpcnt <= db_fc_threshold &&
	    ctx->dev->db_state == FLOW_CONTROL) {
		ctx->dev->db_state = NORMAL;
		ctx->dev->rdev.stats.db_state_transitions++;
		idr_for_each(&ctx->dev->qpidr, enable_qp_db, NULL);
	}
	spin_unlock_irq(&ctx->dev->lock);
}

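/*
 * Snapshot of all active QPs, built under the device lock, so that doorbell
 * recovery can walk them later from a sleepable context.
 */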
struct qp_list {
	unsigned idx;
	struct c4iw_qp **qps;
};

static int add_and_ref_qp(int id, void *p, void *data)
{
	struct qp_list *qp_listp = data;
	struct c4iw_qp *qp = p;

	c4iw_qp_add_ref(&qp->ibqp);
	qp_listp->qps[qp_listp->idx++] = qp;
	return 0;
}

static int count_qps(int id, void *p, void *data)
{
	unsigned *countp = data;
	(*countp)++;
	return 0;
}

static void deref_qps(struct qp_list qp_list)
{
	int idx;

	for (idx = 0; idx < qp_list.idx; idx++)
		c4iw_qp_rem_ref(&qp_list.qps[idx]->ibqp);
}

static void recover_lost_dbs(struct uld_ctx *ctx, struct qp_list *qp_list)
{
	int idx;
	int ret;

	for (idx = 0; idx < qp_list->idx; idx++) {
		struct c4iw_qp *qp = qp_list->qps[idx];

		ret = cxgb4_sync_txq_pidx(qp->rhp->rdev.lldi.ports[0],
					  qp->wq.sq.qid,
					  t4_sq_host_wq_pidx(&qp->wq),
					  t4_sq_wq_size(&qp->wq));
		if (ret) {
			printk(KERN_ERR MOD "%s: Fatal error - "
			       "DB overflow recovery failed - "
			       "error syncing SQ qid %u\n",
			       pci_name(ctx->lldi.pdev), qp->wq.sq.qid);
			return;
		}

		ret = cxgb4_sync_txq_pidx(qp->rhp->rdev.lldi.ports[0],
					  qp->wq.rq.qid,
					  t4_rq_host_wq_pidx(&qp->wq),
					  t4_rq_wq_size(&qp->wq));
		if (ret) {
			printk(KERN_ERR MOD "%s: Fatal error - "
			       "DB overflow recovery failed - "
			       "error syncing RQ qid %u\n",
			       pci_name(ctx->lldi.pdev), qp->wq.rq.qid);
			return;
		}

		/* Wait for the dbfifo to drain */
		while (cxgb4_dbfifo_count(qp->rhp->rdev.lldi.ports[0], 1) > 0) {
			set_current_state(TASK_UNINTERRUPTIBLE);
			schedule_timeout(usecs_to_jiffies(10));
		}
	}
}

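/*
 * CXGB4_CONTROL_DB_DROP recovery: disable all user doorbells, let the
 * doorbell FIFO drain, flush the SGE EQ cache, then resync the host and
 * hardware producer indices for every QP before re-enabling doorbells.
 */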
static void recover_queues(struct uld_ctx *ctx)
{
	int count = 0;
	struct qp_list qp_list;
	int ret;

	/* lock out kernel db ringers */
	mutex_lock(&ctx->dev->db_mutex);

	/* put all queues in to recovery mode */
	spin_lock_irq(&ctx->dev->lock);
	ctx->dev->db_state = RECOVERY;
	ctx->dev->rdev.stats.db_state_transitions++;
	idr_for_each(&ctx->dev->qpidr, disable_qp_db, NULL);
	spin_unlock_irq(&ctx->dev->lock);

	/* slow everybody down */
	set_current_state(TASK_UNINTERRUPTIBLE);
	schedule_timeout(usecs_to_jiffies(1000));

	/* Wait for the dbfifo to completely drain. */
	while (cxgb4_dbfifo_count(ctx->dev->rdev.lldi.ports[0], 1) > 0) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(usecs_to_jiffies(10));
	}

	/* flush the SGE contexts */
	ret = cxgb4_flush_eq_cache(ctx->dev->rdev.lldi.ports[0]);
	if (ret) {
		printk(KERN_ERR MOD "%s: Fatal error - DB overflow recovery failed\n",
		       pci_name(ctx->lldi.pdev));
		goto out;
	}

	/* Count active queues so we can build a list of queues to recover */
	spin_lock_irq(&ctx->dev->lock);
	idr_for_each(&ctx->dev->qpidr, count_qps, &count);

	qp_list.qps = kzalloc(count * sizeof *qp_list.qps, GFP_ATOMIC);
	if (!qp_list.qps) {
		printk(KERN_ERR MOD "%s: Fatal error - DB overflow recovery failed\n",
		       pci_name(ctx->lldi.pdev));
		spin_unlock_irq(&ctx->dev->lock);
		goto out;
	}
	qp_list.idx = 0;

	/* add and ref each qp so it doesn't get freed */
	idr_for_each(&ctx->dev->qpidr, add_and_ref_qp, &qp_list);

	spin_unlock_irq(&ctx->dev->lock);

	/* now traverse the list in a safe context to recover the db state*/
	recover_lost_dbs(ctx, &qp_list);

	/* we're almost done! deref the qps and clean up */
	deref_qps(qp_list);
	kfree(qp_list.qps);

	/* Wait for the dbfifo to completely drain again */
	while (cxgb4_dbfifo_count(ctx->dev->rdev.lldi.ports[0], 1) > 0) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(usecs_to_jiffies(10));
	}

	/* resume the queues */
	spin_lock_irq(&ctx->dev->lock);
	if (ctx->dev->qpcnt > db_fc_threshold)
		ctx->dev->db_state = FLOW_CONTROL;
	else {
		ctx->dev->db_state = NORMAL;
		idr_for_each(&ctx->dev->qpidr, enable_qp_db, NULL);
	}
	ctx->dev->rdev.stats.db_state_transitions++;
	spin_unlock_irq(&ctx->dev->lock);

out:
	/* start up kernel db ringers again */
	mutex_unlock(&ctx->dev->db_mutex);
}

static int c4iw_uld_control(void *handle, enum cxgb4_control control, ...)
{
	struct uld_ctx *ctx = handle;

	switch (control) {
	case CXGB4_CONTROL_DB_FULL:
		stop_queues(ctx);
		mutex_lock(&ctx->dev->rdev.stats.lock);
		ctx->dev->rdev.stats.db_full++;
		mutex_unlock(&ctx->dev->rdev.stats.lock);
		break;
	case CXGB4_CONTROL_DB_EMPTY:
		resume_queues(ctx);
		mutex_lock(&ctx->dev->rdev.stats.lock);
		ctx->dev->rdev.stats.db_empty++;
		mutex_unlock(&ctx->dev->rdev.stats.lock);
		break;
	case CXGB4_CONTROL_DB_DROP:
		recover_queues(ctx);
		mutex_lock(&ctx->dev->rdev.stats.lock);
		ctx->dev->rdev.stats.db_drop++;
		mutex_unlock(&ctx->dev->rdev.stats.lock);
		break;
	default:
		printk(KERN_WARNING MOD "%s: unknown control cmd %u\n",
		       pci_name(ctx->lldi.pdev), control);
		break;
	}
	return 0;
}

static struct cxgb4_uld_info c4iw_uld_info = {
	.name = DRV_NAME,
	.add = c4iw_uld_add,
	.rx_handler = c4iw_uld_rx_handler,
	.state_change = c4iw_uld_state_change,
	.control = c4iw_uld_control,
};

static int __init c4iw_init_module(void)
{
	int err;

	err = c4iw_cm_init();
	if (err)
		return err;

	c4iw_debugfs_root = debugfs_create_dir(DRV_NAME, NULL);
	if (!c4iw_debugfs_root)
		printk(KERN_WARNING MOD
		       "could not create debugfs entry, continuing\n");

	cxgb4_register_uld(CXGB4_ULD_RDMA, &c4iw_uld_info);

	return 0;
}

static void __exit c4iw_exit_module(void)
{
	struct uld_ctx *ctx, *tmp;

	mutex_lock(&dev_mutex);
	list_for_each_entry_safe(ctx, tmp, &uld_ctx_list, entry) {
		if (ctx->dev)
			c4iw_remove(ctx);
		kfree(ctx);
	}
	mutex_unlock(&dev_mutex);
	cxgb4_unregister_uld(CXGB4_ULD_RDMA);
	c4iw_cm_term();
	debugfs_remove_recursive(c4iw_debugfs_root);
}

module_init(c4iw_init_module);
module_exit(c4iw_exit_module);