/*
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2012 Intel Corporation. All rights reserved.
 * Copyright (C) 2015 EMC Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * BSD LICENSE
 *
 * Copyright(c) 2012 Intel Corporation. All rights reserved.
 * Copyright (C) 2015 EMC Corporation. All Rights Reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * PCIe NTB Transport Linux driver
 *
 * Contact Information:
 * Jon Mason <jon.mason@intel.com>
 */
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include "linux/ntb.h"
#include "linux/ntb_transport.h"
#define NTB_TRANSPORT_VERSION	4
#define NTB_TRANSPORT_VER	"4"
#define NTB_TRANSPORT_NAME	"ntb_transport"
#define NTB_TRANSPORT_DESC	"Software Queue-Pair Transport over NTB"

MODULE_DESCRIPTION(NTB_TRANSPORT_DESC);
MODULE_VERSION(NTB_TRANSPORT_VER);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Intel Corporation");
static unsigned long max_mw_size;
module_param(max_mw_size, ulong, 0644);
MODULE_PARM_DESC(max_mw_size, "Limit size of large memory windows");

static unsigned int transport_mtu = 0x10000;
module_param(transport_mtu, uint, 0644);
MODULE_PARM_DESC(transport_mtu, "Maximum size of NTB transport packets");

static unsigned char max_num_clients;
module_param(max_num_clients, byte, 0644);
MODULE_PARM_DESC(max_num_clients, "Maximum number of NTB transport clients");

static unsigned int copy_bytes = 1024;
module_param(copy_bytes, uint, 0644);
MODULE_PARM_DESC(copy_bytes, "Threshold under which NTB will use the CPU to copy instead of DMA");

static bool use_dma;
module_param(use_dma, bool, 0644);
MODULE_PARM_DESC(use_dma, "Use DMA engine to perform large data copy");

static struct dentry *nt_debugfs_dir;
struct ntb_queue_entry {
	/* ntb_queue list reference */
	struct list_head entry;
	/* pointers to data to be transferred */
	void *cb_data;
	void *buf;
	unsigned int len;
	unsigned int flags;

	struct ntb_transport_qp *qp;
	union {
		struct ntb_payload_header __iomem *tx_hdr;
		struct ntb_payload_header *rx_hdr;
	};
	unsigned int index;
};

struct ntb_rx_info {
	unsigned int entry;
};
struct ntb_transport_qp {
	struct ntb_transport_ctx *transport;
	struct ntb_dev *ndev;
	void *cb_data;
	struct dma_chan *dma_chan;

	bool client_ready;
	bool link_is_up;

	u8 qp_num;	/* Only 64 QP's are allowed.  0-63 */
	u64 qp_bit;

	struct ntb_rx_info __iomem *rx_info;
	struct ntb_rx_info *remote_rx_info;

	void (*tx_handler)(struct ntb_transport_qp *qp, void *qp_data,
			   void *data, int len);
	struct list_head tx_free_q;
	spinlock_t ntb_tx_free_q_lock;
	void __iomem *tx_mw;
	dma_addr_t tx_mw_phys;
	unsigned int tx_index;
	unsigned int tx_max_entry;
	unsigned int tx_max_frame;

	void (*rx_handler)(struct ntb_transport_qp *qp, void *qp_data,
			   void *data, int len);
	struct list_head rx_post_q;
	struct list_head rx_pend_q;
	struct list_head rx_free_q;
	/* ntb_rx_q_lock: synchronize access to rx_XXXX_q */
	spinlock_t ntb_rx_q_lock;
	void *rx_buff;
	unsigned int rx_index;
	unsigned int rx_max_entry;
	unsigned int rx_max_frame;
	dma_cookie_t last_cookie;
	struct tasklet_struct rxc_db_work;

	void (*event_handler)(void *data, int status);
	struct delayed_work link_work;
	struct work_struct link_cleanup;

	struct dentry *debugfs_dir;
	struct dentry *debugfs_stats;

	/* statistics counters exposed through debugfs */
	u64 rx_bytes;
	u64 rx_pkts;
	u64 rx_ring_empty;
	u64 rx_err_no_buf;
	u64 rx_err_oflow;
	u64 rx_err_ver;
	u64 rx_memcpy;
	u64 rx_async;
	u64 tx_bytes;
	u64 tx_pkts;
	u64 tx_ring_full;
	u64 tx_err_no_buf;
	u64 tx_memcpy;
	u64 tx_async;
};
struct ntb_transport_mw {
	phys_addr_t phys_addr;
	resource_size_t phys_size;
	resource_size_t xlat_align;
	resource_size_t xlat_align_size;
	void __iomem *vbase;
	size_t xlat_size;
	size_t buff_size;
	void *virt_addr;
	dma_addr_t dma_addr;
};

struct ntb_transport_client_dev {
	struct list_head entry;
	struct ntb_transport_ctx *nt;
	struct device dev;
};

struct ntb_transport_ctx {
	struct list_head entry;
	struct list_head client_devs;

	struct ntb_dev *ndev;

	struct ntb_transport_mw *mw_vec;
	struct ntb_transport_qp *qp_vec;
	unsigned int mw_count;
	unsigned int qp_count;
	u64 qp_bitmap;
	u64 qp_bitmap_free;

	bool link_is_up;
	struct delayed_work link_work;
	struct work_struct link_cleanup;

	struct dentry *debugfs_node_dir;
};

enum {
	DESC_DONE_FLAG = BIT(0),
	LINK_DOWN_FLAG = BIT(1),
};

struct ntb_payload_header {
	unsigned int ver;
	unsigned int len;
	unsigned int flags;
};

enum {
	VERSION = 0,
	QP_LINKS,
	NUM_QPS,
	NUM_MWS,
	MW0_SZ_HIGH,
	MW0_SZ_LOW,
	MW1_SZ_HIGH,
	MW1_SZ_LOW,
	MAX_SPAD,
};

#define dev_ntb(__dev) container_of((__dev), struct ntb_dev, dev)
#define dev_client_dev(__dev) \
	container_of((__dev), struct ntb_transport_client_dev, dev)

#define drv_client(__drv) \
	container_of((__drv), struct ntb_transport_client, driver)

#define QP_TO_MW(nt, qp)	((qp) % nt->mw_count)
#define NTB_QP_DEF_NUM_ENTRIES	100
#define NTB_LINK_DOWN_TIMEOUT	10
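/*
 * Worked example of the QP_TO_MW() mapping above (illustrative numbers
 * only): with nt->mw_count == 2 and four queue pairs,
 *
 *	QP_TO_MW(nt, 0) == 0	QP_TO_MW(nt, 1) == 1
 *	QP_TO_MW(nt, 2) == 0	QP_TO_MW(nt, 3) == 1
 *
 * i.e. queue pairs are striped across the memory windows by taking the
 * queue pair number modulo the memory window count.
 */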
static void ntb_transport_rxc_db(unsigned long data);
static const struct ntb_ctx_ops ntb_transport_ops;
static struct ntb_client ntb_transport_client;
static int ntb_transport_bus_match(struct device *dev,
				   struct device_driver *drv)
{
	return !strncmp(dev_name(dev), drv->name, strlen(drv->name));
}

static int ntb_transport_bus_probe(struct device *dev)
{
	const struct ntb_transport_client *client;
	int rc = -EINVAL;

	get_device(dev);

	client = drv_client(dev->driver);
	rc = client->probe(dev);
	if (rc)
		put_device(dev);

	return rc;
}

static int ntb_transport_bus_remove(struct device *dev)
{
	const struct ntb_transport_client *client;

	client = drv_client(dev->driver);
	client->remove(dev);

	put_device(dev);

	return 0;
}

static struct bus_type ntb_transport_bus = {
	.name = "ntb_transport",
	.match = ntb_transport_bus_match,
	.probe = ntb_transport_bus_probe,
	.remove = ntb_transport_bus_remove,
};

static LIST_HEAD(ntb_transport_list);
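/*
 * Example of the bus match rule above (illustrative only): a client
 * driver whose drv->name is "ntb_netdev" binds to any device that
 * ntb_transport_register_client_dev() named "ntb_netdev0",
 * "ntb_netdev1", ..., because only the leading strlen(drv->name)
 * characters of the device name are compared.
 */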
298 static int ntb_bus_init(struct ntb_transport_ctx
*nt
)
300 list_add(&nt
->entry
, &ntb_transport_list
);
304 static void ntb_bus_remove(struct ntb_transport_ctx
*nt
)
306 struct ntb_transport_client_dev
*client_dev
, *cd
;
308 list_for_each_entry_safe(client_dev
, cd
, &nt
->client_devs
, entry
) {
309 dev_err(client_dev
->dev
.parent
, "%s still attached to bus, removing\n",
310 dev_name(&client_dev
->dev
));
311 list_del(&client_dev
->entry
);
312 device_unregister(&client_dev
->dev
);
315 list_del(&nt
->entry
);
318 static void ntb_transport_client_release(struct device
*dev
)
320 struct ntb_transport_client_dev
*client_dev
;
322 client_dev
= dev_client_dev(dev
);
327 * ntb_transport_unregister_client_dev - Unregister NTB client device
328 * @device_name: Name of NTB client device
330 * Unregister an NTB client device with the NTB transport layer
332 void ntb_transport_unregister_client_dev(char *device_name
)
334 struct ntb_transport_client_dev
*client
, *cd
;
335 struct ntb_transport_ctx
*nt
;
337 list_for_each_entry(nt
, &ntb_transport_list
, entry
)
338 list_for_each_entry_safe(client
, cd
, &nt
->client_devs
, entry
)
339 if (!strncmp(dev_name(&client
->dev
), device_name
,
340 strlen(device_name
))) {
341 list_del(&client
->entry
);
342 device_unregister(&client
->dev
);
345 EXPORT_SYMBOL_GPL(ntb_transport_unregister_client_dev
);
348 * ntb_transport_register_client_dev - Register NTB client device
349 * @device_name: Name of NTB client device
351 * Register an NTB client device with the NTB transport layer
353 int ntb_transport_register_client_dev(char *device_name
)
355 struct ntb_transport_client_dev
*client_dev
;
356 struct ntb_transport_ctx
*nt
;
360 if (list_empty(&ntb_transport_list
))
363 list_for_each_entry(nt
, &ntb_transport_list
, entry
) {
366 node
= dev_to_node(&nt
->ndev
->dev
);
368 client_dev
= kzalloc_node(sizeof(*client_dev
),
375 dev
= &client_dev
->dev
;
377 /* setup and register client devices */
378 dev_set_name(dev
, "%s%d", device_name
, i
);
379 dev
->bus
= &ntb_transport_bus
;
380 dev
->release
= ntb_transport_client_release
;
381 dev
->parent
= &nt
->ndev
->dev
;
383 rc
= device_register(dev
);
389 list_add_tail(&client_dev
->entry
, &nt
->client_devs
);
396 ntb_transport_unregister_client_dev(device_name
);
400 EXPORT_SYMBOL_GPL(ntb_transport_register_client_dev
);
403 * ntb_transport_register_client - Register NTB client driver
404 * @drv: NTB client driver to be registered
406 * Register an NTB client driver with the NTB transport layer
408 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
410 int ntb_transport_register_client(struct ntb_transport_client
*drv
)
412 drv
->driver
.bus
= &ntb_transport_bus
;
414 if (list_empty(&ntb_transport_list
))
417 return driver_register(&drv
->driver
);
419 EXPORT_SYMBOL_GPL(ntb_transport_register_client
);
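/*
 * Minimal client registration sketch (illustrative only; "example_client",
 * example_probe() and example_remove() are hypothetical names, not part of
 * this driver).  A client typically registers a named device on every
 * transport instance and then registers its driver on the transport bus:
 *
 *	static struct ntb_transport_client example_client = {
 *		.driver = {
 *			.name = "example_client",
 *			.owner = THIS_MODULE,
 *		},
 *		.probe = example_probe,
 *		.remove = example_remove,
 *	};
 *
 *	static int __init example_init(void)
 *	{
 *		int rc;
 *
 *		rc = ntb_transport_register_client_dev("example_client");
 *		if (rc)
 *			return rc;
 *		rc = ntb_transport_register_client(&example_client);
 *		if (rc)
 *			ntb_transport_unregister_client_dev("example_client");
 *		return rc;
 *	}
 */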
422 * ntb_transport_unregister_client - Unregister NTB client driver
423 * @drv: NTB client driver to be unregistered
425 * Unregister an NTB client driver with the NTB transport layer
427 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
429 void ntb_transport_unregister_client(struct ntb_transport_client
*drv
)
431 driver_unregister(&drv
->driver
);
433 EXPORT_SYMBOL_GPL(ntb_transport_unregister_client
);
static ssize_t debugfs_read(struct file *filp, char __user *ubuf, size_t count,
			    loff_t *offp)
{
	struct ntb_transport_qp *qp;
	char *buf;
	ssize_t ret, out_offset, out_count;

	qp = filp->private_data;

	if (!qp || !qp->link_is_up)
		return 0;

	out_count = 600;

	buf = kmalloc(out_count, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	out_offset = 0;
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "NTB QP stats\n");
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_bytes - \t%llu\n", qp->rx_bytes);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_pkts - \t%llu\n", qp->rx_pkts);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_memcpy - \t%llu\n", qp->rx_memcpy);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_async - \t%llu\n", qp->rx_async);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_ring_empty - %llu\n", qp->rx_ring_empty);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_err_no_buf - %llu\n", qp->rx_err_no_buf);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_err_oflow - \t%llu\n", qp->rx_err_oflow);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_err_ver - \t%llu\n", qp->rx_err_ver);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_buff - \t%p\n", qp->rx_buff);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_index - \t%u\n", qp->rx_index);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_max_entry - \t%u\n", qp->rx_max_entry);

	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_bytes - \t%llu\n", qp->tx_bytes);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_pkts - \t%llu\n", qp->tx_pkts);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_memcpy - \t%llu\n", qp->tx_memcpy);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_async - \t%llu\n", qp->tx_async);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_ring_full - \t%llu\n", qp->tx_ring_full);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_err_no_buf - %llu\n", qp->tx_err_no_buf);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_mw - \t%p\n", qp->tx_mw);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_index - \t%u\n", qp->tx_index);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_max_entry - \t%u\n", qp->tx_max_entry);

	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "\nQP Link %s\n",
			       qp->link_is_up ? "Up" : "Down");
	if (out_offset > out_count)
		out_offset = out_count;

	ret = simple_read_from_buffer(ubuf, count, offp, buf, out_offset);
	kfree(buf);
	return ret;
}

static const struct file_operations ntb_qp_debugfs_stats = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.read = debugfs_read,
};
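/*
 * The statistics formatted above are exposed through debugfs.  A typical
 * path looks like the following (the PCI device name is system specific
 * and shown only for illustration):
 *
 *	# cat /sys/kernel/debug/ntb_transport/0000:02:00.1/qp0/stats
 */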
515 static void ntb_list_add(spinlock_t
*lock
, struct list_head
*entry
,
516 struct list_head
*list
)
520 spin_lock_irqsave(lock
, flags
);
521 list_add_tail(entry
, list
);
522 spin_unlock_irqrestore(lock
, flags
);
525 static struct ntb_queue_entry
*ntb_list_rm(spinlock_t
*lock
,
526 struct list_head
*list
)
528 struct ntb_queue_entry
*entry
;
531 spin_lock_irqsave(lock
, flags
);
532 if (list_empty(list
)) {
536 entry
= list_first_entry(list
, struct ntb_queue_entry
, entry
);
537 list_del(&entry
->entry
);
539 spin_unlock_irqrestore(lock
, flags
);
544 static struct ntb_queue_entry
*ntb_list_mv(spinlock_t
*lock
,
545 struct list_head
*list
,
546 struct list_head
*to_list
)
548 struct ntb_queue_entry
*entry
;
551 spin_lock_irqsave(lock
, flags
);
553 if (list_empty(list
)) {
556 entry
= list_first_entry(list
, struct ntb_queue_entry
, entry
);
557 list_move_tail(&entry
->entry
, to_list
);
560 spin_unlock_irqrestore(lock
, flags
);
static int ntb_transport_setup_qp_mw(struct ntb_transport_ctx *nt,
				     unsigned int qp_num)
{
	struct ntb_transport_qp *qp = &nt->qp_vec[qp_num];
	struct ntb_transport_mw *mw;
	unsigned int rx_size, num_qps_mw;
	unsigned int mw_num, mw_count, qp_count;
	unsigned int i;

	mw_count = nt->mw_count;
	qp_count = nt->qp_count;

	mw_num = QP_TO_MW(nt, qp_num);
	mw = &nt->mw_vec[mw_num];

	if (!mw->virt_addr)
		return -ENOMEM;

	if (qp_count % mw_count && mw_num + 1 < qp_count / mw_count)
		num_qps_mw = qp_count / mw_count + 1;
	else
		num_qps_mw = qp_count / mw_count;

	rx_size = (unsigned int)mw->xlat_size / num_qps_mw;
	qp->rx_buff = mw->virt_addr + rx_size * qp_num / mw_count;
	rx_size -= sizeof(struct ntb_rx_info);

	qp->remote_rx_info = qp->rx_buff + rx_size;

	/* Due to housekeeping, there must be at least 2 buffs */
	qp->rx_max_frame = min(transport_mtu, rx_size / 2);
	qp->rx_max_entry = rx_size / qp->rx_max_frame;
	qp->rx_index = 0;

	qp->remote_rx_info->entry = qp->rx_max_entry - 1;

	/* setup the hdr offsets with 0's */
	for (i = 0; i < qp->rx_max_entry; i++) {
		void *offset = (qp->rx_buff + qp->rx_max_frame * (i + 1) -
				sizeof(struct ntb_payload_header));
		memset(offset, 0, sizeof(struct ntb_payload_header));
	}

	return 0;
}
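/*
 * Worked example of the receive ring carving above (illustrative numbers
 * only): if a queue pair is given a 64 KB (0x10000) slice of the memory
 * window, rx_size becomes 0x10000 - sizeof(struct ntb_rx_info).  With the
 * default transport_mtu of 0x10000, rx_max_frame is clamped to rx_size / 2,
 * so rx_max_entry is 2: two frames, each ending in a struct
 * ntb_payload_header that the peer fills in to signal completion.
 */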
615 static void ntb_free_mw(struct ntb_transport_ctx
*nt
, int num_mw
)
617 struct ntb_transport_mw
*mw
= &nt
->mw_vec
[num_mw
];
618 struct pci_dev
*pdev
= nt
->ndev
->pdev
;
623 ntb_mw_clear_trans(nt
->ndev
, num_mw
);
624 dma_free_coherent(&pdev
->dev
, mw
->buff_size
,
625 mw
->virt_addr
, mw
->dma_addr
);
628 mw
->virt_addr
= NULL
;
631 static int ntb_set_mw(struct ntb_transport_ctx
*nt
, int num_mw
,
632 resource_size_t size
)
634 struct ntb_transport_mw
*mw
= &nt
->mw_vec
[num_mw
];
635 struct pci_dev
*pdev
= nt
->ndev
->pdev
;
636 size_t xlat_size
, buff_size
;
642 xlat_size
= round_up(size
, mw
->xlat_align_size
);
643 buff_size
= round_up(size
, mw
->xlat_align
);
645 /* No need to re-setup */
646 if (mw
->xlat_size
== xlat_size
)
650 ntb_free_mw(nt
, num_mw
);
652 /* Alloc memory for receiving data. Must be aligned */
653 mw
->xlat_size
= xlat_size
;
654 mw
->buff_size
= buff_size
;
656 mw
->virt_addr
= dma_alloc_coherent(&pdev
->dev
, buff_size
,
657 &mw
->dma_addr
, GFP_KERNEL
);
658 if (!mw
->virt_addr
) {
661 dev_err(&pdev
->dev
, "Unable to alloc MW buff of size %zu\n",
667 * we must ensure that the memory address allocated is BAR size
668 * aligned in order for the XLAT register to take the value. This
669 * is a requirement of the hardware. It is recommended to setup CMA
670 * for BAR sizes equal or greater than 4MB.
672 if (!IS_ALIGNED(mw
->dma_addr
, mw
->xlat_align
)) {
673 dev_err(&pdev
->dev
, "DMA memory %pad is not aligned\n",
675 ntb_free_mw(nt
, num_mw
);
679 /* Notify HW the memory location of the receive buffer */
680 rc
= ntb_mw_set_trans(nt
->ndev
, num_mw
, mw
->dma_addr
, mw
->xlat_size
);
682 dev_err(&pdev
->dev
, "Unable to set mw%d translation", num_mw
);
683 ntb_free_mw(nt
, num_mw
);
690 static void ntb_qp_link_down_reset(struct ntb_transport_qp
*qp
)
692 qp
->link_is_up
= false;
698 qp
->rx_ring_empty
= 0;
699 qp
->rx_err_no_buf
= 0;
700 qp
->rx_err_oflow
= 0;
706 qp
->tx_ring_full
= 0;
707 qp
->tx_err_no_buf
= 0;
712 static void ntb_qp_link_cleanup(struct ntb_transport_qp
*qp
)
714 struct ntb_transport_ctx
*nt
= qp
->transport
;
715 struct pci_dev
*pdev
= nt
->ndev
->pdev
;
717 dev_info(&pdev
->dev
, "qp %d: Link Cleanup\n", qp
->qp_num
);
719 cancel_delayed_work_sync(&qp
->link_work
);
720 ntb_qp_link_down_reset(qp
);
722 if (qp
->event_handler
)
723 qp
->event_handler(qp
->cb_data
, qp
->link_is_up
);
726 static void ntb_qp_link_cleanup_work(struct work_struct
*work
)
728 struct ntb_transport_qp
*qp
= container_of(work
,
729 struct ntb_transport_qp
,
731 struct ntb_transport_ctx
*nt
= qp
->transport
;
733 ntb_qp_link_cleanup(qp
);
736 schedule_delayed_work(&qp
->link_work
,
737 msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT
));
740 static void ntb_qp_link_down(struct ntb_transport_qp
*qp
)
742 schedule_work(&qp
->link_cleanup
);
745 static void ntb_transport_link_cleanup(struct ntb_transport_ctx
*nt
)
747 struct ntb_transport_qp
*qp
;
751 qp_bitmap_alloc
= nt
->qp_bitmap
& ~nt
->qp_bitmap_free
;
753 /* Pass along the info to any clients */
754 for (i
= 0; i
< nt
->qp_count
; i
++)
755 if (qp_bitmap_alloc
& BIT_ULL(i
)) {
757 ntb_qp_link_cleanup(qp
);
758 cancel_work_sync(&qp
->link_cleanup
);
759 cancel_delayed_work_sync(&qp
->link_work
);
763 cancel_delayed_work_sync(&nt
->link_work
);
765 /* The scratchpad registers keep the values if the remote side
766 * goes down, blast them now to give them a sane value the next
767 * time they are accessed
769 for (i
= 0; i
< MAX_SPAD
; i
++)
770 ntb_spad_write(nt
->ndev
, i
, 0);
773 static void ntb_transport_link_cleanup_work(struct work_struct
*work
)
775 struct ntb_transport_ctx
*nt
=
776 container_of(work
, struct ntb_transport_ctx
, link_cleanup
);
778 ntb_transport_link_cleanup(nt
);
781 static void ntb_transport_event_callback(void *data
)
783 struct ntb_transport_ctx
*nt
= data
;
785 if (ntb_link_is_up(nt
->ndev
, NULL
, NULL
) == 1)
786 schedule_delayed_work(&nt
->link_work
, 0);
788 schedule_work(&nt
->link_cleanup
);
791 static void ntb_transport_link_work(struct work_struct
*work
)
793 struct ntb_transport_ctx
*nt
=
794 container_of(work
, struct ntb_transport_ctx
, link_work
.work
);
795 struct ntb_dev
*ndev
= nt
->ndev
;
796 struct pci_dev
*pdev
= ndev
->pdev
;
797 resource_size_t size
;
801 /* send the local info, in the opposite order of the way we read it */
802 for (i
= 0; i
< nt
->mw_count
; i
++) {
803 size
= nt
->mw_vec
[i
].phys_size
;
805 if (max_mw_size
&& size
> max_mw_size
)
808 spad
= MW0_SZ_HIGH
+ (i
* 2);
809 ntb_peer_spad_write(ndev
, spad
, (u32
)(size
>> 32));
811 spad
= MW0_SZ_LOW
+ (i
* 2);
812 ntb_peer_spad_write(ndev
, spad
, (u32
)size
);
815 ntb_peer_spad_write(ndev
, NUM_MWS
, nt
->mw_count
);
817 ntb_peer_spad_write(ndev
, NUM_QPS
, nt
->qp_count
);
819 ntb_peer_spad_write(ndev
, VERSION
, NTB_TRANSPORT_VERSION
);
821 /* Query the remote side for its info */
822 val
= ntb_spad_read(ndev
, VERSION
);
823 dev_dbg(&pdev
->dev
, "Remote version = %d\n", val
);
824 if (val
!= NTB_TRANSPORT_VERSION
)
827 val
= ntb_spad_read(ndev
, NUM_QPS
);
828 dev_dbg(&pdev
->dev
, "Remote max number of qps = %d\n", val
);
829 if (val
!= nt
->qp_count
)
832 val
= ntb_spad_read(ndev
, NUM_MWS
);
833 dev_dbg(&pdev
->dev
, "Remote number of mws = %d\n", val
);
834 if (val
!= nt
->mw_count
)
837 for (i
= 0; i
< nt
->mw_count
; i
++) {
840 val
= ntb_spad_read(ndev
, MW0_SZ_HIGH
+ (i
* 2));
841 val64
= (u64
)val
<< 32;
843 val
= ntb_spad_read(ndev
, MW0_SZ_LOW
+ (i
* 2));
846 dev_dbg(&pdev
->dev
, "Remote MW%d size = %#llx\n", i
, val64
);
848 rc
= ntb_set_mw(nt
, i
, val64
);
853 nt
->link_is_up
= true;
855 for (i
= 0; i
< nt
->qp_count
; i
++) {
856 struct ntb_transport_qp
*qp
= &nt
->qp_vec
[i
];
858 ntb_transport_setup_qp_mw(nt
, i
);
860 if (qp
->client_ready
)
861 schedule_delayed_work(&qp
->link_work
, 0);
867 for (i
= 0; i
< nt
->mw_count
; i
++)
870 if (ntb_link_is_up(ndev
, NULL
, NULL
) == 1)
871 schedule_delayed_work(&nt
->link_work
,
872 msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT
));
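/*
 * Scratchpad handshake sketch for ntb_transport_link_work() above (derived
 * from the reads and writes visible in the function; illustrative only):
 * each side writes VERSION, NUM_QPS, NUM_MWS and one MWn_SZ_HIGH/MWn_SZ_LOW
 * pair per memory window into the peer's scratchpads, then reads its own
 * scratchpads to learn the peer's values.  The transport link is only
 * declared up when the version, the queue pair count and the memory window
 * count all match.
 */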
875 static void ntb_qp_link_work(struct work_struct
*work
)
877 struct ntb_transport_qp
*qp
= container_of(work
,
878 struct ntb_transport_qp
,
880 struct pci_dev
*pdev
= qp
->ndev
->pdev
;
881 struct ntb_transport_ctx
*nt
= qp
->transport
;
884 WARN_ON(!nt
->link_is_up
);
886 val
= ntb_spad_read(nt
->ndev
, QP_LINKS
);
888 ntb_peer_spad_write(nt
->ndev
, QP_LINKS
, val
| BIT(qp
->qp_num
));
890 /* query remote spad for qp ready bits */
891 ntb_peer_spad_read(nt
->ndev
, QP_LINKS
);
892 dev_dbg_ratelimited(&pdev
->dev
, "Remote QP link status = %x\n", val
);
894 /* See if the remote side is up */
895 if (val
& BIT(qp
->qp_num
)) {
896 dev_info(&pdev
->dev
, "qp %d: Link Up\n", qp
->qp_num
);
897 qp
->link_is_up
= true;
899 if (qp
->event_handler
)
900 qp
->event_handler(qp
->cb_data
, qp
->link_is_up
);
902 tasklet_schedule(&qp
->rxc_db_work
);
903 } else if (nt
->link_is_up
)
904 schedule_delayed_work(&qp
->link_work
,
905 msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT
));
908 static int ntb_transport_init_queue(struct ntb_transport_ctx
*nt
,
911 struct ntb_transport_qp
*qp
;
912 struct ntb_transport_mw
*mw
;
914 resource_size_t mw_size
;
915 unsigned int num_qps_mw
, tx_size
;
916 unsigned int mw_num
, mw_count
, qp_count
;
919 mw_count
= nt
->mw_count
;
920 qp_count
= nt
->qp_count
;
922 mw_num
= QP_TO_MW(nt
, qp_num
);
923 mw
= &nt
->mw_vec
[mw_num
];
925 qp
= &nt
->qp_vec
[qp_num
];
929 qp
->client_ready
= false;
930 qp
->event_handler
= NULL
;
931 ntb_qp_link_down_reset(qp
);
933 if (qp_count
% mw_count
&& mw_num
+ 1 < qp_count
/ mw_count
)
934 num_qps_mw
= qp_count
/ mw_count
+ 1;
936 num_qps_mw
= qp_count
/ mw_count
;
938 mw_base
= nt
->mw_vec
[mw_num
].phys_addr
;
939 mw_size
= nt
->mw_vec
[mw_num
].phys_size
;
941 tx_size
= (unsigned int)mw_size
/ num_qps_mw
;
942 qp_offset
= tx_size
* qp_num
/ mw_count
;
944 qp
->tx_mw
= nt
->mw_vec
[mw_num
].vbase
+ qp_offset
;
948 qp
->tx_mw_phys
= mw_base
+ qp_offset
;
952 tx_size
-= sizeof(struct ntb_rx_info
);
953 qp
->rx_info
= qp
->tx_mw
+ tx_size
;
	/* Due to housekeeping, there must be at least 2 buffs */
956 qp
->tx_max_frame
= min(transport_mtu
, tx_size
/ 2);
957 qp
->tx_max_entry
= tx_size
/ qp
->tx_max_frame
;
959 if (nt
->debugfs_node_dir
) {
960 char debugfs_name
[4];
962 snprintf(debugfs_name
, 4, "qp%d", qp_num
);
963 qp
->debugfs_dir
= debugfs_create_dir(debugfs_name
,
964 nt
->debugfs_node_dir
);
966 qp
->debugfs_stats
= debugfs_create_file("stats", S_IRUSR
,
968 &ntb_qp_debugfs_stats
);
970 qp
->debugfs_dir
= NULL
;
971 qp
->debugfs_stats
= NULL
;
974 INIT_DELAYED_WORK(&qp
->link_work
, ntb_qp_link_work
);
975 INIT_WORK(&qp
->link_cleanup
, ntb_qp_link_cleanup_work
);
977 spin_lock_init(&qp
->ntb_rx_q_lock
);
978 spin_lock_init(&qp
->ntb_tx_free_q_lock
);
980 INIT_LIST_HEAD(&qp
->rx_post_q
);
981 INIT_LIST_HEAD(&qp
->rx_pend_q
);
982 INIT_LIST_HEAD(&qp
->rx_free_q
);
983 INIT_LIST_HEAD(&qp
->tx_free_q
);
985 tasklet_init(&qp
->rxc_db_work
, ntb_transport_rxc_db
,
991 static int ntb_transport_probe(struct ntb_client
*self
, struct ntb_dev
*ndev
)
993 struct ntb_transport_ctx
*nt
;
994 struct ntb_transport_mw
*mw
;
995 unsigned int mw_count
, qp_count
;
1000 if (ntb_db_is_unsafe(ndev
))
1002 "doorbell is unsafe, proceed anyway...\n");
1003 if (ntb_spad_is_unsafe(ndev
))
1005 "scratchpad is unsafe, proceed anyway...\n");
1007 node
= dev_to_node(&ndev
->dev
);
1009 nt
= kzalloc_node(sizeof(*nt
), GFP_KERNEL
, node
);
1015 mw_count
= ntb_mw_count(ndev
);
1017 nt
->mw_count
= mw_count
;
1019 nt
->mw_vec
= kzalloc_node(mw_count
* sizeof(*nt
->mw_vec
),
1026 for (i
= 0; i
< mw_count
; i
++) {
1027 mw
= &nt
->mw_vec
[i
];
1029 rc
= ntb_mw_get_range(ndev
, i
, &mw
->phys_addr
, &mw
->phys_size
,
1030 &mw
->xlat_align
, &mw
->xlat_align_size
);
1034 mw
->vbase
= ioremap_wc(mw
->phys_addr
, mw
->phys_size
);
1042 mw
->virt_addr
= NULL
;
1046 qp_bitmap
= ntb_db_valid_mask(ndev
);
1048 qp_count
= ilog2(qp_bitmap
);
1049 if (max_num_clients
&& max_num_clients
< qp_count
)
1050 qp_count
= max_num_clients
;
1051 else if (mw_count
< qp_count
)
1052 qp_count
= mw_count
;
1054 qp_bitmap
&= BIT_ULL(qp_count
) - 1;
1056 nt
->qp_count
= qp_count
;
1057 nt
->qp_bitmap
= qp_bitmap
;
1058 nt
->qp_bitmap_free
= qp_bitmap
;
1060 nt
->qp_vec
= kzalloc_node(qp_count
* sizeof(*nt
->qp_vec
),
1067 if (nt_debugfs_dir
) {
1068 nt
->debugfs_node_dir
=
1069 debugfs_create_dir(pci_name(ndev
->pdev
),
1073 for (i
= 0; i
< qp_count
; i
++) {
1074 rc
= ntb_transport_init_queue(nt
, i
);
1079 INIT_DELAYED_WORK(&nt
->link_work
, ntb_transport_link_work
);
1080 INIT_WORK(&nt
->link_cleanup
, ntb_transport_link_cleanup_work
);
1082 rc
= ntb_set_ctx(ndev
, nt
, &ntb_transport_ops
);
1086 INIT_LIST_HEAD(&nt
->client_devs
);
1087 rc
= ntb_bus_init(nt
);
1091 nt
->link_is_up
= false;
1092 ntb_link_enable(ndev
, NTB_SPEED_AUTO
, NTB_WIDTH_AUTO
);
1093 ntb_link_event(ndev
);
1098 ntb_clear_ctx(ndev
);
1105 mw
= &nt
->mw_vec
[i
];
1113 static void ntb_transport_free(struct ntb_client
*self
, struct ntb_dev
*ndev
)
1115 struct ntb_transport_ctx
*nt
= ndev
->ctx
;
1116 struct ntb_transport_qp
*qp
;
1117 u64 qp_bitmap_alloc
;
1120 ntb_transport_link_cleanup(nt
);
1121 cancel_work_sync(&nt
->link_cleanup
);
1122 cancel_delayed_work_sync(&nt
->link_work
);
1124 qp_bitmap_alloc
= nt
->qp_bitmap
& ~nt
->qp_bitmap_free
;
1126 /* verify that all the qp's are freed */
1127 for (i
= 0; i
< nt
->qp_count
; i
++) {
1128 qp
= &nt
->qp_vec
[i
];
1129 if (qp_bitmap_alloc
& BIT_ULL(i
))
1130 ntb_transport_free_queue(qp
);
1131 debugfs_remove_recursive(qp
->debugfs_dir
);
1134 ntb_link_disable(ndev
);
1135 ntb_clear_ctx(ndev
);
1139 for (i
= nt
->mw_count
; i
--; ) {
1141 iounmap(nt
->mw_vec
[i
].vbase
);
1149 static void ntb_complete_rxc(struct ntb_transport_qp
*qp
)
1151 struct ntb_queue_entry
*entry
;
1154 unsigned long irqflags
;
1156 spin_lock_irqsave(&qp
->ntb_rx_q_lock
, irqflags
);
1158 while (!list_empty(&qp
->rx_post_q
)) {
1159 entry
= list_first_entry(&qp
->rx_post_q
,
1160 struct ntb_queue_entry
, entry
);
1161 if (!(entry
->flags
& DESC_DONE_FLAG
))
1164 entry
->rx_hdr
->flags
= 0;
1165 iowrite32(entry
->index
, &qp
->rx_info
->entry
);
1167 cb_data
= entry
->cb_data
;
1170 list_move_tail(&entry
->entry
, &qp
->rx_free_q
);
1172 spin_unlock_irqrestore(&qp
->ntb_rx_q_lock
, irqflags
);
1174 if (qp
->rx_handler
&& qp
->client_ready
)
1175 qp
->rx_handler(qp
, qp
->cb_data
, cb_data
, len
);
1177 spin_lock_irqsave(&qp
->ntb_rx_q_lock
, irqflags
);
1180 spin_unlock_irqrestore(&qp
->ntb_rx_q_lock
, irqflags
);
1183 static void ntb_rx_copy_callback(void *data
)
1185 struct ntb_queue_entry
*entry
= data
;
1187 entry
->flags
|= DESC_DONE_FLAG
;
1189 ntb_complete_rxc(entry
->qp
);
1192 static void ntb_memcpy_rx(struct ntb_queue_entry
*entry
, void *offset
)
1194 void *buf
= entry
->buf
;
1195 size_t len
= entry
->len
;
1197 memcpy(buf
, offset
, len
);
1199 /* Ensure that the data is fully copied out before clearing the flag */
1202 ntb_rx_copy_callback(entry
);
1205 static void ntb_async_rx(struct ntb_queue_entry
*entry
, void *offset
)
1207 struct dma_async_tx_descriptor
*txd
;
1208 struct ntb_transport_qp
*qp
= entry
->qp
;
1209 struct dma_chan
*chan
= qp
->dma_chan
;
1210 struct dma_device
*device
;
1211 size_t pay_off
, buff_off
, len
;
1212 struct dmaengine_unmap_data
*unmap
;
1213 dma_cookie_t cookie
;
1214 void *buf
= entry
->buf
;
1221 if (len
< copy_bytes
)
1224 device
= chan
->device
;
1225 pay_off
= (size_t)offset
& ~PAGE_MASK
;
1226 buff_off
= (size_t)buf
& ~PAGE_MASK
;
1228 if (!is_dma_copy_aligned(device
, pay_off
, buff_off
, len
))
1231 unmap
= dmaengine_get_unmap_data(device
->dev
, 2, GFP_NOWAIT
);
1236 unmap
->addr
[0] = dma_map_page(device
->dev
, virt_to_page(offset
),
1237 pay_off
, len
, DMA_TO_DEVICE
);
1238 if (dma_mapping_error(device
->dev
, unmap
->addr
[0]))
1243 unmap
->addr
[1] = dma_map_page(device
->dev
, virt_to_page(buf
),
1244 buff_off
, len
, DMA_FROM_DEVICE
);
1245 if (dma_mapping_error(device
->dev
, unmap
->addr
[1]))
1248 unmap
->from_cnt
= 1;
1250 txd
= device
->device_prep_dma_memcpy(chan
, unmap
->addr
[1],
1251 unmap
->addr
[0], len
,
1252 DMA_PREP_INTERRUPT
);
1256 txd
->callback
= ntb_rx_copy_callback
;
1257 txd
->callback_param
= entry
;
1258 dma_set_unmap(txd
, unmap
);
1260 cookie
= dmaengine_submit(txd
);
1261 if (dma_submit_error(cookie
))
1264 dmaengine_unmap_put(unmap
);
1266 qp
->last_cookie
= cookie
;
1273 dmaengine_unmap_put(unmap
);
1275 dmaengine_unmap_put(unmap
);
1277 /* If the callbacks come out of order, the writing of the index to the
1278 * last completed will be out of order. This may result in the
1279 * receive stalling forever.
1281 dma_sync_wait(chan
, qp
->last_cookie
);
1283 ntb_memcpy_rx(entry
, offset
);
1287 static int ntb_process_rxc(struct ntb_transport_qp
*qp
)
1289 struct ntb_payload_header
*hdr
;
1290 struct ntb_queue_entry
*entry
;
1293 offset
= qp
->rx_buff
+ qp
->rx_max_frame
* qp
->rx_index
;
1294 hdr
= offset
+ qp
->rx_max_frame
- sizeof(struct ntb_payload_header
);
1296 dev_dbg(&qp
->ndev
->pdev
->dev
, "qp %d: RX ver %u len %d flags %x\n",
1297 qp
->qp_num
, hdr
->ver
, hdr
->len
, hdr
->flags
);
1299 if (!(hdr
->flags
& DESC_DONE_FLAG
)) {
1300 dev_dbg(&qp
->ndev
->pdev
->dev
, "done flag not set\n");
1301 qp
->rx_ring_empty
++;
1305 if (hdr
->flags
& LINK_DOWN_FLAG
) {
1306 dev_dbg(&qp
->ndev
->pdev
->dev
, "link down flag set\n");
1307 ntb_qp_link_down(qp
);
1312 if (hdr
->ver
!= (u32
)qp
->rx_pkts
) {
1313 dev_dbg(&qp
->ndev
->pdev
->dev
,
1314 "version mismatch, expected %llu - got %u\n",
1315 qp
->rx_pkts
, hdr
->ver
);
1320 entry
= ntb_list_mv(&qp
->ntb_rx_q_lock
, &qp
->rx_pend_q
, &qp
->rx_post_q
);
1322 dev_dbg(&qp
->ndev
->pdev
->dev
, "no receive buffer\n");
1323 qp
->rx_err_no_buf
++;
1327 entry
->rx_hdr
= hdr
;
1328 entry
->index
= qp
->rx_index
;
1330 if (hdr
->len
> entry
->len
) {
1331 dev_dbg(&qp
->ndev
->pdev
->dev
,
1332 "receive buffer overflow! Wanted %d got %d\n",
1333 hdr
->len
, entry
->len
);
1337 entry
->flags
|= DESC_DONE_FLAG
;
1339 ntb_complete_rxc(qp
);
1341 dev_dbg(&qp
->ndev
->pdev
->dev
,
1342 "RX OK index %u ver %u size %d into buf size %d\n",
1343 qp
->rx_index
, hdr
->ver
, hdr
->len
, entry
->len
);
1345 qp
->rx_bytes
+= hdr
->len
;
1348 entry
->len
= hdr
->len
;
1350 ntb_async_rx(entry
, offset
);
1354 qp
->rx_index
%= qp
->rx_max_entry
;
1359 static void ntb_transport_rxc_db(unsigned long data
)
1361 struct ntb_transport_qp
*qp
= (void *)data
;
1364 dev_dbg(&qp
->ndev
->pdev
->dev
, "%s: doorbell %d received\n",
1365 __func__
, qp
->qp_num
);
1367 /* Limit the number of packets processed in a single interrupt to
1368 * provide fairness to others
1370 for (i
= 0; i
< qp
->rx_max_entry
; i
++) {
1371 rc
= ntb_process_rxc(qp
);
1376 if (i
&& qp
->dma_chan
)
1377 dma_async_issue_pending(qp
->dma_chan
);
1379 if (i
== qp
->rx_max_entry
) {
1380 /* there is more work to do */
1381 tasklet_schedule(&qp
->rxc_db_work
);
1382 } else if (ntb_db_read(qp
->ndev
) & BIT_ULL(qp
->qp_num
)) {
1383 /* the doorbell bit is set: clear it */
1384 ntb_db_clear(qp
->ndev
, BIT_ULL(qp
->qp_num
));
1385 /* ntb_db_read ensures ntb_db_clear write is committed */
1386 ntb_db_read(qp
->ndev
);
1388 /* an interrupt may have arrived between finishing
1389 * ntb_process_rxc and clearing the doorbell bit:
1390 * there might be some more work to do.
1392 tasklet_schedule(&qp
->rxc_db_work
);
1396 static void ntb_tx_copy_callback(void *data
)
1398 struct ntb_queue_entry
*entry
= data
;
1399 struct ntb_transport_qp
*qp
= entry
->qp
;
1400 struct ntb_payload_header __iomem
*hdr
= entry
->tx_hdr
;
1402 iowrite32(entry
->flags
| DESC_DONE_FLAG
, &hdr
->flags
);
1404 ntb_peer_db_set(qp
->ndev
, BIT_ULL(qp
->qp_num
));
1406 /* The entry length can only be zero if the packet is intended to be a
1407 * "link down" or similar. Since no payload is being sent in these
1408 * cases, there is nothing to add to the completion queue.
1410 if (entry
->len
> 0) {
1411 qp
->tx_bytes
+= entry
->len
;
1414 qp
->tx_handler(qp
, qp
->cb_data
, entry
->cb_data
,
1418 ntb_list_add(&qp
->ntb_tx_free_q_lock
, &entry
->entry
, &qp
->tx_free_q
);
1421 static void ntb_memcpy_tx(struct ntb_queue_entry
*entry
, void __iomem
*offset
)
1423 #ifdef ARCH_HAS_NOCACHE_UACCESS
1425 * Using non-temporal mov to improve performance on non-cached
1426 * writes, even though we aren't actually copying from user space.
1428 __copy_from_user_inatomic_nocache(offset
, entry
->buf
, entry
->len
);
1430 memcpy_toio(offset
, entry
->buf
, entry
->len
);
1433 /* Ensure that the data is fully copied out before setting the flags */
1436 ntb_tx_copy_callback(entry
);
1439 static void ntb_async_tx(struct ntb_transport_qp
*qp
,
1440 struct ntb_queue_entry
*entry
)
1442 struct ntb_payload_header __iomem
*hdr
;
1443 struct dma_async_tx_descriptor
*txd
;
1444 struct dma_chan
*chan
= qp
->dma_chan
;
1445 struct dma_device
*device
;
1446 size_t dest_off
, buff_off
;
1447 struct dmaengine_unmap_data
*unmap
;
1449 dma_cookie_t cookie
;
1450 void __iomem
*offset
;
1451 size_t len
= entry
->len
;
1452 void *buf
= entry
->buf
;
1454 offset
= qp
->tx_mw
+ qp
->tx_max_frame
* qp
->tx_index
;
1455 hdr
= offset
+ qp
->tx_max_frame
- sizeof(struct ntb_payload_header
);
1456 entry
->tx_hdr
= hdr
;
1458 iowrite32(entry
->len
, &hdr
->len
);
1459 iowrite32((u32
)qp
->tx_pkts
, &hdr
->ver
);
1464 if (len
< copy_bytes
)
1467 device
= chan
->device
;
1468 dest
= qp
->tx_mw_phys
+ qp
->tx_max_frame
* qp
->tx_index
;
1469 buff_off
= (size_t)buf
& ~PAGE_MASK
;
1470 dest_off
= (size_t)dest
& ~PAGE_MASK
;
1472 if (!is_dma_copy_aligned(device
, buff_off
, dest_off
, len
))
1475 unmap
= dmaengine_get_unmap_data(device
->dev
, 1, GFP_NOWAIT
);
1480 unmap
->addr
[0] = dma_map_page(device
->dev
, virt_to_page(buf
),
1481 buff_off
, len
, DMA_TO_DEVICE
);
1482 if (dma_mapping_error(device
->dev
, unmap
->addr
[0]))
1487 txd
= device
->device_prep_dma_memcpy(chan
, dest
, unmap
->addr
[0], len
,
1488 DMA_PREP_INTERRUPT
);
1492 txd
->callback
= ntb_tx_copy_callback
;
1493 txd
->callback_param
= entry
;
1494 dma_set_unmap(txd
, unmap
);
1496 cookie
= dmaengine_submit(txd
);
1497 if (dma_submit_error(cookie
))
1500 dmaengine_unmap_put(unmap
);
1502 dma_async_issue_pending(chan
);
1507 dmaengine_unmap_put(unmap
);
1509 dmaengine_unmap_put(unmap
);
1511 ntb_memcpy_tx(entry
, offset
);
1515 static int ntb_process_tx(struct ntb_transport_qp
*qp
,
1516 struct ntb_queue_entry
*entry
)
1518 if (qp
->tx_index
== qp
->remote_rx_info
->entry
) {
1523 if (entry
->len
> qp
->tx_max_frame
- sizeof(struct ntb_payload_header
)) {
1525 qp
->tx_handler(qp
->cb_data
, qp
, NULL
, -EIO
);
1527 ntb_list_add(&qp
->ntb_tx_free_q_lock
, &entry
->entry
,
1532 ntb_async_tx(qp
, entry
);
1535 qp
->tx_index
%= qp
->tx_max_entry
;
1542 static void ntb_send_link_down(struct ntb_transport_qp
*qp
)
1544 struct pci_dev
*pdev
= qp
->ndev
->pdev
;
1545 struct ntb_queue_entry
*entry
;
1548 if (!qp
->link_is_up
)
1551 dev_info(&pdev
->dev
, "qp %d: Send Link Down\n", qp
->qp_num
);
1553 for (i
= 0; i
< NTB_LINK_DOWN_TIMEOUT
; i
++) {
1554 entry
= ntb_list_rm(&qp
->ntb_tx_free_q_lock
, &qp
->tx_free_q
);
1563 entry
->cb_data
= NULL
;
1566 entry
->flags
= LINK_DOWN_FLAG
;
1568 rc
= ntb_process_tx(qp
, entry
);
1570 dev_err(&pdev
->dev
, "ntb: QP%d unable to send linkdown msg\n",
1573 ntb_qp_link_down_reset(qp
);
1576 static bool ntb_dma_filter_fn(struct dma_chan
*chan
, void *node
)
1578 return dev_to_node(&chan
->dev
->device
) == (int)(unsigned long)node
;
1582 * ntb_transport_create_queue - Create a new NTB transport layer queue
1583 * @rx_handler: receive callback function
1584 * @tx_handler: transmit callback function
1585 * @event_handler: event callback function
1587 * Create a new NTB transport layer queue and provide the queue with a callback
1588 * routine for both transmit and receive. The receive callback routine will be
1589 * used to pass up data when the transport has received it on the queue. The
1590 * transmit callback routine will be called when the transport has completed the
1591 * transmission of the data on the queue and the data is ready to be freed.
1593 * RETURNS: pointer to newly created ntb_queue, NULL on error.
1595 struct ntb_transport_qp
*
1596 ntb_transport_create_queue(void *data
, struct device
*client_dev
,
1597 const struct ntb_queue_handlers
*handlers
)
1599 struct ntb_dev
*ndev
;
1600 struct pci_dev
*pdev
;
1601 struct ntb_transport_ctx
*nt
;
1602 struct ntb_queue_entry
*entry
;
1603 struct ntb_transport_qp
*qp
;
1605 unsigned int free_queue
;
1606 dma_cap_mask_t dma_mask
;
1610 ndev
= dev_ntb(client_dev
->parent
);
1614 node
= dev_to_node(&ndev
->dev
);
1616 free_queue
= ffs(nt
->qp_bitmap
);
1620 /* decrement free_queue to make it zero based */
1623 qp
= &nt
->qp_vec
[free_queue
];
1624 qp_bit
= BIT_ULL(qp
->qp_num
);
1626 nt
->qp_bitmap_free
&= ~qp_bit
;
1629 qp
->rx_handler
= handlers
->rx_handler
;
1630 qp
->tx_handler
= handlers
->tx_handler
;
1631 qp
->event_handler
= handlers
->event_handler
;
1633 dma_cap_zero(dma_mask
);
1634 dma_cap_set(DMA_MEMCPY
, dma_mask
);
1637 qp
->dma_chan
= dma_request_channel(dma_mask
, ntb_dma_filter_fn
,
1638 (void *)(unsigned long)node
);
1640 dev_info(&pdev
->dev
, "Unable to allocate DMA channel\n");
1642 qp
->dma_chan
= NULL
;
1644 dev_dbg(&pdev
->dev
, "Using %s memcpy\n", qp
->dma_chan
? "DMA" : "CPU");
1646 for (i
= 0; i
< NTB_QP_DEF_NUM_ENTRIES
; i
++) {
1647 entry
= kzalloc_node(sizeof(*entry
), GFP_ATOMIC
, node
);
1652 ntb_list_add(&qp
->ntb_rx_q_lock
, &entry
->entry
,
1656 for (i
= 0; i
< NTB_QP_DEF_NUM_ENTRIES
; i
++) {
1657 entry
= kzalloc_node(sizeof(*entry
), GFP_ATOMIC
, node
);
1662 ntb_list_add(&qp
->ntb_tx_free_q_lock
, &entry
->entry
,
1666 ntb_db_clear(qp
->ndev
, qp_bit
);
1667 ntb_db_clear_mask(qp
->ndev
, qp_bit
);
1669 dev_info(&pdev
->dev
, "NTB Transport QP %d created\n", qp
->qp_num
);
1674 while ((entry
= ntb_list_rm(&qp
->ntb_tx_free_q_lock
, &qp
->tx_free_q
)))
1677 while ((entry
= ntb_list_rm(&qp
->ntb_rx_q_lock
, &qp
->rx_free_q
)))
1680 dma_release_channel(qp
->dma_chan
);
1681 nt
->qp_bitmap_free
|= qp_bit
;
1685 EXPORT_SYMBOL_GPL(ntb_transport_create_queue
);
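/*
 * Typical client usage sketch (illustrative only; example_rx(),
 * example_tx(), example_event(), "priv", NUM_RX_BUFS and RX_BUF_SIZE are
 * hypothetical placeholders, and client_dev is the struct device handed to
 * the client's probe callback): create the queue, pre-post receive
 * buffers, then announce readiness so the per-queue link handshake starts.
 *
 *	static const struct ntb_queue_handlers example_handlers = {
 *		.rx_handler = example_rx,
 *		.tx_handler = example_tx,
 *		.event_handler = example_event,
 *	};
 *
 *	qp = ntb_transport_create_queue(priv, client_dev, &example_handlers);
 *	if (!qp)
 *		return -EIO;
 *
 *	for (i = 0; i < NUM_RX_BUFS; i++) {
 *		void *buf = kmalloc(RX_BUF_SIZE, GFP_KERNEL);
 *
 *		if (!buf)
 *			break;
 *		rc = ntb_transport_rx_enqueue(qp, buf, buf, RX_BUF_SIZE);
 *		if (rc) {
 *			kfree(buf);
 *			break;
 *		}
 *	}
 *
 *	ntb_transport_link_up(qp);
 */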
1688 * ntb_transport_free_queue - Frees NTB transport queue
1689 * @qp: NTB queue to be freed
1691 * Frees NTB transport queue
1693 void ntb_transport_free_queue(struct ntb_transport_qp
*qp
)
1695 struct pci_dev
*pdev
;
1696 struct ntb_queue_entry
*entry
;
1702 pdev
= qp
->ndev
->pdev
;
1705 struct dma_chan
*chan
= qp
->dma_chan
;
1706 /* Putting the dma_chan to NULL will force any new traffic to be
	 * processed by the CPU instead of the DMA engine
1709 qp
->dma_chan
= NULL
;
1711 /* Try to be nice and wait for any queued DMA engine
1712 * transactions to process before smashing it with a rock
1714 dma_sync_wait(chan
, qp
->last_cookie
);
1715 dmaengine_terminate_all(chan
);
1716 dma_release_channel(chan
);
1719 qp_bit
= BIT_ULL(qp
->qp_num
);
1721 ntb_db_set_mask(qp
->ndev
, qp_bit
);
1722 tasklet_disable(&qp
->rxc_db_work
);
1724 cancel_delayed_work_sync(&qp
->link_work
);
1727 qp
->rx_handler
= NULL
;
1728 qp
->tx_handler
= NULL
;
1729 qp
->event_handler
= NULL
;
1731 while ((entry
= ntb_list_rm(&qp
->ntb_rx_q_lock
, &qp
->rx_free_q
)))
1734 while ((entry
= ntb_list_rm(&qp
->ntb_rx_q_lock
, &qp
->rx_pend_q
))) {
1735 dev_warn(&pdev
->dev
, "Freeing item from non-empty rx_pend_q\n");
1739 while ((entry
= ntb_list_rm(&qp
->ntb_rx_q_lock
, &qp
->rx_post_q
))) {
1740 dev_warn(&pdev
->dev
, "Freeing item from non-empty rx_post_q\n");
1744 while ((entry
= ntb_list_rm(&qp
->ntb_tx_free_q_lock
, &qp
->tx_free_q
)))
1747 qp
->transport
->qp_bitmap_free
|= qp_bit
;
1749 dev_info(&pdev
->dev
, "NTB Transport QP %d freed\n", qp
->qp_num
);
1751 EXPORT_SYMBOL_GPL(ntb_transport_free_queue
);
1754 * ntb_transport_rx_remove - Dequeues enqueued rx packet
1755 * @qp: NTB queue to be freed
1756 * @len: pointer to variable to write enqueued buffers length
1758 * Dequeues unused buffers from receive queue. Should only be used during
1761 * RETURNS: NULL error value on error, or void* for success.
1763 void *ntb_transport_rx_remove(struct ntb_transport_qp
*qp
, unsigned int *len
)
1765 struct ntb_queue_entry
*entry
;
1768 if (!qp
|| qp
->client_ready
)
1771 entry
= ntb_list_rm(&qp
->ntb_rx_q_lock
, &qp
->rx_pend_q
);
1775 buf
= entry
->cb_data
;
1778 ntb_list_add(&qp
->ntb_rx_q_lock
, &entry
->entry
, &qp
->rx_free_q
);
1782 EXPORT_SYMBOL_GPL(ntb_transport_rx_remove
);
1785 * ntb_transport_rx_enqueue - Enqueue a new NTB queue entry
1786 * @qp: NTB transport layer queue the entry is to be enqueued on
1787 * @cb: per buffer pointer for callback function to use
1788 * @data: pointer to data buffer that incoming packets will be copied into
1789 * @len: length of the data buffer
1791 * Enqueue a new receive buffer onto the transport queue into which a NTB
1792 * payload can be received into.
1794 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
1796 int ntb_transport_rx_enqueue(struct ntb_transport_qp
*qp
, void *cb
, void *data
,
1799 struct ntb_queue_entry
*entry
;
1804 entry
= ntb_list_rm(&qp
->ntb_rx_q_lock
, &qp
->rx_free_q
);
1808 entry
->cb_data
= cb
;
1813 ntb_list_add(&qp
->ntb_rx_q_lock
, &entry
->entry
, &qp
->rx_pend_q
);
1815 tasklet_schedule(&qp
->rxc_db_work
);
1819 EXPORT_SYMBOL_GPL(ntb_transport_rx_enqueue
);
1822 * ntb_transport_tx_enqueue - Enqueue a new NTB queue entry
1823 * @qp: NTB transport layer queue the entry is to be enqueued on
1824 * @cb: per buffer pointer for callback function to use
1825 * @data: pointer to data buffer that will be sent
1826 * @len: length of the data buffer
1828 * Enqueue a new transmit buffer onto the transport queue from which a NTB
1829 * payload will be transmitted. This assumes that a lock is being held to
1830 * serialize access to the qp.
1832 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
1834 int ntb_transport_tx_enqueue(struct ntb_transport_qp
*qp
, void *cb
, void *data
,
1837 struct ntb_queue_entry
*entry
;
1840 if (!qp
|| !qp
->link_is_up
|| !len
)
1843 entry
= ntb_list_rm(&qp
->ntb_tx_free_q_lock
, &qp
->tx_free_q
);
1845 qp
->tx_err_no_buf
++;
1849 entry
->cb_data
= cb
;
1854 rc
= ntb_process_tx(qp
, entry
);
1856 ntb_list_add(&qp
->ntb_tx_free_q_lock
, &entry
->entry
,
1861 EXPORT_SYMBOL_GPL(ntb_transport_tx_enqueue
);
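/*
 * Transmit path sketch (illustrative only; "buf", "len" and the return
 * values chosen here belong to a hypothetical caller, not to this driver):
 * callers are expected to bound the payload by ntb_transport_max_size()
 * and to stop submitting when enqueueing fails, retrying only after the
 * tx_handler callback has handed a completed buffer back.
 *
 *	if (!ntb_transport_link_query(qp))
 *		return -EAGAIN;
 *	if (len > ntb_transport_max_size(qp))
 *		return -EINVAL;
 *
 *	rc = ntb_transport_tx_enqueue(qp, buf, buf, len);
 *	if (rc)
 *		return rc;	// back off until tx_handler fires
 */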
1864 * ntb_transport_link_up - Notify NTB transport of client readiness to use queue
1865 * @qp: NTB transport layer queue to be enabled
1867 * Notify NTB transport layer of client readiness to use queue
1869 void ntb_transport_link_up(struct ntb_transport_qp
*qp
)
1874 qp
->client_ready
= true;
1876 if (qp
->transport
->link_is_up
)
1877 schedule_delayed_work(&qp
->link_work
, 0);
1879 EXPORT_SYMBOL_GPL(ntb_transport_link_up
);
1882 * ntb_transport_link_down - Notify NTB transport to no longer enqueue data
1883 * @qp: NTB transport layer queue to be disabled
1885 * Notify NTB transport layer of client's desire to no longer receive data on
1886 * transport queue specified. It is the client's responsibility to ensure all
1887 * entries on queue are purged or otherwise handled appropriately.
1889 void ntb_transport_link_down(struct ntb_transport_qp
*qp
)
1891 struct pci_dev
*pdev
;
1897 pdev
= qp
->ndev
->pdev
;
1898 qp
->client_ready
= false;
1900 val
= ntb_spad_read(qp
->ndev
, QP_LINKS
);
1902 ntb_peer_spad_write(qp
->ndev
, QP_LINKS
,
1903 val
& ~BIT(qp
->qp_num
));
1906 ntb_send_link_down(qp
);
1908 cancel_delayed_work_sync(&qp
->link_work
);
1910 EXPORT_SYMBOL_GPL(ntb_transport_link_down
);
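/*
 * Teardown sketch (illustrative only): take the queue link down first so
 * the peer stops sending and client_ready is cleared, then reclaim any
 * receive buffers that were never filled, and finally free the queue.
 *
 *	ntb_transport_link_down(qp);
 *
 *	while ((buf = ntb_transport_rx_remove(qp, &len)))
 *		kfree(buf);
 *
 *	ntb_transport_free_queue(qp);
 */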
1913 * ntb_transport_link_query - Query transport link state
1914 * @qp: NTB transport layer queue to be queried
1916 * Query connectivity to the remote system of the NTB transport queue
1918 * RETURNS: true for link up or false for link down
1920 bool ntb_transport_link_query(struct ntb_transport_qp
*qp
)
1925 return qp
->link_is_up
;
1927 EXPORT_SYMBOL_GPL(ntb_transport_link_query
);
1930 * ntb_transport_qp_num - Query the qp number
1931 * @qp: NTB transport layer queue to be queried
1933 * Query qp number of the NTB transport queue
1935 * RETURNS: a zero based number specifying the qp number
1937 unsigned char ntb_transport_qp_num(struct ntb_transport_qp
*qp
)
1944 EXPORT_SYMBOL_GPL(ntb_transport_qp_num
);
1947 * ntb_transport_max_size - Query the max payload size of a qp
1948 * @qp: NTB transport layer queue to be queried
1950 * Query the maximum payload size permissible on the given qp
1952 * RETURNS: the max payload size of a qp
1954 unsigned int ntb_transport_max_size(struct ntb_transport_qp
*qp
)
1962 return qp
->tx_max_frame
- sizeof(struct ntb_payload_header
);
1964 /* If DMA engine usage is possible, try to find the max size for that */
1965 max
= qp
->tx_max_frame
- sizeof(struct ntb_payload_header
);
1966 max
-= max
% (1 << qp
->dma_chan
->device
->copy_align
);
1970 EXPORT_SYMBOL_GPL(ntb_transport_max_size
);
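/*
 * Worked example for the DMA case above (illustrative numbers only): with
 * tx_max_frame == 32768 and a DMA engine whose copy_align is 6 (64 byte
 * alignment), the payload limit starts as
 * 32768 - sizeof(struct ntb_payload_header) = 32756 and is then rounded
 * down to a multiple of 64, giving 32704.
 */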
1972 static void ntb_transport_doorbell_callback(void *data
, int vector
)
1974 struct ntb_transport_ctx
*nt
= data
;
1975 struct ntb_transport_qp
*qp
;
1977 unsigned int qp_num
;
1979 db_bits
= (nt
->qp_bitmap
& ~nt
->qp_bitmap_free
&
1980 ntb_db_vector_mask(nt
->ndev
, vector
));
1983 qp_num
= __ffs(db_bits
);
1984 qp
= &nt
->qp_vec
[qp_num
];
1986 tasklet_schedule(&qp
->rxc_db_work
);
1988 db_bits
&= ~BIT_ULL(qp_num
);
1992 static const struct ntb_ctx_ops ntb_transport_ops
= {
1993 .link_event
= ntb_transport_event_callback
,
1994 .db_event
= ntb_transport_doorbell_callback
,
1997 static struct ntb_client ntb_transport_client
= {
1999 .probe
= ntb_transport_probe
,
2000 .remove
= ntb_transport_free
,
2004 static int __init
ntb_transport_init(void)
2008 pr_info("%s, version %s\n", NTB_TRANSPORT_DESC
, NTB_TRANSPORT_VER
);
2010 if (debugfs_initialized())
2011 nt_debugfs_dir
= debugfs_create_dir(KBUILD_MODNAME
, NULL
);
2013 rc
= bus_register(&ntb_transport_bus
);
2017 rc
= ntb_register_client(&ntb_transport_client
);
2024 bus_unregister(&ntb_transport_bus
);
2026 debugfs_remove_recursive(nt_debugfs_dir
);
2029 module_init(ntb_transport_init
);
2031 static void __exit
ntb_transport_exit(void)
2033 debugfs_remove_recursive(nt_debugfs_dir
);
2035 ntb_unregister_client(&ntb_transport_client
);
2036 bus_unregister(&ntb_transport_bus
);
2038 module_exit(ntb_transport_exit
);