/*
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 *   redistributing this file, you may do so under either license.
 *
 *   GPL LICENSE SUMMARY
 *
 *   Copyright(c) 2012 Intel Corporation. All rights reserved.
 *   Copyright (C) 2015 EMC Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of version 2 of the GNU General Public License as
 *   published by the Free Software Foundation.
 *
 *   BSD LICENSE
 *
 *   Copyright(c) 2012 Intel Corporation. All rights reserved.
 *   Copyright (C) 2015 EMC Corporation. All Rights Reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * PCIe NTB Transport Linux driver
 *
 * Contact Information:
 * Jon Mason <jon.mason@intel.com>
 */
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include "linux/ntb.h"
#include "linux/ntb_transport.h"

#define NTB_TRANSPORT_VERSION	4
#define NTB_TRANSPORT_VER	"4"
#define NTB_TRANSPORT_NAME	"ntb_transport"
#define NTB_TRANSPORT_DESC	"Software Queue-Pair Transport over NTB"
#define NTB_TRANSPORT_MIN_SPADS (MW0_SZ_HIGH + 2)

MODULE_DESCRIPTION(NTB_TRANSPORT_DESC);
MODULE_VERSION(NTB_TRANSPORT_VER);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Intel Corporation");
static unsigned long max_mw_size;
module_param(max_mw_size, ulong, 0644);
MODULE_PARM_DESC(max_mw_size, "Limit size of large memory windows");

static unsigned int transport_mtu = 0x10000;
module_param(transport_mtu, uint, 0644);
MODULE_PARM_DESC(transport_mtu, "Maximum size of NTB transport packets");

static unsigned char max_num_clients;
module_param(max_num_clients, byte, 0644);
MODULE_PARM_DESC(max_num_clients, "Maximum number of NTB transport clients");

static unsigned int copy_bytes = 1024;
module_param(copy_bytes, uint, 0644);
MODULE_PARM_DESC(copy_bytes, "Threshold under which NTB will use the CPU to copy instead of DMA");

static bool use_dma;
module_param(use_dma, bool, 0644);
MODULE_PARM_DESC(use_dma, "Use DMA engine to perform large data copy");

static bool use_msi;
module_param(use_msi, bool, 0644);
MODULE_PARM_DESC(use_msi, "Use MSI interrupts instead of doorbells");

static struct dentry *nt_debugfs_dir;
/* Only two-port NTB devices are supported */
#define PIDX		NTB_DEF_PEER_IDX

struct ntb_queue_entry {
	/* ntb_queue list reference */
	struct list_head entry;
	/* pointers to data to be transferred */
	void *cb_data;
	void *buf;
	unsigned int len;
	unsigned int flags;
	int retries;
	int errors;
	unsigned int tx_index;
	unsigned int rx_index;

	struct ntb_transport_qp *qp;
	union {
		struct ntb_payload_header __iomem *tx_hdr;
		struct ntb_payload_header *rx_hdr;
	};
};
struct ntb_transport_qp {
	struct ntb_transport_ctx *transport;
	struct ntb_dev *ndev;
	void *cb_data;

	struct dma_chan *tx_dma_chan;
	struct dma_chan *rx_dma_chan;

	bool client_ready;
	bool link_is_up;
	bool active;

	u8 qp_num;	/* Only 64 QP's are allowed.  0-63 */

	struct ntb_rx_info __iomem *rx_info;
	struct ntb_rx_info *remote_rx_info;

	void (*tx_handler)(struct ntb_transport_qp *qp, void *qp_data,
			   void *data, int len);
	struct list_head tx_free_q;
	spinlock_t ntb_tx_free_q_lock;
	void __iomem *tx_mw;
	phys_addr_t tx_mw_phys;
	size_t tx_mw_size;
	dma_addr_t tx_mw_dma_addr;
	unsigned int tx_index;
	unsigned int tx_max_entry;
	unsigned int tx_max_frame;

	void (*rx_handler)(struct ntb_transport_qp *qp, void *qp_data,
			   void *data, int len);
	struct list_head rx_post_q;
	struct list_head rx_pend_q;
	struct list_head rx_free_q;
	/* ntb_rx_q_lock: synchronize access to rx_XXXX_q */
	spinlock_t ntb_rx_q_lock;
	void *rx_buff;
	unsigned int rx_index;
	unsigned int rx_max_entry;
	unsigned int rx_max_frame;
	unsigned int rx_alloc_entry;
	dma_cookie_t last_cookie;
	struct tasklet_struct rxc_db_work;

	void (*event_handler)(void *data, int status);
	struct delayed_work link_work;
	struct work_struct link_cleanup;

	struct dentry *debugfs_dir;
	struct dentry *debugfs_stats;

	/* Stats */
	u64 rx_bytes;
	u64 rx_pkts;
	u64 rx_ring_empty;
	u64 rx_err_no_buf;
	u64 rx_err_oflow;
	u64 rx_err_ver;
	u64 rx_memcpy;
	u64 rx_async;
	u64 tx_bytes;
	u64 tx_pkts;
	u64 tx_ring_full;
	u64 tx_err_no_buf;
	u64 tx_memcpy;
	u64 tx_async;

	bool use_msi;
	int msi_irq;
	struct ntb_msi_desc msi_desc;
	struct ntb_msi_desc peer_msi_desc;
};
struct ntb_transport_mw {
	phys_addr_t phys_addr;
	resource_size_t phys_size;
	void __iomem *vbase;
	size_t xlat_size;
	size_t buff_size;
	size_t alloc_size;
	void *alloc_addr;
	void *virt_addr;
	dma_addr_t dma_addr;
};

struct ntb_transport_client_dev {
	struct list_head entry;
	struct ntb_transport_ctx *nt;
	struct device dev;
};

struct ntb_transport_ctx {
	struct list_head entry;
	struct list_head client_devs;

	struct ntb_dev *ndev;

	struct ntb_transport_mw *mw_vec;
	struct ntb_transport_qp *qp_vec;
	unsigned int mw_count;
	unsigned int qp_count;
	u64 qp_bitmap;
	u64 qp_bitmap_free;

	bool use_msi;
	unsigned int msi_spad_offset;
	u64 msi_db_mask;

	bool link_is_up;
	struct delayed_work link_work;
	struct work_struct link_cleanup;

	struct dentry *debugfs_node_dir;
};

enum {
	DESC_DONE_FLAG = BIT(0),
	LINK_DOWN_FLAG = BIT(1),
};

struct ntb_payload_header {
	unsigned int ver;
	unsigned int len;
	unsigned int flags;
};

enum {
	VERSION = 0,
	QP_LINKS,
	NUM_QPS,
	NUM_MWS,
	MW0_SZ_HIGH,
	MW0_SZ_LOW,
};

#define dev_client_dev(__dev) \
	container_of((__dev), struct ntb_transport_client_dev, dev)

#define drv_client(__drv) \
	container_of((__drv), struct ntb_transport_client, driver)

#define QP_TO_MW(nt, qp)	((qp) % nt->mw_count)
#define NTB_QP_DEF_NUM_ENTRIES	100
#define NTB_LINK_DOWN_TIMEOUT	10
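/*
 * Example: QP_TO_MW() stripes queue pairs across memory windows
 * round-robin.  With mw_count == 2 and qp_count == 5, QPs 0, 2 and 4
 * share MW0 while QPs 1 and 3 share MW1, so ntb_transport_setup_qp_mw()
 * carves MW0 into three slots and MW1 into two (the first
 * qp_count % mw_count windows get qp_count / mw_count + 1 queues each).
 */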
static void ntb_transport_rxc_db(unsigned long data);
static const struct ntb_ctx_ops ntb_transport_ops;
static struct ntb_client ntb_transport_client;
static int ntb_async_tx_submit(struct ntb_transport_qp *qp,
			       struct ntb_queue_entry *entry);
static void ntb_memcpy_tx(struct ntb_queue_entry *entry, void __iomem *offset);
static int ntb_async_rx_submit(struct ntb_queue_entry *entry, void *offset);
static void ntb_memcpy_rx(struct ntb_queue_entry *entry, void *offset);
static int ntb_transport_bus_match(struct device *dev,
				   const struct device_driver *drv)
{
	return !strncmp(dev_name(dev), drv->name, strlen(drv->name));
}

static int ntb_transport_bus_probe(struct device *dev)
{
	const struct ntb_transport_client *client;
	int rc;

	get_device(dev);

	client = drv_client(dev->driver);
	rc = client->probe(dev);
	if (rc)
		put_device(dev);

	return rc;
}

static void ntb_transport_bus_remove(struct device *dev)
{
	const struct ntb_transport_client *client;

	client = drv_client(dev->driver);
	client->remove(dev);

	put_device(dev);
}

static const struct bus_type ntb_transport_bus = {
	.name = "ntb_transport",
	.match = ntb_transport_bus_match,
	.probe = ntb_transport_bus_probe,
	.remove = ntb_transport_bus_remove,
};

static LIST_HEAD(ntb_transport_list);
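/*
 * Matching is purely by name prefix: a client device registered as, for
 * example, "ntb_netdev0" binds to a driver named "ntb_netdev" through the
 * strncmp() in ntb_transport_bus_match() above.  Client device names follow
 * the "<driver-name><instance>" pattern produced by dev_set_name() in
 * ntb_transport_register_client_dev() further down.
 */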
static int ntb_bus_init(struct ntb_transport_ctx *nt)
{
	list_add_tail(&nt->entry, &ntb_transport_list);
	return 0;
}

static void ntb_bus_remove(struct ntb_transport_ctx *nt)
{
	struct ntb_transport_client_dev *client_dev, *cd;

	list_for_each_entry_safe(client_dev, cd, &nt->client_devs, entry) {
		dev_err(client_dev->dev.parent, "%s still attached to bus, removing\n",
			dev_name(&client_dev->dev));
		list_del(&client_dev->entry);
		device_unregister(&client_dev->dev);
	}

	list_del(&nt->entry);
}

static void ntb_transport_client_release(struct device *dev)
{
	struct ntb_transport_client_dev *client_dev;

	client_dev = dev_client_dev(dev);
	kfree(client_dev);
}
/**
 * ntb_transport_unregister_client_dev - Unregister NTB client device
 * @device_name: Name of NTB client device
 *
 * Unregister an NTB client device with the NTB transport layer
 */
void ntb_transport_unregister_client_dev(char *device_name)
{
	struct ntb_transport_client_dev *client, *cd;
	struct ntb_transport_ctx *nt;

	list_for_each_entry(nt, &ntb_transport_list, entry)
		list_for_each_entry_safe(client, cd, &nt->client_devs, entry)
			if (!strncmp(dev_name(&client->dev), device_name,
				     strlen(device_name))) {
				list_del(&client->entry);
				device_unregister(&client->dev);
			}
}
EXPORT_SYMBOL_GPL(ntb_transport_unregister_client_dev);
/**
 * ntb_transport_register_client_dev - Register NTB client device
 * @device_name: Name of NTB client device
 *
 * Register an NTB client device with the NTB transport layer
 *
 * Returns: %0 on success or -errno code on error
 */
int ntb_transport_register_client_dev(char *device_name)
{
	struct ntb_transport_client_dev *client_dev;
	struct ntb_transport_ctx *nt;
	struct device *dev;
	int node;
	int rc, i = 0;

	if (list_empty(&ntb_transport_list))
		return -ENODEV;

	list_for_each_entry(nt, &ntb_transport_list, entry) {
		node = dev_to_node(&nt->ndev->dev);

		client_dev = kzalloc_node(sizeof(*client_dev),
					  GFP_KERNEL, node);
		if (!client_dev) {
			rc = -ENOMEM;
			goto err;
		}

		dev = &client_dev->dev;

		/* setup and register client devices */
		dev_set_name(dev, "%s%d", device_name, i);
		dev->bus = &ntb_transport_bus;
		dev->release = ntb_transport_client_release;
		dev->parent = &nt->ndev->dev;

		rc = device_register(dev);
		if (rc) {
			put_device(dev);
			goto err;
		}

		list_add_tail(&client_dev->entry, &nt->client_devs);
		i++;
	}

	return 0;

err:
	ntb_transport_unregister_client_dev(device_name);

	return rc;
}
EXPORT_SYMBOL_GPL(ntb_transport_register_client_dev);
/**
 * ntb_transport_register_client - Register NTB client driver
 * @drv: NTB client driver to be registered
 *
 * Register an NTB client driver with the NTB transport layer
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
int ntb_transport_register_client(struct ntb_transport_client *drv)
{
	drv->driver.bus = &ntb_transport_bus;

	if (list_empty(&ntb_transport_list))
		return -ENODEV;

	return driver_register(&drv->driver);
}
EXPORT_SYMBOL_GPL(ntb_transport_register_client);

/**
 * ntb_transport_unregister_client - Unregister NTB client driver
 * @drv: NTB client driver to be unregistered
 *
 * Unregister an NTB client driver with the NTB transport layer
 */
void ntb_transport_unregister_client(struct ntb_transport_client *drv)
{
	driver_unregister(&drv->driver);
}
EXPORT_SYMBOL_GPL(ntb_transport_unregister_client);
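/*
 * A minimal client, sketched for illustration only (the "my_*" names are
 * hypothetical, not part of this driver):
 *
 *	static struct ntb_transport_client my_client = {
 *		.driver.name	= KBUILD_MODNAME,
 *		.driver.owner	= THIS_MODULE,
 *		.probe		= my_probe,
 *		.remove		= my_remove,
 *	};
 *
 *	static int __init my_init(void)
 *	{
 *		int rc;
 *
 *		rc = ntb_transport_register_client_dev(KBUILD_MODNAME);
 *		if (rc)
 *			return rc;
 *
 *		rc = ntb_transport_register_client(&my_client);
 *		if (rc)
 *			ntb_transport_unregister_client_dev(KBUILD_MODNAME);
 *		return rc;
 *	}
 *
 * The client device created by ntb_transport_register_client_dev() is what
 * the bus match above pairs with .driver.name.
 */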
465 static ssize_t
debugfs_read(struct file
*filp
, char __user
*ubuf
, size_t count
,
468 struct ntb_transport_qp
*qp
;
470 ssize_t ret
, out_offset
, out_count
;
472 qp
= filp
->private_data
;
474 if (!qp
|| !qp
->link_is_up
)
479 buf
= kmalloc(out_count
, GFP_KERNEL
);
484 out_offset
+= scnprintf(buf
+ out_offset
, out_count
- out_offset
,
485 "\nNTB QP stats:\n\n");
486 out_offset
+= scnprintf(buf
+ out_offset
, out_count
- out_offset
,
487 "rx_bytes - \t%llu\n", qp
->rx_bytes
);
488 out_offset
+= scnprintf(buf
+ out_offset
, out_count
- out_offset
,
489 "rx_pkts - \t%llu\n", qp
->rx_pkts
);
490 out_offset
+= scnprintf(buf
+ out_offset
, out_count
- out_offset
,
491 "rx_memcpy - \t%llu\n", qp
->rx_memcpy
);
492 out_offset
+= scnprintf(buf
+ out_offset
, out_count
- out_offset
,
493 "rx_async - \t%llu\n", qp
->rx_async
);
494 out_offset
+= scnprintf(buf
+ out_offset
, out_count
- out_offset
,
495 "rx_ring_empty - %llu\n", qp
->rx_ring_empty
);
496 out_offset
+= scnprintf(buf
+ out_offset
, out_count
- out_offset
,
497 "rx_err_no_buf - %llu\n", qp
->rx_err_no_buf
);
498 out_offset
+= scnprintf(buf
+ out_offset
, out_count
- out_offset
,
499 "rx_err_oflow - \t%llu\n", qp
->rx_err_oflow
);
500 out_offset
+= scnprintf(buf
+ out_offset
, out_count
- out_offset
,
501 "rx_err_ver - \t%llu\n", qp
->rx_err_ver
);
502 out_offset
+= scnprintf(buf
+ out_offset
, out_count
- out_offset
,
503 "rx_buff - \t0x%p\n", qp
->rx_buff
);
504 out_offset
+= scnprintf(buf
+ out_offset
, out_count
- out_offset
,
505 "rx_index - \t%u\n", qp
->rx_index
);
506 out_offset
+= scnprintf(buf
+ out_offset
, out_count
- out_offset
,
507 "rx_max_entry - \t%u\n", qp
->rx_max_entry
);
508 out_offset
+= scnprintf(buf
+ out_offset
, out_count
- out_offset
,
509 "rx_alloc_entry - \t%u\n\n", qp
->rx_alloc_entry
);
511 out_offset
+= scnprintf(buf
+ out_offset
, out_count
- out_offset
,
512 "tx_bytes - \t%llu\n", qp
->tx_bytes
);
513 out_offset
+= scnprintf(buf
+ out_offset
, out_count
- out_offset
,
514 "tx_pkts - \t%llu\n", qp
->tx_pkts
);
515 out_offset
+= scnprintf(buf
+ out_offset
, out_count
- out_offset
,
516 "tx_memcpy - \t%llu\n", qp
->tx_memcpy
);
517 out_offset
+= scnprintf(buf
+ out_offset
, out_count
- out_offset
,
518 "tx_async - \t%llu\n", qp
->tx_async
);
519 out_offset
+= scnprintf(buf
+ out_offset
, out_count
- out_offset
,
520 "tx_ring_full - \t%llu\n", qp
->tx_ring_full
);
521 out_offset
+= scnprintf(buf
+ out_offset
, out_count
- out_offset
,
522 "tx_err_no_buf - %llu\n", qp
->tx_err_no_buf
);
523 out_offset
+= scnprintf(buf
+ out_offset
, out_count
- out_offset
,
524 "tx_mw - \t0x%p\n", qp
->tx_mw
);
525 out_offset
+= scnprintf(buf
+ out_offset
, out_count
- out_offset
,
526 "tx_index (H) - \t%u\n", qp
->tx_index
);
527 out_offset
+= scnprintf(buf
+ out_offset
, out_count
- out_offset
,
529 qp
->remote_rx_info
->entry
);
530 out_offset
+= scnprintf(buf
+ out_offset
, out_count
- out_offset
,
531 "tx_max_entry - \t%u\n", qp
->tx_max_entry
);
532 out_offset
+= scnprintf(buf
+ out_offset
, out_count
- out_offset
,
534 ntb_transport_tx_free_entry(qp
));
536 out_offset
+= scnprintf(buf
+ out_offset
, out_count
- out_offset
,
538 out_offset
+= scnprintf(buf
+ out_offset
, out_count
- out_offset
,
539 "Using TX DMA - \t%s\n",
540 qp
->tx_dma_chan
? "Yes" : "No");
541 out_offset
+= scnprintf(buf
+ out_offset
, out_count
- out_offset
,
542 "Using RX DMA - \t%s\n",
543 qp
->rx_dma_chan
? "Yes" : "No");
544 out_offset
+= scnprintf(buf
+ out_offset
, out_count
- out_offset
,
546 qp
->link_is_up
? "Up" : "Down");
547 out_offset
+= scnprintf(buf
+ out_offset
, out_count
- out_offset
,
550 if (out_offset
> out_count
)
551 out_offset
= out_count
;
553 ret
= simple_read_from_buffer(ubuf
, count
, offp
, buf
, out_offset
);
558 static const struct file_operations ntb_qp_debugfs_stats
= {
559 .owner
= THIS_MODULE
,
561 .read
= debugfs_read
,
564 static void ntb_list_add(spinlock_t
*lock
, struct list_head
*entry
,
565 struct list_head
*list
)
569 spin_lock_irqsave(lock
, flags
);
570 list_add_tail(entry
, list
);
571 spin_unlock_irqrestore(lock
, flags
);
574 static struct ntb_queue_entry
*ntb_list_rm(spinlock_t
*lock
,
575 struct list_head
*list
)
577 struct ntb_queue_entry
*entry
;
580 spin_lock_irqsave(lock
, flags
);
581 if (list_empty(list
)) {
585 entry
= list_first_entry(list
, struct ntb_queue_entry
, entry
);
586 list_del(&entry
->entry
);
589 spin_unlock_irqrestore(lock
, flags
);
594 static struct ntb_queue_entry
*ntb_list_mv(spinlock_t
*lock
,
595 struct list_head
*list
,
596 struct list_head
*to_list
)
598 struct ntb_queue_entry
*entry
;
601 spin_lock_irqsave(lock
, flags
);
603 if (list_empty(list
)) {
606 entry
= list_first_entry(list
, struct ntb_queue_entry
, entry
);
607 list_move_tail(&entry
->entry
, to_list
);
610 spin_unlock_irqrestore(lock
, flags
);
615 static int ntb_transport_setup_qp_mw(struct ntb_transport_ctx
*nt
,
618 struct ntb_transport_qp
*qp
= &nt
->qp_vec
[qp_num
];
619 struct ntb_transport_mw
*mw
;
620 struct ntb_dev
*ndev
= nt
->ndev
;
621 struct ntb_queue_entry
*entry
;
622 unsigned int rx_size
, num_qps_mw
;
623 unsigned int mw_num
, mw_count
, qp_count
;
627 mw_count
= nt
->mw_count
;
628 qp_count
= nt
->qp_count
;
630 mw_num
= QP_TO_MW(nt
, qp_num
);
631 mw
= &nt
->mw_vec
[mw_num
];
636 if (mw_num
< qp_count
% mw_count
)
637 num_qps_mw
= qp_count
/ mw_count
+ 1;
639 num_qps_mw
= qp_count
/ mw_count
;
641 rx_size
= (unsigned int)mw
->xlat_size
/ num_qps_mw
;
642 qp
->rx_buff
= mw
->virt_addr
+ rx_size
* (qp_num
/ mw_count
);
643 rx_size
-= sizeof(struct ntb_rx_info
);
645 qp
->remote_rx_info
= qp
->rx_buff
+ rx_size
;
/* Due to housekeeping, there must be at least 2 buffs */
648 qp
->rx_max_frame
= min(transport_mtu
, rx_size
/ 2);
649 qp
->rx_max_entry
= rx_size
/ qp
->rx_max_frame
;
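/*
 * Worked example (numbers are illustrative only): with the default
 * transport_mtu of 64 KiB and a 1 MiB per-queue slice (minus the trailing
 * struct ntb_rx_info), rx_max_frame is 64 KiB and rx_max_entry works out
 * to 15 complete frames.  Each frame keeps its struct ntb_payload_header
 * in its last bytes, which is why the header-initialisation loop further
 * down clears the end, not the start, of every frame.
 */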
653 * Checking to see if we have more entries than the default.
654 * We should add additional entries if that is the case so we
655 * can be in sync with the transport frames.
657 node
= dev_to_node(&ndev
->dev
);
658 for (i
= qp
->rx_alloc_entry
; i
< qp
->rx_max_entry
; i
++) {
659 entry
= kzalloc_node(sizeof(*entry
), GFP_KERNEL
, node
);
664 ntb_list_add(&qp
->ntb_rx_q_lock
, &entry
->entry
,
666 qp
->rx_alloc_entry
++;
669 qp
->remote_rx_info
->entry
= qp
->rx_max_entry
- 1;
671 /* setup the hdr offsets with 0's */
672 for (i
= 0; i
< qp
->rx_max_entry
; i
++) {
673 void *offset
= (qp
->rx_buff
+ qp
->rx_max_frame
* (i
+ 1) -
674 sizeof(struct ntb_payload_header
));
675 memset(offset
, 0, sizeof(struct ntb_payload_header
));
685 static irqreturn_t
ntb_transport_isr(int irq
, void *dev
)
687 struct ntb_transport_qp
*qp
= dev
;
689 tasklet_schedule(&qp
->rxc_db_work
);
694 static void ntb_transport_setup_qp_peer_msi(struct ntb_transport_ctx
*nt
,
697 struct ntb_transport_qp
*qp
= &nt
->qp_vec
[qp_num
];
698 int spad
= qp_num
* 2 + nt
->msi_spad_offset
;
703 if (spad
>= ntb_spad_count(nt
->ndev
))
706 qp
->peer_msi_desc
.addr_offset
=
707 ntb_peer_spad_read(qp
->ndev
, PIDX
, spad
);
708 qp
->peer_msi_desc
.data
=
709 ntb_peer_spad_read(qp
->ndev
, PIDX
, spad
+ 1);
711 dev_dbg(&qp
->ndev
->pdev
->dev
, "QP%d Peer MSI addr=%x data=%x\n",
712 qp_num
, qp
->peer_msi_desc
.addr_offset
, qp
->peer_msi_desc
.data
);
714 if (qp
->peer_msi_desc
.addr_offset
) {
716 dev_info(&qp
->ndev
->pdev
->dev
,
717 "Using MSI interrupts for QP%d\n", qp_num
);
721 static void ntb_transport_setup_qp_msi(struct ntb_transport_ctx
*nt
,
724 struct ntb_transport_qp
*qp
= &nt
->qp_vec
[qp_num
];
725 int spad
= qp_num
* 2 + nt
->msi_spad_offset
;
731 if (spad
>= ntb_spad_count(nt
->ndev
)) {
732 dev_warn_once(&qp
->ndev
->pdev
->dev
,
733 "Not enough SPADS to use MSI interrupts\n");
737 ntb_spad_write(qp
->ndev
, spad
, 0);
738 ntb_spad_write(qp
->ndev
, spad
+ 1, 0);
741 qp
->msi_irq
= ntbm_msi_request_irq(qp
->ndev
, ntb_transport_isr
,
744 if (qp
->msi_irq
< 0) {
745 dev_warn(&qp
->ndev
->pdev
->dev
,
746 "Unable to allocate MSI interrupt for qp%d\n",
752 rc
= ntb_spad_write(qp
->ndev
, spad
, qp
->msi_desc
.addr_offset
);
754 goto err_free_interrupt
;
756 rc
= ntb_spad_write(qp
->ndev
, spad
+ 1, qp
->msi_desc
.data
);
758 goto err_free_interrupt
;
760 dev_dbg(&qp
->ndev
->pdev
->dev
, "QP%d MSI %d addr=%x data=%x\n",
761 qp_num
, qp
->msi_irq
, qp
->msi_desc
.addr_offset
,
767 devm_free_irq(&nt
->ndev
->dev
, qp
->msi_irq
, qp
);
770 static void ntb_transport_msi_peer_desc_changed(struct ntb_transport_ctx
*nt
)
774 dev_dbg(&nt
->ndev
->pdev
->dev
, "Peer MSI descriptors changed");
776 for (i
= 0; i
< nt
->qp_count
; i
++)
777 ntb_transport_setup_qp_peer_msi(nt
, i
);
780 static void ntb_transport_msi_desc_changed(void *data
)
782 struct ntb_transport_ctx
*nt
= data
;
785 dev_dbg(&nt
->ndev
->pdev
->dev
, "MSI descriptors changed");
787 for (i
= 0; i
< nt
->qp_count
; i
++)
788 ntb_transport_setup_qp_msi(nt
, i
);
790 ntb_peer_db_set(nt
->ndev
, nt
->msi_db_mask
);
793 static void ntb_free_mw(struct ntb_transport_ctx
*nt
, int num_mw
)
795 struct ntb_transport_mw
*mw
= &nt
->mw_vec
[num_mw
];
796 struct pci_dev
*pdev
= nt
->ndev
->pdev
;
801 ntb_mw_clear_trans(nt
->ndev
, PIDX
, num_mw
);
802 dma_free_coherent(&pdev
->dev
, mw
->alloc_size
,
803 mw
->alloc_addr
, mw
->dma_addr
);
807 mw
->alloc_addr
= NULL
;
808 mw
->virt_addr
= NULL
;
811 static int ntb_alloc_mw_buffer(struct ntb_transport_mw
*mw
,
812 struct device
*ntb_dev
, size_t align
)
815 void *alloc_addr
, *virt_addr
;
819 * The buffer here is allocated against the NTB device. The reason to
820 * use dma_alloc_*() call is to allocate a large IOVA contiguous buffer
821 * backing the NTB BAR for the remote host to write to. During receive
822 * processing, the data is being copied out of the receive buffer to
823 * the kernel skbuff. When a DMA device is being used, dma_map_page()
824 * is called on the kvaddr of the receive buffer (from dma_alloc_*())
825 * and remapped against the DMA device. It appears to be a double
826 * DMA mapping of buffers, but first is mapped to the NTB device and
827 * second is to the DMA device. DMA_ATTR_FORCE_CONTIGUOUS is necessary
828 * in order for the later dma_map_page() to not fail.
830 alloc_addr
= dma_alloc_attrs(ntb_dev
, mw
->alloc_size
,
831 &dma_addr
, GFP_KERNEL
,
832 DMA_ATTR_FORCE_CONTIGUOUS
);
834 dev_err(ntb_dev
, "Unable to alloc MW buff of size %zu\n",
838 virt_addr
= alloc_addr
;
841 * we must ensure that the memory address allocated is BAR size
842 * aligned in order for the XLAT register to take the value. This
843 * is a requirement of the hardware. It is recommended to setup CMA
844 * for BAR sizes equal or greater than 4MB.
846 if (!IS_ALIGNED(dma_addr
, align
)) {
847 if (mw
->alloc_size
> mw
->buff_size
) {
848 virt_addr
= PTR_ALIGN(alloc_addr
, align
);
849 dma_addr
= ALIGN(dma_addr
, align
);
856 mw
->alloc_addr
= alloc_addr
;
857 mw
->virt_addr
= virt_addr
;
858 mw
->dma_addr
= dma_addr
;
863 dma_free_coherent(ntb_dev
, mw
->alloc_size
, alloc_addr
, dma_addr
);
868 static int ntb_set_mw(struct ntb_transport_ctx
*nt
, int num_mw
,
869 resource_size_t size
)
871 struct ntb_transport_mw
*mw
= &nt
->mw_vec
[num_mw
];
872 struct pci_dev
*pdev
= nt
->ndev
->pdev
;
873 size_t xlat_size
, buff_size
;
874 resource_size_t xlat_align
;
875 resource_size_t xlat_align_size
;
881 rc
= ntb_mw_get_align(nt
->ndev
, PIDX
, num_mw
, &xlat_align
,
882 &xlat_align_size
, NULL
);
886 xlat_size
= round_up(size
, xlat_align_size
);
887 buff_size
= round_up(size
, xlat_align
);
889 /* No need to re-setup */
890 if (mw
->xlat_size
== xlat_size
)
894 ntb_free_mw(nt
, num_mw
);
896 /* Alloc memory for receiving data. Must be aligned */
897 mw
->xlat_size
= xlat_size
;
898 mw
->buff_size
= buff_size
;
899 mw
->alloc_size
= buff_size
;
901 rc
= ntb_alloc_mw_buffer(mw
, &pdev
->dev
, xlat_align
);
904 rc
= ntb_alloc_mw_buffer(mw
, &pdev
->dev
, xlat_align
);
907 "Unable to alloc aligned MW buff\n");
915 /* Notify HW the memory location of the receive buffer */
916 rc
= ntb_mw_set_trans(nt
->ndev
, PIDX
, num_mw
, mw
->dma_addr
,
919 dev_err(&pdev
->dev
, "Unable to set mw%d translation", num_mw
);
920 ntb_free_mw(nt
, num_mw
);
927 static void ntb_qp_link_context_reset(struct ntb_transport_qp
*qp
)
929 qp
->link_is_up
= false;
936 qp
->rx_ring_empty
= 0;
937 qp
->rx_err_no_buf
= 0;
938 qp
->rx_err_oflow
= 0;
944 qp
->tx_ring_full
= 0;
945 qp
->tx_err_no_buf
= 0;
950 static void ntb_qp_link_down_reset(struct ntb_transport_qp
*qp
)
952 ntb_qp_link_context_reset(qp
);
953 if (qp
->remote_rx_info
)
954 qp
->remote_rx_info
->entry
= qp
->rx_max_entry
- 1;
957 static void ntb_qp_link_cleanup(struct ntb_transport_qp
*qp
)
959 struct ntb_transport_ctx
*nt
= qp
->transport
;
960 struct pci_dev
*pdev
= nt
->ndev
->pdev
;
962 dev_info(&pdev
->dev
, "qp %d: Link Cleanup\n", qp
->qp_num
);
964 cancel_delayed_work_sync(&qp
->link_work
);
965 ntb_qp_link_down_reset(qp
);
967 if (qp
->event_handler
)
968 qp
->event_handler(qp
->cb_data
, qp
->link_is_up
);
971 static void ntb_qp_link_cleanup_work(struct work_struct
*work
)
973 struct ntb_transport_qp
*qp
= container_of(work
,
974 struct ntb_transport_qp
,
976 struct ntb_transport_ctx
*nt
= qp
->transport
;
978 ntb_qp_link_cleanup(qp
);
981 schedule_delayed_work(&qp
->link_work
,
982 msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT
));
985 static void ntb_qp_link_down(struct ntb_transport_qp
*qp
)
987 schedule_work(&qp
->link_cleanup
);
990 static void ntb_transport_link_cleanup(struct ntb_transport_ctx
*nt
)
992 struct ntb_transport_qp
*qp
;
994 unsigned int i
, count
;
996 qp_bitmap_alloc
= nt
->qp_bitmap
& ~nt
->qp_bitmap_free
;
998 /* Pass along the info to any clients */
999 for (i
= 0; i
< nt
->qp_count
; i
++)
1000 if (qp_bitmap_alloc
& BIT_ULL(i
)) {
1001 qp
= &nt
->qp_vec
[i
];
1002 ntb_qp_link_cleanup(qp
);
1003 cancel_work_sync(&qp
->link_cleanup
);
1004 cancel_delayed_work_sync(&qp
->link_work
);
1007 if (!nt
->link_is_up
)
1008 cancel_delayed_work_sync(&nt
->link_work
);
1010 for (i
= 0; i
< nt
->mw_count
; i
++)
1013 /* The scratchpad registers keep the values if the remote side
1014 * goes down, blast them now to give them a sane value the next
1015 * time they are accessed
1017 count
= ntb_spad_count(nt
->ndev
);
1018 for (i
= 0; i
< count
; i
++)
1019 ntb_spad_write(nt
->ndev
, i
, 0);
1022 static void ntb_transport_link_cleanup_work(struct work_struct
*work
)
1024 struct ntb_transport_ctx
*nt
=
1025 container_of(work
, struct ntb_transport_ctx
, link_cleanup
);
1027 ntb_transport_link_cleanup(nt
);
1030 static void ntb_transport_event_callback(void *data
)
1032 struct ntb_transport_ctx
*nt
= data
;
1034 if (ntb_link_is_up(nt
->ndev
, NULL
, NULL
) == 1)
1035 schedule_delayed_work(&nt
->link_work
, 0);
1037 schedule_work(&nt
->link_cleanup
);
1040 static void ntb_transport_link_work(struct work_struct
*work
)
1042 struct ntb_transport_ctx
*nt
=
1043 container_of(work
, struct ntb_transport_ctx
, link_work
.work
);
1044 struct ntb_dev
*ndev
= nt
->ndev
;
1045 struct pci_dev
*pdev
= ndev
->pdev
;
1046 resource_size_t size
;
1048 int rc
= 0, i
, spad
;
1050 /* send the local info, in the opposite order of the way we read it */
1053 rc
= ntb_msi_setup_mws(ndev
);
1055 dev_warn(&pdev
->dev
,
1056 "Failed to register MSI memory window: %d\n",
1058 nt
->use_msi
= false;
1062 for (i
= 0; i
< nt
->qp_count
; i
++)
1063 ntb_transport_setup_qp_msi(nt
, i
);
1065 for (i
= 0; i
< nt
->mw_count
; i
++) {
1066 size
= nt
->mw_vec
[i
].phys_size
;
1068 if (max_mw_size
&& size
> max_mw_size
)
1071 spad
= MW0_SZ_HIGH
+ (i
* 2);
1072 ntb_peer_spad_write(ndev
, PIDX
, spad
, upper_32_bits(size
));
1074 spad
= MW0_SZ_LOW
+ (i
* 2);
1075 ntb_peer_spad_write(ndev
, PIDX
, spad
, lower_32_bits(size
));
1078 ntb_peer_spad_write(ndev
, PIDX
, NUM_MWS
, nt
->mw_count
);
1080 ntb_peer_spad_write(ndev
, PIDX
, NUM_QPS
, nt
->qp_count
);
1082 ntb_peer_spad_write(ndev
, PIDX
, VERSION
, NTB_TRANSPORT_VERSION
);
1084 /* Query the remote side for its info */
1085 val
= ntb_spad_read(ndev
, VERSION
);
1086 dev_dbg(&pdev
->dev
, "Remote version = %d\n", val
);
1087 if (val
!= NTB_TRANSPORT_VERSION
)
1090 val
= ntb_spad_read(ndev
, NUM_QPS
);
1091 dev_dbg(&pdev
->dev
, "Remote max number of qps = %d\n", val
);
1092 if (val
!= nt
->qp_count
)
1095 val
= ntb_spad_read(ndev
, NUM_MWS
);
1096 dev_dbg(&pdev
->dev
, "Remote number of mws = %d\n", val
);
1097 if (val
!= nt
->mw_count
)
1100 for (i
= 0; i
< nt
->mw_count
; i
++) {
1103 val
= ntb_spad_read(ndev
, MW0_SZ_HIGH
+ (i
* 2));
1104 val64
= (u64
)val
<< 32;
1106 val
= ntb_spad_read(ndev
, MW0_SZ_LOW
+ (i
* 2));
1109 dev_dbg(&pdev
->dev
, "Remote MW%d size = %#llx\n", i
, val64
);
1111 rc
= ntb_set_mw(nt
, i
, val64
);
1116 nt
->link_is_up
= true;
1118 for (i
= 0; i
< nt
->qp_count
; i
++) {
1119 struct ntb_transport_qp
*qp
= &nt
->qp_vec
[i
];
1121 ntb_transport_setup_qp_mw(nt
, i
);
1122 ntb_transport_setup_qp_peer_msi(nt
, i
);
1124 if (qp
->client_ready
)
1125 schedule_delayed_work(&qp
->link_work
, 0);
1131 for (i
= 0; i
< nt
->mw_count
; i
++)
1134 /* if there's an actual failure, we should just bail */
1139 if (ntb_link_is_up(ndev
, NULL
, NULL
) == 1)
1140 schedule_delayed_work(&nt
->link_work
,
1141 msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT
));
1144 static void ntb_qp_link_work(struct work_struct
*work
)
1146 struct ntb_transport_qp
*qp
= container_of(work
,
1147 struct ntb_transport_qp
,
1149 struct pci_dev
*pdev
= qp
->ndev
->pdev
;
1150 struct ntb_transport_ctx
*nt
= qp
->transport
;
1153 WARN_ON(!nt
->link_is_up
);
1155 val
= ntb_spad_read(nt
->ndev
, QP_LINKS
);
1157 ntb_peer_spad_write(nt
->ndev
, PIDX
, QP_LINKS
, val
| BIT(qp
->qp_num
));
1159 /* query remote spad for qp ready bits */
1160 dev_dbg_ratelimited(&pdev
->dev
, "Remote QP link status = %x\n", val
);
1162 /* See if the remote side is up */
1163 if (val
& BIT(qp
->qp_num
)) {
1164 dev_info(&pdev
->dev
, "qp %d: Link Up\n", qp
->qp_num
);
1165 qp
->link_is_up
= true;
1168 if (qp
->event_handler
)
1169 qp
->event_handler(qp
->cb_data
, qp
->link_is_up
);
1172 tasklet_schedule(&qp
->rxc_db_work
);
1173 } else if (nt
->link_is_up
)
1174 schedule_delayed_work(&qp
->link_work
,
1175 msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT
));
1178 static int ntb_transport_init_queue(struct ntb_transport_ctx
*nt
,
1179 unsigned int qp_num
)
1181 struct ntb_transport_qp
*qp
;
1182 phys_addr_t mw_base
;
1183 resource_size_t mw_size
;
1184 unsigned int num_qps_mw
, tx_size
;
1185 unsigned int mw_num
, mw_count
, qp_count
;
1188 mw_count
= nt
->mw_count
;
1189 qp_count
= nt
->qp_count
;
1191 mw_num
= QP_TO_MW(nt
, qp_num
);
1193 qp
= &nt
->qp_vec
[qp_num
];
1194 qp
->qp_num
= qp_num
;
1196 qp
->ndev
= nt
->ndev
;
1197 qp
->client_ready
= false;
1198 qp
->event_handler
= NULL
;
1199 ntb_qp_link_context_reset(qp
);
1201 if (mw_num
< qp_count
% mw_count
)
1202 num_qps_mw
= qp_count
/ mw_count
+ 1;
1204 num_qps_mw
= qp_count
/ mw_count
;
1206 mw_base
= nt
->mw_vec
[mw_num
].phys_addr
;
1207 mw_size
= nt
->mw_vec
[mw_num
].phys_size
;
1209 if (max_mw_size
&& mw_size
> max_mw_size
)
1210 mw_size
= max_mw_size
;
1212 tx_size
= (unsigned int)mw_size
/ num_qps_mw
;
1213 qp_offset
= tx_size
* (qp_num
/ mw_count
);
1215 qp
->tx_mw_size
= tx_size
;
1216 qp
->tx_mw
= nt
->mw_vec
[mw_num
].vbase
+ qp_offset
;
1220 qp
->tx_mw_phys
= mw_base
+ qp_offset
;
1221 if (!qp
->tx_mw_phys
)
1224 tx_size
-= sizeof(struct ntb_rx_info
);
1225 qp
->rx_info
= qp
->tx_mw
+ tx_size
;
/* Due to housekeeping, there must be at least 2 buffs */
1228 qp
->tx_max_frame
= min(transport_mtu
, tx_size
/ 2);
1229 qp
->tx_max_entry
= tx_size
/ qp
->tx_max_frame
;
1231 if (nt
->debugfs_node_dir
) {
1232 char debugfs_name
[4];
1234 snprintf(debugfs_name
, 4, "qp%d", qp_num
);
1235 qp
->debugfs_dir
= debugfs_create_dir(debugfs_name
,
1236 nt
->debugfs_node_dir
);
1238 qp
->debugfs_stats
= debugfs_create_file("stats", S_IRUSR
,
1239 qp
->debugfs_dir
, qp
,
1240 &ntb_qp_debugfs_stats
);
1242 qp
->debugfs_dir
= NULL
;
1243 qp
->debugfs_stats
= NULL
;
1246 INIT_DELAYED_WORK(&qp
->link_work
, ntb_qp_link_work
);
1247 INIT_WORK(&qp
->link_cleanup
, ntb_qp_link_cleanup_work
);
1249 spin_lock_init(&qp
->ntb_rx_q_lock
);
1250 spin_lock_init(&qp
->ntb_tx_free_q_lock
);
1252 INIT_LIST_HEAD(&qp
->rx_post_q
);
1253 INIT_LIST_HEAD(&qp
->rx_pend_q
);
1254 INIT_LIST_HEAD(&qp
->rx_free_q
);
1255 INIT_LIST_HEAD(&qp
->tx_free_q
);
1257 tasklet_init(&qp
->rxc_db_work
, ntb_transport_rxc_db
,
1263 static int ntb_transport_probe(struct ntb_client
*self
, struct ntb_dev
*ndev
)
1265 struct ntb_transport_ctx
*nt
;
1266 struct ntb_transport_mw
*mw
;
1267 unsigned int mw_count
, qp_count
, spad_count
, max_mw_count_for_spads
;
1272 mw_count
= ntb_peer_mw_count(ndev
);
1274 if (!ndev
->ops
->mw_set_trans
) {
1275 dev_err(&ndev
->dev
, "Inbound MW based NTB API is required\n");
1279 if (ntb_db_is_unsafe(ndev
))
1281 "doorbell is unsafe, proceed anyway...\n");
1282 if (ntb_spad_is_unsafe(ndev
))
1284 "scratchpad is unsafe, proceed anyway...\n");
1286 if (ntb_peer_port_count(ndev
) != NTB_DEF_PEER_CNT
)
1287 dev_warn(&ndev
->dev
, "Multi-port NTB devices unsupported\n");
1289 node
= dev_to_node(&ndev
->dev
);
1291 nt
= kzalloc_node(sizeof(*nt
), GFP_KERNEL
, node
);
1298 * If we are using MSI, and have at least one extra memory window,
1299 * we will reserve the last MW for the MSI window.
1301 if (use_msi
&& mw_count
> 1) {
1302 rc
= ntb_msi_init(ndev
, ntb_transport_msi_desc_changed
);
1309 spad_count
= ntb_spad_count(ndev
);
1311 /* Limit the MW's based on the availability of scratchpads */
1313 if (spad_count
< NTB_TRANSPORT_MIN_SPADS
) {
1319 max_mw_count_for_spads
= (spad_count
- MW0_SZ_HIGH
) / 2;
1320 nt
->mw_count
= min(mw_count
, max_mw_count_for_spads
);
1322 nt
->msi_spad_offset
= nt
->mw_count
* 2 + MW0_SZ_HIGH
;
1324 nt
->mw_vec
= kcalloc_node(mw_count
, sizeof(*nt
->mw_vec
),
1331 for (i
= 0; i
< mw_count
; i
++) {
1332 mw
= &nt
->mw_vec
[i
];
1334 rc
= ntb_peer_mw_get_addr(ndev
, i
, &mw
->phys_addr
,
1339 mw
->vbase
= ioremap_wc(mw
->phys_addr
, mw
->phys_size
);
1347 mw
->virt_addr
= NULL
;
1351 qp_bitmap
= ntb_db_valid_mask(ndev
);
1353 qp_count
= ilog2(qp_bitmap
);
1356 nt
->msi_db_mask
= 1 << qp_count
;
1357 ntb_db_clear_mask(ndev
, nt
->msi_db_mask
);
1360 if (max_num_clients
&& max_num_clients
< qp_count
)
1361 qp_count
= max_num_clients
;
1362 else if (nt
->mw_count
< qp_count
)
1363 qp_count
= nt
->mw_count
;
1365 qp_bitmap
&= BIT_ULL(qp_count
) - 1;
1367 nt
->qp_count
= qp_count
;
1368 nt
->qp_bitmap
= qp_bitmap
;
1369 nt
->qp_bitmap_free
= qp_bitmap
;
1371 nt
->qp_vec
= kcalloc_node(qp_count
, sizeof(*nt
->qp_vec
),
1378 if (nt_debugfs_dir
) {
1379 nt
->debugfs_node_dir
=
1380 debugfs_create_dir(pci_name(ndev
->pdev
),
1384 for (i
= 0; i
< qp_count
; i
++) {
1385 rc
= ntb_transport_init_queue(nt
, i
);
1390 INIT_DELAYED_WORK(&nt
->link_work
, ntb_transport_link_work
);
1391 INIT_WORK(&nt
->link_cleanup
, ntb_transport_link_cleanup_work
);
1393 rc
= ntb_set_ctx(ndev
, nt
, &ntb_transport_ops
);
1397 INIT_LIST_HEAD(&nt
->client_devs
);
1398 rc
= ntb_bus_init(nt
);
1402 nt
->link_is_up
= false;
1403 ntb_link_enable(ndev
, NTB_SPEED_AUTO
, NTB_WIDTH_AUTO
);
1404 ntb_link_event(ndev
);
1409 ntb_clear_ctx(ndev
);
1414 mw
= &nt
->mw_vec
[i
];
1423 static void ntb_transport_free(struct ntb_client
*self
, struct ntb_dev
*ndev
)
1425 struct ntb_transport_ctx
*nt
= ndev
->ctx
;
1426 struct ntb_transport_qp
*qp
;
1427 u64 qp_bitmap_alloc
;
1430 ntb_transport_link_cleanup(nt
);
1431 cancel_work_sync(&nt
->link_cleanup
);
1432 cancel_delayed_work_sync(&nt
->link_work
);
1434 qp_bitmap_alloc
= nt
->qp_bitmap
& ~nt
->qp_bitmap_free
;
1436 /* verify that all the qp's are freed */
1437 for (i
= 0; i
< nt
->qp_count
; i
++) {
1438 qp
= &nt
->qp_vec
[i
];
1439 if (qp_bitmap_alloc
& BIT_ULL(i
))
1440 ntb_transport_free_queue(qp
);
1441 debugfs_remove_recursive(qp
->debugfs_dir
);
1444 ntb_link_disable(ndev
);
1445 ntb_clear_ctx(ndev
);
1449 for (i
= nt
->mw_count
; i
--; ) {
1451 iounmap(nt
->mw_vec
[i
].vbase
);
1459 static void ntb_complete_rxc(struct ntb_transport_qp
*qp
)
1461 struct ntb_queue_entry
*entry
;
1464 unsigned long irqflags
;
1466 spin_lock_irqsave(&qp
->ntb_rx_q_lock
, irqflags
);
1468 while (!list_empty(&qp
->rx_post_q
)) {
1469 entry
= list_first_entry(&qp
->rx_post_q
,
1470 struct ntb_queue_entry
, entry
);
1471 if (!(entry
->flags
& DESC_DONE_FLAG
))
1474 entry
->rx_hdr
->flags
= 0;
1475 iowrite32(entry
->rx_index
, &qp
->rx_info
->entry
);
1477 cb_data
= entry
->cb_data
;
1480 list_move_tail(&entry
->entry
, &qp
->rx_free_q
);
1482 spin_unlock_irqrestore(&qp
->ntb_rx_q_lock
, irqflags
);
1484 if (qp
->rx_handler
&& qp
->client_ready
)
1485 qp
->rx_handler(qp
, qp
->cb_data
, cb_data
, len
);
1487 spin_lock_irqsave(&qp
->ntb_rx_q_lock
, irqflags
);
1490 spin_unlock_irqrestore(&qp
->ntb_rx_q_lock
, irqflags
);
1493 static void ntb_rx_copy_callback(void *data
,
1494 const struct dmaengine_result
*res
)
1496 struct ntb_queue_entry
*entry
= data
;
1498 /* we need to check DMA results if we are using DMA */
1500 enum dmaengine_tx_result dma_err
= res
->result
;
1503 case DMA_TRANS_READ_FAILED
:
1504 case DMA_TRANS_WRITE_FAILED
:
1507 case DMA_TRANS_ABORTED
:
1509 struct ntb_transport_qp
*qp
= entry
->qp
;
1510 void *offset
= qp
->rx_buff
+ qp
->rx_max_frame
*
1513 ntb_memcpy_rx(entry
, offset
);
1518 case DMA_TRANS_NOERROR
:
1524 entry
->flags
|= DESC_DONE_FLAG
;
1526 ntb_complete_rxc(entry
->qp
);
1529 static void ntb_memcpy_rx(struct ntb_queue_entry
*entry
, void *offset
)
1531 void *buf
= entry
->buf
;
1532 size_t len
= entry
->len
;
1534 memcpy(buf
, offset
, len
);
1536 /* Ensure that the data is fully copied out before clearing the flag */
1539 ntb_rx_copy_callback(entry
, NULL
);
1542 static int ntb_async_rx_submit(struct ntb_queue_entry
*entry
, void *offset
)
1544 struct dma_async_tx_descriptor
*txd
;
1545 struct ntb_transport_qp
*qp
= entry
->qp
;
1546 struct dma_chan
*chan
= qp
->rx_dma_chan
;
1547 struct dma_device
*device
;
1548 size_t pay_off
, buff_off
, len
;
1549 struct dmaengine_unmap_data
*unmap
;
1550 dma_cookie_t cookie
;
1551 void *buf
= entry
->buf
;
1554 device
= chan
->device
;
1555 pay_off
= (size_t)offset
& ~PAGE_MASK
;
1556 buff_off
= (size_t)buf
& ~PAGE_MASK
;
1558 if (!is_dma_copy_aligned(device
, pay_off
, buff_off
, len
))
1561 unmap
= dmaengine_get_unmap_data(device
->dev
, 2, GFP_NOWAIT
);
1566 unmap
->addr
[0] = dma_map_page(device
->dev
, virt_to_page(offset
),
1567 pay_off
, len
, DMA_TO_DEVICE
);
1568 if (dma_mapping_error(device
->dev
, unmap
->addr
[0]))
1573 unmap
->addr
[1] = dma_map_page(device
->dev
, virt_to_page(buf
),
1574 buff_off
, len
, DMA_FROM_DEVICE
);
1575 if (dma_mapping_error(device
->dev
, unmap
->addr
[1]))
1578 unmap
->from_cnt
= 1;
1580 txd
= device
->device_prep_dma_memcpy(chan
, unmap
->addr
[1],
1581 unmap
->addr
[0], len
,
1582 DMA_PREP_INTERRUPT
);
1586 txd
->callback_result
= ntb_rx_copy_callback
;
1587 txd
->callback_param
= entry
;
1588 dma_set_unmap(txd
, unmap
);
1590 cookie
= dmaengine_submit(txd
);
1591 if (dma_submit_error(cookie
))
1594 dmaengine_unmap_put(unmap
);
1596 qp
->last_cookie
= cookie
;
1603 dmaengine_unmap_put(unmap
);
1605 dmaengine_unmap_put(unmap
);
1610 static void ntb_async_rx(struct ntb_queue_entry
*entry
, void *offset
)
1612 struct ntb_transport_qp
*qp
= entry
->qp
;
1613 struct dma_chan
*chan
= qp
->rx_dma_chan
;
1619 if (entry
->len
< copy_bytes
)
1622 res
= ntb_async_rx_submit(entry
, offset
);
1626 if (!entry
->retries
)
1632 ntb_memcpy_rx(entry
, offset
);
1636 static int ntb_process_rxc(struct ntb_transport_qp
*qp
)
1638 struct ntb_payload_header
*hdr
;
1639 struct ntb_queue_entry
*entry
;
1642 offset
= qp
->rx_buff
+ qp
->rx_max_frame
* qp
->rx_index
;
1643 hdr
= offset
+ qp
->rx_max_frame
- sizeof(struct ntb_payload_header
);
1645 dev_dbg(&qp
->ndev
->pdev
->dev
, "qp %d: RX ver %u len %d flags %x\n",
1646 qp
->qp_num
, hdr
->ver
, hdr
->len
, hdr
->flags
);
1648 if (!(hdr
->flags
& DESC_DONE_FLAG
)) {
1649 dev_dbg(&qp
->ndev
->pdev
->dev
, "done flag not set\n");
1650 qp
->rx_ring_empty
++;
1654 if (hdr
->flags
& LINK_DOWN_FLAG
) {
1655 dev_dbg(&qp
->ndev
->pdev
->dev
, "link down flag set\n");
1656 ntb_qp_link_down(qp
);
1661 if (hdr
->ver
!= (u32
)qp
->rx_pkts
) {
1662 dev_dbg(&qp
->ndev
->pdev
->dev
,
1663 "version mismatch, expected %llu - got %u\n",
1664 qp
->rx_pkts
, hdr
->ver
);
1669 entry
= ntb_list_mv(&qp
->ntb_rx_q_lock
, &qp
->rx_pend_q
, &qp
->rx_post_q
);
1671 dev_dbg(&qp
->ndev
->pdev
->dev
, "no receive buffer\n");
1672 qp
->rx_err_no_buf
++;
1676 entry
->rx_hdr
= hdr
;
1677 entry
->rx_index
= qp
->rx_index
;
1679 if (hdr
->len
> entry
->len
) {
1680 dev_dbg(&qp
->ndev
->pdev
->dev
,
1681 "receive buffer overflow! Wanted %d got %d\n",
1682 hdr
->len
, entry
->len
);
1686 entry
->flags
|= DESC_DONE_FLAG
;
1688 ntb_complete_rxc(qp
);
1690 dev_dbg(&qp
->ndev
->pdev
->dev
,
1691 "RX OK index %u ver %u size %d into buf size %d\n",
1692 qp
->rx_index
, hdr
->ver
, hdr
->len
, entry
->len
);
1694 qp
->rx_bytes
+= hdr
->len
;
1697 entry
->len
= hdr
->len
;
1699 ntb_async_rx(entry
, offset
);
1703 qp
->rx_index
%= qp
->rx_max_entry
;
1708 static void ntb_transport_rxc_db(unsigned long data
)
1710 struct ntb_transport_qp
*qp
= (void *)data
;
1713 dev_dbg(&qp
->ndev
->pdev
->dev
, "%s: doorbell %d received\n",
1714 __func__
, qp
->qp_num
);
1716 /* Limit the number of packets processed in a single interrupt to
1717 * provide fairness to others
1719 for (i
= 0; i
< qp
->rx_max_entry
; i
++) {
1720 rc
= ntb_process_rxc(qp
);
1725 if (i
&& qp
->rx_dma_chan
)
1726 dma_async_issue_pending(qp
->rx_dma_chan
);
1728 if (i
== qp
->rx_max_entry
) {
1729 /* there is more work to do */
1731 tasklet_schedule(&qp
->rxc_db_work
);
1732 } else if (ntb_db_read(qp
->ndev
) & BIT_ULL(qp
->qp_num
)) {
1733 /* the doorbell bit is set: clear it */
1734 ntb_db_clear(qp
->ndev
, BIT_ULL(qp
->qp_num
));
1735 /* ntb_db_read ensures ntb_db_clear write is committed */
1736 ntb_db_read(qp
->ndev
);
1738 /* an interrupt may have arrived between finishing
1739 * ntb_process_rxc and clearing the doorbell bit:
1740 * there might be some more work to do.
1743 tasklet_schedule(&qp
->rxc_db_work
);
1747 static void ntb_tx_copy_callback(void *data
,
1748 const struct dmaengine_result
*res
)
1750 struct ntb_queue_entry
*entry
= data
;
1751 struct ntb_transport_qp
*qp
= entry
->qp
;
1752 struct ntb_payload_header __iomem
*hdr
= entry
->tx_hdr
;
1754 /* we need to check DMA results if we are using DMA */
1756 enum dmaengine_tx_result dma_err
= res
->result
;
1759 case DMA_TRANS_READ_FAILED
:
1760 case DMA_TRANS_WRITE_FAILED
:
1763 case DMA_TRANS_ABORTED
:
1765 void __iomem
*offset
=
1766 qp
->tx_mw
+ qp
->tx_max_frame
*
1769 /* resubmit via CPU */
1770 ntb_memcpy_tx(entry
, offset
);
1775 case DMA_TRANS_NOERROR
:
1781 iowrite32(entry
->flags
| DESC_DONE_FLAG
, &hdr
->flags
);
1784 ntb_msi_peer_trigger(qp
->ndev
, PIDX
, &qp
->peer_msi_desc
);
1786 ntb_peer_db_set(qp
->ndev
, BIT_ULL(qp
->qp_num
));
1788 /* The entry length can only be zero if the packet is intended to be a
1789 * "link down" or similar. Since no payload is being sent in these
1790 * cases, there is nothing to add to the completion queue.
1792 if (entry
->len
> 0) {
1793 qp
->tx_bytes
+= entry
->len
;
1796 qp
->tx_handler(qp
, qp
->cb_data
, entry
->cb_data
,
1800 ntb_list_add(&qp
->ntb_tx_free_q_lock
, &entry
->entry
, &qp
->tx_free_q
);
1803 static void ntb_memcpy_tx(struct ntb_queue_entry
*entry
, void __iomem
*offset
)
1805 #ifdef ARCH_HAS_NOCACHE_UACCESS
1807 * Using non-temporal mov to improve performance on non-cached
1808 * writes, even though we aren't actually copying from user space.
1810 __copy_from_user_inatomic_nocache(offset
, entry
->buf
, entry
->len
);
1812 memcpy_toio(offset
, entry
->buf
, entry
->len
);
1815 /* Ensure that the data is fully copied out before setting the flags */
1818 ntb_tx_copy_callback(entry
, NULL
);
1821 static int ntb_async_tx_submit(struct ntb_transport_qp
*qp
,
1822 struct ntb_queue_entry
*entry
)
1824 struct dma_async_tx_descriptor
*txd
;
1825 struct dma_chan
*chan
= qp
->tx_dma_chan
;
1826 struct dma_device
*device
;
1827 size_t len
= entry
->len
;
1828 void *buf
= entry
->buf
;
1829 size_t dest_off
, buff_off
;
1830 struct dmaengine_unmap_data
*unmap
;
1832 dma_cookie_t cookie
;
1834 device
= chan
->device
;
1835 dest
= qp
->tx_mw_dma_addr
+ qp
->tx_max_frame
* entry
->tx_index
;
1836 buff_off
= (size_t)buf
& ~PAGE_MASK
;
1837 dest_off
= (size_t)dest
& ~PAGE_MASK
;
1839 if (!is_dma_copy_aligned(device
, buff_off
, dest_off
, len
))
1842 unmap
= dmaengine_get_unmap_data(device
->dev
, 1, GFP_NOWAIT
);
1847 unmap
->addr
[0] = dma_map_page(device
->dev
, virt_to_page(buf
),
1848 buff_off
, len
, DMA_TO_DEVICE
);
1849 if (dma_mapping_error(device
->dev
, unmap
->addr
[0]))
1854 txd
= device
->device_prep_dma_memcpy(chan
, dest
, unmap
->addr
[0], len
,
1855 DMA_PREP_INTERRUPT
);
1859 txd
->callback_result
= ntb_tx_copy_callback
;
1860 txd
->callback_param
= entry
;
1861 dma_set_unmap(txd
, unmap
);
1863 cookie
= dmaengine_submit(txd
);
1864 if (dma_submit_error(cookie
))
1867 dmaengine_unmap_put(unmap
);
1869 dma_async_issue_pending(chan
);
1873 dmaengine_unmap_put(unmap
);
1875 dmaengine_unmap_put(unmap
);
1880 static void ntb_async_tx(struct ntb_transport_qp
*qp
,
1881 struct ntb_queue_entry
*entry
)
1883 struct ntb_payload_header __iomem
*hdr
;
1884 struct dma_chan
*chan
= qp
->tx_dma_chan
;
1885 void __iomem
*offset
;
1888 entry
->tx_index
= qp
->tx_index
;
1889 offset
= qp
->tx_mw
+ qp
->tx_max_frame
* entry
->tx_index
;
1890 hdr
= offset
+ qp
->tx_max_frame
- sizeof(struct ntb_payload_header
);
1891 entry
->tx_hdr
= hdr
;
1893 iowrite32(entry
->len
, &hdr
->len
);
1894 iowrite32((u32
)qp
->tx_pkts
, &hdr
->ver
);
1899 if (entry
->len
< copy_bytes
)
1902 res
= ntb_async_tx_submit(qp
, entry
);
1906 if (!entry
->retries
)
1912 ntb_memcpy_tx(entry
, offset
);
1916 static int ntb_process_tx(struct ntb_transport_qp
*qp
,
1917 struct ntb_queue_entry
*entry
)
1919 if (!ntb_transport_tx_free_entry(qp
)) {
1924 if (entry
->len
> qp
->tx_max_frame
- sizeof(struct ntb_payload_header
)) {
1926 qp
->tx_handler(qp
, qp
->cb_data
, NULL
, -EIO
);
1928 ntb_list_add(&qp
->ntb_tx_free_q_lock
, &entry
->entry
,
1933 ntb_async_tx(qp
, entry
);
1936 qp
->tx_index
%= qp
->tx_max_entry
;
1943 static void ntb_send_link_down(struct ntb_transport_qp
*qp
)
1945 struct pci_dev
*pdev
= qp
->ndev
->pdev
;
1946 struct ntb_queue_entry
*entry
;
1949 if (!qp
->link_is_up
)
1952 dev_info(&pdev
->dev
, "qp %d: Send Link Down\n", qp
->qp_num
);
1954 for (i
= 0; i
< NTB_LINK_DOWN_TIMEOUT
; i
++) {
1955 entry
= ntb_list_rm(&qp
->ntb_tx_free_q_lock
, &qp
->tx_free_q
);
1964 entry
->cb_data
= NULL
;
1967 entry
->flags
= LINK_DOWN_FLAG
;
1969 rc
= ntb_process_tx(qp
, entry
);
1971 dev_err(&pdev
->dev
, "ntb: QP%d unable to send linkdown msg\n",
1974 ntb_qp_link_down_reset(qp
);
1977 static bool ntb_dma_filter_fn(struct dma_chan
*chan
, void *node
)
1979 return dev_to_node(&chan
->dev
->device
) == (int)(unsigned long)node
;
1983 * ntb_transport_create_queue - Create a new NTB transport layer queue
1984 * @data: pointer for callback data
1985 * @client_dev: &struct device pointer
1986 * @handlers: pointer to various ntb queue (callback) handlers
1988 * Create a new NTB transport layer queue and provide the queue with a callback
1989 * routine for both transmit and receive. The receive callback routine will be
1990 * used to pass up data when the transport has received it on the queue. The
1991 * transmit callback routine will be called when the transport has completed the
1992 * transmission of the data on the queue and the data is ready to be freed.
1994 * RETURNS: pointer to newly created ntb_queue, NULL on error.
1996 struct ntb_transport_qp
*
1997 ntb_transport_create_queue(void *data
, struct device
*client_dev
,
1998 const struct ntb_queue_handlers
*handlers
)
2000 struct ntb_dev
*ndev
;
2001 struct pci_dev
*pdev
;
2002 struct ntb_transport_ctx
*nt
;
2003 struct ntb_queue_entry
*entry
;
2004 struct ntb_transport_qp
*qp
;
2006 unsigned int free_queue
;
2007 dma_cap_mask_t dma_mask
;
2011 ndev
= dev_ntb(client_dev
->parent
);
2015 node
= dev_to_node(&ndev
->dev
);
2017 free_queue
= ffs(nt
->qp_bitmap_free
);
2021 /* decrement free_queue to make it zero based */
2024 qp
= &nt
->qp_vec
[free_queue
];
2025 qp_bit
= BIT_ULL(qp
->qp_num
);
2027 nt
->qp_bitmap_free
&= ~qp_bit
;
2030 qp
->rx_handler
= handlers
->rx_handler
;
2031 qp
->tx_handler
= handlers
->tx_handler
;
2032 qp
->event_handler
= handlers
->event_handler
;
2034 dma_cap_zero(dma_mask
);
2035 dma_cap_set(DMA_MEMCPY
, dma_mask
);
2039 dma_request_channel(dma_mask
, ntb_dma_filter_fn
,
2040 (void *)(unsigned long)node
);
2041 if (!qp
->tx_dma_chan
)
2042 dev_info(&pdev
->dev
, "Unable to allocate TX DMA channel\n");
2045 dma_request_channel(dma_mask
, ntb_dma_filter_fn
,
2046 (void *)(unsigned long)node
);
2047 if (!qp
->rx_dma_chan
)
2048 dev_info(&pdev
->dev
, "Unable to allocate RX DMA channel\n");
2050 qp
->tx_dma_chan
= NULL
;
2051 qp
->rx_dma_chan
= NULL
;
2054 qp
->tx_mw_dma_addr
= 0;
2055 if (qp
->tx_dma_chan
) {
2056 qp
->tx_mw_dma_addr
=
2057 dma_map_resource(qp
->tx_dma_chan
->device
->dev
,
2058 qp
->tx_mw_phys
, qp
->tx_mw_size
,
2059 DMA_FROM_DEVICE
, 0);
2060 if (dma_mapping_error(qp
->tx_dma_chan
->device
->dev
,
2061 qp
->tx_mw_dma_addr
)) {
2062 qp
->tx_mw_dma_addr
= 0;
2067 dev_dbg(&pdev
->dev
, "Using %s memcpy for TX\n",
2068 qp
->tx_dma_chan
? "DMA" : "CPU");
2070 dev_dbg(&pdev
->dev
, "Using %s memcpy for RX\n",
2071 qp
->rx_dma_chan
? "DMA" : "CPU");
2073 for (i
= 0; i
< NTB_QP_DEF_NUM_ENTRIES
; i
++) {
2074 entry
= kzalloc_node(sizeof(*entry
), GFP_KERNEL
, node
);
2079 ntb_list_add(&qp
->ntb_rx_q_lock
, &entry
->entry
,
2082 qp
->rx_alloc_entry
= NTB_QP_DEF_NUM_ENTRIES
;
2084 for (i
= 0; i
< qp
->tx_max_entry
; i
++) {
2085 entry
= kzalloc_node(sizeof(*entry
), GFP_KERNEL
, node
);
2090 ntb_list_add(&qp
->ntb_tx_free_q_lock
, &entry
->entry
,
2094 ntb_db_clear(qp
->ndev
, qp_bit
);
2095 ntb_db_clear_mask(qp
->ndev
, qp_bit
);
2097 dev_info(&pdev
->dev
, "NTB Transport QP %d created\n", qp
->qp_num
);
2102 while ((entry
= ntb_list_rm(&qp
->ntb_tx_free_q_lock
, &qp
->tx_free_q
)))
2105 qp
->rx_alloc_entry
= 0;
2106 while ((entry
= ntb_list_rm(&qp
->ntb_rx_q_lock
, &qp
->rx_free_q
)))
2108 if (qp
->tx_mw_dma_addr
)
2109 dma_unmap_resource(qp
->tx_dma_chan
->device
->dev
,
2110 qp
->tx_mw_dma_addr
, qp
->tx_mw_size
,
2111 DMA_FROM_DEVICE
, 0);
2112 if (qp
->tx_dma_chan
)
2113 dma_release_channel(qp
->tx_dma_chan
);
2114 if (qp
->rx_dma_chan
)
2115 dma_release_channel(qp
->rx_dma_chan
);
2116 nt
->qp_bitmap_free
|= qp_bit
;
2120 EXPORT_SYMBOL_GPL(ntb_transport_create_queue
);
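/*
 * Typical client usage, sketched for illustration (the "my_*" handler and
 * buffer names are hypothetical):
 *
 *	static const struct ntb_queue_handlers my_handlers = {
 *		.rx_handler	= my_rx_handler,
 *		.tx_handler	= my_tx_handler,
 *		.event_handler	= my_event_handler,
 *	};
 *
 *	qp = ntb_transport_create_queue(priv, client_dev, &my_handlers);
 *	if (!qp)
 *		return -EIO;
 *	for (i = 0; i < nbufs; i++)
 *		ntb_transport_rx_enqueue(qp, buf[i], buf[i], buf_len);
 *	ntb_transport_link_up(qp);
 *
 * Receive buffers should be posted before declaring the link up, otherwise
 * ntb_process_rxc() has nothing to land incoming frames in and only bumps
 * rx_err_no_buf until a buffer is enqueued.
 */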
2123 * ntb_transport_free_queue - Frees NTB transport queue
2124 * @qp: NTB queue to be freed
2126 * Frees NTB transport queue
2128 void ntb_transport_free_queue(struct ntb_transport_qp
*qp
)
2130 struct pci_dev
*pdev
;
2131 struct ntb_queue_entry
*entry
;
2137 pdev
= qp
->ndev
->pdev
;
2141 if (qp
->tx_dma_chan
) {
2142 struct dma_chan
*chan
= qp
->tx_dma_chan
;
/* Putting the dma_chan to NULL will force any new traffic to be
 * processed by the CPU instead of the DMA engine
2146 qp
->tx_dma_chan
= NULL
;
2148 /* Try to be nice and wait for any queued DMA engine
2149 * transactions to process before smashing it with a rock
2151 dma_sync_wait(chan
, qp
->last_cookie
);
2152 dmaengine_terminate_all(chan
);
2154 dma_unmap_resource(chan
->device
->dev
,
2155 qp
->tx_mw_dma_addr
, qp
->tx_mw_size
,
2156 DMA_FROM_DEVICE
, 0);
2158 dma_release_channel(chan
);
2161 if (qp
->rx_dma_chan
) {
2162 struct dma_chan
*chan
= qp
->rx_dma_chan
;
/* Putting the dma_chan to NULL will force any new traffic to be
 * processed by the CPU instead of the DMA engine
2166 qp
->rx_dma_chan
= NULL
;
2168 /* Try to be nice and wait for any queued DMA engine
2169 * transactions to process before smashing it with a rock
2171 dma_sync_wait(chan
, qp
->last_cookie
);
2172 dmaengine_terminate_all(chan
);
2173 dma_release_channel(chan
);
2176 qp_bit
= BIT_ULL(qp
->qp_num
);
2178 ntb_db_set_mask(qp
->ndev
, qp_bit
);
2179 tasklet_kill(&qp
->rxc_db_work
);
2181 cancel_delayed_work_sync(&qp
->link_work
);
2184 qp
->rx_handler
= NULL
;
2185 qp
->tx_handler
= NULL
;
2186 qp
->event_handler
= NULL
;
2188 while ((entry
= ntb_list_rm(&qp
->ntb_rx_q_lock
, &qp
->rx_free_q
)))
2191 while ((entry
= ntb_list_rm(&qp
->ntb_rx_q_lock
, &qp
->rx_pend_q
))) {
2192 dev_warn(&pdev
->dev
, "Freeing item from non-empty rx_pend_q\n");
2196 while ((entry
= ntb_list_rm(&qp
->ntb_rx_q_lock
, &qp
->rx_post_q
))) {
2197 dev_warn(&pdev
->dev
, "Freeing item from non-empty rx_post_q\n");
2201 while ((entry
= ntb_list_rm(&qp
->ntb_tx_free_q_lock
, &qp
->tx_free_q
)))
2204 qp
->transport
->qp_bitmap_free
|= qp_bit
;
2206 dev_info(&pdev
->dev
, "NTB Transport QP %d freed\n", qp
->qp_num
);
2208 EXPORT_SYMBOL_GPL(ntb_transport_free_queue
);
2211 * ntb_transport_rx_remove - Dequeues enqueued rx packet
2212 * @qp: NTB queue to be freed
2213 * @len: pointer to variable to write enqueued buffers length
2215 * Dequeues unused buffers from receive queue. Should only be used during
2218 * RETURNS: NULL error value on error, or void* for success.
2220 void *ntb_transport_rx_remove(struct ntb_transport_qp
*qp
, unsigned int *len
)
2222 struct ntb_queue_entry
*entry
;
2225 if (!qp
|| qp
->client_ready
)
2228 entry
= ntb_list_rm(&qp
->ntb_rx_q_lock
, &qp
->rx_pend_q
);
2232 buf
= entry
->cb_data
;
2235 ntb_list_add(&qp
->ntb_rx_q_lock
, &entry
->entry
, &qp
->rx_free_q
);
2239 EXPORT_SYMBOL_GPL(ntb_transport_rx_remove
);
2242 * ntb_transport_rx_enqueue - Enqueue a new NTB queue entry
2243 * @qp: NTB transport layer queue the entry is to be enqueued on
2244 * @cb: per buffer pointer for callback function to use
2245 * @data: pointer to data buffer that incoming packets will be copied into
2246 * @len: length of the data buffer
2248 * Enqueue a new receive buffer onto the transport queue into which a NTB
2249 * payload can be received into.
2251 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
2253 int ntb_transport_rx_enqueue(struct ntb_transport_qp
*qp
, void *cb
, void *data
,
2256 struct ntb_queue_entry
*entry
;
2261 entry
= ntb_list_rm(&qp
->ntb_rx_q_lock
, &qp
->rx_free_q
);
2265 entry
->cb_data
= cb
;
2271 entry
->rx_index
= 0;
2273 ntb_list_add(&qp
->ntb_rx_q_lock
, &entry
->entry
, &qp
->rx_pend_q
);
2276 tasklet_schedule(&qp
->rxc_db_work
);
2280 EXPORT_SYMBOL_GPL(ntb_transport_rx_enqueue
);
2283 * ntb_transport_tx_enqueue - Enqueue a new NTB queue entry
2284 * @qp: NTB transport layer queue the entry is to be enqueued on
2285 * @cb: per buffer pointer for callback function to use
2286 * @data: pointer to data buffer that will be sent
2287 * @len: length of the data buffer
2289 * Enqueue a new transmit buffer onto the transport queue from which a NTB
2290 * payload will be transmitted. This assumes that a lock is being held to
2291 * serialize access to the qp.
2293 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
2295 int ntb_transport_tx_enqueue(struct ntb_transport_qp
*qp
, void *cb
, void *data
,
2298 struct ntb_queue_entry
*entry
;
2304 /* If the qp link is down already, just ignore. */
2305 if (!qp
->link_is_up
)
2308 entry
= ntb_list_rm(&qp
->ntb_tx_free_q_lock
, &qp
->tx_free_q
);
2310 qp
->tx_err_no_buf
++;
2314 entry
->cb_data
= cb
;
2320 entry
->tx_index
= 0;
2322 rc
= ntb_process_tx(qp
, entry
);
2324 ntb_list_add(&qp
->ntb_tx_free_q_lock
, &entry
->entry
,
2329 EXPORT_SYMBOL_GPL(ntb_transport_tx_enqueue
);
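/*
 * Typical transmit path for a client (illustrative only):
 *
 *	rc = ntb_transport_tx_enqueue(qp, skb, skb->data, skb->len);
 *	if (rc)
 *		... back-pressure: ring full or link down ...
 *
 * The buffer must remain untouched until the tx_handler callback fires,
 * because the copy into the peer's memory window may complete
 * asynchronously (via the DMA engine) after this call returns.
 */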
2332 * ntb_transport_link_up - Notify NTB transport of client readiness to use queue
2333 * @qp: NTB transport layer queue to be enabled
2335 * Notify NTB transport layer of client readiness to use queue
2337 void ntb_transport_link_up(struct ntb_transport_qp
*qp
)
2342 qp
->client_ready
= true;
2344 if (qp
->transport
->link_is_up
)
2345 schedule_delayed_work(&qp
->link_work
, 0);
2347 EXPORT_SYMBOL_GPL(ntb_transport_link_up
);
2350 * ntb_transport_link_down - Notify NTB transport to no longer enqueue data
2351 * @qp: NTB transport layer queue to be disabled
2353 * Notify NTB transport layer of client's desire to no longer receive data on
2354 * transport queue specified. It is the client's responsibility to ensure all
2355 * entries on queue are purged or otherwise handled appropriately.
2357 void ntb_transport_link_down(struct ntb_transport_qp
*qp
)
2364 qp
->client_ready
= false;
2366 val
= ntb_spad_read(qp
->ndev
, QP_LINKS
);
2368 ntb_peer_spad_write(qp
->ndev
, PIDX
, QP_LINKS
, val
& ~BIT(qp
->qp_num
));
2371 ntb_send_link_down(qp
);
2373 cancel_delayed_work_sync(&qp
->link_work
);
2375 EXPORT_SYMBOL_GPL(ntb_transport_link_down
);
2378 * ntb_transport_link_query - Query transport link state
2379 * @qp: NTB transport layer queue to be queried
2381 * Query connectivity to the remote system of the NTB transport queue
2383 * RETURNS: true for link up or false for link down
2385 bool ntb_transport_link_query(struct ntb_transport_qp
*qp
)
2390 return qp
->link_is_up
;
2392 EXPORT_SYMBOL_GPL(ntb_transport_link_query
);
2395 * ntb_transport_qp_num - Query the qp number
2396 * @qp: NTB transport layer queue to be queried
2398 * Query qp number of the NTB transport queue
2400 * RETURNS: a zero based number specifying the qp number
2402 unsigned char ntb_transport_qp_num(struct ntb_transport_qp
*qp
)
2409 EXPORT_SYMBOL_GPL(ntb_transport_qp_num
);
2412 * ntb_transport_max_size - Query the max payload size of a qp
2413 * @qp: NTB transport layer queue to be queried
2415 * Query the maximum payload size permissible on the given qp
2417 * RETURNS: the max payload size of a qp
2419 unsigned int ntb_transport_max_size(struct ntb_transport_qp
*qp
)
2421 unsigned int max_size
;
2422 unsigned int copy_align
;
2423 struct dma_chan
*rx_chan
, *tx_chan
;
2428 rx_chan
= qp
->rx_dma_chan
;
2429 tx_chan
= qp
->tx_dma_chan
;
2431 copy_align
= max(rx_chan
? rx_chan
->device
->copy_align
: 0,
2432 tx_chan
? tx_chan
->device
->copy_align
: 0);
2434 /* If DMA engine usage is possible, try to find the max size for that */
2435 max_size
= qp
->tx_max_frame
- sizeof(struct ntb_payload_header
);
2436 max_size
= round_down(max_size
, 1 << copy_align
);
2440 EXPORT_SYMBOL_GPL(ntb_transport_max_size
);
2442 unsigned int ntb_transport_tx_free_entry(struct ntb_transport_qp
*qp
)
2444 unsigned int head
= qp
->tx_index
;
2445 unsigned int tail
= qp
->remote_rx_info
->entry
;
2447 return tail
>= head
? tail
- head
: qp
->tx_max_entry
+ tail
- head
;
2449 EXPORT_SYMBOL_GPL(ntb_transport_tx_free_entry
);
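/*
 * Example: with tx_max_entry == 16, tx_index (head) == 14 and the peer's
 * advertised entry (tail) == 3, the ring has wrapped, so the free count is
 * 16 + 3 - 14 = 5 entries rather than the negative head-to-tail distance.
 */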
2451 static void ntb_transport_doorbell_callback(void *data
, int vector
)
2453 struct ntb_transport_ctx
*nt
= data
;
2454 struct ntb_transport_qp
*qp
;
2456 unsigned int qp_num
;
2458 if (ntb_db_read(nt
->ndev
) & nt
->msi_db_mask
) {
2459 ntb_transport_msi_peer_desc_changed(nt
);
2460 ntb_db_clear(nt
->ndev
, nt
->msi_db_mask
);
2463 db_bits
= (nt
->qp_bitmap
& ~nt
->qp_bitmap_free
&
2464 ntb_db_vector_mask(nt
->ndev
, vector
));
2467 qp_num
= __ffs(db_bits
);
2468 qp
= &nt
->qp_vec
[qp_num
];
2471 tasklet_schedule(&qp
->rxc_db_work
);
2473 db_bits
&= ~BIT_ULL(qp_num
);
2477 static const struct ntb_ctx_ops ntb_transport_ops
= {
2478 .link_event
= ntb_transport_event_callback
,
2479 .db_event
= ntb_transport_doorbell_callback
,
2482 static struct ntb_client ntb_transport_client
= {
2484 .probe
= ntb_transport_probe
,
2485 .remove
= ntb_transport_free
,
2489 static int __init
ntb_transport_init(void)
2493 pr_info("%s, version %s\n", NTB_TRANSPORT_DESC
, NTB_TRANSPORT_VER
);
2495 if (debugfs_initialized())
2496 nt_debugfs_dir
= debugfs_create_dir(KBUILD_MODNAME
, NULL
);
2498 rc
= bus_register(&ntb_transport_bus
);
2502 rc
= ntb_register_client(&ntb_transport_client
);
2509 bus_unregister(&ntb_transport_bus
);
2511 debugfs_remove_recursive(nt_debugfs_dir
);
2514 module_init(ntb_transport_init
);
2516 static void __exit
ntb_transport_exit(void)
2518 ntb_unregister_client(&ntb_transport_client
);
2519 bus_unregister(&ntb_transport_bus
);
2520 debugfs_remove_recursive(nt_debugfs_dir
);
2522 module_exit(ntb_transport_exit
);