drivers/ntb/ntb_transport.c
/*
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 *   redistributing this file, you may do so under either license.
 *
 *   GPL LICENSE SUMMARY
 *
 *   Copyright(c) 2012 Intel Corporation. All rights reserved.
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of version 2 of the GNU General Public License as
 *   published by the Free Software Foundation.
 *
 *   BSD LICENSE
 *
 *   Copyright(c) 2012 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Intel PCIe NTB Linux driver
 *
 * Contact Information:
 * Jon Mason <jon.mason@intel.com>
 */
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/ntb.h>
#include "ntb_hw.h"
#define NTB_TRANSPORT_VERSION	3

static unsigned int transport_mtu = 0x401E;
module_param(transport_mtu, uint, 0644);
MODULE_PARM_DESC(transport_mtu, "Maximum size of NTB transport packets");

static unsigned char max_num_clients;
module_param(max_num_clients, byte, 0644);
MODULE_PARM_DESC(max_num_clients, "Maximum number of NTB transport clients");

static unsigned int copy_bytes = 1024;
module_param(copy_bytes, uint, 0644);
MODULE_PARM_DESC(copy_bytes, "Threshold under which NTB will use the CPU to copy instead of DMA");
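
/*
 * Example module load (a sketch; the module name "ntb" and the values are
 * illustrative assumptions, the parameter names above are authoritative):
 *
 *	modprobe ntb transport_mtu=0x8000 max_num_clients=4 copy_bytes=2048
 *
 * Packets are capped at transport_mtu bytes, and any transfer shorter than
 * copy_bytes is done by CPU memcpy even when a DMA channel is available.
 */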
struct ntb_queue_entry {
	/* ntb_queue list reference */
	struct list_head entry;
	/* pointers to data to be transferred */
	void *cb_data;
	void *buf;
	unsigned int len;
	unsigned int flags;

	struct ntb_transport_qp *qp;
	union {
		struct ntb_payload_header __iomem *tx_hdr;
		struct ntb_payload_header *rx_hdr;
	};
	unsigned int index;
};
struct ntb_rx_info {
	unsigned int entry;
};
struct ntb_transport_qp {
	struct ntb_transport *transport;
	struct ntb_device *ndev;
	void *cb_data;
	struct dma_chan *dma_chan;

	bool client_ready;
	bool qp_link;
	u8 qp_num;	/* Only 64 QP's are allowed.  0-63 */

	struct ntb_rx_info __iomem *rx_info;
	struct ntb_rx_info *remote_rx_info;

	void (*tx_handler)(struct ntb_transport_qp *qp, void *qp_data,
			   void *data, int len);
	struct list_head tx_free_q;
	spinlock_t ntb_tx_free_q_lock;
	void __iomem *tx_mw;
	dma_addr_t tx_mw_phys;
	unsigned int tx_index;
	unsigned int tx_max_entry;
	unsigned int tx_max_frame;

	void (*rx_handler)(struct ntb_transport_qp *qp, void *qp_data,
			   void *data, int len);
	struct list_head rx_pend_q;
	struct list_head rx_free_q;
	spinlock_t ntb_rx_pend_q_lock;
	spinlock_t ntb_rx_free_q_lock;
	void *rx_buff;
	unsigned int rx_index;
	unsigned int rx_max_entry;
	unsigned int rx_max_frame;
	dma_cookie_t last_cookie;

	void (*event_handler)(void *data, int status);
	struct delayed_work link_work;
	struct work_struct link_cleanup;

	struct dentry *debugfs_dir;
	struct dentry *debugfs_stats;

	/* Stats */
	u64 rx_bytes;
	u64 rx_pkts;
	u64 rx_ring_empty;
	u64 rx_err_no_buf;
	u64 rx_err_oflow;
	u64 rx_err_ver;
	u64 rx_memcpy;
	u64 rx_async;
	u64 tx_bytes;
	u64 tx_pkts;
	u64 tx_ring_full;
	u64 tx_err_no_buf;
	u64 tx_memcpy;
	u64 tx_async;
};
struct ntb_transport_mw {
	size_t size;
	void *virt_addr;
	dma_addr_t dma_addr;
};
struct ntb_transport_client_dev {
	struct list_head entry;
	struct device dev;
};
struct ntb_transport {
	struct list_head entry;
	struct list_head client_devs;

	struct ntb_device *ndev;
	struct ntb_transport_mw *mw;
	struct ntb_transport_qp *qps;
	unsigned int max_qps;
	unsigned long qp_bitmap;
	bool transport_link;
	struct delayed_work link_work;
	struct work_struct link_cleanup;
};
enum {
	DESC_DONE_FLAG = 1 << 0,
	LINK_DOWN_FLAG = 1 << 1,
};
struct ntb_payload_header {
	unsigned int ver;
	unsigned int len;
	unsigned int flags;
};
enum {
	VERSION = 0,
	QP_LINKS,
	NUM_QPS,
	NUM_MWS,
	MW0_SZ_HIGH,
	MW0_SZ_LOW,
	MW1_SZ_HIGH,
	MW1_SZ_LOW,
	MAX_SPAD,
};
#define QP_TO_MW(ndev, qp)	((qp) % ntb_max_mw(ndev))
#define NTB_QP_DEF_NUM_ENTRIES	100
#define NTB_LINK_DOWN_TIMEOUT	10
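
/*
 * Worked example of the QP_TO_MW() striping (numbers assumed purely for
 * illustration): with ntb_max_mw(ndev) == 2 and nt->max_qps == 8, qps
 * 0, 2, 4 and 6 map to MW0 and qps 1, 3, 5 and 7 map to MW1, so each
 * memory window ends up carved into 8 / 2 == 4 equal per-qp rings.
 */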
static int ntb_match_bus(struct device *dev, struct device_driver *drv)
{
	return !strncmp(dev_name(dev), drv->name, strlen(drv->name));
}
static int ntb_client_probe(struct device *dev)
{
	const struct ntb_client *drv = container_of(dev->driver,
						    struct ntb_client, driver);
	struct pci_dev *pdev = container_of(dev->parent, struct pci_dev, dev);
	int rc = -EINVAL;

	get_device(dev);
	if (drv && drv->probe)
		rc = drv->probe(pdev);
	if (rc)
		put_device(dev);

	return rc;
}
static int ntb_client_remove(struct device *dev)
{
	const struct ntb_client *drv = container_of(dev->driver,
						    struct ntb_client, driver);
	struct pci_dev *pdev = container_of(dev->parent, struct pci_dev, dev);

	if (drv && drv->remove)
		drv->remove(pdev);

	put_device(dev);

	return 0;
}
static struct bus_type ntb_bus_type = {
	.name = "ntb_bus",
	.match = ntb_match_bus,
	.probe = ntb_client_probe,
	.remove = ntb_client_remove,
};
static LIST_HEAD(ntb_transport_list);
static int ntb_bus_init(struct ntb_transport *nt)
{
	if (list_empty(&ntb_transport_list)) {
		int rc = bus_register(&ntb_bus_type);
		if (rc)
			return rc;
	}

	list_add(&nt->entry, &ntb_transport_list);

	return 0;
}
static void ntb_bus_remove(struct ntb_transport *nt)
{
	struct ntb_transport_client_dev *client_dev, *cd;

	list_for_each_entry_safe(client_dev, cd, &nt->client_devs, entry) {
		dev_err(client_dev->dev.parent, "%s still attached to bus, removing\n",
			dev_name(&client_dev->dev));
		list_del(&client_dev->entry);
		device_unregister(&client_dev->dev);
	}

	list_del(&nt->entry);

	if (list_empty(&ntb_transport_list))
		bus_unregister(&ntb_bus_type);
}
static void ntb_client_release(struct device *dev)
{
	struct ntb_transport_client_dev *client_dev;
	client_dev = container_of(dev, struct ntb_transport_client_dev, dev);

	kfree(client_dev);
}
/**
 * ntb_unregister_client_dev - Unregister NTB client device
 * @device_name: Name of NTB client device
 *
 * Unregister an NTB client device with the NTB transport layer
 */
void ntb_unregister_client_dev(char *device_name)
{
	struct ntb_transport_client_dev *client, *cd;
	struct ntb_transport *nt;

	list_for_each_entry(nt, &ntb_transport_list, entry)
		list_for_each_entry_safe(client, cd, &nt->client_devs, entry)
			if (!strncmp(dev_name(&client->dev), device_name,
				     strlen(device_name))) {
				list_del(&client->entry);
				device_unregister(&client->dev);
			}
}
EXPORT_SYMBOL_GPL(ntb_unregister_client_dev);
/**
 * ntb_register_client_dev - Register NTB client device
 * @device_name: Name of NTB client device
 *
 * Register an NTB client device with the NTB transport layer
 */
int ntb_register_client_dev(char *device_name)
{
	struct ntb_transport_client_dev *client_dev;
	struct ntb_transport *nt;
	int rc, i = 0;

	if (list_empty(&ntb_transport_list))
		return -ENODEV;

	list_for_each_entry(nt, &ntb_transport_list, entry) {
		struct device *dev;

		client_dev = kzalloc(sizeof(struct ntb_transport_client_dev),
				     GFP_KERNEL);
		if (!client_dev) {
			rc = -ENOMEM;
			goto err;
		}

		dev = &client_dev->dev;

		/* setup and register client devices */
		dev_set_name(dev, "%s%d", device_name, i);
		dev->bus = &ntb_bus_type;
		dev->release = ntb_client_release;
		dev->parent = &ntb_query_pdev(nt->ndev)->dev;

		rc = device_register(dev);
		if (rc) {
			kfree(client_dev);
			goto err;
		}

		list_add_tail(&client_dev->entry, &nt->client_devs);
		i++;
	}

	return 0;

err:
	ntb_unregister_client_dev(device_name);

	return rc;
}
EXPORT_SYMBOL_GPL(ntb_register_client_dev);
/**
 * ntb_register_client - Register NTB client driver
 * @drv: NTB client driver to be registered
 *
 * Register an NTB client driver with the NTB transport layer
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
int ntb_register_client(struct ntb_client *drv)
{
	drv->driver.bus = &ntb_bus_type;

	if (list_empty(&ntb_transport_list))
		return -ENODEV;

	return driver_register(&drv->driver);
}
EXPORT_SYMBOL_GPL(ntb_register_client);
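
/*
 * Usage sketch for the two registration calls above (hypothetical client
 * code; the my_* names are illustrative, not part of this driver).  A
 * client first registers a named device per transport, then a driver whose
 * name matches it; ntb_match_bus() pairs the two by name prefix:
 *
 *	static struct ntb_client my_client = {
 *		.driver	= { .name = "my_ntb_client" },
 *		.probe	= my_probe,
 *		.remove	= my_remove,
 *	};
 *
 *	rc = ntb_register_client_dev("my_ntb_client");
 *	if (!rc) {
 *		rc = ntb_register_client(&my_client);
 *		if (rc)
 *			ntb_unregister_client_dev("my_ntb_client");
 *	}
 */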
/**
 * ntb_unregister_client - Unregister NTB client driver
 * @drv: NTB client driver to be unregistered
 *
 * Unregister an NTB client driver with the NTB transport layer
 */
void ntb_unregister_client(struct ntb_client *drv)
{
	driver_unregister(&drv->driver);
}
EXPORT_SYMBOL_GPL(ntb_unregister_client);
static ssize_t debugfs_read(struct file *filp, char __user *ubuf, size_t count,
			    loff_t *offp)
{
	struct ntb_transport_qp *qp;
	char *buf;
	ssize_t ret, out_offset, out_count;

	out_count = 1000;

	buf = kmalloc(out_count, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	qp = filp->private_data;
	out_offset = 0;
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "NTB QP stats\n");
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_bytes - \t%llu\n", qp->rx_bytes);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_pkts - \t%llu\n", qp->rx_pkts);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_memcpy - \t%llu\n", qp->rx_memcpy);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_async - \t%llu\n", qp->rx_async);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_ring_empty - %llu\n", qp->rx_ring_empty);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_err_no_buf - %llu\n", qp->rx_err_no_buf);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_err_oflow - \t%llu\n", qp->rx_err_oflow);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_err_ver - \t%llu\n", qp->rx_err_ver);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_buff - \t%p\n", qp->rx_buff);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_index - \t%u\n", qp->rx_index);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_max_entry - \t%u\n", qp->rx_max_entry);

	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_bytes - \t%llu\n", qp->tx_bytes);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_pkts - \t%llu\n", qp->tx_pkts);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_memcpy - \t%llu\n", qp->tx_memcpy);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_async - \t%llu\n", qp->tx_async);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_ring_full - \t%llu\n", qp->tx_ring_full);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_err_no_buf - %llu\n", qp->tx_err_no_buf);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_mw - \t%p\n", qp->tx_mw);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_index - \t%u\n", qp->tx_index);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_max_entry - \t%u\n", qp->tx_max_entry);

	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "\nQP Link %s\n", (qp->qp_link == NTB_LINK_UP) ?
			       "Up" : "Down");
	if (out_offset > out_count)
		out_offset = out_count;

	ret = simple_read_from_buffer(ubuf, count, offp, buf, out_offset);
	kfree(buf);
	return ret;
}

static const struct file_operations ntb_qp_debugfs_stats = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.read = debugfs_read,
};
static void ntb_list_add(spinlock_t *lock, struct list_head *entry,
			 struct list_head *list)
{
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	list_add_tail(entry, list);
	spin_unlock_irqrestore(lock, flags);
}
static struct ntb_queue_entry *ntb_list_rm(spinlock_t *lock,
					   struct list_head *list)
{
	struct ntb_queue_entry *entry;
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	if (list_empty(list)) {
		entry = NULL;
		goto out;
	}
	entry = list_first_entry(list, struct ntb_queue_entry, entry);
	list_del(&entry->entry);
out:
	spin_unlock_irqrestore(lock, flags);

	return entry;
}
static void ntb_transport_setup_qp_mw(struct ntb_transport *nt,
				      unsigned int qp_num)
{
	struct ntb_transport_qp *qp = &nt->qps[qp_num];
	unsigned int rx_size, num_qps_mw;
	u8 mw_num, mw_max;
	unsigned int i;

	mw_max = ntb_max_mw(nt->ndev);
	mw_num = QP_TO_MW(nt->ndev, qp_num);

	WARN_ON(nt->mw[mw_num].virt_addr == NULL);

	if (nt->max_qps % mw_max && mw_num < nt->max_qps % mw_max)
		num_qps_mw = nt->max_qps / mw_max + 1;
	else
		num_qps_mw = nt->max_qps / mw_max;

	rx_size = (unsigned int) nt->mw[mw_num].size / num_qps_mw;
	qp->rx_buff = nt->mw[mw_num].virt_addr + qp_num / mw_max * rx_size;
	rx_size -= sizeof(struct ntb_rx_info);

	qp->remote_rx_info = qp->rx_buff + rx_size;

	/* Due to housekeeping, there must be at least 2 buffs */
	qp->rx_max_frame = min(transport_mtu, rx_size / 2);
	qp->rx_max_entry = rx_size / qp->rx_max_frame;
	qp->rx_index = 0;

	qp->remote_rx_info->entry = qp->rx_max_entry - 1;

	/* setup the hdr offsets with 0's */
	for (i = 0; i < qp->rx_max_entry; i++) {
		void *offset = qp->rx_buff + qp->rx_max_frame * (i + 1) -
			       sizeof(struct ntb_payload_header);
		memset(offset, 0, sizeof(struct ntb_payload_header));
	}

	qp->rx_pkts = 0;
	qp->tx_pkts = 0;
	qp->tx_index = 0;
}
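
/*
 * The resulting per-qp slice of the memory window looks like this (a
 * sketch of the math above, not an authoritative map):
 *
 *	rx_buff: [frame 0 .. hdr][frame 1 .. hdr] ... [frame N-1 .. hdr][rx_info]
 *
 * Each of the rx_max_entry frames is rx_max_frame bytes and ends with a
 * struct ntb_payload_header.  The trailing struct ntb_rx_info is the spot
 * the peer writes through its tx_mw mapping (see qp->rx_info in
 * ntb_transport_init_queue()) to publish its receive index; the local tx
 * path reads it back as qp->remote_rx_info for ring-full detection.
 */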
static void ntb_free_mw(struct ntb_transport *nt, int num_mw)
{
	struct ntb_transport_mw *mw = &nt->mw[num_mw];
	struct pci_dev *pdev = ntb_query_pdev(nt->ndev);

	if (!mw->virt_addr)
		return;

	dma_free_coherent(&pdev->dev, mw->size, mw->virt_addr, mw->dma_addr);
	mw->virt_addr = NULL;
}
static int ntb_set_mw(struct ntb_transport *nt, int num_mw, unsigned int size)
{
	struct ntb_transport_mw *mw = &nt->mw[num_mw];
	struct pci_dev *pdev = ntb_query_pdev(nt->ndev);

	/* No need to re-setup */
	if (mw->size == ALIGN(size, 4096))
		return 0;

	if (mw->size != 0)
		ntb_free_mw(nt, num_mw);

	/* Alloc memory for receiving data.  Must be 4k aligned */
	mw->size = ALIGN(size, 4096);

	mw->virt_addr = dma_alloc_coherent(&pdev->dev, mw->size, &mw->dma_addr,
					   GFP_KERNEL);
	if (!mw->virt_addr) {
		/* report the attempted size before resetting it to zero */
		dev_err(&pdev->dev, "Unable to allocate MW buffer of size %d\n",
			(int) mw->size);
		mw->size = 0;
		return -ENOMEM;
	}

	/* Notify HW the memory location of the receive buffer */
	ntb_set_mw_addr(nt->ndev, num_mw, mw->dma_addr);

	return 0;
}
static void ntb_qp_link_cleanup(struct ntb_transport_qp *qp)
{
	struct ntb_transport *nt = qp->transport;
	struct pci_dev *pdev = ntb_query_pdev(nt->ndev);

	if (qp->qp_link == NTB_LINK_DOWN) {
		cancel_delayed_work_sync(&qp->link_work);
		return;
	}

	if (qp->event_handler)
		qp->event_handler(qp->cb_data, NTB_LINK_DOWN);

	dev_info(&pdev->dev, "qp %d: Link Down\n", qp->qp_num);
	qp->qp_link = NTB_LINK_DOWN;
}
static void ntb_qp_link_cleanup_work(struct work_struct *work)
{
	struct ntb_transport_qp *qp = container_of(work,
						   struct ntb_transport_qp,
						   link_cleanup);
	struct ntb_transport *nt = qp->transport;

	ntb_qp_link_cleanup(qp);

	if (nt->transport_link == NTB_LINK_UP)
		schedule_delayed_work(&qp->link_work,
				      msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT));
}
static void ntb_qp_link_down(struct ntb_transport_qp *qp)
{
	schedule_work(&qp->link_cleanup);
}
static void ntb_transport_link_cleanup(struct ntb_transport *nt)
{
	int i;

	/* Pass along the info to any clients */
	for (i = 0; i < nt->max_qps; i++)
		if (!test_bit(i, &nt->qp_bitmap))
			ntb_qp_link_cleanup(&nt->qps[i]);

	if (nt->transport_link == NTB_LINK_DOWN)
		cancel_delayed_work_sync(&nt->link_work);
	else
		nt->transport_link = NTB_LINK_DOWN;

	/* The scratchpad registers keep the values if the remote side
	 * goes down, blast them now to give them a sane value the next
	 * time they are accessed
	 */
	for (i = 0; i < MAX_SPAD; i++)
		ntb_write_local_spad(nt->ndev, i, 0);
}
static void ntb_transport_link_cleanup_work(struct work_struct *work)
{
	struct ntb_transport *nt = container_of(work, struct ntb_transport,
						link_cleanup);

	ntb_transport_link_cleanup(nt);
}
static void ntb_transport_event_callback(void *data, enum ntb_hw_event event)
{
	struct ntb_transport *nt = data;

	switch (event) {
	case NTB_EVENT_HW_LINK_UP:
		schedule_delayed_work(&nt->link_work, 0);
		break;
	case NTB_EVENT_HW_LINK_DOWN:
		schedule_work(&nt->link_cleanup);
		break;
	default:
		BUG();
	}
}
static void ntb_transport_link_work(struct work_struct *work)
{
	struct ntb_transport *nt = container_of(work, struct ntb_transport,
						link_work.work);
	struct ntb_device *ndev = nt->ndev;
	struct pci_dev *pdev = ntb_query_pdev(ndev);
	u32 val;
	int rc, i;

	/* send the local info, in the opposite order of the way we read it */
	for (i = 0; i < ntb_max_mw(ndev); i++) {
		rc = ntb_write_remote_spad(ndev, MW0_SZ_HIGH + (i * 2),
					   ntb_get_mw_size(ndev, i) >> 32);
		if (rc) {
			dev_err(&pdev->dev, "Error writing %u to remote spad %d\n",
				(u32)(ntb_get_mw_size(ndev, i) >> 32),
				MW0_SZ_HIGH + (i * 2));
			goto out;
		}

		rc = ntb_write_remote_spad(ndev, MW0_SZ_LOW + (i * 2),
					   (u32) ntb_get_mw_size(ndev, i));
		if (rc) {
			dev_err(&pdev->dev, "Error writing %u to remote spad %d\n",
				(u32) ntb_get_mw_size(ndev, i),
				MW0_SZ_LOW + (i * 2));
			goto out;
		}
	}

	rc = ntb_write_remote_spad(ndev, NUM_MWS, ntb_max_mw(ndev));
	if (rc) {
		dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
			ntb_max_mw(ndev), NUM_MWS);
		goto out;
	}

	rc = ntb_write_remote_spad(ndev, NUM_QPS, nt->max_qps);
	if (rc) {
		dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
			nt->max_qps, NUM_QPS);
		goto out;
	}

	rc = ntb_write_remote_spad(ndev, VERSION, NTB_TRANSPORT_VERSION);
	if (rc) {
		dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
			NTB_TRANSPORT_VERSION, VERSION);
		goto out;
	}

	/* Query the remote side for its info */
	rc = ntb_read_remote_spad(ndev, VERSION, &val);
	if (rc) {
		dev_err(&pdev->dev, "Error reading remote spad %d\n", VERSION);
		goto out;
	}

	if (val != NTB_TRANSPORT_VERSION)
		goto out;
	dev_dbg(&pdev->dev, "Remote version = %d\n", val);

	rc = ntb_read_remote_spad(ndev, NUM_QPS, &val);
	if (rc) {
		dev_err(&pdev->dev, "Error reading remote spad %d\n", NUM_QPS);
		goto out;
	}

	if (val != nt->max_qps)
		goto out;
	dev_dbg(&pdev->dev, "Remote max number of qps = %d\n", val);

	rc = ntb_read_remote_spad(ndev, NUM_MWS, &val);
	if (rc) {
		dev_err(&pdev->dev, "Error reading remote spad %d\n", NUM_MWS);
		goto out;
	}

	if (val != ntb_max_mw(ndev))
		goto out;
	dev_dbg(&pdev->dev, "Remote number of mws = %d\n", val);

	for (i = 0; i < ntb_max_mw(ndev); i++) {
		u64 val64;

		rc = ntb_read_remote_spad(ndev, MW0_SZ_HIGH + (i * 2), &val);
		if (rc) {
			dev_err(&pdev->dev, "Error reading remote spad %d\n",
				MW0_SZ_HIGH + (i * 2));
			goto out1;
		}

		val64 = (u64) val << 32;

		rc = ntb_read_remote_spad(ndev, MW0_SZ_LOW + (i * 2), &val);
		if (rc) {
			dev_err(&pdev->dev, "Error reading remote spad %d\n",
				MW0_SZ_LOW + (i * 2));
			goto out1;
		}

		val64 |= val;

		dev_dbg(&pdev->dev, "Remote MW%d size = %llu\n", i, val64);

		rc = ntb_set_mw(nt, i, val64);
		if (rc)
			goto out1;
	}

	nt->transport_link = NTB_LINK_UP;

	for (i = 0; i < nt->max_qps; i++) {
		struct ntb_transport_qp *qp = &nt->qps[i];

		ntb_transport_setup_qp_mw(nt, i);

		if (qp->client_ready == NTB_LINK_UP)
			schedule_delayed_work(&qp->link_work, 0);
	}

	return;

out1:
	for (i = 0; i < ntb_max_mw(ndev); i++)
		ntb_free_mw(nt, i);
out:
	if (ntb_hw_link_status(ndev))
		schedule_delayed_work(&nt->link_work,
				      msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT));
}
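
/*
 * Summary of the scratchpad handshake performed above.  Each side publishes
 * its parameters with ntb_write_remote_spad() and then reads the peer's
 * values back, in the reverse order:
 *
 *	written: MW0_SZ_HIGH/LOW per mw, NUM_MWS, NUM_QPS, VERSION (last)
 *	read:    VERSION, NUM_QPS, NUM_MWS, MW sizes
 *
 * Because VERSION is written last, a matching VERSION implies the other
 * values are complete; any mismatch or read/write failure simply lets the
 * delayed work retry after NTB_LINK_DOWN_TIMEOUT msecs.
 */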
static void ntb_qp_link_work(struct work_struct *work)
{
	struct ntb_transport_qp *qp = container_of(work,
						   struct ntb_transport_qp,
						   link_work.work);
	struct pci_dev *pdev = ntb_query_pdev(qp->ndev);
	struct ntb_transport *nt = qp->transport;
	int rc, val;

	WARN_ON(nt->transport_link != NTB_LINK_UP);

	rc = ntb_read_local_spad(nt->ndev, QP_LINKS, &val);
	if (rc) {
		dev_err(&pdev->dev, "Error reading spad %d\n", QP_LINKS);
		return;
	}

	rc = ntb_write_remote_spad(nt->ndev, QP_LINKS, val | 1 << qp->qp_num);
	if (rc)
		dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
			val | 1 << qp->qp_num, QP_LINKS);

	/* query remote spad for qp ready bits */
	rc = ntb_read_remote_spad(nt->ndev, QP_LINKS, &val);
	if (rc)
		dev_err(&pdev->dev, "Error reading remote spad %d\n", QP_LINKS);

	dev_dbg(&pdev->dev, "Remote QP link status = %x\n", val);

	/* See if the remote side is up */
	if (1 << qp->qp_num & val) {
		qp->qp_link = NTB_LINK_UP;

		dev_info(&pdev->dev, "qp %d: Link Up\n", qp->qp_num);
		if (qp->event_handler)
			qp->event_handler(qp->cb_data, NTB_LINK_UP);
	} else if (nt->transport_link == NTB_LINK_UP)
		schedule_delayed_work(&qp->link_work,
				      msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT));
}
static int ntb_transport_init_queue(struct ntb_transport *nt,
				    unsigned int qp_num)
{
	struct ntb_transport_qp *qp;
	unsigned int num_qps_mw, tx_size;
	u8 mw_num, mw_max;
	u64 qp_offset;

	mw_max = ntb_max_mw(nt->ndev);
	mw_num = QP_TO_MW(nt->ndev, qp_num);

	qp = &nt->qps[qp_num];
	qp->qp_num = qp_num;
	qp->transport = nt;
	qp->ndev = nt->ndev;
	qp->qp_link = NTB_LINK_DOWN;
	qp->client_ready = NTB_LINK_DOWN;
	qp->event_handler = NULL;

	if (nt->max_qps % mw_max && mw_num < nt->max_qps % mw_max)
		num_qps_mw = nt->max_qps / mw_max + 1;
	else
		num_qps_mw = nt->max_qps / mw_max;

	tx_size = (unsigned int) ntb_get_mw_size(qp->ndev, mw_num) / num_qps_mw;
	qp_offset = qp_num / mw_max * tx_size;
	qp->tx_mw = ntb_get_mw_vbase(nt->ndev, mw_num) + qp_offset;
	if (!qp->tx_mw)
		return -EINVAL;

	qp->tx_mw_phys = ntb_get_mw_base(qp->ndev, mw_num) + qp_offset;
	if (!qp->tx_mw_phys)
		return -EINVAL;

	tx_size -= sizeof(struct ntb_rx_info);
	qp->rx_info = qp->tx_mw + tx_size;

	/* Due to housekeeping, there must be at least 2 buffs */
	qp->tx_max_frame = min(transport_mtu, tx_size / 2);
	qp->tx_max_entry = tx_size / qp->tx_max_frame;

	if (ntb_query_debugfs(nt->ndev)) {
		/* must hold up to "qp63"; a 4-byte buffer would truncate
		 * two-digit queue numbers
		 */
		char debugfs_name[8];

		snprintf(debugfs_name, sizeof(debugfs_name), "qp%d", qp_num);
		qp->debugfs_dir = debugfs_create_dir(debugfs_name,
						     ntb_query_debugfs(nt->ndev));

		qp->debugfs_stats = debugfs_create_file("stats", S_IRUSR,
							qp->debugfs_dir, qp,
							&ntb_qp_debugfs_stats);
	}

	INIT_DELAYED_WORK(&qp->link_work, ntb_qp_link_work);
	INIT_WORK(&qp->link_cleanup, ntb_qp_link_cleanup_work);

	spin_lock_init(&qp->ntb_rx_pend_q_lock);
	spin_lock_init(&qp->ntb_rx_free_q_lock);
	spin_lock_init(&qp->ntb_tx_free_q_lock);

	INIT_LIST_HEAD(&qp->rx_pend_q);
	INIT_LIST_HEAD(&qp->rx_free_q);
	INIT_LIST_HEAD(&qp->tx_free_q);

	return 0;
}
int ntb_transport_init(struct pci_dev *pdev)
{
	struct ntb_transport *nt;
	int rc, i;

	nt = kzalloc(sizeof(struct ntb_transport), GFP_KERNEL);
	if (!nt)
		return -ENOMEM;

	nt->ndev = ntb_register_transport(pdev, nt);
	if (!nt->ndev) {
		rc = -EIO;
		goto err;
	}

	nt->mw = kcalloc(ntb_max_mw(nt->ndev), sizeof(struct ntb_transport_mw),
			 GFP_KERNEL);
	if (!nt->mw) {
		rc = -ENOMEM;
		goto err1;
	}

	if (max_num_clients)
		nt->max_qps = min(ntb_max_cbs(nt->ndev), max_num_clients);
	else
		nt->max_qps = min(ntb_max_cbs(nt->ndev), ntb_max_mw(nt->ndev));

	nt->qps = kcalloc(nt->max_qps, sizeof(struct ntb_transport_qp),
			  GFP_KERNEL);
	if (!nt->qps) {
		rc = -ENOMEM;
		goto err2;
	}

	nt->qp_bitmap = ((u64) 1 << nt->max_qps) - 1;

	for (i = 0; i < nt->max_qps; i++) {
		rc = ntb_transport_init_queue(nt, i);
		if (rc)
			goto err3;
	}

	INIT_DELAYED_WORK(&nt->link_work, ntb_transport_link_work);
	INIT_WORK(&nt->link_cleanup, ntb_transport_link_cleanup_work);

	rc = ntb_register_event_callback(nt->ndev,
					 ntb_transport_event_callback);
	if (rc)
		goto err3;

	INIT_LIST_HEAD(&nt->client_devs);
	rc = ntb_bus_init(nt);
	if (rc)
		goto err4;

	if (ntb_hw_link_status(nt->ndev))
		schedule_delayed_work(&nt->link_work, 0);

	return 0;

err4:
	ntb_unregister_event_callback(nt->ndev);
err3:
	kfree(nt->qps);
err2:
	kfree(nt->mw);
err1:
	ntb_unregister_transport(nt->ndev);
err:
	kfree(nt);
	return rc;
}
void ntb_transport_free(void *transport)
{
	struct ntb_transport *nt = transport;
	struct ntb_device *ndev = nt->ndev;
	int i;

	ntb_transport_link_cleanup(nt);

	/* verify that all the qp's are freed */
	for (i = 0; i < nt->max_qps; i++) {
		if (!test_bit(i, &nt->qp_bitmap))
			ntb_transport_free_queue(&nt->qps[i]);
		debugfs_remove_recursive(nt->qps[i].debugfs_dir);
	}

	ntb_bus_remove(nt);

	cancel_delayed_work_sync(&nt->link_work);

	ntb_unregister_event_callback(ndev);

	for (i = 0; i < ntb_max_mw(ndev); i++)
		ntb_free_mw(nt, i);

	kfree(nt->qps);
	kfree(nt->mw);
	ntb_unregister_transport(ndev);
	kfree(nt);
}
static void ntb_rx_copy_callback(void *data)
{
	struct ntb_queue_entry *entry = data;
	struct ntb_transport_qp *qp = entry->qp;
	void *cb_data = entry->cb_data;
	unsigned int len = entry->len;
	struct ntb_payload_header *hdr = entry->rx_hdr;

	/* Ensure that the data is fully copied out before clearing the flag */
	wmb();
	hdr->flags = 0;

	iowrite32(entry->index, &qp->rx_info->entry);

	ntb_list_add(&qp->ntb_rx_free_q_lock, &entry->entry, &qp->rx_free_q);

	if (qp->rx_handler && qp->client_ready == NTB_LINK_UP)
		qp->rx_handler(qp, qp->cb_data, cb_data, len);
}
static void ntb_memcpy_rx(struct ntb_queue_entry *entry, void *offset)
{
	void *buf = entry->buf;
	size_t len = entry->len;

	memcpy(buf, offset, len);

	ntb_rx_copy_callback(entry);
}
static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset,
			 size_t len)
{
	struct dma_async_tx_descriptor *txd;
	struct ntb_transport_qp *qp = entry->qp;
	struct dma_chan *chan = qp->dma_chan;
	struct dma_device *device;
	size_t pay_off, buff_off;
	struct dmaengine_unmap_data *unmap;
	dma_cookie_t cookie;
	void *buf = entry->buf;

	entry->len = len;

	if (!chan)
		goto err;

	if (len < copy_bytes)
		goto err_wait;

	device = chan->device;
	pay_off = (size_t) offset & ~PAGE_MASK;
	buff_off = (size_t) buf & ~PAGE_MASK;

	if (!is_dma_copy_aligned(device, pay_off, buff_off, len))
		goto err_wait;

	unmap = dmaengine_get_unmap_data(device->dev, 2, GFP_NOWAIT);
	if (!unmap)
		goto err_wait;

	unmap->len = len;
	unmap->addr[0] = dma_map_page(device->dev, virt_to_page(offset),
				      pay_off, len, DMA_TO_DEVICE);
	if (dma_mapping_error(device->dev, unmap->addr[0]))
		goto err_get_unmap;

	unmap->to_cnt = 1;

	unmap->addr[1] = dma_map_page(device->dev, virt_to_page(buf),
				      buff_off, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(device->dev, unmap->addr[1]))
		goto err_get_unmap;

	unmap->from_cnt = 1;

	txd = device->device_prep_dma_memcpy(chan, unmap->addr[1],
					     unmap->addr[0], len,
					     DMA_PREP_INTERRUPT);
	if (!txd)
		goto err_get_unmap;

	txd->callback = ntb_rx_copy_callback;
	txd->callback_param = entry;
	dma_set_unmap(txd, unmap);

	cookie = dmaengine_submit(txd);
	if (dma_submit_error(cookie))
		goto err_set_unmap;

	dmaengine_unmap_put(unmap);

	qp->last_cookie = cookie;

	qp->rx_async++;

	return;

err_set_unmap:
	dmaengine_unmap_put(unmap);
err_get_unmap:
	dmaengine_unmap_put(unmap);
err_wait:
	/* If the callbacks come out of order, the writing of the index to the
	 * last completed will be out of order.  This may result in the
	 * receive stalling forever.
	 */
	dma_sync_wait(chan, qp->last_cookie);
err:
	ntb_memcpy_rx(entry, offset);
	qp->rx_memcpy++;
}
static int ntb_process_rxc(struct ntb_transport_qp *qp)
{
	struct ntb_payload_header *hdr;
	struct ntb_queue_entry *entry;
	void *offset;

	offset = qp->rx_buff + qp->rx_max_frame * qp->rx_index;
	hdr = offset + qp->rx_max_frame - sizeof(struct ntb_payload_header);

	entry = ntb_list_rm(&qp->ntb_rx_pend_q_lock, &qp->rx_pend_q);
	if (!entry) {
		dev_dbg(&ntb_query_pdev(qp->ndev)->dev,
			"no buffer - HDR ver %u, len %d, flags %x\n",
			hdr->ver, hdr->len, hdr->flags);
		qp->rx_err_no_buf++;
		return -ENOMEM;
	}

	if (!(hdr->flags & DESC_DONE_FLAG)) {
		ntb_list_add(&qp->ntb_rx_pend_q_lock, &entry->entry,
			     &qp->rx_pend_q);
		qp->rx_ring_empty++;
		return -EAGAIN;
	}

	if (hdr->ver != (u32) qp->rx_pkts) {
		dev_dbg(&ntb_query_pdev(qp->ndev)->dev,
			"qp %d: version mismatch, expected %llu - got %u\n",
			qp->qp_num, qp->rx_pkts, hdr->ver);
		ntb_list_add(&qp->ntb_rx_pend_q_lock, &entry->entry,
			     &qp->rx_pend_q);
		qp->rx_err_ver++;
		return -EIO;
	}

	if (hdr->flags & LINK_DOWN_FLAG) {
		ntb_qp_link_down(qp);

		goto err;
	}

	dev_dbg(&ntb_query_pdev(qp->ndev)->dev,
		"rx offset %u, ver %u - %d payload received, buf size %d\n",
		qp->rx_index, hdr->ver, hdr->len, entry->len);

	qp->rx_bytes += hdr->len;
	qp->rx_pkts++;

	if (hdr->len > entry->len) {
		qp->rx_err_oflow++;
		dev_dbg(&ntb_query_pdev(qp->ndev)->dev,
			"RX overflow! Wanted %d got %d\n",
			hdr->len, entry->len);

		goto err;
	}

	entry->index = qp->rx_index;
	entry->rx_hdr = hdr;

	ntb_async_rx(entry, offset, hdr->len);

out:
	qp->rx_index++;
	qp->rx_index %= qp->rx_max_entry;

	return 0;

err:
	ntb_list_add(&qp->ntb_rx_pend_q_lock, &entry->entry,
		     &qp->rx_pend_q);
	/* Ensure that the data is fully copied out before clearing the flag */
	wmb();
	hdr->flags = 0;
	iowrite32(qp->rx_index, &qp->rx_info->entry);

	goto out;
}
static int ntb_transport_rxc_db(void *data, int db_num)
{
	struct ntb_transport_qp *qp = data;
	int rc, i;

	dev_dbg(&ntb_query_pdev(qp->ndev)->dev, "%s: doorbell %d received\n",
		__func__, db_num);

	/* Limit the number of packets processed in a single interrupt to
	 * provide fairness to others
	 */
	for (i = 0; i < qp->rx_max_entry; i++) {
		rc = ntb_process_rxc(qp);
		if (rc)
			break;
	}

	if (qp->dma_chan)
		dma_async_issue_pending(qp->dma_chan);

	return i;
}
static void ntb_tx_copy_callback(void *data)
{
	struct ntb_queue_entry *entry = data;
	struct ntb_transport_qp *qp = entry->qp;
	struct ntb_payload_header __iomem *hdr = entry->tx_hdr;

	/* Ensure that the data is fully copied out before setting the flags */
	wmb();
	iowrite32(entry->flags | DESC_DONE_FLAG, &hdr->flags);

	ntb_ring_doorbell(qp->ndev, qp->qp_num);

	/* The entry length can only be zero if the packet is intended to be a
	 * "link down" or similar.  Since no payload is being sent in these
	 * cases, there is nothing to add to the completion queue.
	 */
	if (entry->len > 0) {
		qp->tx_bytes += entry->len;

		if (qp->tx_handler)
			qp->tx_handler(qp, qp->cb_data, entry->cb_data,
				       entry->len);
	}

	ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry, &qp->tx_free_q);
}
static void ntb_memcpy_tx(struct ntb_queue_entry *entry, void __iomem *offset)
{
	memcpy_toio(offset, entry->buf, entry->len);

	ntb_tx_copy_callback(entry);
}
static void ntb_async_tx(struct ntb_transport_qp *qp,
			 struct ntb_queue_entry *entry)
{
	struct ntb_payload_header __iomem *hdr;
	struct dma_async_tx_descriptor *txd;
	struct dma_chan *chan = qp->dma_chan;
	struct dma_device *device;
	size_t dest_off, buff_off;
	struct dmaengine_unmap_data *unmap;
	dma_addr_t dest;
	dma_cookie_t cookie;
	void __iomem *offset;
	size_t len = entry->len;
	void *buf = entry->buf;

	offset = qp->tx_mw + qp->tx_max_frame * qp->tx_index;
	hdr = offset + qp->tx_max_frame - sizeof(struct ntb_payload_header);
	entry->tx_hdr = hdr;

	iowrite32(entry->len, &hdr->len);
	iowrite32((u32) qp->tx_pkts, &hdr->ver);

	if (!chan)
		goto err;

	if (len < copy_bytes)
		goto err;

	device = chan->device;
	dest = qp->tx_mw_phys + qp->tx_max_frame * qp->tx_index;
	buff_off = (size_t) buf & ~PAGE_MASK;
	dest_off = (size_t) dest & ~PAGE_MASK;

	if (!is_dma_copy_aligned(device, buff_off, dest_off, len))
		goto err;

	unmap = dmaengine_get_unmap_data(device->dev, 1, GFP_NOWAIT);
	if (!unmap)
		goto err;

	unmap->len = len;
	unmap->addr[0] = dma_map_page(device->dev, virt_to_page(buf),
				      buff_off, len, DMA_TO_DEVICE);
	if (dma_mapping_error(device->dev, unmap->addr[0]))
		goto err_get_unmap;

	unmap->to_cnt = 1;

	txd = device->device_prep_dma_memcpy(chan, dest, unmap->addr[0], len,
					     DMA_PREP_INTERRUPT);
	if (!txd)
		goto err_get_unmap;

	txd->callback = ntb_tx_copy_callback;
	txd->callback_param = entry;
	dma_set_unmap(txd, unmap);

	cookie = dmaengine_submit(txd);
	if (dma_submit_error(cookie))
		goto err_set_unmap;

	dmaengine_unmap_put(unmap);

	dma_async_issue_pending(chan);
	qp->tx_async++;

	return;
err_set_unmap:
	dmaengine_unmap_put(unmap);
err_get_unmap:
	dmaengine_unmap_put(unmap);
err:
	ntb_memcpy_tx(entry, offset);
	qp->tx_memcpy++;
}
static int ntb_process_tx(struct ntb_transport_qp *qp,
			  struct ntb_queue_entry *entry)
{
	dev_dbg(&ntb_query_pdev(qp->ndev)->dev, "%lld - tx %u, entry len %d flags %x buff %p\n",
		qp->tx_pkts, qp->tx_index, entry->len, entry->flags,
		entry->buf);
	if (qp->tx_index == qp->remote_rx_info->entry) {
		qp->tx_ring_full++;
		return -EAGAIN;
	}

	if (entry->len > qp->tx_max_frame - sizeof(struct ntb_payload_header)) {
		/* arguments must match the tx_handler prototype: qp first,
		 * then the client's callback data
		 */
		if (qp->tx_handler)
			qp->tx_handler(qp, qp->cb_data, NULL, -EIO);

		ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry,
			     &qp->tx_free_q);
		return 0;
	}

	ntb_async_tx(qp, entry);

	qp->tx_index++;
	qp->tx_index %= qp->tx_max_entry;

	qp->tx_pkts++;

	return 0;
}
static void ntb_send_link_down(struct ntb_transport_qp *qp)
{
	struct pci_dev *pdev = ntb_query_pdev(qp->ndev);
	struct ntb_queue_entry *entry;
	int i, rc;

	if (qp->qp_link == NTB_LINK_DOWN)
		return;

	qp->qp_link = NTB_LINK_DOWN;
	dev_info(&pdev->dev, "qp %d: Link Down\n", qp->qp_num);

	for (i = 0; i < NTB_LINK_DOWN_TIMEOUT; i++) {
		entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q);
		if (entry)
			break;
		msleep(100);
	}

	if (!entry)
		return;

	entry->cb_data = NULL;
	entry->buf = NULL;
	entry->len = 0;
	entry->flags = LINK_DOWN_FLAG;

	rc = ntb_process_tx(qp, entry);
	if (rc)
		dev_err(&pdev->dev, "ntb: QP%d unable to send linkdown msg\n",
			qp->qp_num);
}
/**
 * ntb_transport_create_queue - Create a new NTB transport layer queue
 * @data: pointer for callback data
 * @pdev: PCI device the queue will be associated with
 * @handlers: receive, transmit, and event callback functions
 *
 * Create a new NTB transport layer queue and provide the queue with a callback
 * routine for both transmit and receive.  The receive callback routine will be
 * used to pass up data when the transport has received it on the queue.  The
 * transmit callback routine will be called when the transport has completed the
 * transmission of the data on the queue and the data is ready to be freed.
 *
 * RETURNS: pointer to newly created ntb_queue, NULL on error.
 */
struct ntb_transport_qp *
ntb_transport_create_queue(void *data, struct pci_dev *pdev,
			   const struct ntb_queue_handlers *handlers)
{
	struct ntb_queue_entry *entry;
	struct ntb_transport_qp *qp;
	struct ntb_transport *nt;
	unsigned int free_queue;
	int rc, i;

	nt = ntb_find_transport(pdev);
	if (!nt)
		goto err;

	free_queue = ffs(nt->qp_bitmap);
	if (!free_queue)
		goto err;

	/* decrement free_queue to make it zero based */
	free_queue--;

	clear_bit(free_queue, &nt->qp_bitmap);

	qp = &nt->qps[free_queue];
	qp->cb_data = data;
	qp->rx_handler = handlers->rx_handler;
	qp->tx_handler = handlers->tx_handler;
	qp->event_handler = handlers->event_handler;

	dmaengine_get();
	qp->dma_chan = dma_find_channel(DMA_MEMCPY);
	if (!qp->dma_chan) {
		dmaengine_put();
		dev_info(&pdev->dev, "Unable to allocate DMA channel, using CPU instead\n");
	}

	for (i = 0; i < NTB_QP_DEF_NUM_ENTRIES; i++) {
		entry = kzalloc(sizeof(struct ntb_queue_entry), GFP_ATOMIC);
		if (!entry)
			goto err1;

		entry->qp = qp;
		ntb_list_add(&qp->ntb_rx_free_q_lock, &entry->entry,
			     &qp->rx_free_q);
	}

	for (i = 0; i < NTB_QP_DEF_NUM_ENTRIES; i++) {
		entry = kzalloc(sizeof(struct ntb_queue_entry), GFP_ATOMIC);
		if (!entry)
			goto err2;

		entry->qp = qp;
		ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry,
			     &qp->tx_free_q);
	}

	rc = ntb_register_db_callback(qp->ndev, free_queue, qp,
				      ntb_transport_rxc_db);
	if (rc)
		goto err2;

	dev_info(&pdev->dev, "NTB Transport QP %d created\n", qp->qp_num);

	return qp;

err2:
	while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q)))
		kfree(entry);
err1:
	while ((entry = ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q)))
		kfree(entry);
	if (qp->dma_chan)
		dmaengine_put();
	set_bit(free_queue, &nt->qp_bitmap);
err:
	return NULL;
}
EXPORT_SYMBOL_GPL(ntb_transport_create_queue);
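
/*
 * End-to-end usage sketch for the queue API (hypothetical client code; the
 * my_* names, MY_BUF_SIZE and NUM_BUFS are illustrative assumptions):
 *
 *	static void my_rx(struct ntb_transport_qp *qp, void *qp_data,
 *			  void *data, int len)
 *	{
 *		consume the payload in data, then recycle the buffer:
 *		ntb_transport_rx_enqueue(qp, data, data, MY_BUF_SIZE);
 *	}
 *
 *	static const struct ntb_queue_handlers my_handlers = {
 *		.rx_handler	= my_rx,
 *		.tx_handler	= my_tx,
 *		.event_handler	= my_event,
 *	};
 *
 *	qp = ntb_transport_create_queue(priv, pdev, &my_handlers);
 *	for (i = 0; i < NUM_BUFS; i++)
 *		ntb_transport_rx_enqueue(qp, bufs[i], bufs[i], MY_BUF_SIZE);
 *	ntb_transport_link_up(qp);
 *
 * Once the event handler reports NTB_LINK_UP, payloads can be sent with
 * ntb_transport_tx_enqueue(qp, cb, data, len).
 */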
/**
 * ntb_transport_free_queue - Frees NTB transport queue
 * @qp: NTB queue to be freed
 *
 * Frees NTB transport queue
 */
void ntb_transport_free_queue(struct ntb_transport_qp *qp)
{
	struct pci_dev *pdev;
	struct ntb_queue_entry *entry;

	if (!qp)
		return;

	pdev = ntb_query_pdev(qp->ndev);

	if (qp->dma_chan) {
		struct dma_chan *chan = qp->dma_chan;
		/* Putting the dma_chan to NULL will force any new traffic to be
		 * processed by the CPU instead of the DMA engine
		 */
		qp->dma_chan = NULL;

		/* Try to be nice and wait for any queued DMA engine
		 * transactions to process before smashing it with a rock
		 */
		dma_sync_wait(chan, qp->last_cookie);
		dmaengine_terminate_all(chan);
		dmaengine_put();
	}

	ntb_unregister_db_callback(qp->ndev, qp->qp_num);

	cancel_delayed_work_sync(&qp->link_work);

	while ((entry = ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q)))
		kfree(entry);

	while ((entry = ntb_list_rm(&qp->ntb_rx_pend_q_lock, &qp->rx_pend_q))) {
		dev_warn(&pdev->dev, "Freeing item from a non-empty queue\n");
		kfree(entry);
	}

	while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q)))
		kfree(entry);

	set_bit(qp->qp_num, &qp->transport->qp_bitmap);

	dev_info(&pdev->dev, "NTB Transport QP %d freed\n", qp->qp_num);
}
EXPORT_SYMBOL_GPL(ntb_transport_free_queue);
/**
 * ntb_transport_rx_remove - Dequeues enqueued rx packet
 * @qp: NTB queue to be freed
 * @len: pointer to variable to write enqueued buffers length
 *
 * Dequeues unused buffers from receive queue.  Should only be used during
 * shutdown of qp.
 *
 * RETURNS: a pointer to the dequeued buffer's callback data, or NULL on error.
 */
void *ntb_transport_rx_remove(struct ntb_transport_qp *qp, unsigned int *len)
{
	struct ntb_queue_entry *entry;
	void *buf;

	if (!qp || qp->client_ready == NTB_LINK_UP)
		return NULL;

	entry = ntb_list_rm(&qp->ntb_rx_pend_q_lock, &qp->rx_pend_q);
	if (!entry)
		return NULL;

	buf = entry->cb_data;
	*len = entry->len;

	ntb_list_add(&qp->ntb_rx_free_q_lock, &entry->entry, &qp->rx_free_q);

	return buf;
}
EXPORT_SYMBOL_GPL(ntb_transport_rx_remove);
/**
 * ntb_transport_rx_enqueue - Enqueue a new NTB queue entry
 * @qp: NTB transport layer queue the entry is to be enqueued on
 * @cb: per buffer pointer for callback function to use
 * @data: pointer to data buffer that incoming packets will be copied into
 * @len: length of the data buffer
 *
 * Enqueue a new receive buffer onto the transport queue, into which an NTB
 * payload can be received.
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
int ntb_transport_rx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
			     unsigned int len)
{
	struct ntb_queue_entry *entry;

	if (!qp)
		return -EINVAL;

	entry = ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q);
	if (!entry)
		return -ENOMEM;

	entry->cb_data = cb;
	entry->buf = data;
	entry->len = len;

	ntb_list_add(&qp->ntb_rx_pend_q_lock, &entry->entry, &qp->rx_pend_q);

	return 0;
}
EXPORT_SYMBOL_GPL(ntb_transport_rx_enqueue);
/**
 * ntb_transport_tx_enqueue - Enqueue a new NTB queue entry
 * @qp: NTB transport layer queue the entry is to be enqueued on
 * @cb: per buffer pointer for callback function to use
 * @data: pointer to data buffer that will be sent
 * @len: length of the data buffer
 *
 * Enqueue a new transmit buffer onto the transport queue from which an NTB
 * payload will be transmitted.  This assumes that a lock is being held to
 * serialize access to the qp.
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
int ntb_transport_tx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
			     unsigned int len)
{
	struct ntb_queue_entry *entry;
	int rc;

	if (!qp || qp->qp_link != NTB_LINK_UP || !len)
		return -EINVAL;

	entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q);
	if (!entry) {
		qp->tx_err_no_buf++;
		return -ENOMEM;
	}

	entry->cb_data = cb;
	entry->buf = data;
	entry->len = len;
	entry->flags = 0;

	rc = ntb_process_tx(qp, entry);
	if (rc)
		ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry,
			     &qp->tx_free_q);

	return rc;
}
EXPORT_SYMBOL_GPL(ntb_transport_tx_enqueue);
/**
 * ntb_transport_link_up - Notify NTB transport of client readiness to use queue
 * @qp: NTB transport layer queue to be enabled
 *
 * Notify NTB transport layer of client readiness to use queue
 */
void ntb_transport_link_up(struct ntb_transport_qp *qp)
{
	if (!qp)
		return;

	qp->client_ready = NTB_LINK_UP;

	if (qp->transport->transport_link == NTB_LINK_UP)
		schedule_delayed_work(&qp->link_work, 0);
}
EXPORT_SYMBOL_GPL(ntb_transport_link_up);
/**
 * ntb_transport_link_down - Notify NTB transport to no longer enqueue data
 * @qp: NTB transport layer queue to be disabled
 *
 * Notify NTB transport layer of client's desire to no longer receive data on
 * transport queue specified.  It is the client's responsibility to ensure all
 * entries on queue are purged or otherwise handled appropriately.
 */
void ntb_transport_link_down(struct ntb_transport_qp *qp)
{
	struct pci_dev *pdev;
	int rc, val;

	if (!qp)
		return;

	pdev = ntb_query_pdev(qp->ndev);
	qp->client_ready = NTB_LINK_DOWN;

	rc = ntb_read_local_spad(qp->ndev, QP_LINKS, &val);
	if (rc) {
		dev_err(&pdev->dev, "Error reading spad %d\n", QP_LINKS);
		return;
	}

	rc = ntb_write_remote_spad(qp->ndev, QP_LINKS,
				   val & ~(1 << qp->qp_num));
	if (rc)
		dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
			val & ~(1 << qp->qp_num), QP_LINKS);

	if (qp->qp_link == NTB_LINK_UP)
		ntb_send_link_down(qp);
	else
		cancel_delayed_work_sync(&qp->link_work);
}
EXPORT_SYMBOL_GPL(ntb_transport_link_down);
/**
 * ntb_transport_link_query - Query transport link state
 * @qp: NTB transport layer queue to be queried
 *
 * Query connectivity to the remote system of the NTB transport queue
 *
 * RETURNS: true for link up or false for link down
 */
bool ntb_transport_link_query(struct ntb_transport_qp *qp)
{
	if (!qp)
		return false;

	return qp->qp_link == NTB_LINK_UP;
}
EXPORT_SYMBOL_GPL(ntb_transport_link_query);
/**
 * ntb_transport_qp_num - Query the qp number
 * @qp: NTB transport layer queue to be queried
 *
 * Query qp number of the NTB transport queue
 *
 * RETURNS: a zero based number specifying the qp number
 */
unsigned char ntb_transport_qp_num(struct ntb_transport_qp *qp)
{
	if (!qp)
		return 0;

	return qp->qp_num;
}
EXPORT_SYMBOL_GPL(ntb_transport_qp_num);
/**
 * ntb_transport_max_size - Query the max payload size of a qp
 * @qp: NTB transport layer queue to be queried
 *
 * Query the maximum payload size permissible on the given qp
 *
 * RETURNS: the max payload size of a qp
 */
unsigned int ntb_transport_max_size(struct ntb_transport_qp *qp)
{
	unsigned int max;

	if (!qp)
		return 0;

	if (!qp->dma_chan)
		return qp->tx_max_frame - sizeof(struct ntb_payload_header);

	/* If DMA engine usage is possible, try to find the max size for that */
	max = qp->tx_max_frame - sizeof(struct ntb_payload_header);
	max -= max % (1 << qp->dma_chan->device->copy_align);

	return max;
}
EXPORT_SYMBOL_GPL(ntb_transport_max_size);
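
/*
 * A client would typically validate payload sizes against this limit before
 * queueing (a sketch, under the same hypothetical client as above):
 *
 *	if (len > ntb_transport_max_size(qp))
 *		return -EMSGSIZE;
 *	rc = ntb_transport_tx_enqueue(qp, cb, data, len);
 */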