/*
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * Copyright(c) 2015 Intel Corporation. All rights reserved.
 * Copyright(c) 2017 T-Platforms. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * Copyright(c) 2015 Intel Corporation. All rights reserved.
 * Copyright(c) 2017 T-Platforms. All Rights Reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * PCIe NTB Perf Linux driver
 *
 * How to use this tool, by example.
 *
 * Assuming $DBG_DIR is something like:
 * '/sys/kernel/debug/ntb_perf/0000:00:03.0'
 * Suppose that, aside from the local device, there is at least one remote
 * device connected to the NTB with index 0.
 *-----------------------------------------------------------------------------
 * Eg: install the driver with the specified chunk/total orders and DMA enabled
 *
 * root@self# insmod ntb_perf.ko chunk_order=19 total_order=28 use_dma
 *-----------------------------------------------------------------------------
 * Eg: check the NTB ports (index) and MW mapping information
 *
 * root@self# cat $DBG_DIR/info
 *-----------------------------------------------------------------------------
 * Eg: start a performance test with peer (index 0) and get the test metrics
 *
 * root@self# echo 0 > $DBG_DIR/run
 * root@self# cat $DBG_DIR/run
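 *-----------------------------------------------------------------------------
 * Eg: optionally set the number of worker threads before submitting the test
 * (the default is one thread; see the threads_count debugfs node further below)
 *
 * root@self# echo 4 > $DBG_DIR/threads_count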
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/pci.h>
#include <linux/ktime.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/sizes.h>
#include <linux/workqueue.h>
#include <linux/debugfs.h>
#include <linux/random.h>
#include <linux/ntb.h>
#define DRIVER_NAME		"ntb_perf"
#define DRIVER_VERSION		"2.0"

MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRIVER_VERSION);
MODULE_AUTHOR("Dave Jiang <dave.jiang@intel.com>");
MODULE_DESCRIPTION("PCIe NTB Performance Measurement Tool");

#define MAX_THREADS_CNT		32
#define DEF_THREADS_CNT		1
#define MAX_CHUNK_SIZE		SZ_1M
#define MAX_CHUNK_ORDER		20 /* no larger than 1M */

#define DMA_TRIES		100
#define DMA_MDELAY		10

#define MSG_TRIES		1000
#define MSG_UDELAY_LOW		1000000
#define MSG_UDELAY_HIGH		2000000

#define PERF_BUF_LEN		1024
static unsigned long max_mw_size;
module_param(max_mw_size, ulong, 0644);
MODULE_PARM_DESC(max_mw_size, "Upper limit of memory window size");

static unsigned char chunk_order = 19; /* 512K */
module_param(chunk_order, byte, 0644);
MODULE_PARM_DESC(chunk_order, "Data chunk order [2^n] to transfer");

static unsigned char total_order = 30; /* 1G */
module_param(total_order, byte, 0644);
MODULE_PARM_DESC(total_order, "Total data order [2^n] to transfer");

static bool use_dma; /* default to 0 */
module_param(use_dma, bool, 0644);
MODULE_PARM_DESC(use_dma, "Use DMA engine to measure performance");
/*==============================================================================
 *                        Perf driver data definition
 *==============================================================================
 */
enum perf_cmd {
	PERF_CMD_INVAL = -1,	/* invalid spad command */
	PERF_CMD_SSIZE = 0,	/* send out buffer size */
	PERF_CMD_RSIZE = 1,	/* recv in buffer size */
	PERF_CMD_SXLAT = 2,	/* send in buffer xlat */
	PERF_CMD_RXLAT = 3,	/* recv out buffer xlat */
	PERF_CMD_CLEAR = 4,	/* clear allocated memory */
	PERF_STS_DONE  = 5,	/* init is done */
	PERF_STS_LNKUP = 6,	/* link up state flag */
};
struct perf_peer {
	struct perf_ctx	*perf;
	int pidx;
	int gidx;

	/* Outbound MW params */
	u64 outbuf_xlat;
	resource_size_t outbuf_size;
	void __iomem *outbuf;
	phys_addr_t out_phys_addr;
	dma_addr_t dma_dst_addr;

	/* Inbound MW params */
	dma_addr_t inbuf_xlat;
	resource_size_t inbuf_size;
	void *inbuf;

	/* NTB connection setup service */
	struct work_struct service;
	unsigned long sts;

	struct completion init_comp;
};
#define to_peer_service(__work) \
	container_of(__work, struct perf_peer, service)
struct perf_thread {
	struct perf_ctx *perf;
	int tidx;

	/* DMA-based test sync parameters */
	atomic_t dma_sync;
	wait_queue_head_t dma_wait;
	struct dma_chan *dma_chan;

	/* Data source and measured statistics */
	void *src;
	u64 copied;
	ktime_t duration;
	int status;
	struct work_struct work;
};
#define to_thread_work(__work) \
	container_of(__work, struct perf_thread, work)
struct perf_ctx {
	struct ntb_dev *ntb;

	/* Global device index and peers descriptors */
	int gidx;
	int pcnt;
	struct perf_peer *peers;

	/* Performance measuring work-threads interface */
	unsigned long busy_flag;
	wait_queue_head_t twait;
	atomic_t tsync;
	u8 tcnt;
	struct perf_peer *test_peer;
	struct perf_thread threads[MAX_THREADS_CNT];

	/* Scratchpad/Message IO operations */
	int (*cmd_send)(struct perf_peer *peer, enum perf_cmd cmd, u64 data);
	int (*cmd_recv)(struct perf_ctx *perf, int *pidx, enum perf_cmd *cmd,
			u64 *data);

	struct dentry *dbgfs_dir;
};
/*==============================================================================
 *                     Scratchpad-based commands interface
 *==============================================================================
 */
/*
 * Each global device index owns a CMD/LDATA/HDATA scratchpad triplet plus a
 * NOTIFY doorbell bit (see their use in perf_spad_cmd_send()/recv() below).
 */
#define PERF_SPAD_CNT(_pcnt) \
#define PERF_SPAD_CMD(_gidx) \
#define PERF_SPAD_LDATA(_gidx) \
#define PERF_SPAD_HDATA(_gidx) \
#define PERF_SPAD_NOTIFY(_gidx) \

/*==============================================================================
 *                      Message-based commands interface
 *==============================================================================
 */
#define PERF_MSG_CNT		3
#define PERF_MSG_CMD		0
#define PERF_MSG_LDATA		1
#define PERF_MSG_HDATA		2
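
/*
 * Illustrative sketch of how a 64-bit command argument travels over both
 * transports below: it is always split into two 32-bit halves and put back
 * together on the receiving side (see the *_LDATA/*_HDATA accesses):
 *
 *	u32 ldata = lower_32_bits(data);	// written to the LDATA register
 *	u32 hdata = upper_32_bits(data);	// written to the HDATA register
 *	u64 value = ((u64)hdata << 32) | ldata;	// reassembled by the receiver
 */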
/*==============================================================================
 *                          Static data declarations
 *==============================================================================
 */
static struct dentry *perf_dbgfs_topdir;

static struct workqueue_struct *perf_wq __read_mostly;
/*==============================================================================
 *                  NTB cross-link commands execution service
 *==============================================================================
 */
static void perf_terminate_test(struct perf_ctx *perf);

static inline bool perf_link_is_up(struct perf_peer *peer)
{
	u64 link;

	link = ntb_link_is_up(peer->perf->ntb, NULL, NULL);
	return !!(link & BIT_ULL_MASK(peer->pidx));
}
static int perf_spad_cmd_send(struct perf_peer *peer, enum perf_cmd cmd,
			      u64 data)
{
	struct perf_ctx *perf = peer->perf;
	int try;
	u32 sts;

	dev_dbg(&perf->ntb->dev, "CMD send: %d 0x%llx\n", cmd, data);

	/*
	 * Perform a predefined number of attempts before giving up.
	 * The data is sent to the port-specific scratchpad, so as to prevent
	 * a multi-port access race condition. Additionally, there is no need
	 * for local locking, since only the thread-safe service work uses
	 * this method.
	 */
	for (try = 0; try < MSG_TRIES; try++) {
		if (!perf_link_is_up(peer))
			return -ENOLINK;

		sts = ntb_peer_spad_read(perf->ntb, peer->pidx,
					 PERF_SPAD_CMD(perf->gidx));
		if (sts != PERF_CMD_INVAL) {
			usleep_range(MSG_UDELAY_LOW, MSG_UDELAY_HIGH);
			continue;
		}

		ntb_peer_spad_write(perf->ntb, peer->pidx,
				    PERF_SPAD_LDATA(perf->gidx),
				    lower_32_bits(data));
		ntb_peer_spad_write(perf->ntb, peer->pidx,
				    PERF_SPAD_HDATA(perf->gidx),
				    upper_32_bits(data));
		ntb_peer_spad_write(perf->ntb, peer->pidx,
				    PERF_SPAD_CMD(perf->gidx),
				    cmd);

		/* Ring the peer's doorbell to notify it of the new command */
		ntb_peer_db_set(perf->ntb, PERF_SPAD_NOTIFY(peer->gidx));

		dev_dbg(&perf->ntb->dev, "DB ring peer %#llx\n",
			PERF_SPAD_NOTIFY(peer->gidx));

		break;
	}

	return try < MSG_TRIES ? 0 : -EAGAIN;
}
304 static int perf_spad_cmd_recv(struct perf_ctx
*perf
, int *pidx
,
305 enum perf_cmd
*cmd
, u64
*data
)
307 struct perf_peer
*peer
;
310 ntb_db_clear(perf
->ntb
, PERF_SPAD_NOTIFY(perf
->gidx
));
	/*
	 * We start scanning from the very beginning, since the cleared DB
	 * may have been set by any peer. Yes, this means peers with smaller
	 * indexes are serviced with higher priority, but it keeps the
	 * scratchpad and message code unified and simple.
	 */
318 for (*pidx
= 0; *pidx
< perf
->pcnt
; (*pidx
)++) {
319 peer
= &perf
->peers
[*pidx
];
321 if (!perf_link_is_up(peer
))
324 val
= ntb_spad_read(perf
->ntb
, PERF_SPAD_CMD(peer
->gidx
));
325 if (val
== PERF_CMD_INVAL
)
330 val
= ntb_spad_read(perf
->ntb
, PERF_SPAD_LDATA(peer
->gidx
));
333 val
= ntb_spad_read(perf
->ntb
, PERF_SPAD_HDATA(peer
->gidx
));
334 *data
|= (u64
)val
<< 32;
	/* The next command can be retrieved from now on */
337 ntb_spad_write(perf
->ntb
, PERF_SPAD_CMD(peer
->gidx
),
340 dev_dbg(&perf
->ntb
->dev
, "CMD recv: %d 0x%llx\n", *cmd
, *data
);
348 static int perf_msg_cmd_send(struct perf_peer
*peer
, enum perf_cmd cmd
,
351 struct perf_ctx
*perf
= peer
->perf
;
355 dev_dbg(&perf
->ntb
->dev
, "CMD send: %d 0x%llx\n", cmd
, data
);
	/*
	 * Perform a predefined number of attempts before giving up. Message
	 * registers are free of race-condition problems when accessed from
	 * different ports, so we don't need to split the registers by global
	 * device index. We also don't need local locking, since the method is
	 * used from the service work only.
	 */
364 outbits
= ntb_msg_outbits(perf
->ntb
);
365 for (try = 0; try < MSG_TRIES
; try++) {
366 if (!perf_link_is_up(peer
))
369 ret
= ntb_msg_clear_sts(perf
->ntb
, outbits
);
373 ntb_peer_msg_write(perf
->ntb
, peer
->pidx
, PERF_MSG_LDATA
,
374 lower_32_bits(data
));
376 if (ntb_msg_read_sts(perf
->ntb
) & outbits
) {
377 usleep_range(MSG_UDELAY_LOW
, MSG_UDELAY_HIGH
);
381 ntb_peer_msg_write(perf
->ntb
, peer
->pidx
, PERF_MSG_HDATA
,
382 upper_32_bits(data
));
384 /* This call shall trigger peer message event */
385 ntb_peer_msg_write(perf
->ntb
, peer
->pidx
, PERF_MSG_CMD
, cmd
);
390 return try < MSG_TRIES
? 0 : -EAGAIN
;
393 static int perf_msg_cmd_recv(struct perf_ctx
*perf
, int *pidx
,
394 enum perf_cmd
*cmd
, u64
*data
)
399 inbits
= ntb_msg_inbits(perf
->ntb
);
401 if (hweight64(ntb_msg_read_sts(perf
->ntb
) & inbits
) < 3)
404 val
= ntb_msg_read(perf
->ntb
, pidx
, PERF_MSG_CMD
);
407 val
= ntb_msg_read(perf
->ntb
, pidx
, PERF_MSG_LDATA
);
410 val
= ntb_msg_read(perf
->ntb
, pidx
, PERF_MSG_HDATA
);
411 *data
|= (u64
)val
<< 32;
	/* The next command can be retrieved from now on */
414 ntb_msg_clear_sts(perf
->ntb
, inbits
);
416 dev_dbg(&perf
->ntb
->dev
, "CMD recv: %d 0x%llx\n", *cmd
, *data
);
static int perf_cmd_send(struct perf_peer *peer, enum perf_cmd cmd, u64 data)
{
	struct perf_ctx *perf = peer->perf;

	if (cmd == PERF_CMD_SSIZE || cmd == PERF_CMD_SXLAT)
		return perf->cmd_send(peer, cmd, data);

	dev_err(&perf->ntb->dev, "Send invalid command\n");

	return -EINVAL;
}
432 static int perf_cmd_exec(struct perf_peer
*peer
, enum perf_cmd cmd
)
442 dev_err(&peer
->perf
->ntb
->dev
, "Exec invalid command\n");
	/* No memory barrier is needed, since bit ops have an internal lock */
447 set_bit(cmd
, &peer
->sts
);
449 dev_dbg(&peer
->perf
->ntb
->dev
, "CMD exec: %d\n", cmd
);
451 (void)queue_work(system_highpri_wq
, &peer
->service
);
456 static int perf_cmd_recv(struct perf_ctx
*perf
)
458 struct perf_peer
*peer
;
462 while (!(ret
= perf
->cmd_recv(perf
, &pidx
, &cmd
, &data
))) {
463 peer
= &perf
->peers
[pidx
];
467 peer
->inbuf_size
= data
;
468 return perf_cmd_exec(peer
, PERF_CMD_RSIZE
);
470 peer
->outbuf_xlat
= data
;
471 return perf_cmd_exec(peer
, PERF_CMD_RXLAT
);
473 dev_err(&perf
->ntb
->dev
, "Recv invalid command\n");
478 /* Return 0 if no data left to process, otherwise an error */
479 return ret
== -ENODATA
? 0 : ret
;
482 static void perf_link_event(void *ctx
)
484 struct perf_ctx
*perf
= ctx
;
485 struct perf_peer
*peer
;
489 for (pidx
= 0; pidx
< perf
->pcnt
; pidx
++) {
490 peer
= &perf
->peers
[pidx
];
492 lnk_up
= perf_link_is_up(peer
);
495 !test_and_set_bit(PERF_STS_LNKUP
, &peer
->sts
)) {
496 perf_cmd_exec(peer
, PERF_CMD_SSIZE
);
497 } else if (!lnk_up
&&
498 test_and_clear_bit(PERF_STS_LNKUP
, &peer
->sts
)) {
499 perf_cmd_exec(peer
, PERF_CMD_CLEAR
);
504 static void perf_db_event(void *ctx
, int vec
)
506 struct perf_ctx
*perf
= ctx
;
508 dev_dbg(&perf
->ntb
->dev
, "DB vec %d mask %#llx bits %#llx\n", vec
,
509 ntb_db_vector_mask(perf
->ntb
, vec
), ntb_db_read(perf
->ntb
));
511 /* Just receive all available commands */
512 (void)perf_cmd_recv(perf
);
515 static void perf_msg_event(void *ctx
)
517 struct perf_ctx
*perf
= ctx
;
519 dev_dbg(&perf
->ntb
->dev
, "Msg status bits %#llx\n",
520 ntb_msg_read_sts(perf
->ntb
));
522 /* Messages are only sent one-by-one */
523 (void)perf_cmd_recv(perf
);
static const struct ntb_ctx_ops perf_ops = {
	.link_event = perf_link_event,
	.db_event = perf_db_event,
	.msg_event = perf_msg_event,
};

static void perf_free_outbuf(struct perf_peer *peer)
{
	(void)ntb_peer_mw_clear_trans(peer->perf->ntb, peer->pidx, peer->gidx);
}
537 static int perf_setup_outbuf(struct perf_peer
*peer
)
539 struct perf_ctx
*perf
= peer
->perf
;
542 /* Outbuf size can be unaligned due to custom max_mw_size */
543 ret
= ntb_peer_mw_set_trans(perf
->ntb
, peer
->pidx
, peer
->gidx
,
544 peer
->outbuf_xlat
, peer
->outbuf_size
);
546 dev_err(&perf
->ntb
->dev
, "Failed to set outbuf translation\n");
550 /* Initialization is finally done */
551 set_bit(PERF_STS_DONE
, &peer
->sts
);
552 complete_all(&peer
->init_comp
);
557 static void perf_free_inbuf(struct perf_peer
*peer
)
562 (void)ntb_mw_clear_trans(peer
->perf
->ntb
, peer
->pidx
, peer
->gidx
);
563 dma_free_coherent(&peer
->perf
->ntb
->pdev
->dev
, peer
->inbuf_size
,
564 peer
->inbuf
, peer
->inbuf_xlat
);
568 static int perf_setup_inbuf(struct perf_peer
*peer
)
570 resource_size_t xlat_align
, size_align
, size_max
;
571 struct perf_ctx
*perf
= peer
->perf
;
574 /* Get inbound MW parameters */
575 ret
= ntb_mw_get_align(perf
->ntb
, peer
->pidx
, perf
->gidx
,
576 &xlat_align
, &size_align
, &size_max
);
578 dev_err(&perf
->ntb
->dev
, "Couldn't get inbuf restrictions\n");
582 if (peer
->inbuf_size
> size_max
) {
583 dev_err(&perf
->ntb
->dev
, "Too big inbuf size %pa > %pa\n",
584 &peer
->inbuf_size
, &size_max
);
588 peer
->inbuf_size
= round_up(peer
->inbuf_size
, size_align
);
590 perf_free_inbuf(peer
);
592 peer
->inbuf
= dma_alloc_coherent(&perf
->ntb
->pdev
->dev
,
593 peer
->inbuf_size
, &peer
->inbuf_xlat
,
596 dev_err(&perf
->ntb
->dev
, "Failed to alloc inbuf of %pa\n",
600 if (!IS_ALIGNED(peer
->inbuf_xlat
, xlat_align
)) {
601 dev_err(&perf
->ntb
->dev
, "Unaligned inbuf allocated\n");
605 ret
= ntb_mw_set_trans(perf
->ntb
, peer
->pidx
, peer
->gidx
,
606 peer
->inbuf_xlat
, peer
->inbuf_size
);
608 dev_err(&perf
->ntb
->dev
, "Failed to set inbuf translation\n");
	/*
	 * We submit the inbuf xlat transmission command for execution here to
	 * follow the code architecture, even though this method is called from
	 * the service work itself, so the command will be executed right after
	 * this method returns.
	 */
617 (void)perf_cmd_exec(peer
, PERF_CMD_SXLAT
);
622 perf_free_inbuf(peer
);
627 static void perf_service_work(struct work_struct
*work
)
629 struct perf_peer
*peer
= to_peer_service(work
);
631 if (test_and_clear_bit(PERF_CMD_SSIZE
, &peer
->sts
))
632 perf_cmd_send(peer
, PERF_CMD_SSIZE
, peer
->outbuf_size
);
634 if (test_and_clear_bit(PERF_CMD_RSIZE
, &peer
->sts
))
635 perf_setup_inbuf(peer
);
637 if (test_and_clear_bit(PERF_CMD_SXLAT
, &peer
->sts
))
638 perf_cmd_send(peer
, PERF_CMD_SXLAT
, peer
->inbuf_xlat
);
640 if (test_and_clear_bit(PERF_CMD_RXLAT
, &peer
->sts
))
641 perf_setup_outbuf(peer
);
643 if (test_and_clear_bit(PERF_CMD_CLEAR
, &peer
->sts
)) {
644 init_completion(&peer
->init_comp
);
645 clear_bit(PERF_STS_DONE
, &peer
->sts
);
646 if (test_bit(0, &peer
->perf
->busy_flag
) &&
647 peer
== peer
->perf
->test_peer
) {
648 dev_warn(&peer
->perf
->ntb
->dev
,
649 "Freeing while test on-fly\n");
650 perf_terminate_test(peer
->perf
);
652 perf_free_outbuf(peer
);
653 perf_free_inbuf(peer
);
657 static int perf_init_service(struct perf_ctx
*perf
)
661 if (ntb_peer_mw_count(perf
->ntb
) < perf
->pcnt
) {
662 dev_err(&perf
->ntb
->dev
, "Not enough memory windows\n");
666 if (ntb_msg_count(perf
->ntb
) >= PERF_MSG_CNT
) {
667 perf
->cmd_send
= perf_msg_cmd_send
;
668 perf
->cmd_recv
= perf_msg_cmd_recv
;
670 dev_dbg(&perf
->ntb
->dev
, "Message service initialized\n");
675 dev_dbg(&perf
->ntb
->dev
, "Message service unsupported\n");
677 mask
= GENMASK_ULL(perf
->pcnt
, 0);
678 if (ntb_spad_count(perf
->ntb
) >= PERF_SPAD_CNT(perf
->pcnt
) &&
679 (ntb_db_valid_mask(perf
->ntb
) & mask
) == mask
) {
680 perf
->cmd_send
= perf_spad_cmd_send
;
681 perf
->cmd_recv
= perf_spad_cmd_recv
;
683 dev_dbg(&perf
->ntb
->dev
, "Scratchpad service initialized\n");
688 dev_dbg(&perf
->ntb
->dev
, "Scratchpad service unsupported\n");
690 dev_err(&perf
->ntb
->dev
, "Command services unsupported\n");
695 static int perf_enable_service(struct perf_ctx
*perf
)
700 mask
= ntb_db_valid_mask(perf
->ntb
);
701 (void)ntb_db_set_mask(perf
->ntb
, mask
);
703 ret
= ntb_set_ctx(perf
->ntb
, perf
, &perf_ops
);
707 if (perf
->cmd_send
== perf_msg_cmd_send
) {
710 inbits
= ntb_msg_inbits(perf
->ntb
);
711 outbits
= ntb_msg_outbits(perf
->ntb
);
712 (void)ntb_msg_set_mask(perf
->ntb
, inbits
| outbits
);
714 incmd_bit
= BIT_ULL(__ffs64(inbits
));
715 ret
= ntb_msg_clear_mask(perf
->ntb
, incmd_bit
);
717 dev_dbg(&perf
->ntb
->dev
, "MSG sts unmasked %#llx\n", incmd_bit
);
719 scnt
= ntb_spad_count(perf
->ntb
);
720 for (sidx
= 0; sidx
< scnt
; sidx
++)
721 ntb_spad_write(perf
->ntb
, sidx
, PERF_CMD_INVAL
);
722 incmd_bit
= PERF_SPAD_NOTIFY(perf
->gidx
);
723 ret
= ntb_db_clear_mask(perf
->ntb
, incmd_bit
);
725 dev_dbg(&perf
->ntb
->dev
, "DB bits unmasked %#llx\n", incmd_bit
);
728 ntb_clear_ctx(perf
->ntb
);
732 ntb_link_enable(perf
->ntb
, NTB_SPEED_AUTO
, NTB_WIDTH_AUTO
);
	/* Might not be necessary */
734 ntb_link_event(perf
->ntb
);
739 static void perf_disable_service(struct perf_ctx
*perf
)
743 if (perf
->cmd_send
== perf_msg_cmd_send
) {
746 inbits
= ntb_msg_inbits(perf
->ntb
);
747 (void)ntb_msg_set_mask(perf
->ntb
, inbits
);
749 (void)ntb_db_set_mask(perf
->ntb
, PERF_SPAD_NOTIFY(perf
->gidx
));
752 ntb_clear_ctx(perf
->ntb
);
754 for (pidx
= 0; pidx
< perf
->pcnt
; pidx
++)
755 perf_cmd_exec(&perf
->peers
[pidx
], PERF_CMD_CLEAR
);
757 for (pidx
= 0; pidx
< perf
->pcnt
; pidx
++)
758 flush_work(&perf
->peers
[pidx
].service
);
760 for (pidx
= 0; pidx
< perf
->pcnt
; pidx
++) {
761 struct perf_peer
*peer
= &perf
->peers
[pidx
];
763 ntb_spad_write(perf
->ntb
, PERF_SPAD_CMD(peer
->gidx
), 0);
766 ntb_db_clear(perf
->ntb
, PERF_SPAD_NOTIFY(perf
->gidx
));
768 ntb_link_disable(perf
->ntb
);
/*==============================================================================
 *                     Performance measuring work-thread
 *==============================================================================
 */
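/*
 * A note on the metric reported below: throughput is computed as
 * copied_bytes / elapsed_usecs via div64_u64(). One byte per microsecond
 * equals one MByte per second, so that quotient is already in MBytes/s.
 */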
static void perf_dma_copy_callback(void *data)
{
	struct perf_thread *pthr = data;

	atomic_dec(&pthr->dma_sync);
	wake_up(&pthr->dma_wait);
}
784 static int perf_copy_chunk(struct perf_thread
*pthr
,
785 void __iomem
*dst
, void *src
, size_t len
)
787 struct dma_async_tx_descriptor
*tx
;
788 struct dmaengine_unmap_data
*unmap
;
789 struct device
*dma_dev
;
790 int try = 0, ret
= 0;
791 struct perf_peer
*peer
= pthr
->perf
->test_peer
;
793 void __iomem
*dst_vaddr
;
794 dma_addr_t dst_dma_addr
;
797 memcpy_toio(dst
, src
, len
);
798 goto ret_check_tsync
;
801 dma_dev
= pthr
->dma_chan
->device
->dev
;
803 if (!is_dma_copy_aligned(pthr
->dma_chan
->device
, offset_in_page(src
),
804 offset_in_page(dst
), len
))
807 vbase
= peer
->outbuf
;
809 dst_dma_addr
= peer
->dma_dst_addr
+ (dst_vaddr
- vbase
);
811 unmap
= dmaengine_get_unmap_data(dma_dev
, 1, GFP_NOWAIT
);
816 unmap
->addr
[0] = dma_map_page(dma_dev
, virt_to_page(src
),
817 offset_in_page(src
), len
, DMA_TO_DEVICE
);
818 if (dma_mapping_error(dma_dev
, unmap
->addr
[0])) {
820 goto err_free_resource
;
825 tx
= dmaengine_prep_dma_memcpy(pthr
->dma_chan
, dst_dma_addr
,
826 unmap
->addr
[0], len
, DMA_PREP_INTERRUPT
| DMA_CTRL_ACK
);
829 } while (!tx
&& (try++ < DMA_TRIES
));
833 goto err_free_resource
;
836 tx
->callback
= perf_dma_copy_callback
;
837 tx
->callback_param
= pthr
;
838 dma_set_unmap(tx
, unmap
);
840 ret
= dma_submit_error(dmaengine_submit(tx
));
842 dmaengine_unmap_put(unmap
);
843 goto err_free_resource
;
846 dmaengine_unmap_put(unmap
);
848 atomic_inc(&pthr
->dma_sync
);
849 dma_async_issue_pending(pthr
->dma_chan
);
852 return likely(atomic_read(&pthr
->perf
->tsync
) > 0) ? 0 : -EINTR
;
855 dmaengine_unmap_put(unmap
);
static bool perf_dma_filter(struct dma_chan *chan, void *data)
{
	struct perf_ctx *perf = data;
	int node;

	/* Prefer DMA channels that are NUMA-local to the NTB device */
	node = dev_to_node(&perf->ntb->dev);

	return node == NUMA_NO_NODE || node == dev_to_node(chan->device->dev);
}
870 static int perf_init_test(struct perf_thread
*pthr
)
872 struct perf_ctx
*perf
= pthr
->perf
;
873 dma_cap_mask_t dma_mask
;
874 struct perf_peer
*peer
= pthr
->perf
->test_peer
;
876 pthr
->src
= kmalloc_node(perf
->test_peer
->outbuf_size
, GFP_KERNEL
,
877 dev_to_node(&perf
->ntb
->dev
));
881 get_random_bytes(pthr
->src
, perf
->test_peer
->outbuf_size
);
886 dma_cap_zero(dma_mask
);
887 dma_cap_set(DMA_MEMCPY
, dma_mask
);
888 pthr
->dma_chan
= dma_request_channel(dma_mask
, perf_dma_filter
, perf
);
889 if (!pthr
->dma_chan
) {
890 dev_err(&perf
->ntb
->dev
, "%d: Failed to get DMA channel\n",
895 dma_map_resource(pthr
->dma_chan
->device
->dev
,
896 peer
->out_phys_addr
, peer
->outbuf_size
,
898 if (dma_mapping_error(pthr
->dma_chan
->device
->dev
,
899 peer
->dma_dst_addr
)) {
900 dev_err(pthr
->dma_chan
->device
->dev
, "%d: Failed to map DMA addr\n",
902 peer
->dma_dst_addr
= 0;
903 dma_release_channel(pthr
->dma_chan
);
906 dev_dbg(pthr
->dma_chan
->device
->dev
, "%d: Map MMIO %pa to DMA addr %pad\n",
908 &peer
->out_phys_addr
,
909 &peer
->dma_dst_addr
);
911 atomic_set(&pthr
->dma_sync
, 0);
915 atomic_dec(&perf
->tsync
);
916 wake_up(&perf
->twait
);
921 static int perf_run_test(struct perf_thread
*pthr
)
923 struct perf_peer
*peer
= pthr
->perf
->test_peer
;
924 struct perf_ctx
*perf
= pthr
->perf
;
925 void __iomem
*flt_dst
, *bnd_dst
;
926 u64 total_size
, chunk_size
;
930 total_size
= 1ULL << total_order
;
931 chunk_size
= 1ULL << chunk_order
;
932 chunk_size
= min_t(u64
, peer
->outbuf_size
, chunk_size
);
935 bnd_dst
= peer
->outbuf
+ peer
->outbuf_size
;
936 flt_dst
= peer
->outbuf
;
938 pthr
->duration
= ktime_get();
	/* The copied field is cleared at the test launch stage */
941 while (pthr
->copied
< total_size
) {
942 ret
= perf_copy_chunk(pthr
, flt_dst
, flt_src
, chunk_size
);
944 dev_err(&perf
->ntb
->dev
, "%d: Got error %d on test\n",
949 pthr
->copied
+= chunk_size
;
951 flt_dst
+= chunk_size
;
952 flt_src
+= chunk_size
;
953 if (flt_dst
>= bnd_dst
|| flt_dst
< peer
->outbuf
) {
954 flt_dst
= peer
->outbuf
;
958 /* Give up CPU to give a chance for other threads to use it */
965 static int perf_sync_test(struct perf_thread
*pthr
)
967 struct perf_ctx
*perf
= pthr
->perf
;
972 wait_event(pthr
->dma_wait
,
973 (atomic_read(&pthr
->dma_sync
) == 0 ||
974 atomic_read(&perf
->tsync
) < 0));
976 if (atomic_read(&perf
->tsync
) < 0)
980 pthr
->duration
= ktime_sub(ktime_get(), pthr
->duration
);
982 dev_dbg(&perf
->ntb
->dev
, "%d: copied %llu bytes\n",
983 pthr
->tidx
, pthr
->copied
);
985 dev_dbg(&perf
->ntb
->dev
, "%d: lasted %llu usecs\n",
986 pthr
->tidx
, ktime_to_us(pthr
->duration
));
988 dev_dbg(&perf
->ntb
->dev
, "%d: %llu MBytes/s\n", pthr
->tidx
,
989 div64_u64(pthr
->copied
, ktime_to_us(pthr
->duration
)));
994 static void perf_clear_test(struct perf_thread
*pthr
)
996 struct perf_ctx
*perf
= pthr
->perf
;
	/*
	 * If the test finished without errors, termination isn't needed.
	 * We call it anyway just to make sure all the transfers are completed.
	 */
1005 (void)dmaengine_terminate_sync(pthr
->dma_chan
);
1006 if (pthr
->perf
->test_peer
->dma_dst_addr
)
1007 dma_unmap_resource(pthr
->dma_chan
->device
->dev
,
1008 pthr
->perf
->test_peer
->dma_dst_addr
,
1009 pthr
->perf
->test_peer
->outbuf_size
,
1010 DMA_FROM_DEVICE
, 0);
1012 dma_release_channel(pthr
->dma_chan
);
1015 atomic_dec(&perf
->tsync
);
1016 wake_up(&perf
->twait
);
1020 static void perf_thread_work(struct work_struct
*work
)
1022 struct perf_thread
*pthr
= to_thread_work(work
);
	/*
	 * Perform the stages in compliance with the use_dma flag value.
	 * The test status is changed only if an error happens; otherwise
	 * the -ENODATA status is kept while the test is in flight. Results
	 * synchronization is performed only if the test finished without
	 * an error or interruption.
	 */
1032 ret
= perf_init_test(pthr
);
1038 ret
= perf_run_test(pthr
);
1041 goto err_clear_test
;
1044 pthr
->status
= perf_sync_test(pthr
);
1047 perf_clear_test(pthr
);
1050 static int perf_set_tcnt(struct perf_ctx
*perf
, u8 tcnt
)
1052 if (tcnt
== 0 || tcnt
> MAX_THREADS_CNT
)
1055 if (test_and_set_bit_lock(0, &perf
->busy_flag
))
1060 clear_bit_unlock(0, &perf
->busy_flag
);
1065 static void perf_terminate_test(struct perf_ctx
*perf
)
1069 atomic_set(&perf
->tsync
, -1);
1070 wake_up(&perf
->twait
);
1072 for (tidx
= 0; tidx
< MAX_THREADS_CNT
; tidx
++) {
1073 wake_up(&perf
->threads
[tidx
].dma_wait
);
1074 cancel_work_sync(&perf
->threads
[tidx
].work
);
1078 static int perf_submit_test(struct perf_peer
*peer
)
1080 struct perf_ctx
*perf
= peer
->perf
;
1081 struct perf_thread
*pthr
;
1084 ret
= wait_for_completion_interruptible(&peer
->init_comp
);
1088 if (test_and_set_bit_lock(0, &perf
->busy_flag
))
1091 perf
->test_peer
= peer
;
1092 atomic_set(&perf
->tsync
, perf
->tcnt
);
1094 for (tidx
= 0; tidx
< MAX_THREADS_CNT
; tidx
++) {
1095 pthr
= &perf
->threads
[tidx
];
1097 pthr
->status
= -ENODATA
;
1099 pthr
->duration
= ktime_set(0, 0);
1100 if (tidx
< perf
->tcnt
)
1101 (void)queue_work(perf_wq
, &pthr
->work
);
1104 ret
= wait_event_interruptible(perf
->twait
,
1105 atomic_read(&perf
->tsync
) <= 0);
1106 if (ret
== -ERESTARTSYS
) {
1107 perf_terminate_test(perf
);
1111 clear_bit_unlock(0, &perf
->busy_flag
);
1116 static int perf_read_stats(struct perf_ctx
*perf
, char *buf
,
1117 size_t size
, ssize_t
*pos
)
1119 struct perf_thread
*pthr
;
1122 if (test_and_set_bit_lock(0, &perf
->busy_flag
))
1125 (*pos
) += scnprintf(buf
+ *pos
, size
- *pos
,
1126 " Peer %d test statistics:\n", perf
->test_peer
->pidx
);
1128 for (tidx
= 0; tidx
< MAX_THREADS_CNT
; tidx
++) {
1129 pthr
= &perf
->threads
[tidx
];
1131 if (pthr
->status
== -ENODATA
)
1135 (*pos
) += scnprintf(buf
+ *pos
, size
- *pos
,
1136 "%d: error status %d\n", tidx
, pthr
->status
);
1140 (*pos
) += scnprintf(buf
+ *pos
, size
- *pos
,
1141 "%d: copied %llu bytes in %llu usecs, %llu MBytes/s\n",
1142 tidx
, pthr
->copied
, ktime_to_us(pthr
->duration
),
1143 div64_u64(pthr
->copied
, ktime_to_us(pthr
->duration
)));
1146 clear_bit_unlock(0, &perf
->busy_flag
);
1151 static void perf_init_threads(struct perf_ctx
*perf
)
1153 struct perf_thread
*pthr
;
1156 perf
->tcnt
= DEF_THREADS_CNT
;
1157 perf
->test_peer
= &perf
->peers
[0];
1158 init_waitqueue_head(&perf
->twait
);
1160 for (tidx
= 0; tidx
< MAX_THREADS_CNT
; tidx
++) {
1161 pthr
= &perf
->threads
[tidx
];
1165 pthr
->status
= -ENODATA
;
1166 init_waitqueue_head(&pthr
->dma_wait
);
1167 INIT_WORK(&pthr
->work
, perf_thread_work
);
1171 static void perf_clear_threads(struct perf_ctx
*perf
)
1173 perf_terminate_test(perf
);
/*==============================================================================
 *                               DebugFS nodes
 *==============================================================================
 */
1181 static ssize_t
perf_dbgfs_read_info(struct file
*filep
, char __user
*ubuf
,
1182 size_t size
, loff_t
*offp
)
1184 struct perf_ctx
*perf
= filep
->private_data
;
1185 struct perf_peer
*peer
;
1191 buf_size
= min_t(size_t, size
, 0x1000U
);
1193 buf
= kmalloc(buf_size
, GFP_KERNEL
);
1197 pos
+= scnprintf(buf
+ pos
, buf_size
- pos
,
1198 " Performance measuring tool info:\n\n");
1200 pos
+= scnprintf(buf
+ pos
, buf_size
- pos
,
1201 "Local port %d, Global index %d\n", ntb_port_number(perf
->ntb
),
1203 pos
+= scnprintf(buf
+ pos
, buf_size
- pos
, "Test status: ");
1204 if (test_bit(0, &perf
->busy_flag
)) {
1205 pos
+= scnprintf(buf
+ pos
, buf_size
- pos
,
1206 "on-fly with port %d (%d)\n",
1207 ntb_peer_port_number(perf
->ntb
, perf
->test_peer
->pidx
),
1208 perf
->test_peer
->pidx
);
1210 pos
+= scnprintf(buf
+ pos
, buf_size
- pos
, "idle\n");
1213 for (pidx
= 0; pidx
< perf
->pcnt
; pidx
++) {
1214 peer
= &perf
->peers
[pidx
];
1216 pos
+= scnprintf(buf
+ pos
, buf_size
- pos
,
1217 "Port %d (%d), Global index %d:\n",
1218 ntb_peer_port_number(perf
->ntb
, peer
->pidx
), peer
->pidx
,
1221 pos
+= scnprintf(buf
+ pos
, buf_size
- pos
,
1222 "\tLink status: %s\n",
1223 test_bit(PERF_STS_LNKUP
, &peer
->sts
) ? "up" : "down");
1225 pos
+= scnprintf(buf
+ pos
, buf_size
- pos
,
1226 "\tOut buffer addr 0x%pK\n", peer
->outbuf
);
1228 pos
+= scnprintf(buf
+ pos
, buf_size
- pos
,
1229 "\tOut buff phys addr %pa[p]\n", &peer
->out_phys_addr
);
1231 pos
+= scnprintf(buf
+ pos
, buf_size
- pos
,
1232 "\tOut buffer size %pa\n", &peer
->outbuf_size
);
1234 pos
+= scnprintf(buf
+ pos
, buf_size
- pos
,
1235 "\tOut buffer xlat 0x%016llx[p]\n", peer
->outbuf_xlat
);
1238 pos
+= scnprintf(buf
+ pos
, buf_size
- pos
,
1239 "\tIn buffer addr: unallocated\n");
1243 pos
+= scnprintf(buf
+ pos
, buf_size
- pos
,
1244 "\tIn buffer addr 0x%pK\n", peer
->inbuf
);
1246 pos
+= scnprintf(buf
+ pos
, buf_size
- pos
,
1247 "\tIn buffer size %pa\n", &peer
->inbuf_size
);
1249 pos
+= scnprintf(buf
+ pos
, buf_size
- pos
,
1250 "\tIn buffer xlat %pad[p]\n", &peer
->inbuf_xlat
);
1253 ret
= simple_read_from_buffer(ubuf
, size
, offp
, buf
, pos
);
static const struct file_operations perf_dbgfs_info = {
	.open = simple_open,
	.read = perf_dbgfs_read_info,
};
1264 static ssize_t
perf_dbgfs_read_run(struct file
*filep
, char __user
*ubuf
,
1265 size_t size
, loff_t
*offp
)
1267 struct perf_ctx
*perf
= filep
->private_data
;
1268 ssize_t ret
, pos
= 0;
1271 buf
= kmalloc(PERF_BUF_LEN
, GFP_KERNEL
);
1275 ret
= perf_read_stats(perf
, buf
, PERF_BUF_LEN
, &pos
);
1279 ret
= simple_read_from_buffer(ubuf
, size
, offp
, buf
, pos
);
1286 static ssize_t
perf_dbgfs_write_run(struct file
*filep
, const char __user
*ubuf
,
1287 size_t size
, loff_t
*offp
)
1289 struct perf_ctx
*perf
= filep
->private_data
;
1290 struct perf_peer
*peer
;
1293 ret
= kstrtoint_from_user(ubuf
, size
, 0, &pidx
);
1297 if (pidx
< 0 || pidx
>= perf
->pcnt
)
1300 peer
= &perf
->peers
[pidx
];
1302 ret
= perf_submit_test(peer
);
static const struct file_operations perf_dbgfs_run = {
	.open = simple_open,
	.read = perf_dbgfs_read_run,
	.write = perf_dbgfs_write_run,
};
1315 static ssize_t
perf_dbgfs_read_tcnt(struct file
*filep
, char __user
*ubuf
,
1316 size_t size
, loff_t
*offp
)
1318 struct perf_ctx
*perf
= filep
->private_data
;
1322 pos
= scnprintf(buf
, sizeof(buf
), "%hhu\n", perf
->tcnt
);
1324 return simple_read_from_buffer(ubuf
, size
, offp
, buf
, pos
);
1327 static ssize_t
perf_dbgfs_write_tcnt(struct file
*filep
,
1328 const char __user
*ubuf
,
1329 size_t size
, loff_t
*offp
)
1331 struct perf_ctx
*perf
= filep
->private_data
;
1335 ret
= kstrtou8_from_user(ubuf
, size
, 0, &val
);
1339 ret
= perf_set_tcnt(perf
, val
);
static const struct file_operations perf_dbgfs_tcnt = {
	.open = simple_open,
	.read = perf_dbgfs_read_tcnt,
	.write = perf_dbgfs_write_tcnt,
};
1352 static void perf_setup_dbgfs(struct perf_ctx
*perf
)
1354 struct pci_dev
*pdev
= perf
->ntb
->pdev
;
1356 perf
->dbgfs_dir
= debugfs_create_dir(pci_name(pdev
), perf_dbgfs_topdir
);
1357 if (!perf
->dbgfs_dir
) {
1358 dev_warn(&perf
->ntb
->dev
, "DebugFS unsupported\n");
1362 debugfs_create_file("info", 0600, perf
->dbgfs_dir
, perf
,
1365 debugfs_create_file("run", 0600, perf
->dbgfs_dir
, perf
,
1368 debugfs_create_file("threads_count", 0600, perf
->dbgfs_dir
, perf
,
1371 /* They are made read-only for test exec safety and integrity */
1372 debugfs_create_u8("chunk_order", 0500, perf
->dbgfs_dir
, &chunk_order
);
1374 debugfs_create_u8("total_order", 0500, perf
->dbgfs_dir
, &total_order
);
1376 debugfs_create_bool("use_dma", 0500, perf
->dbgfs_dir
, &use_dma
);
1379 static void perf_clear_dbgfs(struct perf_ctx
*perf
)
1381 debugfs_remove_recursive(perf
->dbgfs_dir
);
/*==============================================================================
 *                        Basic driver initialization
 *==============================================================================
 */
1389 static struct perf_ctx
*perf_create_data(struct ntb_dev
*ntb
)
1391 struct perf_ctx
*perf
;
1393 perf
= devm_kzalloc(&ntb
->dev
, sizeof(*perf
), GFP_KERNEL
);
1395 return ERR_PTR(-ENOMEM
);
1397 perf
->pcnt
= ntb_peer_port_count(ntb
);
1398 perf
->peers
= devm_kcalloc(&ntb
->dev
, perf
->pcnt
, sizeof(*perf
->peers
),
1401 return ERR_PTR(-ENOMEM
);
1408 static int perf_setup_peer_mw(struct perf_peer
*peer
)
1410 struct perf_ctx
*perf
= peer
->perf
;
1411 phys_addr_t phys_addr
;
1414 /* Get outbound MW parameters and map it */
1415 ret
= ntb_peer_mw_get_addr(perf
->ntb
, perf
->gidx
, &phys_addr
,
1416 &peer
->outbuf_size
);
1420 peer
->outbuf
= devm_ioremap_wc(&perf
->ntb
->dev
, phys_addr
,
1425 peer
->out_phys_addr
= phys_addr
;
1427 if (max_mw_size
&& peer
->outbuf_size
> max_mw_size
) {
1428 peer
->outbuf_size
= max_mw_size
;
1429 dev_warn(&peer
->perf
->ntb
->dev
,
1430 "Peer %d outbuf reduced to %pa\n", peer
->pidx
,
1431 &peer
->outbuf_size
);
1437 static int perf_init_peers(struct perf_ctx
*perf
)
1439 struct perf_peer
*peer
;
1440 int pidx
, lport
, ret
;
1442 lport
= ntb_port_number(perf
->ntb
);
1444 for (pidx
= 0; pidx
< perf
->pcnt
; pidx
++) {
1445 peer
= &perf
->peers
[pidx
];
1449 if (lport
< ntb_peer_port_number(perf
->ntb
, pidx
)) {
1450 if (perf
->gidx
== -1)
1452 peer
->gidx
= pidx
+ 1;
1456 INIT_WORK(&peer
->service
, perf_service_work
);
1457 init_completion(&peer
->init_comp
);
1459 if (perf
->gidx
== -1)
1463 * Hardware with only two ports may not have unique port
1464 * numbers. In this case, the gidxs should all be zero.
1466 if (perf
->pcnt
== 1 && ntb_port_number(perf
->ntb
) == 0 &&
1467 ntb_peer_port_number(perf
->ntb
, 0) == 0) {
1469 perf
->peers
[0].gidx
= 0;
1472 for (pidx
= 0; pidx
< perf
->pcnt
; pidx
++) {
1473 ret
= perf_setup_peer_mw(&perf
->peers
[pidx
]);
1478 dev_dbg(&perf
->ntb
->dev
, "Global port index %d\n", perf
->gidx
);
1483 static int perf_probe(struct ntb_client
*client
, struct ntb_dev
*ntb
)
1485 struct perf_ctx
*perf
;
1488 perf
= perf_create_data(ntb
);
1490 return PTR_ERR(perf
);
1492 ret
= perf_init_peers(perf
);
1496 perf_init_threads(perf
);
1498 ret
= perf_init_service(perf
);
1502 ret
= perf_enable_service(perf
);
1506 perf_setup_dbgfs(perf
);
1511 static void perf_remove(struct ntb_client
*client
, struct ntb_dev
*ntb
)
1513 struct perf_ctx
*perf
= ntb
->ctx
;
1515 perf_clear_dbgfs(perf
);
1517 perf_disable_service(perf
);
1519 perf_clear_threads(perf
);
static struct ntb_client perf_client = {
	.ops = {
		.probe = perf_probe,
		.remove = perf_remove,
	},
};
static int __init perf_init(void)
{
	int ret;

	if (chunk_order > MAX_CHUNK_ORDER) {
		chunk_order = MAX_CHUNK_ORDER;
		pr_info("Chunk order reduced to %hhu\n", chunk_order);
	}

	if (total_order < chunk_order) {
		total_order = chunk_order;
		pr_info("Total data order reduced to %hhu\n", total_order);
	}

	perf_wq = alloc_workqueue("perf_wq", WQ_UNBOUND | WQ_SYSFS, 0);
	if (!perf_wq)
		return -ENOMEM;

	if (debugfs_initialized())
		perf_dbgfs_topdir = debugfs_create_dir(KBUILD_MODNAME, NULL);

	ret = ntb_register_client(&perf_client);
	if (ret) {
		debugfs_remove_recursive(perf_dbgfs_topdir);
		destroy_workqueue(perf_wq);
	}

	return ret;
}
module_init(perf_init);

static void __exit perf_exit(void)
{
	ntb_unregister_client(&perf_client);
	debugfs_remove_recursive(perf_dbgfs_topdir);
	destroy_workqueue(perf_wq);
}
module_exit(perf_exit);