// SPDX-License-Identifier: GPL-2.0
/*
 * Test driver to test endpoint functionality
 *
 * Copyright (C) 2017 Texas Instruments
 * Author: Kishon Vijay Abraham I <kishon@ti.com>
 */

#include <linux/crc32.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/pci_ids.h>
#include <linux/random.h>

#include <linux/pci-epc.h>
#include <linux/pci-epf.h>
#include <linux/pci_regs.h>

#define IRQ_TYPE_LEGACY			0
#define IRQ_TYPE_MSI			1
#define IRQ_TYPE_MSIX			2

#define COMMAND_RAISE_LEGACY_IRQ	BIT(0)
#define COMMAND_RAISE_MSI_IRQ		BIT(1)
#define COMMAND_RAISE_MSIX_IRQ		BIT(2)
#define COMMAND_READ			BIT(3)
#define COMMAND_WRITE			BIT(4)
#define COMMAND_COPY			BIT(5)

#define STATUS_READ_SUCCESS		BIT(0)
#define STATUS_READ_FAIL		BIT(1)
#define STATUS_WRITE_SUCCESS		BIT(2)
#define STATUS_WRITE_FAIL		BIT(3)
#define STATUS_COPY_SUCCESS		BIT(4)
#define STATUS_COPY_FAIL		BIT(5)
#define STATUS_IRQ_RAISED		BIT(6)
#define STATUS_SRC_ADDR_INVALID		BIT(7)
#define STATUS_DST_ADDR_INVALID		BIT(8)

#define FLAG_USE_DMA			BIT(0)

#define TIMER_RESOLUTION		1

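/*
 * Protocol between the two halves of the test: the host-side driver
 * writes a COMMAND_* value into the test register BAR, the endpoint
 * executes it from the command handler below, reports the result via
 * the STATUS_* bits and, when asked, raises an interrupt of the
 * selected IRQ_TYPE_*.
 */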
static struct workqueue_struct *kpcitest_workqueue;

struct pci_epf_test {
	void			*reg[PCI_STD_NUM_BARS];
	struct pci_epf		*epf;
	enum pci_barno		test_reg_bar;
	size_t			msix_table_offset;
	struct delayed_work	cmd_handler;
	struct dma_chan		*dma_chan;
	struct completion	transfer_complete;
	bool			dma_supported;
	const struct pci_epc_features *epc_features;
};

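/*
 * Register layout exposed through the test register BAR. It must match
 * the layout expected by the host-side driver
 * (drivers/misc/pci_endpoint_test.c), which accesses these fields
 * across the PCI bus.
 */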
struct pci_epf_test_reg {
	u32	magic;
	u32	command;
	u32	status;
	u64	src_addr;
	u64	dst_addr;
	u32	size;
	u32	checksum;
	u32	irq_type;
	u32	irq_number;
	u32	flags;
} __packed;

static struct pci_epf_header test_header = {
	.vendorid	= PCI_ANY_ID,
	.deviceid	= PCI_ANY_ID,
	.baseclass_code	= PCI_CLASS_OTHERS,
	.interrupt_pin	= PCI_INTERRUPT_INTA,
};

static size_t bar_size[] = { 512, 512, 1024, 16384, 131072, 1048576 };

static void pci_epf_test_dma_callback(void *param)
{
	struct pci_epf_test *epf_test = param;

	complete(&epf_test->transfer_complete);
}

/**
 * pci_epf_test_data_transfer() - Function that uses dmaengine API to transfer
 *				  data between PCIe EP and remote PCIe RC
 * @epf_test: the EPF test device that performs the data transfer operation
 * @dma_dst: The destination address of the data transfer. It can be a physical
 *	     address given by pci_epc_mem_alloc_addr or DMA mapping APIs.
 * @dma_src: The source address of the data transfer. It can be a physical
 *	     address given by pci_epc_mem_alloc_addr or DMA mapping APIs.
 * @len: The size of the data transfer
 *
 * Function that uses dmaengine API to transfer data between PCIe EP and remote
 * PCIe RC. The source and destination address can be a physical address given
 * by pci_epc_mem_alloc_addr or the one obtained using DMA mapping APIs.
 *
 * The function returns '0' on success and negative value on failure.
 */
static int pci_epf_test_data_transfer(struct pci_epf_test *epf_test,
				      dma_addr_t dma_dst, dma_addr_t dma_src,
				      size_t len)
{
	enum dma_ctrl_flags flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
	struct dma_chan *chan = epf_test->dma_chan;
	struct pci_epf *epf = epf_test->epf;
	struct dma_async_tx_descriptor *tx;
	struct device *dev = &epf->dev;
	dma_cookie_t cookie;
	int ret;

	if (IS_ERR_OR_NULL(chan)) {
		dev_err(dev, "Invalid DMA memcpy channel\n");
		return -EINVAL;
	}

	tx = dmaengine_prep_dma_memcpy(chan, dma_dst, dma_src, len, flags);
	if (!tx) {
		dev_err(dev, "Failed to prepare DMA memcpy\n");
		return -EIO;
	}

	tx->callback = pci_epf_test_dma_callback;
	tx->callback_param = epf_test;
	cookie = tx->tx_submit(tx);
	reinit_completion(&epf_test->transfer_complete);

	ret = dma_submit_error(cookie);
	if (ret) {
		dev_err(dev, "Failed to do DMA tx_submit %d\n", cookie);
		return -EIO;
	}

	dma_async_issue_pending(chan);
	ret = wait_for_completion_interruptible(&epf_test->transfer_complete);
	if (ret < 0) {
		dmaengine_terminate_sync(chan);
		dev_err(dev, "DMA wait_for_completion_timeout\n");
		return -ETIMEDOUT;
	}

	return 0;
}

/**
 * pci_epf_test_init_dma_chan() - Function to initialize EPF test DMA channel
 * @epf_test: the EPF test device that performs data transfer operation
 *
 * Function to initialize EPF test DMA channel.
 */
static int pci_epf_test_init_dma_chan(struct pci_epf_test *epf_test)
{
	struct pci_epf *epf = epf_test->epf;
	struct device *dev = &epf->dev;
	struct dma_chan *dma_chan;
	dma_cap_mask_t mask;
	int ret;

	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);

	dma_chan = dma_request_chan_by_mask(&mask);
	if (IS_ERR(dma_chan)) {
		ret = PTR_ERR(dma_chan);
		if (ret != -EPROBE_DEFER)
			dev_err(dev, "Failed to get DMA channel\n");
		return ret;
	}
	init_completion(&epf_test->transfer_complete);

	epf_test->dma_chan = dma_chan;

	return 0;
}

/**
 * pci_epf_test_clean_dma_chan() - Function to cleanup EPF test DMA channel
 * @epf_test: the EPF test device that performs data transfer operation
 *
 * Helper to cleanup EPF test DMA channel.
 */
static void pci_epf_test_clean_dma_chan(struct pci_epf_test *epf_test)
{
	dma_release_channel(epf_test->dma_chan);
	epf_test->dma_chan = NULL;
}

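/*
 * Pretty-print the measured throughput. The byte count and the elapsed
 * nanoseconds are scaled down together until the time fits in 32 bits,
 * keeping the quotient intact so do_div() can be used with its u32
 * divisor.
 */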
static void pci_epf_test_print_rate(const char *ops, u64 size,
				    struct timespec64 *start,
				    struct timespec64 *end, bool dma)
{
	struct timespec64 ts;
	u64 rate, ns;

	ts = timespec64_sub(*end, *start);

	/* convert both size (stored in 'rate') and time in terms of 'ns' */
	ns = timespec64_to_ns(&ts);
	rate = size * NSEC_PER_SEC;

	/* Divide both size (stored in 'rate') and ns by a common factor */
	while (ns > UINT_MAX) {
		rate >>= 1;
		ns >>= 1;
	}

	if (!ns)
		return;

	/* calculate the rate: rate = (size * NSEC_PER_SEC) / ns, in bytes/s */
	do_div(rate, (uint32_t)ns);

	pr_info("\n%s => Size: %llu bytes\t DMA: %s\t Time: %llu.%09u seconds\t"
		"Rate: %llu KB/s\n", ops, size, dma ? "YES" : "NO",
		(u64)ts.tv_sec, (u32)ts.tv_nsec, rate / 1024);
}

static int pci_epf_test_copy(struct pci_epf_test *epf_test)
{
	int ret;
	bool use_dma;
	void __iomem *src_addr;
	void __iomem *dst_addr;
	phys_addr_t src_phys_addr;
	phys_addr_t dst_phys_addr;
	struct timespec64 start, end;
	struct pci_epf *epf = epf_test->epf;
	struct device *dev = &epf->dev;
	struct pci_epc *epc = epf->epc;
	enum pci_barno test_reg_bar = epf_test->test_reg_bar;
	struct pci_epf_test_reg *reg = epf_test->reg[test_reg_bar];

	src_addr = pci_epc_mem_alloc_addr(epc, &src_phys_addr, reg->size);
	if (!src_addr) {
		dev_err(dev, "Failed to allocate source address\n");
		reg->status = STATUS_SRC_ADDR_INVALID;
		ret = -ENOMEM;
		goto err;
	}

	ret = pci_epc_map_addr(epc, epf->func_no, src_phys_addr, reg->src_addr,
			       reg->size);
	if (ret) {
		dev_err(dev, "Failed to map source address\n");
		reg->status = STATUS_SRC_ADDR_INVALID;
		goto err_src_addr;
	}

	dst_addr = pci_epc_mem_alloc_addr(epc, &dst_phys_addr, reg->size);
	if (!dst_addr) {
		dev_err(dev, "Failed to allocate destination address\n");
		reg->status = STATUS_DST_ADDR_INVALID;
		ret = -ENOMEM;
		goto err_src_map_addr;
	}

	ret = pci_epc_map_addr(epc, epf->func_no, dst_phys_addr, reg->dst_addr,
			       reg->size);
	if (ret) {
		dev_err(dev, "Failed to map destination address\n");
		reg->status = STATUS_DST_ADDR_INVALID;
		goto err_dst_addr;
	}

	ktime_get_ts64(&start);
	use_dma = !!(reg->flags & FLAG_USE_DMA);
	if (use_dma) {
		if (!epf_test->dma_supported) {
			dev_err(dev, "Cannot transfer data using DMA\n");
			ret = -EINVAL;
			goto err_map_addr;
		}

		ret = pci_epf_test_data_transfer(epf_test, dst_phys_addr,
						 src_phys_addr, reg->size);
		if (ret)
			dev_err(dev, "Data transfer failed\n");
	} else {
		memcpy(dst_addr, src_addr, reg->size);
	}
	ktime_get_ts64(&end);
	pci_epf_test_print_rate("COPY", reg->size, &start, &end, use_dma);

err_map_addr:
	pci_epc_unmap_addr(epc, epf->func_no, dst_phys_addr);

err_dst_addr:
	pci_epc_mem_free_addr(epc, dst_phys_addr, dst_addr, reg->size);

err_src_map_addr:
	pci_epc_unmap_addr(epc, epf->func_no, src_phys_addr);

err_src_addr:
	pci_epc_mem_free_addr(epc, src_phys_addr, src_addr, reg->size);

err:
	return ret;
}

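/*
 * COMMAND_READ is named from the endpoint's perspective: the endpoint
 * reads a buffer from host memory and verifies it against the CRC32
 * checksum that the host placed in the test registers.
 */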
static int pci_epf_test_read(struct pci_epf_test *epf_test)
{
	int ret;
	void __iomem *src_addr;
	void *buf;
	u32 crc32;
	bool use_dma;
	phys_addr_t phys_addr;
	phys_addr_t dst_phys_addr;
	struct timespec64 start, end;
	struct pci_epf *epf = epf_test->epf;
	struct device *dev = &epf->dev;
	struct pci_epc *epc = epf->epc;
	struct device *dma_dev = epf->epc->dev.parent;
	enum pci_barno test_reg_bar = epf_test->test_reg_bar;
	struct pci_epf_test_reg *reg = epf_test->reg[test_reg_bar];

	src_addr = pci_epc_mem_alloc_addr(epc, &phys_addr, reg->size);
	if (!src_addr) {
		dev_err(dev, "Failed to allocate address\n");
		reg->status = STATUS_SRC_ADDR_INVALID;
		ret = -ENOMEM;
		goto err;
	}

	ret = pci_epc_map_addr(epc, epf->func_no, phys_addr, reg->src_addr,
			       reg->size);
	if (ret) {
		dev_err(dev, "Failed to map address\n");
		reg->status = STATUS_SRC_ADDR_INVALID;
		goto err_addr;
	}

	buf = kzalloc(reg->size, GFP_KERNEL);
	if (!buf) {
		ret = -ENOMEM;
		goto err_map_addr;
	}

	use_dma = !!(reg->flags & FLAG_USE_DMA);
	if (use_dma) {
		if (!epf_test->dma_supported) {
			dev_err(dev, "Cannot transfer data using DMA\n");
			ret = -EINVAL;
			goto err_dma_map;
		}

		dst_phys_addr = dma_map_single(dma_dev, buf, reg->size,
					       DMA_FROM_DEVICE);
		if (dma_mapping_error(dma_dev, dst_phys_addr)) {
			dev_err(dev, "Failed to map destination buffer addr\n");
			ret = -ENOMEM;
			goto err_dma_map;
		}

		ktime_get_ts64(&start);
		ret = pci_epf_test_data_transfer(epf_test, dst_phys_addr,
						 phys_addr, reg->size);
		if (ret)
			dev_err(dev, "Data transfer failed\n");
		ktime_get_ts64(&end);

		dma_unmap_single(dma_dev, dst_phys_addr, reg->size,
				 DMA_FROM_DEVICE);
	} else {
		ktime_get_ts64(&start);
		memcpy_fromio(buf, src_addr, reg->size);
		ktime_get_ts64(&end);
	}

	pci_epf_test_print_rate("READ", reg->size, &start, &end, use_dma);

	crc32 = crc32_le(~0, buf, reg->size);
	if (crc32 != reg->checksum)
		ret = -EIO;

err_dma_map:
	kfree(buf);

err_map_addr:
	pci_epc_unmap_addr(epc, epf->func_no, phys_addr);

err_addr:
	pci_epc_mem_free_addr(epc, phys_addr, src_addr, reg->size);

err:
	return ret;
}

static int pci_epf_test_write(struct pci_epf_test *epf_test)
{
	int ret;
	void __iomem *dst_addr;
	void *buf;
	bool use_dma;
	phys_addr_t phys_addr;
	phys_addr_t src_phys_addr;
	struct timespec64 start, end;
	struct pci_epf *epf = epf_test->epf;
	struct device *dev = &epf->dev;
	struct pci_epc *epc = epf->epc;
	struct device *dma_dev = epf->epc->dev.parent;
	enum pci_barno test_reg_bar = epf_test->test_reg_bar;
	struct pci_epf_test_reg *reg = epf_test->reg[test_reg_bar];

	dst_addr = pci_epc_mem_alloc_addr(epc, &phys_addr, reg->size);
	if (!dst_addr) {
		dev_err(dev, "Failed to allocate address\n");
		reg->status = STATUS_DST_ADDR_INVALID;
		ret = -ENOMEM;
		goto err;
	}

	ret = pci_epc_map_addr(epc, epf->func_no, phys_addr, reg->dst_addr,
			       reg->size);
	if (ret) {
		dev_err(dev, "Failed to map address\n");
		reg->status = STATUS_DST_ADDR_INVALID;
		goto err_addr;
	}

	buf = kzalloc(reg->size, GFP_KERNEL);
	if (!buf) {
		ret = -ENOMEM;
		goto err_map_addr;
	}

	get_random_bytes(buf, reg->size);
	reg->checksum = crc32_le(~0, buf, reg->size);

	use_dma = !!(reg->flags & FLAG_USE_DMA);
	if (use_dma) {
		if (!epf_test->dma_supported) {
			dev_err(dev, "Cannot transfer data using DMA\n");
			ret = -EINVAL;
			goto err_dma_map;
		}

		src_phys_addr = dma_map_single(dma_dev, buf, reg->size,
					       DMA_TO_DEVICE);
		if (dma_mapping_error(dma_dev, src_phys_addr)) {
			dev_err(dev, "Failed to map source buffer addr\n");
			ret = -ENOMEM;
			goto err_dma_map;
		}

		ktime_get_ts64(&start);
		ret = pci_epf_test_data_transfer(epf_test, phys_addr,
						 src_phys_addr, reg->size);
		if (ret)
			dev_err(dev, "Data transfer failed\n");
		ktime_get_ts64(&end);

		dma_unmap_single(dma_dev, src_phys_addr, reg->size,
				 DMA_TO_DEVICE);
	} else {
		ktime_get_ts64(&start);
		memcpy_toio(dst_addr, buf, reg->size);
		ktime_get_ts64(&end);
	}

	pci_epf_test_print_rate("WRITE", reg->size, &start, &end, use_dma);

	/*
	 * wait 1ms in order for the write to complete. Without this delay L3
	 * error is observed in the host system.
	 */
	usleep_range(1000, 2000);

err_dma_map:
	kfree(buf);

err_map_addr:
	pci_epc_unmap_addr(epc, epf->func_no, phys_addr);

err_addr:
	pci_epc_mem_free_addr(epc, phys_addr, dst_addr, reg->size);

err:
	return ret;
}

static void pci_epf_test_raise_irq(struct pci_epf_test *epf_test, u8 irq_type,
				   u16 irq)
{
	struct pci_epf *epf = epf_test->epf;
	struct device *dev = &epf->dev;
	struct pci_epc *epc = epf->epc;
	enum pci_barno test_reg_bar = epf_test->test_reg_bar;
	struct pci_epf_test_reg *reg = epf_test->reg[test_reg_bar];

	reg->status |= STATUS_IRQ_RAISED;

	switch (irq_type) {
	case IRQ_TYPE_LEGACY:
		pci_epc_raise_irq(epc, epf->func_no, PCI_EPC_IRQ_LEGACY, 0);
		break;
	case IRQ_TYPE_MSI:
		pci_epc_raise_irq(epc, epf->func_no, PCI_EPC_IRQ_MSI, irq);
		break;
	case IRQ_TYPE_MSIX:
		pci_epc_raise_irq(epc, epf->func_no, PCI_EPC_IRQ_MSIX, irq);
		break;
	default:
		dev_err(dev, "Failed to raise IRQ, unknown type\n");
		break;
	}
}

static void pci_epf_test_cmd_handler(struct work_struct *work)
{
	int ret;
	int count;
	u32 command;
	struct pci_epf_test *epf_test = container_of(work, struct pci_epf_test,
						     cmd_handler.work);
	struct pci_epf *epf = epf_test->epf;
	struct device *dev = &epf->dev;
	struct pci_epc *epc = epf->epc;
	enum pci_barno test_reg_bar = epf_test->test_reg_bar;
	struct pci_epf_test_reg *reg = epf_test->reg[test_reg_bar];

	command = reg->command;
	if (!command)
		goto reset_handler;

	reg->command = 0;
	reg->status = 0;

	if (reg->irq_type > IRQ_TYPE_MSIX) {
		dev_err(dev, "Failed to detect IRQ type\n");
		goto reset_handler;
	}

	if (command & COMMAND_RAISE_LEGACY_IRQ) {
		reg->status = STATUS_IRQ_RAISED;
		pci_epc_raise_irq(epc, epf->func_no, PCI_EPC_IRQ_LEGACY, 0);
		goto reset_handler;
	}

	if (command & COMMAND_WRITE) {
		ret = pci_epf_test_write(epf_test);
		if (ret)
			reg->status |= STATUS_WRITE_FAIL;
		else
			reg->status |= STATUS_WRITE_SUCCESS;
		pci_epf_test_raise_irq(epf_test, reg->irq_type,
				       reg->irq_number);
		goto reset_handler;
	}

	if (command & COMMAND_READ) {
		ret = pci_epf_test_read(epf_test);
		if (!ret)
			reg->status |= STATUS_READ_SUCCESS;
		else
			reg->status |= STATUS_READ_FAIL;
		pci_epf_test_raise_irq(epf_test, reg->irq_type,
				       reg->irq_number);
		goto reset_handler;
	}

	if (command & COMMAND_COPY) {
		ret = pci_epf_test_copy(epf_test);
		if (!ret)
			reg->status |= STATUS_COPY_SUCCESS;
		else
			reg->status |= STATUS_COPY_FAIL;
		pci_epf_test_raise_irq(epf_test, reg->irq_type,
				       reg->irq_number);
		goto reset_handler;
	}

	if (command & COMMAND_RAISE_MSI_IRQ) {
		count = pci_epc_get_msi(epc, epf->func_no);
		if (reg->irq_number > count || count <= 0)
			goto reset_handler;
		reg->status = STATUS_IRQ_RAISED;
		pci_epc_raise_irq(epc, epf->func_no, PCI_EPC_IRQ_MSI,
				  reg->irq_number);
		goto reset_handler;
	}

	if (command & COMMAND_RAISE_MSIX_IRQ) {
		count = pci_epc_get_msix(epc, epf->func_no);
		if (reg->irq_number > count || count <= 0)
			goto reset_handler;
		reg->status = STATUS_IRQ_RAISED;
		pci_epc_raise_irq(epc, epf->func_no, PCI_EPC_IRQ_MSIX,
				  reg->irq_number);
		goto reset_handler;
	}

reset_handler:
	queue_delayed_work(kpcitest_workqueue, &epf_test->cmd_handler,
			   msecs_to_jiffies(1));
}

static void pci_epf_test_unbind(struct pci_epf *epf)
{
	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
	struct pci_epc *epc = epf->epc;
	struct pci_epf_bar *epf_bar;
	int bar;

	cancel_delayed_work(&epf_test->cmd_handler);
	pci_epf_test_clean_dma_chan(epf_test);
	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
		epf_bar = &epf->bar[bar];

		if (epf_test->reg[bar]) {
			pci_epc_clear_bar(epc, epf->func_no, epf_bar);
			pci_epf_free_space(epf, epf_test->reg[bar], bar);
		}
	}
}

static int pci_epf_test_set_bar(struct pci_epf *epf)
{
	int bar, add;
	int ret;
	struct pci_epf_bar *epf_bar;
	struct pci_epc *epc = epf->epc;
	struct device *dev = &epf->dev;
	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
	enum pci_barno test_reg_bar = epf_test->test_reg_bar;
	const struct pci_epc_features *epc_features;

	epc_features = epf_test->epc_features;

	for (bar = 0; bar < PCI_STD_NUM_BARS; bar += add) {
		epf_bar = &epf->bar[bar];
		/*
		 * pci_epc_set_bar() sets PCI_BASE_ADDRESS_MEM_TYPE_64
		 * if the specific implementation required a 64-bit BAR,
		 * even if we only requested a 32-bit BAR.
		 */
		add = (epf_bar->flags & PCI_BASE_ADDRESS_MEM_TYPE_64) ? 2 : 1;

		if (!!(epc_features->reserved_bar & (1 << bar)))
			continue;

		ret = pci_epc_set_bar(epc, epf->func_no, epf_bar);
		if (ret) {
			pci_epf_free_space(epf, epf_test->reg[bar], bar);
			dev_err(dev, "Failed to set BAR%d\n", bar);
			if (bar == test_reg_bar)
				return ret;
		}
	}

	return 0;
}

static int pci_epf_test_core_init(struct pci_epf *epf)
{
	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
	struct pci_epf_header *header = epf->header;
	const struct pci_epc_features *epc_features;
	struct pci_epc *epc = epf->epc;
	struct device *dev = &epf->dev;
	bool msix_capable = false;
	bool msi_capable = true;
	int ret;

	epc_features = pci_epc_get_features(epc, epf->func_no);
	if (epc_features) {
		msix_capable = epc_features->msix_capable;
		msi_capable = epc_features->msi_capable;
	}

	ret = pci_epc_write_header(epc, epf->func_no, header);
	if (ret) {
		dev_err(dev, "Configuration header write failed\n");
		return ret;
	}

	ret = pci_epf_test_set_bar(epf);
	if (ret)
		return ret;

	if (msi_capable) {
		ret = pci_epc_set_msi(epc, epf->func_no, epf->msi_interrupts);
		if (ret) {
			dev_err(dev, "MSI configuration failed\n");
			return ret;
		}
	}

	if (msix_capable) {
		ret = pci_epc_set_msix(epc, epf->func_no, epf->msix_interrupts,
				       epf_test->test_reg_bar,
				       epf_test->msix_table_offset);
		if (ret) {
			dev_err(dev, "MSI-X configuration failed\n");
			return ret;
		}
	}

	return 0;
}

static int pci_epf_test_notifier(struct notifier_block *nb, unsigned long val,
				 void *data)
{
	struct pci_epf *epf = container_of(nb, struct pci_epf, nb);
	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
	int ret;

	switch (val) {
	case CORE_INIT:
		ret = pci_epf_test_core_init(epf);
		if (ret)
			return NOTIFY_BAD;
		break;

	case LINK_UP:
		queue_delayed_work(kpcitest_workqueue, &epf_test->cmd_handler,
				   msecs_to_jiffies(1));
		break;

	default:
		dev_err(&epf->dev, "Invalid EPF test notifier event\n");
		return NOTIFY_BAD;
	}

	return NOTIFY_OK;
}

static int pci_epf_test_alloc_space(struct pci_epf *epf)
{
	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
	struct device *dev = &epf->dev;
	struct pci_epf_bar *epf_bar;
	size_t msix_table_size = 0;
	size_t test_reg_bar_size;
	size_t pba_size = 0;
	bool msix_capable;
	void *base;
	int bar, add;
	enum pci_barno test_reg_bar = epf_test->test_reg_bar;
	const struct pci_epc_features *epc_features;
	size_t test_reg_size;

	epc_features = epf_test->epc_features;

	test_reg_bar_size = ALIGN(sizeof(struct pci_epf_test_reg), 128);

	msix_capable = epc_features->msix_capable;
	if (msix_capable) {
		msix_table_size = PCI_MSIX_ENTRY_SIZE * epf->msix_interrupts;
		epf_test->msix_table_offset = test_reg_bar_size;
		/* Align to QWORD or 8 Bytes */
		pba_size = ALIGN(DIV_ROUND_UP(epf->msix_interrupts, 8), 8);
	}
	test_reg_size = test_reg_bar_size + msix_table_size + pba_size;

	if (epc_features->bar_fixed_size[test_reg_bar]) {
		if (test_reg_size > bar_size[test_reg_bar])
			return -ENOMEM;
		test_reg_size = bar_size[test_reg_bar];
	}

	base = pci_epf_alloc_space(epf, test_reg_size, test_reg_bar,
				   epc_features->align);
	if (!base) {
		dev_err(dev, "Failed to allocate register space\n");
		return -ENOMEM;
	}
	epf_test->reg[test_reg_bar] = base;

	for (bar = 0; bar < PCI_STD_NUM_BARS; bar += add) {
		epf_bar = &epf->bar[bar];
		add = (epf_bar->flags & PCI_BASE_ADDRESS_MEM_TYPE_64) ? 2 : 1;

		if (bar == test_reg_bar)
			continue;

		if (!!(epc_features->reserved_bar & (1 << bar)))
			continue;

		base = pci_epf_alloc_space(epf, bar_size[bar], bar,
					   epc_features->align);
		if (!base)
			dev_err(dev, "Failed to allocate space for BAR%d\n",
				bar);
		epf_test->reg[bar] = base;
	}

	return 0;
}

static void pci_epf_configure_bar(struct pci_epf *epf,
				  const struct pci_epc_features *epc_features)
{
	struct pci_epf_bar *epf_bar;
	bool bar_fixed_64bit;
	int i;

	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
		epf_bar = &epf->bar[i];
		bar_fixed_64bit = !!(epc_features->bar_fixed_64bit & (1 << i));
		if (bar_fixed_64bit)
			epf_bar->flags |= PCI_BASE_ADDRESS_MEM_TYPE_64;
		if (epc_features->bar_fixed_size[i])
			bar_size[i] = epc_features->bar_fixed_size[i];
	}
}

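/*
 * bind() runs when the function device is attached to an endpoint
 * controller: query the controller features, size and allocate the
 * BARs, program the configuration space (immediately, or from the
 * CORE_INIT notification if the controller requests that), and start
 * the command poller either right away or once LINK_UP is signalled.
 */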
static int pci_epf_test_bind(struct pci_epf *epf)
{
	int ret;
	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
	const struct pci_epc_features *epc_features;
	enum pci_barno test_reg_bar = BAR_0;
	struct pci_epc *epc = epf->epc;
	bool linkup_notifier = false;
	bool core_init_notifier = false;

	if (WARN_ON_ONCE(!epc))
		return -EINVAL;

	epc_features = pci_epc_get_features(epc, epf->func_no);
	if (epc_features) {
		linkup_notifier = epc_features->linkup_notifier;
		core_init_notifier = epc_features->core_init_notifier;
		test_reg_bar = pci_epc_get_first_free_bar(epc_features);
		pci_epf_configure_bar(epf, epc_features);
	}

	epf_test->test_reg_bar = test_reg_bar;
	epf_test->epc_features = epc_features;

	ret = pci_epf_test_alloc_space(epf);
	if (ret)
		return ret;

	if (!core_init_notifier) {
		ret = pci_epf_test_core_init(epf);
		if (ret)
			return ret;
	}

	epf_test->dma_supported = true;

	ret = pci_epf_test_init_dma_chan(epf_test);
	if (ret)
		epf_test->dma_supported = false;

	if (linkup_notifier) {
		epf->nb.notifier_call = pci_epf_test_notifier;
		pci_epc_register_notifier(epc, &epf->nb);
	} else {
		queue_work(kpcitest_workqueue, &epf_test->cmd_handler.work);
	}

	return 0;
}

static const struct pci_epf_device_id pci_epf_test_ids[] = {
	{
		.name = "pci_epf_test",
	},
	{},
};

static int pci_epf_test_probe(struct pci_epf *epf)
{
	struct pci_epf_test *epf_test;
	struct device *dev = &epf->dev;

	epf_test = devm_kzalloc(dev, sizeof(*epf_test), GFP_KERNEL);
	if (!epf_test)
		return -ENOMEM;

	epf->header = &test_header;
	epf_test->epf = epf;

	INIT_DELAYED_WORK(&epf_test->cmd_handler, pci_epf_test_cmd_handler);

	epf_set_drvdata(epf, epf_test);
	return 0;
}

static struct pci_epf_ops ops = {
	.unbind	= pci_epf_test_unbind,
	.bind	= pci_epf_test_bind,
};

static struct pci_epf_driver test_driver = {
	.driver.name	= "pci_epf_test",
	.probe		= pci_epf_test_probe,
	.id_table	= pci_epf_test_ids,
	.ops		= &ops,
	.owner		= THIS_MODULE,
};

static int __init pci_epf_test_init(void)
{
	int ret;

	kpcitest_workqueue = alloc_workqueue("kpcitest",
					     WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
	if (!kpcitest_workqueue) {
		pr_err("Failed to allocate the kpcitest work queue\n");
		return -ENOMEM;
	}

	ret = pci_epf_register_driver(&test_driver);
	if (ret) {
		/* don't leak the workqueue if driver registration fails */
		destroy_workqueue(kpcitest_workqueue);
		pr_err("Failed to register pci epf test driver --> %d\n", ret);
		return ret;
	}

	return 0;
}
module_init(pci_epf_test_init);

static void __exit pci_epf_test_exit(void)
{
	destroy_workqueue(kpcitest_workqueue);
	pci_epf_unregister_driver(&test_driver);
}
module_exit(pci_epf_test_exit);

MODULE_DESCRIPTION("PCI EPF TEST DRIVER");
MODULE_AUTHOR("Kishon Vijay Abraham I <kishon@ti.com>");
MODULE_LICENSE("GPL v2");