/*
 * This file implements the DMA operations for NVLink devices. The NPU
 * devices all point to the same iommu table as the parent PCI device.
 *
 * Copyright Alistair Popple, IBM Corporation 2015.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 */

#include <linux/slab.h>
#include <linux/mmu_notifier.h>
#include <linux/mmu_context.h>
#include <linux/export.h>
#include <linux/pci.h>
#include <linux/memblock.h>
#include <linux/iommu.h>

#include <asm/powernv.h>
#include <asm/iommu.h>
#include <asm/pnv-pci.h>
#include <asm/msi_bitmap.h>

#define npu_to_phb(x) container_of(x, struct pnv_phb, npu)

/*
 * Other types of TCE cache invalidation are not functional in the
 * kernel.
 */
static struct pci_dev *get_pci_dev(struct device_node *dn)
{
	struct pci_dn *pdn = PCI_DN(dn);

	return pci_get_domain_bus_and_slot(pci_domain_nr(pdn->phb->bus),
					   pdn->busno, pdn->devfn);
}

/* Given an NPU device get the associated PCI device. */
struct pci_dev *pnv_pci_get_gpu_dev(struct pci_dev *npdev)
{
	struct device_node *dn;
	struct pci_dev *gpdev;

	if (WARN_ON(!npdev->dev.of_node))
		return NULL;

	/* Get associated PCI device */
	dn = of_parse_phandle(npdev->dev.of_node, "ibm,gpu", 0);
	if (!dn)
		return NULL;

	gpdev = get_pci_dev(dn);
	of_node_put(dn);

	return gpdev;
}
EXPORT_SYMBOL(pnv_pci_get_gpu_dev);

/* Given the real PCI device get a linked NPU device. */
struct pci_dev *pnv_pci_get_npu_dev(struct pci_dev *gpdev, int index)
{
	struct device_node *dn;
	struct pci_dev *npdev;

	/* Not all PCI devices have device-tree nodes */
	if (!gpdev->dev.of_node)
		return NULL;

	/* Get associated PCI device */
	dn = of_parse_phandle(gpdev->dev.of_node, "ibm,npu", index);
	if (!dn)
		return NULL;

	npdev = get_pci_dev(dn);
	of_node_put(dn);

	return npdev;
}
EXPORT_SYMBOL(pnv_pci_get_npu_dev);

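/*
 * Illustrative sketch only (not taken from this file): the two lookups
 * above assume device-tree nodes that cross-reference each other by
 * phandle. Node and label names below are hypothetical; only the
 * "ibm,gpu" and "ibm,npu" property names come from the code above.
 *
 *	gpu0: gpu@0 {			// real GPU PCI device
 *		ibm,npu = <&npulink0>;	// -> pnv_pci_get_npu_dev()
 *	};
 *
 *	npulink0: link@0 {		// emulated NPU PCI device
 *		ibm,gpu = <&gpu0>;	// -> pnv_pci_get_gpu_dev()
 *	};
 */
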
#define NPU_DMA_OP_UNSUPPORTED()	\
	dev_err_once(dev, "%s operation unsupported for NVLink devices\n", \
		__func__)

static void *dma_npu_alloc(struct device *dev, size_t size,
			   dma_addr_t *dma_handle, gfp_t flag,
			   unsigned long attrs)
{
	NPU_DMA_OP_UNSUPPORTED();
	return NULL;
}

static void dma_npu_free(struct device *dev, size_t size,
			 void *vaddr, dma_addr_t dma_handle,
			 unsigned long attrs)
{
	NPU_DMA_OP_UNSUPPORTED();
}

static dma_addr_t dma_npu_map_page(struct device *dev, struct page *page,
				   unsigned long offset, size_t size,
				   enum dma_data_direction direction,
				   unsigned long attrs)
{
	NPU_DMA_OP_UNSUPPORTED();
	return 0;
}

static int dma_npu_map_sg(struct device *dev, struct scatterlist *sglist,
			  int nelems, enum dma_data_direction direction,
			  unsigned long attrs)
{
	NPU_DMA_OP_UNSUPPORTED();
	return 0;
}

static int dma_npu_dma_supported(struct device *dev, u64 mask)
{
	NPU_DMA_OP_UNSUPPORTED();
	return 0;
}

static u64 dma_npu_get_required_mask(struct device *dev)
{
	NPU_DMA_OP_UNSUPPORTED();
	return 0;
}

static const struct dma_map_ops dma_npu_ops = {
	.map_page		= dma_npu_map_page,
	.map_sg			= dma_npu_map_sg,
	.alloc			= dma_npu_alloc,
	.free			= dma_npu_free,
	.dma_supported		= dma_npu_dma_supported,
	.get_required_mask	= dma_npu_get_required_mask,
};

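/*
 * Illustrative sketch only: any DMA API call made against the emulated
 * NPU PCI function just reports an error once and fails. For example, a
 * hypothetical caller doing
 *
 *	dma_addr_t handle = dma_map_page(&npdev->dev, page, 0, PAGE_SIZE,
 *					 DMA_TO_DEVICE);
 *
 * would land in dma_npu_map_page() above and log "dma_npu_map_page
 * operation unsupported for NVLink devices". Real DMA is always
 * programmed through the linked GPU device instead.
 */
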
/*
 * Returns the PE associated with the PCI device of the given
 * NPU. Returns the linked pci device if pci_dev != NULL.
 */
static struct pnv_ioda_pe *get_gpu_pci_dev_and_pe(struct pnv_ioda_pe *npe,
						  struct pci_dev **gpdev)
{
	struct pnv_phb *phb;
	struct pci_controller *hose;
	struct pci_dev *pdev;
	struct pnv_ioda_pe *pe;
	struct pci_dn *pdn;

	pdev = pnv_pci_get_gpu_dev(npe->pdev);
	if (!pdev)
		return NULL;

	pdn = pci_get_pdn(pdev);
	if (WARN_ON(!pdn || pdn->pe_number == IODA_INVALID_PE))
		return NULL;

	hose = pci_bus_to_host(pdev->bus);
	phb = hose->private_data;
	pe = &phb->ioda.pe_array[pdn->pe_number];

	if (gpdev)
		*gpdev = pdev;

	return pe;
}

long pnv_npu_set_window(struct pnv_ioda_pe *npe, int num,
		struct iommu_table *tbl)
{
	struct pnv_phb *phb = npe->phb;
	int64_t rc;
	const unsigned long size = tbl->it_indirect_levels ?
		tbl->it_level_size : tbl->it_size;
	const __u64 start_addr = tbl->it_offset << tbl->it_page_shift;
	const __u64 win_size = tbl->it_size << tbl->it_page_shift;

	pe_info(npe, "Setting up window %llx..%llx pg=%lx\n",
			start_addr, start_addr + win_size - 1,
			IOMMU_PAGE_SIZE(tbl));

	rc = opal_pci_map_pe_dma_window(phb->opal_id,
			npe->pe_number,
			npe->pe_number,
			tbl->it_indirect_levels + 1,
			__pa(tbl->it_base),
			size << 3,
			IOMMU_PAGE_SIZE(tbl));
	if (rc) {
		pe_err(npe, "Failed to configure TCE table, err %lld\n", rc);
		return rc;
	}
	pnv_pci_ioda2_tce_invalidate_entire(phb, false);

	/* Add the table to the list so its TCE cache will get invalidated */
	pnv_pci_link_table_and_group(phb->hose->node, num,
			tbl, &npe->table_group);

	return 0;
}

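/*
 * Worked example (illustrative numbers only): with it_page_shift = 16
 * (64K TCE pages), it_offset = 0 and it_size = 0x8000 entries, the code
 * above computes
 *
 *	start_addr = 0 << 16      = 0
 *	win_size   = 0x8000 << 16 = 0x80000000 (2GB)
 *
 * i.e. a 2GB DMA window starting at bus address 0, backed by 0x8000
 * TCEs of 64K each.
 */
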
long pnv_npu_unset_window(struct pnv_ioda_pe *npe, int num)
{
	struct pnv_phb *phb = npe->phb;
	int64_t rc;

	pe_info(npe, "Removing DMA window\n");

	rc = opal_pci_map_pe_dma_window(phb->opal_id, npe->pe_number,
			npe->pe_number,
			0/* levels */, 0/* table address */,
			0/* table size */, 0/* page size */);
	if (rc) {
		pe_err(npe, "Unmapping failed, ret = %lld\n", rc);
		return rc;
	}
	pnv_pci_ioda2_tce_invalidate_entire(phb, false);

	pnv_pci_unlink_table_and_group(npe->table_group.tables[num],
			&npe->table_group);

	return 0;
}

/*
 * Enables 32 bit DMA on NPU.
 */
static void pnv_npu_dma_set_32(struct pnv_ioda_pe *npe)
{
	struct pci_dev *gpdev;
	struct pnv_ioda_pe *gpe;
	int64_t rc;

	/*
	 * Find the associated PCI devices and get the dma window
	 * information from there.
	 */
	if (!npe->pdev || !(npe->flags & PNV_IODA_PE_DEV))
		return;

	gpe = get_gpu_pci_dev_and_pe(npe, &gpdev);
	if (!gpe)
		return;

	rc = pnv_npu_set_window(npe, 0, gpe->table_group.tables[0]);

	/*
	 * We don't initialise npu_pe->tce32_table as we always use
	 * dma_npu_ops which are nops.
	 */
	set_dma_ops(&npe->pdev->dev, &dma_npu_ops);
}

/*
 * Enables bypass mode on the NPU. The NPU only supports one
 * window per link, so bypass needs to be explicitly enabled or
 * disabled. Unlike for a PHB3, bypass and non-bypass modes can't be
 * active at the same time.
 */
static int pnv_npu_dma_set_bypass(struct pnv_ioda_pe *npe)
{
	struct pnv_phb *phb = npe->phb;
	int64_t rc = 0;
	phys_addr_t top = memblock_end_of_DRAM();

	if (phb->type != PNV_PHB_NPU_NVLINK || !npe->pdev)
		return -EINVAL;

	rc = pnv_npu_unset_window(npe, 0);
	if (rc != OPAL_SUCCESS)
		return rc;

	/* Enable the bypass window */

	top = roundup_pow_of_two(top);
	dev_info(&npe->pdev->dev, "Enabling bypass for PE %x\n",
			npe->pe_number);
	rc = opal_pci_map_pe_dma_window_real(phb->opal_id,
			npe->pe_number, npe->pe_number,
			0 /* bypass base */, top);

	if (rc == OPAL_SUCCESS)
		pnv_pci_ioda2_tce_invalidate_entire(phb, false);

	return rc;
}

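/*
 * Worked example (illustrative numbers only): if memblock_end_of_DRAM()
 * reports 0x180000000 (6GB of RAM), roundup_pow_of_two() raises it to
 * 0x200000000, so the bypass window above maps real addresses
 * 0..0x200000000 (8GB) 1:1 for the NPU link.
 */
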
void pnv_npu_try_dma_set_bypass(struct pci_dev *gpdev, bool bypass)
{
	int i;
	struct pnv_phb *phb;
	struct pci_dn *pdn;
	struct pnv_ioda_pe *npe;
	struct pci_dev *npdev;

	for (i = 0; ; ++i) {
		npdev = pnv_pci_get_npu_dev(gpdev, i);

		if (!npdev)
			break;

		pdn = pci_get_pdn(npdev);
		if (WARN_ON(!pdn || pdn->pe_number == IODA_INVALID_PE))
			return;

		phb = pci_bus_to_host(npdev->bus)->private_data;

		/* We only do bypass if it's enabled on the linked device */
		npe = &phb->ioda.pe_array[pdn->pe_number];

		if (bypass) {
			dev_info(&npdev->dev,
					"Using 64-bit DMA iommu bypass\n");
			pnv_npu_dma_set_bypass(npe);
		} else {
			dev_info(&npdev->dev, "Using 32-bit DMA via iommu\n");
			pnv_npu_dma_set_32(npe);
		}
	}
}

/* Switch ownership from platform code to external user (e.g. VFIO) */
void pnv_npu_take_ownership(struct pnv_ioda_pe *npe)
{
	struct pnv_phb *phb = npe->phb;
	int64_t rc;

	/*
	 * Note: NPU has just a single TVE in the hardware which means that
	 * while used by the kernel, it can have either 32bit window or
	 * DMA bypass but never both. So we deconfigure 32bit window only
	 * if it was enabled at the moment of ownership change.
	 */
	if (npe->table_group.tables[0]) {
		pnv_npu_unset_window(npe, 0);
		return;
	}

	/* Disable bypass */
	rc = opal_pci_map_pe_dma_window_real(phb->opal_id,
			npe->pe_number, npe->pe_number,
			0 /* bypass base */, 0);
	if (rc) {
		pe_err(npe, "Failed to disable bypass, err %lld\n", rc);
		return;
	}
	pnv_pci_ioda2_tce_invalidate_entire(npe->phb, false);
}

struct pnv_ioda_pe *pnv_pci_npu_setup_iommu(struct pnv_ioda_pe *npe)
{
	struct pnv_phb *phb = npe->phb;
	struct pci_bus *pbus = phb->hose->bus;
	struct pci_dev *npdev, *gpdev = NULL, *gptmp;
	struct pnv_ioda_pe *gpe = get_gpu_pci_dev_and_pe(npe, &gpdev);

	if (!gpe || !gpdev)
		return NULL;

	list_for_each_entry(npdev, &pbus->devices, bus_list) {
		gptmp = pnv_pci_get_gpu_dev(npdev);

		if (gptmp != gpdev)
			continue;

		pe_info(gpe, "Attached NPU %s\n", dev_name(&npdev->dev));
		iommu_group_add_device(gpe->table_group.group, &npdev->dev);
	}

	return gpe;
}

/* Maximum number of nvlinks per npu */
#define NV_MAX_LINKS 6

/* Maximum index of npu2 hosts in the system. Always < NV_MAX_NPUS */
static int max_npu2_index;

struct npu_context {
	struct mm_struct *mm;
	struct pci_dev *npdev[NV_MAX_NPUS][NV_MAX_LINKS];
	struct mmu_notifier mn;
	struct kref kref;
	bool nmmu_flush;

	/* Callback to stop translation requests on a given GPU */
	struct npu_context *(*release_cb)(struct npu_context *, void *);

	/*
	 * Private pointer passed to the above callback for usage by
	 * device drivers.
	 */
	void *priv;
};

/*
 * Find a free MMIO ATSD register and mark it in use. Return -ENOSPC
 * if none are available.
 */
static int get_mmio_atsd_reg(struct npu *npu)
{
	int i;

	for (i = 0; i < npu->mmio_atsd_count; i++) {
		if (!test_and_set_bit(i, &npu->mmio_atsd_usage))
			return i;
	}

	return -ENOSPC;
}

static void put_mmio_atsd_reg(struct npu *npu, int reg)
{
	clear_bit(reg, &npu->mmio_atsd_usage);
}

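/*
 * Illustrative sketch: mmio_atsd_usage acts as a small bitmap allocator
 * for the ATSD launch registers. If, say, 8 registers were discovered at
 * init time, the first caller gets register 0, seven concurrent callers
 * get 1..7, and a ninth concurrent caller keeps seeing -ENOSPC until
 * someone calls put_mmio_atsd_reg(); mmio_launch_invalidate() below
 * simply retries in that case.
 */
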
/* MMIO ATSD register offsets */
#define XTS_ATSD_AVA  1
#define XTS_ATSD_STAT 2

static int mmio_launch_invalidate(struct npu *npu, unsigned long launch,
				unsigned long va)
{
	int mmio_atsd_reg;

	do {
		mmio_atsd_reg = get_mmio_atsd_reg(npu);
		cpu_relax();
	} while (mmio_atsd_reg < 0);

	__raw_writeq(cpu_to_be64(va),
		npu->mmio_atsd_regs[mmio_atsd_reg] + XTS_ATSD_AVA);
	eieio();
	__raw_writeq(cpu_to_be64(launch), npu->mmio_atsd_regs[mmio_atsd_reg]);

	return mmio_atsd_reg;
}

static int mmio_invalidate_pid(struct npu *npu, unsigned long pid, bool flush)
{
	unsigned long launch;

	/* IS set to invalidate matching PID */
	launch = PPC_BIT(12);

	/* PRS set to process-scoped */
	launch |= PPC_BIT(13);

	/* AP */
	launch |= (u64) mmu_get_ap(mmu_virtual_psize) << PPC_BITLSHIFT(17);

	/* PID */
	launch |= pid << PPC_BITLSHIFT(38);

	/* No flush */
	launch |= !flush << PPC_BITLSHIFT(39);

	/* Invalidating the entire process doesn't use a va */
	return mmio_launch_invalidate(npu, launch, 0);
}

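/*
 * Worked example of the launch word layout (assuming the usual 64-bit
 * PPC_BIT()/PPC_BITLSHIFT() definitions, where bit 0 is the MSB):
 *
 *	IS     = PPC_BIT(12)              -> 1UL << 51
 *	PRS    = PPC_BIT(13)              -> 1UL << 50
 *	AP     = ap  << PPC_BITLSHIFT(17) -> ap  << 46
 *	PID    = pid << PPC_BITLSHIFT(38) -> pid << 25
 *	!flush = 1   << PPC_BITLSHIFT(39) -> 1   << 24
 *
 * so a PID-scoped invalidate with flush == false sets IS, PRS, the AP
 * field ending at bit 17, the PID field ending at bit 38 and the
 * "no flush" bit at bit 39.
 */
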
static int mmio_invalidate_va(struct npu *npu, unsigned long va,
			unsigned long pid, bool flush)
{
	unsigned long launch;

	/* IS set to invalidate target VA */
	launch = 0;

	/* PRS set to process scoped */
	launch |= PPC_BIT(13);

	/* AP */
	launch |= (u64) mmu_get_ap(mmu_virtual_psize) << PPC_BITLSHIFT(17);

	/* PID */
	launch |= pid << PPC_BITLSHIFT(38);

	/* No flush */
	launch |= !flush << PPC_BITLSHIFT(39);

	return mmio_launch_invalidate(npu, launch, va);
}

#define mn_to_npu_context(x) container_of(x, struct npu_context, mn)

struct mmio_atsd_reg {
	struct npu *npu;
	int reg;
};

static void mmio_invalidate_wait(
	struct mmio_atsd_reg mmio_atsd_reg[NV_MAX_NPUS], bool flush)
{
	struct npu *npu;
	int i, reg;

	/* Wait for all invalidations to complete */
	for (i = 0; i <= max_npu2_index; i++) {
		if (mmio_atsd_reg[i].reg < 0)
			continue;

		/* Wait for completion */
		npu = mmio_atsd_reg[i].npu;
		reg = mmio_atsd_reg[i].reg;
		while (__raw_readq(npu->mmio_atsd_regs[reg] + XTS_ATSD_STAT))
			cpu_relax();

		put_mmio_atsd_reg(npu, reg);

		/*
		 * The GPU requires two flush ATSDs to ensure all entries have
		 * been flushed. We use PID 0 as it will never be used for a
		 * process on the GPU.
		 */
		if (flush)
			mmio_invalidate_pid(npu, 0, true);
	}
}

/*
 * Invalidate either a single address or an entire PID depending on
 * the value of va.
 */
static void mmio_invalidate(struct npu_context *npu_context, int va,
			unsigned long address, bool flush)
{
	int i, j;
	struct npu *npu;
	struct pnv_phb *nphb;
	struct pci_dev *npdev;
	struct mmio_atsd_reg mmio_atsd_reg[NV_MAX_NPUS];
	unsigned long pid = npu_context->mm->context.id;

	if (npu_context->nmmu_flush)
		/*
		 * Unfortunately the nest mmu does not support flushing specific
		 * addresses so we have to flush the whole mm once before
		 * shooting down the GPU translation.
		 */
		flush_all_mm(npu_context->mm);

	/*
	 * Loop over all the NPUs this process is active on and launch
	 * the invalidate.
	 */
	for (i = 0; i <= max_npu2_index; i++) {
		mmio_atsd_reg[i].reg = -1;
		for (j = 0; j < NV_MAX_LINKS; j++) {
			npdev = npu_context->npdev[i][j];
			if (!npdev)
				continue;

			nphb = pci_bus_to_host(npdev->bus)->private_data;
			npu = &nphb->npu;
			mmio_atsd_reg[i].npu = npu;

			if (va)
				mmio_atsd_reg[i].reg =
					mmio_invalidate_va(npu, address, pid,
							flush);
			else
				mmio_atsd_reg[i].reg =
					mmio_invalidate_pid(npu, pid, flush);

			/*
			 * The NPU hardware forwards the shootdown to all GPUs
			 * so we only have to launch one shootdown per NPU.
			 */
			break;
		}
	}

	mmio_invalidate_wait(mmio_atsd_reg, flush);
	if (flush)
		/* Wait for the flush to complete */
		mmio_invalidate_wait(mmio_atsd_reg, false);
}

static void pnv_npu2_mn_release(struct mmu_notifier *mn,
				struct mm_struct *mm)
{
	struct npu_context *npu_context = mn_to_npu_context(mn);

	/* Call into device driver to stop requests to the NMMU */
	if (npu_context->release_cb)
		npu_context->release_cb(npu_context, npu_context->priv);

	/*
	 * There should be no more translation requests for this PID, but we
	 * need to ensure any entries for it are removed from the TLB.
	 */
	mmio_invalidate(npu_context, 0, 0, true);
}

static void pnv_npu2_mn_change_pte(struct mmu_notifier *mn,
				struct mm_struct *mm,
				unsigned long address,
				pte_t pte)
{
	struct npu_context *npu_context = mn_to_npu_context(mn);

	mmio_invalidate(npu_context, 1, address, true);
}

static void pnv_npu2_mn_invalidate_range(struct mmu_notifier *mn,
					struct mm_struct *mm,
					unsigned long start, unsigned long end)
{
	struct npu_context *npu_context = mn_to_npu_context(mn);
	unsigned long address;

	for (address = start; address < end; address += PAGE_SIZE)
		mmio_invalidate(npu_context, 1, address, false);

	/* Do the flush only on the final address == end */
	mmio_invalidate(npu_context, 1, address, true);
}

static const struct mmu_notifier_ops nv_nmmu_notifier_ops = {
	.release = pnv_npu2_mn_release,
	.change_pte = pnv_npu2_mn_change_pte,
	.invalidate_range = pnv_npu2_mn_invalidate_range,
};

/*
 * Call into OPAL to setup the nmmu context for the current task in
 * the NPU. This must be called to setup the context tables before the
 * GPU issues ATRs. pdev should be a pointer to a PCIe GPU device.
 *
 * A release callback should be registered to allow a device driver to
 * be notified that it should not launch any new translation requests
 * as the final TLB invalidate is about to occur.
 *
 * Returns an error if no contexts are currently available, or an
 * npu_context which should be passed to pnv_npu2_handle_fault().
 *
 * mmap_sem must be held in write mode.
 */

struct npu_context *pnv_npu2_init_context(struct pci_dev *gpdev,
			unsigned long flags,
			struct npu_context *(*cb)(struct npu_context *, void *),
			void *priv)
{
	int rc;
	u32 nvlink_index;
	struct device_node *nvlink_dn;
	struct mm_struct *mm = current->mm;
	struct pnv_phb *nphb;
	struct npu *npu;
	struct npu_context *npu_context;

	/*
	 * At present we don't support GPUs connected to multiple NPUs and I'm
	 * not sure the hardware does either.
	 */
	struct pci_dev *npdev = pnv_pci_get_npu_dev(gpdev, 0);

	if (!firmware_has_feature(FW_FEATURE_OPAL))
		return ERR_PTR(-ENODEV);

	if (!npdev)
		/* No nvlink associated with this GPU device */
		return ERR_PTR(-ENODEV);

	if (!mm || mm->context.id == 0) {
		/*
		 * Kernel thread contexts are not supported and context id 0 is
		 * reserved on the GPU.
		 */
		return ERR_PTR(-EINVAL);
	}

	nphb = pci_bus_to_host(npdev->bus)->private_data;
	npu = &nphb->npu;

	/*
	 * Setup the NPU context table for a particular GPU. These need to be
	 * per-GPU as we need the tables to filter ATSDs when there are no
	 * active contexts on a particular GPU.
	 */
	rc = opal_npu_init_context(nphb->opal_id, mm->context.id, flags,
				PCI_DEVID(gpdev->bus->number, gpdev->devfn));
	if (rc < 0)
		return ERR_PTR(-ENOSPC);

	/*
	 * We store the npu pci device so we can more easily get at the
	 * associated npus.
	 */
	npu_context = mm->context.npu_context;
	if (!npu_context) {
		npu_context = kzalloc(sizeof(struct npu_context), GFP_KERNEL);
		if (!npu_context)
			return ERR_PTR(-ENOMEM);

		mm->context.npu_context = npu_context;
		npu_context->mm = mm;
		npu_context->mn.ops = &nv_nmmu_notifier_ops;
		__mmu_notifier_register(&npu_context->mn, mm);
		kref_init(&npu_context->kref);
	} else {
		kref_get(&npu_context->kref);
	}

	npu_context->release_cb = cb;
	npu_context->priv = priv;
	nvlink_dn = of_parse_phandle(npdev->dev.of_node, "ibm,nvlink", 0);
	if (WARN_ON(of_property_read_u32(nvlink_dn, "ibm,npu-link-index",
							&nvlink_index)))
		return ERR_PTR(-ENODEV);
	npu_context->npdev[npu->index][nvlink_index] = npdev;

	if (!nphb->npu.nmmu_flush) {
		/*
		 * If we're not explicitly flushing ourselves we need to mark
		 * the thread for global flushes
		 */
		npu_context->nmmu_flush = false;
		mm_context_add_copro(mm);
	} else
		npu_context->nmmu_flush = true;

	return npu_context;
}
EXPORT_SYMBOL(pnv_npu2_init_context);

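/*
 * Illustrative usage sketch (hypothetical GPU driver code, not part of
 * this file). stop_translations(), drv_priv and gpdev are made-up names;
 * the calling convention (mmap_sem held for write, release callback,
 * IS_ERR() check) follows the comment block above.
 *
 *	static struct npu_context *stop_translations(struct npu_context *ctx,
 *						     void *priv)
 *	{
 *		// quiesce outstanding GPU translation requests here
 *		return ctx;
 *	}
 *
 *	...
 *	down_write(&current->mm->mmap_sem);
 *	ctx = pnv_npu2_init_context(gpdev, 0, stop_translations, drv_priv);
 *	up_write(&current->mm->mmap_sem);
 *	if (IS_ERR(ctx))
 *		return PTR_ERR(ctx);
 */
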
static void pnv_npu2_release_context(struct kref *kref)
{
	struct npu_context *npu_context =
		container_of(kref, struct npu_context, kref);

	if (!npu_context->nmmu_flush)
		mm_context_remove_copro(npu_context->mm);

	npu_context->mm->context.npu_context = NULL;
	mmu_notifier_unregister(&npu_context->mn,
				npu_context->mm);

	kfree(npu_context);
}

void pnv_npu2_destroy_context(struct npu_context *npu_context,
			struct pci_dev *gpdev)
{
	struct pnv_phb *nphb;
	struct npu *npu;
	struct pci_dev *npdev = pnv_pci_get_npu_dev(gpdev, 0);
	struct device_node *nvlink_dn;
	u32 nvlink_index;

	if (WARN_ON(!npdev))
		return;

	if (!firmware_has_feature(FW_FEATURE_OPAL))
		return;

	nphb = pci_bus_to_host(npdev->bus)->private_data;
	npu = &nphb->npu;
	nvlink_dn = of_parse_phandle(npdev->dev.of_node, "ibm,nvlink", 0);
	if (WARN_ON(of_property_read_u32(nvlink_dn, "ibm,npu-link-index",
							&nvlink_index)))
		return;
	npu_context->npdev[npu->index][nvlink_index] = NULL;
	opal_npu_destroy_context(nphb->opal_id, npu_context->mm->context.id,
				PCI_DEVID(gpdev->bus->number, gpdev->devfn));
	kref_put(&npu_context->kref, pnv_npu2_release_context);
}
EXPORT_SYMBOL(pnv_npu2_destroy_context);

/*
 * Assumes mmap_sem is held for the context's associated mm.
 */
int pnv_npu2_handle_fault(struct npu_context *context, uintptr_t *ea,
			unsigned long *flags, unsigned long *status, int count)
{
	u64 rc = 0, result = 0;
	int i, is_write;
	struct page *page[1];

	/* mmap_sem should be held so the struct_mm must be present */
	struct mm_struct *mm = context->mm;

	if (!firmware_has_feature(FW_FEATURE_OPAL))
		return -ENODEV;

	WARN_ON(!rwsem_is_locked(&mm->mmap_sem));

	for (i = 0; i < count; i++) {
		is_write = flags[i] & NPU2_WRITE;
		rc = get_user_pages_remote(NULL, mm, ea[i], 1,
					is_write ? FOLL_WRITE : 0,
					page, NULL, NULL);

		/*
		 * To support virtualised environments we will have to do an
		 * access to the page to ensure it gets faulted into the
		 * hypervisor. For the moment virtualisation is not supported in
		 * other areas so leave the access out.
		 */
		if (rc != 1) {
			status[i] = rc;
			result = -EFAULT;
			continue;
		}

		status[i] = 0;
		put_page(page[0]);
	}

	return result;
}
EXPORT_SYMBOL(pnv_npu2_handle_fault);

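/*
 * Illustrative usage sketch (hypothetical caller, not part of this
 * file): a driver servicing a GPU address translation fault would pass
 * arrays of effective addresses and access flags and check the per-entry
 * status. fault_addr and is_store are made-up names.
 *
 *	uintptr_t ea[1] = { fault_addr };
 *	unsigned long flags[1] = { is_store ? NPU2_WRITE : 0 };
 *	unsigned long status[1];
 *	int rc;
 *
 *	down_read(&mm->mmap_sem);
 *	rc = pnv_npu2_handle_fault(ctx, ea, flags, status, 1);
 *	up_read(&mm->mmap_sem);
 */
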
int pnv_npu2_init(struct pnv_phb *phb)
{
	unsigned int i;
	u64 mmio_atsd;
	struct device_node *dn;
	struct pci_dev *gpdev;
	static int npu_index;
	uint64_t rc = 0;

	phb->npu.nmmu_flush =
		of_property_read_bool(phb->hose->dn, "ibm,nmmu-flush");
	for_each_child_of_node(phb->hose->dn, dn) {
		gpdev = pnv_pci_get_gpu_dev(get_pci_dev(dn));
		if (gpdev) {
			rc = opal_npu_map_lpar(phb->opal_id,
				PCI_DEVID(gpdev->bus->number, gpdev->devfn),
				0, 0);
			if (rc)
				dev_err(&gpdev->dev,
					"Error %lld mapping device to LPAR\n",
					rc);
		}
	}

	for (i = 0; !of_property_read_u64_index(phb->hose->dn, "ibm,mmio-atsd",
							i, &mmio_atsd); i++)
		phb->npu.mmio_atsd_regs[i] = ioremap(mmio_atsd, 32);

	pr_info("NPU%lld: Found %d MMIO ATSD registers", phb->opal_id, i);
	phb->npu.mmio_atsd_count = i;
	phb->npu.mmio_atsd_usage = 0;
	npu_index++;
	if (WARN_ON(npu_index >= NV_MAX_NPUS))
		return -ENOSPC;
	max_npu2_index = npu_index;
	phb->npu.index = npu_index;

	return 0;
}