arch/powerpc/platforms/powernv/npu-dma.c

/*
 * This file implements the DMA operations for NVLink devices. The NPU
 * devices all point to the same iommu table as the parent PCI device.
 *
 * Copyright Alistair Popple, IBM Corporation 2015.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 */

#include <linux/slab.h>
#include <linux/mmu_notifier.h>
#include <linux/mmu_context.h>
#include <linux/of.h>
#include <linux/export.h>
#include <linux/pci.h>
#include <linux/memblock.h>
#include <linux/iommu.h>

#include <asm/tlb.h>
#include <asm/powernv.h>
#include <asm/reg.h>
#include <asm/opal.h>
#include <asm/io.h>
#include <asm/iommu.h>
#include <asm/pnv-pci.h>
#include <asm/msi_bitmap.h>

#include "powernv.h"
#include "pci.h"

#define npu_to_phb(x) container_of(x, struct pnv_phb, npu)

/*
 * Other types of TCE cache invalidation are not functional in the
 * hardware.
 */
static struct pci_dev *get_pci_dev(struct device_node *dn)
{
        struct pci_dn *pdn = PCI_DN(dn);

        return pci_get_domain_bus_and_slot(pci_domain_nr(pdn->phb->bus),
                                           pdn->busno, pdn->devfn);
}

/* Given a NPU device get the associated PCI device. */
struct pci_dev *pnv_pci_get_gpu_dev(struct pci_dev *npdev)
{
        struct device_node *dn;
        struct pci_dev *gpdev;

        if (WARN_ON(!npdev))
                return NULL;

        if (WARN_ON(!npdev->dev.of_node))
                return NULL;

        /* Get associated PCI device */
        dn = of_parse_phandle(npdev->dev.of_node, "ibm,gpu", 0);
        if (!dn)
                return NULL;

        gpdev = get_pci_dev(dn);
        of_node_put(dn);

        return gpdev;
}
EXPORT_SYMBOL(pnv_pci_get_gpu_dev);

/* Given the real PCI device get a linked NPU device. */
struct pci_dev *pnv_pci_get_npu_dev(struct pci_dev *gpdev, int index)
{
        struct device_node *dn;
        struct pci_dev *npdev;

        if (WARN_ON(!gpdev))
                return NULL;

        /* Not all PCI devices have device-tree nodes */
        if (!gpdev->dev.of_node)
                return NULL;

        /* Get associated NPU device */
        dn = of_parse_phandle(gpdev->dev.of_node, "ibm,npu", index);
        if (!dn)
                return NULL;

        npdev = get_pci_dev(dn);
        of_node_put(dn);

        return npdev;
}
EXPORT_SYMBOL(pnv_pci_get_npu_dev);

#define NPU_DMA_OP_UNSUPPORTED()                                        \
        dev_err_once(dev, "%s operation unsupported for NVLink devices\n", \
                __func__)

static void *dma_npu_alloc(struct device *dev, size_t size,
                           dma_addr_t *dma_handle, gfp_t flag,
                           unsigned long attrs)
{
        NPU_DMA_OP_UNSUPPORTED();
        return NULL;
}

static void dma_npu_free(struct device *dev, size_t size,
                         void *vaddr, dma_addr_t dma_handle,
                         unsigned long attrs)
{
        NPU_DMA_OP_UNSUPPORTED();
}

static dma_addr_t dma_npu_map_page(struct device *dev, struct page *page,
                                   unsigned long offset, size_t size,
                                   enum dma_data_direction direction,
                                   unsigned long attrs)
{
        NPU_DMA_OP_UNSUPPORTED();
        return 0;
}

static int dma_npu_map_sg(struct device *dev, struct scatterlist *sglist,
                          int nelems, enum dma_data_direction direction,
                          unsigned long attrs)
{
        NPU_DMA_OP_UNSUPPORTED();
        return 0;
}

static int dma_npu_dma_supported(struct device *dev, u64 mask)
{
        NPU_DMA_OP_UNSUPPORTED();
        return 0;
}

static u64 dma_npu_get_required_mask(struct device *dev)
{
        NPU_DMA_OP_UNSUPPORTED();
        return 0;
}

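/*
 * All of the above are stubs: DMA for an NVLink device is configured via the
 * real GPU PCI device's IOMMU table, so any attempt to use these operations
 * directly just logs an error once.
 */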
static const struct dma_map_ops dma_npu_ops = {
        .map_page = dma_npu_map_page,
        .map_sg = dma_npu_map_sg,
        .alloc = dma_npu_alloc,
        .free = dma_npu_free,
        .dma_supported = dma_npu_dma_supported,
        .get_required_mask = dma_npu_get_required_mask,
};

/*
 * Returns the PE associated with the PCI device of the given
 * NPU. Returns the linked pci device if pci_dev != NULL.
 */
static struct pnv_ioda_pe *get_gpu_pci_dev_and_pe(struct pnv_ioda_pe *npe,
                                                  struct pci_dev **gpdev)
{
        struct pnv_phb *phb;
        struct pci_controller *hose;
        struct pci_dev *pdev;
        struct pnv_ioda_pe *pe;
        struct pci_dn *pdn;

        pdev = pnv_pci_get_gpu_dev(npe->pdev);
        if (!pdev)
                return NULL;

        pdn = pci_get_pdn(pdev);
        if (WARN_ON(!pdn || pdn->pe_number == IODA_INVALID_PE))
                return NULL;

        hose = pci_bus_to_host(pdev->bus);
        phb = hose->private_data;
        pe = &phb->ioda.pe_array[pdn->pe_number];

        if (gpdev)
                *gpdev = pdev;

        return pe;
}

long pnv_npu_set_window(struct pnv_ioda_pe *npe, int num,
                        struct iommu_table *tbl)
{
        struct pnv_phb *phb = npe->phb;
        int64_t rc;
        const unsigned long size = tbl->it_indirect_levels ?
                tbl->it_level_size : tbl->it_size;
        const __u64 start_addr = tbl->it_offset << tbl->it_page_shift;
        const __u64 win_size = tbl->it_size << tbl->it_page_shift;

        pe_info(npe, "Setting up window %llx..%llx pg=%lx\n",
                start_addr, start_addr + win_size - 1,
                IOMMU_PAGE_SIZE(tbl));

        rc = opal_pci_map_pe_dma_window(phb->opal_id,
                        npe->pe_number,
                        npe->pe_number,
                        tbl->it_indirect_levels + 1,
                        __pa(tbl->it_base),
                        size << 3,
                        IOMMU_PAGE_SIZE(tbl));
        if (rc) {
                pe_err(npe, "Failed to configure TCE table, err %lld\n", rc);
                return rc;
        }
        pnv_pci_ioda2_tce_invalidate_entire(phb, false);

        /* Add the table to the list so its TCE cache will get invalidated */
        pnv_pci_link_table_and_group(phb->hose->node, num,
                        tbl, &npe->table_group);

        return 0;
}

long pnv_npu_unset_window(struct pnv_ioda_pe *npe, int num)
{
        struct pnv_phb *phb = npe->phb;
        int64_t rc;

        pe_info(npe, "Removing DMA window\n");

        rc = opal_pci_map_pe_dma_window(phb->opal_id, npe->pe_number,
                        npe->pe_number,
                        0/* levels */, 0/* table address */,
                        0/* table size */, 0/* page size */);
        if (rc) {
                pe_err(npe, "Unmapping failed, ret = %lld\n", rc);
                return rc;
        }
        pnv_pci_ioda2_tce_invalidate_entire(phb, false);

        pnv_pci_unlink_table_and_group(npe->table_group.tables[num],
                        &npe->table_group);

        return 0;
}

/*
 * Enables 32 bit DMA on NPU.
 */
static void pnv_npu_dma_set_32(struct pnv_ioda_pe *npe)
{
        struct pci_dev *gpdev;
        struct pnv_ioda_pe *gpe;
        int64_t rc;

        /*
         * Find the associated PCI devices and get the dma window
         * information from there.
         */
        if (!npe->pdev || !(npe->flags & PNV_IODA_PE_DEV))
                return;

        gpe = get_gpu_pci_dev_and_pe(npe, &gpdev);
        if (!gpe)
                return;

        rc = pnv_npu_set_window(npe, 0, gpe->table_group.tables[0]);

        /*
         * We don't initialise npu_pe->tce32_table as we always use
         * dma_npu_ops which are nops.
         */
        set_dma_ops(&npe->pdev->dev, &dma_npu_ops);
}

/*
 * Enables bypass mode on the NPU. The NPU only supports one
 * window per link, so bypass needs to be explicitly enabled or
 * disabled. Unlike for a PHB3, bypass and non-bypass modes can't be
 * active at the same time.
 */
static int pnv_npu_dma_set_bypass(struct pnv_ioda_pe *npe)
{
        struct pnv_phb *phb = npe->phb;
        int64_t rc = 0;
        phys_addr_t top = memblock_end_of_DRAM();

        if (phb->type != PNV_PHB_NPU_NVLINK || !npe->pdev)
                return -EINVAL;

        rc = pnv_npu_unset_window(npe, 0);
        if (rc != OPAL_SUCCESS)
                return rc;

        /* Enable the bypass window */

        top = roundup_pow_of_two(top);
        dev_info(&npe->pdev->dev, "Enabling bypass for PE %x\n",
                        npe->pe_number);
        rc = opal_pci_map_pe_dma_window_real(phb->opal_id,
                        npe->pe_number, npe->pe_number,
                        0 /* bypass base */, top);

        if (rc == OPAL_SUCCESS)
                pnv_pci_ioda2_tce_invalidate_entire(phb, false);

        return rc;
}

void pnv_npu_try_dma_set_bypass(struct pci_dev *gpdev, bool bypass)
{
        int i;
        struct pnv_phb *phb;
        struct pci_dn *pdn;
        struct pnv_ioda_pe *npe;
        struct pci_dev *npdev;

        for (i = 0; ; ++i) {
                npdev = pnv_pci_get_npu_dev(gpdev, i);

                if (!npdev)
                        break;

                pdn = pci_get_pdn(npdev);
                if (WARN_ON(!pdn || pdn->pe_number == IODA_INVALID_PE))
                        return;

                phb = pci_bus_to_host(npdev->bus)->private_data;

                /* We only do bypass if it's enabled on the linked device */
                npe = &phb->ioda.pe_array[pdn->pe_number];

                if (bypass) {
                        dev_info(&npdev->dev,
                                        "Using 64-bit DMA iommu bypass\n");
                        pnv_npu_dma_set_bypass(npe);
                } else {
                        dev_info(&npdev->dev, "Using 32-bit DMA via iommu\n");
                        pnv_npu_dma_set_32(npe);
                }
        }
}

/* Switch ownership from platform code to external user (e.g. VFIO) */
void pnv_npu_take_ownership(struct pnv_ioda_pe *npe)
{
        struct pnv_phb *phb = npe->phb;
        int64_t rc;

        /*
         * Note: NPU has just a single TVE in the hardware which means that
         * while used by the kernel, it can have either 32bit window or
         * DMA bypass but never both. So we deconfigure 32bit window only
         * if it was enabled at the moment of ownership change.
         */
        if (npe->table_group.tables[0]) {
                pnv_npu_unset_window(npe, 0);
                return;
        }

        /* Disable bypass */
        rc = opal_pci_map_pe_dma_window_real(phb->opal_id,
                        npe->pe_number, npe->pe_number,
                        0 /* bypass base */, 0);
        if (rc) {
                pe_err(npe, "Failed to disable bypass, err %lld\n", rc);
                return;
        }
        pnv_pci_ioda2_tce_invalidate_entire(npe->phb, false);
}

struct pnv_ioda_pe *pnv_pci_npu_setup_iommu(struct pnv_ioda_pe *npe)
{
        struct pnv_phb *phb = npe->phb;
        struct pci_bus *pbus = phb->hose->bus;
        struct pci_dev *npdev, *gpdev = NULL, *gptmp;
        struct pnv_ioda_pe *gpe = get_gpu_pci_dev_and_pe(npe, &gpdev);

        if (!gpe || !gpdev)
                return NULL;

        list_for_each_entry(npdev, &pbus->devices, bus_list) {
                gptmp = pnv_pci_get_gpu_dev(npdev);

                if (gptmp != gpdev)
                        continue;

                pe_info(gpe, "Attached NPU %s\n", dev_name(&npdev->dev));
                iommu_group_add_device(gpe->table_group.group, &npdev->dev);
        }

        return gpe;
}

/* Maximum number of nvlinks per npu */
#define NV_MAX_LINKS 6

/* Maximum index of npu2 hosts in the system. Always < NV_MAX_NPUS */
static int max_npu2_index;

struct npu_context {
        struct mm_struct *mm;
        struct pci_dev *npdev[NV_MAX_NPUS][NV_MAX_LINKS];
        struct mmu_notifier mn;
        struct kref kref;
        bool nmmu_flush;

        /* Callback to stop translation requests on a given GPU */
        struct npu_context *(*release_cb)(struct npu_context *, void *);

        /*
         * Private pointer passed to the above callback for usage by
         * device drivers.
         */
        void *priv;
};

/*
 * Find a free MMIO ATSD register and mark it in use. Return -ENOSPC
 * if none are available.
 */
static int get_mmio_atsd_reg(struct npu *npu)
{
        int i;

        for (i = 0; i < npu->mmio_atsd_count; i++) {
                if (!test_and_set_bit(i, &npu->mmio_atsd_usage))
                        return i;
        }

        return -ENOSPC;
}

static void put_mmio_atsd_reg(struct npu *npu, int reg)
{
        clear_bit(reg, &npu->mmio_atsd_usage);
}

/* MMIO ATSD register offsets */
#define XTS_ATSD_AVA  1
#define XTS_ATSD_STAT 2

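/*
 * Grab a free MMIO ATSD register (spinning until one becomes available),
 * program the target address and the launch word, and return the register
 * index so the caller can later poll for completion and release it.
 */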
static int mmio_launch_invalidate(struct npu *npu, unsigned long launch,
                                  unsigned long va)
{
        int mmio_atsd_reg;

        do {
                mmio_atsd_reg = get_mmio_atsd_reg(npu);
                cpu_relax();
        } while (mmio_atsd_reg < 0);

        __raw_writeq(cpu_to_be64(va),
                     npu->mmio_atsd_regs[mmio_atsd_reg] + XTS_ATSD_AVA);
        eieio();
        __raw_writeq(cpu_to_be64(launch), npu->mmio_atsd_regs[mmio_atsd_reg]);

        return mmio_atsd_reg;
}

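/* Launch an ATSD invalidating every translation for the given PID. */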
static int mmio_invalidate_pid(struct npu *npu, unsigned long pid, bool flush)
{
        unsigned long launch;

        /* IS set to invalidate matching PID */
        launch = PPC_BIT(12);

        /* PRS set to process-scoped */
        launch |= PPC_BIT(13);

        /* AP */
        launch |= (u64) mmu_get_ap(mmu_virtual_psize) << PPC_BITLSHIFT(17);

        /* PID */
        launch |= pid << PPC_BITLSHIFT(38);

        /* No flush */
        launch |= !flush << PPC_BITLSHIFT(39);

        /* Invalidating the entire process doesn't use a va */
        return mmio_launch_invalidate(npu, launch, 0);
}

static int mmio_invalidate_va(struct npu *npu, unsigned long va,
                              unsigned long pid, bool flush)
{
        unsigned long launch;

        /* IS set to invalidate target VA */
        launch = 0;

        /* PRS set to process scoped */
        launch |= PPC_BIT(13);

        /* AP */
        launch |= (u64) mmu_get_ap(mmu_virtual_psize) << PPC_BITLSHIFT(17);

        /* PID */
        launch |= pid << PPC_BITLSHIFT(38);

        /* No flush */
        launch |= !flush << PPC_BITLSHIFT(39);

        return mmio_launch_invalidate(npu, launch, va);
}

#define mn_to_npu_context(x) container_of(x, struct npu_context, mn)

struct mmio_atsd_reg {
        struct npu *npu;
        int reg;
};

static void mmio_invalidate_wait(
        struct mmio_atsd_reg mmio_atsd_reg[NV_MAX_NPUS], bool flush)
{
        struct npu *npu;
        int i, reg;

        /* Wait for all invalidations to complete */
        for (i = 0; i <= max_npu2_index; i++) {
                if (mmio_atsd_reg[i].reg < 0)
                        continue;

                /* Wait for completion */
                npu = mmio_atsd_reg[i].npu;
                reg = mmio_atsd_reg[i].reg;
                while (__raw_readq(npu->mmio_atsd_regs[reg] + XTS_ATSD_STAT))
                        cpu_relax();

                put_mmio_atsd_reg(npu, reg);

                /*
                 * The GPU requires two flush ATSDs to ensure all entries have
                 * been flushed. We use PID 0 as it will never be used for a
                 * process on the GPU.
                 */
                if (flush)
                        mmio_invalidate_pid(npu, 0, true);
        }
}

/*
 * Invalidate either a single address or an entire PID depending on
 * the value of va.
 */
static void mmio_invalidate(struct npu_context *npu_context, int va,
                        unsigned long address, bool flush)
{
        int i, j;
        struct npu *npu;
        struct pnv_phb *nphb;
        struct pci_dev *npdev;
        struct mmio_atsd_reg mmio_atsd_reg[NV_MAX_NPUS];
        unsigned long pid = npu_context->mm->context.id;

        if (npu_context->nmmu_flush)
                /*
                 * Unfortunately the nest mmu does not support flushing specific
                 * addresses so we have to flush the whole mm once before
                 * shooting down the GPU translation.
                 */
                flush_all_mm(npu_context->mm);

        /*
         * Loop over all the NPUs this process is active on and launch
         * an invalidate.
         */
        for (i = 0; i <= max_npu2_index; i++) {
                mmio_atsd_reg[i].reg = -1;
                for (j = 0; j < NV_MAX_LINKS; j++) {
                        npdev = npu_context->npdev[i][j];
                        if (!npdev)
                                continue;

                        nphb = pci_bus_to_host(npdev->bus)->private_data;
                        npu = &nphb->npu;
                        mmio_atsd_reg[i].npu = npu;

                        if (va)
                                mmio_atsd_reg[i].reg =
                                        mmio_invalidate_va(npu, address, pid,
                                                        flush);
                        else
                                mmio_atsd_reg[i].reg =
                                        mmio_invalidate_pid(npu, pid, flush);

                        /*
                         * The NPU hardware forwards the shootdown to all GPUs
                         * so we only have to launch one shootdown per NPU.
                         */
                        break;
                }
        }

        mmio_invalidate_wait(mmio_atsd_reg, flush);
        if (flush)
                /* Wait for the flush to complete */
                mmio_invalidate_wait(mmio_atsd_reg, false);
}

static void pnv_npu2_mn_release(struct mmu_notifier *mn,
                                struct mm_struct *mm)
{
        struct npu_context *npu_context = mn_to_npu_context(mn);

        /* Call into device driver to stop requests to the NMMU */
        if (npu_context->release_cb)
                npu_context->release_cb(npu_context, npu_context->priv);

        /*
         * There should be no more translation requests for this PID, but we
         * need to ensure any entries for it are removed from the TLB.
         */
        mmio_invalidate(npu_context, 0, 0, true);
}

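/* A PTE in this mm has changed; shoot down the stale GPU translation. */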
static void pnv_npu2_mn_change_pte(struct mmu_notifier *mn,
                                   struct mm_struct *mm,
                                   unsigned long address,
                                   pte_t pte)
{
        struct npu_context *npu_context = mn_to_npu_context(mn);

        mmio_invalidate(npu_context, 1, address, true);
}

static void pnv_npu2_mn_invalidate_range(struct mmu_notifier *mn,
                                         struct mm_struct *mm,
                                         unsigned long start, unsigned long end)
{
        struct npu_context *npu_context = mn_to_npu_context(mn);
        unsigned long address;

        for (address = start; address < end; address += PAGE_SIZE)
                mmio_invalidate(npu_context, 1, address, false);

        /* Do the flush only on the final address == end */
        mmio_invalidate(npu_context, 1, address, true);
}

static const struct mmu_notifier_ops nv_nmmu_notifier_ops = {
        .release = pnv_npu2_mn_release,
        .change_pte = pnv_npu2_mn_change_pte,
        .invalidate_range = pnv_npu2_mn_invalidate_range,
};

/*
 * Call into OPAL to setup the nmmu context for the current task in
 * the NPU. This must be called to setup the context tables before the
 * GPU issues ATRs. gpdev should be a pointer to the PCIe GPU device.
 *
 * A release callback should be registered to allow a device driver to
 * be notified that it should not launch any new translation requests
 * as the final TLB invalidate is about to occur.
 *
 * Returns an error if no contexts are currently available, or an
 * npu_context which should be passed to pnv_npu2_handle_fault().
 *
 * mmap_sem must be held in write mode.
 */
struct npu_context *pnv_npu2_init_context(struct pci_dev *gpdev,
                        unsigned long flags,
                        struct npu_context *(*cb)(struct npu_context *, void *),
                        void *priv)
{
        int rc;
        u32 nvlink_index;
        struct device_node *nvlink_dn;
        struct mm_struct *mm = current->mm;
        struct pnv_phb *nphb;
        struct npu *npu;
        struct npu_context *npu_context;

        /*
         * At present we don't support GPUs connected to multiple NPUs and I'm
         * not sure the hardware does either.
         */
        struct pci_dev *npdev = pnv_pci_get_npu_dev(gpdev, 0);

        if (!firmware_has_feature(FW_FEATURE_OPAL))
                return ERR_PTR(-ENODEV);

        if (!npdev)
                /* No nvlink associated with this GPU device */
                return ERR_PTR(-ENODEV);

        if (!mm || mm->context.id == 0) {
                /*
                 * Kernel thread contexts are not supported and context id 0 is
                 * reserved on the GPU.
                 */
                return ERR_PTR(-EINVAL);
        }

        nphb = pci_bus_to_host(npdev->bus)->private_data;
        npu = &nphb->npu;

        /*
         * Setup the NPU context table for a particular GPU. These need to be
         * per-GPU as we need the tables to filter ATSDs when there are no
         * active contexts on a particular GPU.
         */
        rc = opal_npu_init_context(nphb->opal_id, mm->context.id, flags,
                                PCI_DEVID(gpdev->bus->number, gpdev->devfn));
        if (rc < 0)
                return ERR_PTR(-ENOSPC);

        /*
         * We store the npu pci device so we can more easily get at the
         * associated npus.
         */
        npu_context = mm->context.npu_context;
        if (!npu_context) {
                npu_context = kzalloc(sizeof(struct npu_context), GFP_KERNEL);
                if (!npu_context)
                        return ERR_PTR(-ENOMEM);

                mm->context.npu_context = npu_context;
                npu_context->mm = mm;
                npu_context->mn.ops = &nv_nmmu_notifier_ops;
                __mmu_notifier_register(&npu_context->mn, mm);
                kref_init(&npu_context->kref);
        } else {
                kref_get(&npu_context->kref);
        }

        npu_context->release_cb = cb;
        npu_context->priv = priv;
        nvlink_dn = of_parse_phandle(npdev->dev.of_node, "ibm,nvlink", 0);
        if (WARN_ON(of_property_read_u32(nvlink_dn, "ibm,npu-link-index",
                                                        &nvlink_index)))
                return ERR_PTR(-ENODEV);
        npu_context->npdev[npu->index][nvlink_index] = npdev;

        if (!nphb->npu.nmmu_flush) {
                /*
                 * If we're not explicitly flushing ourselves we need to mark
                 * the thread for global flushes
                 */
                npu_context->nmmu_flush = false;
                mm_context_add_copro(mm);
        } else
                npu_context->nmmu_flush = true;

        return npu_context;
}
EXPORT_SYMBOL(pnv_npu2_init_context);

static void pnv_npu2_release_context(struct kref *kref)
{
        struct npu_context *npu_context =
                container_of(kref, struct npu_context, kref);

        if (!npu_context->nmmu_flush)
                mm_context_remove_copro(npu_context->mm);

        npu_context->mm->context.npu_context = NULL;
        mmu_notifier_unregister(&npu_context->mn,
                                npu_context->mm);

        kfree(npu_context);
}

void pnv_npu2_destroy_context(struct npu_context *npu_context,
                        struct pci_dev *gpdev)
{
        struct pnv_phb *nphb;
        struct npu *npu;
        struct pci_dev *npdev = pnv_pci_get_npu_dev(gpdev, 0);
        struct device_node *nvlink_dn;
        u32 nvlink_index;

        if (WARN_ON(!npdev))
                return;

        if (!firmware_has_feature(FW_FEATURE_OPAL))
                return;

        nphb = pci_bus_to_host(npdev->bus)->private_data;
        npu = &nphb->npu;
        nvlink_dn = of_parse_phandle(npdev->dev.of_node, "ibm,nvlink", 0);
        if (WARN_ON(of_property_read_u32(nvlink_dn, "ibm,npu-link-index",
                                                        &nvlink_index)))
                return;
        npu_context->npdev[npu->index][nvlink_index] = NULL;
        opal_npu_destroy_context(nphb->opal_id, npu_context->mm->context.id,
                                PCI_DEVID(gpdev->bus->number, gpdev->devfn));
        kref_put(&npu_context->kref, pnv_npu2_release_context);
}
EXPORT_SYMBOL(pnv_npu2_destroy_context);

/*
 * Assumes mmap_sem is held for the context's associated mm.
 */
int pnv_npu2_handle_fault(struct npu_context *context, uintptr_t *ea,
                        unsigned long *flags, unsigned long *status, int count)
{
        u64 rc = 0, result = 0;
        int i, is_write;
        struct page *page[1];

        /* mmap_sem should be held so the struct_mm must be present */
        struct mm_struct *mm = context->mm;

        if (!firmware_has_feature(FW_FEATURE_OPAL))
                return -ENODEV;

        WARN_ON(!rwsem_is_locked(&mm->mmap_sem));

        for (i = 0; i < count; i++) {
                is_write = flags[i] & NPU2_WRITE;
                rc = get_user_pages_remote(NULL, mm, ea[i], 1,
                                        is_write ? FOLL_WRITE : 0,
                                        page, NULL, NULL);

                /*
                 * To support virtualised environments we will have to do an
                 * access to the page to ensure it gets faulted into the
                 * hypervisor. For the moment virtualisation is not supported
                 * in other areas so leave the access out.
                 */
                if (rc != 1) {
                        status[i] = rc;
                        result = -EFAULT;
                        continue;
                }

                status[i] = 0;
                put_page(page[0]);
        }

        return result;
}
EXPORT_SYMBOL(pnv_npu2_handle_fault);

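/*
 * Per-PHB NPU2 initialisation: map each GPU behind this NPU to the LPAR via
 * OPAL, ioremap the MMIO ATSD registers advertised in the device tree and
 * record this NPU's index for later ATSD shootdowns.
 */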
int pnv_npu2_init(struct pnv_phb *phb)
{
        unsigned int i;
        u64 mmio_atsd;
        struct device_node *dn;
        struct pci_dev *gpdev;
        static int npu_index;
        uint64_t rc = 0;

        phb->npu.nmmu_flush =
                of_property_read_bool(phb->hose->dn, "ibm,nmmu-flush");
        for_each_child_of_node(phb->hose->dn, dn) {
                gpdev = pnv_pci_get_gpu_dev(get_pci_dev(dn));
                if (gpdev) {
                        rc = opal_npu_map_lpar(phb->opal_id,
                                PCI_DEVID(gpdev->bus->number, gpdev->devfn),
                                0, 0);
                        if (rc)
                                dev_err(&gpdev->dev,
                                        "Error %lld mapping device to LPAR\n",
                                        rc);
                }
        }

        for (i = 0; !of_property_read_u64_index(phb->hose->dn, "ibm,mmio-atsd",
                                                        i, &mmio_atsd); i++)
                phb->npu.mmio_atsd_regs[i] = ioremap(mmio_atsd, 32);

        pr_info("NPU%lld: Found %d MMIO ATSD registers\n", phb->opal_id, i);
        phb->npu.mmio_atsd_count = i;
        phb->npu.mmio_atsd_usage = 0;
        npu_index++;
        if (WARN_ON(npu_index >= NV_MAX_NPUS))
                return -ENOSPC;
        max_npu2_index = npu_index;
        phb->npu.index = npu_index;

        return 0;
}