// SPDX-License-Identifier: GPL-2.0+
/* Copyright (C) 2017-2018 Broadcom */
/**
 * DOC: Broadcom V3D MMU
 *
 * The V3D 3.x hardware (compared to VC4) now includes an MMU. It has
 * a single level of page tables for the V3D's 4GB address space to
 * map to AXI bus addresses, thus it could need up to 4MB of
 * physically contiguous memory to store the PTEs.
 *
 * Because the 4MB of contiguous memory for page tables is precious,
 * and switching between them is expensive, we load all BOs into the
 * same 4GB address space.
 *
 * To protect clients from each other, we should use the GMP to
 * quickly mask out (at 128kb granularity) what pages are available to
 * each client. This is not yet implemented.
 */

#include "v3d_drv.h"
#include "v3d_regs.h"

#define V3D_MMU_PAGE_SHIFT 12

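/* A quick check of the DOC numbers above (illustrative only, not used
 * by the driver): 4GB of address space at 4KB (1 << V3D_MMU_PAGE_SHIFT)
 * per page is 1M PTEs, and at 4 bytes (sizeof(u32)) per PTE that is the
 * 4MB of physically contiguous page-table memory quoted above:
 *
 *   (SZ_4G >> V3D_MMU_PAGE_SHIFT) * sizeof(u32) == SZ_4M
 */
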
/* Note: All PTEs for the 1MB superpage must be filled with the
 * superpage bit set.
 */
#define V3D_PTE_SUPERPAGE BIT(31)
#define V3D_PTE_WRITEABLE BIT(29)
#define V3D_PTE_VALID BIT(28)

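/* Illustrative sketch (not driver code): a PTE is the page frame number
 * of a 4KB-aligned bus address OR'd with the flag bits above, as
 * v3d_mmu_insert_ptes() below constructs it:
 *
 *   u32 pte = V3D_PTE_VALID | V3D_PTE_WRITEABLE |
 *             (dma_addr >> V3D_MMU_PAGE_SHIFT);
 */
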
static int v3d_mmu_flush_all(struct v3d_dev *v3d)
{
	int ret;

	/* Make sure that another flush isn't already running when we
	 * start this one.
	 */
	ret = wait_for(!(V3D_READ(V3D_MMU_CTL) &
			 V3D_MMU_CTL_TLB_CLEARING), 100);
	if (ret)
		dev_err(v3d->drm.dev, "TLB clear wait idle pre-wait failed\n");

	/* Kick off a TLB clear in the MMU and a flush of the MMU cache. */
	V3D_WRITE(V3D_MMU_CTL, V3D_READ(V3D_MMU_CTL) |
		  V3D_MMU_CTL_TLB_CLEAR);

	V3D_WRITE(V3D_MMUC_CONTROL,
		  V3D_MMUC_CONTROL_FLUSH |
		  V3D_MMUC_CONTROL_ENABLE);

	/* Wait for both operations to complete. */
	ret = wait_for(!(V3D_READ(V3D_MMU_CTL) &
			 V3D_MMU_CTL_TLB_CLEARING), 100);
	if (ret) {
		dev_err(v3d->drm.dev, "TLB clear wait idle failed\n");
		return ret;
	}

	ret = wait_for(!(V3D_READ(V3D_MMUC_CONTROL) &
			 V3D_MMUC_CONTROL_FLUSHING), 100);
	if (ret)
		dev_err(v3d->drm.dev, "MMUC flush wait idle failed\n");

	return ret;
}

int v3d_mmu_set_page_table(struct v3d_dev *v3d)
{
	V3D_WRITE(V3D_MMU_PT_PA_BASE, v3d->pt_paddr >> V3D_MMU_PAGE_SHIFT);
	V3D_WRITE(V3D_MMU_CTL,
		  V3D_MMU_CTL_ENABLE |
		  V3D_MMU_CTL_PT_INVALID_ENABLE |
		  V3D_MMU_CTL_PT_INVALID_ABORT |
		  V3D_MMU_CTL_PT_INVALID_INT |
		  V3D_MMU_CTL_WRITE_VIOLATION_ABORT |
		  V3D_MMU_CTL_WRITE_VIOLATION_INT |
		  V3D_MMU_CTL_CAP_EXCEEDED_ABORT |
		  V3D_MMU_CTL_CAP_EXCEEDED_INT);
	/* Point faulting accesses at a dedicated scratch page rather
	 * than letting them reach arbitrary memory.
	 */
	V3D_WRITE(V3D_MMU_ILLEGAL_ADDR,
		  (v3d->mmu_scratch_paddr >> V3D_MMU_PAGE_SHIFT) |
		  V3D_MMU_ILLEGAL_ADDR_ENABLE);
	V3D_WRITE(V3D_MMUC_CONTROL, V3D_MMUC_CONTROL_ENABLE);

	return v3d_mmu_flush_all(v3d);
}

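/* Illustrative usage sketch (assumed caller, not from this file): the
 * driver probe/resume path would program the page table base once the
 * physically contiguous allocation backing v3d->pt/pt_paddr exists:
 *
 *   ret = v3d_mmu_set_page_table(v3d);
 *   if (ret)
 *           return ret;
 */
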
void v3d_mmu_insert_ptes(struct v3d_bo *bo)
{
	struct drm_gem_shmem_object *shmem_obj = &bo->base;
	struct v3d_dev *v3d = to_v3d_dev(shmem_obj->base.dev);
	u32 page = bo->node.start;
	u32 page_prot = V3D_PTE_WRITEABLE | V3D_PTE_VALID;
	struct sg_dma_page_iter dma_iter;

	for_each_sgtable_dma_page(shmem_obj->sgt, &dma_iter, 0) {
		dma_addr_t dma_addr = sg_page_iter_dma_address(&dma_iter);
		u32 page_address = dma_addr >> V3D_MMU_PAGE_SHIFT;
		u32 pte = page_prot | page_address;
		u32 i;

		/* Make sure the bus address fits in the page-frame-number
		 * range the PTE can encode.
		 */
		BUG_ON(page_address + (PAGE_SIZE >> V3D_MMU_PAGE_SHIFT) >=
		       BIT(24));
		/* A CPU page may cover several 4KB MMU pages; emit one
		 * PTE per MMU page.
		 */
		for (i = 0; i < PAGE_SIZE >> V3D_MMU_PAGE_SHIFT; i++)
			v3d->pt[page++] = pte + i;
	}

	WARN_ON_ONCE(page - bo->node.start !=
		     shmem_obj->base.size >> V3D_MMU_PAGE_SHIFT);

	if (v3d_mmu_flush_all(v3d))
		dev_err(v3d->drm.dev, "MMU flush timeout\n");
}

void v3d_mmu_remove_ptes(struct v3d_bo *bo)
{
	struct v3d_dev *v3d = to_v3d_dev(bo->base.base.dev);
	u32 npages = bo->base.base.size >> V3D_MMU_PAGE_SHIFT;
	u32 page;

	/* Clear every PTE backing this BO's range of the address space. */
	for (page = bo->node.start; page < bo->node.start + npages; page++)
		v3d->pt[page] = 0;

	if (v3d_mmu_flush_all(v3d))
		dev_err(v3d->drm.dev, "MMU flush timeout\n");
}