// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020-2024 Intel Corporation
 */
#include <linux/circ_buf.h>
#include <linux/highmem.h>

#include "ivpu_drv.h"
#include "ivpu_hw.h"
#include "ivpu_hw_reg_io.h"
#include "ivpu_mmu.h"
#include "ivpu_mmu_context.h"
#include "ivpu_pm.h"
#define IVPU_MMU_REG_IDR0		0x00200000u
#define IVPU_MMU_REG_IDR1		0x00200004u
#define IVPU_MMU_REG_IDR3		0x0020000cu
#define IVPU_MMU_REG_IDR5		0x00200014u
#define IVPU_MMU_REG_CR0		0x00200020u
#define IVPU_MMU_REG_CR0ACK		0x00200024u
#define IVPU_MMU_REG_CR0ACK_VAL_MASK	GENMASK(31, 0)
#define IVPU_MMU_REG_CR1		0x00200028u
#define IVPU_MMU_REG_CR2		0x0020002cu
#define IVPU_MMU_REG_IRQ_CTRL		0x00200050u
#define IVPU_MMU_REG_IRQ_CTRLACK	0x00200054u
#define IVPU_MMU_REG_IRQ_CTRLACK_VAL_MASK	GENMASK(31, 0)

#define IVPU_MMU_REG_GERROR		0x00200060u
#define IVPU_MMU_REG_GERROR_CMDQ_MASK		BIT_MASK(0)
#define IVPU_MMU_REG_GERROR_EVTQ_ABT_MASK	BIT_MASK(2)
#define IVPU_MMU_REG_GERROR_PRIQ_ABT_MASK	BIT_MASK(3)
#define IVPU_MMU_REG_GERROR_MSI_CMDQ_ABT_MASK	BIT_MASK(4)
#define IVPU_MMU_REG_GERROR_MSI_EVTQ_ABT_MASK	BIT_MASK(5)
#define IVPU_MMU_REG_GERROR_MSI_PRIQ_ABT_MASK	BIT_MASK(6)
#define IVPU_MMU_REG_GERROR_MSI_ABT_MASK	BIT_MASK(7)

#define IVPU_MMU_REG_GERRORN		0x00200064u

#define IVPU_MMU_REG_STRTAB_BASE	0x00200080u
#define IVPU_MMU_REG_STRTAB_BASE_CFG	0x00200088u
#define IVPU_MMU_REG_CMDQ_BASE		0x00200090u
#define IVPU_MMU_REG_CMDQ_PROD		0x00200098u
#define IVPU_MMU_REG_CMDQ_CONS		0x0020009cu
#define IVPU_MMU_REG_CMDQ_CONS_VAL_MASK	GENMASK(23, 0)
#define IVPU_MMU_REG_CMDQ_CONS_ERR_MASK	GENMASK(30, 24)
#define IVPU_MMU_REG_EVTQ_BASE		0x002000a0u
#define IVPU_MMU_REG_EVTQ_PROD		0x002000a8u
#define IVPU_MMU_REG_EVTQ_CONS		0x002000acu
#define IVPU_MMU_REG_EVTQ_PROD_SEC	(0x002000a8u + SZ_64K)
#define IVPU_MMU_REG_EVTQ_CONS_SEC	(0x002000acu + SZ_64K)

#define IVPU_MMU_IDR0_REF		0x080f3e0f
#define IVPU_MMU_IDR0_REF_SIMICS	0x080f3e1f
#define IVPU_MMU_IDR1_REF		0x0e739d18
#define IVPU_MMU_IDR3_REF		0x0000003c
#define IVPU_MMU_IDR5_REF		0x00040070
#define IVPU_MMU_IDR5_REF_SIMICS	0x00000075
#define IVPU_MMU_IDR5_REF_FPGA		0x00800075
#define IVPU_MMU_CDTAB_ENT_SIZE		64
#define IVPU_MMU_CDTAB_ENT_COUNT_LOG2	8 /* 256 entries */
#define IVPU_MMU_CDTAB_ENT_COUNT	((u32)1 << IVPU_MMU_CDTAB_ENT_COUNT_LOG2)

#define IVPU_MMU_STREAM_ID0		0
#define IVPU_MMU_STREAM_ID3		3

#define IVPU_MMU_STRTAB_ENT_SIZE	64
#define IVPU_MMU_STRTAB_ENT_COUNT	4
#define IVPU_MMU_STRTAB_CFG_LOG2SIZE	2
#define IVPU_MMU_STRTAB_CFG		IVPU_MMU_STRTAB_CFG_LOG2SIZE
#define IVPU_MMU_Q_COUNT_LOG2		4 /* 16 entries */
#define IVPU_MMU_Q_COUNT		((u32)1 << IVPU_MMU_Q_COUNT_LOG2)
#define IVPU_MMU_Q_WRAP_MASK		GENMASK(IVPU_MMU_Q_COUNT_LOG2, 0)
#define IVPU_MMU_Q_IDX_MASK		(IVPU_MMU_Q_COUNT - 1)
#define IVPU_MMU_Q_IDX(val)		((val) & IVPU_MMU_Q_IDX_MASK)
#define IVPU_MMU_Q_WRP(val)		((val) & IVPU_MMU_Q_COUNT)
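
/*
 * Queue pointers follow the SMMUv3 convention: a PROD/CONS value carries the
 * entry index in its low IVPU_MMU_Q_COUNT_LOG2 bits plus a single wrap bit
 * above them. Equal index and equal wrap bit means the queue is empty; equal
 * index with different wrap bits means it is full (see
 * ivpu_mmu_queue_is_full() and ivpu_mmu_queue_is_empty() below).
 */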
#define IVPU_MMU_CMDQ_CMD_SIZE		16
#define IVPU_MMU_CMDQ_SIZE		(IVPU_MMU_Q_COUNT * IVPU_MMU_CMDQ_CMD_SIZE)

#define IVPU_MMU_EVTQ_CMD_SIZE		32
#define IVPU_MMU_EVTQ_SIZE		(IVPU_MMU_Q_COUNT * IVPU_MMU_EVTQ_CMD_SIZE)

#define IVPU_MMU_CMD_OPCODE		GENMASK(7, 0)

#define IVPU_MMU_CMD_SYNC_0_CS		GENMASK(13, 12)
#define IVPU_MMU_CMD_SYNC_0_MSH		GENMASK(23, 22)
#define IVPU_MMU_CMD_SYNC_0_MSI_ATTR	GENMASK(27, 24)
#define IVPU_MMU_CMD_SYNC_0_MSI_DATA	GENMASK(63, 32)

#define IVPU_MMU_CMD_CFGI_0_SSEC	BIT(10)
#define IVPU_MMU_CMD_CFGI_0_SSV		BIT(11)
#define IVPU_MMU_CMD_CFGI_0_SSID	GENMASK(31, 12)
#define IVPU_MMU_CMD_CFGI_0_SID		GENMASK(63, 32)
#define IVPU_MMU_CMD_CFGI_1_RANGE	GENMASK(4, 0)

#define IVPU_MMU_CMD_TLBI_0_ASID	GENMASK(63, 48)
#define IVPU_MMU_CMD_TLBI_0_VMID	GENMASK(47, 32)

#define CMD_PREFETCH_CFG		0x1
#define CMD_CFGI_STE			0x3
#define CMD_CFGI_ALL			0x4
#define CMD_CFGI_CD			0x5
#define CMD_CFGI_CD_ALL			0x6
#define CMD_TLBI_NH_ASID		0x11
#define CMD_TLBI_EL2_ALL		0x20
#define CMD_TLBI_NSNH_ALL		0x30
#define CMD_SYNC			0x46
#define IVPU_MMU_EVT_F_UUT		0x01
#define IVPU_MMU_EVT_C_BAD_STREAMID	0x02
#define IVPU_MMU_EVT_F_STE_FETCH	0x03
#define IVPU_MMU_EVT_C_BAD_STE		0x04
#define IVPU_MMU_EVT_F_BAD_ATS_TREQ	0x05
#define IVPU_MMU_EVT_F_STREAM_DISABLED	0x06
#define IVPU_MMU_EVT_F_TRANSL_FORBIDDEN	0x07
#define IVPU_MMU_EVT_C_BAD_SUBSTREAMID	0x08
#define IVPU_MMU_EVT_F_CD_FETCH		0x09
#define IVPU_MMU_EVT_C_BAD_CD		0x0a
#define IVPU_MMU_EVT_F_WALK_EABT	0x0b
#define IVPU_MMU_EVT_F_TRANSLATION	0x10
#define IVPU_MMU_EVT_F_ADDR_SIZE	0x11
#define IVPU_MMU_EVT_F_ACCESS		0x12
#define IVPU_MMU_EVT_F_PERMISSION	0x13
#define IVPU_MMU_EVT_F_TLB_CONFLICT	0x20
#define IVPU_MMU_EVT_F_CFG_CONFLICT	0x21
#define IVPU_MMU_EVT_E_PAGE_REQUEST	0x24
#define IVPU_MMU_EVT_F_VMS_FETCH	0x25

#define IVPU_MMU_EVT_OP_MASK		GENMASK_ULL(7, 0)
#define IVPU_MMU_EVT_SSID_MASK		GENMASK_ULL(31, 12)
#define IVPU_MMU_Q_BASE_RWA		BIT(62)
#define IVPU_MMU_Q_BASE_ADDR_MASK	GENMASK_ULL(51, 5)
#define IVPU_MMU_STRTAB_BASE_RA		BIT(62)
#define IVPU_MMU_STRTAB_BASE_ADDR_MASK	GENMASK_ULL(51, 6)

#define IVPU_MMU_IRQ_EVTQ_EN		BIT(2)
#define IVPU_MMU_IRQ_GERROR_EN		BIT(0)

#define IVPU_MMU_CR0_ATSCHK		BIT(4)
#define IVPU_MMU_CR0_CMDQEN		BIT(3)
#define IVPU_MMU_CR0_EVTQEN		BIT(2)
#define IVPU_MMU_CR0_PRIQEN		BIT(1)
#define IVPU_MMU_CR0_SMMUEN		BIT(0)

#define IVPU_MMU_CR1_TABLE_SH		GENMASK(11, 10)
#define IVPU_MMU_CR1_TABLE_OC		GENMASK(9, 8)
#define IVPU_MMU_CR1_TABLE_IC		GENMASK(7, 6)
#define IVPU_MMU_CR1_QUEUE_SH		GENMASK(5, 4)
#define IVPU_MMU_CR1_QUEUE_OC		GENMASK(3, 2)
#define IVPU_MMU_CR1_QUEUE_IC		GENMASK(1, 0)
#define IVPU_MMU_CACHE_NC		0
#define IVPU_MMU_CACHE_WB		1
#define IVPU_MMU_CACHE_WT		2
#define IVPU_MMU_SH_NSH			0
#define IVPU_MMU_SH_OSH			2
#define IVPU_MMU_SH_ISH			3
#define IVPU_MMU_CMDQ_OP		GENMASK_ULL(7, 0)

#define IVPU_MMU_CD_0_TCR_T0SZ		GENMASK_ULL(5, 0)
#define IVPU_MMU_CD_0_TCR_TG0		GENMASK_ULL(7, 6)
#define IVPU_MMU_CD_0_TCR_IRGN0		GENMASK_ULL(9, 8)
#define IVPU_MMU_CD_0_TCR_ORGN0		GENMASK_ULL(11, 10)
#define IVPU_MMU_CD_0_TCR_SH0		GENMASK_ULL(13, 12)
#define IVPU_MMU_CD_0_TCR_EPD0		BIT_ULL(14)
#define IVPU_MMU_CD_0_TCR_EPD1		BIT_ULL(30)
#define IVPU_MMU_CD_0_ENDI		BIT(15)
#define IVPU_MMU_CD_0_V			BIT(31)
#define IVPU_MMU_CD_0_TCR_IPS		GENMASK_ULL(34, 32)
#define IVPU_MMU_CD_0_TCR_TBI0		BIT_ULL(38)
#define IVPU_MMU_CD_0_AA64		BIT(41)
#define IVPU_MMU_CD_0_S			BIT(44)
#define IVPU_MMU_CD_0_R			BIT(45)
#define IVPU_MMU_CD_0_A			BIT(46)
#define IVPU_MMU_CD_0_ASET		BIT(47)
#define IVPU_MMU_CD_0_ASID		GENMASK_ULL(63, 48)

#define IVPU_MMU_T0SZ_48BIT		16
#define IVPU_MMU_T0SZ_38BIT		26

#define IVPU_MMU_IPS_48BIT		5
#define IVPU_MMU_IPS_44BIT		4
#define IVPU_MMU_IPS_42BIT		3
#define IVPU_MMU_IPS_40BIT		2
#define IVPU_MMU_IPS_36BIT		1
#define IVPU_MMU_IPS_32BIT		0
#define IVPU_MMU_CD_1_TTB0_MASK		GENMASK_ULL(51, 4)

#define IVPU_MMU_STE_0_S1CDMAX		GENMASK_ULL(63, 59)
#define IVPU_MMU_STE_0_S1FMT		GENMASK_ULL(5, 4)
#define IVPU_MMU_STE_0_S1FMT_LINEAR	0
#define IVPU_MMU_STE_DWORDS		8
#define IVPU_MMU_STE_0_CFG_S1_TRANS	5
#define IVPU_MMU_STE_0_CFG		GENMASK_ULL(3, 1)
#define IVPU_MMU_STE_0_S1CTXPTR_MASK	GENMASK_ULL(51, 6)
#define IVPU_MMU_STE_0_V		BIT(0)

#define IVPU_MMU_STE_1_STRW_NSEL1	0ul
#define IVPU_MMU_STE_1_CONT		GENMASK_ULL(16, 13)
#define IVPU_MMU_STE_1_STRW		GENMASK_ULL(31, 30)
#define IVPU_MMU_STE_1_PRIVCFG		GENMASK_ULL(49, 48)
#define IVPU_MMU_STE_1_PRIVCFG_UNPRIV	2ul
#define IVPU_MMU_STE_1_INSTCFG		GENMASK_ULL(51, 50)
#define IVPU_MMU_STE_1_INSTCFG_DATA	2ul
#define IVPU_MMU_STE_1_MEV		BIT(19)
#define IVPU_MMU_STE_1_S1STALLD		BIT(27)
#define IVPU_MMU_STE_1_S1C_CACHE_NC	0ul
#define IVPU_MMU_STE_1_S1C_CACHE_WBRA	1ul
#define IVPU_MMU_STE_1_S1C_CACHE_WT	2ul
#define IVPU_MMU_STE_1_S1C_CACHE_WB	3ul
#define IVPU_MMU_STE_1_S1CIR		GENMASK_ULL(3, 2)
#define IVPU_MMU_STE_1_S1COR		GENMASK_ULL(5, 4)
#define IVPU_MMU_STE_1_S1CSH		GENMASK_ULL(7, 6)
#define IVPU_MMU_STE_1_S1DSS		GENMASK_ULL(1, 0)
#define IVPU_MMU_STE_1_S1DSS_TERMINATE	0x0
#define IVPU_MMU_REG_TIMEOUT_US		(10 * USEC_PER_MSEC)
#define IVPU_MMU_QUEUE_TIMEOUT_US	(100 * USEC_PER_MSEC)

#define IVPU_MMU_GERROR_ERR_MASK ((REG_FLD(IVPU_MMU_REG_GERROR, CMDQ)) | \
				  (REG_FLD(IVPU_MMU_REG_GERROR, EVTQ_ABT)) | \
				  (REG_FLD(IVPU_MMU_REG_GERROR, PRIQ_ABT)) | \
				  (REG_FLD(IVPU_MMU_REG_GERROR, MSI_CMDQ_ABT)) | \
				  (REG_FLD(IVPU_MMU_REG_GERROR, MSI_EVTQ_ABT)) | \
				  (REG_FLD(IVPU_MMU_REG_GERROR, MSI_PRIQ_ABT)) | \
				  (REG_FLD(IVPU_MMU_REG_GERROR, MSI_ABT)))

#define IVPU_MMU_CERROR_NONE		0x0
#define IVPU_MMU_CERROR_ILL		0x1
#define IVPU_MMU_CERROR_ABT		0x2
#define IVPU_MMU_CERROR_ATC_INV_SYNC	0x3
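
/*
 * On a faulty command the hardware stops consuming the command queue and
 * reports one of the CERROR codes above in the ERR field of
 * IVPU_MMU_REG_CMDQ_CONS (IVPU_MMU_REG_CMDQ_CONS_ERR_MASK).
 */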
static const char *ivpu_mmu_event_to_str(u32 cmd)
{
	switch (cmd) {
	case IVPU_MMU_EVT_F_UUT:
		return "Unsupported Upstream Transaction";
	case IVPU_MMU_EVT_C_BAD_STREAMID:
		return "Transaction StreamID out of range";
	case IVPU_MMU_EVT_F_STE_FETCH:
		return "Fetch of STE caused external abort";
	case IVPU_MMU_EVT_C_BAD_STE:
		return "Used STE invalid";
	case IVPU_MMU_EVT_F_BAD_ATS_TREQ:
		return "Address Request disallowed for a StreamID";
	case IVPU_MMU_EVT_F_STREAM_DISABLED:
		return "Transaction marks non-substream disabled";
	case IVPU_MMU_EVT_F_TRANSL_FORBIDDEN:
		return "MMU bypass is disallowed for this StreamID";
	case IVPU_MMU_EVT_C_BAD_SUBSTREAMID:
		return "Invalid StreamID";
	case IVPU_MMU_EVT_F_CD_FETCH:
		return "Fetch of CD caused external abort";
	case IVPU_MMU_EVT_C_BAD_CD:
		return "Fetched CD invalid";
	case IVPU_MMU_EVT_F_WALK_EABT:
		return "An external abort occurred fetching a TLB";
	case IVPU_MMU_EVT_F_TRANSLATION:
		return "Translation fault";
	case IVPU_MMU_EVT_F_ADDR_SIZE:
		return "Output address caused address size fault";
	case IVPU_MMU_EVT_F_ACCESS:
		return "Access flag fault";
	case IVPU_MMU_EVT_F_PERMISSION:
		return "Permission fault occurred on page access";
	case IVPU_MMU_EVT_F_TLB_CONFLICT:
		return "A TLB conflict";
	case IVPU_MMU_EVT_F_CFG_CONFLICT:
		return "A configuration cache conflict";
	case IVPU_MMU_EVT_E_PAGE_REQUEST:
		return "Page request hint from a client device";
	case IVPU_MMU_EVT_F_VMS_FETCH:
		return "Fetch of VMS caused external abort";
	default:
		return "Unknown event";
	}
}
static const char *ivpu_mmu_cmdq_err_to_str(u32 err)
{
	switch (err) {
	case IVPU_MMU_CERROR_NONE:
		return "No error";
	case IVPU_MMU_CERROR_ILL:
		return "Illegal command";
	case IVPU_MMU_CERROR_ABT:
		return "External abort on command queue read";
	case IVPU_MMU_CERROR_ATC_INV_SYNC:
		return "Sync failed to complete ATS invalidation";
	default:
		return "Unknown error";
	}
}
static void ivpu_mmu_config_check(struct ivpu_device *vdev)
{
	u32 val_ref;
	u32 val;

	if (ivpu_is_simics(vdev))
		val_ref = IVPU_MMU_IDR0_REF_SIMICS;
	else
		val_ref = IVPU_MMU_IDR0_REF;

	val = REGV_RD32(IVPU_MMU_REG_IDR0);
	if (val != val_ref)
		ivpu_dbg(vdev, MMU, "IDR0 0x%x != IDR0_REF 0x%x\n", val, val_ref);

	val = REGV_RD32(IVPU_MMU_REG_IDR1);
	if (val != IVPU_MMU_IDR1_REF)
		ivpu_dbg(vdev, MMU, "IDR1 0x%x != IDR1_REF 0x%x\n", val, IVPU_MMU_IDR1_REF);

	val = REGV_RD32(IVPU_MMU_REG_IDR3);
	if (val != IVPU_MMU_IDR3_REF)
		ivpu_dbg(vdev, MMU, "IDR3 0x%x != IDR3_REF 0x%x\n", val, IVPU_MMU_IDR3_REF);

	if (ivpu_is_simics(vdev))
		val_ref = IVPU_MMU_IDR5_REF_SIMICS;
	else if (ivpu_is_fpga(vdev))
		val_ref = IVPU_MMU_IDR5_REF_FPGA;
	else
		val_ref = IVPU_MMU_IDR5_REF;

	val = REGV_RD32(IVPU_MMU_REG_IDR5);
	if (val != val_ref)
		ivpu_dbg(vdev, MMU, "IDR5 0x%x != IDR5_REF 0x%x\n", val, val_ref);
}
static int ivpu_mmu_cdtab_alloc(struct ivpu_device *vdev)
{
	struct ivpu_mmu_info *mmu = vdev->mmu;
	struct ivpu_mmu_cdtab *cdtab = &mmu->cdtab;
	size_t size = IVPU_MMU_CDTAB_ENT_COUNT * IVPU_MMU_CDTAB_ENT_SIZE;

	cdtab->base = dmam_alloc_coherent(vdev->drm.dev, size, &cdtab->dma, GFP_KERNEL);
	if (!cdtab->base)
		return -ENOMEM;

	ivpu_dbg(vdev, MMU, "CDTAB alloc: dma=%pad size=%zu\n", &cdtab->dma, size);

	return 0;
}
static int ivpu_mmu_strtab_alloc(struct ivpu_device *vdev)
{
	struct ivpu_mmu_info *mmu = vdev->mmu;
	struct ivpu_mmu_strtab *strtab = &mmu->strtab;
	size_t size = IVPU_MMU_STRTAB_ENT_COUNT * IVPU_MMU_STRTAB_ENT_SIZE;

	strtab->base = dmam_alloc_coherent(vdev->drm.dev, size, &strtab->dma, GFP_KERNEL);
	if (!strtab->base)
		return -ENOMEM;

	strtab->base_cfg = IVPU_MMU_STRTAB_CFG;
	strtab->dma_q = IVPU_MMU_STRTAB_BASE_RA;
	strtab->dma_q |= strtab->dma & IVPU_MMU_STRTAB_BASE_ADDR_MASK;

	ivpu_dbg(vdev, MMU, "STRTAB alloc: dma=%pad dma_q=%pad size=%zu\n",
		 &strtab->dma, &strtab->dma_q, size);

	return 0;
}
static int ivpu_mmu_cmdq_alloc(struct ivpu_device *vdev)
{
	struct ivpu_mmu_info *mmu = vdev->mmu;
	struct ivpu_mmu_queue *q = &mmu->cmdq;

	q->base = dmam_alloc_coherent(vdev->drm.dev, IVPU_MMU_CMDQ_SIZE, &q->dma, GFP_KERNEL);
	if (!q->base)
		return -ENOMEM;

	q->dma_q = IVPU_MMU_Q_BASE_RWA;
	q->dma_q |= q->dma & IVPU_MMU_Q_BASE_ADDR_MASK;
	q->dma_q |= IVPU_MMU_Q_COUNT_LOG2;

	ivpu_dbg(vdev, MMU, "CMDQ alloc: dma=%pad dma_q=%pad size=%u\n",
		 &q->dma, &q->dma_q, IVPU_MMU_CMDQ_SIZE);

	return 0;
}
static int ivpu_mmu_evtq_alloc(struct ivpu_device *vdev)
{
	struct ivpu_mmu_info *mmu = vdev->mmu;
	struct ivpu_mmu_queue *q = &mmu->evtq;

	q->base = dmam_alloc_coherent(vdev->drm.dev, IVPU_MMU_EVTQ_SIZE, &q->dma, GFP_KERNEL);
	if (!q->base)
		return -ENOMEM;

	q->dma_q = IVPU_MMU_Q_BASE_RWA;
	q->dma_q |= q->dma & IVPU_MMU_Q_BASE_ADDR_MASK;
	q->dma_q |= IVPU_MMU_Q_COUNT_LOG2;

	ivpu_dbg(vdev, MMU, "EVTQ alloc: dma=%pad dma_q=%pad size=%u\n",
		 &q->dma, &q->dma_q, IVPU_MMU_EVTQ_SIZE);

	return 0;
}
static int ivpu_mmu_structs_alloc(struct ivpu_device *vdev)
{
	int ret;

	ret = ivpu_mmu_cdtab_alloc(vdev);
	if (ret) {
		ivpu_err(vdev, "Failed to allocate cdtab: %d\n", ret);
		return ret;
	}

	ret = ivpu_mmu_strtab_alloc(vdev);
	if (ret) {
		ivpu_err(vdev, "Failed to allocate strtab: %d\n", ret);
		return ret;
	}

	ret = ivpu_mmu_cmdq_alloc(vdev);
	if (ret) {
		ivpu_err(vdev, "Failed to allocate cmdq: %d\n", ret);
		return ret;
	}

	ret = ivpu_mmu_evtq_alloc(vdev);
	if (ret)
		ivpu_err(vdev, "Failed to allocate evtq: %d\n", ret);

	return ret;
}
static int ivpu_mmu_reg_write_cr0(struct ivpu_device *vdev, u32 val)
{
	REGV_WR32(IVPU_MMU_REG_CR0, val);

	return REGV_POLL_FLD(IVPU_MMU_REG_CR0ACK, VAL, val, IVPU_MMU_REG_TIMEOUT_US);
}

static int ivpu_mmu_reg_write_irq_ctrl(struct ivpu_device *vdev, u32 val)
{
	REGV_WR32(IVPU_MMU_REG_IRQ_CTRL, val);

	return REGV_POLL_FLD(IVPU_MMU_REG_IRQ_CTRLACK, VAL, val, IVPU_MMU_REG_TIMEOUT_US);
}
static int ivpu_mmu_irqs_setup(struct ivpu_device *vdev)
{
	u32 irq_ctrl = IVPU_MMU_IRQ_EVTQ_EN | IVPU_MMU_IRQ_GERROR_EN;
	int ret;

	ret = ivpu_mmu_reg_write_irq_ctrl(vdev, 0);
	if (ret)
		return ret;

	return ivpu_mmu_reg_write_irq_ctrl(vdev, irq_ctrl);
}
static int ivpu_mmu_cmdq_wait_for_cons(struct ivpu_device *vdev)
{
	struct ivpu_mmu_queue *cmdq = &vdev->mmu->cmdq;
	int ret;

	ret = REGV_POLL_FLD(IVPU_MMU_REG_CMDQ_CONS, VAL, cmdq->prod,
			    IVPU_MMU_QUEUE_TIMEOUT_US);
	if (ret)
		return ret;

	cmdq->cons = cmdq->prod;

	return 0;
}
static bool ivpu_mmu_queue_is_full(struct ivpu_mmu_queue *q)
{
	return ((IVPU_MMU_Q_IDX(q->prod) == IVPU_MMU_Q_IDX(q->cons)) &&
		(IVPU_MMU_Q_WRP(q->prod) != IVPU_MMU_Q_WRP(q->cons)));
}

static bool ivpu_mmu_queue_is_empty(struct ivpu_mmu_queue *q)
{
	return ((IVPU_MMU_Q_IDX(q->prod) == IVPU_MMU_Q_IDX(q->cons)) &&
		(IVPU_MMU_Q_WRP(q->prod) == IVPU_MMU_Q_WRP(q->cons)));
}
static int ivpu_mmu_cmdq_cmd_write(struct ivpu_device *vdev, const char *name, u64 data0,
				   u64 data1)
{
	struct ivpu_mmu_queue *cmdq = &vdev->mmu->cmdq;
	u64 *queue_buffer = cmdq->base;
	int idx = IVPU_MMU_Q_IDX(cmdq->prod) * (IVPU_MMU_CMDQ_CMD_SIZE / sizeof(*queue_buffer));

	if (ivpu_mmu_queue_is_full(cmdq)) {
		ivpu_err(vdev, "Failed to write MMU CMD %s\n", name);
		return -EBUSY;
	}

	queue_buffer[idx] = data0;
	queue_buffer[idx + 1] = data1;
	cmdq->prod = (cmdq->prod + 1) & IVPU_MMU_Q_WRAP_MASK;

	ivpu_dbg(vdev, MMU, "CMD write: %s data: 0x%llx 0x%llx\n", name, data0, data1);

	return 0;
}
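
/*
 * Queue a CMD_SYNC, publish the new producer pointer to the device and poll
 * the consumer pointer until it catches up. CMD_SYNC completes only after all
 * previously submitted commands have completed, so this acts as a barrier for
 * the whole command queue.
 */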
static int ivpu_mmu_cmdq_sync(struct ivpu_device *vdev)
{
	struct ivpu_mmu_queue *q = &vdev->mmu->cmdq;
	u64 val;
	int ret;

	val = FIELD_PREP(IVPU_MMU_CMD_OPCODE, CMD_SYNC);

	ret = ivpu_mmu_cmdq_cmd_write(vdev, "SYNC", val, 0);
	if (ret)
		return ret;

	if (!ivpu_is_force_snoop_enabled(vdev))
		clflush_cache_range(q->base, IVPU_MMU_CMDQ_SIZE);
	REGV_WR32(IVPU_MMU_REG_CMDQ_PROD, q->prod);

	ret = ivpu_mmu_cmdq_wait_for_cons(vdev);
	if (ret) {
		u32 err;

		val = REGV_RD32(IVPU_MMU_REG_CMDQ_CONS);
		err = REG_GET_FLD(IVPU_MMU_REG_CMDQ_CONS, ERR, val);

		ivpu_err(vdev, "Timed out waiting for MMU consumer: %d, error: %s\n", ret,
			 ivpu_mmu_cmdq_err_to_str(err));
		ivpu_hw_diagnose_failure(vdev);
	}

	return ret;
}
static int ivpu_mmu_cmdq_write_cfgi_all(struct ivpu_device *vdev)
{
	u64 data0 = FIELD_PREP(IVPU_MMU_CMD_OPCODE, CMD_CFGI_ALL);
	u64 data1 = FIELD_PREP(IVPU_MMU_CMD_CFGI_1_RANGE, 0x1f);

	return ivpu_mmu_cmdq_cmd_write(vdev, "CFGI_ALL", data0, data1);
}

static int ivpu_mmu_cmdq_write_tlbi_nh_asid(struct ivpu_device *vdev, u16 ssid)
{
	u64 val = FIELD_PREP(IVPU_MMU_CMD_OPCODE, CMD_TLBI_NH_ASID) |
		  FIELD_PREP(IVPU_MMU_CMD_TLBI_0_ASID, ssid);

	return ivpu_mmu_cmdq_cmd_write(vdev, "TLBI_NH_ASID", val, 0);
}

static int ivpu_mmu_cmdq_write_tlbi_nsnh_all(struct ivpu_device *vdev)
{
	u64 val = FIELD_PREP(IVPU_MMU_CMD_OPCODE, CMD_TLBI_NSNH_ALL);

	return ivpu_mmu_cmdq_cmd_write(vdev, "TLBI_NSNH_ALL", val, 0);
}
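
/*
 * Bring the MMU to a known-good state: clear both queues, disable the MMU via
 * CR0, program table/queue cacheability attributes in CR1 and the table/queue
 * base registers, then step-wise enable the command queue, invalidate cached
 * configuration and TLBs, enable the event queue, ATS checking and IRQs, and
 * finally set SMMUEN. Every CR0 write is acknowledged by the hardware through
 * CR0ACK before the next step proceeds.
 */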
static int ivpu_mmu_reset(struct ivpu_device *vdev)
{
	struct ivpu_mmu_info *mmu = vdev->mmu;
	u32 val;
	int ret;

	memset(mmu->cmdq.base, 0, IVPU_MMU_CMDQ_SIZE);
	if (!ivpu_is_force_snoop_enabled(vdev))
		clflush_cache_range(mmu->cmdq.base, IVPU_MMU_CMDQ_SIZE);
	mmu->cmdq.prod = 0;
	mmu->cmdq.cons = 0;

	memset(mmu->evtq.base, 0, IVPU_MMU_EVTQ_SIZE);
	mmu->evtq.prod = 0;
	mmu->evtq.cons = 0;

	ret = ivpu_mmu_reg_write_cr0(vdev, 0);
	if (ret)
		return ret;

	val = FIELD_PREP(IVPU_MMU_CR1_TABLE_SH, IVPU_MMU_SH_ISH) |
	      FIELD_PREP(IVPU_MMU_CR1_TABLE_OC, IVPU_MMU_CACHE_WB) |
	      FIELD_PREP(IVPU_MMU_CR1_TABLE_IC, IVPU_MMU_CACHE_WB) |
	      FIELD_PREP(IVPU_MMU_CR1_QUEUE_SH, IVPU_MMU_SH_ISH) |
	      FIELD_PREP(IVPU_MMU_CR1_QUEUE_OC, IVPU_MMU_CACHE_WB) |
	      FIELD_PREP(IVPU_MMU_CR1_QUEUE_IC, IVPU_MMU_CACHE_WB);
	REGV_WR32(IVPU_MMU_REG_CR1, val);

	REGV_WR64(IVPU_MMU_REG_STRTAB_BASE, mmu->strtab.dma_q);
	REGV_WR32(IVPU_MMU_REG_STRTAB_BASE_CFG, mmu->strtab.base_cfg);

	REGV_WR64(IVPU_MMU_REG_CMDQ_BASE, mmu->cmdq.dma_q);
	REGV_WR32(IVPU_MMU_REG_CMDQ_PROD, 0);
	REGV_WR32(IVPU_MMU_REG_CMDQ_CONS, 0);

	val = IVPU_MMU_CR0_CMDQEN;
	ret = ivpu_mmu_reg_write_cr0(vdev, val);
	if (ret)
		return ret;

	ret = ivpu_mmu_cmdq_write_cfgi_all(vdev);
	if (ret)
		return ret;

	ret = ivpu_mmu_cmdq_write_tlbi_nsnh_all(vdev);
	if (ret)
		return ret;

	ret = ivpu_mmu_cmdq_sync(vdev);
	if (ret)
		return ret;

	REGV_WR64(IVPU_MMU_REG_EVTQ_BASE, mmu->evtq.dma_q);
	REGV_WR32(IVPU_MMU_REG_EVTQ_PROD_SEC, 0);
	REGV_WR32(IVPU_MMU_REG_EVTQ_CONS_SEC, 0);

	val |= IVPU_MMU_CR0_EVTQEN;
	ret = ivpu_mmu_reg_write_cr0(vdev, val);
	if (ret)
		return ret;

	val |= IVPU_MMU_CR0_ATSCHK;
	ret = ivpu_mmu_reg_write_cr0(vdev, val);
	if (ret)
		return ret;

	ret = ivpu_mmu_irqs_setup(vdev);
	if (ret)
		return ret;

	val |= IVPU_MMU_CR0_SMMUEN;
	return ivpu_mmu_reg_write_cr0(vdev, val);
}
static void ivpu_mmu_strtab_link_cd(struct ivpu_device *vdev, u32 sid)
{
	struct ivpu_mmu_info *mmu = vdev->mmu;
	struct ivpu_mmu_strtab *strtab = &mmu->strtab;
	struct ivpu_mmu_cdtab *cdtab = &mmu->cdtab;
	u64 *entry = strtab->base + (sid * IVPU_MMU_STRTAB_ENT_SIZE);
	u64 str[2];

	str[0] = FIELD_PREP(IVPU_MMU_STE_0_CFG, IVPU_MMU_STE_0_CFG_S1_TRANS) |
		 FIELD_PREP(IVPU_MMU_STE_0_S1CDMAX, IVPU_MMU_CDTAB_ENT_COUNT_LOG2) |
		 FIELD_PREP(IVPU_MMU_STE_0_S1FMT, IVPU_MMU_STE_0_S1FMT_LINEAR) |
		 IVPU_MMU_STE_0_V |
		 (cdtab->dma & IVPU_MMU_STE_0_S1CTXPTR_MASK);

	str[1] = FIELD_PREP(IVPU_MMU_STE_1_S1DSS, IVPU_MMU_STE_1_S1DSS_TERMINATE) |
		 FIELD_PREP(IVPU_MMU_STE_1_S1CIR, IVPU_MMU_STE_1_S1C_CACHE_NC) |
		 FIELD_PREP(IVPU_MMU_STE_1_S1COR, IVPU_MMU_STE_1_S1C_CACHE_NC) |
		 FIELD_PREP(IVPU_MMU_STE_1_S1CSH, IVPU_MMU_SH_NSH) |
		 FIELD_PREP(IVPU_MMU_STE_1_PRIVCFG, IVPU_MMU_STE_1_PRIVCFG_UNPRIV) |
		 FIELD_PREP(IVPU_MMU_STE_1_INSTCFG, IVPU_MMU_STE_1_INSTCFG_DATA) |
		 FIELD_PREP(IVPU_MMU_STE_1_STRW, IVPU_MMU_STE_1_STRW_NSEL1) |
		 FIELD_PREP(IVPU_MMU_STE_1_CONT, IVPU_MMU_STRTAB_CFG_LOG2SIZE) |
		 IVPU_MMU_STE_1_MEV |
		 IVPU_MMU_STE_1_S1STALLD;
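
	/*
	 * entry[0] carries the STE valid bit (IVPU_MMU_STE_0_V), so the second
	 * dword is written first and the dword with the valid bit last; this
	 * way the device can never observe a valid but half-written entry.
	 */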
	WRITE_ONCE(entry[1], str[1]);
	WRITE_ONCE(entry[0], str[0]);

	if (!ivpu_is_force_snoop_enabled(vdev))
		clflush_cache_range(entry, IVPU_MMU_STRTAB_ENT_SIZE);

	ivpu_dbg(vdev, MMU, "STRTAB write entry (SID=%u): 0x%llx, 0x%llx\n", sid, str[0], str[1]);
}
static int ivpu_mmu_strtab_init(struct ivpu_device *vdev)
{
	ivpu_mmu_strtab_link_cd(vdev, IVPU_MMU_STREAM_ID0);
	ivpu_mmu_strtab_link_cd(vdev, IVPU_MMU_STREAM_ID3);

	return 0;
}
int ivpu_mmu_invalidate_tlb(struct ivpu_device *vdev, u16 ssid)
{
	struct ivpu_mmu_info *mmu = vdev->mmu;
	int ret = 0;

	mutex_lock(&mmu->lock);
	if (!mmu->on)
		goto unlock;

	ret = ivpu_mmu_cmdq_write_tlbi_nh_asid(vdev, ssid);
	if (ret)
		goto unlock;

	ret = ivpu_mmu_cmdq_sync(vdev);
unlock:
	mutex_unlock(&mmu->lock);
	return ret;
}
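
/*
 * Write (or clear, when @valid is false) the context descriptor for @ssid,
 * pointing it at the page-table base @cd_dma, then issue CFGI_ALL + CMD_SYNC
 * so the device drops any cached copy of the old descriptor.
 */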
static int ivpu_mmu_cdtab_entry_set(struct ivpu_device *vdev, u32 ssid, u64 cd_dma, bool valid)
{
	struct ivpu_mmu_info *mmu = vdev->mmu;
	struct ivpu_mmu_cdtab *cdtab = &mmu->cdtab;
	u64 *entry;
	u64 cd[4];
	int ret = 0;

	if (ssid > IVPU_MMU_CDTAB_ENT_COUNT)
		return -EINVAL;

	entry = cdtab->base + (ssid * IVPU_MMU_CDTAB_ENT_SIZE);
	drm_WARN_ON(&vdev->drm, (entry[0] & IVPU_MMU_CD_0_V) == valid);

	cd[0] = FIELD_PREP(IVPU_MMU_CD_0_TCR_T0SZ, IVPU_MMU_T0SZ_48BIT) |
		FIELD_PREP(IVPU_MMU_CD_0_TCR_TG0, 0) |
		FIELD_PREP(IVPU_MMU_CD_0_TCR_IRGN0, 0) |
		FIELD_PREP(IVPU_MMU_CD_0_TCR_ORGN0, 0) |
		FIELD_PREP(IVPU_MMU_CD_0_TCR_SH0, 0) |
		FIELD_PREP(IVPU_MMU_CD_0_TCR_IPS, IVPU_MMU_IPS_48BIT) |
		FIELD_PREP(IVPU_MMU_CD_0_ASID, ssid) |
		IVPU_MMU_CD_0_TCR_EPD1 |
		IVPU_MMU_CD_0_AA64 |
		IVPU_MMU_CD_0_R |
		IVPU_MMU_CD_0_ASET;

	cd[1] = cd_dma & IVPU_MMU_CD_1_TTB0_MASK;
	cd[2] = 0;
	cd[3] = 0x0000000000007444;
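
	/*
	 * cd[3] is the MAIR word of the context descriptor: the memory
	 * attribute encodings referenced by the AttrIndx fields in the
	 * page-table entries (the 0x7444 value is taken as-is from the
	 * hardware configuration).
	 */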
	/* For global context generate memory fault on VPU */
	if (ssid == IVPU_GLOBAL_CONTEXT_MMU_SSID)
		cd[0] |= IVPU_MMU_CD_0_A;

	if (valid)
		cd[0] |= IVPU_MMU_CD_0_V;

	WRITE_ONCE(entry[1], cd[1]);
	WRITE_ONCE(entry[2], cd[2]);
	WRITE_ONCE(entry[3], cd[3]);
	/* Write the dword carrying the valid bit last, as for the STE */
	WRITE_ONCE(entry[0], cd[0]);

	if (!ivpu_is_force_snoop_enabled(vdev))
		clflush_cache_range(entry, IVPU_MMU_CDTAB_ENT_SIZE);

	ivpu_dbg(vdev, MMU, "CDTAB set %s entry (SSID=%u, dma=%pad): 0x%llx, 0x%llx, 0x%llx, 0x%llx\n",
		 valid ? "valid" : "invalid", ssid, &cd_dma, cd[0], cd[1], cd[2], cd[3]);

	mutex_lock(&mmu->lock);
	if (!mmu->on)
		goto unlock;

	ret = ivpu_mmu_cmdq_write_cfgi_all(vdev);
	if (ret)
		goto err_invalidate;

	ret = ivpu_mmu_cmdq_sync(vdev);
	if (ret)
		goto err_invalidate;

unlock:
	mutex_unlock(&mmu->lock);
	return 0;

err_invalidate:
	WRITE_ONCE(entry[0], 0);
	mutex_unlock(&mmu->lock);
	return ret;
}
int ivpu_mmu_init(struct ivpu_device *vdev)
{
	struct ivpu_mmu_info *mmu = vdev->mmu;
	int ret;

	ivpu_dbg(vdev, MMU, "Init..\n");

	ivpu_mmu_config_check(vdev);

	ret = drmm_mutex_init(&vdev->drm, &mmu->lock);
	if (ret)
		return ret;

	ret = ivpu_mmu_structs_alloc(vdev);
	if (ret)
		return ret;

	ret = ivpu_mmu_strtab_init(vdev);
	if (ret) {
		ivpu_err(vdev, "Failed to initialize strtab: %d\n", ret);
		return ret;
	}

	ret = ivpu_mmu_enable(vdev);
	if (ret) {
		ivpu_err(vdev, "Failed to resume MMU: %d\n", ret);
		return ret;
	}

	ivpu_dbg(vdev, MMU, "Init done\n");

	return 0;
}
int ivpu_mmu_enable(struct ivpu_device *vdev)
{
	struct ivpu_mmu_info *mmu = vdev->mmu;
	int ret;

	mutex_lock(&mmu->lock);

	mmu->on = true;

	ret = ivpu_mmu_reset(vdev);
	if (ret) {
		ivpu_err(vdev, "Failed to reset MMU: %d\n", ret);
		goto err;
	}

	ret = ivpu_mmu_cmdq_write_cfgi_all(vdev);
	if (ret)
		goto err;

	ret = ivpu_mmu_cmdq_write_tlbi_nsnh_all(vdev);
	if (ret)
		goto err;

	ret = ivpu_mmu_cmdq_sync(vdev);
	if (ret)
		goto err;

	mutex_unlock(&mmu->lock);

	return 0;
err:
	mmu->on = false;
	mutex_unlock(&mmu->lock);

	return ret;
}
void ivpu_mmu_disable(struct ivpu_device *vdev)
{
	struct ivpu_mmu_info *mmu = vdev->mmu;

	mutex_lock(&mmu->lock);
	mmu->on = false;
	mutex_unlock(&mmu->lock);
}
static void ivpu_mmu_dump_event(struct ivpu_device *vdev, u32 *event)
{
	u32 ssid = FIELD_GET(IVPU_MMU_EVT_SSID_MASK, event[0]);
	u32 op = FIELD_GET(IVPU_MMU_EVT_OP_MASK, event[0]);
	u64 fetch_addr = ((u64)event[7]) << 32 | event[6];
	u64 in_addr = ((u64)event[5]) << 32 | event[4];
	u32 sid = event[1];

	ivpu_err_ratelimited(vdev, "MMU EVTQ: 0x%x (%s) SSID: %d SID: %d, e[2] %08x, e[3] %08x, in addr: 0x%llx, fetch addr: 0x%llx\n",
			     op, ivpu_mmu_event_to_str(op), ssid, sid,
			     event[2], event[3], in_addr, fetch_addr);
}
static u32 *ivpu_mmu_get_event(struct ivpu_device *vdev)
{
	struct ivpu_mmu_queue *evtq = &vdev->mmu->evtq;
	u32 idx = IVPU_MMU_Q_IDX(evtq->cons);
	u32 *evt = evtq->base + (idx * IVPU_MMU_EVTQ_CMD_SIZE);

	evtq->prod = REGV_RD32(IVPU_MMU_REG_EVTQ_PROD_SEC);
	if (ivpu_mmu_queue_is_empty(evtq))
		return NULL;

	evtq->cons = (evtq->cons + 1) & IVPU_MMU_Q_WRAP_MASK;
	return evt;
}
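
/*
 * Drain the event queue on an EVTQ interrupt. A fault in the global context
 * is fatal and triggers device recovery; a fault in a user context only marks
 * that context invalid. The consumer pointer is written back to the device as
 * events are acknowledged.
 */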
void ivpu_mmu_irq_evtq_handler(struct ivpu_device *vdev)
{
	u32 *event;
	u32 ssid;

	ivpu_dbg(vdev, IRQ, "MMU event queue\n");

	while ((event = ivpu_mmu_get_event(vdev)) != NULL) {
		ivpu_mmu_dump_event(vdev, event);

		ssid = FIELD_GET(IVPU_MMU_EVT_SSID_MASK, event[0]);
		if (ssid == IVPU_GLOBAL_CONTEXT_MMU_SSID) {
			ivpu_pm_trigger_recovery(vdev, "MMU event");
			return;
		}

		ivpu_mmu_user_context_mark_invalid(vdev, ssid);
		REGV_WR32(IVPU_MMU_REG_EVTQ_CONS_SEC, vdev->mmu->evtq.cons);
	}

	if (!kfifo_put(&vdev->hw->irq.fifo, IVPU_HW_IRQ_SRC_MMU_EVTQ))
		ivpu_err_ratelimited(vdev, "IRQ FIFO full\n");
}
void ivpu_mmu_evtq_dump(struct ivpu_device *vdev)
{
	u32 *event;

	while ((event = ivpu_mmu_get_event(vdev)) != NULL)
		ivpu_mmu_dump_event(vdev, event);
}
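
/*
 * GERROR uses the SMMUv3 toggle protocol: bits that differ between GERROR and
 * GERRORN denote newly raised errors, and writing the current GERROR value to
 * GERRORN acknowledges them.
 */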
void ivpu_mmu_irq_gerr_handler(struct ivpu_device *vdev)
{
	u32 gerror_val, gerrorn_val, active;

	ivpu_dbg(vdev, IRQ, "MMU error\n");

	gerror_val = REGV_RD32(IVPU_MMU_REG_GERROR);
	gerrorn_val = REGV_RD32(IVPU_MMU_REG_GERRORN);

	active = gerror_val ^ gerrorn_val;
	if (!(active & IVPU_MMU_GERROR_ERR_MASK))
		return;

	if (REG_TEST_FLD(IVPU_MMU_REG_GERROR, MSI_ABT, active))
		ivpu_warn_ratelimited(vdev, "MMU MSI ABT write aborted\n");

	if (REG_TEST_FLD(IVPU_MMU_REG_GERROR, MSI_PRIQ_ABT, active))
		ivpu_warn_ratelimited(vdev, "MMU PRIQ MSI ABT write aborted\n");

	if (REG_TEST_FLD(IVPU_MMU_REG_GERROR, MSI_EVTQ_ABT, active))
		ivpu_warn_ratelimited(vdev, "MMU EVTQ MSI ABT write aborted\n");

	if (REG_TEST_FLD(IVPU_MMU_REG_GERROR, MSI_CMDQ_ABT, active))
		ivpu_warn_ratelimited(vdev, "MMU CMDQ MSI ABT write aborted\n");

	if (REG_TEST_FLD(IVPU_MMU_REG_GERROR, PRIQ_ABT, active))
		ivpu_err_ratelimited(vdev, "MMU PRIQ write aborted\n");

	if (REG_TEST_FLD(IVPU_MMU_REG_GERROR, EVTQ_ABT, active))
		ivpu_err_ratelimited(vdev, "MMU EVTQ write aborted\n");

	if (REG_TEST_FLD(IVPU_MMU_REG_GERROR, CMDQ, active))
		ivpu_err_ratelimited(vdev, "MMU CMDQ write aborted\n");

	REGV_WR32(IVPU_MMU_REG_GERRORN, gerror_val);
}
int ivpu_mmu_cd_set(struct ivpu_device *vdev, int ssid, struct ivpu_mmu_pgtable *pgtable)
{
	return ivpu_mmu_cdtab_entry_set(vdev, ssid, pgtable->pgd_dma, true);
}

void ivpu_mmu_cd_clear(struct ivpu_device *vdev, int ssid)
{
	ivpu_mmu_cdtab_entry_set(vdev, ssid, 0, false);
}