// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2021-2024 NVIDIA CORPORATION & AFFILIATES. */

#define dev_fmt(fmt) "tegra241_cmdqv: " fmt

#include <linux/acpi.h>
#include <linux/debugfs.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>

#include <acpi/acpixf.h>

#include "arm-smmu-v3.h"
/* CMDQV register page base and size defines */
#define TEGRA241_CMDQV_CONFIG_BASE	(0)
#define TEGRA241_CMDQV_CONFIG_SIZE	(SZ_64K)
#define TEGRA241_VCMDQ_PAGE0_BASE	(TEGRA241_CMDQV_CONFIG_BASE + SZ_64K)
#define TEGRA241_VCMDQ_PAGE1_BASE	(TEGRA241_VCMDQ_PAGE0_BASE + SZ_64K)
#define TEGRA241_VINTF_PAGE_BASE	(TEGRA241_VCMDQ_PAGE1_BASE + SZ_64K)

/* CMDQV global base regs */
#define TEGRA241_CMDQV_CONFIG		0x0000
#define  CMDQV_EN			BIT(0)

#define TEGRA241_CMDQV_PARAM		0x0004
#define  CMDQV_NUM_VINTF_LOG2		GENMASK(11, 8)
#define  CMDQV_NUM_VCMDQ_LOG2		GENMASK(7, 4)

#define TEGRA241_CMDQV_STATUS		0x0008
#define  CMDQV_ENABLED			BIT(0)

#define TEGRA241_CMDQV_VINTF_ERR_MAP	0x0014
#define TEGRA241_CMDQV_VINTF_INT_MASK	0x001C
#define TEGRA241_CMDQV_CMDQ_ERR_MAP(m)	(0x0024 + 0x4*(m))

#define TEGRA241_CMDQV_CMDQ_ALLOC(q)	(0x0200 + 0x4*(q))
#define  CMDQV_CMDQ_ALLOC_VINTF		GENMASK(20, 15)
#define  CMDQV_CMDQ_ALLOC_LVCMDQ	GENMASK(7, 1)
#define  CMDQV_CMDQ_ALLOCATED		BIT(0)
/* VINTF config regs */
#define TEGRA241_VINTF(v)		(0x1000 + 0x100*(v))

#define TEGRA241_VINTF_CONFIG		0x0000
#define  VINTF_HYP_OWN			BIT(17)
#define  VINTF_VMID			GENMASK(16, 1)
#define  VINTF_EN			BIT(0)

#define TEGRA241_VINTF_STATUS		0x0004
#define  VINTF_STATUS			GENMASK(3, 1)
#define  VINTF_ENABLED			BIT(0)
#define TEGRA241_VINTF_LVCMDQ_ERR_MAP_64(m) \
					(0x00C0 + 0x8*(m))
#define  LVCMDQ_ERR_MAP_NUM_64		2
/* VCMDQ config regs */
#define TEGRA241_VCMDQ_PAGE0(q)		(TEGRA241_VCMDQ_PAGE0_BASE + 0x80*(q))

#define TEGRA241_VCMDQ_CONS		0x00000
#define  VCMDQ_CONS_ERR			GENMASK(30, 24)

#define TEGRA241_VCMDQ_PROD		0x00004

#define TEGRA241_VCMDQ_CONFIG		0x00008
#define  VCMDQ_EN			BIT(0)

#define TEGRA241_VCMDQ_STATUS		0x0000C
#define  VCMDQ_ENABLED			BIT(0)

#define TEGRA241_VCMDQ_GERROR		0x00010
#define TEGRA241_VCMDQ_GERRORN		0x00014

#define TEGRA241_VCMDQ_PAGE1(q)		(TEGRA241_VCMDQ_PAGE1_BASE + 0x80*(q))
#define  VCMDQ_ADDR			GENMASK(47, 5)
#define  VCMDQ_LOG2SIZE			GENMASK(4, 0)
#define  VCMDQ_LOG2SIZE_MAX		19

#define TEGRA241_VCMDQ_BASE		0x00000
#define TEGRA241_VCMDQ_CONS_INDX_BASE	0x00008
/* VINTF logical-VCMDQ pages */
#define TEGRA241_VINTFi_PAGE0(i)	(TEGRA241_VINTF_PAGE_BASE + SZ_128K*(i))
#define TEGRA241_VINTFi_PAGE1(i)	(TEGRA241_VINTFi_PAGE0(i) + SZ_64K)
#define TEGRA241_VINTFi_LVCMDQ_PAGE0(i, q) \
					(TEGRA241_VINTFi_PAGE0(i) + 0x80*(q))
#define TEGRA241_VINTFi_LVCMDQ_PAGE1(i, q) \
					(TEGRA241_VINTFi_PAGE1(i) + 0x80*(q))
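
/*
 * Worked example of the layout above (numbers follow directly from these
 * defines): the global config page sits at 0x0, the global VCMDQ PAGE0 and
 * PAGE1 windows at 0x10000 and 0x20000, and the per-VINTF pages start at
 * 0x30000 in 128KB steps. So TEGRA241_VINTFi_LVCMDQ_PAGE0(1, 2) resolves to
 * 0x30000 + 0x20000 + 0x100 = 0x50100.
 */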
#define REG_CMDQV(_cmdqv, _regname) \
	((_cmdqv)->base + TEGRA241_CMDQV_##_regname)
#define REG_VINTF(_vintf, _regname) \
	((_vintf)->base + TEGRA241_VINTF_##_regname)
#define REG_VCMDQ_PAGE0(_vcmdq, _regname) \
	((_vcmdq)->page0 + TEGRA241_VCMDQ_##_regname)
#define REG_VCMDQ_PAGE1(_vcmdq, _regname) \
	((_vcmdq)->page1 + TEGRA241_VCMDQ_##_regname)
static bool disable_cmdqv;
module_param(disable_cmdqv, bool, 0444);
MODULE_PARM_DESC(disable_cmdqv,
	"This allows disabling CMDQV HW and falling back to the default SMMU internal CMDQ.");

static bool bypass_vcmdq;
module_param(bypass_vcmdq, bool, 0444);
MODULE_PARM_DESC(bypass_vcmdq,
	"This allows bypassing VCMDQ for debugging use or perf comparison.");
/**
 * struct tegra241_vcmdq - Virtual Command Queue
 * @idx: Global index in the CMDQV
 * @lidx: Local index in the VINTF
 * @enabled: Enable status
 * @cmdqv: Parent CMDQV pointer
 * @vintf: Parent VINTF pointer
 * @cmdq: Command Queue struct
 * @page0: MMIO Page0 base address
 * @page1: MMIO Page1 base address
 */
struct tegra241_vcmdq {
	u16 idx;
	u16 lidx;

	bool enabled;

	struct tegra241_cmdqv *cmdqv;
	struct tegra241_vintf *vintf;
	struct arm_smmu_cmdq cmdq;

	void __iomem *page0;
	void __iomem *page1;
};
/**
 * struct tegra241_vintf - Virtual Interface
 * @idx: Global index in the CMDQV
 * @enabled: Enable status
 * @hyp_own: Owned by hypervisor (in-kernel)
 * @cmdqv: Parent CMDQV pointer
 * @lvcmdqs: List of logical VCMDQ pointers
 * @base: MMIO base address
 */
struct tegra241_vintf {
	u16 idx;

	bool enabled;
	bool hyp_own;

	struct tegra241_cmdqv *cmdqv;
	struct tegra241_vcmdq **lvcmdqs;

	void __iomem *base;
};
/**
 * struct tegra241_cmdqv - CMDQ-V for SMMUv3
 * @smmu: SMMUv3 device
 * @dev: CMDQV device
 * @base: MMIO base address
 * @irq: IRQ number
 * @num_vintfs: Total number of VINTFs
 * @num_vcmdqs: Total number of VCMDQs
 * @num_lvcmdqs_per_vintf: Number of logical VCMDQs per VINTF
 * @vintf_ids: VINTF id allocator
 * @vintfs: List of VINTFs
 */
struct tegra241_cmdqv {
	struct arm_smmu_device smmu;
	struct device *dev;

	void __iomem *base;
	int irq;

	/* CMDQV Hardware Params */
	u16 num_vintfs;
	u16 num_vcmdqs;
	u16 num_lvcmdqs_per_vintf;

	struct ida vintf_ids;

	struct tegra241_vintf **vintfs;
};
/* Config and Polling Helpers */

static inline int tegra241_cmdqv_write_config(struct tegra241_cmdqv *cmdqv,
					      void __iomem *addr_config,
					      void __iomem *addr_status,
					      u32 regval, const char *header,
					      bool *out_enabled)
{
	bool en = regval & BIT(0);
	int ret;

	writel(regval, addr_config);
	ret = readl_poll_timeout(addr_status, regval,
				 en ? regval & BIT(0) : !(regval & BIT(0)),
				 1, ARM_SMMU_POLL_TIMEOUT_US);
	if (ret)
		dev_err(cmdqv->dev, "%sfailed to %sable, STATUS=0x%08X\n",
			header, en ? "en" : "dis", regval);
	if (out_enabled)
		WRITE_ONCE(*out_enabled, regval & BIT(0));
	return ret;
}
static inline int cmdqv_write_config(struct tegra241_cmdqv *cmdqv, u32 regval)
{
	return tegra241_cmdqv_write_config(cmdqv,
					   REG_CMDQV(cmdqv, CONFIG),
					   REG_CMDQV(cmdqv, STATUS),
					   regval, "CMDQV: ", NULL);
}
static inline int vintf_write_config(struct tegra241_vintf *vintf, u32 regval)
{
	char header[16];

	snprintf(header, 16, "VINTF%u: ", vintf->idx);
	return tegra241_cmdqv_write_config(vintf->cmdqv,
					   REG_VINTF(vintf, CONFIG),
					   REG_VINTF(vintf, STATUS),
					   regval, header, &vintf->enabled);
}
static inline char *lvcmdq_error_header(struct tegra241_vcmdq *vcmdq,
					char *header, int hlen)
{
	if (WARN_ON(!vcmdq->vintf))
		return "";
	snprintf(header, hlen, "VINTF%u: VCMDQ%u/LVCMDQ%u: ",
		 vcmdq->vintf->idx, vcmdq->idx, vcmdq->lidx);
	return header;
}
static inline int vcmdq_write_config(struct tegra241_vcmdq *vcmdq, u32 regval)
{
	char header[64], *h = lvcmdq_error_header(vcmdq, header, 64);

	return tegra241_cmdqv_write_config(vcmdq->cmdqv,
					   REG_VCMDQ_PAGE0(vcmdq, CONFIG),
					   REG_VCMDQ_PAGE0(vcmdq, STATUS),
					   regval, h, &vcmdq->enabled);
}
/* ISR Functions */

static void tegra241_vintf0_handle_error(struct tegra241_vintf *vintf)
{
	int i;

	for (i = 0; i < LVCMDQ_ERR_MAP_NUM_64; i++) {
		u64 map = readq_relaxed(REG_VINTF(vintf, LVCMDQ_ERR_MAP_64(i)));

		while (map) {
			unsigned long lidx = __ffs64(map);
			struct tegra241_vcmdq *vcmdq = vintf->lvcmdqs[lidx];
			u32 gerror = readl_relaxed(REG_VCMDQ_PAGE0(vcmdq, GERROR));

			__arm_smmu_cmdq_skip_err(&vintf->cmdqv->smmu, &vcmdq->cmdq);
			writel(gerror, REG_VCMDQ_PAGE0(vcmdq, GERRORN));
			map &= ~BIT_ULL(lidx);
		}
	}
}
static irqreturn_t tegra241_cmdqv_isr(int irq, void *devid)
{
	struct tegra241_cmdqv *cmdqv = (struct tegra241_cmdqv *)devid;
	void __iomem *reg_vintf_map = REG_CMDQV(cmdqv, VINTF_ERR_MAP);
	char err_str[256];
	u64 vintf_map;

	/* Use readl_relaxed() as register addresses are not 64-bit aligned */
	vintf_map = (u64)readl_relaxed(reg_vintf_map + 0x4) << 32 |
		    (u64)readl_relaxed(reg_vintf_map);

	snprintf(err_str, sizeof(err_str),
		 "vintf_map: %016llx, vcmdq_map %08x:%08x:%08x:%08x", vintf_map,
		 readl_relaxed(REG_CMDQV(cmdqv, CMDQ_ERR_MAP(3))),
		 readl_relaxed(REG_CMDQV(cmdqv, CMDQ_ERR_MAP(2))),
		 readl_relaxed(REG_CMDQV(cmdqv, CMDQ_ERR_MAP(1))),
		 readl_relaxed(REG_CMDQV(cmdqv, CMDQ_ERR_MAP(0))));

	dev_warn(cmdqv->dev, "unexpected error reported. %s\n", err_str);

	/* Handle VINTF0 and its LVCMDQs */
	if (vintf_map & BIT_ULL(0)) {
		tegra241_vintf0_handle_error(cmdqv->vintfs[0]);
		vintf_map &= ~BIT_ULL(0);
	}

	return IRQ_HANDLED;
}
/* Command Queue Function */

static bool tegra241_guest_vcmdq_supports_cmd(struct arm_smmu_cmdq_ent *ent)
{
	switch (ent->opcode) {
	case CMDQ_OP_TLBI_NH_ASID:
	case CMDQ_OP_TLBI_NH_VA:
	case CMDQ_OP_ATC_INV:
		return true;
	default:
		return false;
	}
}

static struct arm_smmu_cmdq *
tegra241_cmdqv_get_cmdq(struct arm_smmu_device *smmu,
			struct arm_smmu_cmdq_ent *ent)
{
	struct tegra241_cmdqv *cmdqv =
		container_of(smmu, struct tegra241_cmdqv, smmu);
	struct tegra241_vintf *vintf = cmdqv->vintfs[0];
	struct tegra241_vcmdq *vcmdq;
	u16 lidx;

	if (READ_ONCE(bypass_vcmdq))
		return NULL;

	/* Use SMMU CMDQ if VINTF0 is uninitialized */
	if (!READ_ONCE(vintf->enabled))
		return NULL;

	/*
	 * Select an LVCMDQ to use. Distributing CPUs across the LVCMDQs
	 * balances out traffic on cmdq issuing: each cmdq has its own lock,
	 * so if all CPUs issued their cmdlists through the same cmdq, only
	 * one CPU at a time could make progress while the others spin on
	 * that one lock.
	 */
	lidx = smp_processor_id() % cmdqv->num_lvcmdqs_per_vintf;
	vcmdq = vintf->lvcmdqs[lidx];
	if (!vcmdq || !READ_ONCE(vcmdq->enabled))
		return NULL;

	/* Unsupported CMD goes for smmu->cmdq pathway */
	if (!arm_smmu_cmdq_supports_cmd(&vcmdq->cmdq, ent))
		return NULL;
	return &vcmdq->cmdq;
}
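
/*
 * Illustration of the selection above (example numbers only): with
 * num_lvcmdqs_per_vintf == 2, even-numbered CPUs use lvcmdqs[0] and
 * odd-numbered CPUs use lvcmdqs[1], so concurrent issuers contend on two
 * cmdq locks instead of one. Returning NULL in any of the cases above makes
 * the caller fall back to the SMMU's own CMDQ.
 */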
/* HW Reset Functions */

static void tegra241_vcmdq_hw_deinit(struct tegra241_vcmdq *vcmdq)
{
	char header[64], *h = lvcmdq_error_header(vcmdq, header, 64);
	u32 gerrorn, gerror;

	if (vcmdq_write_config(vcmdq, 0)) {
		dev_err(vcmdq->cmdqv->dev,
			"%sGERRORN=0x%X, GERROR=0x%X, CONS=0x%X\n", h,
			readl_relaxed(REG_VCMDQ_PAGE0(vcmdq, GERRORN)),
			readl_relaxed(REG_VCMDQ_PAGE0(vcmdq, GERROR)),
			readl_relaxed(REG_VCMDQ_PAGE0(vcmdq, CONS)));
	}
	writel_relaxed(0, REG_VCMDQ_PAGE0(vcmdq, PROD));
	writel_relaxed(0, REG_VCMDQ_PAGE0(vcmdq, CONS));
	writeq_relaxed(0, REG_VCMDQ_PAGE1(vcmdq, BASE));
	writeq_relaxed(0, REG_VCMDQ_PAGE1(vcmdq, CONS_INDX_BASE));

	gerrorn = readl_relaxed(REG_VCMDQ_PAGE0(vcmdq, GERRORN));
	gerror = readl_relaxed(REG_VCMDQ_PAGE0(vcmdq, GERROR));
	if (gerror != gerrorn) {
		dev_warn(vcmdq->cmdqv->dev,
			 "%suncleared error detected, resetting\n", h);
		writel(gerror, REG_VCMDQ_PAGE0(vcmdq, GERRORN));
	}

	dev_dbg(vcmdq->cmdqv->dev, "%sdeinited\n", h);
}
static int tegra241_vcmdq_hw_init(struct tegra241_vcmdq *vcmdq)
{
	char header[64], *h = lvcmdq_error_header(vcmdq, header, 64);
	int ret;

	/* Reset VCMDQ */
	tegra241_vcmdq_hw_deinit(vcmdq);

	/* Configure and enable VCMDQ */
	writeq_relaxed(vcmdq->cmdq.q.q_base, REG_VCMDQ_PAGE1(vcmdq, BASE));

	ret = vcmdq_write_config(vcmdq, VCMDQ_EN);
	if (ret) {
		dev_err(vcmdq->cmdqv->dev,
			"%sGERRORN=0x%X, GERROR=0x%X, CONS=0x%X\n", h,
			readl_relaxed(REG_VCMDQ_PAGE0(vcmdq, GERRORN)),
			readl_relaxed(REG_VCMDQ_PAGE0(vcmdq, GERROR)),
			readl_relaxed(REG_VCMDQ_PAGE0(vcmdq, CONS)));
		return ret;
	}

	dev_dbg(vcmdq->cmdqv->dev, "%sinited\n", h);
	return 0;
}
static void tegra241_vintf_hw_deinit(struct tegra241_vintf *vintf)
{
	u16 lidx;

	for (lidx = 0; lidx < vintf->cmdqv->num_lvcmdqs_per_vintf; lidx++)
		if (vintf->lvcmdqs && vintf->lvcmdqs[lidx])
			tegra241_vcmdq_hw_deinit(vintf->lvcmdqs[lidx]);
	vintf_write_config(vintf, 0);
}
static int tegra241_vintf_hw_init(struct tegra241_vintf *vintf, bool hyp_own)
{
	u32 regval;
	u16 lidx;
	int ret;

	/* Reset VINTF */
	tegra241_vintf_hw_deinit(vintf);

	/* Configure and enable VINTF */
	/*
	 * Note that the HYP_OWN bit is wired to zero when running in a guest
	 * kernel, whether we set it here or not, as !HYP_OWN cmdq HW only
	 * supports a restricted set of commands.
	 */
	regval = FIELD_PREP(VINTF_HYP_OWN, hyp_own);
	writel(regval, REG_VINTF(vintf, CONFIG));

	ret = vintf_write_config(vintf, regval | VINTF_EN);
	if (ret)
		return ret;
	/*
	 * As mentioned above, the HYP_OWN bit is wired to zero for a guest
	 * kernel, so read it back from HW to make sure hyp_own reflects the
	 * real value.
	 */
	vintf->hyp_own = !!(VINTF_HYP_OWN & readl(REG_VINTF(vintf, CONFIG)));

	for (lidx = 0; lidx < vintf->cmdqv->num_lvcmdqs_per_vintf; lidx++) {
		if (vintf->lvcmdqs && vintf->lvcmdqs[lidx]) {
			ret = tegra241_vcmdq_hw_init(vintf->lvcmdqs[lidx]);
			if (ret) {
				tegra241_vintf_hw_deinit(vintf);
				return ret;
			}
		}
	}

	return 0;
}
static int tegra241_cmdqv_hw_reset(struct arm_smmu_device *smmu)
{
	struct tegra241_cmdqv *cmdqv =
		container_of(smmu, struct tegra241_cmdqv, smmu);
	u16 qidx, lidx, idx;
	u32 regval;
	int ret;

	/* Reset CMDQV */
	regval = readl_relaxed(REG_CMDQV(cmdqv, CONFIG));
	ret = cmdqv_write_config(cmdqv, regval & ~CMDQV_EN);
	if (ret)
		return ret;
	ret = cmdqv_write_config(cmdqv, regval | CMDQV_EN);
	if (ret)
		return ret;

	/* Assign preallocated global VCMDQs to each VINTF as LVCMDQs */
	for (idx = 0, qidx = 0; idx < cmdqv->num_vintfs; idx++) {
		for (lidx = 0; lidx < cmdqv->num_lvcmdqs_per_vintf; lidx++) {
			regval = FIELD_PREP(CMDQV_CMDQ_ALLOC_VINTF, idx);
			regval |= FIELD_PREP(CMDQV_CMDQ_ALLOC_LVCMDQ, lidx);
			regval |= CMDQV_CMDQ_ALLOCATED;
			writel_relaxed(regval,
				       REG_CMDQV(cmdqv, CMDQ_ALLOC(qidx++)));
		}
	}

	return tegra241_vintf_hw_init(cmdqv->vintfs[0], true);
}
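
/*
 * Mapping note (follows from the loop above): qidx walks the global VCMDQs
 * 0..num_vcmdqs-1 in VINTF order, so global VCMDQ
 * (idx * num_lvcmdqs_per_vintf + lidx) becomes LVCMDQ lidx of VINTF idx,
 * matching the vcmdq->idx computation in tegra241_vintf_init_lvcmdq().
 */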
/* VCMDQ Resource Helpers */

static void tegra241_vcmdq_free_smmu_cmdq(struct tegra241_vcmdq *vcmdq)
{
	struct arm_smmu_queue *q = &vcmdq->cmdq.q;
	size_t nents = 1 << q->llq.max_n_shift;
	size_t qsz = nents << CMDQ_ENT_SZ_SHIFT;

	if (!q->base)
		return;
	dmam_free_coherent(vcmdq->cmdqv->smmu.dev, qsz, q->base, q->base_dma);
}
static int tegra241_vcmdq_alloc_smmu_cmdq(struct tegra241_vcmdq *vcmdq)
{
	struct arm_smmu_device *smmu = &vcmdq->cmdqv->smmu;
	struct arm_smmu_cmdq *cmdq = &vcmdq->cmdq;
	struct arm_smmu_queue *q = &cmdq->q;
	char name[16];
	int ret;

	snprintf(name, 16, "vcmdq%u", vcmdq->idx);

	/* Queue size, capped to ensure natural alignment */
	q->llq.max_n_shift = min_t(u32, CMDQ_MAX_SZ_SHIFT, VCMDQ_LOG2SIZE_MAX);

	/* Use the common helper to init the VCMDQ, and then... */
	ret = arm_smmu_init_one_queue(smmu, q, vcmdq->page0,
				      TEGRA241_VCMDQ_PROD, TEGRA241_VCMDQ_CONS,
				      CMDQ_ENT_DWORDS, name);
	if (ret)
		return ret;

	/* ...override q_base to write VCMDQ_BASE registers */
	q->q_base = q->base_dma & VCMDQ_ADDR;
	q->q_base |= FIELD_PREP(VCMDQ_LOG2SIZE, q->llq.max_n_shift);

	if (!vcmdq->vintf->hyp_own)
		cmdq->supports_cmd = tegra241_guest_vcmdq_supports_cmd;

	return arm_smmu_cmdq_init(smmu, cmdq);
}
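
/*
 * Layout note (from the VCMDQ_ADDR and VCMDQ_LOG2SIZE fields): the value
 * written to TEGRA241_VCMDQ_BASE packs the queue's DMA address in bits
 * [47:5] and log2 of the queue size in bits [4:0], which is why q_base is
 * rebuilt above rather than reusing the regular SMMU CMDQ encoding.
 */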
/* VINTF Logical VCMDQ Resource Helpers */

static void tegra241_vintf_deinit_lvcmdq(struct tegra241_vintf *vintf, u16 lidx)
{
	vintf->lvcmdqs[lidx] = NULL;
}

static int tegra241_vintf_init_lvcmdq(struct tegra241_vintf *vintf, u16 lidx,
				      struct tegra241_vcmdq *vcmdq)
{
	struct tegra241_cmdqv *cmdqv = vintf->cmdqv;
	u16 idx = vintf->idx;

	vcmdq->idx = idx * cmdqv->num_lvcmdqs_per_vintf + lidx;
	vcmdq->lidx = lidx;
	vcmdq->cmdqv = cmdqv;
	vcmdq->vintf = vintf;
	vcmdq->page0 = cmdqv->base + TEGRA241_VINTFi_LVCMDQ_PAGE0(idx, lidx);
	vcmdq->page1 = cmdqv->base + TEGRA241_VINTFi_LVCMDQ_PAGE1(idx, lidx);

	vintf->lvcmdqs[lidx] = vcmdq;
	return 0;
}
static void tegra241_vintf_free_lvcmdq(struct tegra241_vintf *vintf, u16 lidx)
{
	struct tegra241_vcmdq *vcmdq = vintf->lvcmdqs[lidx];
	char header[64];

	tegra241_vcmdq_free_smmu_cmdq(vcmdq);
	tegra241_vintf_deinit_lvcmdq(vintf, lidx);

	dev_dbg(vintf->cmdqv->dev,
		"%sdeallocated\n", lvcmdq_error_header(vcmdq, header, 64));
	kfree(vcmdq);
}
static struct tegra241_vcmdq *
tegra241_vintf_alloc_lvcmdq(struct tegra241_vintf *vintf, u16 lidx)
{
	struct tegra241_cmdqv *cmdqv = vintf->cmdqv;
	struct tegra241_vcmdq *vcmdq;
	char header[64];
	int ret;

	vcmdq = kzalloc(sizeof(*vcmdq), GFP_KERNEL);
	if (!vcmdq)
		return ERR_PTR(-ENOMEM);

	ret = tegra241_vintf_init_lvcmdq(vintf, lidx, vcmdq);
	if (ret)
		goto free_vcmdq;

	/* Build an arm_smmu_cmdq for each LVCMDQ */
	ret = tegra241_vcmdq_alloc_smmu_cmdq(vcmdq);
	if (ret)
		goto deinit_lvcmdq;

	dev_dbg(cmdqv->dev,
		"%sallocated\n", lvcmdq_error_header(vcmdq, header, 64));
	return vcmdq;

deinit_lvcmdq:
	tegra241_vintf_deinit_lvcmdq(vintf, lidx);
free_vcmdq:
	kfree(vcmdq);
	return ERR_PTR(ret);
}
/* VINTF Resource Helpers */

static void tegra241_cmdqv_deinit_vintf(struct tegra241_cmdqv *cmdqv, u16 idx)
{
	kfree(cmdqv->vintfs[idx]->lvcmdqs);
	ida_free(&cmdqv->vintf_ids, idx);
	cmdqv->vintfs[idx] = NULL;
}
static int tegra241_cmdqv_init_vintf(struct tegra241_cmdqv *cmdqv, u16 max_idx,
				     struct tegra241_vintf *vintf)
{
	u16 idx;
	int ret;

	ret = ida_alloc_max(&cmdqv->vintf_ids, max_idx, GFP_KERNEL);
	if (ret < 0)
		return ret;
	idx = ret;

	vintf->idx = idx;
	vintf->cmdqv = cmdqv;
	vintf->base = cmdqv->base + TEGRA241_VINTF(idx);

	vintf->lvcmdqs = kcalloc(cmdqv->num_lvcmdqs_per_vintf,
				 sizeof(*vintf->lvcmdqs), GFP_KERNEL);
	if (!vintf->lvcmdqs) {
		ida_free(&cmdqv->vintf_ids, idx);
		return -ENOMEM;
	}

	cmdqv->vintfs[idx] = vintf;
	return ret;
}
/* Remove Helpers */

static void tegra241_vintf_remove_lvcmdq(struct tegra241_vintf *vintf, u16 lidx)
{
	tegra241_vcmdq_hw_deinit(vintf->lvcmdqs[lidx]);
	tegra241_vintf_free_lvcmdq(vintf, lidx);
}

static void tegra241_cmdqv_remove_vintf(struct tegra241_cmdqv *cmdqv, u16 idx)
{
	struct tegra241_vintf *vintf = cmdqv->vintfs[idx];
	u16 lidx;

	/* Remove LVCMDQ resources */
	for (lidx = 0; lidx < vintf->cmdqv->num_lvcmdqs_per_vintf; lidx++)
		if (vintf->lvcmdqs[lidx])
			tegra241_vintf_remove_lvcmdq(vintf, lidx);

	/* Remove VINTF resources */
	tegra241_vintf_hw_deinit(vintf);

	dev_dbg(cmdqv->dev, "VINTF%u: deallocated\n", vintf->idx);
	tegra241_cmdqv_deinit_vintf(cmdqv, idx);
	kfree(vintf);
}
static void tegra241_cmdqv_remove(struct arm_smmu_device *smmu)
{
	struct tegra241_cmdqv *cmdqv =
		container_of(smmu, struct tegra241_cmdqv, smmu);
	u16 idx;

	/* Remove VINTF resources */
	for (idx = 0; idx < cmdqv->num_vintfs; idx++) {
		if (cmdqv->vintfs[idx]) {
			/* Only vintf0 should remain at this stage */
			WARN_ON(idx > 0);
			tegra241_cmdqv_remove_vintf(cmdqv, idx);
		}
	}

	/* Remove cmdqv resources */
	ida_destroy(&cmdqv->vintf_ids);

	if (cmdqv->irq > 0)
		free_irq(cmdqv->irq, cmdqv);
	iounmap(cmdqv->base);
	kfree(cmdqv->vintfs);
	put_device(cmdqv->dev); /* smmu->impl_dev */
}
static struct arm_smmu_impl_ops tegra241_cmdqv_impl_ops = {
	.get_secondary_cmdq = tegra241_cmdqv_get_cmdq,
	.device_reset = tegra241_cmdqv_hw_reset,
	.device_remove = tegra241_cmdqv_remove,
};
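
/*
 * These are the full-feature ops. __tegra241_cmdqv_probe() below installs a
 * minimal init_ops table (init_structures/device_remove) first; smmu->impl_ops
 * is switched to this table only once tegra241_cmdqv_init_structures() has set
 * up VINTF0 and its LVCMDQs.
 */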
/* Probe Functions */

static int tegra241_cmdqv_acpi_is_memory(struct acpi_resource *res, void *data)
{
	struct resource_win win;

	return !acpi_dev_resource_address_space(res, &win);
}

static int tegra241_cmdqv_acpi_get_irqs(struct acpi_resource *ares, void *data)
{
	struct resource r;
	int *irq = data;

	if (*irq <= 0 && acpi_dev_resource_interrupt(ares, 0, &r))
		*irq = r.start;
	return 1; /* No need to add resource to the list */
}
static struct resource *
tegra241_cmdqv_find_acpi_resource(struct device *dev, int *irq)
{
	struct acpi_device *adev = to_acpi_device(dev);
	struct list_head resource_list;
	struct resource_entry *rentry;
	struct resource *res = NULL;
	int ret;

	INIT_LIST_HEAD(&resource_list);
	ret = acpi_dev_get_resources(adev, &resource_list,
				     tegra241_cmdqv_acpi_is_memory, NULL);
	if (ret < 0) {
		dev_err(dev, "failed to get memory resource: %d\n", ret);
		return NULL;
	}

	rentry = list_first_entry_or_null(&resource_list,
					  struct resource_entry, node);
	if (!rentry) {
		dev_err(dev, "failed to get memory resource entry\n");
		goto free_list;
	}

	/* Caller must free the res */
	res = kzalloc(sizeof(*res), GFP_KERNEL);
	if (!res)
		goto free_list;
	*res = *rentry->res;

	acpi_dev_free_resource_list(&resource_list);

	INIT_LIST_HEAD(&resource_list);

	if (irq)
		ret = acpi_dev_get_resources(adev, &resource_list,
					     tegra241_cmdqv_acpi_get_irqs, irq);
	if (ret < 0 || !irq || *irq <= 0)
		dev_warn(dev, "no interrupt. errors will not be reported\n");

free_list:
	acpi_dev_free_resource_list(&resource_list);
	return res;
}
static int tegra241_cmdqv_init_structures(struct arm_smmu_device *smmu)
{
	struct tegra241_cmdqv *cmdqv =
		container_of(smmu, struct tegra241_cmdqv, smmu);
	struct tegra241_vintf *vintf;
	int lidx;
	int ret;

	vintf = kzalloc(sizeof(*vintf), GFP_KERNEL);
	if (!vintf)
		goto out_fallback;

	/* Init VINTF0 for in-kernel use */
	ret = tegra241_cmdqv_init_vintf(cmdqv, 0, vintf);
	if (ret) {
		dev_err(cmdqv->dev, "failed to init vintf0: %d\n", ret);
		goto free_vintf;
	}

	/* Preallocate logical VCMDQs to VINTF0 */
	for (lidx = 0; lidx < cmdqv->num_lvcmdqs_per_vintf; lidx++) {
		struct tegra241_vcmdq *vcmdq;

		vcmdq = tegra241_vintf_alloc_lvcmdq(vintf, lidx);
		if (IS_ERR(vcmdq))
			goto free_lvcmdq;
	}

	/* Now, we are ready to run all the impl ops */
	smmu->impl_ops = &tegra241_cmdqv_impl_ops;
	return 0;

free_lvcmdq:
	for (lidx--; lidx >= 0; lidx--)
		tegra241_vintf_free_lvcmdq(vintf, lidx);
	tegra241_cmdqv_deinit_vintf(cmdqv, vintf->idx);
free_vintf:
	kfree(vintf);
out_fallback:
	dev_info(smmu->impl_dev, "Falling back to standard SMMU CMDQ\n");
	smmu->options &= ~ARM_SMMU_OPT_TEGRA241_CMDQV;
	tegra241_cmdqv_remove(smmu);
	return 0;
}
#ifdef CONFIG_IOMMU_DEBUGFS
static struct dentry *cmdqv_debugfs_dir;
#endif
static struct arm_smmu_device *
__tegra241_cmdqv_probe(struct arm_smmu_device *smmu, struct resource *res,
		       int irq)
{
	static const struct arm_smmu_impl_ops init_ops = {
		.init_structures = tegra241_cmdqv_init_structures,
		.device_remove = tegra241_cmdqv_remove,
	};
	struct tegra241_cmdqv *cmdqv = NULL;
	struct arm_smmu_device *new_smmu;
	void __iomem *base;
	u32 regval;
	int ret;

	static_assert(offsetof(struct tegra241_cmdqv, smmu) == 0);

	base = ioremap(res->start, resource_size(res));
	if (!base) {
		dev_err(smmu->dev, "failed to ioremap\n");
		return NULL;
	}

	regval = readl(base + TEGRA241_CMDQV_CONFIG);
	if (disable_cmdqv) {
		dev_info(smmu->dev, "Detected disable_cmdqv=true\n");
		writel(regval & ~CMDQV_EN, base + TEGRA241_CMDQV_CONFIG);
		goto iounmap;
	}

	cmdqv = devm_krealloc(smmu->dev, smmu, sizeof(*cmdqv), GFP_KERNEL);
	if (!cmdqv)
		goto iounmap;
	new_smmu = &cmdqv->smmu;

	cmdqv->irq = irq;
	cmdqv->base = base;
	cmdqv->dev = smmu->impl_dev;

	if (cmdqv->irq > 0) {
		ret = request_irq(irq, tegra241_cmdqv_isr, 0, "tegra241-cmdqv",
				  cmdqv);
		if (ret) {
			dev_err(cmdqv->dev, "failed to request irq (%d): %d\n",
				cmdqv->irq, ret);
			goto iounmap;
		}
	}

	regval = readl_relaxed(REG_CMDQV(cmdqv, PARAM));
	cmdqv->num_vintfs = 1 << FIELD_GET(CMDQV_NUM_VINTF_LOG2, regval);
	cmdqv->num_vcmdqs = 1 << FIELD_GET(CMDQV_NUM_VCMDQ_LOG2, regval);
	cmdqv->num_lvcmdqs_per_vintf = cmdqv->num_vcmdqs / cmdqv->num_vintfs;
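	/*
	 * Example (hypothetical values; the real ones come from the PARAM
	 * register): NUM_VINTF_LOG2 = 6 and NUM_VCMDQ_LOG2 = 7 would mean
	 * 64 VINTFs and 128 VCMDQs, i.e. 2 logical VCMDQs per VINTF.
	 */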

	cmdqv->vintfs =
		kcalloc(cmdqv->num_vintfs, sizeof(*cmdqv->vintfs), GFP_KERNEL);
	if (!cmdqv->vintfs)
		goto free_irq;

	ida_init(&cmdqv->vintf_ids);

#ifdef CONFIG_IOMMU_DEBUGFS
	if (!cmdqv_debugfs_dir) {
		cmdqv_debugfs_dir =
			debugfs_create_dir("tegra241_cmdqv", iommu_debugfs_dir);
		debugfs_create_bool("bypass_vcmdq", 0644, cmdqv_debugfs_dir,
				    &bypass_vcmdq);
	}
#endif

	/* Provide init-level ops only, until tegra241_cmdqv_init_structures */
	new_smmu->impl_ops = &init_ops;

	return new_smmu;

free_irq:
	if (cmdqv->irq > 0)
		free_irq(cmdqv->irq, cmdqv);
iounmap:
	iounmap(base);
	return NULL;
}
struct arm_smmu_device *tegra241_cmdqv_probe(struct arm_smmu_device *smmu)
{
	struct arm_smmu_device *new_smmu;
	struct resource *res = NULL;
	int irq = 0;

	if (!smmu->dev->of_node)
		res = tegra241_cmdqv_find_acpi_resource(smmu->impl_dev, &irq);
	if (!res)
		goto out_fallback;

	new_smmu = __tegra241_cmdqv_probe(smmu, res, irq);
	kfree(res);

	if (new_smmu)
		return new_smmu;

out_fallback:
	dev_info(smmu->impl_dev, "Falling back to standard SMMU CMDQ\n");
	smmu->options &= ~ARM_SMMU_OPT_TEGRA241_CMDQV;
	put_device(smmu->impl_dev);
	return ERR_PTR(-ENODEV);
}