// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015-2018 Etnaviv Project
 */

#include <linux/devcoredump.h>
#include <linux/moduleparam.h>

#include "etnaviv_cmdbuf.h"
#include "etnaviv_dump.h"
#include "etnaviv_gem.h"
#include "etnaviv_gpu.h"
#include "etnaviv_mmu.h"
#include "etnaviv_sched.h"
#include "state.xml.h"
#include "state_hi.xml.h"

static bool etnaviv_dump_core = true;
module_param_named(dump_core, etnaviv_dump_core, bool, 0600);

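/*
 * Tracks the write position inside the vmalloc'ed dump file: 'start' points
 * at the beginning of the buffer, 'hdr' at the next free object header slot
 * and 'data' at the next free payload byte.
 */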
struct core_dump_iterator {
	void *start;
	struct etnaviv_dump_object_header *hdr;
	void *data;
};

static const unsigned short etnaviv_dump_registers[] = {
	VIVS_HI_CLOCK_CONTROL,
	VIVS_HI_CHIP_IDENTITY,
	VIVS_HI_CHIP_MINOR_FEATURE_0,
	VIVS_HI_CACHE_CONTROL,
	VIVS_PM_POWER_CONTROLS,
	VIVS_PM_MODULE_CONTROLS,
	VIVS_PM_MODULE_STATUS,
	VIVS_MC_MMU_FE_PAGE_TABLE,
	VIVS_MC_MMU_TX_PAGE_TABLE,
	VIVS_MC_MMU_PE_PAGE_TABLE,
	VIVS_MC_MMU_PEZ_PAGE_TABLE,
	VIVS_MC_MMU_RA_PAGE_TABLE,
	VIVS_MC_MEMORY_BASE_ADDR_RA,
	VIVS_MC_MEMORY_BASE_ADDR_FE,
	VIVS_MC_MEMORY_BASE_ADDR_TX,
	VIVS_MC_MEMORY_BASE_ADDR_PEZ,
	VIVS_MC_MEMORY_BASE_ADDR_PE,
	VIVS_MC_MEMORY_TIMING_CONTROL,
	VIVS_FE_DMA_DEBUG_STATE,
};

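/* Fill in the next object header and advance the iterator past its payload. */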
static void etnaviv_core_dump_header(struct core_dump_iterator *iter,
	u32 type, void *data_end)
{
	struct etnaviv_dump_object_header *hdr = iter->hdr;

	hdr->magic = cpu_to_le32(ETDUMP_MAGIC);
	hdr->type = cpu_to_le32(type);
	hdr->file_offset = cpu_to_le32(iter->data - iter->start);
	hdr->file_size = cpu_to_le32(data_end - iter->data);

	iter->hdr++;
	iter->data += hdr->file_size;
}

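/* Snapshot every register in etnaviv_dump_registers[] into one ETDUMP_BUF_REG object. */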
static void etnaviv_core_dump_registers(struct core_dump_iterator *iter,
	struct etnaviv_gpu *gpu)
{
	struct etnaviv_dump_registers *reg = iter->data;
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(etnaviv_dump_registers); i++, reg++) {
		reg->reg = etnaviv_dump_registers[i];
		reg->value = gpu_read(gpu, etnaviv_dump_registers[i]);
	}

	etnaviv_core_dump_header(iter, ETDUMP_BUF_REG, reg);
}

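/*
 * Dump the MMU state via etnaviv_iommu_dump(); mmu_size must match what
 * etnaviv_iommu_dump_size() reported for this context.
 */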
static void etnaviv_core_dump_mmu(struct core_dump_iterator *iter,
	struct etnaviv_iommu_context *mmu, size_t mmu_size)
{
	etnaviv_iommu_dump(mmu, iter->data);

	etnaviv_core_dump_header(iter, ETDUMP_BUF_MMU, iter->data + mmu_size);
}

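/* Copy an arbitrary memory region into the dump and record its GPU iova. */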
static void etnaviv_core_dump_mem(struct core_dump_iterator *iter, u32 type,
	void *ptr, size_t size, u64 iova)
{
	memcpy(iter->data, ptr, size);

	iter->hdr->iova = cpu_to_le64(iova);

	etnaviv_core_dump_header(iter, type, iter->data + size);
}

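/*
 * Main devcoredump entry point: size the dump file, allocate it, then emit
 * the register set, MMU state, kernel ring buffer, the hanging command
 * buffer and all buffer objects of the offending submit before handing the
 * result to dev_coredumpv().
 */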
void etnaviv_core_dump(struct etnaviv_gem_submit *submit)
{
	struct etnaviv_gpu *gpu = submit->gpu;
	struct core_dump_iterator iter;
	struct etnaviv_gem_object *obj;
	unsigned int n_obj, n_bomap_pages;
	size_t file_size, mmu_size;
	__le64 *bomap, *bomap_start;
	int i;

	/* Only catch the first event, or when manually re-armed */
	if (!etnaviv_dump_core)
		return;
	etnaviv_dump_core = false;

	mutex_lock(&gpu->mmu_context->lock);

	mmu_size = etnaviv_iommu_dump_size(gpu->mmu_context);

	/* We always dump registers, mmu, ring, hanging cmdbuf and end marker */
	n_obj = 5;
	n_bomap_pages = 0;
	file_size = ARRAY_SIZE(etnaviv_dump_registers) *
			sizeof(struct etnaviv_dump_registers) +
		    mmu_size + gpu->buffer.size + submit->cmdbuf.size;

	/* Add in the active buffer objects */
	for (i = 0; i < submit->nr_bos; i++) {
		obj = submit->bos[i].obj;
		file_size += obj->base.size;
		n_bomap_pages += obj->base.size >> PAGE_SHIFT;
		n_obj++;
	}

	/* If we have any buffer objects, add a bomap object */
	if (n_bomap_pages) {
		file_size += n_bomap_pages * sizeof(__le64);
		n_obj++;
	}

	/* Add the size of the headers */
	file_size += sizeof(*iter.hdr) * n_obj;

	/* Allocate the file in vmalloc memory, it's likely to be big */
	iter.start = __vmalloc(file_size, GFP_KERNEL | __GFP_NOWARN |
			__GFP_NORETRY);
	if (!iter.start) {
		mutex_unlock(&gpu->mmu_context->lock);
		dev_warn(gpu->dev, "failed to allocate devcoredump file\n");
		return;
	}

	/* Point the data member after the headers */
	iter.hdr = iter.start;
	iter.data = &iter.hdr[n_obj];

	memset(iter.hdr, 0, iter.data - iter.start);

	etnaviv_core_dump_registers(&iter, gpu);
	etnaviv_core_dump_mmu(&iter, gpu->mmu_context, mmu_size);
	etnaviv_core_dump_mem(&iter, ETDUMP_BUF_RING, gpu->buffer.vaddr,
			      gpu->buffer.size,
			      etnaviv_cmdbuf_get_va(&gpu->buffer,
					&gpu->mmu_context->cmdbuf_mapping));

	etnaviv_core_dump_mem(&iter, ETDUMP_BUF_CMD,
			      submit->cmdbuf.vaddr, submit->cmdbuf.size,
			      etnaviv_cmdbuf_get_va(&submit->cmdbuf,
					&gpu->mmu_context->cmdbuf_mapping));

	mutex_unlock(&gpu->mmu_context->lock);

	/* Reserve space for the bomap */
	if (n_bomap_pages) {
		bomap_start = bomap = iter.data;
		memset(bomap, 0, sizeof(*bomap) * n_bomap_pages);
		etnaviv_core_dump_header(&iter, ETDUMP_BUF_BOMAP,
					 bomap + n_bomap_pages);
	} else {
		/* Silence warning */
		bomap_start = bomap = NULL;
	}

	for (i = 0; i < submit->nr_bos; i++) {
		struct etnaviv_vram_mapping *vram;
		struct page **pages;
		void *vaddr;

		obj = submit->bos[i].obj;
		vram = submit->bos[i].mapping;

		mutex_lock(&obj->lock);
		pages = etnaviv_gem_get_pages(obj);
		mutex_unlock(&obj->lock);
		if (!IS_ERR(pages)) {
			int j;

			iter.hdr->data[0] = bomap - bomap_start;

			for (j = 0; j < obj->base.size >> PAGE_SHIFT; j++)
				*bomap++ = cpu_to_le64(page_to_phys(*pages++));
		}

		iter.hdr->iova = cpu_to_le64(vram->iova);

		vaddr = etnaviv_gem_vmap(&obj->base);
		if (vaddr)
			memcpy(iter.data, vaddr, obj->base.size);

		etnaviv_core_dump_header(&iter, ETDUMP_BUF_BO, iter.data +
					 obj->base.size);
	}

	etnaviv_core_dump_header(&iter, ETDUMP_BUF_END, iter.data);

	dev_coredumpv(gpu->dev, iter.start, iter.data - iter.start, GFP_KERNEL);
}