/*
 * Copyright (C) 2014 Marcin Ślusarz <marcin.slusarz@gmail.com>.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "mmt_bin_decode_nvidia.h"
#include <stdio.h>
#include <stdlib.h>
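
/*
 * A note on the loading helpers, as they are used throughout this file:
 * mmt_load_data() and mmt_load_data_with_prefix() return a pointer into
 * the decoder's internal buffer, pulling in more input on demand (the
 * "prefix" variants peek at data starting pfx bytes past the current
 * position), and nothing is consumed until mmt_idx is advanced.
 * Messages carrying a variable-length mmt_buf payload are therefore
 * loaded in two steps: load the fixed-size part, read the embedded
 * length, then load again with the full size and re-fetch the pointer,
 * since the second load may move the buffer.  The "+ 1" in the size
 * computations accounts for what appears to be a trailing end-of-record
 * byte, which mmt_check_eor() validates.
 */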
static unsigned int load_create_object(struct mmt_nvidia_create_object **create_, unsigned int pfx)
{
	unsigned int size = sizeof(struct mmt_nvidia_create_object) + 1;
	struct mmt_nvidia_create_object *create;
	create = mmt_load_data_with_prefix(size, pfx, 0);
	mmt_buf_check_sanity(&create->name);
	size += create->name.len;
	create = mmt_load_data_with_prefix(size, pfx, 0);

	mmt_check_eor(size + pfx);

	if (create_)
		*create_ = create;
	return size;
}
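
/*
 * CREATE_LOADER generates a loader for messages whose payload is a
 * single fixed-size struct (plus the trailing end-of-record byte).
 * For example, CREATE_LOADER(load_bind, struct mmt_nvidia_bind)
 * expands to roughly:
 *
 *	static unsigned int load_bind(struct mmt_nvidia_bind **obj_, unsigned int pfx)
 *	{
 *		unsigned int size = sizeof(struct mmt_nvidia_bind) + 1;
 *		struct mmt_nvidia_bind *obj = mmt_load_data_with_prefix(size, pfx, 0);
 *		mmt_check_eor(size + pfx);
 *		if (obj_)
 *			*obj_ = obj;
 *		return size;
 *	}
 *
 * Passing NULL as obj_ turns a loader into a pure "measure and skip
 * this message" call, which load_memory_dump() below relies on.
 */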
#define CREATE_LOADER(name, type) \
static unsigned int name(type **obj_, unsigned int pfx) \
{ \
	unsigned int size = sizeof(type) + 1; \
	type *obj = mmt_load_data_with_prefix(size, pfx, 0); \
	\
	mmt_check_eor(size + pfx); \
	\
	if (obj_) \
		*obj_ = obj; \
	return size; \
}
CREATE_LOADER(load_destroy_object, struct mmt_nvidia_destroy_object)
CREATE_LOADER(load_call_method, struct mmt_nvidia_call_method)
CREATE_LOADER(load_create_mapped_object, struct mmt_nvidia_create_mapped_object)
CREATE_LOADER(load_create_dma_object, struct mmt_nvidia_create_dma_object)
CREATE_LOADER(load_alloc_map, struct mmt_nvidia_alloc_map)
CREATE_LOADER(load_gpu_map, struct mmt_nvidia_gpu_map)
CREATE_LOADER(load_gpu_map2, struct mmt_nvidia_gpu_map2)
CREATE_LOADER(load_gpu_unmap, struct mmt_nvidia_gpu_unmap)
CREATE_LOADER(load_gpu_unmap2, struct mmt_nvidia_gpu_unmap2)
CREATE_LOADER(load_mmap2, struct mmt_nvidia_mmap2)
CREATE_LOADER(load_mmap, struct mmt_nvidia_mmap)
CREATE_LOADER(load_unmap, struct mmt_nvidia_unmap)
CREATE_LOADER(load_bind, struct mmt_nvidia_bind)
CREATE_LOADER(load_create_driver_object, struct mmt_nvidia_create_driver_object)
CREATE_LOADER(load_create_device_object, struct mmt_nvidia_create_device_object)
CREATE_LOADER(load_create_context_object, struct mmt_nvidia_create_context_object)
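
/*
 * Scan ahead (without consuming input) for a memory dump message
 * (subtype 'o') starting pfx bytes past the current position.
 * Intervening messages whose callbacks are not registered in funcs are
 * measured and skipped, and their sizes are folded into the return
 * value so the caller advances mmt_idx past them as well.  Returns 0
 * when the scan hits a non-nvidia message, a message whose callback is
 * registered, or an unrecognized subtype; on success, *dump and *buf
 * point at the dump header and its payload.
 */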
static unsigned int load_memory_dump(unsigned int pfx, struct mmt_memory_dump_prefix **dump,
		struct mmt_buf **buf, struct mmt_nvidia_decode_funcs *funcs)
{
	unsigned int size1, size2, omitted = 0;
	struct mmt_memory_dump_prefix *d;
	struct mmt_buf *b;
	*dump = NULL;
	*buf = NULL;
	struct mmt_message_nv *nv;

	do
	{
		struct mmt_message *msg = mmt_load_data_with_prefix(1, pfx, 1);
		if (msg == NULL || msg->type != 'n')
			return 0;

		mmt_load_data_with_prefix(sizeof(struct mmt_message_nv), pfx, 0);

		nv = mmt_load_data_with_prefix(sizeof(struct mmt_message_nv), pfx, 0);
		if (nv == NULL)
			return 0;

		if (nv->subtype != 'o')
		{
			unsigned int omit;
			if (nv->subtype == 'c' && funcs->create_object == NULL)
				omit = load_create_object(NULL, pfx);
			else if (nv->subtype == 'd' && funcs->destroy_object == NULL)
				omit = load_destroy_object(NULL, pfx);
			else if (nv->subtype == 'l' && funcs->call_method == NULL)
				omit = load_call_method(NULL, pfx);
			else if (nv->subtype == 'p' && funcs->create_mapped == NULL)
				omit = load_create_mapped_object(NULL, pfx);
			else if (nv->subtype == 't' && funcs->create_dma_object == NULL)
				omit = load_create_dma_object(NULL, pfx);
			else if (nv->subtype == 'a' && funcs->alloc_map == NULL)
				omit = load_alloc_map(NULL, pfx);
			else if (nv->subtype == 'g' && funcs->gpu_map == NULL)
				omit = load_gpu_map(NULL, pfx);
			else if (nv->subtype == 'G' && funcs->gpu_map2 == NULL)
				omit = load_gpu_map2(NULL, pfx);
			else if (nv->subtype == 'h' && funcs->gpu_unmap == NULL)
				omit = load_gpu_unmap(NULL, pfx);
			else if (nv->subtype == 'H' && funcs->gpu_unmap2 == NULL)
				omit = load_gpu_unmap2(NULL, pfx);
			else if (nv->subtype == 'm' /*&& funcs->mmap == NULL*/)
				omit = load_mmap(NULL, pfx);
			else if (nv->subtype == 'e' && funcs->unmap == NULL)
				omit = load_unmap(NULL, pfx);
			else if (nv->subtype == 'b' && funcs->bind == NULL)
				omit = load_bind(NULL, pfx);
			else if (nv->subtype == 'r' && funcs->create_driver_object == NULL)
				omit = load_create_driver_object(NULL, pfx);
			else if (nv->subtype == 'v' && funcs->create_device_object == NULL)
				omit = load_create_device_object(NULL, pfx);
			else if (nv->subtype == 'x' && funcs->create_context_object == NULL)
				omit = load_create_context_object(NULL, pfx);
			else
			{
				//if (nv->subtype != 'j' && nv->subtype != 'i')
				//	printf("%d '%c'\n", nv->subtype, nv->subtype);
				return 0;
			}

			omitted += omit;
			pfx += omit;
		}
	}
	while (nv->subtype != 'o');

	size1 = sizeof(struct mmt_memory_dump_prefix);
	d = mmt_load_data_with_prefix(size1, pfx, 0);
	mmt_buf_check_sanity(&d->str);
	size1 += d->str.len;
	d = mmt_load_data_with_prefix(size1, pfx, 0);

	size2 = 4;
	b = mmt_load_data_with_prefix(size2, size1 + pfx, 0);
	mmt_buf_check_sanity(b);
	size2 += b->len + 1;
	b = mmt_load_data_with_prefix(size2, size1 + pfx, 0);

	mmt_check_eor(size2 + size1 + pfx);

	*dump = d;
	*buf = b;

	return size1 + size2 + omitted;
}
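
/*
 * GENERATE_HANDLER expands to an else-if branch of the dispatch chain
 * in mmt_decode_nvidia() below, so it deliberately references the nv,
 * size, funcs and state variables from that function's scope.  For
 * example, GENERATE_HANDLER('b', struct mmt_nvidia_bind, load_bind, bind)
 * becomes:
 *
 *	else if (nv->subtype == 'b')
 *	{
 *		struct mmt_nvidia_bind *obj;
 *		size = load_bind(&obj, 0);
 *		if (funcs->bind)
 *			funcs->bind(obj, state);
 *		mmt_idx += size;
 *	}
 */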
#define GENERATE_HANDLER(subtype_, type, loader, func) \
	else if (nv->subtype == subtype_) \
	{ \
		type *obj; \
		size = loader(&obj, 0); \
		\
		if (funcs->func) \
			funcs->func(obj, state); \
		\
		mmt_idx += size; \
	}
#define MAX_ARGS 10
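
/*
 * Decode one nvidia ('n' type) message at the current stream position
 * and dispatch on its subtype byte to the matching callback in funcs,
 * if registered.  For ioctl pre/post messages ('i'/'j'), up to MAX_ARGS
 * trailing memory dumps (buffers referenced by the ioctl) are collected
 * into args[] and handed to the callback together with the ioctl.
 */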
void mmt_decode_nvidia(struct mmt_nvidia_decode_funcs *funcs, void *state)
{
	struct mmt_message_nv *nv = mmt_load_data(sizeof(struct mmt_message_nv));
	unsigned int size;

	if (nv->subtype == 'i')
	{
		unsigned int size2, pfx;
		struct mmt_memory_dump args[MAX_ARGS];
		struct mmt_ioctl_pre *ctl;
		int argc;

		do
		{
			size = sizeof(struct mmt_ioctl_pre) + 1;
			ctl = mmt_load_data(size);
			mmt_buf_check_sanity(&ctl->data);
			size += ctl->data.len;
			ctl = mmt_load_data(size);

			mmt_check_eor(size);

			argc = 0;

			struct mmt_memory_dump_prefix *d;
			struct mmt_buf *b;
			pfx = size;

			while ((size2 = load_memory_dump(pfx, &d, &b, funcs)))
			{
				args[argc].addr = d->addr;
				args[argc].str = &d->str;
				args[argc].data = b;
				argc++;
				pfx += size2;
				if (argc == MAX_ARGS)
					break;
			}
		}
		/* Loading the trailing dumps may grow (and thus move) the
		 * decoder's buffer, invalidating ctl and the args pointers;
		 * redo the whole pass until ctl comes back stable. */
		while (ctl != mmt_load_data(size));

		if (funcs->ioctl_pre)
			funcs->ioctl_pre(ctl, state, args, argc);

		mmt_idx += pfx;
	}
	else if (nv->subtype == 'j')
	{
		/* Same revalidation dance as the 'i' branch, for ioctl_post. */
		unsigned int size2, pfx;
		struct mmt_memory_dump args[MAX_ARGS];
		struct mmt_ioctl_post *ctl;
		int argc;

		do
		{
			size = sizeof(struct mmt_ioctl_post) + 1;
			ctl = mmt_load_data(size);
			mmt_buf_check_sanity(&ctl->data);
			size += ctl->data.len;
			ctl = mmt_load_data(size);

			mmt_check_eor(size);

			argc = 0;

			struct mmt_memory_dump_prefix *d;
			struct mmt_buf *b;
			pfx = size;

			while ((size2 = load_memory_dump(pfx, &d, &b, funcs)))
			{
				args[argc].addr = d->addr;
				args[argc].str = &d->str;
				args[argc].data = b;
				argc++;
				pfx += size2;
				if (argc == MAX_ARGS)
					break;
			}
		}
		while (ctl != mmt_load_data(size));

		if (funcs->ioctl_post)
			funcs->ioctl_post(ctl, state, args, argc);

		mmt_idx += pfx;
	}
	else if (nv->subtype == 'o')
	{
		struct mmt_memory_dump_prefix *d;
		struct mmt_buf *b;

		size = load_memory_dump(0, &d, &b, funcs);

		if (funcs->memory_dump)
			funcs->memory_dump(d, b, state);

		mmt_idx += size;
	}
	GENERATE_HANDLER('c', struct mmt_nvidia_create_object, load_create_object, create_object)
	GENERATE_HANDLER('d', struct mmt_nvidia_destroy_object, load_destroy_object, destroy_object)
	GENERATE_HANDLER('l', struct mmt_nvidia_call_method, load_call_method, call_method)
	GENERATE_HANDLER('p', struct mmt_nvidia_create_mapped_object, load_create_mapped_object, create_mapped)
	GENERATE_HANDLER('t', struct mmt_nvidia_create_dma_object, load_create_dma_object, create_dma_object)
	GENERATE_HANDLER('a', struct mmt_nvidia_alloc_map, load_alloc_map, alloc_map)
	GENERATE_HANDLER('g', struct mmt_nvidia_gpu_map, load_gpu_map, gpu_map)
	GENERATE_HANDLER('G', struct mmt_nvidia_gpu_map2, load_gpu_map2, gpu_map2)
	GENERATE_HANDLER('h', struct mmt_nvidia_gpu_unmap, load_gpu_unmap, gpu_unmap)
	GENERATE_HANDLER('H', struct mmt_nvidia_gpu_unmap2, load_gpu_unmap2, gpu_unmap2)
	GENERATE_HANDLER('M', struct mmt_nvidia_mmap2, load_mmap2, mmap2)
	GENERATE_HANDLER('m', struct mmt_nvidia_mmap, load_mmap, mmap)
	GENERATE_HANDLER('b', struct mmt_nvidia_bind, load_bind, bind)
	GENERATE_HANDLER('e', struct mmt_nvidia_unmap, load_unmap, unmap)
	GENERATE_HANDLER('r', struct mmt_nvidia_create_driver_object, load_create_driver_object, create_driver_object)
	GENERATE_HANDLER('v', struct mmt_nvidia_create_device_object, load_create_device_object, create_device_object)
	GENERATE_HANDLER('x', struct mmt_nvidia_create_context_object, load_create_context_object, create_context_object)
	else if (nv->subtype == '1')
	{
		struct mmt_nvidia_call_method_data *call;
		size = sizeof(struct mmt_nvidia_call_method_data) + 1;
		call = mmt_load_data(size);
		mmt_buf_check_sanity(&call->data);
		size += call->data.len;
		call = mmt_load_data(size);

		mmt_check_eor(size);

		if (funcs->call_method_data)
			funcs->call_method_data(call, state);

		mmt_idx += size;
	}
	else if (nv->subtype == '4')
	{
		struct mmt_nvidia_ioctl_4d *ctl;
		size = sizeof(struct mmt_nvidia_ioctl_4d) + 1;
		ctl = mmt_load_data(size);
		mmt_buf_check_sanity(&ctl->str);
		size += ctl->str.len;
		ctl = mmt_load_data(size);

		mmt_check_eor(size);

		if (funcs->ioctl_4d)
			funcs->ioctl_4d(ctl, state);

		mmt_idx += size;
	}
	else if (nv->subtype == 'k')
	{
		struct mmt_nvidia_mmiotrace_mark *mark;
		size = sizeof(struct mmt_nvidia_mmiotrace_mark) + 1;
		mark = mmt_load_data(size);
		mmt_buf_check_sanity(&mark->str);
		size += mark->str.len;
		mark = mmt_load_data(size);

		mmt_check_eor(size);

		if (funcs->mmiotrace_mark)
			funcs->mmiotrace_mark(mark, state);

		mmt_idx += size;
	}
	else if (nv->subtype == 'P')
	{
		struct mmt_nouveau_pushbuf_data *data;
		size = sizeof(struct mmt_nouveau_pushbuf_data) + 1;
		data = mmt_load_data(size);
		mmt_buf_check_sanity(&data->data);
		size += data->data.len;
		data = mmt_load_data(size);

		mmt_check_eor(size);

		if (funcs->nouveau_gem_pushbuf_data)
			funcs->nouveau_gem_pushbuf_data(data, state);

		mmt_idx += size;
	}
	else
	{
		/* Unrecognized subtype: dump the surrounding bytes and bail. */
		fflush(stdout);
		fprintf(stderr, "unknown nvidia message subtype: %d ('%c')\n",
				nv->subtype, nv->subtype);
		mmt_dump_next();
		exit(1);
	}
}
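
/*
 * A minimal usage sketch (kept as a comment, not compiled): how a
 * consumer might register callbacks and drive this decoder.  The field
 * names and the mmt_decode_nvidia() signature come from this file; the
 * exact callback prototypes live in the header, and my_state and the
 * surrounding read loop are stand-ins for illustration:
 *
 *	static void my_create_object(struct mmt_nvidia_create_object *create, void *state)
 *	{
 *		printf("create_object, name length: %u\n", create->name.len);
 *	}
 *
 *	static struct mmt_nvidia_decode_funcs funcs = {
 *		.create_object = my_create_object,
 *	};
 *
 *	// ... once the driver loop sees an 'n' message:
 *	mmt_decode_nvidia(&funcs, &my_state);
 *
 * Callbacks left NULL are not just ignored: load_memory_dump() uses
 * their absence as permission to skip those messages wholesale.
 */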