drivers/gpu/drm/radeon/r600_cs.c
blob 745e66eacd476044cb8d13c30d3b4647143a65ee
1 /*
2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
24 * Authors: Dave Airlie
25 * Alex Deucher
26 * Jerome Glisse
28 #include <linux/kernel.h>
29 #include <drm/drmP.h>
30 #include "radeon.h"
31 #include "r600d.h"
32 #include "r600_reg_safe.h"
34 static int r600_nomm;
35 extern void r600_cs_legacy_get_tiling_conf(struct drm_device *dev, u32 *npipes, u32 *nbanks, u32 *group_size);
38 struct r600_cs_track {
 39 /* configuration we mirror so that we use the same code between kms/ums */
40 u32 group_size;
41 u32 nbanks;
42 u32 npipes;
43 /* value we track */
44 u32 sq_config;
45 u32 log_nsamples;
46 u32 nsamples;
47 u32 cb_color_base_last[8];
48 struct radeon_bo *cb_color_bo[8];
49 u64 cb_color_bo_mc[8];
50 u64 cb_color_bo_offset[8];
51 struct radeon_bo *cb_color_frag_bo[8];
52 u64 cb_color_frag_offset[8];
53 struct radeon_bo *cb_color_tile_bo[8];
54 u64 cb_color_tile_offset[8];
55 u32 cb_color_mask[8];
56 u32 cb_color_info[8];
57 u32 cb_color_view[8];
58 u32 cb_color_size_idx[8]; /* unused */
59 u32 cb_target_mask;
60 u32 cb_shader_mask; /* unused */
61 bool is_resolve;
62 u32 cb_color_size[8];
63 u32 vgt_strmout_en;
64 u32 vgt_strmout_buffer_en;
65 struct radeon_bo *vgt_strmout_bo[4];
66 u64 vgt_strmout_bo_mc[4]; /* unused */
67 u32 vgt_strmout_bo_offset[4];
68 u32 vgt_strmout_size[4];
69 u32 db_depth_control;
70 u32 db_depth_info;
71 u32 db_depth_size_idx;
72 u32 db_depth_view;
73 u32 db_depth_size;
74 u32 db_offset;
75 struct radeon_bo *db_bo;
76 u64 db_bo_mc;
77 bool sx_misc_kill_all_prims;
78 bool cb_dirty;
79 bool db_dirty;
80 bool streamout_dirty;
81 struct radeon_bo *htile_bo;
82 u64 htile_offset;
83 u32 htile_surface;
86 #define FMT_8_BIT(fmt, vc) [fmt] = { 1, 1, 1, vc, CHIP_R600 }
87 #define FMT_16_BIT(fmt, vc) [fmt] = { 1, 1, 2, vc, CHIP_R600 }
88 #define FMT_24_BIT(fmt) [fmt] = { 1, 1, 4, 0, CHIP_R600 }
89 #define FMT_32_BIT(fmt, vc) [fmt] = { 1, 1, 4, vc, CHIP_R600 }
90 #define FMT_48_BIT(fmt) [fmt] = { 1, 1, 8, 0, CHIP_R600 }
91 #define FMT_64_BIT(fmt, vc) [fmt] = { 1, 1, 8, vc, CHIP_R600 }
92 #define FMT_96_BIT(fmt) [fmt] = { 1, 1, 12, 0, CHIP_R600 }
 93 #define FMT_128_BIT(fmt, vc) [fmt] = { 1, 1, 16, vc, CHIP_R600 }
95 struct gpu_formats {
96 unsigned blockwidth;
97 unsigned blockheight;
98 unsigned blocksize;
99 unsigned valid_color;
100 enum radeon_family min_family;
103 static const struct gpu_formats color_formats_table[] = {
104 /* 8 bit */
105 FMT_8_BIT(V_038004_COLOR_8, 1),
106 FMT_8_BIT(V_038004_COLOR_4_4, 1),
107 FMT_8_BIT(V_038004_COLOR_3_3_2, 1),
108 FMT_8_BIT(V_038004_FMT_1, 0),
110 /* 16-bit */
111 FMT_16_BIT(V_038004_COLOR_16, 1),
112 FMT_16_BIT(V_038004_COLOR_16_FLOAT, 1),
113 FMT_16_BIT(V_038004_COLOR_8_8, 1),
114 FMT_16_BIT(V_038004_COLOR_5_6_5, 1),
115 FMT_16_BIT(V_038004_COLOR_6_5_5, 1),
116 FMT_16_BIT(V_038004_COLOR_1_5_5_5, 1),
117 FMT_16_BIT(V_038004_COLOR_4_4_4_4, 1),
118 FMT_16_BIT(V_038004_COLOR_5_5_5_1, 1),
120 /* 24-bit */
121 FMT_24_BIT(V_038004_FMT_8_8_8),
123 /* 32-bit */
124 FMT_32_BIT(V_038004_COLOR_32, 1),
125 FMT_32_BIT(V_038004_COLOR_32_FLOAT, 1),
126 FMT_32_BIT(V_038004_COLOR_16_16, 1),
127 FMT_32_BIT(V_038004_COLOR_16_16_FLOAT, 1),
128 FMT_32_BIT(V_038004_COLOR_8_24, 1),
129 FMT_32_BIT(V_038004_COLOR_8_24_FLOAT, 1),
130 FMT_32_BIT(V_038004_COLOR_24_8, 1),
131 FMT_32_BIT(V_038004_COLOR_24_8_FLOAT, 1),
132 FMT_32_BIT(V_038004_COLOR_10_11_11, 1),
133 FMT_32_BIT(V_038004_COLOR_10_11_11_FLOAT, 1),
134 FMT_32_BIT(V_038004_COLOR_11_11_10, 1),
135 FMT_32_BIT(V_038004_COLOR_11_11_10_FLOAT, 1),
136 FMT_32_BIT(V_038004_COLOR_2_10_10_10, 1),
137 FMT_32_BIT(V_038004_COLOR_8_8_8_8, 1),
138 FMT_32_BIT(V_038004_COLOR_10_10_10_2, 1),
139 FMT_32_BIT(V_038004_FMT_5_9_9_9_SHAREDEXP, 0),
140 FMT_32_BIT(V_038004_FMT_32_AS_8, 0),
141 FMT_32_BIT(V_038004_FMT_32_AS_8_8, 0),
143 /* 48-bit */
144 FMT_48_BIT(V_038004_FMT_16_16_16),
145 FMT_48_BIT(V_038004_FMT_16_16_16_FLOAT),
147 /* 64-bit */
148 FMT_64_BIT(V_038004_COLOR_X24_8_32_FLOAT, 1),
149 FMT_64_BIT(V_038004_COLOR_32_32, 1),
150 FMT_64_BIT(V_038004_COLOR_32_32_FLOAT, 1),
151 FMT_64_BIT(V_038004_COLOR_16_16_16_16, 1),
152 FMT_64_BIT(V_038004_COLOR_16_16_16_16_FLOAT, 1),
154 FMT_96_BIT(V_038004_FMT_32_32_32),
155 FMT_96_BIT(V_038004_FMT_32_32_32_FLOAT),
157 /* 128-bit */
158 FMT_128_BIT(V_038004_COLOR_32_32_32_32, 1),
159 FMT_128_BIT(V_038004_COLOR_32_32_32_32_FLOAT, 1),
161 [V_038004_FMT_GB_GR] = { 2, 1, 4, 0 },
162 [V_038004_FMT_BG_RG] = { 2, 1, 4, 0 },
164 /* block compressed formats */
165 [V_038004_FMT_BC1] = { 4, 4, 8, 0 },
166 [V_038004_FMT_BC2] = { 4, 4, 16, 0 },
167 [V_038004_FMT_BC3] = { 4, 4, 16, 0 },
168 [V_038004_FMT_BC4] = { 4, 4, 8, 0 },
 169 	[V_038004_FMT_BC5] = { 4, 4, 16, 0 },
 170 	[V_038004_FMT_BC6] = { 4, 4, 16, 0, CHIP_CEDAR }, /* Evergreen-only */
 171 	[V_038004_FMT_BC7] = { 4, 4, 16, 0, CHIP_CEDAR }, /* Evergreen-only */
 173 	/* The other Evergreen formats */
 174 	[V_038004_FMT_32_AS_32_32_32_32] = { 1, 1, 4, 0, CHIP_CEDAR },
177 bool r600_fmt_is_valid_color(u32 format)
179 if (format >= ARRAY_SIZE(color_formats_table))
180 return false;
182 if (color_formats_table[format].valid_color)
183 return true;
185 return false;
188 bool r600_fmt_is_valid_texture(u32 format, enum radeon_family family)
190 if (format >= ARRAY_SIZE(color_formats_table))
191 return false;
193 if (family < color_formats_table[format].min_family)
194 return false;
196 if (color_formats_table[format].blockwidth > 0)
197 return true;
199 return false;
202 int r600_fmt_get_blocksize(u32 format)
204 if (format >= ARRAY_SIZE(color_formats_table))
205 return 0;
207 return color_formats_table[format].blocksize;
210 int r600_fmt_get_nblocksx(u32 format, u32 w)
212 unsigned bw;
214 if (format >= ARRAY_SIZE(color_formats_table))
215 return 0;
217 bw = color_formats_table[format].blockwidth;
218 if (bw == 0)
219 return 0;
221 return (w + bw - 1) / bw;
224 int r600_fmt_get_nblocksy(u32 format, u32 h)
226 unsigned bh;
228 if (format >= ARRAY_SIZE(color_formats_table))
229 return 0;
231 bh = color_formats_table[format].blockheight;
232 if (bh == 0)
233 return 0;
235 return (h + bh - 1) / bh;
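/* Worked example (illustrative, not from the original source): for
 * V_038004_FMT_BC1 the table above gives 4x4 blocks of 8 bytes, so a
 * 70x34 texture yields nblocksx = (70 + 3) / 4 = 18 and
 * nblocksy = (34 + 3) / 4 = 9, i.e. 18 * 9 * 8 = 1296 bytes per slice.
 */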
238 struct array_mode_checker {
239 int array_mode;
240 u32 group_size;
241 u32 nbanks;
242 u32 npipes;
243 u32 nsamples;
244 u32 blocksize;
247 /* returns alignment in pixels for pitch/height/depth and bytes for base */
248 static int r600_get_array_mode_alignment(struct array_mode_checker *values,
249 u32 *pitch_align,
250 u32 *height_align,
251 u32 *depth_align,
252 u64 *base_align)
254 u32 tile_width = 8;
255 u32 tile_height = 8;
256 u32 macro_tile_width = values->nbanks;
257 u32 macro_tile_height = values->npipes;
258 u32 tile_bytes = tile_width * tile_height * values->blocksize * values->nsamples;
259 u32 macro_tile_bytes = macro_tile_width * macro_tile_height * tile_bytes;
261 switch (values->array_mode) {
262 case ARRAY_LINEAR_GENERAL:
263 /* technically tile_width/_height for pitch/height */
264 *pitch_align = 1; /* tile_width */
265 *height_align = 1; /* tile_height */
266 *depth_align = 1;
267 *base_align = 1;
268 break;
269 case ARRAY_LINEAR_ALIGNED:
270 *pitch_align = max((u32)64, (u32)(values->group_size / values->blocksize));
271 *height_align = 1;
272 *depth_align = 1;
273 *base_align = values->group_size;
274 break;
275 case ARRAY_1D_TILED_THIN1:
276 *pitch_align = max((u32)tile_width,
277 (u32)(values->group_size /
278 (tile_height * values->blocksize * values->nsamples)));
279 *height_align = tile_height;
280 *depth_align = 1;
281 *base_align = values->group_size;
282 break;
283 case ARRAY_2D_TILED_THIN1:
284 *pitch_align = max((u32)macro_tile_width * tile_width,
285 (u32)((values->group_size * values->nbanks) /
286 (values->blocksize * values->nsamples * tile_width)));
287 *height_align = macro_tile_height * tile_height;
288 *depth_align = 1;
289 *base_align = max(macro_tile_bytes,
290 (*pitch_align) * values->blocksize * (*height_align) * values->nsamples);
291 break;
292 default:
293 return -EINVAL;
296 return 0;
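/* Worked example (illustrative values, assumed): for ARRAY_2D_TILED_THIN1
 * with group_size = 256, nbanks = 4, npipes = 2, blocksize = 4, nsamples = 1:
 * pitch_align  = max(4 * 8, (256 * 4) / (4 * 1 * 8)) = 32 pixels,
 * height_align = 2 * 8 = 16 pixels,
 * tile_bytes   = 8 * 8 * 4 * 1 = 256, macro_tile_bytes = 4 * 2 * 256 = 2048,
 * base_align   = max(2048, 32 * 4 * 16 * 1) = 2048 bytes.
 */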
299 static void r600_cs_track_init(struct r600_cs_track *track)
301 int i;
303 /* assume DX9 mode */
304 track->sq_config = DX9_CONSTS;
305 for (i = 0; i < 8; i++) {
306 track->cb_color_base_last[i] = 0;
307 track->cb_color_size[i] = 0;
308 track->cb_color_size_idx[i] = 0;
309 track->cb_color_info[i] = 0;
310 track->cb_color_view[i] = 0xFFFFFFFF;
311 track->cb_color_bo[i] = NULL;
312 track->cb_color_bo_offset[i] = 0xFFFFFFFF;
313 track->cb_color_bo_mc[i] = 0xFFFFFFFF;
314 track->cb_color_frag_bo[i] = NULL;
315 track->cb_color_frag_offset[i] = 0xFFFFFFFF;
316 track->cb_color_tile_bo[i] = NULL;
317 track->cb_color_tile_offset[i] = 0xFFFFFFFF;
318 track->cb_color_mask[i] = 0xFFFFFFFF;
320 track->is_resolve = false;
321 track->nsamples = 16;
322 track->log_nsamples = 4;
323 track->cb_target_mask = 0xFFFFFFFF;
324 track->cb_shader_mask = 0xFFFFFFFF;
325 track->cb_dirty = true;
326 track->db_bo = NULL;
327 track->db_bo_mc = 0xFFFFFFFF;
328 /* assume the biggest format and that htile is enabled */
329 track->db_depth_info = 7 | (1 << 25);
330 track->db_depth_view = 0xFFFFC000;
331 track->db_depth_size = 0xFFFFFFFF;
332 track->db_depth_size_idx = 0;
333 track->db_depth_control = 0xFFFFFFFF;
334 track->db_dirty = true;
335 track->htile_bo = NULL;
336 track->htile_offset = 0xFFFFFFFF;
337 track->htile_surface = 0;
339 for (i = 0; i < 4; i++) {
340 track->vgt_strmout_size[i] = 0;
341 track->vgt_strmout_bo[i] = NULL;
342 track->vgt_strmout_bo_offset[i] = 0xFFFFFFFF;
343 track->vgt_strmout_bo_mc[i] = 0xFFFFFFFF;
345 track->streamout_dirty = true;
346 track->sx_misc_kill_all_prims = false;
349 static int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
351 struct r600_cs_track *track = p->track;
352 u32 slice_tile_max, size, tmp;
353 u32 height, height_align, pitch, pitch_align, depth_align;
354 u64 base_offset, base_align;
355 struct array_mode_checker array_check;
356 volatile u32 *ib = p->ib.ptr;
357 unsigned array_mode;
358 u32 format;
 359 	/* When resolve is used, the second colorbuffer always has 1 sample. */
360 unsigned nsamples = track->is_resolve && i == 1 ? 1 : track->nsamples;
362 size = radeon_bo_size(track->cb_color_bo[i]) - track->cb_color_bo_offset[i];
363 format = G_0280A0_FORMAT(track->cb_color_info[i]);
364 if (!r600_fmt_is_valid_color(format)) {
365 dev_warn(p->dev, "%s:%d cb invalid format %d for %d (0x%08X)\n",
366 __func__, __LINE__, format,
367 i, track->cb_color_info[i]);
368 return -EINVAL;
370 /* pitch in pixels */
371 pitch = (G_028060_PITCH_TILE_MAX(track->cb_color_size[i]) + 1) * 8;
372 slice_tile_max = G_028060_SLICE_TILE_MAX(track->cb_color_size[i]) + 1;
373 slice_tile_max *= 64;
374 height = slice_tile_max / pitch;
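	/* Example (illustrative): PITCH_TILE_MAX = 159 and SLICE_TILE_MAX = 20479
	 * decode to pitch = 160 * 8 = 1280 pixels and
	 * height = (20480 * 64) / 1280 = 1024 lines.
	 */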
375 if (height > 8192)
376 height = 8192;
377 array_mode = G_0280A0_ARRAY_MODE(track->cb_color_info[i]);
379 base_offset = track->cb_color_bo_mc[i] + track->cb_color_bo_offset[i];
380 array_check.array_mode = array_mode;
381 array_check.group_size = track->group_size;
382 array_check.nbanks = track->nbanks;
383 array_check.npipes = track->npipes;
384 array_check.nsamples = nsamples;
385 array_check.blocksize = r600_fmt_get_blocksize(format);
386 if (r600_get_array_mode_alignment(&array_check,
387 &pitch_align, &height_align, &depth_align, &base_align)) {
388 dev_warn(p->dev, "%s invalid tiling %d for %d (0x%08X)\n", __func__,
389 G_0280A0_ARRAY_MODE(track->cb_color_info[i]), i,
390 track->cb_color_info[i]);
391 return -EINVAL;
393 switch (array_mode) {
394 case V_0280A0_ARRAY_LINEAR_GENERAL:
395 break;
396 case V_0280A0_ARRAY_LINEAR_ALIGNED:
397 break;
398 case V_0280A0_ARRAY_1D_TILED_THIN1:
399 /* avoid breaking userspace */
400 if (height > 7)
401 height &= ~0x7;
402 break;
403 case V_0280A0_ARRAY_2D_TILED_THIN1:
404 break;
405 default:
406 dev_warn(p->dev, "%s invalid tiling %d for %d (0x%08X)\n", __func__,
407 G_0280A0_ARRAY_MODE(track->cb_color_info[i]), i,
408 track->cb_color_info[i]);
409 return -EINVAL;
412 if (!IS_ALIGNED(pitch, pitch_align)) {
413 dev_warn(p->dev, "%s:%d cb pitch (%d, 0x%x, %d) invalid\n",
414 __func__, __LINE__, pitch, pitch_align, array_mode);
415 return -EINVAL;
417 if (!IS_ALIGNED(height, height_align)) {
418 dev_warn(p->dev, "%s:%d cb height (%d, 0x%x, %d) invalid\n",
419 __func__, __LINE__, height, height_align, array_mode);
420 return -EINVAL;
422 if (!IS_ALIGNED(base_offset, base_align)) {
423 dev_warn(p->dev, "%s offset[%d] 0x%llx 0x%llx, %d not aligned\n", __func__, i,
424 base_offset, base_align, array_mode);
425 return -EINVAL;
428 /* check offset */
429 tmp = r600_fmt_get_nblocksy(format, height) * r600_fmt_get_nblocksx(format, pitch) *
430 r600_fmt_get_blocksize(format) * nsamples;
431 switch (array_mode) {
432 default:
433 case V_0280A0_ARRAY_LINEAR_GENERAL:
434 case V_0280A0_ARRAY_LINEAR_ALIGNED:
435 tmp += track->cb_color_view[i] & 0xFF;
436 break;
437 case V_0280A0_ARRAY_1D_TILED_THIN1:
438 case V_0280A0_ARRAY_2D_TILED_THIN1:
439 tmp += G_028080_SLICE_MAX(track->cb_color_view[i]) * tmp;
440 break;
442 if ((tmp + track->cb_color_bo_offset[i]) > radeon_bo_size(track->cb_color_bo[i])) {
443 if (array_mode == V_0280A0_ARRAY_LINEAR_GENERAL) {
444 /* the initial DDX does bad things with the CB size occasionally */
445 /* it rounds up height too far for slice tile max but the BO is smaller */
446 /* r600c,g also seem to flush at bad times in some apps resulting in
447 * bogus values here. So for linear just allow anything to avoid breaking
448 * broken userspace.
450 } else {
451 dev_warn(p->dev, "%s offset[%d] %d %llu %d %lu too big (%d %d) (%d %d %d)\n",
452 __func__, i, array_mode,
453 track->cb_color_bo_offset[i], tmp,
454 radeon_bo_size(track->cb_color_bo[i]),
455 pitch, height, r600_fmt_get_nblocksx(format, pitch),
456 r600_fmt_get_nblocksy(format, height),
457 r600_fmt_get_blocksize(format));
458 return -EINVAL;
461 /* limit max tile */
462 tmp = (height * pitch) >> 6;
463 if (tmp < slice_tile_max)
464 slice_tile_max = tmp;
465 tmp = S_028060_PITCH_TILE_MAX((pitch / 8) - 1) |
466 S_028060_SLICE_TILE_MAX(slice_tile_max - 1);
467 ib[track->cb_color_size_idx[i]] = tmp;
469 /* FMASK/CMASK */
470 switch (G_0280A0_TILE_MODE(track->cb_color_info[i])) {
471 case V_0280A0_TILE_DISABLE:
472 break;
473 case V_0280A0_FRAG_ENABLE:
474 if (track->nsamples > 1) {
475 uint32_t tile_max = G_028100_FMASK_TILE_MAX(track->cb_color_mask[i]);
 476 		/* the tile size is 8x8, but the per-tile size is in units of bits,
 477 		 * so for bytes just multiply by 8. */
478 uint32_t bytes = track->nsamples * track->log_nsamples * 8 * (tile_max + 1);
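		/* e.g. (illustrative) with 4 samples: nsamples = 4, log_nsamples = 2,
		 * tile_max = 1023 -> bytes = 4 * 2 * 8 * 1024 = 65536.
		 */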
480 if (bytes + track->cb_color_frag_offset[i] >
481 radeon_bo_size(track->cb_color_frag_bo[i])) {
482 dev_warn(p->dev, "%s FMASK_TILE_MAX too large "
483 "(tile_max=%u, bytes=%u, offset=%llu, bo_size=%lu)\n",
484 __func__, tile_max, bytes,
485 track->cb_color_frag_offset[i],
486 radeon_bo_size(track->cb_color_frag_bo[i]));
487 return -EINVAL;
490 /* fall through */
491 case V_0280A0_CLEAR_ENABLE:
493 uint32_t block_max = G_028100_CMASK_BLOCK_MAX(track->cb_color_mask[i]);
 494 		/* One block = 128x128 pixels, and one 8x8 tile takes 4 bits:
 495 		 * (128*128) / (8*8) / 2 = 128 bytes per block. */
496 uint32_t bytes = (block_max + 1) * 128;
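		/* e.g. (illustrative) block_max = 255 -> bytes = 256 * 128 = 32768,
		 * which must fit in the tile BO starting at cb_color_tile_offset.
		 */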
498 if (bytes + track->cb_color_tile_offset[i] >
499 radeon_bo_size(track->cb_color_tile_bo[i])) {
500 dev_warn(p->dev, "%s CMASK_BLOCK_MAX too large "
501 "(block_max=%u, bytes=%u, offset=%llu, bo_size=%lu)\n",
502 __func__, block_max, bytes,
503 track->cb_color_tile_offset[i],
504 radeon_bo_size(track->cb_color_tile_bo[i]));
505 return -EINVAL;
507 break;
509 default:
510 dev_warn(p->dev, "%s invalid tile mode\n", __func__);
511 return -EINVAL;
513 return 0;
516 static int r600_cs_track_validate_db(struct radeon_cs_parser *p)
518 struct r600_cs_track *track = p->track;
519 u32 nviews, bpe, ntiles, size, slice_tile_max, tmp;
520 u32 height_align, pitch_align, depth_align;
521 u32 pitch = 8192;
522 u32 height = 8192;
523 u64 base_offset, base_align;
524 struct array_mode_checker array_check;
525 int array_mode;
526 volatile u32 *ib = p->ib.ptr;
529 if (track->db_bo == NULL) {
530 dev_warn(p->dev, "z/stencil with no depth buffer\n");
531 return -EINVAL;
533 switch (G_028010_FORMAT(track->db_depth_info)) {
534 case V_028010_DEPTH_16:
535 bpe = 2;
536 break;
537 case V_028010_DEPTH_X8_24:
538 case V_028010_DEPTH_8_24:
539 case V_028010_DEPTH_X8_24_FLOAT:
540 case V_028010_DEPTH_8_24_FLOAT:
541 case V_028010_DEPTH_32_FLOAT:
542 bpe = 4;
543 break;
544 case V_028010_DEPTH_X24_8_32_FLOAT:
545 bpe = 8;
546 break;
547 default:
548 dev_warn(p->dev, "z/stencil with invalid format %d\n", G_028010_FORMAT(track->db_depth_info));
549 return -EINVAL;
551 if ((track->db_depth_size & 0xFFFFFC00) == 0xFFFFFC00) {
552 if (!track->db_depth_size_idx) {
553 dev_warn(p->dev, "z/stencil buffer size not set\n");
554 return -EINVAL;
556 tmp = radeon_bo_size(track->db_bo) - track->db_offset;
557 tmp = (tmp / bpe) >> 6;
558 if (!tmp) {
559 dev_warn(p->dev, "z/stencil buffer too small (0x%08X %d %d %ld)\n",
560 track->db_depth_size, bpe, track->db_offset,
561 radeon_bo_size(track->db_bo));
562 return -EINVAL;
564 ib[track->db_depth_size_idx] = S_028000_SLICE_TILE_MAX(tmp - 1) | (track->db_depth_size & 0x3FF);
565 } else {
566 size = radeon_bo_size(track->db_bo);
567 /* pitch in pixels */
568 pitch = (G_028000_PITCH_TILE_MAX(track->db_depth_size) + 1) * 8;
569 slice_tile_max = G_028000_SLICE_TILE_MAX(track->db_depth_size) + 1;
570 slice_tile_max *= 64;
571 height = slice_tile_max / pitch;
572 if (height > 8192)
573 height = 8192;
574 base_offset = track->db_bo_mc + track->db_offset;
575 array_mode = G_028010_ARRAY_MODE(track->db_depth_info);
576 array_check.array_mode = array_mode;
577 array_check.group_size = track->group_size;
578 array_check.nbanks = track->nbanks;
579 array_check.npipes = track->npipes;
580 array_check.nsamples = track->nsamples;
581 array_check.blocksize = bpe;
582 if (r600_get_array_mode_alignment(&array_check,
583 &pitch_align, &height_align, &depth_align, &base_align)) {
584 dev_warn(p->dev, "%s invalid tiling %d (0x%08X)\n", __func__,
585 G_028010_ARRAY_MODE(track->db_depth_info),
586 track->db_depth_info);
587 return -EINVAL;
589 switch (array_mode) {
590 case V_028010_ARRAY_1D_TILED_THIN1:
591 /* don't break userspace */
592 height &= ~0x7;
593 break;
594 case V_028010_ARRAY_2D_TILED_THIN1:
595 break;
596 default:
597 dev_warn(p->dev, "%s invalid tiling %d (0x%08X)\n", __func__,
598 G_028010_ARRAY_MODE(track->db_depth_info),
599 track->db_depth_info);
600 return -EINVAL;
603 if (!IS_ALIGNED(pitch, pitch_align)) {
604 dev_warn(p->dev, "%s:%d db pitch (%d, 0x%x, %d) invalid\n",
605 __func__, __LINE__, pitch, pitch_align, array_mode);
606 return -EINVAL;
608 if (!IS_ALIGNED(height, height_align)) {
609 dev_warn(p->dev, "%s:%d db height (%d, 0x%x, %d) invalid\n",
610 __func__, __LINE__, height, height_align, array_mode);
611 return -EINVAL;
613 if (!IS_ALIGNED(base_offset, base_align)) {
614 dev_warn(p->dev, "%s offset 0x%llx, 0x%llx, %d not aligned\n", __func__,
615 base_offset, base_align, array_mode);
616 return -EINVAL;
619 ntiles = G_028000_SLICE_TILE_MAX(track->db_depth_size) + 1;
620 nviews = G_028004_SLICE_MAX(track->db_depth_view) + 1;
621 tmp = ntiles * bpe * 64 * nviews * track->nsamples;
622 if ((tmp + track->db_offset) > radeon_bo_size(track->db_bo)) {
623 dev_warn(p->dev, "z/stencil buffer (%d) too small (0x%08X %d %d %d -> %u have %lu)\n",
624 array_mode,
625 track->db_depth_size, ntiles, nviews, bpe, tmp + track->db_offset,
626 radeon_bo_size(track->db_bo));
627 return -EINVAL;
631 /* hyperz */
632 if (G_028010_TILE_SURFACE_ENABLE(track->db_depth_info)) {
633 unsigned long size;
634 unsigned nbx, nby;
636 if (track->htile_bo == NULL) {
637 dev_warn(p->dev, "%s:%d htile enabled without htile surface 0x%08x\n",
638 __func__, __LINE__, track->db_depth_info);
639 return -EINVAL;
641 if ((track->db_depth_size & 0xFFFFFC00) == 0xFFFFFC00) {
642 dev_warn(p->dev, "%s:%d htile can't be enabled with bogus db_depth_size 0x%08x\n",
643 __func__, __LINE__, track->db_depth_size);
644 return -EINVAL;
647 nbx = pitch;
648 nby = height;
649 if (G_028D24_LINEAR(track->htile_surface)) {
650 /* nbx must be 16 htiles aligned == 16 * 8 pixel aligned */
651 nbx = round_up(nbx, 16 * 8);
652 /* nby is npipes htiles aligned == npipes * 8 pixel aligned */
653 nby = round_up(nby, track->npipes * 8);
654 } else {
655 /* always assume 8x8 htile */
 656 		/* align is htile align * 8; the htile alignment varies according to
 657 		 * the number of pipes, the tile width and nby
659 switch (track->npipes) {
660 case 8:
661 /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/
662 nbx = round_up(nbx, 64 * 8);
663 nby = round_up(nby, 64 * 8);
664 break;
665 case 4:
666 /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/
667 nbx = round_up(nbx, 64 * 8);
668 nby = round_up(nby, 32 * 8);
669 break;
670 case 2:
671 /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/
672 nbx = round_up(nbx, 32 * 8);
673 nby = round_up(nby, 32 * 8);
674 break;
675 case 1:
676 /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/
677 nbx = round_up(nbx, 32 * 8);
678 nby = round_up(nby, 16 * 8);
679 break;
680 default:
681 dev_warn(p->dev, "%s:%d invalid num pipes %d\n",
682 __func__, __LINE__, track->npipes);
683 return -EINVAL;
686 /* compute number of htile */
687 nbx = nbx >> 3;
688 nby = nby >> 3;
689 /* size must be aligned on npipes * 2K boundary */
690 size = roundup(nbx * nby * 4, track->npipes * (2 << 10));
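	/* Worked example (illustrative): pitch = 1024, height = 768, npipes = 2,
	 * non-linear htile -> nbx and nby are already multiples of 32 * 8 = 256,
	 * so nbx >> 3 = 128, nby >> 3 = 96 and
	 * size = roundup(128 * 96 * 4, 2 * 2048) = 49152 bytes, before adding
	 * htile_offset below.
	 */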
691 size += track->htile_offset;
693 if (size > radeon_bo_size(track->htile_bo)) {
694 dev_warn(p->dev, "%s:%d htile surface too small %ld for %ld (%d %d)\n",
695 __func__, __LINE__, radeon_bo_size(track->htile_bo),
696 size, nbx, nby);
697 return -EINVAL;
701 track->db_dirty = false;
702 return 0;
705 static int r600_cs_track_check(struct radeon_cs_parser *p)
707 struct r600_cs_track *track = p->track;
708 u32 tmp;
709 int r, i;
 711 	/* on legacy kernels we don't perform the advanced checks */
712 if (p->rdev == NULL)
713 return 0;
715 /* check streamout */
716 if (track->streamout_dirty && track->vgt_strmout_en) {
717 for (i = 0; i < 4; i++) {
718 if (track->vgt_strmout_buffer_en & (1 << i)) {
719 if (track->vgt_strmout_bo[i]) {
720 u64 offset = (u64)track->vgt_strmout_bo_offset[i] +
721 (u64)track->vgt_strmout_size[i];
722 if (offset > radeon_bo_size(track->vgt_strmout_bo[i])) {
723 DRM_ERROR("streamout %d bo too small: 0x%llx, 0x%lx\n",
724 i, offset,
725 radeon_bo_size(track->vgt_strmout_bo[i]));
726 return -EINVAL;
728 } else {
729 dev_warn(p->dev, "No buffer for streamout %d\n", i);
730 return -EINVAL;
734 track->streamout_dirty = false;
737 if (track->sx_misc_kill_all_prims)
738 return 0;
 740 	/* check that we have a cb for each enabled target; we don't check
 741 	 * shader_mask because it seems mesa isn't always setting it :(
743 if (track->cb_dirty) {
744 tmp = track->cb_target_mask;
746 /* We must check both colorbuffers for RESOLVE. */
747 if (track->is_resolve) {
748 tmp |= 0xff;
751 for (i = 0; i < 8; i++) {
752 u32 format = G_0280A0_FORMAT(track->cb_color_info[i]);
754 if (format != V_0280A0_COLOR_INVALID &&
755 (tmp >> (i * 4)) & 0xF) {
756 /* at least one component is enabled */
757 if (track->cb_color_bo[i] == NULL) {
758 dev_warn(p->dev, "%s:%d mask 0x%08X | 0x%08X no cb for %d\n",
759 __func__, __LINE__, track->cb_target_mask, track->cb_shader_mask, i);
760 return -EINVAL;
762 /* perform rewrite of CB_COLOR[0-7]_SIZE */
763 r = r600_cs_track_validate_cb(p, i);
764 if (r)
765 return r;
768 track->cb_dirty = false;
771 /* Check depth buffer */
772 if (track->db_dirty &&
773 G_028010_FORMAT(track->db_depth_info) != V_028010_DEPTH_INVALID &&
774 (G_028800_STENCIL_ENABLE(track->db_depth_control) ||
775 G_028800_Z_ENABLE(track->db_depth_control))) {
776 r = r600_cs_track_validate_db(p);
777 if (r)
778 return r;
781 return 0;
785 * r600_cs_packet_parse_vline() - parse userspace VLINE packet
786 * @parser: parser structure holding parsing context.
788 * This is an R600-specific function for parsing VLINE packets.
789 * Real work is done by r600_cs_common_vline_parse function.
 790  * Here we just set up the ASIC-specific register table and call
791 * the common implementation function.
793 static int r600_cs_packet_parse_vline(struct radeon_cs_parser *p)
795 static uint32_t vline_start_end[2] = {AVIVO_D1MODE_VLINE_START_END,
796 AVIVO_D2MODE_VLINE_START_END};
797 static uint32_t vline_status[2] = {AVIVO_D1MODE_VLINE_STATUS,
798 AVIVO_D2MODE_VLINE_STATUS};
800 return r600_cs_common_vline_parse(p, vline_start_end, vline_status);
804 * r600_cs_common_vline_parse() - common vline parser
805 * @parser: parser structure holding parsing context.
806 * @vline_start_end: table of vline_start_end registers
807 * @vline_status: table of vline_status registers
809 * Userspace sends a special sequence for VLINE waits.
810 * PACKET0 - VLINE_START_END + value
811 * PACKET3 - WAIT_REG_MEM poll vline status reg
812 * RELOC (P3) - crtc_id in reloc.
814 * This function parses this and relocates the VLINE START END
815 * and WAIT_REG_MEM packets to the correct crtc.
816 * It also detects a switched off crtc and nulls out the
817 * wait in that case. This function is common for all ASICs that
818 * are R600 and newer. The parsing algorithm is the same, and only
819 * differs in which registers are used.
821 * Caller is the ASIC-specific function which passes the parser
822 * context and ASIC-specific register table
824 int r600_cs_common_vline_parse(struct radeon_cs_parser *p,
825 uint32_t *vline_start_end,
826 uint32_t *vline_status)
828 struct drm_mode_object *obj;
829 struct drm_crtc *crtc;
830 struct radeon_crtc *radeon_crtc;
831 struct radeon_cs_packet p3reloc, wait_reg_mem;
832 int crtc_id;
833 int r;
834 uint32_t header, h_idx, reg, wait_reg_mem_info;
835 volatile uint32_t *ib;
837 ib = p->ib.ptr;
839 /* parse the WAIT_REG_MEM */
840 r = radeon_cs_packet_parse(p, &wait_reg_mem, p->idx);
841 if (r)
842 return r;
 844 	/* check it's a WAIT_REG_MEM */
845 if (wait_reg_mem.type != RADEON_PACKET_TYPE3 ||
846 wait_reg_mem.opcode != PACKET3_WAIT_REG_MEM) {
847 DRM_ERROR("vline wait missing WAIT_REG_MEM segment\n");
848 return -EINVAL;
851 wait_reg_mem_info = radeon_get_ib_value(p, wait_reg_mem.idx + 1);
852 /* bit 4 is reg (0) or mem (1) */
853 if (wait_reg_mem_info & 0x10) {
854 DRM_ERROR("vline WAIT_REG_MEM waiting on MEM instead of REG\n");
855 return -EINVAL;
857 /* bit 8 is me (0) or pfp (1) */
858 if (wait_reg_mem_info & 0x100) {
859 DRM_ERROR("vline WAIT_REG_MEM waiting on PFP instead of ME\n");
860 return -EINVAL;
862 /* waiting for value to be equal */
863 if ((wait_reg_mem_info & 0x7) != 0x3) {
864 DRM_ERROR("vline WAIT_REG_MEM function not equal\n");
865 return -EINVAL;
867 if ((radeon_get_ib_value(p, wait_reg_mem.idx + 2) << 2) != vline_status[0]) {
868 DRM_ERROR("vline WAIT_REG_MEM bad reg\n");
869 return -EINVAL;
872 if (radeon_get_ib_value(p, wait_reg_mem.idx + 5) != RADEON_VLINE_STAT) {
873 DRM_ERROR("vline WAIT_REG_MEM bad bit mask\n");
874 return -EINVAL;
877 /* jump over the NOP */
878 r = radeon_cs_packet_parse(p, &p3reloc, p->idx + wait_reg_mem.count + 2);
879 if (r)
880 return r;
882 h_idx = p->idx - 2;
883 p->idx += wait_reg_mem.count + 2;
884 p->idx += p3reloc.count + 2;
886 header = radeon_get_ib_value(p, h_idx);
887 crtc_id = radeon_get_ib_value(p, h_idx + 2 + 7 + 1);
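	/* Offset note (inferred from the packet sequence described above): h_idx
	 * is the PACKET0 header of VLINE_START_END (2 dwords), h_idx + 2 starts
	 * the WAIT_REG_MEM packet (1 + 6 dwords), and h_idx + 2 + 7 + 1 is the
	 * second dword of the trailing NOP/reloc packet, which holds the crtc_id.
	 */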
888 reg = R600_CP_PACKET0_GET_REG(header);
890 obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC);
891 if (!obj) {
892 DRM_ERROR("cannot find crtc %d\n", crtc_id);
893 return -EINVAL;
895 crtc = obj_to_crtc(obj);
896 radeon_crtc = to_radeon_crtc(crtc);
897 crtc_id = radeon_crtc->crtc_id;
899 if (!crtc->enabled) {
900 /* CRTC isn't enabled - we need to nop out the WAIT_REG_MEM */
901 ib[h_idx + 2] = PACKET2(0);
902 ib[h_idx + 3] = PACKET2(0);
903 ib[h_idx + 4] = PACKET2(0);
904 ib[h_idx + 5] = PACKET2(0);
905 ib[h_idx + 6] = PACKET2(0);
906 ib[h_idx + 7] = PACKET2(0);
907 ib[h_idx + 8] = PACKET2(0);
908 } else if (reg == vline_start_end[0]) {
909 header &= ~R600_CP_PACKET0_REG_MASK;
910 header |= vline_start_end[crtc_id] >> 2;
911 ib[h_idx] = header;
912 ib[h_idx + 4] = vline_status[crtc_id] >> 2;
913 } else {
914 DRM_ERROR("unknown crtc reloc\n");
915 return -EINVAL;
917 return 0;
920 static int r600_packet0_check(struct radeon_cs_parser *p,
921 struct radeon_cs_packet *pkt,
922 unsigned idx, unsigned reg)
924 int r;
926 switch (reg) {
927 case AVIVO_D1MODE_VLINE_START_END:
928 r = r600_cs_packet_parse_vline(p);
929 if (r) {
930 DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
931 idx, reg);
932 return r;
934 break;
935 default:
936 printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n",
937 reg, idx);
938 return -EINVAL;
940 return 0;
943 static int r600_cs_parse_packet0(struct radeon_cs_parser *p,
944 struct radeon_cs_packet *pkt)
946 unsigned reg, i;
947 unsigned idx;
948 int r;
950 idx = pkt->idx + 1;
951 reg = pkt->reg;
952 for (i = 0; i <= pkt->count; i++, idx++, reg += 4) {
953 r = r600_packet0_check(p, pkt, idx, reg);
954 if (r) {
955 return r;
958 return 0;
962 * r600_cs_check_reg() - check if register is authorized or not
963 * @parser: parser structure holding parsing context
964 * @reg: register we are testing
965 * @idx: index into the cs buffer
 967  * This function will test against r600_reg_safe_bm and return 0
 968  * if the register is safe. If the register is not flagged as safe this
 969  * function will test it against a list of registers needing special handling.
971 static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
973 struct r600_cs_track *track = (struct r600_cs_track *)p->track;
974 struct radeon_cs_reloc *reloc;
975 u32 m, i, tmp, *ib;
976 int r;
978 i = (reg >> 7);
979 if (i >= ARRAY_SIZE(r600_reg_safe_bm)) {
980 dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
981 return -EINVAL;
983 m = 1 << ((reg >> 2) & 31);
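	/* Each r600_reg_safe_bm entry covers 32 consecutive dwords (128 bytes) of
	 * register space; a clear bit means the register needs no further checking.
	 * e.g. (illustrative) reg 0x28800 (DB_DEPTH_CONTROL): i = 0x28800 >> 7 =
	 * 0x510, m = 1 << ((0x28800 >> 2) & 31) = 1 << 0.
	 */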
984 if (!(r600_reg_safe_bm[i] & m))
985 return 0;
986 ib = p->ib.ptr;
987 switch (reg) {
 988 	/* force the following regs to 0 in an attempt to disable the out buffer;
 989 	 * we will need to understand better how it works before we can perform a
 990 	 * proper security check on it (Jerome)
992 case R_0288A8_SQ_ESGS_RING_ITEMSIZE:
993 case R_008C44_SQ_ESGS_RING_SIZE:
994 case R_0288B0_SQ_ESTMP_RING_ITEMSIZE:
995 case R_008C54_SQ_ESTMP_RING_SIZE:
996 case R_0288C0_SQ_FBUF_RING_ITEMSIZE:
997 case R_008C74_SQ_FBUF_RING_SIZE:
998 case R_0288B4_SQ_GSTMP_RING_ITEMSIZE:
999 case R_008C5C_SQ_GSTMP_RING_SIZE:
1000 case R_0288AC_SQ_GSVS_RING_ITEMSIZE:
1001 case R_008C4C_SQ_GSVS_RING_SIZE:
1002 case R_0288BC_SQ_PSTMP_RING_ITEMSIZE:
1003 case R_008C6C_SQ_PSTMP_RING_SIZE:
1004 case R_0288C4_SQ_REDUC_RING_ITEMSIZE:
1005 case R_008C7C_SQ_REDUC_RING_SIZE:
1006 case R_0288B8_SQ_VSTMP_RING_ITEMSIZE:
1007 case R_008C64_SQ_VSTMP_RING_SIZE:
1008 case R_0288C8_SQ_GS_VERT_ITEMSIZE:
 1009 		/* get value to populate the IB, don't remove */
 1010 		tmp = radeon_get_ib_value(p, idx);
1011 ib[idx] = 0;
1012 break;
1013 case SQ_CONFIG:
1014 track->sq_config = radeon_get_ib_value(p, idx);
1015 break;
1016 case R_028800_DB_DEPTH_CONTROL:
1017 track->db_depth_control = radeon_get_ib_value(p, idx);
1018 track->db_dirty = true;
1019 break;
1020 case R_028010_DB_DEPTH_INFO:
1021 if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS) &&
1022 radeon_cs_packet_next_is_pkt3_nop(p)) {
1023 r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
1024 if (r) {
1025 dev_warn(p->dev, "bad SET_CONTEXT_REG "
1026 "0x%04X\n", reg);
1027 return -EINVAL;
1029 track->db_depth_info = radeon_get_ib_value(p, idx);
1030 ib[idx] &= C_028010_ARRAY_MODE;
1031 track->db_depth_info &= C_028010_ARRAY_MODE;
1032 if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
1033 ib[idx] |= S_028010_ARRAY_MODE(V_028010_ARRAY_2D_TILED_THIN1);
1034 track->db_depth_info |= S_028010_ARRAY_MODE(V_028010_ARRAY_2D_TILED_THIN1);
1035 } else {
1036 ib[idx] |= S_028010_ARRAY_MODE(V_028010_ARRAY_1D_TILED_THIN1);
1037 track->db_depth_info |= S_028010_ARRAY_MODE(V_028010_ARRAY_1D_TILED_THIN1);
1039 } else {
1040 track->db_depth_info = radeon_get_ib_value(p, idx);
1042 track->db_dirty = true;
1043 break;
1044 case R_028004_DB_DEPTH_VIEW:
1045 track->db_depth_view = radeon_get_ib_value(p, idx);
1046 track->db_dirty = true;
1047 break;
1048 case R_028000_DB_DEPTH_SIZE:
1049 track->db_depth_size = radeon_get_ib_value(p, idx);
1050 track->db_depth_size_idx = idx;
1051 track->db_dirty = true;
1052 break;
1053 case R_028AB0_VGT_STRMOUT_EN:
1054 track->vgt_strmout_en = radeon_get_ib_value(p, idx);
1055 track->streamout_dirty = true;
1056 break;
1057 case R_028B20_VGT_STRMOUT_BUFFER_EN:
1058 track->vgt_strmout_buffer_en = radeon_get_ib_value(p, idx);
1059 track->streamout_dirty = true;
1060 break;
1061 case VGT_STRMOUT_BUFFER_BASE_0:
1062 case VGT_STRMOUT_BUFFER_BASE_1:
1063 case VGT_STRMOUT_BUFFER_BASE_2:
1064 case VGT_STRMOUT_BUFFER_BASE_3:
1065 r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
1066 if (r) {
1067 dev_warn(p->dev, "bad SET_CONTEXT_REG "
1068 "0x%04X\n", reg);
1069 return -EINVAL;
1071 tmp = (reg - VGT_STRMOUT_BUFFER_BASE_0) / 16;
1072 track->vgt_strmout_bo_offset[tmp] = radeon_get_ib_value(p, idx) << 8;
1073 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
1074 track->vgt_strmout_bo[tmp] = reloc->robj;
1075 track->vgt_strmout_bo_mc[tmp] = reloc->lobj.gpu_offset;
1076 track->streamout_dirty = true;
1077 break;
1078 case VGT_STRMOUT_BUFFER_SIZE_0:
1079 case VGT_STRMOUT_BUFFER_SIZE_1:
1080 case VGT_STRMOUT_BUFFER_SIZE_2:
1081 case VGT_STRMOUT_BUFFER_SIZE_3:
1082 tmp = (reg - VGT_STRMOUT_BUFFER_SIZE_0) / 16;
1083 /* size in register is DWs, convert to bytes */
1084 track->vgt_strmout_size[tmp] = radeon_get_ib_value(p, idx) * 4;
1085 track->streamout_dirty = true;
1086 break;
1087 case CP_COHER_BASE:
1088 r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
1089 if (r) {
1090 dev_warn(p->dev, "missing reloc for CP_COHER_BASE "
1091 "0x%04X\n", reg);
1092 return -EINVAL;
1094 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
1095 break;
1096 case R_028238_CB_TARGET_MASK:
1097 track->cb_target_mask = radeon_get_ib_value(p, idx);
1098 track->cb_dirty = true;
1099 break;
1100 case R_02823C_CB_SHADER_MASK:
1101 track->cb_shader_mask = radeon_get_ib_value(p, idx);
1102 break;
1103 case R_028C04_PA_SC_AA_CONFIG:
1104 tmp = G_028C04_MSAA_NUM_SAMPLES(radeon_get_ib_value(p, idx));
1105 track->log_nsamples = tmp;
1106 track->nsamples = 1 << tmp;
1107 track->cb_dirty = true;
1108 break;
1109 case R_028808_CB_COLOR_CONTROL:
1110 tmp = G_028808_SPECIAL_OP(radeon_get_ib_value(p, idx));
1111 track->is_resolve = tmp == V_028808_SPECIAL_RESOLVE_BOX;
1112 track->cb_dirty = true;
1113 break;
1114 case R_0280A0_CB_COLOR0_INFO:
1115 case R_0280A4_CB_COLOR1_INFO:
1116 case R_0280A8_CB_COLOR2_INFO:
1117 case R_0280AC_CB_COLOR3_INFO:
1118 case R_0280B0_CB_COLOR4_INFO:
1119 case R_0280B4_CB_COLOR5_INFO:
1120 case R_0280B8_CB_COLOR6_INFO:
1121 case R_0280BC_CB_COLOR7_INFO:
1122 if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS) &&
1123 radeon_cs_packet_next_is_pkt3_nop(p)) {
1124 r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
1125 if (r) {
1126 dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
1127 return -EINVAL;
1129 tmp = (reg - R_0280A0_CB_COLOR0_INFO) / 4;
1130 track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
1131 if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
1132 ib[idx] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_2D_TILED_THIN1);
1133 track->cb_color_info[tmp] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_2D_TILED_THIN1);
1134 } else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) {
1135 ib[idx] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_1D_TILED_THIN1);
1136 track->cb_color_info[tmp] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_1D_TILED_THIN1);
1138 } else {
1139 tmp = (reg - R_0280A0_CB_COLOR0_INFO) / 4;
1140 track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
1142 track->cb_dirty = true;
1143 break;
1144 case R_028080_CB_COLOR0_VIEW:
1145 case R_028084_CB_COLOR1_VIEW:
1146 case R_028088_CB_COLOR2_VIEW:
1147 case R_02808C_CB_COLOR3_VIEW:
1148 case R_028090_CB_COLOR4_VIEW:
1149 case R_028094_CB_COLOR5_VIEW:
1150 case R_028098_CB_COLOR6_VIEW:
1151 case R_02809C_CB_COLOR7_VIEW:
1152 tmp = (reg - R_028080_CB_COLOR0_VIEW) / 4;
1153 track->cb_color_view[tmp] = radeon_get_ib_value(p, idx);
1154 track->cb_dirty = true;
1155 break;
1156 case R_028060_CB_COLOR0_SIZE:
1157 case R_028064_CB_COLOR1_SIZE:
1158 case R_028068_CB_COLOR2_SIZE:
1159 case R_02806C_CB_COLOR3_SIZE:
1160 case R_028070_CB_COLOR4_SIZE:
1161 case R_028074_CB_COLOR5_SIZE:
1162 case R_028078_CB_COLOR6_SIZE:
1163 case R_02807C_CB_COLOR7_SIZE:
1164 tmp = (reg - R_028060_CB_COLOR0_SIZE) / 4;
1165 track->cb_color_size[tmp] = radeon_get_ib_value(p, idx);
1166 track->cb_color_size_idx[tmp] = idx;
1167 track->cb_dirty = true;
1168 break;
 1169 	/* These registers were added late; there is userspace
 1170 	 * which does provide a relocation for them but sets a
 1171 	 * 0 offset. In order to avoid breaking old userspace
 1172 	 * we detect this and set the address to point to the last
 1173 	 * CB_COLOR0_BASE. Note that if userspace doesn't set
 1174 	 * CB_COLOR0_BASE before this register we will report an
 1175 	 * error. Old userspace always set CB_COLOR0_BASE
 1176 	 * before any of this.
1178 case R_0280E0_CB_COLOR0_FRAG:
1179 case R_0280E4_CB_COLOR1_FRAG:
1180 case R_0280E8_CB_COLOR2_FRAG:
1181 case R_0280EC_CB_COLOR3_FRAG:
1182 case R_0280F0_CB_COLOR4_FRAG:
1183 case R_0280F4_CB_COLOR5_FRAG:
1184 case R_0280F8_CB_COLOR6_FRAG:
1185 case R_0280FC_CB_COLOR7_FRAG:
1186 tmp = (reg - R_0280E0_CB_COLOR0_FRAG) / 4;
1187 if (!radeon_cs_packet_next_is_pkt3_nop(p)) {
1188 if (!track->cb_color_base_last[tmp]) {
1189 dev_err(p->dev, "Broken old userspace ? no cb_color0_base supplied before trying to write 0x%08X\n", reg);
1190 return -EINVAL;
1192 track->cb_color_frag_bo[tmp] = track->cb_color_bo[tmp];
1193 track->cb_color_frag_offset[tmp] = track->cb_color_bo_offset[tmp];
1194 ib[idx] = track->cb_color_base_last[tmp];
1195 } else {
1196 r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
1197 if (r) {
1198 dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
1199 return -EINVAL;
1201 track->cb_color_frag_bo[tmp] = reloc->robj;
1202 track->cb_color_frag_offset[tmp] = (u64)ib[idx] << 8;
1203 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
1205 if (G_0280A0_TILE_MODE(track->cb_color_info[tmp])) {
1206 track->cb_dirty = true;
1208 break;
1209 case R_0280C0_CB_COLOR0_TILE:
1210 case R_0280C4_CB_COLOR1_TILE:
1211 case R_0280C8_CB_COLOR2_TILE:
1212 case R_0280CC_CB_COLOR3_TILE:
1213 case R_0280D0_CB_COLOR4_TILE:
1214 case R_0280D4_CB_COLOR5_TILE:
1215 case R_0280D8_CB_COLOR6_TILE:
1216 case R_0280DC_CB_COLOR7_TILE:
1217 tmp = (reg - R_0280C0_CB_COLOR0_TILE) / 4;
1218 if (!radeon_cs_packet_next_is_pkt3_nop(p)) {
1219 if (!track->cb_color_base_last[tmp]) {
1220 dev_err(p->dev, "Broken old userspace ? no cb_color0_base supplied before trying to write 0x%08X\n", reg);
1221 return -EINVAL;
1223 track->cb_color_tile_bo[tmp] = track->cb_color_bo[tmp];
1224 track->cb_color_tile_offset[tmp] = track->cb_color_bo_offset[tmp];
1225 ib[idx] = track->cb_color_base_last[tmp];
1226 } else {
1227 r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
1228 if (r) {
1229 dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
1230 return -EINVAL;
1232 track->cb_color_tile_bo[tmp] = reloc->robj;
1233 track->cb_color_tile_offset[tmp] = (u64)ib[idx] << 8;
1234 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
1236 if (G_0280A0_TILE_MODE(track->cb_color_info[tmp])) {
1237 track->cb_dirty = true;
1239 break;
1240 case R_028100_CB_COLOR0_MASK:
1241 case R_028104_CB_COLOR1_MASK:
1242 case R_028108_CB_COLOR2_MASK:
1243 case R_02810C_CB_COLOR3_MASK:
1244 case R_028110_CB_COLOR4_MASK:
1245 case R_028114_CB_COLOR5_MASK:
1246 case R_028118_CB_COLOR6_MASK:
1247 case R_02811C_CB_COLOR7_MASK:
1248 tmp = (reg - R_028100_CB_COLOR0_MASK) / 4;
1249 track->cb_color_mask[tmp] = radeon_get_ib_value(p, idx);
1250 if (G_0280A0_TILE_MODE(track->cb_color_info[tmp])) {
1251 track->cb_dirty = true;
1253 break;
1254 case CB_COLOR0_BASE:
1255 case CB_COLOR1_BASE:
1256 case CB_COLOR2_BASE:
1257 case CB_COLOR3_BASE:
1258 case CB_COLOR4_BASE:
1259 case CB_COLOR5_BASE:
1260 case CB_COLOR6_BASE:
1261 case CB_COLOR7_BASE:
1262 r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
1263 if (r) {
1264 dev_warn(p->dev, "bad SET_CONTEXT_REG "
1265 "0x%04X\n", reg);
1266 return -EINVAL;
1268 tmp = (reg - CB_COLOR0_BASE) / 4;
1269 track->cb_color_bo_offset[tmp] = radeon_get_ib_value(p, idx) << 8;
1270 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
1271 track->cb_color_base_last[tmp] = ib[idx];
1272 track->cb_color_bo[tmp] = reloc->robj;
1273 track->cb_color_bo_mc[tmp] = reloc->lobj.gpu_offset;
1274 track->cb_dirty = true;
1275 break;
1276 case DB_DEPTH_BASE:
1277 r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
1278 if (r) {
1279 dev_warn(p->dev, "bad SET_CONTEXT_REG "
1280 "0x%04X\n", reg);
1281 return -EINVAL;
1283 track->db_offset = radeon_get_ib_value(p, idx) << 8;
1284 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
1285 track->db_bo = reloc->robj;
1286 track->db_bo_mc = reloc->lobj.gpu_offset;
1287 track->db_dirty = true;
1288 break;
1289 case DB_HTILE_DATA_BASE:
1290 r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
1291 if (r) {
1292 dev_warn(p->dev, "bad SET_CONTEXT_REG "
1293 "0x%04X\n", reg);
1294 return -EINVAL;
1296 track->htile_offset = radeon_get_ib_value(p, idx) << 8;
1297 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
1298 track->htile_bo = reloc->robj;
1299 track->db_dirty = true;
1300 break;
1301 case DB_HTILE_SURFACE:
1302 track->htile_surface = radeon_get_ib_value(p, idx);
1303 /* force 8x8 htile width and height */
1304 ib[idx] |= 3;
1305 track->db_dirty = true;
1306 break;
1307 case SQ_PGM_START_FS:
1308 case SQ_PGM_START_ES:
1309 case SQ_PGM_START_VS:
1310 case SQ_PGM_START_GS:
1311 case SQ_PGM_START_PS:
1312 case SQ_ALU_CONST_CACHE_GS_0:
1313 case SQ_ALU_CONST_CACHE_GS_1:
1314 case SQ_ALU_CONST_CACHE_GS_2:
1315 case SQ_ALU_CONST_CACHE_GS_3:
1316 case SQ_ALU_CONST_CACHE_GS_4:
1317 case SQ_ALU_CONST_CACHE_GS_5:
1318 case SQ_ALU_CONST_CACHE_GS_6:
1319 case SQ_ALU_CONST_CACHE_GS_7:
1320 case SQ_ALU_CONST_CACHE_GS_8:
1321 case SQ_ALU_CONST_CACHE_GS_9:
1322 case SQ_ALU_CONST_CACHE_GS_10:
1323 case SQ_ALU_CONST_CACHE_GS_11:
1324 case SQ_ALU_CONST_CACHE_GS_12:
1325 case SQ_ALU_CONST_CACHE_GS_13:
1326 case SQ_ALU_CONST_CACHE_GS_14:
1327 case SQ_ALU_CONST_CACHE_GS_15:
1328 case SQ_ALU_CONST_CACHE_PS_0:
1329 case SQ_ALU_CONST_CACHE_PS_1:
1330 case SQ_ALU_CONST_CACHE_PS_2:
1331 case SQ_ALU_CONST_CACHE_PS_3:
1332 case SQ_ALU_CONST_CACHE_PS_4:
1333 case SQ_ALU_CONST_CACHE_PS_5:
1334 case SQ_ALU_CONST_CACHE_PS_6:
1335 case SQ_ALU_CONST_CACHE_PS_7:
1336 case SQ_ALU_CONST_CACHE_PS_8:
1337 case SQ_ALU_CONST_CACHE_PS_9:
1338 case SQ_ALU_CONST_CACHE_PS_10:
1339 case SQ_ALU_CONST_CACHE_PS_11:
1340 case SQ_ALU_CONST_CACHE_PS_12:
1341 case SQ_ALU_CONST_CACHE_PS_13:
1342 case SQ_ALU_CONST_CACHE_PS_14:
1343 case SQ_ALU_CONST_CACHE_PS_15:
1344 case SQ_ALU_CONST_CACHE_VS_0:
1345 case SQ_ALU_CONST_CACHE_VS_1:
1346 case SQ_ALU_CONST_CACHE_VS_2:
1347 case SQ_ALU_CONST_CACHE_VS_3:
1348 case SQ_ALU_CONST_CACHE_VS_4:
1349 case SQ_ALU_CONST_CACHE_VS_5:
1350 case SQ_ALU_CONST_CACHE_VS_6:
1351 case SQ_ALU_CONST_CACHE_VS_7:
1352 case SQ_ALU_CONST_CACHE_VS_8:
1353 case SQ_ALU_CONST_CACHE_VS_9:
1354 case SQ_ALU_CONST_CACHE_VS_10:
1355 case SQ_ALU_CONST_CACHE_VS_11:
1356 case SQ_ALU_CONST_CACHE_VS_12:
1357 case SQ_ALU_CONST_CACHE_VS_13:
1358 case SQ_ALU_CONST_CACHE_VS_14:
1359 case SQ_ALU_CONST_CACHE_VS_15:
1360 r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
1361 if (r) {
1362 dev_warn(p->dev, "bad SET_CONTEXT_REG "
1363 "0x%04X\n", reg);
1364 return -EINVAL;
1366 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
1367 break;
1368 case SX_MEMORY_EXPORT_BASE:
1369 r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
1370 if (r) {
1371 dev_warn(p->dev, "bad SET_CONFIG_REG "
1372 "0x%04X\n", reg);
1373 return -EINVAL;
1375 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
1376 break;
1377 case SX_MISC:
1378 track->sx_misc_kill_all_prims = (radeon_get_ib_value(p, idx) & 0x1) != 0;
1379 break;
1380 default:
1381 dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
1382 return -EINVAL;
1384 return 0;
1387 unsigned r600_mip_minify(unsigned size, unsigned level)
1389 unsigned val;
1391 val = max(1U, size >> level);
1392 if (level > 0)
1393 val = roundup_pow_of_two(val);
1394 return val;
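/* e.g. (illustrative) size = 100: level 0 -> 100 (unchanged), level 3 ->
 * max(1, 100 >> 3) = 12, rounded up to the next power of two = 16.
 */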
1397 static void r600_texture_size(unsigned nfaces, unsigned blevel, unsigned llevel,
1398 unsigned w0, unsigned h0, unsigned d0, unsigned nsamples, unsigned format,
1399 unsigned block_align, unsigned height_align, unsigned base_align,
1400 unsigned *l0_size, unsigned *mipmap_size)
1402 unsigned offset, i, level;
1403 unsigned width, height, depth, size;
1404 unsigned blocksize;
1405 unsigned nbx, nby;
1406 unsigned nlevels = llevel - blevel + 1;
1408 *l0_size = -1;
1409 blocksize = r600_fmt_get_blocksize(format);
1411 w0 = r600_mip_minify(w0, 0);
1412 h0 = r600_mip_minify(h0, 0);
1413 d0 = r600_mip_minify(d0, 0);
1414 for(i = 0, offset = 0, level = blevel; i < nlevels; i++, level++) {
1415 width = r600_mip_minify(w0, i);
1416 nbx = r600_fmt_get_nblocksx(format, width);
1418 nbx = round_up(nbx, block_align);
1420 height = r600_mip_minify(h0, i);
1421 nby = r600_fmt_get_nblocksy(format, height);
1422 nby = round_up(nby, height_align);
1424 depth = r600_mip_minify(d0, i);
1426 size = nbx * nby * blocksize * nsamples;
1427 if (nfaces)
1428 size *= nfaces;
1429 else
1430 size *= depth;
1432 if (i == 0)
1433 *l0_size = size;
1435 if (i == 0 || i == 1)
1436 offset = round_up(offset, base_align);
1438 offset += size;
1440 *mipmap_size = offset;
1441 if (llevel == 0)
1442 *mipmap_size = *l0_size;
1443 if (!blevel)
1444 *mipmap_size -= *l0_size;
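/* Worked example (illustrative, assuming block/height/base alignments of 1):
 * a 256x256 2D texture, 4 bytes per texel, blevel = 0, llevel = 2 gives
 * level sizes 262144, 65536 and 16384 bytes, so *l0_size = 262144 and
 * *mipmap_size = 65536 + 16384 = 81920 (level 0 subtracted since blevel == 0).
 */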
 1448  * r600_check_texture_resource() - check if a texture resource is valid or not
1449 * @p: parser structure holding parsing context
1450 * @idx: index into the cs buffer
1451 * @texture: texture's bo structure
1452 * @mipmap: mipmap's bo structure
 1454  * This function will check that the resource has valid fields and that
 1455  * the texture and mipmap bo objects are big enough to cover this resource.
1457 static int r600_check_texture_resource(struct radeon_cs_parser *p, u32 idx,
1458 struct radeon_bo *texture,
1459 struct radeon_bo *mipmap,
1460 u64 base_offset,
1461 u64 mip_offset,
1462 u32 tiling_flags)
1464 struct r600_cs_track *track = p->track;
1465 u32 dim, nfaces, llevel, blevel, w0, h0, d0;
1466 u32 word0, word1, l0_size, mipmap_size, word2, word3, word4, word5;
1467 u32 height_align, pitch, pitch_align, depth_align;
1468 u32 barray, larray;
1469 u64 base_align;
1470 struct array_mode_checker array_check;
1471 u32 format;
1472 bool is_array;
 1474 	/* on legacy kernels we don't perform the advanced checks */
1475 if (p->rdev == NULL)
1476 return 0;
1478 /* convert to bytes */
1479 base_offset <<= 8;
1480 mip_offset <<= 8;
1482 word0 = radeon_get_ib_value(p, idx + 0);
1483 if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
1484 if (tiling_flags & RADEON_TILING_MACRO)
1485 word0 |= S_038000_TILE_MODE(V_038000_ARRAY_2D_TILED_THIN1);
1486 else if (tiling_flags & RADEON_TILING_MICRO)
1487 word0 |= S_038000_TILE_MODE(V_038000_ARRAY_1D_TILED_THIN1);
1489 word1 = radeon_get_ib_value(p, idx + 1);
1490 word2 = radeon_get_ib_value(p, idx + 2) << 8;
1491 word3 = radeon_get_ib_value(p, idx + 3) << 8;
1492 word4 = radeon_get_ib_value(p, idx + 4);
1493 word5 = radeon_get_ib_value(p, idx + 5);
1494 dim = G_038000_DIM(word0);
1495 w0 = G_038000_TEX_WIDTH(word0) + 1;
1496 pitch = (G_038000_PITCH(word0) + 1) * 8;
1497 h0 = G_038004_TEX_HEIGHT(word1) + 1;
1498 d0 = G_038004_TEX_DEPTH(word1);
1499 format = G_038004_DATA_FORMAT(word1);
1500 blevel = G_038010_BASE_LEVEL(word4);
1501 llevel = G_038014_LAST_LEVEL(word5);
1502 /* pitch in texels */
1503 array_check.array_mode = G_038000_TILE_MODE(word0);
1504 array_check.group_size = track->group_size;
1505 array_check.nbanks = track->nbanks;
1506 array_check.npipes = track->npipes;
1507 array_check.nsamples = 1;
1508 array_check.blocksize = r600_fmt_get_blocksize(format);
1509 nfaces = 1;
1510 is_array = false;
1511 switch (dim) {
1512 case V_038000_SQ_TEX_DIM_1D:
1513 case V_038000_SQ_TEX_DIM_2D:
1514 case V_038000_SQ_TEX_DIM_3D:
1515 break;
1516 case V_038000_SQ_TEX_DIM_CUBEMAP:
1517 if (p->family >= CHIP_RV770)
1518 nfaces = 8;
1519 else
1520 nfaces = 6;
1521 break;
1522 case V_038000_SQ_TEX_DIM_1D_ARRAY:
1523 case V_038000_SQ_TEX_DIM_2D_ARRAY:
1524 is_array = true;
1525 break;
1526 case V_038000_SQ_TEX_DIM_2D_ARRAY_MSAA:
1527 is_array = true;
1528 /* fall through */
1529 case V_038000_SQ_TEX_DIM_2D_MSAA:
1530 array_check.nsamples = 1 << llevel;
1531 llevel = 0;
1532 break;
1533 default:
1534 dev_warn(p->dev, "this kernel doesn't support %d texture dim\n", G_038000_DIM(word0));
1535 return -EINVAL;
1537 if (!r600_fmt_is_valid_texture(format, p->family)) {
1538 dev_warn(p->dev, "%s:%d texture invalid format %d\n",
1539 __func__, __LINE__, format);
1540 return -EINVAL;
1543 if (r600_get_array_mode_alignment(&array_check,
1544 &pitch_align, &height_align, &depth_align, &base_align)) {
1545 dev_warn(p->dev, "%s:%d tex array mode (%d) invalid\n",
1546 __func__, __LINE__, G_038000_TILE_MODE(word0));
1547 return -EINVAL;
1550 /* XXX check height as well... */
1552 if (!IS_ALIGNED(pitch, pitch_align)) {
1553 dev_warn(p->dev, "%s:%d tex pitch (%d, 0x%x, %d) invalid\n",
1554 __func__, __LINE__, pitch, pitch_align, G_038000_TILE_MODE(word0));
1555 return -EINVAL;
1557 if (!IS_ALIGNED(base_offset, base_align)) {
1558 dev_warn(p->dev, "%s:%d tex base offset (0x%llx, 0x%llx, %d) invalid\n",
1559 __func__, __LINE__, base_offset, base_align, G_038000_TILE_MODE(word0));
1560 return -EINVAL;
1562 if (!IS_ALIGNED(mip_offset, base_align)) {
1563 dev_warn(p->dev, "%s:%d tex mip offset (0x%llx, 0x%llx, %d) invalid\n",
1564 __func__, __LINE__, mip_offset, base_align, G_038000_TILE_MODE(word0));
1565 return -EINVAL;
1568 if (blevel > llevel) {
1569 dev_warn(p->dev, "texture blevel %d > llevel %d\n",
1570 blevel, llevel);
1572 if (is_array) {
1573 barray = G_038014_BASE_ARRAY(word5);
1574 larray = G_038014_LAST_ARRAY(word5);
1576 nfaces = larray - barray + 1;
1578 r600_texture_size(nfaces, blevel, llevel, w0, h0, d0, array_check.nsamples, format,
1579 pitch_align, height_align, base_align,
1580 &l0_size, &mipmap_size);
1581 /* using get ib will give us the offset into the texture bo */
1582 if ((l0_size + word2) > radeon_bo_size(texture)) {
1583 dev_warn(p->dev, "texture bo too small ((%d %d) (%d %d) %d %d %d -> %d have %ld)\n",
1584 w0, h0, pitch_align, height_align,
1585 array_check.array_mode, format, word2,
1586 l0_size, radeon_bo_size(texture));
1587 dev_warn(p->dev, "alignments %d %d %d %lld\n", pitch, pitch_align, height_align, base_align);
1588 return -EINVAL;
1590 /* using get ib will give us the offset into the mipmap bo */
1591 if ((mipmap_size + word3) > radeon_bo_size(mipmap)) {
1592 /*dev_warn(p->dev, "mipmap bo too small (%d %d %d %d %d %d -> %d have %ld)\n",
1593 w0, h0, format, blevel, nlevels, word3, mipmap_size, radeon_bo_size(texture));*/
1595 return 0;
1598 static bool r600_is_safe_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
1600 u32 m, i;
1602 i = (reg >> 7);
1603 if (i >= ARRAY_SIZE(r600_reg_safe_bm)) {
1604 dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
1605 return false;
1607 m = 1 << ((reg >> 2) & 31);
1608 if (!(r600_reg_safe_bm[i] & m))
1609 return true;
1610 dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
1611 return false;
1614 static int r600_packet3_check(struct radeon_cs_parser *p,
1615 struct radeon_cs_packet *pkt)
1617 struct radeon_cs_reloc *reloc;
1618 struct r600_cs_track *track;
1619 volatile u32 *ib;
1620 unsigned idx;
1621 unsigned i;
1622 unsigned start_reg, end_reg, reg;
1623 int r;
1624 u32 idx_value;
1626 track = (struct r600_cs_track *)p->track;
1627 ib = p->ib.ptr;
1628 idx = pkt->idx + 1;
1629 idx_value = radeon_get_ib_value(p, idx);
1631 switch (pkt->opcode) {
1632 case PACKET3_SET_PREDICATION:
1634 int pred_op;
1635 int tmp;
1636 uint64_t offset;
1638 if (pkt->count != 1) {
1639 DRM_ERROR("bad SET PREDICATION\n");
1640 return -EINVAL;
1643 tmp = radeon_get_ib_value(p, idx + 1);
1644 pred_op = (tmp >> 16) & 0x7;
1646 /* for the clear predicate operation */
1647 if (pred_op == 0)
1648 return 0;
1650 if (pred_op > 2) {
1651 DRM_ERROR("bad SET PREDICATION operation %d\n", pred_op);
1652 return -EINVAL;
1655 r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
1656 if (r) {
1657 DRM_ERROR("bad SET PREDICATION\n");
1658 return -EINVAL;
1661 offset = reloc->lobj.gpu_offset +
1662 (idx_value & 0xfffffff0) +
1663 ((u64)(tmp & 0xff) << 32);
1665 ib[idx + 0] = offset;
1666 ib[idx + 1] = (tmp & 0xffffff00) | (upper_32_bits(offset) & 0xff);
1668 break;
1670 case PACKET3_START_3D_CMDBUF:
1671 if (p->family >= CHIP_RV770 || pkt->count) {
1672 DRM_ERROR("bad START_3D\n");
1673 return -EINVAL;
1675 break;
1676 case PACKET3_CONTEXT_CONTROL:
1677 if (pkt->count != 1) {
1678 DRM_ERROR("bad CONTEXT_CONTROL\n");
1679 return -EINVAL;
1681 break;
1682 case PACKET3_INDEX_TYPE:
1683 case PACKET3_NUM_INSTANCES:
1684 if (pkt->count) {
1685 DRM_ERROR("bad INDEX_TYPE/NUM_INSTANCES\n");
1686 return -EINVAL;
1688 break;
1689 case PACKET3_DRAW_INDEX:
1691 uint64_t offset;
1692 if (pkt->count != 3) {
1693 DRM_ERROR("bad DRAW_INDEX\n");
1694 return -EINVAL;
1696 r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
1697 if (r) {
1698 DRM_ERROR("bad DRAW_INDEX\n");
1699 return -EINVAL;
1702 offset = reloc->lobj.gpu_offset +
1703 idx_value +
1704 ((u64)(radeon_get_ib_value(p, idx+1) & 0xff) << 32);
1706 ib[idx+0] = offset;
1707 ib[idx+1] = upper_32_bits(offset) & 0xff;
1709 r = r600_cs_track_check(p);
1710 if (r) {
1711 dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
1712 return r;
1714 break;
1716 case PACKET3_DRAW_INDEX_AUTO:
1717 if (pkt->count != 1) {
1718 DRM_ERROR("bad DRAW_INDEX_AUTO\n");
1719 return -EINVAL;
1721 r = r600_cs_track_check(p);
1722 if (r) {
1723 dev_warn(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx);
1724 return r;
1726 break;
1727 case PACKET3_DRAW_INDEX_IMMD_BE:
1728 case PACKET3_DRAW_INDEX_IMMD:
1729 if (pkt->count < 2) {
1730 DRM_ERROR("bad DRAW_INDEX_IMMD\n");
1731 return -EINVAL;
1733 r = r600_cs_track_check(p);
1734 if (r) {
1735 dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
1736 return r;
1738 break;
1739 case PACKET3_WAIT_REG_MEM:
1740 if (pkt->count != 5) {
1741 DRM_ERROR("bad WAIT_REG_MEM\n");
1742 return -EINVAL;
1744 /* bit 4 is reg (0) or mem (1) */
1745 if (idx_value & 0x10) {
1746 uint64_t offset;
1748 r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
1749 if (r) {
1750 DRM_ERROR("bad WAIT_REG_MEM\n");
1751 return -EINVAL;
1754 offset = reloc->lobj.gpu_offset +
1755 (radeon_get_ib_value(p, idx+1) & 0xfffffff0) +
1756 ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
1758 ib[idx+1] = (ib[idx+1] & 0x3) | (offset & 0xfffffff0);
1759 ib[idx+2] = upper_32_bits(offset) & 0xff;
1760 } else if (idx_value & 0x100) {
1761 DRM_ERROR("cannot use PFP on REG wait\n");
1762 return -EINVAL;
1764 break;
1765 case PACKET3_CP_DMA:
1767 u32 command, size;
1768 u64 offset, tmp;
1769 if (pkt->count != 4) {
1770 DRM_ERROR("bad CP DMA\n");
1771 return -EINVAL;
1773 command = radeon_get_ib_value(p, idx+4);
1774 size = command & 0x1fffff;
1775 if (command & PACKET3_CP_DMA_CMD_SAS) {
1776 /* src address space is register */
1777 DRM_ERROR("CP DMA SAS not supported\n");
1778 return -EINVAL;
1779 } else {
1780 if (command & PACKET3_CP_DMA_CMD_SAIC) {
1781 DRM_ERROR("CP DMA SAIC only supported for registers\n");
1782 return -EINVAL;
1784 /* src address space is memory */
1785 r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
1786 if (r) {
1787 DRM_ERROR("bad CP DMA SRC\n");
1788 return -EINVAL;
1791 tmp = radeon_get_ib_value(p, idx) +
1792 ((u64)(radeon_get_ib_value(p, idx+1) & 0xff) << 32);
1794 offset = reloc->lobj.gpu_offset + tmp;
1796 if ((tmp + size) > radeon_bo_size(reloc->robj)) {
1797 dev_warn(p->dev, "CP DMA src buffer too small (%llu %lu)\n",
1798 tmp + size, radeon_bo_size(reloc->robj));
1799 return -EINVAL;
1802 ib[idx] = offset;
1803 ib[idx+1] = (ib[idx+1] & 0xffffff00) | (upper_32_bits(offset) & 0xff);
1805 if (command & PACKET3_CP_DMA_CMD_DAS) {
1806 /* dst address space is register */
1807 DRM_ERROR("CP DMA DAS not supported\n");
1808 return -EINVAL;
1809 } else {
1810 /* dst address space is memory */
1811 if (command & PACKET3_CP_DMA_CMD_DAIC) {
1812 DRM_ERROR("CP DMA DAIC only supported for registers\n");
1813 return -EINVAL;
1815 r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
1816 if (r) {
1817 DRM_ERROR("bad CP DMA DST\n");
1818 return -EINVAL;
1821 tmp = radeon_get_ib_value(p, idx+2) +
1822 ((u64)(radeon_get_ib_value(p, idx+3) & 0xff) << 32);
1824 offset = reloc->lobj.gpu_offset + tmp;
1826 if ((tmp + size) > radeon_bo_size(reloc->robj)) {
1827 dev_warn(p->dev, "CP DMA dst buffer too small (%llu %lu)\n",
1828 tmp + size, radeon_bo_size(reloc->robj));
1829 return -EINVAL;
1832 ib[idx+2] = offset;
1833 ib[idx+3] = upper_32_bits(offset) & 0xff;
1835 break;
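/* CP_DMA summary: the byte count comes from the low 21 bits of the command
 * dword; the SAS/DAS flags (register address space for source/destination)
 * are rejected, the SAIC/DAIC flags are only allowed with register operands,
 * and both memory operands are relocated and bounds-checked against their
 * backing BOs above.
 */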
1837 case PACKET3_SURFACE_SYNC:
1838 if (pkt->count != 3) {
1839 DRM_ERROR("bad SURFACE_SYNC\n");
1840 return -EINVAL;
1842 /* 0xffffffff/0x0 is flush all cache flag */
1843 if (radeon_get_ib_value(p, idx + 1) != 0xffffffff ||
1844 radeon_get_ib_value(p, idx + 2) != 0) {
1845 r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
1846 if (r) {
1847 DRM_ERROR("bad SURFACE_SYNC\n");
1848 return -EINVAL;
1850 ib[idx+2] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
1852 break;
1853 case PACKET3_EVENT_WRITE:
1854 if (pkt->count != 2 && pkt->count != 0) {
1855 DRM_ERROR("bad EVENT_WRITE\n");
1856 return -EINVAL;
1858 if (pkt->count) {
1859 uint64_t offset;
1861 r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
1862 if (r) {
1863 DRM_ERROR("bad EVENT_WRITE\n");
1864 return -EINVAL;
1866 offset = reloc->lobj.gpu_offset +
1867 (radeon_get_ib_value(p, idx+1) & 0xfffffff8) +
1868 ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
1870 ib[idx+1] = offset & 0xfffffff8;
1871 ib[idx+2] = upper_32_bits(offset) & 0xff;
1873 break;
1874 case PACKET3_EVENT_WRITE_EOP:
1876 uint64_t offset;
1878 if (pkt->count != 4) {
1879 DRM_ERROR("bad EVENT_WRITE_EOP\n");
1880 return -EINVAL;
1882 r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
1883 if (r) {
1884 DRM_ERROR("bad EVENT_WRITE_EOP\n");
1885 return -EINVAL;
1888 offset = reloc->lobj.gpu_offset +
1889 (radeon_get_ib_value(p, idx+1) & 0xfffffffc) +
1890 ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
1892 ib[idx+1] = offset & 0xfffffffc;
1893 ib[idx+2] = (ib[idx+2] & 0xffffff00) | (upper_32_bits(offset) & 0xff);
1894 break;
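/* Note the alignment masks above: EVENT_WRITE addresses are masked to an
 * 8-byte boundary (0xfffffff8) while EVENT_WRITE_EOP uses a 4-byte boundary
 * (0xfffffffc); both carry address bits 39:32 in the following dword.
 */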
1896 case PACKET3_SET_CONFIG_REG:
1897 start_reg = (idx_value << 2) + PACKET3_SET_CONFIG_REG_OFFSET;
1898 end_reg = 4 * pkt->count + start_reg - 4;
1899 if ((start_reg < PACKET3_SET_CONFIG_REG_OFFSET) ||
1900 (start_reg >= PACKET3_SET_CONFIG_REG_END) ||
1901 (end_reg >= PACKET3_SET_CONFIG_REG_END)) {
1902 DRM_ERROR("bad PACKET3_SET_CONFIG_REG\n");
1903 return -EINVAL;
1905 for (i = 0; i < pkt->count; i++) {
1906 reg = start_reg + (4 * i);
1907 r = r600_cs_check_reg(p, reg, idx+1+i);
1908 if (r)
1909 return r;
1911 break;
1912 case PACKET3_SET_CONTEXT_REG:
1913 start_reg = (idx_value << 2) + PACKET3_SET_CONTEXT_REG_OFFSET;
1914 end_reg = 4 * pkt->count + start_reg - 4;
1915 if ((start_reg < PACKET3_SET_CONTEXT_REG_OFFSET) ||
1916 (start_reg >= PACKET3_SET_CONTEXT_REG_END) ||
1917 (end_reg >= PACKET3_SET_CONTEXT_REG_END)) {
1918 DRM_ERROR("bad PACKET3_SET_CONTEXT_REG\n");
1919 return -EINVAL;
1921 for (i = 0; i < pkt->count; i++) {
1922 reg = start_reg + (4 * i);
1923 r = r600_cs_check_reg(p, reg, idx+1+i);
1924 if (r)
1925 return r;
1927 break;
1928 case PACKET3_SET_RESOURCE:
1929 if (pkt->count % 7) {
1930 DRM_ERROR("bad SET_RESOURCE\n");
1931 return -EINVAL;
1933 start_reg = (idx_value << 2) + PACKET3_SET_RESOURCE_OFFSET;
1934 end_reg = 4 * pkt->count + start_reg - 4;
1935 if ((start_reg < PACKET3_SET_RESOURCE_OFFSET) ||
1936 (start_reg >= PACKET3_SET_RESOURCE_END) ||
1937 (end_reg >= PACKET3_SET_RESOURCE_END)) {
1938 DRM_ERROR("bad SET_RESOURCE\n");
1939 return -EINVAL;
1941 for (i = 0; i < (pkt->count / 7); i++) {
1942 struct radeon_bo *texture, *mipmap;
1943 u32 size, offset, base_offset, mip_offset;
1945 switch (G__SQ_VTX_CONSTANT_TYPE(radeon_get_ib_value(p, idx+(i*7)+6+1))) {
1946 case SQ_TEX_VTX_VALID_TEXTURE:
1947 /* tex base */
1948 r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
1949 if (r) {
1950 DRM_ERROR("bad SET_RESOURCE\n");
1951 return -EINVAL;
1953 base_offset = (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
1954 if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
1955 if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
1956 ib[idx+1+(i*7)+0] |= S_038000_TILE_MODE(V_038000_ARRAY_2D_TILED_THIN1);
1957 else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
1958 ib[idx+1+(i*7)+0] |= S_038000_TILE_MODE(V_038000_ARRAY_1D_TILED_THIN1);
1960 texture = reloc->robj;
1961 /* tex mip base */
1962 r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
1963 if (r) {
1964 DRM_ERROR("bad SET_RESOURCE\n");
1965 return -EINVAL;
1967 mip_offset = (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
1968 mipmap = reloc->robj;
1969 r = r600_check_texture_resource(p, idx+(i*7)+1,
1970 texture, mipmap,
1971 base_offset + radeon_get_ib_value(p, idx+1+(i*7)+2),
1972 mip_offset + radeon_get_ib_value(p, idx+1+(i*7)+3),
1973 reloc->lobj.tiling_flags);
1974 if (r)
1975 return r;
1976 ib[idx+1+(i*7)+2] += base_offset;
1977 ib[idx+1+(i*7)+3] += mip_offset;
1978 break;
1979 case SQ_TEX_VTX_VALID_BUFFER:
1981 uint64_t offset64;
1982 /* vtx base */
1983 r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
1984 if (r) {
1985 DRM_ERROR("bad SET_RESOURCE\n");
1986 return -EINVAL;
1988 offset = radeon_get_ib_value(p, idx+1+(i*7)+0);
1989 size = radeon_get_ib_value(p, idx+1+(i*7)+1) + 1;
1990 if (p->rdev && (size + offset) > radeon_bo_size(reloc->robj)) {
1991 /* force size to size of the buffer */
1992 dev_warn(p->dev, "vbo resource seems too big (%d) for the bo (%ld)\n",
1993 size + offset, radeon_bo_size(reloc->robj));
1994 ib[idx+1+(i*7)+1] = radeon_bo_size(reloc->robj) - offset;
1997 offset64 = reloc->lobj.gpu_offset + offset;
1998 ib[idx+1+(i*7)+0] = offset64;
1999 ib[idx+1+(i*7)+2] = (ib[idx+1+(i*7)+2] & 0xffffff00) |
2000 (upper_32_bits(offset64) & 0xff);
2001 break;
2003 case SQ_TEX_VTX_INVALID_TEXTURE:
2004 case SQ_TEX_VTX_INVALID_BUFFER:
2005 default:
2006 DRM_ERROR("bad SET_RESOURCE\n");
2007 return -EINVAL;
2010 break;
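/* SET_RESOURCE entries are 7 dwords each on r6xx (hence the "% 7" and "/ 7"
 * checks above); the type field in the last dword of each entry selects
 * texture validation vs. vertex-buffer bounds checking.
 */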
2011 case PACKET3_SET_ALU_CONST:
2012 if (track->sq_config & DX9_CONSTS) {
2013 start_reg = (idx_value << 2) + PACKET3_SET_ALU_CONST_OFFSET;
2014 end_reg = 4 * pkt->count + start_reg - 4;
2015 if ((start_reg < PACKET3_SET_ALU_CONST_OFFSET) ||
2016 (start_reg >= PACKET3_SET_ALU_CONST_END) ||
2017 (end_reg >= PACKET3_SET_ALU_CONST_END)) {
2018 DRM_ERROR("bad SET_ALU_CONST\n");
2019 return -EINVAL;
2022 break;
2023 case PACKET3_SET_BOOL_CONST:
2024 start_reg = (idx_value << 2) + PACKET3_SET_BOOL_CONST_OFFSET;
2025 end_reg = 4 * pkt->count + start_reg - 4;
2026 if ((start_reg < PACKET3_SET_BOOL_CONST_OFFSET) ||
2027 (start_reg >= PACKET3_SET_BOOL_CONST_END) ||
2028 (end_reg >= PACKET3_SET_BOOL_CONST_END)) {
2029 DRM_ERROR("bad SET_BOOL_CONST\n");
2030 return -EINVAL;
2032 break;
2033 case PACKET3_SET_LOOP_CONST:
2034 start_reg = (idx_value << 2) + PACKET3_SET_LOOP_CONST_OFFSET;
2035 end_reg = 4 * pkt->count + start_reg - 4;
2036 if ((start_reg < PACKET3_SET_LOOP_CONST_OFFSET) ||
2037 (start_reg >= PACKET3_SET_LOOP_CONST_END) ||
2038 (end_reg >= PACKET3_SET_LOOP_CONST_END)) {
2039 DRM_ERROR("bad SET_LOOP_CONST\n");
2040 return -EINVAL;
2042 break;
2043 case PACKET3_SET_CTL_CONST:
2044 start_reg = (idx_value << 2) + PACKET3_SET_CTL_CONST_OFFSET;
2045 end_reg = 4 * pkt->count + start_reg - 4;
2046 if ((start_reg < PACKET3_SET_CTL_CONST_OFFSET) ||
2047 (start_reg >= PACKET3_SET_CTL_CONST_END) ||
2048 (end_reg >= PACKET3_SET_CTL_CONST_END)) {
2049 DRM_ERROR("bad SET_CTL_CONST\n");
2050 return -EINVAL;
2052 break;
2053 case PACKET3_SET_SAMPLER:
2054 if (pkt->count % 3) {
2055 DRM_ERROR("bad SET_SAMPLER\n");
2056 return -EINVAL;
2058 start_reg = (idx_value << 2) + PACKET3_SET_SAMPLER_OFFSET;
2059 end_reg = 4 * pkt->count + start_reg - 4;
2060 if ((start_reg < PACKET3_SET_SAMPLER_OFFSET) ||
2061 (start_reg >= PACKET3_SET_SAMPLER_END) ||
2062 (end_reg >= PACKET3_SET_SAMPLER_END)) {
2063 DRM_ERROR("bad SET_SAMPLER\n");
2064 return -EINVAL;
2066 break;
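/* The SET_*_CONST and SET_SAMPLER cases above share one pattern: the
 * packet's first dword (times 4) is an offset into that block's register
 * window, and both the start register and the computed end register must
 * stay inside the window or the packet is rejected.
 */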
2067 case PACKET3_STRMOUT_BASE_UPDATE:
2068 /* RS780 and RS880 also need this */
2069 if (p->family < CHIP_RS780) {
2070 DRM_ERROR("STRMOUT_BASE_UPDATE only supported on 7xx\n");
2071 return -EINVAL;
2073 if (pkt->count != 1) {
2074 DRM_ERROR("bad STRMOUT_BASE_UPDATE packet count\n");
2075 return -EINVAL;
2077 if (idx_value > 3) {
2078 DRM_ERROR("bad STRMOUT_BASE_UPDATE index\n");
2079 return -EINVAL;
2082 u64 offset;
2084 r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
2085 if (r) {
2086 DRM_ERROR("bad STRMOUT_BASE_UPDATE reloc\n");
2087 return -EINVAL;
2090 if (reloc->robj != track->vgt_strmout_bo[idx_value]) {
2091 DRM_ERROR("bad STRMOUT_BASE_UPDATE, bo does not match\n");
2092 return -EINVAL;
2095 offset = radeon_get_ib_value(p, idx+1) << 8;
2096 if (offset != track->vgt_strmout_bo_offset[idx_value]) {
2097 DRM_ERROR("bad STRMOUT_BASE_UPDATE, bo offset does not match: 0x%llx, 0x%x\n",
2098 offset, track->vgt_strmout_bo_offset[idx_value]);
2099 return -EINVAL;
2102 if ((offset + 4) > radeon_bo_size(reloc->robj)) {
2103 DRM_ERROR("bad STRMOUT_BASE_UPDATE bo too small: 0x%llx, 0x%lx\n",
2104 offset + 4, radeon_bo_size(reloc->robj));
2105 return -EINVAL;
2107 ib[idx+1] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
2109 break;
2110 case PACKET3_SURFACE_BASE_UPDATE:
2111 if (p->family >= CHIP_RV770 || p->family == CHIP_R600) {
2112 DRM_ERROR("bad SURFACE_BASE_UPDATE\n");
2113 return -EINVAL;
2115 if (pkt->count) {
2116 DRM_ERROR("bad SURFACE_BASE_UPDATE\n");
2117 return -EINVAL;
2119 break;
2120 case PACKET3_STRMOUT_BUFFER_UPDATE:
2121 if (pkt->count != 4) {
2122 DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (invalid count)\n");
2123 return -EINVAL;
2125 /* Updating memory at DST_ADDRESS. */
2126 if (idx_value & 0x1) {
2127 u64 offset;
2128 r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
2129 if (r) {
2130 DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (missing dst reloc)\n");
2131 return -EINVAL;
2133 offset = radeon_get_ib_value(p, idx+1);
2134 offset += ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32;
2135 if ((offset + 4) > radeon_bo_size(reloc->robj)) {
2136 DRM_ERROR("bad STRMOUT_BUFFER_UPDATE dst bo too small: 0x%llx, 0x%lx\n",
2137 offset + 4, radeon_bo_size(reloc->robj));
2138 return -EINVAL;
2140 offset += reloc->lobj.gpu_offset;
2141 ib[idx+1] = offset;
2142 ib[idx+2] = upper_32_bits(offset) & 0xff;
2144 /* Reading data from SRC_ADDRESS. */
2145 if (((idx_value >> 1) & 0x3) == 2) {
2146 u64 offset;
2147 r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
2148 if (r) {
2149 DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (missing src reloc)\n");
2150 return -EINVAL;
2152 offset = radeon_get_ib_value(p, idx+3);
2153 offset += ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
2154 if ((offset + 4) > radeon_bo_size(reloc->robj)) {
2155 DRM_ERROR("bad STRMOUT_BUFFER_UPDATE src bo too small: 0x%llx, 0x%lx\n",
2156 offset + 4, radeon_bo_size(reloc->robj));
2157 return -EINVAL;
2159 offset += reloc->lobj.gpu_offset;
2160 ib[idx+3] = offset;
2161 ib[idx+4] = upper_32_bits(offset) & 0xff;
2163 break;
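/* For STRMOUT_BUFFER_UPDATE, bit 0 of the control dword means the packet
 * updates memory at DST_ADDRESS, and a source-select value of 2 in bits 2:1
 * means it reads from SRC_ADDRESS; each address used gets its own reloc and
 * bounds check above.
 */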
2164 case PACKET3_MEM_WRITE:
2166 u64 offset;
2168 if (pkt->count != 3) {
2169 DRM_ERROR("bad MEM_WRITE (invalid count)\n");
2170 return -EINVAL;
2172 r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
2173 if (r) {
2174 DRM_ERROR("bad MEM_WRITE (missing reloc)\n");
2175 return -EINVAL;
2177 offset = radeon_get_ib_value(p, idx+0);
2178 offset += ((u64)(radeon_get_ib_value(p, idx+1) & 0xff)) << 32UL;
2179 if (offset & 0x7) {
2180 DRM_ERROR("bad MEM_WRITE (address not qwords aligned)\n");
2181 return -EINVAL;
2183 if ((offset + 8) > radeon_bo_size(reloc->robj)) {
2184 DRM_ERROR("bad MEM_WRITE bo too small: 0x%llx, 0x%lx\n",
2185 offset + 8, radeon_bo_size(reloc->robj));
2186 return -EINVAL;
2188 offset += reloc->lobj.gpu_offset;
2189 ib[idx+0] = offset;
2190 ib[idx+1] = upper_32_bits(offset) & 0xff;
2191 break;
2193 case PACKET3_COPY_DW:
2194 if (pkt->count != 4) {
2195 DRM_ERROR("bad COPY_DW (invalid count)\n");
2196 return -EINVAL;
2198 if (idx_value & 0x1) {
2199 u64 offset;
2200 /* SRC is memory. */
2201 r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
2202 if (r) {
2203 DRM_ERROR("bad COPY_DW (missing src reloc)\n");
2204 return -EINVAL;
2206 offset = radeon_get_ib_value(p, idx+1);
2207 offset += ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32;
2208 if ((offset + 4) > radeon_bo_size(reloc->robj)) {
2209 DRM_ERROR("bad COPY_DW src bo too small: 0x%llx, 0x%lx\n",
2210 offset + 4, radeon_bo_size(reloc->robj));
2211 return -EINVAL;
2213 offset += reloc->lobj.gpu_offset;
2214 ib[idx+1] = offset;
2215 ib[idx+2] = upper_32_bits(offset) & 0xff;
2216 } else {
2217 /* SRC is a reg. */
2218 reg = radeon_get_ib_value(p, idx+1) << 2;
2219 if (!r600_is_safe_reg(p, reg, idx+1))
2220 return -EINVAL;
2222 if (idx_value & 0x2) {
2223 u64 offset;
2224 /* DST is memory. */
2225 r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
2226 if (r) {
2227 DRM_ERROR("bad COPY_DW (missing dst reloc)\n");
2228 return -EINVAL;
2230 offset = radeon_get_ib_value(p, idx+3);
2231 offset += ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
2232 if ((offset + 4) > radeon_bo_size(reloc->robj)) {
2233 DRM_ERROR("bad COPY_DW dst bo too small: 0x%llx, 0x%lx\n",
2234 offset + 4, radeon_bo_size(reloc->robj));
2235 return -EINVAL;
2237 offset += reloc->lobj.gpu_offset;
2238 ib[idx+3] = offset;
2239 ib[idx+4] = upper_32_bits(offset) & 0xff;
2240 } else {
2241 /* DST is a reg. */
2242 reg = radeon_get_ib_value(p, idx+3) << 2;
2243 if (!r600_is_safe_reg(p, reg, idx+3))
2244 return -EINVAL;
2246 break;
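/* COPY_DW moves a dword between registers and memory: bit 0 of the control
 * dword marks the source as memory, bit 1 marks the destination as memory;
 * register operands are only allowed if r600_is_safe_reg() accepts them.
 */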
2247 case PACKET3_NOP:
2248 break;
2249 default:
2250 DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode);
2251 return -EINVAL;
2253 return 0;
2254 }
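/* Top-level command-stream checker: allocate and seed the tracker from the
 * device's tiling configuration (KMS path), then walk the IB packet by
 * packet via radeon_cs_packet_parse(), dispatching type-0 and type-3
 * packets to r600_cs_parse_packet0() and r600_packet3_check(); the tracker
 * is freed on both the error and success paths.
 */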
2256 int r600_cs_parse(struct radeon_cs_parser *p)
2257 {
2258 struct radeon_cs_packet pkt;
2259 struct r600_cs_track *track;
2260 int r;
2262 if (p->track == NULL) {
2263 /* initialize tracker, we are in kms */
2264 track = kzalloc(sizeof(*track), GFP_KERNEL);
2265 if (track == NULL)
2266 return -ENOMEM;
2267 r600_cs_track_init(track);
2268 if (p->rdev->family < CHIP_RV770) {
2269 track->npipes = p->rdev->config.r600.tiling_npipes;
2270 track->nbanks = p->rdev->config.r600.tiling_nbanks;
2271 track->group_size = p->rdev->config.r600.tiling_group_size;
2272 } else if (p->rdev->family <= CHIP_RV740) {
2273 track->npipes = p->rdev->config.rv770.tiling_npipes;
2274 track->nbanks = p->rdev->config.rv770.tiling_nbanks;
2275 track->group_size = p->rdev->config.rv770.tiling_group_size;
2277 p->track = track;
2279 do {
2280 r = radeon_cs_packet_parse(p, &pkt, p->idx);
2281 if (r) {
2282 kfree(p->track);
2283 p->track = NULL;
2284 return r;
2286 p->idx += pkt.count + 2;
2287 switch (pkt.type) {
2288 case RADEON_PACKET_TYPE0:
2289 r = r600_cs_parse_packet0(p, &pkt);
2290 break;
2291 case RADEON_PACKET_TYPE2:
2292 break;
2293 case RADEON_PACKET_TYPE3:
2294 r = r600_packet3_check(p, &pkt);
2295 break;
2296 default:
2297 DRM_ERROR("Unknown packet type %d !\n", pkt.type);
2298 kfree(p->track);
2299 p->track = NULL;
2300 return -EINVAL;
2302 if (r) {
2303 kfree(p->track);
2304 p->track = NULL;
2305 return r;
2307 } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
2308 #if 0
2309 for (r = 0; r < p->ib.length_dw; r++) {
2310 printk(KERN_INFO "%05d 0x%08X\n", r, p->ib.ptr[r]);
2311 mdelay(1);
2312 }
2313 #endif
2314 kfree(p->track);
2315 p->track = NULL;
2316 return 0;
2317 }
2319 #ifdef CONFIG_DRM_RADEON_UMS
2321 /**
2322 * r600_cs_parser_fini() - clean parser states
2323 * @parser: parser structure holding parsing context.
2324 * @error: error number
2325 *
2326 * If error is set, unvalidate the buffers; otherwise just free the memory
2327 * used by the parsing context.
2328 */
2329 static void r600_cs_parser_fini(struct radeon_cs_parser *parser, int error)
2330 {
2331 unsigned i;
2333 kfree(parser->relocs);
2334 for (i = 0; i < parser->nchunks; i++) {
2335 kfree(parser->chunks[i].kdata);
2336 if (parser->rdev && (parser->rdev->flags & RADEON_IS_AGP)) {
2337 kfree(parser->chunks[i].kpage[0]);
2338 kfree(parser->chunks[i].kpage[1]);
2341 kfree(parser->chunks);
2342 kfree(parser->chunks_array);
2343 }
2345 static int r600_cs_parser_relocs_legacy(struct radeon_cs_parser *p)
2346 {
2347 if (p->chunk_relocs_idx == -1) {
2348 return 0;
2350 p->relocs = kzalloc(sizeof(struct radeon_cs_reloc), GFP_KERNEL);
2351 if (p->relocs == NULL) {
2352 return -ENOMEM;
2354 return 0;
2355 }
2357 int r600_cs_legacy(struct drm_device *dev, void *data, struct drm_file *filp,
2358 unsigned family, u32 *ib, int *l)
2359 {
2360 struct radeon_cs_parser parser;
2361 struct radeon_cs_chunk *ib_chunk;
2362 struct r600_cs_track *track;
2363 int r;
2365 /* initialize tracker */
2366 track = kzalloc(sizeof(*track), GFP_KERNEL);
2367 if (track == NULL)
2368 return -ENOMEM;
2369 r600_cs_track_init(track);
2370 r600_cs_legacy_get_tiling_conf(dev, &track->npipes, &track->nbanks, &track->group_size);
2371 /* initialize parser */
2372 memset(&parser, 0, sizeof(struct radeon_cs_parser));
2373 parser.filp = filp;
2374 parser.dev = &dev->pdev->dev;
2375 parser.rdev = NULL;
2376 parser.family = family;
2377 parser.track = track;
2378 parser.ib.ptr = ib;
2379 r = radeon_cs_parser_init(&parser, data);
2380 if (r) {
2381 DRM_ERROR("Failed to initialize parser !\n");
2382 r600_cs_parser_fini(&parser, r);
2383 return r;
2385 r = r600_cs_parser_relocs_legacy(&parser);
2386 if (r) {
2387 DRM_ERROR("Failed to parse relocation !\n");
2388 r600_cs_parser_fini(&parser, r);
2389 return r;
2391 /* Copy the packet into the IB, the parser will read from the
2392 * input memory (cached) and write to the IB (which can be
2393 * uncached). */
2394 ib_chunk = &parser.chunks[parser.chunk_ib_idx];
2395 parser.ib.length_dw = ib_chunk->length_dw;
2396 *l = parser.ib.length_dw;
2397 r = r600_cs_parse(&parser);
2398 if (r) {
2399 DRM_ERROR("Invalid command stream !\n");
2400 r600_cs_parser_fini(&parser, r);
2401 return r;
2403 r = radeon_cs_finish_pages(&parser);
2404 if (r) {
2405 DRM_ERROR("Invalid command stream !\n");
2406 r600_cs_parser_fini(&parser, r);
2407 return r;
2409 r600_cs_parser_fini(&parser, r);
2410 return r;
2411 }
2413 void r600_cs_legacy_init(void)
2414 {
2415 r600_nomm = 1;
2416 }
2418 #endif
2420 /*
2421 * DMA
2422 */
2423 /**
2424 * r600_dma_cs_next_reloc() - parse next reloc
2425 * @p: parser structure holding parsing context.
2426 * @cs_reloc: reloc information
2427 *
2428 * Returns the next reloc, does BO validation, and computes the
2429 * GPU offset using the provided start.
2430 */
2431 int r600_dma_cs_next_reloc(struct radeon_cs_parser *p,
2432 struct radeon_cs_reloc **cs_reloc)
2433 {
2434 struct radeon_cs_chunk *relocs_chunk;
2435 unsigned idx;
2437 *cs_reloc = NULL;
2438 if (p->chunk_relocs_idx == -1) {
2439 DRM_ERROR("No relocation chunk !\n");
2440 return -EINVAL;
2442 relocs_chunk = &p->chunks[p->chunk_relocs_idx];
2443 idx = p->dma_reloc_idx;
2444 if (idx >= p->nrelocs) {
2445 DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
2446 idx, p->nrelocs);
2447 return -EINVAL;
2449 *cs_reloc = p->relocs_ptr[idx];
2450 p->dma_reloc_idx++;
2451 return 0;
2452 }
2454 #define GET_DMA_CMD(h) (((h) & 0xf0000000) >> 28)
2455 #define GET_DMA_COUNT(h) ((h) & 0x0000ffff)
2456 #define GET_DMA_T(h) (((h) & 0x00800000) >> 23)
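/* Per the masks above, a DMA packet header dword decodes as:
 *   cmd   = header[31:28]
 *   tiled = header[23]
 *   count = header[15:0]   (payload size in dwords)
 * e.g. GET_DMA_CMD(0x40800010) is 0x4, GET_DMA_T(0x40800010) is 1 and
 * GET_DMA_COUNT(0x40800010) is 0x10.
 */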
2458 /**
2459 * r600_dma_cs_parse() - parse the DMA IB
2460 * @p: parser structure holding parsing context.
2461 *
2462 * Parses the DMA IB from the CS ioctl and updates
2463 * the GPU addresses based on the reloc information and
2464 * checks for errors (R6xx-R7xx).
2465 * Returns 0 on success or a negative error code on failure.
2466 */
2467 int r600_dma_cs_parse(struct radeon_cs_parser *p)
2468 {
2469 struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
2470 struct radeon_cs_reloc *src_reloc, *dst_reloc;
2471 u32 header, cmd, count, tiled;
2472 volatile u32 *ib = p->ib.ptr;
2473 u32 idx, idx_value;
2474 u64 src_offset, dst_offset;
2475 int r;
2477 do {
2478 if (p->idx >= ib_chunk->length_dw) {
2479 DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
2480 p->idx, ib_chunk->length_dw);
2481 return -EINVAL;
2483 idx = p->idx;
2484 header = radeon_get_ib_value(p, idx);
2485 cmd = GET_DMA_CMD(header);
2486 count = GET_DMA_COUNT(header);
2487 tiled = GET_DMA_T(header);
2489 switch (cmd) {
2490 case DMA_PACKET_WRITE:
2491 r = r600_dma_cs_next_reloc(p, &dst_reloc);
2492 if (r) {
2493 DRM_ERROR("bad DMA_PACKET_WRITE\n");
2494 return -EINVAL;
2496 if (tiled) {
2497 dst_offset = radeon_get_ib_value(p, idx+1);
2498 dst_offset <<= 8;
2500 ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
2501 p->idx += count + 5;
2502 } else {
2503 dst_offset = radeon_get_ib_value(p, idx+1);
2504 dst_offset |= ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32;
2506 ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
2507 ib[idx+2] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
2508 p->idx += count + 3;
2510 if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
2511 dev_warn(p->dev, "DMA write buffer too small (%llu %lu)\n",
2512 dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
2513 return -EINVAL;
2515 break;
2516 case DMA_PACKET_COPY:
2517 r = r600_dma_cs_next_reloc(p, &src_reloc);
2518 if (r) {
2519 DRM_ERROR("bad DMA_PACKET_COPY\n");
2520 return -EINVAL;
2522 r = r600_dma_cs_next_reloc(p, &dst_reloc);
2523 if (r) {
2524 DRM_ERROR("bad DMA_PACKET_COPY\n");
2525 return -EINVAL;
2527 if (tiled) {
2528 idx_value = radeon_get_ib_value(p, idx + 2);
2529 /* detile bit */
2530 if (idx_value & (1 << 31)) {
2531 /* tiled src, linear dst */
2532 src_offset = radeon_get_ib_value(p, idx+1);
2533 src_offset <<= 8;
2534 ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);
2536 dst_offset = radeon_get_ib_value(p, idx+5);
2537 dst_offset |= ((u64)(radeon_get_ib_value(p, idx+6) & 0xff)) << 32;
2538 ib[idx+5] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
2539 ib[idx+6] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
2540 } else {
2541 /* linear src, tiled dst */
2542 src_offset = radeon_get_ib_value(p, idx+5);
2543 src_offset |= ((u64)(radeon_get_ib_value(p, idx+6) & 0xff)) << 32;
2544 ib[idx+5] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
2545 ib[idx+6] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
2547 dst_offset = radeon_get_ib_value(p, idx+1);
2548 dst_offset <<= 8;
2549 ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
2551 p->idx += 7;
2552 } else {
2553 if (p->family >= CHIP_RV770) {
2554 src_offset = radeon_get_ib_value(p, idx+2);
2555 src_offset |= ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
2556 dst_offset = radeon_get_ib_value(p, idx+1);
2557 dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff)) << 32;
2559 ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
2560 ib[idx+2] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
2561 ib[idx+3] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
2562 ib[idx+4] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
2563 p->idx += 5;
2564 } else {
2565 src_offset = radeon_get_ib_value(p, idx+2);
2566 src_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff)) << 32;
2567 dst_offset = radeon_get_ib_value(p, idx+1);
2568 dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff0000)) << 16;
2570 ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
2571 ib[idx+2] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
2572 ib[idx+3] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
2573 ib[idx+3] += (upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff) << 16;
2574 p->idx += 4;
2577 if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
2578 dev_warn(p->dev, "DMA copy src buffer too small (%llu %lu)\n",
2579 src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
2580 return -EINVAL;
2582 if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
2583 dev_warn(p->dev, "DMA copy dst buffer too small (%llu %lu)\n",
2584 dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
2585 return -EINVAL;
2587 break;
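/* DMA_PACKET_COPY above handles three encodings: tiled<->linear copies
 * (7 dwords, with the detile bit 31 of dword 2 picking the direction),
 * linear copies on RV770+ (5 dwords with separate high-address dwords),
 * and the older r6xx linear form (4 dwords, both high address bytes packed
 * into dword 3); in every form both offsets are bounds-checked against
 * their BOs.
 */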
2588 case DMA_PACKET_CONSTANT_FILL:
2589 if (p->family < CHIP_RV770) {
2590 DRM_ERROR("Constant Fill is 7xx only !\n");
2591 return -EINVAL;
2593 r = r600_dma_cs_next_reloc(p, &dst_reloc);
2594 if (r) {
2595 DRM_ERROR("bad DMA_PACKET_CONSTANT_FILL\n");
2596 return -EINVAL;
2598 dst_offset = radeon_get_ib_value(p, idx+1);
2599 dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0x00ff0000)) << 16;
2600 if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
2601 dev_warn(p->dev, "DMA constant fill buffer too small (%llu %lu)\n",
2602 dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
2603 return -EINVAL;
2605 ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
2606 ib[idx+3] += (upper_32_bits(dst_reloc->lobj.gpu_offset) << 16) & 0x00ff0000;
2607 p->idx += 4;
2608 break;
2609 case DMA_PACKET_NOP:
2610 p->idx += 1;
2611 break;
2612 default:
2613 DRM_ERROR("Unknown packet type %d at %d !\n", cmd, idx);
2614 return -EINVAL;
2616 } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
2617 #if 0
2618 for (r = 0; r < p->ib.length_dw; r++) {
2619 printk(KERN_INFO "%05d 0x%08X\n", r, p->ib.ptr[r]);
2620 mdelay(1);
2621 }
2622 #endif
2623 return 0;
2624 }