/* r128_state.c -- State support for r128 -*- linux-c -*-
 * Created: Thu Jan 27 02:53:43 2000 by gareth@valinux.com
 */
/*
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Gareth Hughes <gareth@valinux.com>
 */

#include "drmP.h"
#include "drm.h"
#include "r128_drm.h"
#include "r128_drv.h"

/* ================================================================
 * CCE hardware state programming functions
 */

static void r128_emit_clip_rects(drm_r128_private_t *dev_priv,
				 struct drm_clip_rect *boxes, int count)
{
	u32 aux_sc_cntl = 0x00000000;
	RING_LOCALS;
	DRM_DEBUG("\n");

	BEGIN_RING((count < 3 ? count : 3) * 5 + 2);

	if (count >= 1) {
		OUT_RING(CCE_PACKET0(R128_AUX1_SC_LEFT, 3));
		OUT_RING(boxes[0].x1);
		OUT_RING(boxes[0].x2 - 1);
		OUT_RING(boxes[0].y1);
		OUT_RING(boxes[0].y2 - 1);

		aux_sc_cntl |= (R128_AUX1_SC_EN | R128_AUX1_SC_MODE_OR);
	}
	if (count >= 2) {
		OUT_RING(CCE_PACKET0(R128_AUX2_SC_LEFT, 3));
		OUT_RING(boxes[1].x1);
		OUT_RING(boxes[1].x2 - 1);
		OUT_RING(boxes[1].y1);
		OUT_RING(boxes[1].y2 - 1);

		aux_sc_cntl |= (R128_AUX2_SC_EN | R128_AUX2_SC_MODE_OR);
	}
	if (count >= 3) {
		OUT_RING(CCE_PACKET0(R128_AUX3_SC_LEFT, 3));
		OUT_RING(boxes[2].x1);
		OUT_RING(boxes[2].x2 - 1);
		OUT_RING(boxes[2].y1);
		OUT_RING(boxes[2].y2 - 1);

		aux_sc_cntl |= (R128_AUX3_SC_EN | R128_AUX3_SC_MODE_OR);
	}

	OUT_RING(CCE_PACKET0(R128_AUX_SC_CNTL, 0));
	OUT_RING(aux_sc_cntl);

	ADVANCE_RING();
}

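/* Each r128_emit_*() helper below writes one block of 3D context state
 * from the shared SAREA into the CCE command ring.  r128_emit_state()
 * selects which blocks to re-emit based on the SAREA dirty flags.
 */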
static __inline__ void r128_emit_core(drm_r128_private_t *dev_priv)
{
	drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
	drm_r128_context_regs_t *ctx = &sarea_priv->context_state;
	RING_LOCALS;
	DRM_DEBUG("\n");

	BEGIN_RING(2);

	OUT_RING(CCE_PACKET0(R128_SCALE_3D_CNTL, 0));
	OUT_RING(ctx->scale_3d_cntl);

	ADVANCE_RING();
}

static __inline__ void r128_emit_context(drm_r128_private_t *dev_priv)
{
	drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
	drm_r128_context_regs_t *ctx = &sarea_priv->context_state;
	RING_LOCALS;
	DRM_DEBUG("\n");

	BEGIN_RING(13);

	OUT_RING(CCE_PACKET0(R128_DST_PITCH_OFFSET_C, 11));
	OUT_RING(ctx->dst_pitch_offset_c);
	OUT_RING(ctx->dp_gui_master_cntl_c);
	OUT_RING(ctx->sc_top_left_c);
	OUT_RING(ctx->sc_bottom_right_c);
	OUT_RING(ctx->z_offset_c);
	OUT_RING(ctx->z_pitch_c);
	OUT_RING(ctx->z_sten_cntl_c);
	OUT_RING(ctx->tex_cntl_c);
	OUT_RING(ctx->misc_3d_state_cntl_reg);
	OUT_RING(ctx->texture_clr_cmp_clr_c);
	OUT_RING(ctx->texture_clr_cmp_msk_c);
	OUT_RING(ctx->fog_color_c);

	ADVANCE_RING();
}

static __inline__ void r128_emit_setup(drm_r128_private_t *dev_priv)
{
	drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
	drm_r128_context_regs_t *ctx = &sarea_priv->context_state;
	RING_LOCALS;
	DRM_DEBUG("\n");

	BEGIN_RING(3);

	OUT_RING(CCE_PACKET1(R128_SETUP_CNTL, R128_PM4_VC_FPU_SETUP));
	OUT_RING(ctx->setup_cntl);
	OUT_RING(ctx->pm4_vc_fpu_setup);

	ADVANCE_RING();
}

static __inline__ void r128_emit_masks(drm_r128_private_t *dev_priv)
{
	drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
	drm_r128_context_regs_t *ctx = &sarea_priv->context_state;
	RING_LOCALS;
	DRM_DEBUG("\n");

	BEGIN_RING(5);

	OUT_RING(CCE_PACKET0(R128_DP_WRITE_MASK, 0));
	OUT_RING(ctx->dp_write_mask);

	OUT_RING(CCE_PACKET0(R128_STEN_REF_MASK_C, 1));
	OUT_RING(ctx->sten_ref_mask_c);
	OUT_RING(ctx->plane_3d_mask_c);

	ADVANCE_RING();
}

static __inline__ void r128_emit_window(drm_r128_private_t *dev_priv)
{
	drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
	drm_r128_context_regs_t *ctx = &sarea_priv->context_state;
	RING_LOCALS;
	DRM_DEBUG("\n");

	BEGIN_RING(2);

	OUT_RING(CCE_PACKET0(R128_WINDOW_XY_OFFSET, 0));
	OUT_RING(ctx->window_xy_offset);

	ADVANCE_RING();
}

static __inline__ void r128_emit_tex0(drm_r128_private_t *dev_priv)
{
	drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
	drm_r128_context_regs_t *ctx = &sarea_priv->context_state;
	drm_r128_texture_regs_t *tex = &sarea_priv->tex_state[0];
	int i;
	RING_LOCALS;
	DRM_DEBUG("\n");

	BEGIN_RING(7 + R128_MAX_TEXTURE_LEVELS);

	OUT_RING(CCE_PACKET0(R128_PRIM_TEX_CNTL_C,
			     2 + R128_MAX_TEXTURE_LEVELS));
	OUT_RING(tex->tex_cntl);
	OUT_RING(tex->tex_combine_cntl);
	OUT_RING(ctx->tex_size_pitch_c);
	for (i = 0; i < R128_MAX_TEXTURE_LEVELS; i++)
		OUT_RING(tex->tex_offset[i]);

	OUT_RING(CCE_PACKET0(R128_CONSTANT_COLOR_C, 1));
	OUT_RING(ctx->constant_color_c);
	OUT_RING(tex->tex_border_color);

	ADVANCE_RING();
}

static __inline__ void r128_emit_tex1(drm_r128_private_t *dev_priv)
{
	drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
	drm_r128_texture_regs_t *tex = &sarea_priv->tex_state[1];
	int i;
	RING_LOCALS;
	DRM_DEBUG("\n");

	BEGIN_RING(5 + R128_MAX_TEXTURE_LEVELS);

	OUT_RING(CCE_PACKET0(R128_SEC_TEX_CNTL_C, 1 + R128_MAX_TEXTURE_LEVELS));
	OUT_RING(tex->tex_cntl);
	OUT_RING(tex->tex_combine_cntl);
	for (i = 0; i < R128_MAX_TEXTURE_LEVELS; i++)
		OUT_RING(tex->tex_offset[i]);

	OUT_RING(CCE_PACKET0(R128_SEC_TEXTURE_BORDER_COLOR_C, 0));
	OUT_RING(tex->tex_border_color);

	ADVANCE_RING();
}

static void r128_emit_state(drm_r128_private_t *dev_priv)
{
	drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
	unsigned int dirty = sarea_priv->dirty;

	DRM_DEBUG("dirty=0x%08x\n", dirty);

	if (dirty & R128_UPLOAD_CORE) {
		r128_emit_core(dev_priv);
		sarea_priv->dirty &= ~R128_UPLOAD_CORE;
	}
	if (dirty & R128_UPLOAD_CONTEXT) {
		r128_emit_context(dev_priv);
		sarea_priv->dirty &= ~R128_UPLOAD_CONTEXT;
	}
	if (dirty & R128_UPLOAD_SETUP) {
		r128_emit_setup(dev_priv);
		sarea_priv->dirty &= ~R128_UPLOAD_SETUP;
	}
	if (dirty & R128_UPLOAD_MASKS) {
		r128_emit_masks(dev_priv);
		sarea_priv->dirty &= ~R128_UPLOAD_MASKS;
	}
	if (dirty & R128_UPLOAD_WINDOW) {
		r128_emit_window(dev_priv);
		sarea_priv->dirty &= ~R128_UPLOAD_WINDOW;
	}
	if (dirty & R128_UPLOAD_TEX0) {
		r128_emit_tex0(dev_priv);
		sarea_priv->dirty &= ~R128_UPLOAD_TEX0;
	}
	if (dirty & R128_UPLOAD_TEX1) {
		r128_emit_tex1(dev_priv);
		sarea_priv->dirty &= ~R128_UPLOAD_TEX1;
	}

	/* Turn off the texture cache flushing */
	sarea_priv->context_state.tex_cntl_c &= ~R128_TEX_CACHE_FLUSH;

	sarea_priv->dirty &= ~R128_REQUIRE_QUIESCENCE;
}

#if R128_PERFORMANCE_BOXES
/* ================================================================
 * Performance monitoring functions
 */

static void r128_clear_box(drm_r128_private_t *dev_priv,
			   int x, int y, int w, int h, int r, int g, int b)
{
	u32 pitch, offset;
	u32 fb_bpp, color;
	RING_LOCALS;

	switch (dev_priv->fb_bpp) {
	case 16:
		fb_bpp = R128_GMC_DST_16BPP;
		color = (((r & 0xf8) << 8) |
			 ((g & 0xfc) << 3) | ((b & 0xf8) >> 3));
		break;
	case 24:
		fb_bpp = R128_GMC_DST_24BPP;
		color = ((r << 16) | (g << 8) | b);
		break;
	case 32:
		fb_bpp = R128_GMC_DST_32BPP;
		color = (((0xff) << 24) | (r << 16) | (g << 8) | b);
		break;
	default:
		return;
	}

	offset = dev_priv->back_offset;
	pitch = dev_priv->back_pitch >> 3;

	BEGIN_RING(6);

	OUT_RING(CCE_PACKET3(R128_CNTL_PAINT_MULTI, 4));
	OUT_RING(R128_GMC_DST_PITCH_OFFSET_CNTL |
		 R128_GMC_BRUSH_SOLID_COLOR |
		 fb_bpp |
		 R128_GMC_SRC_DATATYPE_COLOR |
		 R128_ROP3_P |
		 R128_GMC_CLR_CMP_CNTL_DIS | R128_GMC_AUX_CLIP_DIS);

	OUT_RING((pitch << 21) | (offset >> 5));
	OUT_RING(color);

	OUT_RING((x << 16) | y);
	OUT_RING((w << 16) | h);

	ADVANCE_RING();
}

static void r128_cce_performance_boxes(drm_r128_private_t *dev_priv)
{
	if (atomic_read(&dev_priv->idle_count) == 0)
		r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
	else
		atomic_set(&dev_priv->idle_count, 0);
}

#endif

/* ================================================================
 * CCE command dispatch functions
 */

static void r128_print_dirty(const char *msg, unsigned int flags)
{
	DRM_INFO("%s: (0x%x) %s%s%s%s%s%s%s%s%s\n",
		 msg,
		 flags,
		 (flags & R128_UPLOAD_CORE) ? "core, " : "",
		 (flags & R128_UPLOAD_CONTEXT) ? "context, " : "",
		 (flags & R128_UPLOAD_SETUP) ? "setup, " : "",
		 (flags & R128_UPLOAD_TEX0) ? "tex0, " : "",
		 (flags & R128_UPLOAD_TEX1) ? "tex1, " : "",
		 (flags & R128_UPLOAD_MASKS) ? "masks, " : "",
		 (flags & R128_UPLOAD_WINDOW) ? "window, " : "",
		 (flags & R128_UPLOAD_CLIPRECTS) ? "cliprects, " : "",
		 (flags & R128_REQUIRE_QUIESCENCE) ? "quiescence, " : "");
}

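/* Clear the front/back color buffers and/or the depth buffer, one
 * cliprect at a time.  When page flipping is active and the back page
 * is currently displayed, the FRONT and BACK flags are swapped so the
 * clear hits the buffers the client actually sees.
 */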
static void r128_cce_dispatch_clear(struct drm_device *dev,
				    drm_r128_clear_t *clear)
{
	drm_r128_private_t *dev_priv = dev->dev_private;
	drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
	int nbox = sarea_priv->nbox;
	struct drm_clip_rect *pbox = sarea_priv->boxes;
	unsigned int flags = clear->flags;
	int i;
	RING_LOCALS;
	DRM_DEBUG("\n");

	if (dev_priv->page_flipping && dev_priv->current_page == 1) {
		unsigned int tmp = flags;

		flags &= ~(R128_FRONT | R128_BACK);
		if (tmp & R128_FRONT)
			flags |= R128_BACK;
		if (tmp & R128_BACK)
			flags |= R128_FRONT;
	}

	for (i = 0; i < nbox; i++) {
		int x = pbox[i].x1;
		int y = pbox[i].y1;
		int w = pbox[i].x2 - x;
		int h = pbox[i].y2 - y;

		DRM_DEBUG("dispatch clear %d,%d-%d,%d flags 0x%x\n",
			  pbox[i].x1, pbox[i].y1, pbox[i].x2,
			  pbox[i].y2, flags);

		if (flags & (R128_FRONT | R128_BACK)) {
			BEGIN_RING(2);

			OUT_RING(CCE_PACKET0(R128_DP_WRITE_MASK, 0));
			OUT_RING(clear->color_mask);

			ADVANCE_RING();
		}

		if (flags & R128_FRONT) {
			BEGIN_RING(6);

			OUT_RING(CCE_PACKET3(R128_CNTL_PAINT_MULTI, 4));
			OUT_RING(R128_GMC_DST_PITCH_OFFSET_CNTL |
				 R128_GMC_BRUSH_SOLID_COLOR |
				 (dev_priv->color_fmt << 8) |
				 R128_GMC_SRC_DATATYPE_COLOR |
				 R128_ROP3_P |
				 R128_GMC_CLR_CMP_CNTL_DIS |
				 R128_GMC_AUX_CLIP_DIS);

			OUT_RING(dev_priv->front_pitch_offset_c);
			OUT_RING(clear->clear_color);

			OUT_RING((x << 16) | y);
			OUT_RING((w << 16) | h);

			ADVANCE_RING();
		}

		if (flags & R128_BACK) {
			BEGIN_RING(6);

			OUT_RING(CCE_PACKET3(R128_CNTL_PAINT_MULTI, 4));
			OUT_RING(R128_GMC_DST_PITCH_OFFSET_CNTL |
				 R128_GMC_BRUSH_SOLID_COLOR |
				 (dev_priv->color_fmt << 8) |
				 R128_GMC_SRC_DATATYPE_COLOR |
				 R128_ROP3_P |
				 R128_GMC_CLR_CMP_CNTL_DIS |
				 R128_GMC_AUX_CLIP_DIS);

			OUT_RING(dev_priv->back_pitch_offset_c);
			OUT_RING(clear->clear_color);

			OUT_RING((x << 16) | y);
			OUT_RING((w << 16) | h);

			ADVANCE_RING();
		}

		if (flags & R128_DEPTH) {
			BEGIN_RING(6);

			OUT_RING(CCE_PACKET3(R128_CNTL_PAINT_MULTI, 4));
			OUT_RING(R128_GMC_DST_PITCH_OFFSET_CNTL |
				 R128_GMC_BRUSH_SOLID_COLOR |
				 (dev_priv->depth_fmt << 8) |
				 R128_GMC_SRC_DATATYPE_COLOR |
				 R128_ROP3_P |
				 R128_GMC_CLR_CMP_CNTL_DIS |
				 R128_GMC_AUX_CLIP_DIS | R128_GMC_WR_MSK_DIS);

			OUT_RING(dev_priv->depth_pitch_offset_c);
			OUT_RING(clear->clear_depth);

			OUT_RING((x << 16) | y);
			OUT_RING((w << 16) | h);

			ADVANCE_RING();
		}
	}
}

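/* Blit the rendered back buffer to the front buffer (sources and
 * destinations are exchanged when the pages are flipped) for each
 * cliprect, then emit the new frame count so clients can throttle on
 * R128_LAST_FRAME_REG.
 */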
static void r128_cce_dispatch_swap(struct drm_device *dev)
{
	drm_r128_private_t *dev_priv = dev->dev_private;
	drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
	int nbox = sarea_priv->nbox;
	struct drm_clip_rect *pbox = sarea_priv->boxes;
	int i;
	RING_LOCALS;
	DRM_DEBUG("\n");

#if R128_PERFORMANCE_BOXES
	/* Do some trivial performance monitoring...
	 */
	r128_cce_performance_boxes(dev_priv);
#endif

	for (i = 0; i < nbox; i++) {
		int x = pbox[i].x1;
		int y = pbox[i].y1;
		int w = pbox[i].x2 - x;
		int h = pbox[i].y2 - y;

		BEGIN_RING(7);

		OUT_RING(CCE_PACKET3(R128_CNTL_BITBLT_MULTI, 5));
		OUT_RING(R128_GMC_SRC_PITCH_OFFSET_CNTL |
			 R128_GMC_DST_PITCH_OFFSET_CNTL |
			 R128_GMC_BRUSH_NONE |
			 (dev_priv->color_fmt << 8) |
			 R128_GMC_SRC_DATATYPE_COLOR |
			 R128_ROP3_S |
			 R128_DP_SRC_SOURCE_MEMORY |
			 R128_GMC_CLR_CMP_CNTL_DIS |
			 R128_GMC_AUX_CLIP_DIS | R128_GMC_WR_MSK_DIS);

		/* Make this work even if front & back are flipped:
		 */
		if (dev_priv->current_page == 0) {
			OUT_RING(dev_priv->back_pitch_offset_c);
			OUT_RING(dev_priv->front_pitch_offset_c);
		} else {
			OUT_RING(dev_priv->front_pitch_offset_c);
			OUT_RING(dev_priv->back_pitch_offset_c);
		}

		OUT_RING((x << 16) | y);
		OUT_RING((x << 16) | y);
		OUT_RING((w << 16) | h);

		ADVANCE_RING();
	}

	/* Increment the frame counter.  The client-side 3D driver must
	 * throttle the framerate by waiting for this value before
	 * performing the swapbuffer ioctl.
	 */
	dev_priv->sarea_priv->last_frame++;

	BEGIN_RING(2);

	OUT_RING(CCE_PACKET0(R128_LAST_FRAME_REG, 0));
	OUT_RING(dev_priv->sarea_priv->last_frame);

	ADVANCE_RING();
}

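/* Perform a page flip: wait for the previous flip to complete, point
 * R128_CRTC_OFFSET at the other buffer, toggle the current page, and
 * emit the new frame count for client throttling.
 */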
static void r128_cce_dispatch_flip(struct drm_device *dev)
{
	drm_r128_private_t *dev_priv = dev->dev_private;
	RING_LOCALS;
	DRM_DEBUG("page=%d pfCurrentPage=%d\n",
		  dev_priv->current_page, dev_priv->sarea_priv->pfCurrentPage);

#if R128_PERFORMANCE_BOXES
	/* Do some trivial performance monitoring...
	 */
	r128_cce_performance_boxes(dev_priv);
#endif

	BEGIN_RING(4);

	R128_WAIT_UNTIL_PAGE_FLIPPED();
	OUT_RING(CCE_PACKET0(R128_CRTC_OFFSET, 0));

	if (dev_priv->current_page == 0)
		OUT_RING(dev_priv->back_offset);
	else
		OUT_RING(dev_priv->front_offset);

	ADVANCE_RING();

	/* Increment the frame counter.  The client-side 3D driver must
	 * throttle the framerate by waiting for this value before
	 * performing the swapbuffer ioctl.
	 */
	dev_priv->sarea_priv->last_frame++;
	dev_priv->sarea_priv->pfCurrentPage = dev_priv->current_page =
	    1 - dev_priv->current_page;

	BEGIN_RING(2);

	OUT_RING(CCE_PACKET0(R128_LAST_FRAME_REG, 0));
	OUT_RING(dev_priv->sarea_priv->last_frame);

	ADVANCE_RING();
}

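/* Emit a client vertex buffer as a PRIM_WALK_LIST primitive, once per
 * group of up to three cliprects.  Discarded buffers are aged through
 * R128_LAST_DISPATCH_REG so they can be reclaimed later.
 */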
static void r128_cce_dispatch_vertex(struct drm_device *dev, struct drm_buf *buf)
{
	drm_r128_private_t *dev_priv = dev->dev_private;
	drm_r128_buf_priv_t *buf_priv = buf->dev_private;
	drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
	int format = sarea_priv->vc_format;
	int offset = buf->bus_address;
	int size = buf->used;
	int prim = buf_priv->prim;
	int i = 0;
	RING_LOCALS;
	DRM_DEBUG("buf=%d nbox=%d\n", buf->idx, sarea_priv->nbox);

	if (0)
		r128_print_dirty("dispatch_vertex", sarea_priv->dirty);

	if (buf->used) {
		buf_priv->dispatched = 1;

		if (sarea_priv->dirty & ~R128_UPLOAD_CLIPRECTS)
			r128_emit_state(dev_priv);

		do {
			/* Emit the next set of up to three cliprects */
			if (i < sarea_priv->nbox) {
				r128_emit_clip_rects(dev_priv,
						     &sarea_priv->boxes[i],
						     sarea_priv->nbox - i);
			}

			/* Emit the vertex buffer rendering commands */
			BEGIN_RING(5);

			OUT_RING(CCE_PACKET3(R128_3D_RNDR_GEN_INDX_PRIM, 3));
			OUT_RING(offset);
			OUT_RING(size);
			OUT_RING(format);
			OUT_RING(prim | R128_CCE_VC_CNTL_PRIM_WALK_LIST |
				 (size << R128_CCE_VC_CNTL_NUM_SHIFT));

			ADVANCE_RING();

			i += 3;
		} while (i < sarea_priv->nbox);
	}

	if (buf_priv->discard) {
		buf_priv->age = dev_priv->sarea_priv->last_dispatch;

		/* Emit the vertex buffer age */
		BEGIN_RING(2);

		OUT_RING(CCE_PACKET0(R128_LAST_DISPATCH_REG, 0));
		OUT_RING(buf_priv->age);

		ADVANCE_RING();

		buf->pending = 1;
		buf->used = 0;
		/* FIXME: Check dispatched field */
		buf_priv->dispatched = 0;
	}

	dev_priv->sarea_priv->last_dispatch++;

	sarea_priv->dirty &= ~R128_UPLOAD_CLIPRECTS;
	sarea_priv->nbox = 0;
}

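/* Fire an indirect buffer at the CCE.  The buffer is padded to an even
 * number of dwords with a Type-2 packet if necessary, then referenced
 * through R128_PM4_IW_INDOFF rather than copied into the ring.
 */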
static void r128_cce_dispatch_indirect(struct drm_device *dev,
				       struct drm_buf *buf, int start, int end)
{
	drm_r128_private_t *dev_priv = dev->dev_private;
	drm_r128_buf_priv_t *buf_priv = buf->dev_private;
	RING_LOCALS;
	DRM_DEBUG("indirect: buf=%d s=0x%x e=0x%x\n", buf->idx, start, end);

	if (start != end) {
		int offset = buf->bus_address + start;
		int dwords = (end - start + 3) / sizeof(u32);

		/* Indirect buffer data must be an even number of
		 * dwords, so if we've been given an odd number we must
		 * pad the data with a Type-2 CCE packet.
		 */
		if (dwords & 1) {
			u32 *data = (u32 *)
			    ((char *)dev->agp_buffer_map->handle
			     + buf->offset + start);
			data[dwords++] = cpu_to_le32(R128_CCE_PACKET2);
		}

		buf_priv->dispatched = 1;

		/* Fire off the indirect buffer */
		BEGIN_RING(3);

		OUT_RING(CCE_PACKET0(R128_PM4_IW_INDOFF, 1));
		OUT_RING(offset);
		OUT_RING(dwords);

		ADVANCE_RING();
	}

	if (buf_priv->discard) {
		buf_priv->age = dev_priv->sarea_priv->last_dispatch;

		/* Emit the indirect buffer age */
		BEGIN_RING(2);

		OUT_RING(CCE_PACKET0(R128_LAST_DISPATCH_REG, 0));
		OUT_RING(buf_priv->age);

		ADVANCE_RING();

		buf->pending = 1;
		buf->used = 0;
		/* FIXME: Check dispatched field */
		buf_priv->dispatched = 0;
	}

	dev_priv->sarea_priv->last_dispatch++;
}

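/* Emit an indexed primitive.  The packet header and vertex controls are
 * patched directly into the client's buffer, which is then dispatched
 * as an indirect buffer once per group of up to three cliprects.
 */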
static void r128_cce_dispatch_indices(struct drm_device *dev,
				      struct drm_buf *buf,
				      int start, int end, int count)
{
	drm_r128_private_t *dev_priv = dev->dev_private;
	drm_r128_buf_priv_t *buf_priv = buf->dev_private;
	drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
	int format = sarea_priv->vc_format;
	int offset = dev->agp_buffer_map->offset - dev_priv->cce_buffers_offset;
	int prim = buf_priv->prim;
	u32 *data;
	int dwords;
	int i = 0;
	RING_LOCALS;
	DRM_DEBUG("indices: s=%d e=%d c=%d\n", start, end, count);

	if (0)
		r128_print_dirty("dispatch_indices", sarea_priv->dirty);

	if (start != end) {
		buf_priv->dispatched = 1;

		if (sarea_priv->dirty & ~R128_UPLOAD_CLIPRECTS)
			r128_emit_state(dev_priv);

		dwords = (end - start + 3) / sizeof(u32);

		data = (u32 *) ((char *)dev->agp_buffer_map->handle
				+ buf->offset + start);

		data[0] = cpu_to_le32(CCE_PACKET3(R128_3D_RNDR_GEN_INDX_PRIM,
						  dwords - 2));

		data[1] = cpu_to_le32(offset);
		data[2] = cpu_to_le32(R128_MAX_VB_VERTS);
		data[3] = cpu_to_le32(format);
		data[4] = cpu_to_le32((prim | R128_CCE_VC_CNTL_PRIM_WALK_IND |
				       (count << 16)));

		if (count & 0x1) {
#ifdef __LITTLE_ENDIAN
			data[dwords - 1] &= 0x0000ffff;
#else
			data[dwords - 1] &= 0xffff0000;
#endif
		}

		do {
			/* Emit the next set of up to three cliprects */
			if (i < sarea_priv->nbox) {
				r128_emit_clip_rects(dev_priv,
						     &sarea_priv->boxes[i],
						     sarea_priv->nbox - i);
			}

			r128_cce_dispatch_indirect(dev, buf, start, end);

			i += 3;
		} while (i < sarea_priv->nbox);
	}

	if (buf_priv->discard) {
		buf_priv->age = dev_priv->sarea_priv->last_dispatch;

		/* Emit the vertex buffer age */
		BEGIN_RING(2);

		OUT_RING(CCE_PACKET0(R128_LAST_DISPATCH_REG, 0));
		OUT_RING(buf_priv->age);

		ADVANCE_RING();

		buf->pending = 1;
		/* FIXME: Check dispatched field */
		buf_priv->dispatched = 0;
	}

	dev_priv->sarea_priv->last_dispatch++;

	sarea_priv->dirty &= ~R128_UPLOAD_CLIPRECTS;
	sarea_priv->nbox = 0;
}

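/* Blit texture data uploaded by the client into video memory using a
 * HOSTDATA_BLT packet built in place in the client's buffer, with pixel
 * cache flushes before and after to keep the texture data coherent.
 */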
static int r128_cce_dispatch_blit(struct drm_device *dev,
				  struct drm_file *file_priv,
				  drm_r128_blit_t *blit)
{
	drm_r128_private_t *dev_priv = dev->dev_private;
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf *buf;
	drm_r128_buf_priv_t *buf_priv;
	u32 *data;
	int dword_shift, dwords;
	RING_LOCALS;
	DRM_DEBUG("\n");

	/* The compiler won't optimize away a division by a variable,
	 * even if the only legal values are powers of two.  Thus, we'll
	 * use a shift instead.
	 */
	switch (blit->format) {
	case R128_DATATYPE_ARGB8888:
		dword_shift = 0;
		break;
	case R128_DATATYPE_ARGB1555:
	case R128_DATATYPE_RGB565:
	case R128_DATATYPE_ARGB4444:
	case R128_DATATYPE_YVYU422:
	case R128_DATATYPE_VYUY422:
		dword_shift = 1;
		break;
	case R128_DATATYPE_CI8:
	case R128_DATATYPE_RGB8:
		dword_shift = 2;
		break;
	default:
		DRM_ERROR("invalid blit format %d\n", blit->format);
		return -EINVAL;
	}

	/* Flush the pixel cache, and mark the contents as Read Invalid.
	 * This ensures no pixel data gets mixed up with the texture
	 * data from the host data blit, otherwise part of the texture
	 * image may be corrupted.
	 */
	BEGIN_RING(2);

	OUT_RING(CCE_PACKET0(R128_PC_GUI_CTLSTAT, 0));
	OUT_RING(R128_PC_RI_GUI | R128_PC_FLUSH_GUI);

	ADVANCE_RING();

	/* Dispatch the indirect buffer.
	 */
	buf = dma->buflist[blit->idx];
	buf_priv = buf->dev_private;

	if (buf->file_priv != file_priv) {
		DRM_ERROR("process %d using buffer owned by %p\n",
			  DRM_CURRENTPID, buf->file_priv);
		return -EINVAL;
	}
	if (buf->pending) {
		DRM_ERROR("sending pending buffer %d\n", blit->idx);
		return -EINVAL;
	}

	buf_priv->discard = 1;

	dwords = (blit->width * blit->height) >> dword_shift;

	data = (u32 *) ((char *)dev->agp_buffer_map->handle + buf->offset);

	data[0] = cpu_to_le32(CCE_PACKET3(R128_CNTL_HOSTDATA_BLT, dwords + 6));
	data[1] = cpu_to_le32((R128_GMC_DST_PITCH_OFFSET_CNTL |
			       R128_GMC_BRUSH_NONE |
			       (blit->format << 8) |
			       R128_GMC_SRC_DATATYPE_COLOR |
			       R128_ROP3_S |
			       R128_DP_SRC_SOURCE_HOST_DATA |
			       R128_GMC_CLR_CMP_CNTL_DIS |
			       R128_GMC_AUX_CLIP_DIS | R128_GMC_WR_MSK_DIS));

	data[2] = cpu_to_le32((blit->pitch << 21) | (blit->offset >> 5));
	data[3] = cpu_to_le32(0xffffffff);
	data[4] = cpu_to_le32(0xffffffff);
	data[5] = cpu_to_le32((blit->y << 16) | blit->x);
	data[6] = cpu_to_le32((blit->height << 16) | blit->width);
	data[7] = cpu_to_le32(dwords);

	buf->used = (dwords + 8) * sizeof(u32);

	r128_cce_dispatch_indirect(dev, buf, 0, buf->used);

	/* Flush the pixel cache after the blit completes.  This ensures
	 * the texture data is written out to memory before rendering
	 * continues.
	 */
	BEGIN_RING(2);

	OUT_RING(CCE_PACKET0(R128_PC_GUI_CTLSTAT, 0));
	OUT_RING(R128_PC_FLUSH_GUI);

	ADVANCE_RING();

	return 0;
}

/* ================================================================
 * Tiled depth buffer management
 *
 * FIXME: These should all set the destination write mask for when we
 * have hardware stencil support.
 */

static int r128_cce_dispatch_write_span(struct drm_device *dev,
					drm_r128_depth_t *depth)
{
	drm_r128_private_t *dev_priv = dev->dev_private;
	int count, x, y;
	u32 *buffer;
	u8 *mask;
	int i, buffer_size, mask_size;
	RING_LOCALS;
	DRM_DEBUG("\n");

	count = depth->n;
	if (count > 4096 || count <= 0)
		return -EMSGSIZE;

	if (DRM_COPY_FROM_USER(&x, depth->x, sizeof(x)))
		return -EFAULT;
	if (DRM_COPY_FROM_USER(&y, depth->y, sizeof(y)))
		return -EFAULT;

	buffer_size = depth->n * sizeof(u32);
	buffer = kmalloc(buffer_size, GFP_KERNEL);
	if (buffer == NULL)
		return -ENOMEM;
	if (DRM_COPY_FROM_USER(buffer, depth->buffer, buffer_size)) {
		kfree(buffer);
		return -EFAULT;
	}

	mask_size = depth->n * sizeof(u8);
	if (depth->mask) {
		mask = kmalloc(mask_size, GFP_KERNEL);
		if (mask == NULL) {
			kfree(buffer);
			return -ENOMEM;
		}
		if (DRM_COPY_FROM_USER(mask, depth->mask, mask_size)) {
			kfree(buffer);
			kfree(mask);
			return -EFAULT;
		}

		for (i = 0; i < count; i++, x++) {
			if (mask[i]) {
				BEGIN_RING(6);

				OUT_RING(CCE_PACKET3(R128_CNTL_PAINT_MULTI, 4));
				OUT_RING(R128_GMC_DST_PITCH_OFFSET_CNTL |
					 R128_GMC_BRUSH_SOLID_COLOR |
					 (dev_priv->depth_fmt << 8) |
					 R128_GMC_SRC_DATATYPE_COLOR |
					 R128_ROP3_P |
					 R128_GMC_CLR_CMP_CNTL_DIS |
					 R128_GMC_WR_MSK_DIS);

				OUT_RING(dev_priv->depth_pitch_offset_c);
				OUT_RING(buffer[i]);

				OUT_RING((x << 16) | y);
				OUT_RING((1 << 16) | 1);

				ADVANCE_RING();
			}
		}

		kfree(mask);
	} else {
		for (i = 0; i < count; i++, x++) {
			BEGIN_RING(6);

			OUT_RING(CCE_PACKET3(R128_CNTL_PAINT_MULTI, 4));
			OUT_RING(R128_GMC_DST_PITCH_OFFSET_CNTL |
				 R128_GMC_BRUSH_SOLID_COLOR |
				 (dev_priv->depth_fmt << 8) |
				 R128_GMC_SRC_DATATYPE_COLOR |
				 R128_ROP3_P |
				 R128_GMC_CLR_CMP_CNTL_DIS |
				 R128_GMC_WR_MSK_DIS);

			OUT_RING(dev_priv->depth_pitch_offset_c);
			OUT_RING(buffer[i]);

			OUT_RING((x << 16) | y);
			OUT_RING((1 << 16) | 1);

			ADVANCE_RING();
		}
	}

	kfree(buffer);

	return 0;
}

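/* Like r128_cce_dispatch_write_span(), but writes depth values at
 * arbitrary (x, y) positions supplied by the client rather than along a
 * single span.
 */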
static int r128_cce_dispatch_write_pixels(struct drm_device *dev,
					  drm_r128_depth_t *depth)
{
	drm_r128_private_t *dev_priv = dev->dev_private;
	int count, *x, *y;
	u32 *buffer;
	u8 *mask;
	int i, xbuf_size, ybuf_size, buffer_size, mask_size;
	RING_LOCALS;
	DRM_DEBUG("\n");

	count = depth->n;
	if (count > 4096 || count <= 0)
		return -EMSGSIZE;

	xbuf_size = count * sizeof(*x);
	ybuf_size = count * sizeof(*y);
	x = kmalloc(xbuf_size, GFP_KERNEL);
	if (x == NULL)
		return -ENOMEM;
	y = kmalloc(ybuf_size, GFP_KERNEL);
	if (y == NULL) {
		kfree(x);
		return -ENOMEM;
	}
	if (DRM_COPY_FROM_USER(x, depth->x, xbuf_size)) {
		kfree(x);
		kfree(y);
		return -EFAULT;
	}
	if (DRM_COPY_FROM_USER(y, depth->y, xbuf_size)) {
		kfree(x);
		kfree(y);
		return -EFAULT;
	}

	buffer_size = depth->n * sizeof(u32);
	buffer = kmalloc(buffer_size, GFP_KERNEL);
	if (buffer == NULL) {
		kfree(x);
		kfree(y);
		return -ENOMEM;
	}
	if (DRM_COPY_FROM_USER(buffer, depth->buffer, buffer_size)) {
		kfree(x);
		kfree(y);
		kfree(buffer);
		return -EFAULT;
	}

	if (depth->mask) {
		mask_size = depth->n * sizeof(u8);
		mask = kmalloc(mask_size, GFP_KERNEL);
		if (mask == NULL) {
			kfree(x);
			kfree(y);
			kfree(buffer);
			return -ENOMEM;
		}
		if (DRM_COPY_FROM_USER(mask, depth->mask, mask_size)) {
			kfree(x);
			kfree(y);
			kfree(buffer);
			kfree(mask);
			return -EFAULT;
		}

		for (i = 0; i < count; i++) {
			if (mask[i]) {
				BEGIN_RING(6);

				OUT_RING(CCE_PACKET3(R128_CNTL_PAINT_MULTI, 4));
				OUT_RING(R128_GMC_DST_PITCH_OFFSET_CNTL |
					 R128_GMC_BRUSH_SOLID_COLOR |
					 (dev_priv->depth_fmt << 8) |
					 R128_GMC_SRC_DATATYPE_COLOR |
					 R128_ROP3_P |
					 R128_GMC_CLR_CMP_CNTL_DIS |
					 R128_GMC_WR_MSK_DIS);

				OUT_RING(dev_priv->depth_pitch_offset_c);
				OUT_RING(buffer[i]);

				OUT_RING((x[i] << 16) | y[i]);
				OUT_RING((1 << 16) | 1);

				ADVANCE_RING();
			}
		}

		kfree(mask);
	} else {
		for (i = 0; i < count; i++) {
			BEGIN_RING(6);

			OUT_RING(CCE_PACKET3(R128_CNTL_PAINT_MULTI, 4));
			OUT_RING(R128_GMC_DST_PITCH_OFFSET_CNTL |
				 R128_GMC_BRUSH_SOLID_COLOR |
				 (dev_priv->depth_fmt << 8) |
				 R128_GMC_SRC_DATATYPE_COLOR |
				 R128_ROP3_P |
				 R128_GMC_CLR_CMP_CNTL_DIS |
				 R128_GMC_WR_MSK_DIS);

			OUT_RING(dev_priv->depth_pitch_offset_c);
			OUT_RING(buffer[i]);

			OUT_RING((x[i] << 16) | y[i]);
			OUT_RING((1 << 16) | 1);

			ADVANCE_RING();
		}
	}

	kfree(x);
	kfree(y);
	kfree(buffer);

	return 0;
}

static int r128_cce_dispatch_read_span(struct drm_device *dev,
				       drm_r128_depth_t *depth)
{
	drm_r128_private_t *dev_priv = dev->dev_private;
	int count, x, y;
	RING_LOCALS;
	DRM_DEBUG("\n");

	count = depth->n;
	if (count > 4096 || count <= 0)
		return -EMSGSIZE;

	if (DRM_COPY_FROM_USER(&x, depth->x, sizeof(x)))
		return -EFAULT;
	if (DRM_COPY_FROM_USER(&y, depth->y, sizeof(y)))
		return -EFAULT;

	BEGIN_RING(7);

	OUT_RING(CCE_PACKET3(R128_CNTL_BITBLT_MULTI, 5));
	OUT_RING(R128_GMC_SRC_PITCH_OFFSET_CNTL |
		 R128_GMC_DST_PITCH_OFFSET_CNTL |
		 R128_GMC_BRUSH_NONE |
		 (dev_priv->depth_fmt << 8) |
		 R128_GMC_SRC_DATATYPE_COLOR |
		 R128_ROP3_S |
		 R128_DP_SRC_SOURCE_MEMORY |
		 R128_GMC_CLR_CMP_CNTL_DIS | R128_GMC_WR_MSK_DIS);

	OUT_RING(dev_priv->depth_pitch_offset_c);
	OUT_RING(dev_priv->span_pitch_offset_c);

	OUT_RING((x << 16) | y);
	OUT_RING((0 << 16) | 0);
	OUT_RING((count << 16) | 1);

	ADVANCE_RING();

	return 0;
}

static int r128_cce_dispatch_read_pixels(struct drm_device *dev,
					 drm_r128_depth_t *depth)
{
	drm_r128_private_t *dev_priv = dev->dev_private;
	int count, *x, *y;
	int i, xbuf_size, ybuf_size;
	RING_LOCALS;
	DRM_DEBUG("\n");

	count = depth->n;
	if (count > 4096 || count <= 0)
		return -EMSGSIZE;

	if (count > dev_priv->depth_pitch)
		count = dev_priv->depth_pitch;

	xbuf_size = count * sizeof(*x);
	ybuf_size = count * sizeof(*y);
	x = kmalloc(xbuf_size, GFP_KERNEL);
	if (x == NULL)
		return -ENOMEM;
	y = kmalloc(ybuf_size, GFP_KERNEL);
	if (y == NULL) {
		kfree(x);
		return -ENOMEM;
	}
	if (DRM_COPY_FROM_USER(x, depth->x, xbuf_size)) {
		kfree(x);
		kfree(y);
		return -EFAULT;
	}
	if (DRM_COPY_FROM_USER(y, depth->y, ybuf_size)) {
		kfree(x);
		kfree(y);
		return -EFAULT;
	}

	for (i = 0; i < count; i++) {
		BEGIN_RING(7);

		OUT_RING(CCE_PACKET3(R128_CNTL_BITBLT_MULTI, 5));
		OUT_RING(R128_GMC_SRC_PITCH_OFFSET_CNTL |
			 R128_GMC_DST_PITCH_OFFSET_CNTL |
			 R128_GMC_BRUSH_NONE |
			 (dev_priv->depth_fmt << 8) |
			 R128_GMC_SRC_DATATYPE_COLOR |
			 R128_ROP3_S |
			 R128_DP_SRC_SOURCE_MEMORY |
			 R128_GMC_CLR_CMP_CNTL_DIS | R128_GMC_WR_MSK_DIS);

		OUT_RING(dev_priv->depth_pitch_offset_c);
		OUT_RING(dev_priv->span_pitch_offset_c);

		OUT_RING((x[i] << 16) | y[i]);
		OUT_RING((i << 16) | 0);
		OUT_RING((1 << 16) | 1);

		ADVANCE_RING();
	}

	kfree(x);
	kfree(y);

	return 0;
}

/* ================================================================
 * Polygon stipple
 */

static void r128_cce_dispatch_stipple(struct drm_device *dev, u32 *stipple)
{
	drm_r128_private_t *dev_priv = dev->dev_private;
	int i;
	RING_LOCALS;
	DRM_DEBUG("\n");

	BEGIN_RING(33);

	OUT_RING(CCE_PACKET0(R128_BRUSH_DATA0, 31));
	for (i = 0; i < 32; i++)
		OUT_RING(stipple[i]);

	ADVANCE_RING();
}

/* ================================================================
 * IOCTL functions
 */

static int r128_cce_clear(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_r128_private_t *dev_priv = dev->dev_private;
	drm_r128_sarea_t *sarea_priv;
	drm_r128_clear_t *clear = data;
	DRM_DEBUG("\n");

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	DEV_INIT_TEST_WITH_RETURN(dev_priv);

	RING_SPACE_TEST_WITH_RETURN(dev_priv);

	sarea_priv = dev_priv->sarea_priv;

	if (sarea_priv->nbox > R128_NR_SAREA_CLIPRECTS)
		sarea_priv->nbox = R128_NR_SAREA_CLIPRECTS;

	r128_cce_dispatch_clear(dev, clear);
	COMMIT_RING();

	/* Make sure we restore the 3D state next time.
	 */
	dev_priv->sarea_priv->dirty |= R128_UPLOAD_CONTEXT | R128_UPLOAD_MASKS;

	return 0;
}

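/* Save the CRTC offset registers and enable hardware page flipping; the
 * cleanup path below restores them and flips back to the front page if
 * the back page was left displayed.
 */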
static int r128_do_init_pageflip(struct drm_device *dev)
{
	drm_r128_private_t *dev_priv = dev->dev_private;
	DRM_DEBUG("\n");

	dev_priv->crtc_offset = R128_READ(R128_CRTC_OFFSET);
	dev_priv->crtc_offset_cntl = R128_READ(R128_CRTC_OFFSET_CNTL);

	R128_WRITE(R128_CRTC_OFFSET, dev_priv->front_offset);
	R128_WRITE(R128_CRTC_OFFSET_CNTL,
		   dev_priv->crtc_offset_cntl | R128_CRTC_OFFSET_FLIP_CNTL);

	dev_priv->page_flipping = 1;
	dev_priv->current_page = 0;
	dev_priv->sarea_priv->pfCurrentPage = dev_priv->current_page;

	return 0;
}

static int r128_do_cleanup_pageflip(struct drm_device *dev)
{
	drm_r128_private_t *dev_priv = dev->dev_private;
	DRM_DEBUG("\n");

	R128_WRITE(R128_CRTC_OFFSET, dev_priv->crtc_offset);
	R128_WRITE(R128_CRTC_OFFSET_CNTL, dev_priv->crtc_offset_cntl);

	if (dev_priv->current_page != 0) {
		r128_cce_dispatch_flip(dev);
		COMMIT_RING();
	}

	dev_priv->page_flipping = 0;
	return 0;
}

/* Swapping and flipping are different operations, need different ioctls.
 * They can & should be intermixed to support multiple 3d windows.
 */

static int r128_cce_flip(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_r128_private_t *dev_priv = dev->dev_private;
	DRM_DEBUG("\n");

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	DEV_INIT_TEST_WITH_RETURN(dev_priv);

	RING_SPACE_TEST_WITH_RETURN(dev_priv);

	if (!dev_priv->page_flipping)
		r128_do_init_pageflip(dev);

	r128_cce_dispatch_flip(dev);

	COMMIT_RING();
	return 0;
}

static int r128_cce_swap(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_r128_private_t *dev_priv = dev->dev_private;
	drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
	DRM_DEBUG("\n");

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	DEV_INIT_TEST_WITH_RETURN(dev_priv);

	RING_SPACE_TEST_WITH_RETURN(dev_priv);

	if (sarea_priv->nbox > R128_NR_SAREA_CLIPRECTS)
		sarea_priv->nbox = R128_NR_SAREA_CLIPRECTS;

	r128_cce_dispatch_swap(dev);
	dev_priv->sarea_priv->dirty |= (R128_UPLOAD_CONTEXT |
					R128_UPLOAD_MASKS);

	COMMIT_RING();
	return 0;
}

static int r128_cce_vertex(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_r128_private_t *dev_priv = dev->dev_private;
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf *buf;
	drm_r128_buf_priv_t *buf_priv;
	drm_r128_vertex_t *vertex = data;

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	DEV_INIT_TEST_WITH_RETURN(dev_priv);

	DRM_DEBUG("pid=%d index=%d count=%d discard=%d\n",
		  DRM_CURRENTPID, vertex->idx, vertex->count, vertex->discard);

	if (vertex->idx < 0 || vertex->idx >= dma->buf_count) {
		DRM_ERROR("buffer index %d (of %d max)\n",
			  vertex->idx, dma->buf_count - 1);
		return -EINVAL;
	}
	if (vertex->prim < 0 ||
	    vertex->prim > R128_CCE_VC_CNTL_PRIM_TYPE_TRI_TYPE2) {
		DRM_ERROR("buffer prim %d\n", vertex->prim);
		return -EINVAL;
	}

	RING_SPACE_TEST_WITH_RETURN(dev_priv);
	VB_AGE_TEST_WITH_RETURN(dev_priv);

	buf = dma->buflist[vertex->idx];
	buf_priv = buf->dev_private;

	if (buf->file_priv != file_priv) {
		DRM_ERROR("process %d using buffer owned by %p\n",
			  DRM_CURRENTPID, buf->file_priv);
		return -EINVAL;
	}
	if (buf->pending) {
		DRM_ERROR("sending pending buffer %d\n", vertex->idx);
		return -EINVAL;
	}

	buf->used = vertex->count;
	buf_priv->prim = vertex->prim;
	buf_priv->discard = vertex->discard;

	r128_cce_dispatch_vertex(dev, buf);

	COMMIT_RING();
	return 0;
}

static int r128_cce_indices(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_r128_private_t *dev_priv = dev->dev_private;
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf *buf;
	drm_r128_buf_priv_t *buf_priv;
	drm_r128_indices_t *elts = data;
	int count;

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	DEV_INIT_TEST_WITH_RETURN(dev_priv);

	DRM_DEBUG("pid=%d buf=%d s=%d e=%d d=%d\n", DRM_CURRENTPID,
		  elts->idx, elts->start, elts->end, elts->discard);

	if (elts->idx < 0 || elts->idx >= dma->buf_count) {
		DRM_ERROR("buffer index %d (of %d max)\n",
			  elts->idx, dma->buf_count - 1);
		return -EINVAL;
	}
	if (elts->prim < 0 ||
	    elts->prim > R128_CCE_VC_CNTL_PRIM_TYPE_TRI_TYPE2) {
		DRM_ERROR("buffer prim %d\n", elts->prim);
		return -EINVAL;
	}

	RING_SPACE_TEST_WITH_RETURN(dev_priv);
	VB_AGE_TEST_WITH_RETURN(dev_priv);

	buf = dma->buflist[elts->idx];
	buf_priv = buf->dev_private;

	if (buf->file_priv != file_priv) {
		DRM_ERROR("process %d using buffer owned by %p\n",
			  DRM_CURRENTPID, buf->file_priv);
		return -EINVAL;
	}
	if (buf->pending) {
		DRM_ERROR("sending pending buffer %d\n", elts->idx);
		return -EINVAL;
	}

	count = (elts->end - elts->start) / sizeof(u16);
	elts->start -= R128_INDEX_PRIM_OFFSET;

	if (elts->start & 0x7) {
		DRM_ERROR("misaligned buffer 0x%x\n", elts->start);
		return -EINVAL;
	}
	if (elts->start < buf->used) {
		DRM_ERROR("no header 0x%x - 0x%x\n", elts->start, buf->used);
		return -EINVAL;
	}

	buf->used = elts->end;
	buf_priv->prim = elts->prim;
	buf_priv->discard = elts->discard;

	r128_cce_dispatch_indices(dev, buf, elts->start, elts->end, count);

	COMMIT_RING();
	return 0;
}

static int r128_cce_blit(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	struct drm_device_dma *dma = dev->dma;
	drm_r128_private_t *dev_priv = dev->dev_private;
	drm_r128_blit_t *blit = data;
	int ret;

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	DEV_INIT_TEST_WITH_RETURN(dev_priv);

	DRM_DEBUG("pid=%d index=%d\n", DRM_CURRENTPID, blit->idx);

	if (blit->idx < 0 || blit->idx >= dma->buf_count) {
		DRM_ERROR("buffer index %d (of %d max)\n",
			  blit->idx, dma->buf_count - 1);
		return -EINVAL;
	}

	RING_SPACE_TEST_WITH_RETURN(dev_priv);
	VB_AGE_TEST_WITH_RETURN(dev_priv);

	ret = r128_cce_dispatch_blit(dev, file_priv, blit);

	COMMIT_RING();
	return ret;
}

static int r128_cce_depth(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_r128_private_t *dev_priv = dev->dev_private;
	drm_r128_depth_t *depth = data;
	int ret;

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	DEV_INIT_TEST_WITH_RETURN(dev_priv);

	RING_SPACE_TEST_WITH_RETURN(dev_priv);

	ret = -EINVAL;
	switch (depth->func) {
	case R128_WRITE_SPAN:
		ret = r128_cce_dispatch_write_span(dev, depth);
		break;
	case R128_WRITE_PIXELS:
		ret = r128_cce_dispatch_write_pixels(dev, depth);
		break;
	case R128_READ_SPAN:
		ret = r128_cce_dispatch_read_span(dev, depth);
		break;
	case R128_READ_PIXELS:
		ret = r128_cce_dispatch_read_pixels(dev, depth);
		break;
	}

	COMMIT_RING();
	return ret;
}

static int r128_cce_stipple(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_r128_private_t *dev_priv = dev->dev_private;
	drm_r128_stipple_t *stipple = data;
	u32 mask[32];

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	DEV_INIT_TEST_WITH_RETURN(dev_priv);

	if (DRM_COPY_FROM_USER(&mask, stipple->mask, 32 * sizeof(u32)))
		return -EFAULT;

	RING_SPACE_TEST_WITH_RETURN(dev_priv);

	r128_cce_dispatch_stipple(dev, mask);

	COMMIT_RING();
	return 0;
}

static int r128_cce_indirect(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_r128_private_t *dev_priv = dev->dev_private;
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf *buf;
	drm_r128_buf_priv_t *buf_priv;
	drm_r128_indirect_t *indirect = data;
#if 0
	RING_LOCALS;
#endif

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	DEV_INIT_TEST_WITH_RETURN(dev_priv);

	DRM_DEBUG("idx=%d s=%d e=%d d=%d\n",
		  indirect->idx, indirect->start, indirect->end,
		  indirect->discard);

	if (indirect->idx < 0 || indirect->idx >= dma->buf_count) {
		DRM_ERROR("buffer index %d (of %d max)\n",
			  indirect->idx, dma->buf_count - 1);
		return -EINVAL;
	}

	buf = dma->buflist[indirect->idx];
	buf_priv = buf->dev_private;

	if (buf->file_priv != file_priv) {
		DRM_ERROR("process %d using buffer owned by %p\n",
			  DRM_CURRENTPID, buf->file_priv);
		return -EINVAL;
	}
	if (buf->pending) {
		DRM_ERROR("sending pending buffer %d\n", indirect->idx);
		return -EINVAL;
	}

	if (indirect->start < buf->used) {
		DRM_ERROR("reusing indirect: start=0x%x actual=0x%x\n",
			  indirect->start, buf->used);
		return -EINVAL;
	}

	RING_SPACE_TEST_WITH_RETURN(dev_priv);
	VB_AGE_TEST_WITH_RETURN(dev_priv);

	buf->used = indirect->end;
	buf_priv->discard = indirect->discard;

#if 0
	/* Wait for the 3D stream to idle before the indirect buffer
	 * containing 2D acceleration commands is processed.
	 */
	BEGIN_RING(2);
	RADEON_WAIT_UNTIL_3D_IDLE();
	ADVANCE_RING();
#endif

	/* Dispatch the indirect buffer full of commands from the
	 * X server.  This is insecure and is thus only available to
	 * privileged clients.
	 */
	r128_cce_dispatch_indirect(dev, buf, indirect->start, indirect->end);

	COMMIT_RING();
	return 0;
}

static int r128_getparam(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_r128_private_t *dev_priv = dev->dev_private;
	drm_r128_getparam_t *param = data;
	int value;

	DEV_INIT_TEST_WITH_RETURN(dev_priv);

	DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);

	switch (param->param) {
	case R128_PARAM_IRQ_NR:
		value = drm_dev_to_irq(dev);
		break;
	default:
		return -EINVAL;
	}

	if (DRM_COPY_TO_USER(param->value, &value, sizeof(int))) {
		DRM_ERROR("copy_to_user\n");
		return -EFAULT;
	}

	return 0;
}

void r128_driver_preclose(struct drm_device *dev, struct drm_file *file_priv)
{
	if (dev->dev_private) {
		drm_r128_private_t *dev_priv = dev->dev_private;
		if (dev_priv->page_flipping)
			r128_do_cleanup_pageflip(dev);
	}
}

void r128_driver_lastclose(struct drm_device *dev)
{
	r128_do_cleanup_cce(dev);
}

struct drm_ioctl_desc r128_ioctls[] = {
	DRM_IOCTL_DEF_DRV(R128_INIT, r128_cce_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(R128_CCE_START, r128_cce_start, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(R128_CCE_STOP, r128_cce_stop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(R128_CCE_RESET, r128_cce_reset, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(R128_CCE_IDLE, r128_cce_idle, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(R128_RESET, r128_engine_reset, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(R128_FULLSCREEN, r128_fullscreen, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(R128_SWAP, r128_cce_swap, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(R128_FLIP, r128_cce_flip, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(R128_CLEAR, r128_cce_clear, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(R128_VERTEX, r128_cce_vertex, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(R128_INDICES, r128_cce_indices, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(R128_BLIT, r128_cce_blit, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(R128_DEPTH, r128_cce_depth, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(R128_STIPPLE, r128_cce_stipple, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(R128_INDIRECT, r128_cce_indirect, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(R128_GETPARAM, r128_getparam, DRM_AUTH),
};

int r128_max_ioctl = DRM_ARRAY_SIZE(r128_ioctls);