/* r128_state.c -- State support for r128 -*- linux-c -*-
 * Created: Thu Jan 27 02:53:43 2000 by gareth@valinux.com
 */
/*
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Gareth Hughes <gareth@valinux.com>
 */

#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/uaccess.h>

#include <drm/drm_device.h>
#include <drm/drm_file.h>
#include <drm/drm_print.h>
#include <drm/r128_drm.h>

#include "r128_drv.h"

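/* The state-programming and dispatch helpers below build CCE command
 * packets with the ring macros declared in r128_drv.h: RING_LOCALS sets up
 * the locals the macros need, BEGIN_RING() reserves ring space, OUT_RING()
 * emits one dword, and ADVANCE_RING() finishes the packet.  The ioctl
 * handlers then call COMMIT_RING() to hand the new commands to the chip.
 */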
/* ================================================================
 * CCE hardware state programming functions
 */

static void r128_emit_clip_rects(drm_r128_private_t *dev_priv,
				 struct drm_clip_rect *boxes, int count)
{
	u32 aux_sc_cntl = 0x00000000;
	RING_LOCALS;
	DRM_DEBUG("\n");

	BEGIN_RING((count < 3 ? count : 3) * 5 + 2);

	if (count >= 1) {
		OUT_RING(CCE_PACKET0(R128_AUX1_SC_LEFT, 3));
		OUT_RING(boxes[0].x1);
		OUT_RING(boxes[0].x2 - 1);
		OUT_RING(boxes[0].y1);
		OUT_RING(boxes[0].y2 - 1);

		aux_sc_cntl |= (R128_AUX1_SC_EN | R128_AUX1_SC_MODE_OR);
	}
	if (count >= 2) {
		OUT_RING(CCE_PACKET0(R128_AUX2_SC_LEFT, 3));
		OUT_RING(boxes[1].x1);
		OUT_RING(boxes[1].x2 - 1);
		OUT_RING(boxes[1].y1);
		OUT_RING(boxes[1].y2 - 1);

		aux_sc_cntl |= (R128_AUX2_SC_EN | R128_AUX2_SC_MODE_OR);
	}
	if (count >= 3) {
		OUT_RING(CCE_PACKET0(R128_AUX3_SC_LEFT, 3));
		OUT_RING(boxes[2].x1);
		OUT_RING(boxes[2].x2 - 1);
		OUT_RING(boxes[2].y1);
		OUT_RING(boxes[2].y2 - 1);

		aux_sc_cntl |= (R128_AUX3_SC_EN | R128_AUX3_SC_MODE_OR);
	}

	OUT_RING(CCE_PACKET0(R128_AUX_SC_CNTL, 0));
	OUT_RING(aux_sc_cntl);

	ADVANCE_RING();
}

static __inline__ void r128_emit_core(drm_r128_private_t *dev_priv)
{
	drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
	drm_r128_context_regs_t *ctx = &sarea_priv->context_state;
	RING_LOCALS;
	DRM_DEBUG("\n");

	BEGIN_RING(2);

	OUT_RING(CCE_PACKET0(R128_SCALE_3D_CNTL, 0));
	OUT_RING(ctx->scale_3d_cntl);

	ADVANCE_RING();
}

static __inline__ void r128_emit_context(drm_r128_private_t *dev_priv)
{
	drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
	drm_r128_context_regs_t *ctx = &sarea_priv->context_state;
	RING_LOCALS;
	DRM_DEBUG("\n");

	BEGIN_RING(13);

	OUT_RING(CCE_PACKET0(R128_DST_PITCH_OFFSET_C, 11));
	OUT_RING(ctx->dst_pitch_offset_c);
	OUT_RING(ctx->dp_gui_master_cntl_c);
	OUT_RING(ctx->sc_top_left_c);
	OUT_RING(ctx->sc_bottom_right_c);
	OUT_RING(ctx->z_offset_c);
	OUT_RING(ctx->z_pitch_c);
	OUT_RING(ctx->z_sten_cntl_c);
	OUT_RING(ctx->tex_cntl_c);
	OUT_RING(ctx->misc_3d_state_cntl_reg);
	OUT_RING(ctx->texture_clr_cmp_clr_c);
	OUT_RING(ctx->texture_clr_cmp_msk_c);
	OUT_RING(ctx->fog_color_c);

	ADVANCE_RING();
}

static __inline__ void r128_emit_setup(drm_r128_private_t *dev_priv)
{
	drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
	drm_r128_context_regs_t *ctx = &sarea_priv->context_state;
	RING_LOCALS;
	DRM_DEBUG("\n");

	BEGIN_RING(3);

	OUT_RING(CCE_PACKET1(R128_SETUP_CNTL, R128_PM4_VC_FPU_SETUP));
	OUT_RING(ctx->setup_cntl);
	OUT_RING(ctx->pm4_vc_fpu_setup);

	ADVANCE_RING();
}

static __inline__ void r128_emit_masks(drm_r128_private_t *dev_priv)
{
	drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
	drm_r128_context_regs_t *ctx = &sarea_priv->context_state;
	RING_LOCALS;
	DRM_DEBUG("\n");

	BEGIN_RING(5);

	OUT_RING(CCE_PACKET0(R128_DP_WRITE_MASK, 0));
	OUT_RING(ctx->dp_write_mask);

	OUT_RING(CCE_PACKET0(R128_STEN_REF_MASK_C, 1));
	OUT_RING(ctx->sten_ref_mask_c);
	OUT_RING(ctx->plane_3d_mask_c);

	ADVANCE_RING();
}

static __inline__ void r128_emit_window(drm_r128_private_t *dev_priv)
{
	drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
	drm_r128_context_regs_t *ctx = &sarea_priv->context_state;
	RING_LOCALS;
	DRM_DEBUG("\n");

	BEGIN_RING(2);

	OUT_RING(CCE_PACKET0(R128_WINDOW_XY_OFFSET, 0));
	OUT_RING(ctx->window_xy_offset);

	ADVANCE_RING();
}

static __inline__ void r128_emit_tex0(drm_r128_private_t *dev_priv)
{
	drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
	drm_r128_context_regs_t *ctx = &sarea_priv->context_state;
	drm_r128_texture_regs_t *tex = &sarea_priv->tex_state[0];
	int i;
	RING_LOCALS;
	DRM_DEBUG("\n");

	BEGIN_RING(7 + R128_MAX_TEXTURE_LEVELS);

	OUT_RING(CCE_PACKET0(R128_PRIM_TEX_CNTL_C,
			     2 + R128_MAX_TEXTURE_LEVELS));
	OUT_RING(tex->tex_cntl);
	OUT_RING(tex->tex_combine_cntl);
	OUT_RING(ctx->tex_size_pitch_c);
	for (i = 0; i < R128_MAX_TEXTURE_LEVELS; i++)
		OUT_RING(tex->tex_offset[i]);

	OUT_RING(CCE_PACKET0(R128_CONSTANT_COLOR_C, 1));
	OUT_RING(ctx->constant_color_c);
	OUT_RING(tex->tex_border_color);

	ADVANCE_RING();
}

static __inline__ void r128_emit_tex1(drm_r128_private_t *dev_priv)
{
	drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
	drm_r128_texture_regs_t *tex = &sarea_priv->tex_state[1];
	int i;
	RING_LOCALS;
	DRM_DEBUG("\n");

	BEGIN_RING(5 + R128_MAX_TEXTURE_LEVELS);

	OUT_RING(CCE_PACKET0(R128_SEC_TEX_CNTL_C, 1 + R128_MAX_TEXTURE_LEVELS));
	OUT_RING(tex->tex_cntl);
	OUT_RING(tex->tex_combine_cntl);
	for (i = 0; i < R128_MAX_TEXTURE_LEVELS; i++)
		OUT_RING(tex->tex_offset[i]);

	OUT_RING(CCE_PACKET0(R128_SEC_TEXTURE_BORDER_COLOR_C, 0));
	OUT_RING(tex->tex_border_color);

	ADVANCE_RING();
}

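/* Write the SAREA state blocks whose dirty bits are set out to the
 * hardware, clearing each bit as the corresponding registers are emitted.
 */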
static void r128_emit_state(drm_r128_private_t *dev_priv)
{
	drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
	unsigned int dirty = sarea_priv->dirty;

	DRM_DEBUG("dirty=0x%08x\n", dirty);

	if (dirty & R128_UPLOAD_CORE) {
		r128_emit_core(dev_priv);
		sarea_priv->dirty &= ~R128_UPLOAD_CORE;
	}

	if (dirty & R128_UPLOAD_CONTEXT) {
		r128_emit_context(dev_priv);
		sarea_priv->dirty &= ~R128_UPLOAD_CONTEXT;
	}

	if (dirty & R128_UPLOAD_SETUP) {
		r128_emit_setup(dev_priv);
		sarea_priv->dirty &= ~R128_UPLOAD_SETUP;
	}

	if (dirty & R128_UPLOAD_MASKS) {
		r128_emit_masks(dev_priv);
		sarea_priv->dirty &= ~R128_UPLOAD_MASKS;
	}

	if (dirty & R128_UPLOAD_WINDOW) {
		r128_emit_window(dev_priv);
		sarea_priv->dirty &= ~R128_UPLOAD_WINDOW;
	}

	if (dirty & R128_UPLOAD_TEX0) {
		r128_emit_tex0(dev_priv);
		sarea_priv->dirty &= ~R128_UPLOAD_TEX0;
	}

	if (dirty & R128_UPLOAD_TEX1) {
		r128_emit_tex1(dev_priv);
		sarea_priv->dirty &= ~R128_UPLOAD_TEX1;
	}

	/* Turn off the texture cache flushing */
	sarea_priv->context_state.tex_cntl_c &= ~R128_TEX_CACHE_FLUSH;

	sarea_priv->dirty &= ~R128_REQUIRE_QUIESCENCE;
}

#if R128_PERFORMANCE_BOXES
/* ================================================================
 * Performance monitoring functions
 */

static void r128_clear_box(drm_r128_private_t *dev_priv,
			   int x, int y, int w, int h, int r, int g, int b)
{
	u32 pitch, offset;
	u32 fb_bpp, color;
	RING_LOCALS;

	switch (dev_priv->fb_bpp) {
	case 16:
		fb_bpp = R128_GMC_DST_16BPP;
		color = (((r & 0xf8) << 8) |
			 ((g & 0xfc) << 3) | ((b & 0xf8) >> 3));
		break;
	case 24:
		fb_bpp = R128_GMC_DST_24BPP;
		color = ((r << 16) | (g << 8) | b);
		break;
	case 32:
		fb_bpp = R128_GMC_DST_32BPP;
		color = (((0xff) << 24) | (r << 16) | (g << 8) | b);
		break;
	default:
		return;
	}

	offset = dev_priv->back_offset;
	pitch = dev_priv->back_pitch >> 3;

	BEGIN_RING(6);

	OUT_RING(CCE_PACKET3(R128_CNTL_PAINT_MULTI, 4));
	OUT_RING(R128_GMC_DST_PITCH_OFFSET_CNTL |
		 R128_GMC_BRUSH_SOLID_COLOR |
		 fb_bpp |
		 R128_GMC_SRC_DATATYPE_COLOR |
		 R128_ROP3_P |
		 R128_GMC_CLR_CMP_CNTL_DIS | R128_GMC_AUX_CLIP_DIS);

	OUT_RING((pitch << 21) | (offset >> 5));
	OUT_RING(color);

	OUT_RING((x << 16) | y);
	OUT_RING((w << 16) | h);

	ADVANCE_RING();
}

static void r128_cce_performance_boxes(drm_r128_private_t *dev_priv)
{
	if (atomic_read(&dev_priv->idle_count) == 0)
		r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
	else
		atomic_set(&dev_priv->idle_count, 0);
}

#endif

/* ================================================================
 * CCE command dispatch functions
 */

static void r128_print_dirty(const char *msg, unsigned int flags)
{
	DRM_INFO("%s: (0x%x) %s%s%s%s%s%s%s%s%s\n",
		 msg,
		 flags,
		 (flags & R128_UPLOAD_CORE) ? "core, " : "",
		 (flags & R128_UPLOAD_CONTEXT) ? "context, " : "",
		 (flags & R128_UPLOAD_SETUP) ? "setup, " : "",
		 (flags & R128_UPLOAD_TEX0) ? "tex0, " : "",
		 (flags & R128_UPLOAD_TEX1) ? "tex1, " : "",
		 (flags & R128_UPLOAD_MASKS) ? "masks, " : "",
		 (flags & R128_UPLOAD_WINDOW) ? "window, " : "",
		 (flags & R128_UPLOAD_CLIPRECTS) ? "cliprects, " : "",
		 (flags & R128_REQUIRE_QUIESCENCE) ? "quiescence, " : "");
}

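/* Clear the front/back color buffers and/or the depth buffer for every
 * cliprect in the SAREA, swapping the R128_FRONT and R128_BACK flags when
 * page flipping currently has the two color buffers reversed.
 */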
static void r128_cce_dispatch_clear(struct drm_device *dev,
				    drm_r128_clear_t *clear)
{
	drm_r128_private_t *dev_priv = dev->dev_private;
	drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
	int nbox = sarea_priv->nbox;
	struct drm_clip_rect *pbox = sarea_priv->boxes;
	unsigned int flags = clear->flags;
	int i;
	RING_LOCALS;
	DRM_DEBUG("\n");

	if (dev_priv->page_flipping && dev_priv->current_page == 1) {
		unsigned int tmp = flags;

		flags &= ~(R128_FRONT | R128_BACK);
		if (tmp & R128_FRONT)
			flags |= R128_BACK;
		if (tmp & R128_BACK)
			flags |= R128_FRONT;
	}

	for (i = 0; i < nbox; i++) {
		int x = pbox[i].x1;
		int y = pbox[i].y1;
		int w = pbox[i].x2 - x;
		int h = pbox[i].y2 - y;

		DRM_DEBUG("dispatch clear %d,%d-%d,%d flags 0x%x\n",
			  pbox[i].x1, pbox[i].y1, pbox[i].x2,
			  pbox[i].y2, flags);

		if (flags & (R128_FRONT | R128_BACK)) {
			BEGIN_RING(2);

			OUT_RING(CCE_PACKET0(R128_DP_WRITE_MASK, 0));
			OUT_RING(clear->color_mask);

			ADVANCE_RING();
		}

		if (flags & R128_FRONT) {
			BEGIN_RING(6);

			OUT_RING(CCE_PACKET3(R128_CNTL_PAINT_MULTI, 4));
			OUT_RING(R128_GMC_DST_PITCH_OFFSET_CNTL |
				 R128_GMC_BRUSH_SOLID_COLOR |
				 (dev_priv->color_fmt << 8) |
				 R128_GMC_SRC_DATATYPE_COLOR |
				 R128_ROP3_P |
				 R128_GMC_CLR_CMP_CNTL_DIS |
				 R128_GMC_AUX_CLIP_DIS);

			OUT_RING(dev_priv->front_pitch_offset_c);
			OUT_RING(clear->clear_color);

			OUT_RING((x << 16) | y);
			OUT_RING((w << 16) | h);

			ADVANCE_RING();
		}

		if (flags & R128_BACK) {
			BEGIN_RING(6);

			OUT_RING(CCE_PACKET3(R128_CNTL_PAINT_MULTI, 4));
			OUT_RING(R128_GMC_DST_PITCH_OFFSET_CNTL |
				 R128_GMC_BRUSH_SOLID_COLOR |
				 (dev_priv->color_fmt << 8) |
				 R128_GMC_SRC_DATATYPE_COLOR |
				 R128_ROP3_P |
				 R128_GMC_CLR_CMP_CNTL_DIS |
				 R128_GMC_AUX_CLIP_DIS);

			OUT_RING(dev_priv->back_pitch_offset_c);
			OUT_RING(clear->clear_color);

			OUT_RING((x << 16) | y);
			OUT_RING((w << 16) | h);

			ADVANCE_RING();
		}

		if (flags & R128_DEPTH) {
			BEGIN_RING(6);

			OUT_RING(CCE_PACKET3(R128_CNTL_PAINT_MULTI, 4));
			OUT_RING(R128_GMC_DST_PITCH_OFFSET_CNTL |
				 R128_GMC_BRUSH_SOLID_COLOR |
				 (dev_priv->depth_fmt << 8) |
				 R128_GMC_SRC_DATATYPE_COLOR |
				 R128_ROP3_P |
				 R128_GMC_CLR_CMP_CNTL_DIS |
				 R128_GMC_AUX_CLIP_DIS | R128_GMC_WR_MSK_DIS);

			OUT_RING(dev_priv->depth_pitch_offset_c);
			OUT_RING(clear->clear_depth);

			OUT_RING((x << 16) | y);
			OUT_RING((w << 16) | h);

			ADVANCE_RING();
		}
	}
}

static void r128_cce_dispatch_swap(struct drm_device *dev)
{
	drm_r128_private_t *dev_priv = dev->dev_private;
	drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
	int nbox = sarea_priv->nbox;
	struct drm_clip_rect *pbox = sarea_priv->boxes;
	int i;
	RING_LOCALS;
	DRM_DEBUG("\n");

#if R128_PERFORMANCE_BOXES
	/* Do some trivial performance monitoring...
	 */
	r128_cce_performance_boxes(dev_priv);
#endif

	for (i = 0; i < nbox; i++) {
		int x = pbox[i].x1;
		int y = pbox[i].y1;
		int w = pbox[i].x2 - x;
		int h = pbox[i].y2 - y;

		BEGIN_RING(7);

		OUT_RING(CCE_PACKET3(R128_CNTL_BITBLT_MULTI, 5));
		OUT_RING(R128_GMC_SRC_PITCH_OFFSET_CNTL |
			 R128_GMC_DST_PITCH_OFFSET_CNTL |
			 R128_GMC_BRUSH_NONE |
			 (dev_priv->color_fmt << 8) |
			 R128_GMC_SRC_DATATYPE_COLOR |
			 R128_ROP3_S |
			 R128_DP_SRC_SOURCE_MEMORY |
			 R128_GMC_CLR_CMP_CNTL_DIS |
			 R128_GMC_AUX_CLIP_DIS | R128_GMC_WR_MSK_DIS);

		/* Make this work even if front & back are flipped:
		 */
		if (dev_priv->current_page == 0) {
			OUT_RING(dev_priv->back_pitch_offset_c);
			OUT_RING(dev_priv->front_pitch_offset_c);
		} else {
			OUT_RING(dev_priv->front_pitch_offset_c);
			OUT_RING(dev_priv->back_pitch_offset_c);
		}

		OUT_RING((x << 16) | y);
		OUT_RING((x << 16) | y);
		OUT_RING((w << 16) | h);

		ADVANCE_RING();
	}

	/* Increment the frame counter.  The client-side 3D driver must
	 * throttle the framerate by waiting for this value before
	 * performing the swapbuffer ioctl.
	 */
	dev_priv->sarea_priv->last_frame++;

	BEGIN_RING(2);

	OUT_RING(CCE_PACKET0(R128_LAST_FRAME_REG, 0));
	OUT_RING(dev_priv->sarea_priv->last_frame);

	ADVANCE_RING();
}

static void r128_cce_dispatch_flip(struct drm_device *dev)
{
	drm_r128_private_t *dev_priv = dev->dev_private;
	RING_LOCALS;
	DRM_DEBUG("page=%d pfCurrentPage=%d\n",
		  dev_priv->current_page, dev_priv->sarea_priv->pfCurrentPage);

#if R128_PERFORMANCE_BOXES
	/* Do some trivial performance monitoring...
	 */
	r128_cce_performance_boxes(dev_priv);
#endif

	BEGIN_RING(4);

	R128_WAIT_UNTIL_PAGE_FLIPPED();
	OUT_RING(CCE_PACKET0(R128_CRTC_OFFSET, 0));

	if (dev_priv->current_page == 0)
		OUT_RING(dev_priv->back_offset);
	else
		OUT_RING(dev_priv->front_offset);

	ADVANCE_RING();

	/* Increment the frame counter.  The client-side 3D driver must
	 * throttle the framerate by waiting for this value before
	 * performing the swapbuffer ioctl.
	 */
	dev_priv->sarea_priv->last_frame++;
	dev_priv->sarea_priv->pfCurrentPage = dev_priv->current_page =
	    1 - dev_priv->current_page;

	BEGIN_RING(2);

	OUT_RING(CCE_PACKET0(R128_LAST_FRAME_REG, 0));
	OUT_RING(dev_priv->sarea_priv->last_frame);

	ADVANCE_RING();
}

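/* Emit any dirty state plus a vertex-buffer rendering packet for each
 * group of up to three cliprects, then age and release the buffer if the
 * client asked for it to be discarded.
 */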
static void r128_cce_dispatch_vertex(struct drm_device *dev, struct drm_buf *buf)
{
	drm_r128_private_t *dev_priv = dev->dev_private;
	drm_r128_buf_priv_t *buf_priv = buf->dev_private;
	drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
	int format = sarea_priv->vc_format;
	int offset = buf->bus_address;
	int size = buf->used;
	int prim = buf_priv->prim;
	int i = 0;
	RING_LOCALS;
	DRM_DEBUG("buf=%d nbox=%d\n", buf->idx, sarea_priv->nbox);

	if (0)
		r128_print_dirty("dispatch_vertex", sarea_priv->dirty);

	if (buf->used) {
		buf_priv->dispatched = 1;

		if (sarea_priv->dirty & ~R128_UPLOAD_CLIPRECTS)
			r128_emit_state(dev_priv);

		do {
			/* Emit the next set of up to three cliprects */
			if (i < sarea_priv->nbox) {
				r128_emit_clip_rects(dev_priv,
						     &sarea_priv->boxes[i],
						     sarea_priv->nbox - i);
			}

			/* Emit the vertex buffer rendering commands */
			BEGIN_RING(5);

			OUT_RING(CCE_PACKET3(R128_3D_RNDR_GEN_INDX_PRIM, 3));
			OUT_RING(offset);
			OUT_RING(size);
			OUT_RING(format);
			OUT_RING(prim | R128_CCE_VC_CNTL_PRIM_WALK_LIST |
				 (size << R128_CCE_VC_CNTL_NUM_SHIFT));

			ADVANCE_RING();

			i += 3;
		} while (i < sarea_priv->nbox);
	}

	if (buf_priv->discard) {
		buf_priv->age = dev_priv->sarea_priv->last_dispatch;

		/* Emit the vertex buffer age */
		BEGIN_RING(2);

		OUT_RING(CCE_PACKET0(R128_LAST_DISPATCH_REG, 0));
		OUT_RING(buf_priv->age);

		ADVANCE_RING();

		buf->pending = 1;
		buf->used = 0;
		/* FIXME: Check dispatched field */
		buf_priv->dispatched = 0;
	}

	dev_priv->sarea_priv->last_dispatch++;

	sarea_priv->dirty &= ~R128_UPLOAD_CLIPRECTS;
	sarea_priv->nbox = 0;
}

static void r128_cce_dispatch_indirect(struct drm_device *dev,
				       struct drm_buf *buf, int start, int end)
{
	drm_r128_private_t *dev_priv = dev->dev_private;
	drm_r128_buf_priv_t *buf_priv = buf->dev_private;
	RING_LOCALS;
	DRM_DEBUG("indirect: buf=%d s=0x%x e=0x%x\n", buf->idx, start, end);

	if (start != end) {
		int offset = buf->bus_address + start;
		int dwords = (end - start + 3) / sizeof(u32);

		/* Indirect buffer data must be an even number of
		 * dwords, so if we've been given an odd number we must
		 * pad the data with a Type-2 CCE packet.
		 */
		if (dwords & 1) {
			u32 *data = (u32 *)
			    ((char *)dev->agp_buffer_map->handle
			     + buf->offset + start);
			data[dwords++] = cpu_to_le32(R128_CCE_PACKET2);
		}

		buf_priv->dispatched = 1;

		/* Fire off the indirect buffer */
		BEGIN_RING(3);

		OUT_RING(CCE_PACKET0(R128_PM4_IW_INDOFF, 1));
		OUT_RING(offset);
		OUT_RING(dwords);

		ADVANCE_RING();
	}

	if (buf_priv->discard) {
		buf_priv->age = dev_priv->sarea_priv->last_dispatch;

		/* Emit the indirect buffer age */
		BEGIN_RING(2);

		OUT_RING(CCE_PACKET0(R128_LAST_DISPATCH_REG, 0));
		OUT_RING(buf_priv->age);

		ADVANCE_RING();

		buf->pending = 1;
		buf->used = 0;
		/* FIXME: Check dispatched field */
		buf_priv->dispatched = 0;
	}

	dev_priv->sarea_priv->last_dispatch++;
}

static void r128_cce_dispatch_indices(struct drm_device *dev,
				      struct drm_buf *buf,
				      int start, int end, int count)
{
	drm_r128_private_t *dev_priv = dev->dev_private;
	drm_r128_buf_priv_t *buf_priv = buf->dev_private;
	drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
	int format = sarea_priv->vc_format;
	int offset = dev->agp_buffer_map->offset - dev_priv->cce_buffers_offset;
	int prim = buf_priv->prim;
	u32 *data;
	int dwords;
	int i = 0;
	RING_LOCALS;
	DRM_DEBUG("indices: s=%d e=%d c=%d\n", start, end, count);

	if (0)
		r128_print_dirty("dispatch_indices", sarea_priv->dirty);

	if (start != end) {
		buf_priv->dispatched = 1;

		if (sarea_priv->dirty & ~R128_UPLOAD_CLIPRECTS)
			r128_emit_state(dev_priv);

		dwords = (end - start + 3) / sizeof(u32);

		data = (u32 *) ((char *)dev->agp_buffer_map->handle
				+ buf->offset + start);

		data[0] = cpu_to_le32(CCE_PACKET3(R128_3D_RNDR_GEN_INDX_PRIM,
						  dwords - 2));

		data[1] = cpu_to_le32(offset);
		data[2] = cpu_to_le32(R128_MAX_VB_VERTS);
		data[3] = cpu_to_le32(format);
		data[4] = cpu_to_le32((prim | R128_CCE_VC_CNTL_PRIM_WALK_IND |
				       (count << 16)));

		if (count & 0x1) {
#ifdef __LITTLE_ENDIAN
			data[dwords - 1] &= 0x0000ffff;
#else
			data[dwords - 1] &= 0xffff0000;
#endif
		}

		do {
			/* Emit the next set of up to three cliprects */
			if (i < sarea_priv->nbox) {
				r128_emit_clip_rects(dev_priv,
						     &sarea_priv->boxes[i],
						     sarea_priv->nbox - i);
			}

			r128_cce_dispatch_indirect(dev, buf, start, end);

			i += 3;
		} while (i < sarea_priv->nbox);
	}

	if (buf_priv->discard) {
		buf_priv->age = dev_priv->sarea_priv->last_dispatch;

		/* Emit the vertex buffer age */
		BEGIN_RING(2);

		OUT_RING(CCE_PACKET0(R128_LAST_DISPATCH_REG, 0));
		OUT_RING(buf_priv->age);

		ADVANCE_RING();

		buf->pending = 1;
		/* FIXME: Check dispatched field */
		buf_priv->dispatched = 0;
	}

	dev_priv->sarea_priv->last_dispatch++;

	sarea_priv->dirty &= ~R128_UPLOAD_CLIPRECTS;
	sarea_priv->nbox = 0;
}

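/* Upload texture data with a HOSTDATA_BLT packet built in the client's
 * indirect buffer, flushing the pixel cache before and after so blit data
 * and rendered pixels never mix.
 */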
static int r128_cce_dispatch_blit(struct drm_device *dev,
				  struct drm_file *file_priv,
				  drm_r128_blit_t *blit)
{
	drm_r128_private_t *dev_priv = dev->dev_private;
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf *buf;
	drm_r128_buf_priv_t *buf_priv;
	u32 *data;
	int dword_shift, dwords;
	RING_LOCALS;
	DRM_DEBUG("\n");

	/* The compiler won't optimize away a division by a variable,
	 * even if the only legal values are powers of two.  Thus, we'll
	 * use a shift instead.
	 */
	switch (blit->format) {
	case R128_DATATYPE_ARGB8888:
		dword_shift = 0;
		break;
	case R128_DATATYPE_ARGB1555:
	case R128_DATATYPE_RGB565:
	case R128_DATATYPE_ARGB4444:
	case R128_DATATYPE_YVYU422:
	case R128_DATATYPE_VYUY422:
		dword_shift = 1;
		break;
	case R128_DATATYPE_CI8:
	case R128_DATATYPE_RGB8:
		dword_shift = 2;
		break;
	default:
		DRM_ERROR("invalid blit format %d\n", blit->format);
		return -EINVAL;
	}

	/* Flush the pixel cache, and mark the contents as Read Invalid.
	 * This ensures no pixel data gets mixed up with the texture
	 * data from the host data blit, otherwise part of the texture
	 * image may be corrupted.
	 */
	BEGIN_RING(2);

	OUT_RING(CCE_PACKET0(R128_PC_GUI_CTLSTAT, 0));
	OUT_RING(R128_PC_RI_GUI | R128_PC_FLUSH_GUI);

	ADVANCE_RING();

	/* Dispatch the indirect buffer.
	 */
	buf = dma->buflist[blit->idx];
	buf_priv = buf->dev_private;

	if (buf->file_priv != file_priv) {
		DRM_ERROR("process %d using buffer owned by %p\n",
			  task_pid_nr(current), buf->file_priv);
		return -EINVAL;
	}
	if (buf->pending) {
		DRM_ERROR("sending pending buffer %d\n", blit->idx);
		return -EINVAL;
	}

	buf_priv->discard = 1;

	dwords = (blit->width * blit->height) >> dword_shift;

	data = (u32 *) ((char *)dev->agp_buffer_map->handle + buf->offset);

	data[0] = cpu_to_le32(CCE_PACKET3(R128_CNTL_HOSTDATA_BLT, dwords + 6));
	data[1] = cpu_to_le32((R128_GMC_DST_PITCH_OFFSET_CNTL |
			       R128_GMC_BRUSH_NONE |
			       (blit->format << 8) |
			       R128_GMC_SRC_DATATYPE_COLOR |
			       R128_ROP3_S |
			       R128_DP_SRC_SOURCE_HOST_DATA |
			       R128_GMC_CLR_CMP_CNTL_DIS |
			       R128_GMC_AUX_CLIP_DIS | R128_GMC_WR_MSK_DIS));

	data[2] = cpu_to_le32((blit->pitch << 21) | (blit->offset >> 5));
	data[3] = cpu_to_le32(0xffffffff);
	data[4] = cpu_to_le32(0xffffffff);
	data[5] = cpu_to_le32((blit->y << 16) | blit->x);
	data[6] = cpu_to_le32((blit->height << 16) | blit->width);
	data[7] = cpu_to_le32(dwords);

	buf->used = (dwords + 8) * sizeof(u32);

	r128_cce_dispatch_indirect(dev, buf, 0, buf->used);

	/* Flush the pixel cache after the blit completes.  This ensures
	 * the texture data is written out to memory before rendering
	 * continues.
	 */
	BEGIN_RING(2);

	OUT_RING(CCE_PACKET0(R128_PC_GUI_CTLSTAT, 0));
	OUT_RING(R128_PC_FLUSH_GUI);

	ADVANCE_RING();

	return 0;
}

/* ================================================================
 * Tiled depth buffer management
 *
 * FIXME: These should all set the destination write mask for when we
 * have hardware stencil support.
 */

static int r128_cce_dispatch_write_span(struct drm_device *dev,
					drm_r128_depth_t *depth)
{
	drm_r128_private_t *dev_priv = dev->dev_private;
	int count, x, y;
	u32 *buffer;
	u8 *mask;
	int i, buffer_size, mask_size;
	RING_LOCALS;
	DRM_DEBUG("\n");

	count = depth->n;
	if (count > 4096 || count <= 0)
		return -EMSGSIZE;

	if (copy_from_user(&x, depth->x, sizeof(x)))
		return -EFAULT;
	if (copy_from_user(&y, depth->y, sizeof(y)))
		return -EFAULT;

	buffer_size = depth->n * sizeof(u32);
	buffer = memdup_user(depth->buffer, buffer_size);
	if (IS_ERR(buffer))
		return PTR_ERR(buffer);

	mask_size = depth->n;
	if (depth->mask) {
		mask = memdup_user(depth->mask, mask_size);
		if (IS_ERR(mask)) {
			kfree(buffer);
			return PTR_ERR(mask);
		}

		for (i = 0; i < count; i++, x++) {
			if (mask[i]) {
				BEGIN_RING(6);

				OUT_RING(CCE_PACKET3(R128_CNTL_PAINT_MULTI, 4));
				OUT_RING(R128_GMC_DST_PITCH_OFFSET_CNTL |
					 R128_GMC_BRUSH_SOLID_COLOR |
					 (dev_priv->depth_fmt << 8) |
					 R128_GMC_SRC_DATATYPE_COLOR |
					 R128_ROP3_P |
					 R128_GMC_CLR_CMP_CNTL_DIS |
					 R128_GMC_WR_MSK_DIS);

				OUT_RING(dev_priv->depth_pitch_offset_c);
				OUT_RING(buffer[i]);

				OUT_RING((x << 16) | y);
				OUT_RING((1 << 16) | 1);

				ADVANCE_RING();
			}
		}

		kfree(mask);
	} else {
		for (i = 0; i < count; i++, x++) {
			BEGIN_RING(6);

			OUT_RING(CCE_PACKET3(R128_CNTL_PAINT_MULTI, 4));
			OUT_RING(R128_GMC_DST_PITCH_OFFSET_CNTL |
				 R128_GMC_BRUSH_SOLID_COLOR |
				 (dev_priv->depth_fmt << 8) |
				 R128_GMC_SRC_DATATYPE_COLOR |
				 R128_ROP3_P |
				 R128_GMC_CLR_CMP_CNTL_DIS |
				 R128_GMC_WR_MSK_DIS);

			OUT_RING(dev_priv->depth_pitch_offset_c);
			OUT_RING(buffer[i]);

			OUT_RING((x << 16) | y);
			OUT_RING((1 << 16) | 1);

			ADVANCE_RING();
		}
	}

	kfree(buffer);

	return 0;
}

static int r128_cce_dispatch_write_pixels(struct drm_device *dev,
					  drm_r128_depth_t *depth)
{
	drm_r128_private_t *dev_priv = dev->dev_private;
	int count, *x, *y;
	u32 *buffer;
	u8 *mask;
	int i, xbuf_size, ybuf_size, buffer_size, mask_size;
	RING_LOCALS;
	DRM_DEBUG("\n");

	count = depth->n;
	if (count > 4096 || count <= 0)
		return -EMSGSIZE;

	xbuf_size = count * sizeof(*x);
	ybuf_size = count * sizeof(*y);
	x = memdup_user(depth->x, xbuf_size);
	if (IS_ERR(x))
		return PTR_ERR(x);
	y = memdup_user(depth->y, ybuf_size);
	if (IS_ERR(y)) {
		kfree(x);
		return PTR_ERR(y);
	}
	buffer_size = depth->n * sizeof(u32);
	buffer = memdup_user(depth->buffer, buffer_size);
	if (IS_ERR(buffer)) {
		kfree(x);
		kfree(y);
		return PTR_ERR(buffer);
	}

	if (depth->mask) {
		mask_size = depth->n;
		mask = memdup_user(depth->mask, mask_size);
		if (IS_ERR(mask)) {
			kfree(x);
			kfree(y);
			kfree(buffer);
			return PTR_ERR(mask);
		}

		for (i = 0; i < count; i++) {
			if (mask[i]) {
				BEGIN_RING(6);

				OUT_RING(CCE_PACKET3(R128_CNTL_PAINT_MULTI, 4));
				OUT_RING(R128_GMC_DST_PITCH_OFFSET_CNTL |
					 R128_GMC_BRUSH_SOLID_COLOR |
					 (dev_priv->depth_fmt << 8) |
					 R128_GMC_SRC_DATATYPE_COLOR |
					 R128_ROP3_P |
					 R128_GMC_CLR_CMP_CNTL_DIS |
					 R128_GMC_WR_MSK_DIS);

				OUT_RING(dev_priv->depth_pitch_offset_c);
				OUT_RING(buffer[i]);

				OUT_RING((x[i] << 16) | y[i]);
				OUT_RING((1 << 16) | 1);

				ADVANCE_RING();
			}
		}

		kfree(mask);
	} else {
		for (i = 0; i < count; i++) {
			BEGIN_RING(6);

			OUT_RING(CCE_PACKET3(R128_CNTL_PAINT_MULTI, 4));
			OUT_RING(R128_GMC_DST_PITCH_OFFSET_CNTL |
				 R128_GMC_BRUSH_SOLID_COLOR |
				 (dev_priv->depth_fmt << 8) |
				 R128_GMC_SRC_DATATYPE_COLOR |
				 R128_ROP3_P |
				 R128_GMC_CLR_CMP_CNTL_DIS |
				 R128_GMC_WR_MSK_DIS);

			OUT_RING(dev_priv->depth_pitch_offset_c);
			OUT_RING(buffer[i]);

			OUT_RING((x[i] << 16) | y[i]);
			OUT_RING((1 << 16) | 1);

			ADVANCE_RING();
		}
	}

	kfree(x);
	kfree(y);
	kfree(buffer);

	return 0;
}

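/* The read paths below blit depth values from the depth buffer into the
 * driver's span buffer (span_pitch_offset_c) so the client can read them
 * back: one blit for a whole span here, one 1x1 blit per pixel in the
 * scattered-pixel case that follows.
 */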
static int r128_cce_dispatch_read_span(struct drm_device *dev,
				       drm_r128_depth_t *depth)
{
	drm_r128_private_t *dev_priv = dev->dev_private;
	int count, x, y;
	RING_LOCALS;
	DRM_DEBUG("\n");

	count = depth->n;
	if (count > 4096 || count <= 0)
		return -EMSGSIZE;

	if (copy_from_user(&x, depth->x, sizeof(x)))
		return -EFAULT;
	if (copy_from_user(&y, depth->y, sizeof(y)))
		return -EFAULT;

	BEGIN_RING(7);

	OUT_RING(CCE_PACKET3(R128_CNTL_BITBLT_MULTI, 5));
	OUT_RING(R128_GMC_SRC_PITCH_OFFSET_CNTL |
		 R128_GMC_DST_PITCH_OFFSET_CNTL |
		 R128_GMC_BRUSH_NONE |
		 (dev_priv->depth_fmt << 8) |
		 R128_GMC_SRC_DATATYPE_COLOR |
		 R128_ROP3_S |
		 R128_DP_SRC_SOURCE_MEMORY |
		 R128_GMC_CLR_CMP_CNTL_DIS | R128_GMC_WR_MSK_DIS);

	OUT_RING(dev_priv->depth_pitch_offset_c);
	OUT_RING(dev_priv->span_pitch_offset_c);

	OUT_RING((x << 16) | y);
	OUT_RING((0 << 16) | 0);
	OUT_RING((count << 16) | 1);

	ADVANCE_RING();

	return 0;
}

static int r128_cce_dispatch_read_pixels(struct drm_device *dev,
					 drm_r128_depth_t *depth)
{
	drm_r128_private_t *dev_priv = dev->dev_private;
	int count, *x, *y;
	int i, xbuf_size, ybuf_size;
	RING_LOCALS;
	DRM_DEBUG("\n");

	count = depth->n;
	if (count > 4096 || count <= 0)
		return -EMSGSIZE;

	if (count > dev_priv->depth_pitch)
		count = dev_priv->depth_pitch;

	xbuf_size = count * sizeof(*x);
	ybuf_size = count * sizeof(*y);
	x = kmalloc(xbuf_size, GFP_KERNEL);
	if (x == NULL)
		return -ENOMEM;
	y = kmalloc(ybuf_size, GFP_KERNEL);
	if (y == NULL) {
		kfree(x);
		return -ENOMEM;
	}
	if (copy_from_user(x, depth->x, xbuf_size)) {
		kfree(x);
		kfree(y);
		return -EFAULT;
	}
	if (copy_from_user(y, depth->y, ybuf_size)) {
		kfree(x);
		kfree(y);
		return -EFAULT;
	}

	for (i = 0; i < count; i++) {
		BEGIN_RING(7);

		OUT_RING(CCE_PACKET3(R128_CNTL_BITBLT_MULTI, 5));
		OUT_RING(R128_GMC_SRC_PITCH_OFFSET_CNTL |
			 R128_GMC_DST_PITCH_OFFSET_CNTL |
			 R128_GMC_BRUSH_NONE |
			 (dev_priv->depth_fmt << 8) |
			 R128_GMC_SRC_DATATYPE_COLOR |
			 R128_ROP3_S |
			 R128_DP_SRC_SOURCE_MEMORY |
			 R128_GMC_CLR_CMP_CNTL_DIS | R128_GMC_WR_MSK_DIS);

		OUT_RING(dev_priv->depth_pitch_offset_c);
		OUT_RING(dev_priv->span_pitch_offset_c);

		OUT_RING((x[i] << 16) | y[i]);
		OUT_RING((i << 16) | 0);
		OUT_RING((1 << 16) | 1);

		ADVANCE_RING();
	}

	kfree(x);
	kfree(y);

	return 0;
}

/* ================================================================
 * Polygon stipple
 */

static void r128_cce_dispatch_stipple(struct drm_device *dev, u32 *stipple)
{
	drm_r128_private_t *dev_priv = dev->dev_private;
	int i;
	RING_LOCALS;
	DRM_DEBUG("\n");

	BEGIN_RING(33);

	OUT_RING(CCE_PACKET0(R128_BRUSH_DATA0, 31));
	for (i = 0; i < 32; i++)
		OUT_RING(stipple[i]);

	ADVANCE_RING();
}

/* ================================================================
 * IOCTL functions
 */

static int r128_cce_clear(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_r128_private_t *dev_priv = dev->dev_private;
	drm_r128_sarea_t *sarea_priv;
	drm_r128_clear_t *clear = data;
	DRM_DEBUG("\n");

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	DEV_INIT_TEST_WITH_RETURN(dev_priv);

	RING_SPACE_TEST_WITH_RETURN(dev_priv);

	sarea_priv = dev_priv->sarea_priv;

	if (sarea_priv->nbox > R128_NR_SAREA_CLIPRECTS)
		sarea_priv->nbox = R128_NR_SAREA_CLIPRECTS;

	r128_cce_dispatch_clear(dev, clear);
	COMMIT_RING();

	/* Make sure we restore the 3D state next time.
	 */
	dev_priv->sarea_priv->dirty |= R128_UPLOAD_CONTEXT | R128_UPLOAD_MASKS;

	return 0;
}

static int r128_do_init_pageflip(struct drm_device *dev)
{
	drm_r128_private_t *dev_priv = dev->dev_private;
	DRM_DEBUG("\n");

	dev_priv->crtc_offset = R128_READ(R128_CRTC_OFFSET);
	dev_priv->crtc_offset_cntl = R128_READ(R128_CRTC_OFFSET_CNTL);

	R128_WRITE(R128_CRTC_OFFSET, dev_priv->front_offset);
	R128_WRITE(R128_CRTC_OFFSET_CNTL,
		   dev_priv->crtc_offset_cntl | R128_CRTC_OFFSET_FLIP_CNTL);

	dev_priv->page_flipping = 1;
	dev_priv->current_page = 0;
	dev_priv->sarea_priv->pfCurrentPage = dev_priv->current_page;

	return 0;
}

static int r128_do_cleanup_pageflip(struct drm_device *dev)
{
	drm_r128_private_t *dev_priv = dev->dev_private;
	DRM_DEBUG("\n");

	R128_WRITE(R128_CRTC_OFFSET, dev_priv->crtc_offset);
	R128_WRITE(R128_CRTC_OFFSET_CNTL, dev_priv->crtc_offset_cntl);

	if (dev_priv->current_page != 0) {
		r128_cce_dispatch_flip(dev);
		COMMIT_RING();
	}

	dev_priv->page_flipping = 0;
	return 0;
}

/* Swapping and flipping are different operations, need different ioctls.
 * They can & should be intermixed to support multiple 3d windows.
 */

static int r128_cce_flip(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_r128_private_t *dev_priv = dev->dev_private;
	DRM_DEBUG("\n");

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	DEV_INIT_TEST_WITH_RETURN(dev_priv);

	RING_SPACE_TEST_WITH_RETURN(dev_priv);

	if (!dev_priv->page_flipping)
		r128_do_init_pageflip(dev);

	r128_cce_dispatch_flip(dev);

	COMMIT_RING();
	return 0;
}

static int r128_cce_swap(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_r128_private_t *dev_priv = dev->dev_private;
	drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
	DRM_DEBUG("\n");

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	DEV_INIT_TEST_WITH_RETURN(dev_priv);

	RING_SPACE_TEST_WITH_RETURN(dev_priv);

	if (sarea_priv->nbox > R128_NR_SAREA_CLIPRECTS)
		sarea_priv->nbox = R128_NR_SAREA_CLIPRECTS;

	r128_cce_dispatch_swap(dev);
	dev_priv->sarea_priv->dirty |= (R128_UPLOAD_CONTEXT |
					R128_UPLOAD_MASKS);

	COMMIT_RING();
	return 0;
}

static int r128_cce_vertex(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_r128_private_t *dev_priv = dev->dev_private;
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf *buf;
	drm_r128_buf_priv_t *buf_priv;
	drm_r128_vertex_t *vertex = data;

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	DEV_INIT_TEST_WITH_RETURN(dev_priv);

	DRM_DEBUG("pid=%d index=%d count=%d discard=%d\n",
		  task_pid_nr(current), vertex->idx, vertex->count, vertex->discard);

	if (vertex->idx < 0 || vertex->idx >= dma->buf_count) {
		DRM_ERROR("buffer index %d (of %d max)\n",
			  vertex->idx, dma->buf_count - 1);
		return -EINVAL;
	}
	if (vertex->prim < 0 ||
	    vertex->prim > R128_CCE_VC_CNTL_PRIM_TYPE_TRI_TYPE2) {
		DRM_ERROR("buffer prim %d\n", vertex->prim);
		return -EINVAL;
	}

	RING_SPACE_TEST_WITH_RETURN(dev_priv);
	VB_AGE_TEST_WITH_RETURN(dev_priv);

	buf = dma->buflist[vertex->idx];
	buf_priv = buf->dev_private;

	if (buf->file_priv != file_priv) {
		DRM_ERROR("process %d using buffer owned by %p\n",
			  task_pid_nr(current), buf->file_priv);
		return -EINVAL;
	}
	if (buf->pending) {
		DRM_ERROR("sending pending buffer %d\n", vertex->idx);
		return -EINVAL;
	}

	buf->used = vertex->count;
	buf_priv->prim = vertex->prim;
	buf_priv->discard = vertex->discard;

	r128_cce_dispatch_vertex(dev, buf);

	COMMIT_RING();
	return 0;
}

static int r128_cce_indices(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_r128_private_t *dev_priv = dev->dev_private;
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf *buf;
	drm_r128_buf_priv_t *buf_priv;
	drm_r128_indices_t *elts = data;
	int count;

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	DEV_INIT_TEST_WITH_RETURN(dev_priv);

	DRM_DEBUG("pid=%d buf=%d s=%d e=%d d=%d\n", task_pid_nr(current),
		  elts->idx, elts->start, elts->end, elts->discard);

	if (elts->idx < 0 || elts->idx >= dma->buf_count) {
		DRM_ERROR("buffer index %d (of %d max)\n",
			  elts->idx, dma->buf_count - 1);
		return -EINVAL;
	}
	if (elts->prim < 0 ||
	    elts->prim > R128_CCE_VC_CNTL_PRIM_TYPE_TRI_TYPE2) {
		DRM_ERROR("buffer prim %d\n", elts->prim);
		return -EINVAL;
	}

	RING_SPACE_TEST_WITH_RETURN(dev_priv);
	VB_AGE_TEST_WITH_RETURN(dev_priv);

	buf = dma->buflist[elts->idx];
	buf_priv = buf->dev_private;

	if (buf->file_priv != file_priv) {
		DRM_ERROR("process %d using buffer owned by %p\n",
			  task_pid_nr(current), buf->file_priv);
		return -EINVAL;
	}
	if (buf->pending) {
		DRM_ERROR("sending pending buffer %d\n", elts->idx);
		return -EINVAL;
	}

	count = (elts->end - elts->start) / sizeof(u16);
	elts->start -= R128_INDEX_PRIM_OFFSET;

	if (elts->start & 0x7) {
		DRM_ERROR("misaligned buffer 0x%x\n", elts->start);
		return -EINVAL;
	}
	if (elts->start < buf->used) {
		DRM_ERROR("no header 0x%x - 0x%x\n", elts->start, buf->used);
		return -EINVAL;
	}

	buf->used = elts->end;
	buf_priv->prim = elts->prim;
	buf_priv->discard = elts->discard;

	r128_cce_dispatch_indices(dev, buf, elts->start, elts->end, count);

	COMMIT_RING();
	return 0;
}

static int r128_cce_blit(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	struct drm_device_dma *dma = dev->dma;
	drm_r128_private_t *dev_priv = dev->dev_private;
	drm_r128_blit_t *blit = data;
	int ret;

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	DEV_INIT_TEST_WITH_RETURN(dev_priv);

	DRM_DEBUG("pid=%d index=%d\n", task_pid_nr(current), blit->idx);

	if (blit->idx < 0 || blit->idx >= dma->buf_count) {
		DRM_ERROR("buffer index %d (of %d max)\n",
			  blit->idx, dma->buf_count - 1);
		return -EINVAL;
	}

	RING_SPACE_TEST_WITH_RETURN(dev_priv);
	VB_AGE_TEST_WITH_RETURN(dev_priv);

	ret = r128_cce_dispatch_blit(dev, file_priv, blit);

	COMMIT_RING();
	return ret;
}

int r128_cce_depth(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_r128_private_t *dev_priv = dev->dev_private;
	drm_r128_depth_t *depth = data;
	int ret;

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	DEV_INIT_TEST_WITH_RETURN(dev_priv);

	RING_SPACE_TEST_WITH_RETURN(dev_priv);

	ret = -EINVAL;
	switch (depth->func) {
	case R128_WRITE_SPAN:
		ret = r128_cce_dispatch_write_span(dev, depth);
		break;
	case R128_WRITE_PIXELS:
		ret = r128_cce_dispatch_write_pixels(dev, depth);
		break;
	case R128_READ_SPAN:
		ret = r128_cce_dispatch_read_span(dev, depth);
		break;
	case R128_READ_PIXELS:
		ret = r128_cce_dispatch_read_pixels(dev, depth);
		break;
	}

	COMMIT_RING();
	return ret;
}

int r128_cce_stipple(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_r128_private_t *dev_priv = dev->dev_private;
	drm_r128_stipple_t *stipple = data;
	u32 mask[32];

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	DEV_INIT_TEST_WITH_RETURN(dev_priv);

	if (copy_from_user(&mask, stipple->mask, 32 * sizeof(u32)))
		return -EFAULT;

	RING_SPACE_TEST_WITH_RETURN(dev_priv);

	r128_cce_dispatch_stipple(dev, mask);

	COMMIT_RING();
	return 0;
}

static int r128_cce_indirect(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_r128_private_t *dev_priv = dev->dev_private;
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf *buf;
	drm_r128_buf_priv_t *buf_priv;
	drm_r128_indirect_t *indirect = data;
#if 0
	RING_LOCALS;
#endif

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	DEV_INIT_TEST_WITH_RETURN(dev_priv);

	DRM_DEBUG("idx=%d s=%d e=%d d=%d\n",
		  indirect->idx, indirect->start, indirect->end,
		  indirect->discard);

	if (indirect->idx < 0 || indirect->idx >= dma->buf_count) {
		DRM_ERROR("buffer index %d (of %d max)\n",
			  indirect->idx, dma->buf_count - 1);
		return -EINVAL;
	}

	buf = dma->buflist[indirect->idx];
	buf_priv = buf->dev_private;

	if (buf->file_priv != file_priv) {
		DRM_ERROR("process %d using buffer owned by %p\n",
			  task_pid_nr(current), buf->file_priv);
		return -EINVAL;
	}
	if (buf->pending) {
		DRM_ERROR("sending pending buffer %d\n", indirect->idx);
		return -EINVAL;
	}

	if (indirect->start < buf->used) {
		DRM_ERROR("reusing indirect: start=0x%x actual=0x%x\n",
			  indirect->start, buf->used);
		return -EINVAL;
	}

	RING_SPACE_TEST_WITH_RETURN(dev_priv);
	VB_AGE_TEST_WITH_RETURN(dev_priv);

	buf->used = indirect->end;
	buf_priv->discard = indirect->discard;

#if 0
	/* Wait for the 3D stream to idle before the indirect buffer
	 * containing 2D acceleration commands is processed.
	 */
	BEGIN_RING(2);
	RADEON_WAIT_UNTIL_3D_IDLE();
	ADVANCE_RING();
#endif

	/* Dispatch the indirect buffer full of commands from the
	 * X server.  This is insecure and is thus only available to
	 * privileged clients.
	 */
	r128_cce_dispatch_indirect(dev, buf, indirect->start, indirect->end);

	COMMIT_RING();
	return 0;
}

int r128_getparam(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_r128_private_t *dev_priv = dev->dev_private;
	drm_r128_getparam_t *param = data;
	int value;

	DEV_INIT_TEST_WITH_RETURN(dev_priv);

	DRM_DEBUG("pid=%d\n", task_pid_nr(current));

	switch (param->param) {
	case R128_PARAM_IRQ_NR:
		value = dev->pdev->irq;
		break;
	default:
		return -EINVAL;
	}

	if (copy_to_user(param->value, &value, sizeof(int))) {
		DRM_ERROR("copy_to_user\n");
		return -EFAULT;
	}

	return 0;
}

void r128_driver_preclose(struct drm_device *dev, struct drm_file *file_priv)
{
	if (dev->dev_private) {
		drm_r128_private_t *dev_priv = dev->dev_private;
		if (dev_priv->page_flipping)
			r128_do_cleanup_pageflip(dev);
	}
}

void r128_driver_lastclose(struct drm_device *dev)
{
	r128_do_cleanup_cce(dev);
}

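/* Ioctl table: every entry requires an authenticated client (DRM_AUTH);
 * CCE init/start/stop/reset and indirect buffer submission are further
 * restricted to the DRM master running as root.
 */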
const struct drm_ioctl_desc r128_ioctls[] = {
	DRM_IOCTL_DEF_DRV(R128_INIT, r128_cce_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(R128_CCE_START, r128_cce_start, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(R128_CCE_STOP, r128_cce_stop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(R128_CCE_RESET, r128_cce_reset, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(R128_CCE_IDLE, r128_cce_idle, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(R128_RESET, r128_engine_reset, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(R128_FULLSCREEN, r128_fullscreen, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(R128_SWAP, r128_cce_swap, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(R128_FLIP, r128_cce_flip, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(R128_CLEAR, r128_cce_clear, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(R128_VERTEX, r128_cce_vertex, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(R128_INDICES, r128_cce_indices, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(R128_BLIT, r128_cce_blit, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(R128_DEPTH, r128_cce_depth, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(R128_STIPPLE, r128_cce_stipple, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(R128_INDIRECT, r128_cce_indirect, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(R128_GETPARAM, r128_getparam, DRM_AUTH),
};

int r128_max_ioctl = ARRAY_SIZE(r128_ioctls);