/* radeon_irq.c -- IRQ handling for radeon -*- linux-c -*- */
/*
 * Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved.
 *
 * The Weather Channel (TM) funded Tungsten Graphics to develop the
 * initial release of the Radeon 8500 driver under the XFree86 license.
 * This notice must be preserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Keith Whitwell <keith@tungstengraphics.com>
 *    Michel Dänzer <michel@daenzer.net>
 */

#include "drmP.h"
#include "drm.h"
#include "radeon_drm.h"
#include "radeon_drv.h"

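/* Read the interrupt status register, keep only the bits handled by this
 * driver, and write those bits back to acknowledge them to the hardware.
 */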
static __inline__ u32 radeon_acknowledge_irqs(drm_radeon_private_t * dev_priv,
                                              u32 mask)
{
        u32 irqs = RADEON_READ(RADEON_GEN_INT_STATUS) & mask;
        if (irqs)
                RADEON_WRITE(RADEON_GEN_INT_STATUS, irqs);
        return irqs;
}

/* Interrupts - Used for device synchronization and flushing in the
 * following circumstances:
 *
 * - Exclusive FB access with hw idle:
 *    - Wait for GUI Idle (?) interrupt, then do normal flush.
 *
 * - Frame throttling, NV_fence:
 *    - Drop marker irq's into command stream ahead of time.
 *    - Wait on irq's with lock *not held*
 *    - Check each for termination condition
 *
 * - Internally in cp_getbuffer, etc:
 *    - as above, but wait with lock held???
 *
 * NOTE: These functions are misleadingly named -- the irq's aren't
 * tied to dma at all, this is just a hangover from dri prehistory.
 */

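/* Top-level interrupt handler: acknowledge the sources this driver owns,
 * then wake up anyone waiting on software-interrupt fences or vblank counts.
 */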
irqreturn_t radeon_driver_irq_handler(DRM_IRQ_ARGS)
{
        struct drm_device *dev = (struct drm_device *) arg;
        drm_radeon_private_t *dev_priv =
            (drm_radeon_private_t *) dev->dev_private;
        u32 stat;

        /* Only consider the bits we're interested in - others could be used
         * outside the DRM
         */
        stat = radeon_acknowledge_irqs(dev_priv, (RADEON_SW_INT_TEST_ACK |
                                                  RADEON_CRTC_VBLANK_STAT |
                                                  RADEON_CRTC2_VBLANK_STAT));
        if (!stat)
                return IRQ_NONE;

        stat &= dev_priv->irq_enable_reg;

        /* SW interrupt */
        if (stat & RADEON_SW_INT_TEST) {
                DRM_WAKEUP(&dev_priv->swi_queue);
        }

        /* VBLANK interrupt */
        if (stat & (RADEON_CRTC_VBLANK_STAT | RADEON_CRTC2_VBLANK_STAT)) {
                int vblank_crtc = dev_priv->vblank_crtc;

                if ((vblank_crtc &
                     (DRM_RADEON_VBLANK_CRTC1 | DRM_RADEON_VBLANK_CRTC2)) ==
                    (DRM_RADEON_VBLANK_CRTC1 | DRM_RADEON_VBLANK_CRTC2)) {
                        if (stat & RADEON_CRTC_VBLANK_STAT)
                                atomic_inc(&dev->vbl_received);
                        if (stat & RADEON_CRTC2_VBLANK_STAT)
                                atomic_inc(&dev->vbl_received2);
                } else if (((stat & RADEON_CRTC_VBLANK_STAT) &&
                            (vblank_crtc & DRM_RADEON_VBLANK_CRTC1)) ||
                           ((stat & RADEON_CRTC2_VBLANK_STAT) &&
                            (vblank_crtc & DRM_RADEON_VBLANK_CRTC2)))
                        atomic_inc(&dev->vbl_received);

                DRM_WAKEUP(&dev->vbl_queue);
                drm_vbl_send_signals(dev);
        }

        return IRQ_HANDLED;
}

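/* Emit a software-interrupt fence: write the next sequence number to
 * RADEON_LAST_SWI_REG from within the command stream, then fire the SW
 * interrupt so waiters are woken once the CP has reached this point.
 */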
static int radeon_emit_irq(struct drm_device * dev)
{
        drm_radeon_private_t *dev_priv = dev->dev_private;
        unsigned int ret;
        RING_LOCALS;

        atomic_inc(&dev_priv->swi_emitted);
        ret = atomic_read(&dev_priv->swi_emitted);

        BEGIN_RING(4);
        OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
        OUT_RING_REG(RADEON_GEN_INT_STATUS, RADEON_SW_INT_FIRE);
        ADVANCE_RING();
        COMMIT_RING();

        return ret;
}

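/* Block until the CP has written a sequence number >= swi_nr to the scratch
 * register, giving up after a 3 second timeout.
 */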
static int radeon_wait_irq(struct drm_device * dev, int swi_nr)
{
        drm_radeon_private_t *dev_priv =
            (drm_radeon_private_t *) dev->dev_private;
        int ret = 0;

        if (RADEON_READ(RADEON_LAST_SWI_REG) >= swi_nr)
                return 0;

        dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE;

        DRM_WAIT_ON(ret, dev_priv->swi_queue, 3 * DRM_HZ,
                    RADEON_READ(RADEON_LAST_SWI_REG) >= swi_nr);

        return ret;
}

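/* Wait for the vblank counter of the requested CRTC to pass *sequence,
 * then return the current counter value through *sequence.
 */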
static int radeon_driver_vblank_do_wait(struct drm_device * dev,
                                        unsigned int *sequence, int crtc)
{
        drm_radeon_private_t *dev_priv =
            (drm_radeon_private_t *) dev->dev_private;
        unsigned int cur_vblank;
        int ret = 0;
        int ack = 0;
        atomic_t *counter;
        if (!dev_priv) {
                DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
                return -EINVAL;
        }

        if (crtc == DRM_RADEON_VBLANK_CRTC1) {
                counter = &dev->vbl_received;
                ack |= RADEON_CRTC_VBLANK_STAT;
        } else if (crtc == DRM_RADEON_VBLANK_CRTC2) {
                counter = &dev->vbl_received2;
                ack |= RADEON_CRTC2_VBLANK_STAT;
        } else
                return -EINVAL;

        radeon_acknowledge_irqs(dev_priv, ack);

        dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE;

        /* Assume that the user has missed the current sequence number
         * by about a day rather than wanting to wait for years using
         * vertical blanks...
         */
        DRM_WAIT_ON(ret, dev->vbl_queue, 3 * DRM_HZ,
                    (((cur_vblank = atomic_read(counter))
                      - *sequence) <= (1 << 23)));

        *sequence = cur_vblank;

        return ret;
}

int radeon_driver_vblank_wait(struct drm_device *dev, unsigned int *sequence)
{
        return radeon_driver_vblank_do_wait(dev, sequence, DRM_RADEON_VBLANK_CRTC1);
}

int radeon_driver_vblank_wait2(struct drm_device *dev, unsigned int *sequence)
{
        return radeon_driver_vblank_do_wait(dev, sequence, DRM_RADEON_VBLANK_CRTC2);
}

/* Needs the lock as it touches the ring.
 */
int radeon_irq_emit(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
        drm_radeon_private_t *dev_priv = dev->dev_private;
        drm_radeon_irq_emit_t *emit = data;
        int result;

        LOCK_TEST_WITH_RETURN(dev, file_priv);

        if (!dev_priv) {
                DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
                return -EINVAL;
        }

        result = radeon_emit_irq(dev);

        if (DRM_COPY_TO_USER(emit->irq_seq, &result, sizeof(int))) {
                DRM_ERROR("copy_to_user\n");
                return -EFAULT;
        }

        return 0;
}

/* Doesn't need the hardware lock.
 */
int radeon_irq_wait(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
        drm_radeon_private_t *dev_priv = dev->dev_private;
        drm_radeon_irq_wait_t *irqwait = data;

        if (!dev_priv) {
                DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
                return -EINVAL;
        }

        return radeon_wait_irq(dev, irqwait->irq_seq);
}

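/* Program RADEON_GEN_INT_CNTL with the SW interrupt enable plus the vblank
 * enables selected in dev_priv->vblank_crtc.
 */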
static void radeon_enable_interrupt(struct drm_device *dev)
{
        drm_radeon_private_t *dev_priv = (drm_radeon_private_t *) dev->dev_private;

        dev_priv->irq_enable_reg = RADEON_SW_INT_ENABLE;
        if (dev_priv->vblank_crtc & DRM_RADEON_VBLANK_CRTC1)
                dev_priv->irq_enable_reg |= RADEON_CRTC_VBLANK_MASK;

        if (dev_priv->vblank_crtc & DRM_RADEON_VBLANK_CRTC2)
                dev_priv->irq_enable_reg |= RADEON_CRTC2_VBLANK_MASK;

        RADEON_WRITE(RADEON_GEN_INT_CNTL, dev_priv->irq_enable_reg);
        dev_priv->irq_enabled = 1;
}

/* drm_dma.h hooks
 */
void radeon_driver_irq_preinstall(struct drm_device * dev)
{
        drm_radeon_private_t *dev_priv =
            (drm_radeon_private_t *) dev->dev_private;

        /* Disable *all* interrupts */
        RADEON_WRITE(RADEON_GEN_INT_CNTL, 0);

        /* Clear bits if they're already high */
        radeon_acknowledge_irqs(dev_priv, (RADEON_SW_INT_TEST_ACK |
                                           RADEON_CRTC_VBLANK_STAT |
                                           RADEON_CRTC2_VBLANK_STAT));
}

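/* Reset the software-interrupt bookkeeping and (re)enable the interrupt
 * sources this driver uses.
 */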
void radeon_driver_irq_postinstall(struct drm_device * dev)
{
        drm_radeon_private_t *dev_priv =
            (drm_radeon_private_t *) dev->dev_private;

        atomic_set(&dev_priv->swi_emitted, 0);
        DRM_INIT_WAITQUEUE(&dev_priv->swi_queue);

        radeon_enable_interrupt(dev);
}

void radeon_driver_irq_uninstall(struct drm_device * dev)
{
        drm_radeon_private_t *dev_priv =
            (drm_radeon_private_t *) dev->dev_private;
        if (!dev_priv)
                return;

        dev_priv->irq_enabled = 0;

        /* Disable *all* interrupts */
        RADEON_WRITE(RADEON_GEN_INT_CNTL, 0);
}

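/* Report which CRTCs currently have vblank interrupts enabled, as read back
 * from the live RADEON_GEN_INT_CNTL register.
 */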
int radeon_vblank_crtc_get(struct drm_device *dev)
{
        drm_radeon_private_t *dev_priv = (drm_radeon_private_t *) dev->dev_private;
        u32 flag;
        u32 value;

        flag = RADEON_READ(RADEON_GEN_INT_CNTL);
        value = 0;

        if (flag & RADEON_CRTC_VBLANK_MASK)
                value |= DRM_RADEON_VBLANK_CRTC1;

        if (flag & RADEON_CRTC2_VBLANK_MASK)
                value |= DRM_RADEON_VBLANK_CRTC2;
        return value;
}

int radeon_vblank_crtc_set(struct drm_device *dev, int64_t value)
{
        drm_radeon_private_t *dev_priv = (drm_radeon_private_t *) dev->dev_private;
        if (value & ~(DRM_RADEON_VBLANK_CRTC1 | DRM_RADEON_VBLANK_CRTC2)) {
                DRM_ERROR("called with invalid crtc 0x%x\n", (unsigned int)value);
                return -EINVAL;
        }
        dev_priv->vblank_crtc = (unsigned int)value;
        radeon_enable_interrupt(dev);
        return 0;
}