linux-2.6/verdex.git: drivers/video/cfbfillrect.c
/*
 *  Generic fillrect for frame buffers with packed pixels of any depth.
 *
 *      Copyright (C)  2000 James Simmons (jsimmons@linux-fbdev.org)
 *
 *  This file is subject to the terms and conditions of the GNU General Public
 *  License.  See the file COPYING in the main directory of this archive for
 *  more details.
 *
 * NOTES:
 *
 *  The code for depths like 24 that don't have an integer number of pixels
 *  per long is broken and needs to be fixed. For now I have turned these
 *  modes off.
 *
 *  Also need to add code to deal with cards whose endianness differs from
 *  the native CPU's. I also need to deal with the MSB position in the word.
 */

#include <linux/config.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/fb.h>
#include <asm/types.h>

#if BITS_PER_LONG == 32
# define FB_WRITEL fb_writel
# define FB_READL  fb_readl
#else
# define FB_WRITEL fb_writeq
# define FB_READL  fb_readq
#endif

/*
 *  Compose two values, using a bitmask as decision value
 *  This is equivalent to (a & mask) | (b & ~mask)
 */

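/*
 * For example, comp(0x12345678, 0xabcdef01, 0xffff0000) yields 0x1234ef01:
 * bits selected by the mask come from the first argument, the rest from the
 * second.  The xor form below needs one operation less than the naive
 * and/or expression.
 */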
static inline unsigned long
comp(unsigned long a, unsigned long b, unsigned long mask)
{
        return ((a ^ b) & mask) ^ b;
}

/*
 *  Create a pattern with the given pixel's color
 */
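/*
 * For example, on a 32-bit build an 8 bpp pixel value of 0xab becomes
 * 0xab * 0x01010101 = 0xabababab, i.e. the pixel replicated across the whole
 * word.  For 12 and 24 bpp the pixel does not replicate evenly into a long,
 * which is why such depths are flagged as broken in the header comment.
 */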
#if BITS_PER_LONG == 64
static inline unsigned long
pixel_to_pat( u32 bpp, u32 pixel)
{
        switch (bpp) {
        case 1:
                return 0xfffffffffffffffful*pixel;
        case 2:
                return 0x5555555555555555ul*pixel;
        case 4:
                return 0x1111111111111111ul*pixel;
        case 8:
                return 0x0101010101010101ul*pixel;
        case 12:
                return 0x0001001001001001ul*pixel;
        case 16:
                return 0x0001000100010001ul*pixel;
        case 24:
                return 0x0000000001000001ul*pixel;
        case 32:
                return 0x0000000100000001ul*pixel;
        default:
                panic("pixel_to_pat(): unsupported pixelformat\n");
        }
}
#else
static inline unsigned long
pixel_to_pat( u32 bpp, u32 pixel)
{
        switch (bpp) {
        case 1:
                return 0xfffffffful*pixel;
        case 2:
                return 0x55555555ul*pixel;
        case 4:
                return 0x11111111ul*pixel;
        case 8:
                return 0x01010101ul*pixel;
        case 12:
                return 0x00001001ul*pixel;
        case 16:
                return 0x00010001ul*pixel;
        case 24:
                return 0x00000001ul*pixel;
        case 32:
                return 0x00000001ul*pixel;
        default:
                panic("pixel_to_pat(): unsupported pixelformat\n");
        }
}
#endif

/*
 *  Aligned pattern fill using 32/64-bit memory accesses
 */
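/*
 * dst points at the first long-aligned word touched by the fill, dst_idx is
 * the bit offset of the first pixel within that word, and n is the total
 * number of bits to fill.  The span is handled as a partial leading word, a
 * run of full words (unrolled eight at a time), and a partial trailing word.
 */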
static void
bitfill_aligned(unsigned long __iomem *dst, int dst_idx, unsigned long pat, unsigned n, int bits)
{
        unsigned long first, last;

        if (!n)
                return;

        first = ~0UL >> dst_idx;
        last = ~(~0UL >> ((dst_idx+n) % bits));

        if (dst_idx+n <= bits) {
                // Single word
                if (last)
                        first &= last;
                FB_WRITEL(comp(pat, FB_READL(dst), first), dst);
        } else {
                // Multiple destination words

                // Leading bits
                if (first != ~0UL) {
                        FB_WRITEL(comp(pat, FB_READL(dst), first), dst);
                        dst++;
                        n -= bits - dst_idx;
                }

                // Main chunk
                n /= bits;
                while (n >= 8) {
                        FB_WRITEL(pat, dst++);
                        FB_WRITEL(pat, dst++);
                        FB_WRITEL(pat, dst++);
                        FB_WRITEL(pat, dst++);
                        FB_WRITEL(pat, dst++);
                        FB_WRITEL(pat, dst++);
                        FB_WRITEL(pat, dst++);
                        FB_WRITEL(pat, dst++);
                        n -= 8;
                }
                while (n--)
                        FB_WRITEL(pat, dst++);

                // Trailing bits
                if (last)
                        FB_WRITEL(comp(pat, FB_READL(dst), last), dst);
        }
}

/*
 *  Unaligned generic pattern fill using 32/64-bit memory accesses
 *  The pattern must have been expanded to a full 32/64-bit value
 *  Left/right are the appropriate shifts to convert the pattern to the one
 *  used for the next 32/64-bit word
 */
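/*
 * cfb_fillrect() passes left = BITS_PER_LONG % bpp and right = bpp - left,
 * so "pat << left | pat >> right" realigns the expanded pattern to the pixel
 * boundary of the next destination word.
 */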
static void
bitfill_unaligned(unsigned long __iomem *dst, int dst_idx, unsigned long pat,
                  int left, int right, unsigned n, int bits)
{
        unsigned long first, last;

        if (!n)
                return;

        first = ~0UL >> dst_idx;
        last = ~(~0UL >> ((dst_idx+n) % bits));

        if (dst_idx+n <= bits) {
                // Single word
                if (last)
                        first &= last;
                FB_WRITEL(comp(pat, FB_READL(dst), first), dst);
        } else {
                // Multiple destination words
                // Leading bits
                if (first) {
                        FB_WRITEL(comp(pat, FB_READL(dst), first), dst);
                        dst++;
                        pat = pat << left | pat >> right;
                        n -= bits - dst_idx;
                }

                // Main chunk
                n /= bits;
                while (n >= 4) {
                        FB_WRITEL(pat, dst++);
                        pat = pat << left | pat >> right;
                        FB_WRITEL(pat, dst++);
                        pat = pat << left | pat >> right;
                        FB_WRITEL(pat, dst++);
                        pat = pat << left | pat >> right;
                        FB_WRITEL(pat, dst++);
                        pat = pat << left | pat >> right;
                        n -= 4;
                }
                while (n--) {
                        FB_WRITEL(pat, dst++);
                        pat = pat << left | pat >> right;
                }

                // Trailing bits
                if (last)
                        FB_WRITEL(comp(pat, FB_READL(dst), last), dst);
        }
}

/*
 *  Aligned pattern invert using 32/64-bit memory accesses
 */
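/*
 * Used for ROP_XOR: each destination word is read, xor'ed with the pattern
 * and written back instead of being overwritten.
 */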
static void
bitfill_aligned_rev(unsigned long __iomem *dst, int dst_idx, unsigned long pat, unsigned n, int bits)
{
        unsigned long val = pat, dat;
        unsigned long first, last;

        if (!n)
                return;

        first = ~0UL >> dst_idx;
        last = ~(~0UL >> ((dst_idx+n) % bits));

        if (dst_idx+n <= bits) {
                // Single word
                if (last)
                        first &= last;
                dat = FB_READL(dst);
                FB_WRITEL(comp(dat ^ val, dat, first), dst);
        } else {
                // Multiple destination words
                // Leading bits
                if (first != 0UL) {
                        dat = FB_READL(dst);
                        FB_WRITEL(comp(dat ^ val, dat, first), dst);
                        dst++;
                        n -= bits - dst_idx;
                }

                // Main chunk
                n /= bits;
                while (n >= 8) {
                        FB_WRITEL(FB_READL(dst) ^ val, dst);
                        dst++;
                        FB_WRITEL(FB_READL(dst) ^ val, dst);
                        dst++;
                        FB_WRITEL(FB_READL(dst) ^ val, dst);
                        dst++;
                        FB_WRITEL(FB_READL(dst) ^ val, dst);
                        dst++;
                        FB_WRITEL(FB_READL(dst) ^ val, dst);
                        dst++;
                        FB_WRITEL(FB_READL(dst) ^ val, dst);
                        dst++;
                        FB_WRITEL(FB_READL(dst) ^ val, dst);
                        dst++;
                        FB_WRITEL(FB_READL(dst) ^ val, dst);
                        dst++;
                        n -= 8;
                }
                while (n--) {
                        FB_WRITEL(FB_READL(dst) ^ val, dst);
                        dst++;
                }

                // Trailing bits
                if (last) {
                        dat = FB_READL(dst);
                        FB_WRITEL(comp(dat ^ val, dat, last), dst);
                }
        }
}

/*
 *  Unaligned generic pattern invert using 32/64-bit memory accesses
 *  The pattern must have been expanded to a full 32/64-bit value
 *  Left/right are the appropriate shifts to convert the pattern to the one
 *  used for the next 32/64-bit word
 */
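/*
 * Combines the per-word pattern rotation of bitfill_unaligned() with the
 * read-xor-write of bitfill_aligned_rev().
 */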
static void
bitfill_unaligned_rev(unsigned long __iomem *dst, int dst_idx, unsigned long pat,
                      int left, int right, unsigned n, int bits)
{
        unsigned long first, last, dat;

        if (!n)
                return;

        first = ~0UL >> dst_idx;
        last = ~(~0UL >> ((dst_idx+n) % bits));

        if (dst_idx+n <= bits) {
                // Single word
                if (last)
                        first &= last;
                dat = FB_READL(dst);
                FB_WRITEL(comp(dat ^ pat, dat, first), dst);
        } else {
                // Multiple destination words

                // Leading bits
                if (first != 0UL) {
                        dat = FB_READL(dst);
                        FB_WRITEL(comp(dat ^ pat, dat, first), dst);
                        dst++;
                        pat = pat << left | pat >> right;
                        n -= bits - dst_idx;
                }

                // Main chunk
                n /= bits;
                while (n >= 4) {
                        FB_WRITEL(FB_READL(dst) ^ pat, dst);
                        dst++;
                        pat = pat << left | pat >> right;
                        FB_WRITEL(FB_READL(dst) ^ pat, dst);
                        dst++;
                        pat = pat << left | pat >> right;
                        FB_WRITEL(FB_READL(dst) ^ pat, dst);
                        dst++;
                        pat = pat << left | pat >> right;
                        FB_WRITEL(FB_READL(dst) ^ pat, dst);
                        dst++;
                        pat = pat << left | pat >> right;
                        n -= 4;
                }
                while (n--) {
                        FB_WRITEL(FB_READL(dst) ^ pat, dst);
                        dst++;
                        pat = pat << left | pat >> right;
                }

                // Trailing bits
                if (last) {
                        dat = FB_READL(dst);
                        FB_WRITEL(comp(dat ^ pat, dat, last), dst);
                }
        }
}

void cfb_fillrect(struct fb_info *p, const struct fb_fillrect *rect)
{
        unsigned long x2, y2, vxres, vyres, height, width, pat, fg;
        int bits = BITS_PER_LONG, bytes = bits >> 3;
        u32 bpp = p->var.bits_per_pixel;
        unsigned long __iomem *dst;
        int dst_idx, left;

        if (p->state != FBINFO_STATE_RUNNING)
                return;

        /* We want rotation but lack hardware to do it for us. */
        if (!p->fbops->fb_rotate && p->var.rotate) {
        }
        vxres = p->var.xres_virtual;
        vyres = p->var.yres_virtual;

        if (!rect->width || !rect->height ||
            rect->dx > vxres || rect->dy > vyres)
                return;

        /* We could use hardware clipping but on many cards you get around
         * hardware clipping by writing to the framebuffer directly. */

        x2 = rect->dx + rect->width;
        y2 = rect->dy + rect->height;
        x2 = x2 < vxres ? x2 : vxres;
        y2 = y2 < vyres ? y2 : vyres;
        width = x2 - rect->dx;
        height = y2 - rect->dy;
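        /*
         * For truecolor and directcolor visuals rect->color is an index into
         * the driver's pseudo_palette rather than a raw pixel value, so it is
         * looked up here first.
         */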
        if (p->fix.visual == FB_VISUAL_TRUECOLOR ||
            p->fix.visual == FB_VISUAL_DIRECTCOLOR )
                fg = ((u32 *) (p->pseudo_palette))[rect->color];
        else
                fg = rect->color;

        pat = pixel_to_pat( bpp, fg);

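        /*
         * dst is the fill start address rounded down to a long boundary;
         * dst_idx is the bit offset of the rectangle's first pixel from that
         * boundary: the byte misalignment of screen_base (in bits) plus dy
         * rows plus dx pixels.
         */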
        dst = (unsigned long __iomem *)((unsigned long)p->screen_base & ~(bytes-1));
        dst_idx = ((unsigned long)p->screen_base & (bytes - 1))*8;
        dst_idx += rect->dy*p->fix.line_length*8+rect->dx*bpp;
        /* FIXME For now we support 1-32 bpp only */
        left = bits % bpp;
        if (p->fbops->fb_sync)
                p->fbops->fb_sync(p);
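        /*
         * left == 0 means bpp divides BITS_PER_LONG evenly, so every
         * destination word holds a whole number of pixels and the same
         * pattern can be written to each word; otherwise the pattern has to
         * be rotated between words by the unaligned helpers.
         */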
        if (!left) {
                void (*fill_op32)(unsigned long __iomem *dst, int dst_idx,
                                  unsigned long pat, unsigned n, int bits) = NULL;

                switch (rect->rop) {
                case ROP_XOR:
                        fill_op32 = bitfill_aligned_rev;
                        break;
                case ROP_COPY:
                        fill_op32 = bitfill_aligned;
                        break;
                default:
                        printk( KERN_ERR "cfb_fillrect(): unknown rop, defaulting to ROP_COPY\n");
                        fill_op32 = bitfill_aligned;
                        break;
                }
                while (height--) {
                        dst += dst_idx >> (ffs(bits) - 1);
                        dst_idx &= (bits - 1);
                        fill_op32(dst, dst_idx, pat, width*bpp, bits);
                        dst_idx += p->fix.line_length*8;
                }
        } else {
                int right;
                int r;
                int rot = (left-dst_idx) % bpp;
                void (*fill_op)(unsigned long __iomem *dst, int dst_idx,
                                unsigned long pat, int left, int right,
                                unsigned n, int bits) = NULL;

                /* rotate pattern to correct start position */
                pat = pat << rot | pat >> (bpp-rot);

                right = bpp-left;
                switch (rect->rop) {
                case ROP_XOR:
                        fill_op = bitfill_unaligned_rev;
                        break;
                case ROP_COPY:
                        fill_op = bitfill_unaligned;
                        break;
                default:
                        printk( KERN_ERR "cfb_fillrect(): unknown rop, defaulting to ROP_COPY\n");
                        fill_op = bitfill_unaligned;
                        break;
                }
                while (height--) {
                        dst += dst_idx >> (ffs(bits) - 1);
                        dst_idx &= (bits - 1);
                        fill_op(dst, dst_idx, pat, left, right,
                                width*bpp, bits);
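                        /*
                         * Each row starts p->fix.line_length*8 bits after the
                         * previous one; if that is not a multiple of bpp the
                         * pattern's phase changes from row to row, so rotate
                         * it by the remainder to line up with the next row.
                         */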
                        r = (p->fix.line_length*8) % bpp;
                        pat = pat << (bpp-r) | pat >> r;
                        dst_idx += p->fix.line_length*8;
                }
        }
}

EXPORT_SYMBOL(cfb_fillrect);

MODULE_AUTHOR("James Simmons <jsimmons@users.sf.net>");
MODULE_DESCRIPTION("Generic software accelerated fill rectangle");
MODULE_LICENSE("GPL");