[cairo/gpu.git] src/gpu/cairo-gpu-impl-context-gl.h
1 #define L_TERMINATOR ";"
2 #define L_TEXTURE "texture"
3 #define L_PROGRAM_ENV "program.env"
5 #define L_RESULT_COLOR_TEMP "result_color"
6 #define L_SET_RESULT_COLOR(p) (p)[6] = '.'
8 #define L_TMP "tmp"
10 #define L_TEMP_TMP "TEMP tmp"
11 #define L_TEMP_RESULT_COLOR "TEMP result_color"
13 #define L_0 "0"
14 #define L_1 "1"
16 #define L_SCALAR_X ".x"
18 #include "cairo-gpu-impl-programs.h"
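// The two helpers below are thin wrappers around ARB program environment
// parameters: `i` is the program.env[] index and `v` points to four floats,
// loaded into the vertex or fragment program state respectively.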
20 static inline void
21 _cairo_gpu_context_set_vert_param(cairo_gpu_context_t* ctx, unsigned i, float* v)
23 ctx->gl.ProgramEnvParameter4fvARB(GL_VERTEX_PROGRAM_ARB, i, v);
26 static inline void
27 _cairo_gpu_context_set_frag_param(cairo_gpu_context_t* ctx, unsigned i, float* v)
29 ctx->gl.ProgramEnvParameter4fvARB(GL_FRAGMENT_PROGRAM_ARB, i, v);
32 static inline unsigned
33 _cairo_gl_context_active_target(cairo_gpu_context_t* ctx)
35 return _cairo_gl_target(ctx->textures[ctx->active_texture].active_target_idx);
38 static inline cairo_gpu_texture_t*
39 _cairo_gpu_context__active_texture(cairo_gpu_context_t* ctx, int idx)
41 return ctx->textures[idx].targets[ctx->textures[idx].active_target_idx].texture;
44 static inline void
45 _cairo_gpu_context__do_set_active_texture(cairo_gpu_context_t* ctx, unsigned i)
47 ctx->gl.ActiveTextureARB(GL_TEXTURE0_ARB + i);
48 ctx->active_texture = i;
51 static inline void
52 _cairo_gl_context_set_active_texture(cairo_gpu_context_t* ctx, unsigned i)
54 if(ctx->active_texture != i)
55 _cairo_gpu_context__do_set_active_texture(ctx, i);
58 static void
59 _cairo_gl_context_set_texture_target(cairo_gpu_context_t* ctx, int i, unsigned target)
61 if(target != ctx->textures[i].enabled_target)
63 _cairo_gl_context_set_active_texture(ctx, i);
64 if(ctx->textures[i].enabled_target)
65 ctx->gl.Disable(ctx->textures[i].enabled_target);
66 if(target)
67 ctx->gl.Enable(target);
68 ctx->textures[i].enabled_target = target;
72 static void
73 _cairo_gpu_context__set_nv_texture_shader(cairo_gpu_context_t* ctx, int i, unsigned nv_op)
75 if(ctx->nv_texture_shader)
77 if(nv_op != ctx->textures[i].nv_texture_shader)
79 _cairo_gl_context_set_active_texture(ctx, i);
80 ctx->gl.TexEnvi(GL_TEXTURE_SHADER_NV, GL_SHADER_OPERATION_NV, nv_op);
81 ctx->textures[i].nv_texture_shader = nv_op;
86 static void
87 _cairo_gpu_context__set_mipmap_hint(cairo_gpu_context_t* ctx)
89 if(_cairo_gpu_context__active_texture(ctx, ctx->active_texture)->filter != GL_NEAREST)
91 ctx->gl.TexParameteri(_cairo_gl_context_active_target(ctx), GL_TEXTURE_MIN_FILTER, GL_NEAREST);
92 ctx->gl.TexParameteri(_cairo_gl_context_active_target(ctx), GL_TEXTURE_MAG_FILTER, GL_NEAREST);
93 _cairo_gpu_context__active_texture(ctx, ctx->active_texture)->filter = GL_NEAREST;
97 // We use "generations" for textures because if we delete a texture that is shared among contexts and its GL name gets reused, we end up with a stale binding that can only be detected this way
98 // NOT guaranteed to make target texture unit active
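// Binds `texture` to unit `i` (or to the preferred unit when i < 0, recording
// the preference otherwise), caching the binding per target (2D vs rectangle)
// and unbinding the other target of that unit. Returns the unit actually used.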
99 static int
100 _cairo_gl_context_set_texture(cairo_gpu_context_t* ctx, int i, cairo_gpu_texture_t* texture)
102 int j;
103 if(i < 0)
104 i = ctx->preferred_texture;
105 else
106 ctx->preferred_texture = i;
107 j = texture->target_idx;
109 ctx->textures[i].active_target_idx = j;
111 if(ctx->textures[i].targets[j].tex != texture->tex || !texture->id || ctx->textures[i].targets[j].id != texture->id)
113 _cairo_gl_context_set_active_texture(ctx, i);
114 ctx->gl.BindTextureEXT(_cairo_gl_context_active_target(ctx), texture->tex);
115 ctx->textures[i].targets[j].tex = texture->tex;
116 ctx->textures[i].targets[j].id = texture->id;
117 ctx->textures[i].targets[j].texture = texture;
120 j ^= 1;
121 if(ctx->textures[i].targets[j].tex)
123 ctx->gl.BindTextureEXT(_cairo_gl_target(j), 0);
124 ctx->textures[i].targets[j].tex = 0;
125 ctx->textures[i].targets[j].id = 0;
126 ctx->textures[i].targets[j].texture = 0;
130 return i;
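// Binds a framebuffer object for drawing and/or reading according to `mask`
// (FB_DRAW / FB_READ), collapsing to a single GL_FRAMEBUFFER_EXT binding when
// EXT_framebuffer_blit is unavailable, and caching fb/id/buffer to skip
// redundant GL calls. `flip_height` >= 0 records the height used to flip
// window-system y coordinates. Illustrative call (hypothetical values):
//   _cairo_gl_context_set_framebuffer(ctx, FB_DRAW, fb, fb_id, GL_COLOR_ATTACHMENT0_EXT, height);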
133 static void
134 _cairo_gl_context_set_framebuffer(cairo_gpu_context_t* ctx, int mask, GLuint fb, long long id, unsigned buffer, int flip_height)
136 if(!mask)
137 return;
139 if(!ctx->space->fb_blit)
140 mask = FB_DRAW | FB_READ;
142 if(mask == (FB_DRAW | FB_READ))
144 if(ctx->draw_fb != fb || ctx->draw_fb_id != id || ctx->read_fb != fb || ctx->read_fb_id != id)
146 ctx->gl.BindFramebufferEXT(GL_FRAMEBUFFER_EXT, fb);
148 ctx->draw_fb = fb;
149 ctx->draw_fb_id = id;
150 ctx->draw_buffer = 0;
151 ctx->read_fb = fb;
152 ctx->read_fb_id = id;
153 ctx->read_buffer = 0;
155 ctx->draw_flip_height = flip_height;
156 ctx->read_flip_height = flip_height;
158 else if(mask == FB_DRAW)
160 if(ctx->draw_fb != fb || ctx->draw_fb_id != id)
162 ctx->gl.BindFramebufferEXT(GL_DRAW_FRAMEBUFFER, fb);
163 ctx->draw_fb = fb;
164 ctx->draw_fb_id = id;
165 ctx->draw_buffer = 0;
167 ctx->draw_flip_height = flip_height;
169 else if(mask == FB_READ)
171 if(ctx->read_fb != fb || ctx->read_fb_id != id)
173 ctx->gl.BindFramebufferEXT(GL_READ_FRAMEBUFFER, fb);
175 ctx->read_fb = fb;
176 ctx->read_fb_id = id;
177 ctx->read_buffer = 0;
179 ctx->read_flip_height = flip_height;
182 if(buffer)
184 if((mask & FB_DRAW) && ctx->draw_buffer != buffer)
186 ctx->gl.DrawBuffer(buffer);
187 ctx->draw_buffer = buffer;
190 if((mask & FB_READ) && ctx->read_buffer != buffer)
192 ctx->gl.ReadBuffer(buffer);
193 ctx->read_buffer = buffer;
198 static GLenum
199 _cairo_gl_context_set_any_framebuffer(cairo_gpu_context_t* ctx, GLuint fb, long long id, int buffer)
201 if(!ctx->space->fb_blit)
203 _cairo_gl_context_set_framebuffer(ctx, FB_READ | FB_DRAW, fb, id, buffer, -1);
204 return GL_FRAMEBUFFER_EXT;
207 _cairo_gl_context_set_framebuffer(ctx, FB_READ, fb, id, buffer, -1);
208 return GL_READ_FRAMEBUFFER;
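// Maps a cairo_extend_t to GL wrap modes (NONE -> CLAMP_TO_BORDER, PAD ->
// CLAMP_TO_EDGE, REPEAT -> REPEAT, REFLECT -> MIRRORED_REPEAT), falling back to
// GL_CLAMP when the space's extend_mask marks the mode as unsupported; for 1D
// patterns the T coordinate is always clamped to edge.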
211 static inline void
212 _cairo_gl_context_set_texture_extend(cairo_gpu_context_t* ctx, int idx, cairo_extend_t extend, int is_1d)
214 unsigned wrap_s, wrap_t;
215 if(!(ctx->space->extend_mask & (1 << extend)))
216 wrap_s = GL_CLAMP;
217 else
219 switch (extend)
221 case CAIRO_EXTEND_NONE:
222 wrap_s = GL_CLAMP_TO_BORDER;
223 break;
224 case CAIRO_EXTEND_PAD:
225 wrap_s = GL_CLAMP_TO_EDGE;
226 break;
227 case CAIRO_EXTEND_REPEAT:
228 wrap_s = GL_REPEAT;
229 break;
230 case CAIRO_EXTEND_REFLECT:
231 wrap_s = GL_MIRRORED_REPEAT;
232 break;
236 if(is_1d)
237 wrap_t = GL_CLAMP_TO_EDGE;
238 else
239 wrap_t = wrap_s;
241 if(_cairo_gpu_context__active_texture(ctx, idx)->wrap_s != wrap_s)
243 _cairo_gl_context_set_active_texture(ctx, idx);
244 ctx->gl.TexParameteri(_cairo_gl_context_active_target(ctx), GL_TEXTURE_WRAP_S, wrap_s);
245 _cairo_gpu_context__active_texture(ctx, idx)->wrap_s = wrap_s;
248 if(_cairo_gpu_context__active_texture(ctx, idx)->wrap_t != wrap_t)
250 _cairo_gl_context_set_active_texture(ctx, idx);
251 ctx->gl.TexParameteri(_cairo_gl_context_active_target(ctx), GL_TEXTURE_WRAP_T, wrap_t);
252 _cairo_gpu_context__active_texture(ctx, idx)->wrap_t = wrap_t;
256 // TODO: are we sure we don't want to use mipmaps?
257 static inline void
258 _cairo_gl_context_set_texture_filter(cairo_gpu_context_t* ctx, int idx, cairo_filter_t filter)
260 float anisotropy = 1.0;
261 unsigned gl_filter = GL_NEAREST;
263 switch (filter)
265 case CAIRO_FILTER_FAST:
266 case CAIRO_FILTER_NEAREST:
267 break;
268 case CAIRO_FILTER_BEST:
269 anisotropy = ctx->space->max_anisotropy;
270 gl_filter = GL_LINEAR;
271 break;
272 case CAIRO_FILTER_GOOD:
273 // TODO: is 4x anisotropic filtering on GOOD, which is the default, a good idea? Should we use 0? Maybe the maximum?
274 anisotropy = 4.0;
275 if(anisotropy > ctx->space->max_anisotropy)
276 anisotropy = ctx->space->max_anisotropy;
277 gl_filter = GL_LINEAR;
278 break;
279 case CAIRO_FILTER_BILINEAR:
280 gl_filter = GL_LINEAR;
281 break;
282 default:
283 case CAIRO_FILTER_GAUSSIAN:
284 ASSERT_NOT_REACHED;
287 if(_cairo_gpu_context__active_texture(ctx, idx)->filter != gl_filter)
289 _cairo_gl_context_set_active_texture(ctx, idx);
290 ctx->gl.TexParameteri(_cairo_gl_context_active_target(ctx), GL_TEXTURE_MIN_FILTER, gl_filter);
291 ctx->gl.TexParameteri(_cairo_gl_context_active_target(ctx), GL_TEXTURE_MAG_FILTER, gl_filter);
292 _cairo_gpu_context__active_texture(ctx, idx)->filter = gl_filter;
295 #ifndef DEBUG_DISABLE_ANISOTROPY
296 if(_cairo_gpu_context__active_texture(ctx, idx)->anisotropy != anisotropy)
298 _cairo_gl_context_set_active_texture(ctx, idx);
299 ctx->gl.TexParameteri(_cairo_gl_context_active_target(ctx), GL_TEXTURE_MAX_ANISOTROPY_EXT, anisotropy);
300 _cairo_gpu_context__active_texture(ctx, idx)->anisotropy = anisotropy;
302 #endif
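// Loads the pattern matrix either into the fixed-function GL_TEXTURE matrix
// (column-major 4x4 via LoadMatrixd) or, when a vertex program is active, into
// the VERTENV_TEX_MATRIX_* program environment parameters; `zm` supplies the
// coefficients that generate the third texture coordinate.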
305 static inline void
306 _cairo_gl_context_set_texture_matrix(cairo_gpu_context_t* ctx, int idx, cairo_matrix_t * matrix, float* zm)
308 if(!ctx->vertp_enabled)
310 GLdouble m[16];
311 _cairo_gl_context_set_active_texture(ctx, idx);
312 ctx->gl.MatrixMode(GL_TEXTURE);
313 m[0] = matrix->xx;
314 m[1] = matrix->yx;
315 m[2] = zm[0];
316 m[3] = 0;
317 m[4] = matrix->xy;
318 m[5] = matrix->yy;
319 m[6] = zm[1];
320 m[7] = 0;
321 m[8] = 0;
322 m[9] = 0;
323 m[10] = 0;
324 m[11] = 0;
325 m[12] = matrix->x0;
326 m[13] = matrix->y0;
327 m[14] = zm[2];
328 m[15] = 1;
329 ctx->gl.LoadMatrixd(m);
331 else
333 float xv[4] = {matrix->xx, matrix->yx, zm[0], 0};
334 float yv[4] = {matrix->xy, matrix->yy, zm[1], 0};
335 float wv[4] = {matrix->x0, matrix->y0, zm[2], 1};
337 _cairo_gpu_context_set_vert_param(ctx, VERTENV_TEX_MATRIX_X(idx), xv);
338 _cairo_gpu_context_set_vert_param(ctx, VERTENV_TEX_MATRIX_Y(idx), yv);
339 _cairo_gpu_context_set_vert_param(ctx, VERTENV_TEX_MATRIX_W(idx), wv);
343 // note that attributes->matrix must be adjusted by the caller, if necessary!!
344 static void
345 _cairo_gpu_context_set_texture_and_attributes_(cairo_gpu_context_t* ctx, int idx, cairo_gpu_texture_t* texture, cairo_surface_attributes_t* attributes, float* zm)
347 _cairo_gl_context_set_texture(ctx, idx, texture);
348 _cairo_gl_context_set_texture_matrix(ctx, idx, &attributes->matrix, zm);
349 _cairo_gl_context_set_texture_extend(ctx, idx, attributes->extend, (unsigned)attributes->extra & 1);
350 _cairo_gl_context_set_texture_filter(ctx, idx, attributes->filter);
353 static void
354 _cairo_gpu_context_set_texture_and_attributes(cairo_gpu_context_t* ctx, int idx, cairo_gpu_texture_t* texture, cairo_surface_attributes_t* attributes)
356 _cairo_gpu_context_set_texture_and_attributes_(ctx, idx, texture, attributes, _cairo_gpu_vec4_zero);
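// Translates a pixman format code into a GL (internal_format, format, type)
// triple. Returns the GL type, or 0 if the format cannot be uploaded directly
// and the caller should fall back to a pixman-side conversion.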
359 static int
360 cairo_gpu_gl_format(cairo_gpu_space_t* space, pixman_format_code_t f, int* internal_format, int* format)
362 *format = 0;
364 if(PIXMAN_FORMAT_RGB(f))
366 if(PIXMAN_FORMAT_TYPE(f) == PIXMAN_TYPE_BGRA)
368 *format = GL_BGRA;
369 return GL_UNSIGNED_INT_8_8_8_8;
371 else if(format)
373 if(PIXMAN_FORMAT_TYPE(f) == PIXMAN_TYPE_ABGR)
374 *format = GL_RGBA;
375 else if(PIXMAN_FORMAT_TYPE(f) == PIXMAN_TYPE_ARGB)
376 *format = GL_BGRA;
379 if(!*internal_format)
380 *internal_format = PIXMAN_FORMAT_A(f) ? GL_RGBA : GL_RGB;
382 switch(PIXMAN_FORMAT_G(f))
384 case 10:
385 return GL_UNSIGNED_INT_2_10_10_10_REV;
386 case 8:
387 return PIXMAN_FORMAT_BPP(f) == 24 ? GL_UNSIGNED_BYTE : GL_UNSIGNED_INT_8_8_8_8_REV;
388 case 6:
389 return GL_UNSIGNED_SHORT_5_6_5;
390 case 5:
391 return GL_UNSIGNED_SHORT_1_5_5_5_REV;
392 case 4:
393 return GL_UNSIGNED_SHORT_4_4_4_4_REV;
394 case 3:
395 return GL_UNSIGNED_BYTE_2_3_3_REV;
396 default:
397 return 0;
401 if(PIXMAN_FORMAT_A(f))
403 if(PIXMAN_FORMAT_A(f) == 8)
405 if(!*internal_format)
406 *internal_format = space->use_intensity ? GL_INTENSITY : GL_ALPHA;
408 if(*internal_format == GL_INTENSITY)
409 *format = GL_LUMINANCE;
410 else if(*internal_format == GL_ALPHA)
411 *format = GL_ALPHA;
412 else // let Pixman do the conversion; doing it in GL is impossible because there is no way to supply GL_INTENSITY as an input format
413 return 0;
414 return GL_UNSIGNED_BYTE;
416 else if(PIXMAN_FORMAT_A(f) == 1)
418 if(!*internal_format)
419 *internal_format = space->use_intensity ? GL_INTENSITY : GL_ALPHA;
420 *format = GL_COLOR_INDEX;
421 return GL_BITMAP;
425 return 0;
428 /* GL_FLOAT appears to be supported in all OpenGL versions */
429 static inline void
430 _cairo_gpu_context__upload_data(cairo_gpu_context_t* ctx, int idx, int internal_format, float* data, int width, int height)
432 ctx->gl.PixelStorei(GL_UNPACK_ALIGNMENT, 1);
433 ctx->gl.PixelStorei(GL_UNPACK_ROW_LENGTH, width);
435 _cairo_gl_context_set_active_texture(ctx, idx);
436 if(internal_format)
438 _cairo_gpu_context__set_mipmap_hint(ctx);
439 ctx->gl.TexImage2D(_cairo_gl_context_active_target(ctx), 0, internal_format, width, height, 0, GL_RGBA, GL_FLOAT, data);
441 else
442 ctx->gl.TexSubImage2D(_cairo_gl_context_active_target(ctx), 0, 0, 0, width, height, GL_RGBA, GL_FLOAT, data);
445 static inline void
446 _cairo_gpu_context_upload_data(cairo_gpu_context_t* ctx, int idx, cairo_gpu_texture_t* texture, float* data, int width, int height)
448 _cairo_gl_context_set_texture(ctx, idx, texture);
449 _cairo_gpu_context__upload_data(ctx, idx, GL_RGBA, data, width, height);
452 static void
453 _cairo_gpu_surface__flip_texture(cairo_gpu_surface_t* surface);
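// Uploads an image-surface rectangle either into the currently selected texture
// (idx >= 0, via TexImage2D/TexSubImage2D) or straight into the draw
// framebuffer (idx < 0, via DrawPixels). Strides are handled with
// GL_UNPACK_ROW_LENGTH/ALIGNMENT (copying into a temporary buffer when the
// stride cannot be expressed that way), 1-bpp sources go through
// GL_COLOR_INDEX + GL_BITMAP with a 2-entry pixel map, and on a GL error or
// unsupported format we fall back to converting through a temporary image
// surface and re-uploading.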
455 static cairo_status_t
456 _cairo_gpu_context__upload_pixels(cairo_gpu_context_t* ctx, cairo_gpu_surface_t* dst, int idx, cairo_image_surface_t * image_surface, int src_x, int src_y, int width, int height, int dst_x, int dst_y)
458 unsigned char* p;
459 unsigned char* buf = 0;
460 int f = image_surface->pixman_format;
461 int internal_format;
462 int format;
463 int i;
464 int type;
465 float i_to_x[2] = {0.0, 1.0};
467 internal_format = (idx >= 0) ? _cairo_gpu_context__active_texture(ctx, idx)->unsized_format : 0;
469 type = cairo_gpu_gl_format(ctx->space, f, &internal_format, &format);
470 if(!type)
471 goto fallback;
473 if(PIXMAN_FORMAT_BPP(f) == 1)
475 p = image_surface->data + src_y * image_surface->stride;
477 ctx->gl.PixelStorei(GL_UNPACK_LSB_FIRST, 1);
478 ctx->gl.PixelStorei(GL_UNPACK_ALIGNMENT, 1);
479 ctx->gl.PixelStorei(GL_UNPACK_ROW_LENGTH, image_surface->stride * 8);
480 ctx->gl.PixelStorei(GL_UNPACK_SKIP_PIXELS, src_x);
482 else
484 int cpp = PIXMAN_FORMAT_BPP(f) >> 3;
485 int stride = image_surface->stride;
486 int stride_pixels = stride / cpp;
487 int stride_red = stride_pixels * cpp;
489 p = image_surface->data + src_x * cpp + src_y * image_surface->stride;
491 if(stride_red == stride)
493 ctx->gl.PixelStorei(GL_UNPACK_ALIGNMENT, 1);
494 ctx->gl.PixelStorei(GL_UNPACK_ROW_LENGTH, stride_pixels);
496 else if(((stride_red + 1) & ~1) == stride)
498 ctx->gl.PixelStorei(GL_UNPACK_ALIGNMENT, 2);
499 ctx->gl.PixelStorei(GL_UNPACK_ROW_LENGTH, stride_pixels);
501 else if(((stride_red + 3) & ~3) == stride)
503 ctx->gl.PixelStorei(GL_UNPACK_ALIGNMENT, 4);
504 ctx->gl.PixelStorei(GL_UNPACK_ROW_LENGTH, stride_pixels);
506 else
508 buf = (unsigned char*)malloc(cpp * width * height);
509 for(i = 0; i < height; ++i)
510 memcpy(buf + i * cpp * width, p + i * image_surface->stride, cpp * width);
511 ctx->gl.PixelStorei(GL_UNPACK_ALIGNMENT, 1);
512 ctx->gl.PixelStorei(GL_UNPACK_ROW_LENGTH, 0);
513 p = buf;
517 if(format == GL_COLOR_INDEX)
519 if(!ctx->init_pixel_map)
521 ctx->gl.PixelMapfv(GL_PIXEL_MAP_I_TO_R, 2, i_to_x);
522 ctx->gl.PixelMapfv(GL_PIXEL_MAP_I_TO_G, 2, i_to_x);
523 ctx->gl.PixelMapfv(GL_PIXEL_MAP_I_TO_B, 2, i_to_x);
524 ctx->gl.PixelMapfv(GL_PIXEL_MAP_I_TO_A, 2, i_to_x);
525 ctx->init_pixel_map = 1;
529 /* For GL_RGB textures, alpha values are always sampled as 1, even if CLAMP_TO_BORDER is set.
530 This causes the device-offset-fractional test to fail.
531 It seems unfixable without fragment programs (and even then it would probably be inefficient to implement)
533 TODO: when fragment programs are supported, maybe conditionally eliminate this
536 // we also do this so that we can steal the alpha channel to draw trapezoid masks
537 if(internal_format == GL_RGB)
538 internal_format = GL_RGBA;
540 if(!PIXMAN_FORMAT_A(f) && PIXMAN_FORMAT_BPP(f) == 32)
542 ctx->gl.PixelTransferf(GL_ALPHA_SCALE, 0.0);
543 ctx->gl.PixelTransferf(GL_ALPHA_BIAS, 1.0);
546 if(idx < 0)
548 ctx->gl.PixelZoom(1.0, ctx->draw_flip_height >= 0 ? -1.0 : 1.0);
549 if(ctx->space->has_window_pos)
551 if(ctx->draw_flip_height >= 0)
552 dst_y = ctx->draw_flip_height - dst_y;
553 ctx->gl.WindowPos2i(dst_x, dst_y);
555 else
556 ctx->gl.RasterPos2i(dst_x, dst_y);
557 ctx->gl.DrawPixels(width, height, format, type, p);
559 else
561 if(dst)
563 if(dst->texture.non_upside_down)
565 // if we want to relax this, we must flip the data if necessary
566 assert(dst_x <= 0 && dst_y <= 0 && width >= (int)dst->width && height >= (int)dst->height);
567 _cairo_gpu_surface__flip_texture(dst);
570 else // texture must be upside-down
571 assert(dst_x < 0 && dst_y < 0);
573 _cairo_gl_context_set_active_texture(ctx, idx);
574 if(dst_x < 0)
576 _cairo_gpu_context__set_mipmap_hint(ctx);
577 if(_cairo_gl_context_active_target(ctx) == GL_TEXTURE_2D && !ctx->space->tex_npot && (!is_pow2(width) || !is_pow2(height)))
579 ctx->gl.TexImage2D(_cairo_gl_context_active_target(ctx), 0, internal_format, higher_pow2(width), higher_pow2(height), 0, format, type, p);
580 ctx->gl.TexSubImage2D(_cairo_gl_context_active_target(ctx), 0, 0, 0, width, height, format, type, p);
582 else
583 ctx->gl.TexImage2D(_cairo_gl_context_active_target(ctx), 0, internal_format, width, height, 0, format, type, p);
585 _cairo_gpu_context__active_texture(ctx, idx)->unsized_format = internal_format;
587 else
588 ctx->gl.TexSubImage2D(_cairo_gl_context_active_target(ctx), 0, dst_x, dst_y, width, height, format, type, p);
591 if(!PIXMAN_FORMAT_A(f) && PIXMAN_FORMAT_BPP(f) == 32)
593 ctx->gl.PixelTransferf(GL_ALPHA_SCALE, 1.0);
594 ctx->gl.PixelTransferf(GL_ALPHA_BIAS, 0.0);
597 if(PIXMAN_FORMAT_BPP(f) == 1)
598 ctx->gl.PixelStorei(GL_UNPACK_SKIP_PIXELS, 0);
600 if(buf)
601 free(buf);
603 if(ctx->gl.GetError())
605 cairo_surface_t* converted;
606 cairo_pattern_t* pattern;
607 cairo_format_t fallback_format;
608 cairo_int_status_t status;
610 while(ctx->gl.GetError())
613 fallback:
614 // TODO: use pixman directly
615 fallback_format = _cairo_format_from_content(image_surface->base.content);
617 if(fallback_format == image_surface->format)
619 fallback_format = CAIRO_FORMAT_ARGB32;
620 if(fallback_format == image_surface->format)
621 return CAIRO_INT_STATUS_UNSUPPORTED;
624 // this does not recurse into the GL backend
625 // TODO: maybe use pixman directly?
626 converted = cairo_image_surface_create(fallback_format, width, height);
627 pattern = cairo_pattern_create_for_surface(&image_surface->base);
628 status = _cairo_surface_composite(CAIRO_OPERATOR_SOURCE, pattern, 0, converted, src_x, src_y, 0, 0, 0, 0, width, height);
630 _cairo_gpu_context__upload_pixels(ctx, dst, idx, (cairo_image_surface_t*)converted, 0, 0, width, height, dst_x, dst_y);
631 cairo_pattern_destroy(pattern);
632 cairo_surface_destroy(converted);
633 return status;
636 return CAIRO_STATUS_SUCCESS;
639 static inline void
640 _cairo_gpu_context_upload_pixels(cairo_gpu_context_t* ctx, cairo_gpu_surface_t* dst, int idx, cairo_gpu_texture_t* texture, cairo_image_surface_t * image_surface, int src_x, int src_y, int width, int height, int dst_x, int dst_y)
642 idx = _cairo_gl_context_set_texture(ctx, idx, texture);
643 _cairo_gpu_context__upload_pixels(ctx, dst, idx, image_surface, src_x, src_y, width, height, dst_x, dst_y);
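// Looks up (or lazily compiles and caches) an ARB program keyed by
// table | value in the space-wide hash table, then binds it. If another
// context raced us and inserted the same program first, the freshly compiled
// copy is deleted and the cached one is used instead.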
646 static inline void
647 _cairo_gpu_context__set_prog(cairo_gpu_context_t* ctx, unsigned target, unsigned table, unsigned value, char* (*writef)(cairo_gpu_space_t*, unsigned))
649 unsigned p;
650 cairo_gpu_int_entry_t* entry = _cairo_gpu_space__lookup(ctx->space, table | value);
651 if(entry)
652 p = entry->v;
653 else
655 char* ps;
656 ps = writef(ctx->space, value);
657 ctx->gl.GenProgramsARB(1, &p);
658 ctx->gl.BindProgramARB(target, p);
659 ctx->gl.ProgramStringARB(target, GL_PROGRAM_FORMAT_ASCII_ARB, strlen(ps), ps);
660 free(ps);
663 unsigned todel = 0;
664 cairo_hash_entry_t lookup;
665 lookup.hash = table | value;
667 CAIRO_MUTEX_LOCK(ctx->space->mutex);
668 entry = (cairo_gpu_int_entry_t*)_cairo_hash_table_lookup(ctx->space->table, &lookup);
669 if(entry)
671 todel = p;
672 p = entry->v;
674 else
676 entry = (cairo_gpu_int_entry_t*)malloc(sizeof(cairo_gpu_int_entry_t));
677 entry->base.hash = table | value;
678 entry->v = p;
680 if(_cairo_hash_table_insert(ctx->space->table, &entry->base))
682 // XXX: this will leak the program
683 free(entry);
686 CAIRO_MUTEX_UNLOCK(ctx->space->mutex);
687 if(todel)
688 ctx->gl.DeleteProgramsARB(1, &todel);
692 ctx->gl.BindProgramARB(target, p);
695 static void
696 _cairo_gpu_context__init_vert_fixed(cairo_gpu_context_t* ctx)
698 GLfloat tex_gen_s[] = { 1.0f, 0.0f, 0.0f, 0.0f };
699 GLfloat tex_gen_t[] = { 0.0f, 1.0f, 0.0f, 0.0f };
700 int i;
702 for(i = 0; i < 2; ++i)
704 _cairo_gl_context_set_active_texture(ctx, i);
705 ctx->gl.TexGeni(GL_S, GL_TEXTURE_GEN_MODE, GL_OBJECT_LINEAR);
706 ctx->gl.TexGenfv(GL_S, GL_EYE_PLANE, tex_gen_s);
708 ctx->gl.TexGeni(GL_T, GL_TEXTURE_GEN_MODE, GL_OBJECT_LINEAR);
709 ctx->gl.TexGenfv(GL_T, GL_EYE_PLANE, tex_gen_t);
712 _cairo_gl_context_set_active_texture(ctx, 0);
714 ctx->init_vert_fixed = 1;
717 static inline void
718 _cairo_gl_context_set_vert_fixed(cairo_gpu_context_t* ctx, unsigned vert)
720 unsigned diff_vert = ctx->vert ^ vert;
721 int i;
723 if(ctx->vertp_enabled)
725 ctx->gl.Disable(GL_VERTEX_PROGRAM_ARB);
726 ctx->vertp_enabled = 0;
729 if(!ctx->init_vert_fixed)
730 _cairo_gpu_context__init_vert_fixed(ctx);
732 for(i = 0; i < MAX_OPERANDS; ++i)
734 if((diff_vert >> (VERT_TEX_SHIFT + i * VERT_TEX_BITS)) & VERT_TEX_MASK)
736 int k = (vert >> (VERT_TEX_SHIFT + i * VERT_TEX_BITS)) & VERT_TEX_MASK;
737 if(k == VERT_TEX_GEN)
739 _cairo_gl_context_set_active_texture(ctx, i);
740 ctx->gl.Enable(GL_TEXTURE_GEN_S);
741 ctx->gl.Enable(GL_TEXTURE_GEN_T);
743 else
745 // need vertex programs for k != i
746 assert(!k || ((k - 1) == i));
747 _cairo_gl_context_set_active_texture(ctx, i);
748 ctx->gl.Disable(GL_TEXTURE_GEN_S);
749 ctx->gl.Disable(GL_TEXTURE_GEN_T);
754 ctx->vert = vert;
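// Emits the vertex program source for the given VERT_* state bits. The NVvp4.0
// profile is used when the generated code needs DIV or DP2A (these are only
// counted when has_gpu_program4 is set), plain ARBvp1.0 otherwise;
// ARB_position_invariant keeps fixed-function clipping working.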
757 static inline char*
758 _cairo_gpu_context__write_vert(cairo_gpu_space_t* space, unsigned vert)
760 cairo_gpu_program_builder_t pb;
761 cairo_gpu_program_builder_t* p = &pb;
762 cairo_gpu_string_builder_t* builder = &p->body;
764 memset(&pb, 0, sizeof(pb));
765 p->div_uses = p->dp2a_uses = space->has_gpu_program4 ? 0 : -1;
767 p->in_position = "vertex.position";
768 p->in_color = "vertex.color";
769 p->in_texcoord[0] = p->in_texcoord[1] = "vertex.texcoord[0]";
770 p->out_position = "result.position";
771 p->out_color = "result.color";
772 p->out_texcoord[0] = "result.texcoord[0]";
773 p->out_texcoord[1] = "result.texcoord[1]";
775 _cairo_gpu_write_vert(p, vert);
777 builder = &p->main;
778 if(p->dp2a_uses > 0 || p->div_uses > 0)
779 OUT_("!!NVvp4.0\n");
780 else
781 OUT_("!!ARBvp1.0\n");
783 // only way to use clipping
784 OUT("OPTION ARB_position_invariant");
785 return _cairo_gpu_program_builder_finish(p);
788 static inline void
789 _cairo_gl_context_set_vert_prog(cairo_gpu_context_t* ctx, unsigned vert)
791 unsigned vertp = vert & VERT_PROG_MASK;
793 if(!ctx->vertp_enabled)
795 ctx->gl.Enable(GL_VERTEX_PROGRAM_ARB);
796 ctx->vertp_enabled = 1;
799 if(ctx->vertp != vertp)
801 _cairo_gpu_context__set_prog(ctx, GL_VERTEX_PROGRAM_ARB, TABLE_VERT, vertp, _cairo_gpu_context__write_vert);
802 ctx->vertp = vertp;
805 ctx->vert = (ctx->vert & VERT_PROG_MASK) | (vert & ~VERT_PROG_MASK);
808 static inline void
809 _cairo_gl_context_set_vert(cairo_gpu_context_t* ctx, unsigned vert)
811 if(!ctx->space->use_vert_prog)
812 _cairo_gl_context_set_vert_fixed(ctx, vert);
813 else
814 _cairo_gl_context_set_vert_prog(ctx, vert);
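// State for assembling an ARB_texture_env_combine setup: `slot` is the next
// free combiner argument slot, and the two masks record which slots already
// source an RGB or alpha argument (slot 0 maps to SRC0 on unit 0, slot N > 0
// to SRC1 on unit N-1).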
817 typedef struct
819 unsigned slot;
820 unsigned rgb_slot_mask;
821 unsigned alpha_slot_mask;
822 } cairo_gpu_combine_setup_t;
824 static inline void
825 _cairo_gpu_combine_operand_at(cairo_gpu_context_t* ctx, cairo_gpu_combine_setup_t* combs, unsigned slot, unsigned src_rgb, unsigned src_alpha, unsigned operand_rgb)
827 int op = slot ? 1 : 0;
828 int unit = slot ? (slot - 1) : 0;
829 _cairo_gl_context_set_active_texture(ctx, unit);
830 if(src_rgb)
832 ctx->gl.TexEnvi(GL_TEXTURE_ENV, GL_SRC0_RGB + op, src_rgb);
833 ctx->gl.TexEnvi(GL_TEXTURE_ENV, GL_OPERAND0_RGB + op, operand_rgb);
834 combs->rgb_slot_mask |= 1 << slot;
836 if(src_alpha)
838 ctx->gl.TexEnvi(GL_TEXTURE_ENV, GL_SRC0_ALPHA + op, src_alpha);
839 combs->alpha_slot_mask |= 1 << slot;
843 static inline int
844 _cairo_gpu_combine_slot(cairo_gpu_combine_setup_t* combs)
846 while((1 << combs->slot) & (combs->rgb_slot_mask | combs->alpha_slot_mask))
847 ++combs->slot;
849 return combs->slot;
852 static inline void
853 _cairo_gpu_combine_operand(cairo_gpu_context_t* ctx, cairo_gpu_combine_setup_t* combs, unsigned src_rgb, unsigned src_alpha, unsigned operand_rgb)
855 _cairo_gpu_combine_operand_at(ctx, combs, _cairo_gpu_combine_slot(combs), src_rgb, src_alpha, operand_rgb);
858 static void
859 _cairo_gpu_context__init_frag_combine(cairo_gpu_context_t* ctx)
861 int i;
863 // avoid for now
864 //if(0 && GLEW_NV_texture_shader)
865 if(0)
867 ctx->nv_texture_shader = 1;
868 ctx->gl.Enable(GL_TEXTURE_SHADER_NV);
871 for(i = 0; i < ctx->space->tex_units; ++i)
873 _cairo_gl_context_set_active_texture(ctx, i);
874 ctx->gl.TexEnvi(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_COMBINE);
876 ctx->gl.TexEnvi(GL_TEXTURE_ENV, GL_SRC0_RGB, GL_PREVIOUS);
877 ctx->gl.TexEnvi(GL_TEXTURE_ENV, GL_SRC0_ALPHA, GL_PREVIOUS);
879 // this is clobbered in unit 0
880 ctx->gl.TexEnvi(GL_TEXTURE_ENV, GL_OPERAND0_RGB, GL_SRC_COLOR);
882 ctx->gl.TexEnvi(GL_TEXTURE_ENV, GL_OPERAND0_ALPHA, GL_SRC_ALPHA);
883 ctx->gl.TexEnvi(GL_TEXTURE_ENV, GL_OPERAND1_ALPHA, GL_SRC_ALPHA);
885 ctx->textures[i].nv_texture_shader = GL_NONE;
888 _cairo_gl_context_set_active_texture(ctx, 0);
889 ctx->gl.TexEnvi(GL_TEXTURE_ENV, GL_SRC2_RGB, GL_CONSTANT);
890 ctx->gl.TexEnvi(GL_TEXTURE_ENV, GL_OPERAND2_RGB, GL_SRC_ALPHA);
892 ctx->init_frag_modulate = 0;
893 ctx->init_frag_combine = 1;
896 static inline void
897 _cairo_gpu_context_set_constant_color(cairo_gpu_context_t* ctx, cairo_gpu_color4_t* color)
899 if(!ctx->fragp_enabled)
901 if(ctx->constant_unit == -2)
902 ctx->gl.Color4fv(&color->c.r);
903 else if(ctx->constant_unit >= 0)
905 _cairo_gl_context_set_active_texture(ctx, ctx->constant_unit);
906 ctx->gl.TexEnvfv(GL_TEXTURE_ENV, GL_TEXTURE_ENV_COLOR, &color->c.r);
909 else
911 _cairo_gpu_context_set_frag_param(ctx, FRAGENV_CONSTANT, &color->c.r);
915 // XXX: we may fail due to not having enough texture units, and this will result in silently wrong rendering!!!
916 // pre-NV30: 2
917 // NV30+: 4 (but we use fragment programs)
918 // R300+, Intel: 8
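// Configures the fixed-function texture environment (GL_COMBINE) to implement
// the requested FRAG_* combination: tex0/tex1 select RGBA or AAAA sampling,
// FRAG_CONSTANT and FRAG_PRIMARY are added as extra combiner arguments, and
// the 111C mode appears to use INTERPOLATE + DOT3 stages together with the
// per-component constant color set above to select a single component.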
920 static inline void
921 _cairo_gl_context_set_frag_combine(cairo_gpu_context_t* ctx, unsigned frag)
923 unsigned tex0 = (frag >> (FRAG_TEX_SHIFT)) & FRAG_TEX_MASK;
924 unsigned tex1 = (frag >> (FRAG_TEX_SHIFT + FRAG_TEX_BITS)) & FRAG_TEX_MASK;
925 unsigned tex_2d_mask = 0;
926 unsigned tex_rect_mask = 0;
927 unsigned cfrag = frag;
928 unsigned tex2_src0_rgb_constant = 0;
929 cairo_gpu_combine_setup_t combs;
930 combs.slot = 0;
931 combs.rgb_slot_mask = 0;
932 combs.alpha_slot_mask = 0;
934 if(ctx->fragp_enabled)
936 ctx->gl.Disable(GL_FRAGMENT_PROGRAM_ARB);
937 ctx->fragp_enabled = 0;
940 if(cfrag & (3 << FRAG_COMPONENT_SHIFT))
942 unsigned c = (cfrag >> FRAG_COMPONENT_SHIFT) & 3;
943 cfrag |= 3 << FRAG_COMPONENT_SHIFT;
945 if(c != ctx->component)
947 float v[4];
948 v[0] = v[1] = v[2] = v[3] = 0.5;
949 v[c - 1] = 1.0;
951 _cairo_gl_context_set_active_texture(ctx, 0);
952 ctx->gl.TexEnvfv(GL_TEXTURE_ENV, GL_TEXTURE_ENV_COLOR, v);
953 ctx->component = c;
956 else
957 ctx->component = 0;
959 if(ctx->frag == cfrag)
960 return;
962 ctx->constant_unit = -1;
964 if(!ctx->init_frag_combine)
965 _cairo_gpu_context__init_frag_combine(ctx);
966 else if(ctx->init_frag_modulate)
968 int i;
969 for(i = 0; i < ctx->space->tex_units; ++i)
971 _cairo_gl_context_set_active_texture(ctx, i);
972 ctx->gl.TexEnvi(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_COMBINE);
974 ctx->init_frag_modulate = 0;
977 if(tex0)
979 assert(tex0 & FRAG_TEX_COLOR_MASK);
980 if((tex0 & FRAG_TEX_COLOR_MASK) == FRAG_TEX_COLOR_RGBA)
981 _cairo_gpu_combine_operand_at(ctx, &combs, 0, GL_TEXTURE, GL_TEXTURE, GL_SRC_COLOR);
982 else if((tex0 & FRAG_TEX_COLOR_MASK) == FRAG_TEX_COLOR_AAAA)
983 _cairo_gpu_combine_operand_at(ctx, &combs, 0, GL_TEXTURE, GL_TEXTURE, GL_SRC_ALPHA);
984 else if((tex0 & FRAG_TEX_COLOR_MASK) == FRAG_TEX_COLOR_111C)
986 _cairo_gpu_combine_operand_at(ctx, &combs, 1, GL_CONSTANT, 0, GL_SRC_COLOR);
987 _cairo_gpu_combine_operand_at(ctx, &combs, 2, GL_CONSTANT, 0, GL_SRC_COLOR);
989 if((frag & (FRAG_OP_MASK | FRAG_OPPOS_TEX1)) == (OP_MUL_ALPHA << FRAG_OP_SHIFT))
992 else
993 assert(0);
995 if(!((tex0 & FRAG_TEX_COLOR_MASK) == FRAG_TEX_COLOR_111C)
996 && ((frag & (FRAG_OP_MASK | FRAG_OPPOS_TEX1)) == (OP_MUL_ALPHA << FRAG_OP_SHIFT)))
997 _cairo_gpu_combine_operand_at(ctx, &combs, 1, GL_TEXTURE, 0, GL_SRC_ALPHA);
999 if(tex0 & FRAG_TEX_RECTANGLE)
1000 tex_rect_mask |= 1;
1001 else
1002 tex_2d_mask |= 1;
1005 if(tex1)
1007 int slot = _cairo_gpu_combine_slot(&combs);
1008 assert(tex0 & FRAG_TEX_COLOR_MASK);
1009 assert(tex1 & FRAG_TEX_COLOR_MASK);
1011 // avoid having tex0 * tex1 in the first stage and nothing in the second stage
1012 if(!ctx->space->crossbar || !(frag & (FRAG_OPPOS_TEX1 | FRAG_PRIMARY | FRAG_CONSTANT)))
1014 if(slot < 2)
1016 slot = 2;
1018 if(frag & FRAG_OPPOS_TEX1)
1020 combs.rgb_slot_mask |= slot - 1;
1021 combs.alpha_slot_mask |= slot - 1;
1026 if((tex1 & FRAG_TEX_COLOR_MASK) == FRAG_TEX_COLOR_RGBA)
1027 _cairo_gpu_combine_operand_at(ctx, &combs, slot, slot == 2 ? GL_TEXTURE : GL_TEXTURE1_ARB, slot == 2 ? GL_TEXTURE : GL_TEXTURE1_ARB, GL_SRC_COLOR);
1028 else if((tex1 & FRAG_TEX_COLOR_MASK) == FRAG_TEX_COLOR_AAAA)
1029 _cairo_gpu_combine_operand_at(ctx, &combs, slot, slot == 2 ? GL_TEXTURE : GL_TEXTURE1_ARB, slot == 2 ? GL_TEXTURE : GL_TEXTURE1_ARB, GL_SRC_ALPHA);
1030 else
1031 assert(0);
1033 if(tex1 & FRAG_TEX_RECTANGLE)
1034 tex_rect_mask |= 2;
1035 else
1036 tex_2d_mask |= 2;
1039 if((frag & (FRAG_OP_MASK | FRAG_OPPOS_TEX1)) == ((OP_MUL_ALPHA << FRAG_OP_SHIFT) | FRAG_OPPOS_TEX1))
1041 assert(tex1 & FRAG_TEX_COLOR_MASK);
1042 _cairo_gpu_combine_operand(ctx, &combs, GL_PREVIOUS, 0, GL_SRC_ALPHA);
1045 if((frag & FRAG_CONSTANT) || !frag)
1047 _cairo_gpu_combine_operand(ctx, &combs, GL_CONSTANT, GL_CONSTANT, GL_SRC_COLOR);
1048 ctx->constant_unit = combs.slot ? (combs.slot - 1) : 0;
1049 if(!frag)
1050 _cairo_gpu_context_set_constant_color(ctx, &_cairo_gpu_white);
1053 if(frag & FRAG_PRIMARY)
1054 _cairo_gpu_combine_operand(ctx, &combs, GL_PRIMARY_COLOR, GL_PRIMARY_COLOR, GL_SRC_COLOR);
1057 int i;
1058 for(i = 0; i < ctx->space->tex_units; ++i)
1060 unsigned combine_rgb = !(combs.rgb_slot_mask & (1 << (i + 1))) ? GL_REPLACE : GL_MODULATE;
1061 unsigned combine_alpha = !(combs.alpha_slot_mask & (1 << (i + 1))) ? GL_REPLACE : GL_MODULATE;
1063 if((tex0 & FRAG_TEX_COLOR_MASK) == FRAG_TEX_COLOR_111C)
1065 if(i == 0)
1066 combine_rgb = GL_INTERPOLATE;
1067 else if(i == 1)
1068 combine_rgb = GL_DOT3_RGBA;
1069 else if(i == 2)
1071 if((frag & (FRAG_OP_MASK | FRAG_OPPOS_TEX1)) == (OP_MUL_ALPHA << FRAG_OP_SHIFT))
1073 else
1075 tex2_src0_rgb_constant = 1;
1077 if(combine_rgb == GL_MODULATE && i == ctx->constant_unit)
1078 combine_rgb = GL_REPLACE;
1079 else
1081 float v[4] = {1, 1, 1, 1};
1082 _cairo_gl_context_set_active_texture(ctx, i);
1083 ctx->gl.TexEnvfv(GL_TEXTURE_ENV, GL_TEXTURE_ENV_COLOR, v);
1089 if(i && combine_rgb == GL_REPLACE && combine_alpha == GL_REPLACE)
1091 _cairo_gl_context_set_texture_target(ctx, i, 0);
1092 _cairo_gpu_context__set_nv_texture_shader(ctx, i, GL_NONE);
1094 else
1096 if((1 << i) & tex_2d_mask)
1098 _cairo_gl_context_set_texture_target(ctx, i, GL_TEXTURE_2D);
1099 _cairo_gpu_context__set_nv_texture_shader(ctx, i, GL_TEXTURE_2D);
1101 else if((1 << i) & tex_rect_mask)
1103 _cairo_gl_context_set_texture_target(ctx, i, GL_TEXTURE_RECTANGLE_ARB);
1104 _cairo_gpu_context__set_nv_texture_shader(ctx, i, GL_TEXTURE_RECTANGLE_ARB);
1106 else
1108 _cairo_gl_context_set_texture(ctx, i, &ctx->space->dummy_texture);
1109 _cairo_gl_context_set_texture_target(ctx, i, GL_TEXTURE_2D);
1110 _cairo_gpu_context__set_nv_texture_shader(ctx, i, GL_PASS_THROUGH_NV);
1113 _cairo_gl_context_set_active_texture(ctx, i);
1114 ctx->gl.TexEnvi(GL_TEXTURE_ENV, GL_COMBINE_RGB, combine_rgb);
1115 ctx->gl.TexEnvi(GL_TEXTURE_ENV, GL_COMBINE_ALPHA, combine_alpha);
1119 if(tex2_src0_rgb_constant != ctx->tex2_src0_rgb_constant)
1121 _cairo_gl_context_set_active_texture(ctx, 2);
1122 ctx->gl.TexEnvi(GL_TEXTURE_ENV, GL_SRC0_RGB, tex2_src0_rgb_constant ? GL_CONSTANT : GL_PREVIOUS);
1123 ctx->tex2_src0_rgb_constant = tex2_src0_rgb_constant;
1126 ctx->frag = cfrag;
1129 static inline void
1130 _cairo_gl_context_set_frag_primary(cairo_gpu_context_t* ctx)
1132 int i;
1134 if(ctx->fragp_enabled)
1136 ctx->gl.Disable(GL_FRAGMENT_PROGRAM_ARB);
1137 ctx->fragp_enabled = 0;
1140 if(ctx->frag == FRAG_PRIMARY)
1141 return;
1143 for(i = 0; i < ctx->space->tex_units; ++i)
1145 _cairo_gl_context_set_texture_target(ctx, i, 0);
1146 _cairo_gpu_context__set_nv_texture_shader(ctx, i, GL_NONE);
1149 ctx->frag = FRAG_PRIMARY;
1153 // TODO: when using this, masks that are stored as RGBA but are not component-alpha need to be copied to an ALPHA texture
1154 // TODO: when using this, the span renderer AND onepass trapezoid geometry must embed the constant value in the primary color(s)
1156 // Vanilla path without ARB_texture_env_combine. Proof of concept. Currently unfinished.
1157 static inline void
1158 _cairo_gl_context_set_frag_modulate(cairo_gpu_context_t* ctx, unsigned frag)
1160 unsigned textures = 0;
1161 unsigned tex_2d_mask = 0;
1162 unsigned tex_rect_mask = 0;
1164 ctx->constant_unit = -1;
1166 if(!ctx->init_frag_modulate)
1168 int i;
1169 for(i = 1; i < ctx->space->tex_units; ++i)
1171 _cairo_gl_context_set_active_texture(ctx, i);
1172 ctx->gl.TexEnvi(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_MODULATE);
1174 ctx->init_frag_modulate = 1;
1177 assert(!(frag & FRAG_OP_MASK));
1179 if(frag & FRAG_CONSTANT)
1181 assert(!(frag & FRAG_PRIMARY)); // must be baked in if so
1182 ctx->constant_unit = -2; // put in primary color
1185 if(frag & (FRAG_TEX_MASK << FRAG_TEX_SHIFT))
1187 if(frag & (FRAG_TEX_RECTANGLE << FRAG_TEX_SHIFT))
1188 tex_rect_mask |= 1;
1189 else
1190 tex_2d_mask |= 1;
1192 _cairo_gl_context_set_active_texture(ctx, 0);
1193 ctx->gl.TexEnvi(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, (frag & (FRAG_PRIMARY | FRAG_CONSTANT)) ? GL_MODULATE : GL_REPLACE);
1194 ++textures;
1197 if(frag & (FRAG_TEX_MASK << (FRAG_TEX_SHIFT + FRAG_TEX_BITS)))
1199 if(frag & (FRAG_TEX_RECTANGLE << (FRAG_TEX_SHIFT + FRAG_TEX_BITS)))
1200 tex_rect_mask |= 2;
1201 else
1202 tex_2d_mask |= 2;
1203 ++textures;
1207 int i;
1208 for(i = 0; i < ctx->space->tex_units; ++i)
1210 if((1 << i) & tex_2d_mask)
1212 _cairo_gl_context_set_texture_target(ctx, i, GL_TEXTURE_2D);
1213 _cairo_gpu_context__set_nv_texture_shader(ctx, i, GL_TEXTURE_2D);
1215 else if((1 << i) & tex_rect_mask)
1217 _cairo_gl_context_set_texture_target(ctx, i, GL_TEXTURE_RECTANGLE_ARB);
1218 _cairo_gpu_context__set_nv_texture_shader(ctx, i, GL_TEXTURE_RECTANGLE_ARB);
1220 else
1222 _cairo_gl_context_set_texture_target(ctx, i, 0);
1223 _cairo_gpu_context__set_nv_texture_shader(ctx, i, GL_NONE);
1228 ctx->frag = frag;
1231 static inline char*
1232 _cairo_gpu_context__write_frag(cairo_gpu_space_t* space, unsigned frag)
1234 cairo_gpu_program_builder_t pb;
1235 cairo_gpu_program_builder_t* p = &pb;
1236 cairo_gpu_string_builder_t* builder = &p->body;
1238 memset(&pb, 0, sizeof(pb));
1239 p->div_uses = p->dp2a_uses = space->has_fragment_program2 ? 0 : -1;
1241 p->in_position = "fragment.position";
1242 p->in_color = "fragment.color";
1243 p->in_texcoord[0] = "fragment.texcoord[0]";
1244 p->in_texcoord[1] = "fragment.texcoord[1]";
1245 p->out_color = "result.color";
1247 _cairo_gpu_write_frag(p, frag);
1249 builder = &p->main;
1250 OUT_("!!ARBfp1.0\n");
1251 if(p->div_uses > 0 || p->dp2a_uses > 0)
1252 OUT("OPTION NV_fragment_program2");
1253 OUT("OPTION ARB_precision_hint_nicest");
1254 return _cairo_gpu_program_builder_finish(p);
1257 static inline void
1258 _cairo_gl_context_set_frag_prog(cairo_gpu_context_t* ctx, unsigned frag)
1260 if(!ctx->fragp_enabled)
1262 ctx->gl.Enable(GL_FRAGMENT_PROGRAM_ARB);
1263 ctx->fragp_enabled = 1;
1266 if(ctx->fragp != frag)
1268 _cairo_gpu_context__set_prog(ctx, GL_FRAGMENT_PROGRAM_ARB, TABLE_FRAG, frag, _cairo_gpu_context__write_frag);
1269 ctx->fragp = frag;
1273 static void
1274 _cairo_gl_context_set_frag(cairo_gpu_context_t* ctx, unsigned frag)
1276 if(!ctx->space->use_frag_prog)
1278 if(frag == FRAG_PRIMARY)
1279 _cairo_gl_context_set_frag_primary(ctx);
1280 else if(ctx->space->has_combine)
1281 _cairo_gl_context_set_frag_combine(ctx, frag);
1282 else
1283 _cairo_gl_context_set_frag_modulate(ctx, frag);
1285 else
1286 _cairo_gl_context_set_frag_prog(ctx, frag);
1289 static void
1290 _cairo_gpu_context_set_vert_frag(cairo_gpu_context_t* ctx, unsigned vert, unsigned frag)
1292 if(frag & FRAG_CONSTANT && !ctx->space->use_frag_prog && !ctx->space->has_combine)
1293 vert |= VERT_COLOR_POSTOP;
1295 _cairo_gl_context_set_vert(ctx, vert);
1296 _cairo_gl_context_set_frag(ctx, frag);
1299 static inline void
1300 _cairo_gpu_context_set_translation(cairo_gpu_context_t* ctx, int dx, int dy)
1302 dx -= ctx->x_offset;
1303 dy -= ctx->y_offset;
1304 if(dx || dy)
1306 ctx->gl.MatrixMode(GL_MODELVIEW);
1307 ctx->gl.Translatef(dx, dy, 0);
1308 ctx->x_offset += dx;
1309 ctx->y_offset += dy;
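// Sets the viewport and an orthographic projection in pixel coordinates,
// choosing the y direction based on whether the draw target needs
// window-system y flipping (draw_flip_height >= 0); the Translated call
// applies the -x/-y viewport offset plus the empirically needed 1/8192
// epsilon (see the TODO below).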
1313 static void
1314 _cairo_gpu_context_set_viewport(cairo_gpu_context_t* ctx, int x, int y, int width, int height)
1316 int proj_sign = ctx->draw_flip_height < 0;
1317 if(proj_sign != ctx->proj_sign || ctx->viewport_width != width || ctx->viewport_height != height || ctx->viewport_x != x || ctx->viewport_y != y)
1319 ctx->gl.Viewport(x, (ctx->draw_flip_height >= 0) ? (ctx->draw_flip_height - y - height) : y, width, height);
1321 ctx->gl.MatrixMode(GL_PROJECTION);
1322 ctx->gl.LoadIdentity();
1323 if(proj_sign)
1324 ctx->gl.Ortho(0, width, 0, height, -1.0, 1.0);
1325 else
1326 ctx->gl.Ortho(0, width, height, 0, -1.0, 1.0);
1328 // we apparently need an epsilon to make a1-image-sample and a1-traps-sample pass
1329 // TODO: find out why
1330 // ctx->gl.Translated(1.0 / (1 << 13) - 1.0 / (1 << 21) + 1.0 / (1 << 26), 1.0 / (1 << 13) - 1.0 / (1 << 21) + 1.0 / (1 << 26), 0.0);
1331 ctx->gl.Translated(1.0 / (1 << 13) - x, 1.0 / (1 << 13) - y, 0.0);
1333 ctx->proj_sign = proj_sign;
1334 ctx->viewport_x = x;
1335 ctx->viewport_y = y;
1336 ctx->viewport_width = width;
1337 ctx->viewport_height = height;
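// Converts a packed BLEND_* factor into the corresponding GLenum: values
// below BLEND_SRC_COLOR pass through unchanged (presumably GL_ZERO/GL_ONE),
// the next range maps onto GL_SRC_COLOR and friends, and the remainder onto
// GL_CONSTANT_COLOR and friends.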
1341 static inline GLenum
1342 _cairo_gl_blend_factor(int v)
1344 if(v < BLEND_SRC_COLOR)
1345 return (GLenum)v;
1346 else if(v < BLEND_CONSTANT_COLOR)
1347 return GL_SRC_COLOR - BLEND_SRC_COLOR + v;
1348 else
1349 return GL_CONSTANT_COLOR - BLEND_CONSTANT_COLOR + v;
1352 static inline void
1353 _cairo_gpu_context__set_color_mask(cairo_gpu_context_t* ctx, unsigned color_mask)
1355 if(color_mask != ctx->color_mask)
1357 ctx->gl.ColorMask((color_mask) & 1, (color_mask >> 1) & 1, (color_mask >> 2) & 1, (color_mask >> 3) & 1);
1358 ctx->color_mask = color_mask;
1362 static inline void
1363 _cairo_gpu_context_set_blend(cairo_gpu_context_t* ctx, unsigned blendv)
1365 cairo_gpu_blend_t blend;
1366 blend.v = blendv;
1368 _cairo_gpu_context__set_color_mask(ctx, blend.color_mask);
1370 // XXX: fix endianness??
1371 if(blend.func == BLEND_FUNC_SOURCE && !blend.eq)
1373 if(ctx->blend)
1375 ctx->gl.Disable(GL_BLEND);
1376 ctx->blend = 0;
1379 else
1381 if(!ctx->blend)
1383 ctx->gl.Enable(GL_BLEND);
1384 ctx->blend = 1;
1387 if(blend.func != ctx->blend_func)
1389 if(blend.src_rgb != blend.src_alpha || blend.dst_rgb != blend.dst_alpha)
1390 ctx->gl.BlendFuncSeparateEXT(_cairo_gl_blend_factor(blend.src_rgb), _cairo_gl_blend_factor(blend.dst_rgb),
1391 _cairo_gl_blend_factor(blend.src_alpha), _cairo_gl_blend_factor(blend.dst_alpha));
1392 else
1393 ctx->gl.BlendFunc(_cairo_gl_blend_factor(blend.src_rgb), _cairo_gl_blend_factor(blend.dst_rgb));
1394 ctx->blend_func = blend.func;
1398 if(blend.eq != ctx->blend_eq)
1400 ctx->gl.BlendEquation(blend.eq ? GL_FUNC_REVERSE_SUBTRACT : GL_FUNC_ADD);
1401 ctx->blend_eq = blend.eq;
1405 static inline void _cairo_gpu_context_set_blend_color(cairo_gpu_context_t* ctx, cairo_gpu_color4_t* blend_color)
1407 ctx->gl.BlendColorEXT(blend_color->c.r, blend_color->c.g, blend_color->c.b, blend_color->ka);
1410 static inline void _cairo_gpu_context_set_raster(cairo_gpu_context_t* ctx, unsigned smooth)
1412 int enable = !!smooth;
1413 if(ctx->smooth != enable)
1415 if(enable)
1416 ctx->gl.Enable(GL_POLYGON_SMOOTH);
1417 else
1418 ctx->gl.Disable(GL_POLYGON_SMOOTH);
1419 ctx->smooth = enable;
1422 if(smooth && smooth != ctx->smooth_hint)
1424 ctx->gl.Hint(GL_POLYGON_SMOOTH_HINT, (smooth > 1) ? GL_NICEST : GL_FASTEST);
1425 ctx->smooth_hint = smooth;
1429 static inline void
1430 _cairo_gpu_context__gl_clear(cairo_gpu_context_t* ctx, cairo_gpu_surface_t* surface)
1432 unsigned color_mask = 0xf;
1434 if(surface)
1436 if(surface->base.content == CAIRO_CONTENT_COLOR)
1437 color_mask = 7;
1438 else if(surface->base.content == CAIRO_CONTENT_ALPHA)
1439 color_mask = 8;
1442 _cairo_gpu_context__set_color_mask(ctx, color_mask);
1443 ctx->gl.Clear(GL_COLOR_BUFFER_BIT);
1446 // TODO: some (most?) GL implementations may turn this into a quad render.
1447 // If so, we may want to avoid this and do the quad render ourselves.
1448 static inline void
1449 _cairo_gpu_context_fill_rect(cairo_gpu_context_t* ctx, cairo_gpu_surface_t* surface, int x, int y, int width, int height, float r, float g, float b, float a)
1451 _cairo_gpu_flip_draw_y(ctx, &y, height);
1453 //printf("fill rect %i %i %i %i\n", x, y, width, height);
1455 ctx->gl.ClearColor(r, g, b, a);
1456 ctx->gl.Scissor(x, y, width, height);
1457 ctx->gl.Enable(GL_SCISSOR_TEST);
1458 _cairo_gpu_context__gl_clear(ctx, surface);
1459 ctx->gl.Disable(GL_SCISSOR_TEST);
1462 static inline void
1463 _cairo_gpu_context_fill(cairo_gpu_context_t* ctx, cairo_gpu_surface_t* surface, float r, float g, float b, float a)
1465 ctx->gl.ClearColor(r, g, b, a);
1466 _cairo_gpu_context__gl_clear(ctx, surface);
1469 static inline void
1470 _cairo_gpu_context_set_geometry(cairo_gpu_context_t * ctx, cairo_gpu_geometry_t * geometry)
1472 _cairo_gpu_geometry_bind(ctx, geometry);
1475 // must be bound
1476 static inline void
1477 _cairo_gpu_context_draw(cairo_gpu_context_t * ctx)
1479 ctx->gl.DrawArraysEXT(ctx->mode, 0, ctx->count);
1482 static inline void
1483 _cairo_gpu_context_draw_rect(cairo_gpu_context_t * ctx, int x, int y, int width, int height)
1485 #if DEBUG_DISABLE_GL_RECTI
1487 cairo_gpu_geometry_t* geometry = &ctx->tls->geometries[GEOM_TEMP];
1488 float* v = _cairo_gpu_geometry_begin(ctx, geometry, PRIM_QUADS, 4, 2, 0, 0);
1489 _cairo_gpu_emit_rect(&v, x, y, width, height);
1490 _cairo_gpu_geometry__do_bind(ctx, geometry);
1491 _cairo_gpu_geometry_end(ctx, geometry, 4);
1493 _cairo_gpu_context_set_geometry(ctx, geometry);
1494 _cairo_gpu_context_draw(ctx);
1496 #else
1497 ctx->gl.Recti(x, y, x + width, y + height);
1498 #endif
1501 static void
1502 _cairo_gpu_context_blit_pixels(cairo_gpu_context_t* ctx, cairo_gpu_surface_t* dst, cairo_image_surface_t* image, int dst_x, int dst_y, int src_x, int src_y, int width, int height)
1504 _cairo_gpu_context_set_viewport(ctx, 0, 0, dst->width, dst->height);
1505 _cairo_gpu_context_set_blend(ctx, BLEND_SOURCE);
1506 _cairo_gpu_context_set_raster(ctx, 0);
1507 // vertex pipeline is bypassed by upload_pixels
1509 /* nVidia crashes with SIGFPE when fragment programs are enabled with a pbuffer target.
1510 * Workaround by not using fragment programs here.
1513 _cairo_gl_context_set_frag_primary(ctx);
1515 if(!ctx->space->has_window_pos)
1517 _cairo_gl_context_set_vert(ctx, 0);
1518 _cairo_gpu_context_set_translation(ctx, 0, 0);
1521 _cairo_gpu_context__upload_pixels(ctx, dst, -1, image, src_x, src_y, width, height, dst_x, dst_y);
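// Copies (and optionally scales by zx/zy) a rectangle from the read
// framebuffer to the draw framebuffer: uses EXT_framebuffer_blit when
// available, otherwise falls back to glCopyPixels with PixelZoom after
// resetting blend/raster/fragment state. _cairo_gpu_context_blit_same below
// is the unscaled case (zx = zy = 1).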
1526 static inline void
1527 _cairo_gpu_context_blit_zoom(cairo_gpu_context_t* ctx, cairo_gpu_surface_t* dst, int dx, int dy, int sx, int sy, int sw, int sh, int zx, int zy)
1529 if(!sw || !sh)
1530 return;
1532 if(ctx->space->fb_blit)
1534 int dw = sw * zx;
1535 int dh = sh * zy;
1537 dh = _cairo_gpu_flip_draw_y(ctx, &dy, dh);
1538 sh = _cairo_gpu_flip_read_y(ctx, &sy, sh);
1540 ctx->gl.BlitFramebufferEXT(sx, sy, sx + sw, sy + sh, dx, dy, dx + dw, dy + dh, GL_COLOR_BUFFER_BIT, GL_NEAREST);
1542 else
1544 _cairo_gpu_context_set_blend(ctx, BLEND_SOURCE);
1545 _cairo_gpu_context_set_raster(ctx, 0);
1547 _cairo_gl_context_set_frag_primary(ctx);
1549 // vertex pipeline is bypassed by upload_pixels
1550 if(ctx->space->has_window_pos)
1552 if(_cairo_gpu_flip_read_y(ctx, &sy, sh) < 0)
1553 sy += sh;
1555 else
1557 _cairo_gpu_context_set_viewport(ctx, 0, 0, dst->width, dst->height);
1558 _cairo_gl_context_set_vert(ctx, 0);
1559 _cairo_gpu_context_set_translation(ctx, 0, 0);
1562 if(zy < 0)
1563 dy -= sh * zy;
1565 // TODO: this may well be unaccelerated, and we should have a texture-mapping fallback.
1566 // Hopefully drivers will fall back to texture mapping themselves like Mesa/Gallium does
1567 ctx->gl.PixelZoom(zx, zy);
1568 if(ctx->space->has_window_pos)
1569 ctx->gl.WindowPos2i(dx, dy);
1570 else
1571 ctx->gl.RasterPos2i(dx, dy);
1573 // XXX: this SIGFPEs on nVidia for unknown reasons
1574 ctx->gl.CopyPixels(sx, sy, sw, sh, GL_COLOR);
1578 static inline void
1579 _cairo_gpu_context_blit_same(cairo_gpu_context_t* ctx, cairo_gpu_surface_t* dst, int x, int y, int w, int h)
1581 _cairo_gpu_context_blit_zoom(ctx, dst, x, y, x, y, w, h, 1, 1);
1584 // only use for padding as it doesn't update dirty parts
1585 static inline void
1586 _cairo_gpu_context_read_to_texture(cairo_gpu_context_t* ctx, cairo_gpu_surface_t* dst, int dx, int dy, int sx, int sy, int w, int h)
1588 // this should be OK with GL_INTENSITY
1589 _cairo_gl_context_set_active_texture(ctx, _cairo_gl_context_set_texture(ctx, -1, &dst->texture));
1590 ctx->gl.CopyTexSubImage2D(_cairo_gl_target(dst->texture.target_idx), 0, dx, dy, sx, sy, w, h);
1593 #if 0
1594 static inline void
1595 _cairo_gl_context_flush_surface(cairo_gpu_context_t* ctx, cairo_gpu_surface_t* surface)
1597 int mask = ((ctx->draw_fb == surface->fb) ? FB_DRAW : 0) | ((ctx->read_fb == surface->fb) ? FB_READ : 0);
1598 if(mask)
1599 _cairo_gl_context_set_framebuffer(ctx, mask, 0, 0, 0, surface->buffer_non_upside_down ? (int)surface->height : -1);
1601 #endif