//----------------------------------------------------------------------------
// Anti-Grain Geometry - Version 2.4
// Copyright (C) 2002-2005 Maxim Shemanarev (http://www.antigrain.com)
//
// Permission to copy, use, modify, sell and distribute this software
// is granted provided this copyright notice appears in all copies.
// This software is provided "as is" without express or implied
// warranty, and with no claim as to its suitability for any purpose.
//
//----------------------------------------------------------------------------
// Contact: mcseem@antigrain.com
//          mcseemagg@yahoo.com
//          http://www.antigrain.com
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
#ifndef AGG_ALPHA_MASK_U8_INCLUDED
#define AGG_ALPHA_MASK_U8_INCLUDED

#include <string.h>
#include "agg_basics.h"
#include "agg_rendering_buffer.h"

namespace agg
{
    //===================================================one_component_mask_u8
    struct one_component_mask_u8
    {
        static unsigned calculate(const int8u* p) { return *p; }
    };

    //=====================================================rgb_to_gray_mask_u8
    template<unsigned R, unsigned G, unsigned B>
    struct rgb_to_gray_mask_u8
    {
        // Integer luminance approximation: the weights 77, 150 and 29 sum to
        // 256, i.e. roughly 0.30*R + 0.59*G + 0.11*B scaled by 256.
        static unsigned calculate(const int8u* p)
        {
            return (p[R]*77 + p[G]*150 + p[B]*29) >> 8;
        }
    };

    //==========================================================alpha_mask_u8
    template<unsigned Step=1, unsigned Offset=0, class MaskF=one_component_mask_u8>
    class alpha_mask_u8
    {
    public:
        typedef int8u cover_type;
        typedef alpha_mask_u8<Step, Offset, MaskF> self_type;

        enum cover_scale_e
        {
            cover_shift = 8,
            cover_none  = 0,
            cover_full  = 255
        };
        alpha_mask_u8() : m_rbuf(0) {}
        alpha_mask_u8(rendering_buffer& rbuf) : m_rbuf(&rbuf) {}

        void attach(rendering_buffer& rbuf) { m_rbuf = &rbuf; }

        MaskF& mask_function() { return m_mask_function; }
        const MaskF& mask_function() const { return m_mask_function; }

        //--------------------------------------------------------------------
        // Coordinates outside the attached buffer yield zero coverage.
        cover_type pixel(int x, int y) const
        {
            if(x >= 0 && y >= 0 &&
               x < (int)m_rbuf->width() &&
               y < (int)m_rbuf->height())
            {
                return (cover_type)m_mask_function.calculate(
                                        m_rbuf->row_ptr(y) + x * Step + Offset);
            }
            return 0;
        }
        //--------------------------------------------------------------------
        cover_type combine_pixel(int x, int y, cover_type val) const
        {
            if(x >= 0 && y >= 0 &&
               x < (int)m_rbuf->width() &&
               y < (int)m_rbuf->height())
            {
                return (cover_type)((cover_full + val *
                                     m_mask_function.calculate(
                                        m_rbuf->row_ptr(y) + x * Step + Offset)) >>
                                     cover_shift);
            }
            return 0;
        }
        //--------------------------------------------------------------------
        void fill_hspan(int x, int y, cover_type* dst, int num_pix) const
        {
            int xmax = m_rbuf->width() - 1;
            int ymax = m_rbuf->height() - 1;

            int count = num_pix;
            cover_type* covers = dst;

            if(y < 0 || y > ymax)
            {
                memset(dst, 0, num_pix * sizeof(cover_type));
                return;
            }

            if(x < 0)
            {
                count += x;
                if(count <= 0)
                {
                    memset(dst, 0, num_pix * sizeof(cover_type));
                    return;
                }
                memset(covers, 0, -x * sizeof(cover_type));
                covers -= x;
                x = 0;
            }

            if(x + count > xmax)
            {
                int rest = x + count - xmax - 1;
                count -= rest;
                if(count <= 0)
                {
                    memset(dst, 0, num_pix * sizeof(cover_type));
                    return;
                }
                memset(covers + count, 0, rest * sizeof(cover_type));
            }

            const int8u* mask = m_rbuf->row_ptr(y) + x * Step + Offset;
            do
            {
                *covers++ = (cover_type)m_mask_function.calculate(mask);
                mask += Step;
            }
            while(--count);
        }

        //--------------------------------------------------------------------
        void combine_hspan(int x, int y, cover_type* dst, int num_pix) const
        {
            int xmax = m_rbuf->width() - 1;
            int ymax = m_rbuf->height() - 1;

            int count = num_pix;
            cover_type* covers = dst;

            if(y < 0 || y > ymax)
            {
                memset(dst, 0, num_pix * sizeof(cover_type));
                return;
            }

            if(x < 0)
            {
                count += x;
                if(count <= 0)
                {
                    memset(dst, 0, num_pix * sizeof(cover_type));
                    return;
                }
                memset(covers, 0, -x * sizeof(cover_type));
                covers -= x;
                x = 0;
            }

            if(x + count > xmax)
            {
                int rest = x + count - xmax - 1;
                count -= rest;
                if(count <= 0)
                {
                    memset(dst, 0, num_pix * sizeof(cover_type));
                    return;
                }
                memset(covers + count, 0, rest * sizeof(cover_type));
            }

            const int8u* mask = m_rbuf->row_ptr(y) + x * Step + Offset;
            do
            {
                *covers = (cover_type)((cover_full + (*covers) *
                                       m_mask_function.calculate(mask)) >>
                                       cover_shift);
                ++covers;
                mask += Step;
            }
            while(--count);
        }

        //--------------------------------------------------------------------
        void fill_vspan(int x, int y, cover_type* dst, int num_pix) const
        {
            int xmax = m_rbuf->width() - 1;
            int ymax = m_rbuf->height() - 1;

            int count = num_pix;
            cover_type* covers = dst;

            if(x < 0 || x > xmax)
            {
                memset(dst, 0, num_pix * sizeof(cover_type));
                return;
            }

            if(y < 0)
            {
                count += y;
                if(count <= 0)
                {
                    memset(dst, 0, num_pix * sizeof(cover_type));
                    return;
                }
                memset(covers, 0, -y * sizeof(cover_type));
                covers -= y;
                y = 0;
            }

            if(y + count > ymax)
            {
                int rest = y + count - ymax - 1;
                count -= rest;
                if(count <= 0)
                {
                    memset(dst, 0, num_pix * sizeof(cover_type));
                    return;
                }
                memset(covers + count, 0, rest * sizeof(cover_type));
            }

            const int8u* mask = m_rbuf->row_ptr(y) + x * Step + Offset;
            do
            {
                *covers++ = (cover_type)m_mask_function.calculate(mask);
                mask += m_rbuf->stride();
            }
            while(--count);
        }

        //--------------------------------------------------------------------
        void combine_vspan(int x, int y, cover_type* dst, int num_pix) const
        {
            int xmax = m_rbuf->width() - 1;
            int ymax = m_rbuf->height() - 1;

            int count = num_pix;
            cover_type* covers = dst;

            if(x < 0 || x > xmax)
            {
                memset(dst, 0, num_pix * sizeof(cover_type));
                return;
            }

            if(y < 0)
            {
                count += y;
                if(count <= 0)
                {
                    memset(dst, 0, num_pix * sizeof(cover_type));
                    return;
                }
                memset(covers, 0, -y * sizeof(cover_type));
                covers -= y;
                y = 0;
            }

            if(y + count > ymax)
            {
                int rest = y + count - ymax - 1;
                count -= rest;
                if(count <= 0)
                {
                    memset(dst, 0, num_pix * sizeof(cover_type));
                    return;
                }
                memset(covers + count, 0, rest * sizeof(cover_type));
            }

            const int8u* mask = m_rbuf->row_ptr(y) + x * Step + Offset;
            do
            {
                *covers = (cover_type)((cover_full + (*covers) *
                                       m_mask_function.calculate(mask)) >>
                                       cover_shift);
                ++covers;
                mask += m_rbuf->stride();
            }
            while(--count);
        }

    private:
        alpha_mask_u8(const self_type&);
        const self_type& operator = (const self_type&);

        rendering_buffer* m_rbuf;
        MaskF             m_mask_function;
    };

    typedef alpha_mask_u8<1, 0> alpha_mask_gray8;   //----alpha_mask_gray8

    typedef alpha_mask_u8<3, 0> alpha_mask_rgb24r;  //----alpha_mask_rgb24r
    typedef alpha_mask_u8<3, 1> alpha_mask_rgb24g;  //----alpha_mask_rgb24g
    typedef alpha_mask_u8<3, 2> alpha_mask_rgb24b;  //----alpha_mask_rgb24b

    typedef alpha_mask_u8<3, 2> alpha_mask_bgr24r;  //----alpha_mask_bgr24r
    typedef alpha_mask_u8<3, 1> alpha_mask_bgr24g;  //----alpha_mask_bgr24g
    typedef alpha_mask_u8<3, 0> alpha_mask_bgr24b;  //----alpha_mask_bgr24b

    typedef alpha_mask_u8<4, 0> alpha_mask_rgba32r; //----alpha_mask_rgba32r
    typedef alpha_mask_u8<4, 1> alpha_mask_rgba32g; //----alpha_mask_rgba32g
    typedef alpha_mask_u8<4, 2> alpha_mask_rgba32b; //----alpha_mask_rgba32b
    typedef alpha_mask_u8<4, 3> alpha_mask_rgba32a; //----alpha_mask_rgba32a

    typedef alpha_mask_u8<4, 1> alpha_mask_argb32r; //----alpha_mask_argb32r
    typedef alpha_mask_u8<4, 2> alpha_mask_argb32g; //----alpha_mask_argb32g
    typedef alpha_mask_u8<4, 3> alpha_mask_argb32b; //----alpha_mask_argb32b
    typedef alpha_mask_u8<4, 0> alpha_mask_argb32a; //----alpha_mask_argb32a

    typedef alpha_mask_u8<4, 2> alpha_mask_bgra32r; //----alpha_mask_bgra32r
    typedef alpha_mask_u8<4, 1> alpha_mask_bgra32g; //----alpha_mask_bgra32g
    typedef alpha_mask_u8<4, 0> alpha_mask_bgra32b; //----alpha_mask_bgra32b
    typedef alpha_mask_u8<4, 3> alpha_mask_bgra32a; //----alpha_mask_bgra32a

    typedef alpha_mask_u8<4, 3> alpha_mask_abgr32r; //----alpha_mask_abgr32r
    typedef alpha_mask_u8<4, 2> alpha_mask_abgr32g; //----alpha_mask_abgr32g
    typedef alpha_mask_u8<4, 1> alpha_mask_abgr32b; //----alpha_mask_abgr32b
    typedef alpha_mask_u8<4, 0> alpha_mask_abgr32a; //----alpha_mask_abgr32a

    typedef alpha_mask_u8<3, 0, rgb_to_gray_mask_u8<0, 1, 2> > alpha_mask_rgb24gray;  //----alpha_mask_rgb24gray
    typedef alpha_mask_u8<3, 0, rgb_to_gray_mask_u8<2, 1, 0> > alpha_mask_bgr24gray;  //----alpha_mask_bgr24gray
    typedef alpha_mask_u8<4, 0, rgb_to_gray_mask_u8<0, 1, 2> > alpha_mask_rgba32gray; //----alpha_mask_rgba32gray
    typedef alpha_mask_u8<4, 1, rgb_to_gray_mask_u8<0, 1, 2> > alpha_mask_argb32gray; //----alpha_mask_argb32gray
    typedef alpha_mask_u8<4, 0, rgb_to_gray_mask_u8<2, 1, 0> > alpha_mask_bgra32gray; //----alpha_mask_bgra32gray
    typedef alpha_mask_u8<4, 1, rgb_to_gray_mask_u8<2, 1, 0> > alpha_mask_abgr32gray; //----alpha_mask_abgr32gray

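    //------------------------------------------------------------------------
    // Illustrative usage sketch (not part of the original AGG header). It
    // assumes a caller-supplied 8-bit mask buffer with non-zero dimensions;
    // the function name and its arguments are hypothetical.
    inline void alpha_mask_gray8_usage_example(int8u* mask_data,
                                               unsigned width,
                                               unsigned height)
    {
        // One byte per pixel, so the row stride equals the width.
        rendering_buffer mask_rbuf(mask_data, width, height, (int)width);
        alpha_mask_gray8 mask(mask_rbuf);

        // Read a clipped horizontal run of coverage values from row 0;
        // pixels outside the buffer are zero-filled by fill_hspan itself.
        alpha_mask_gray8::cover_type covers[16];
        mask.fill_hspan(0, 0, covers, 16);

        // Modulate the same run against the mask, roughly cover * mask / 256.
        mask.combine_hspan(0, 0, covers, 16);
    }
    //------------------------------------------------------------------------
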
    //=======================================================amask_no_clip_u8
    template<unsigned Step=1, unsigned Offset=0, class MaskF=one_component_mask_u8>
    class amask_no_clip_u8
    {
    public:
        typedef int8u cover_type;
        typedef amask_no_clip_u8<Step, Offset, MaskF> self_type;

        enum cover_scale_e
        {
            cover_shift = 8,
            cover_none  = 0,
            cover_full  = 255
        };
        amask_no_clip_u8() : m_rbuf(0) {}
        amask_no_clip_u8(rendering_buffer& rbuf) : m_rbuf(&rbuf) {}

        void attach(rendering_buffer& rbuf) { m_rbuf = &rbuf; }

        MaskF& mask_function() { return m_mask_function; }
        const MaskF& mask_function() const { return m_mask_function; }

        //--------------------------------------------------------------------
        cover_type pixel(int x, int y) const
        {
            return (cover_type)m_mask_function.calculate(
                                   m_rbuf->row_ptr(y) + x * Step + Offset);
        }
        //--------------------------------------------------------------------
        cover_type combine_pixel(int x, int y, cover_type val) const
        {
            return (cover_type)((cover_full + val *
                                 m_mask_function.calculate(
                                     m_rbuf->row_ptr(y) + x * Step + Offset)) >>
                                 cover_shift);
        }
        //--------------------------------------------------------------------
        void fill_hspan(int x, int y, cover_type* dst, int num_pix) const
        {
            const int8u* mask = m_rbuf->row_ptr(y) + x * Step + Offset;
            do
            {
                *dst++ = (cover_type)m_mask_function.calculate(mask);
                mask += Step;
            }
            while(--num_pix);
        }

        //--------------------------------------------------------------------
        void combine_hspan(int x, int y, cover_type* dst, int num_pix) const
        {
            const int8u* mask = m_rbuf->row_ptr(y) + x * Step + Offset;
            do
            {
                *dst = (cover_type)((cover_full + (*dst) *
                                    m_mask_function.calculate(mask)) >>
                                    cover_shift);
                ++dst;
                mask += Step;
            }
            while(--num_pix);
        }

        //--------------------------------------------------------------------
        void fill_vspan(int x, int y, cover_type* dst, int num_pix) const
        {
            const int8u* mask = m_rbuf->row_ptr(y) + x * Step + Offset;
            do
            {
                *dst++ = (cover_type)m_mask_function.calculate(mask);
                mask += m_rbuf->stride();
            }
            while(--num_pix);
        }

        //--------------------------------------------------------------------
        void combine_vspan(int x, int y, cover_type* dst, int num_pix) const
        {
            const int8u* mask = m_rbuf->row_ptr(y) + x * Step + Offset;
            do
            {
                *dst = (cover_type)((cover_full + (*dst) *
                                    m_mask_function.calculate(mask)) >>
                                    cover_shift);
                ++dst;
                mask += m_rbuf->stride();
            }
            while(--num_pix);
        }

    private:
        amask_no_clip_u8(const self_type&);
        const self_type& operator = (const self_type&);

        rendering_buffer* m_rbuf;
        MaskF             m_mask_function;
    };

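    //------------------------------------------------------------------------
    // Illustrative note (not part of the original AGG header): amask_no_clip_u8
    // is the unchecked counterpart of alpha_mask_u8 above. It performs no
    // bounds tests or zero-fills, so the caller must guarantee that every
    // pixel of the requested span lies inside the attached buffer. A minimal
    // sketch with hypothetical buffer arguments:
    inline void amask_no_clip_usage_example(int8u* mask_data,
                                            unsigned width,
                                            unsigned height)
    {
        rendering_buffer mask_rbuf(mask_data, width, height, (int)width);
        amask_no_clip_u8<1, 0> mask(mask_rbuf); // same as amask_no_clip_gray8 below

        // Span guaranteed in range: row 0, columns 0..7 of a buffer the
        // caller knows is at least 8 pixels wide and 1 pixel tall.
        amask_no_clip_u8<1, 0>::cover_type covers[8] =
            { 255, 255, 255, 255, 255, 255, 255, 255 };
        if(width >= 8 && height >= 1)
        {
            mask.combine_hspan(0, 0, covers, 8);
        }
    }
    //------------------------------------------------------------------------
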
    typedef amask_no_clip_u8<1, 0> amask_no_clip_gray8;   //----amask_no_clip_gray8

    typedef amask_no_clip_u8<3, 0> amask_no_clip_rgb24r;  //----amask_no_clip_rgb24r
    typedef amask_no_clip_u8<3, 1> amask_no_clip_rgb24g;  //----amask_no_clip_rgb24g
    typedef amask_no_clip_u8<3, 2> amask_no_clip_rgb24b;  //----amask_no_clip_rgb24b

    typedef amask_no_clip_u8<3, 2> amask_no_clip_bgr24r;  //----amask_no_clip_bgr24r
    typedef amask_no_clip_u8<3, 1> amask_no_clip_bgr24g;  //----amask_no_clip_bgr24g
    typedef amask_no_clip_u8<3, 0> amask_no_clip_bgr24b;  //----amask_no_clip_bgr24b

    typedef amask_no_clip_u8<4, 0> amask_no_clip_rgba32r; //----amask_no_clip_rgba32r
    typedef amask_no_clip_u8<4, 1> amask_no_clip_rgba32g; //----amask_no_clip_rgba32g
    typedef amask_no_clip_u8<4, 2> amask_no_clip_rgba32b; //----amask_no_clip_rgba32b
    typedef amask_no_clip_u8<4, 3> amask_no_clip_rgba32a; //----amask_no_clip_rgba32a

    typedef amask_no_clip_u8<4, 1> amask_no_clip_argb32r; //----amask_no_clip_argb32r
    typedef amask_no_clip_u8<4, 2> amask_no_clip_argb32g; //----amask_no_clip_argb32g
    typedef amask_no_clip_u8<4, 3> amask_no_clip_argb32b; //----amask_no_clip_argb32b
    typedef amask_no_clip_u8<4, 0> amask_no_clip_argb32a; //----amask_no_clip_argb32a

    typedef amask_no_clip_u8<4, 2> amask_no_clip_bgra32r; //----amask_no_clip_bgra32r
    typedef amask_no_clip_u8<4, 1> amask_no_clip_bgra32g; //----amask_no_clip_bgra32g
    typedef amask_no_clip_u8<4, 0> amask_no_clip_bgra32b; //----amask_no_clip_bgra32b
    typedef amask_no_clip_u8<4, 3> amask_no_clip_bgra32a; //----amask_no_clip_bgra32a

    typedef amask_no_clip_u8<4, 3> amask_no_clip_abgr32r; //----amask_no_clip_abgr32r
    typedef amask_no_clip_u8<4, 2> amask_no_clip_abgr32g; //----amask_no_clip_abgr32g
    typedef amask_no_clip_u8<4, 1> amask_no_clip_abgr32b; //----amask_no_clip_abgr32b
    typedef amask_no_clip_u8<4, 0> amask_no_clip_abgr32a; //----amask_no_clip_abgr32a

    typedef amask_no_clip_u8<3, 0, rgb_to_gray_mask_u8<0, 1, 2> > amask_no_clip_rgb24gray;  //----amask_no_clip_rgb24gray
    typedef amask_no_clip_u8<3, 0, rgb_to_gray_mask_u8<2, 1, 0> > amask_no_clip_bgr24gray;  //----amask_no_clip_bgr24gray
    typedef amask_no_clip_u8<4, 0, rgb_to_gray_mask_u8<0, 1, 2> > amask_no_clip_rgba32gray; //----amask_no_clip_rgba32gray
    typedef amask_no_clip_u8<4, 1, rgb_to_gray_mask_u8<0, 1, 2> > amask_no_clip_argb32gray; //----amask_no_clip_argb32gray
    typedef amask_no_clip_u8<4, 0, rgb_to_gray_mask_u8<2, 1, 0> > amask_no_clip_bgra32gray; //----amask_no_clip_bgra32gray
    typedef amask_no_clip_u8<4, 1, rgb_to_gray_mask_u8<2, 1, 0> > amask_no_clip_abgr32gray; //----amask_no_clip_abgr32gray
}

#endif