/* pre-shuffled: just load these into XMM reg instead of load-scalar/shufps sequence */
#define FLOATSCALE  (float)(PRECACHE_OUTPUT_SIZE)
#define CLAMPMAXVAL ( ((float) (PRECACHE_OUTPUT_SIZE - 1)) / PRECACHE_OUTPUT_SIZE )
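/* For illustration (assuming, say, PRECACHE_OUTPUT_SIZE == 8192): FLOATSCALE would be
 * 8192.0f and CLAMPMAXVAL 8191/8192, just under 1.0, so a value clamped to
 * [0, CLAMPMAXVAL] and multiplied by FLOATSCALE always converts to an integer index in
 * [0, 8191] and can never run off the end of the precached output tables. */
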
static const ALIGN float floatScaleX4[4] =
    { FLOATSCALE, FLOATSCALE, FLOATSCALE, FLOATSCALE };
static const ALIGN float clampMaxValueX4[4] =
    { CLAMPMAXVAL, CLAMPMAXVAL, CLAMPMAXVAL, CLAMPMAXVAL };
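/* A sketch of the load-scalar/shufps sequence that the pre-shuffled constants above
 * avoid (hypothetical, not part of this file): loading one float and broadcasting it
 * costs two instructions per constant, e.g.
 *
 *     __m128 scale = _mm_load_ss(&scalarScale);        // scalarScale: a plain float
 *     scale        = _mm_shuffle_ps(scale, scale, 0);  // copy lane 0 into all 4 lanes
 *
 * whereas _mm_load_ps() on the 16-byte-aligned arrays needs only one load each. */
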
void qcms_transform_data_rgb_out_lut_sse2(qcms_transform *transform,
                                          unsigned char *src,
                                          unsigned char *dest,
                                          size_t length)
{
    unsigned int i;
    float (*mat)[4] = transform->matrix;
    char input_back[32];
    /* Ensure we have a buffer that's 16 byte aligned regardless of the original
     * stack alignment. We can't use __attribute__((aligned(16))) because it
     * doesn't work on stack variables. gcc 4.4 does do the right thing on x86
     * but that's too new for us right now. For more info: gcc bug #16660 */
    float const * input = (float*)(((uintptr_t)&input_back[16]) & ~0xf);
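    /* How the trick works: input_back is 32 bytes, so starting 16 bytes in and masking
     * off the low four address bits always yields a 16-byte-aligned pointer with at
     * least 16 usable bytes remaining inside input_back. */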
    /* share input and output locations to save having to keep the
     * locations in separate registers */
    uint32_t const * output = (uint32_t*)input;
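    /* The same 16 aligned bytes are written as a __m128i by _mm_store_si128() below and
     * then read back as four uint32_t table indices, which is why input and output can
     * safely alias. */
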
    /* deref *transform now to avoid it in loop */
    const float *igtbl_r = transform->input_gamma_table_r;
    const float *igtbl_g = transform->input_gamma_table_g;
    const float *igtbl_b = transform->input_gamma_table_b;

    /* deref *transform now to avoid it in loop */
    const uint8_t *otdata_r = &transform->output_table_r->data[0];
    const uint8_t *otdata_g = &transform->output_table_g->data[0];
    const uint8_t *otdata_b = &transform->output_table_b->data[0];

    /* input matrix values never change */
    const __m128 mat0 = _mm_load_ps(mat[0]);
    const __m128 mat1 = _mm_load_ps(mat[1]);
    const __m128 mat2 = _mm_load_ps(mat[2]);
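    /* Layout note (inferred from the arithmetic below): mat[i] holds the coefficients
     * that input channel i contributes to each of the three output channels (the fourth
     * lane is padding), so broadcasting r, g and b and summing r*mat0 + g*mat1 + b*mat2
     * evaluates the whole 3x3 matrix multiply in three multiplies and two adds. */
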
    /* these values don't change, either */
    const __m128 max   = _mm_load_ps(clampMaxValueX4);
    const __m128 min   = _mm_setzero_ps();
    const __m128 scale = _mm_load_ps(floatScaleX4);

    /* working variables */
    __m128 vec_r, vec_g, vec_b, result;

    /* CYA */
    if (!length)
        return;

    /* one pixel is handled outside of the loop */
    length--;

    /* setup for transforming 1st pixel */
    vec_r = _mm_load_ss(&igtbl_r[src[0]]);
    vec_g = _mm_load_ss(&igtbl_g[src[1]]);
    vec_b = _mm_load_ss(&igtbl_b[src[2]]);
    src += 3;

    /* transform all but final pixel */
    for (i=0; i<length; i++)
    {
        /* position values from gamma tables */
        vec_r = _mm_shuffle_ps(vec_r, vec_r, 0);
        vec_g = _mm_shuffle_ps(vec_g, vec_g, 0);
        vec_b = _mm_shuffle_ps(vec_b, vec_b, 0);
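        /* each vec_* now holds its gamma-corrected channel value in all four lanes:
         * _mm_load_ss put it in lane 0 and the shuffle with control 0 broadcasts it */
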
        vec_r = _mm_mul_ps(vec_r, mat0);
        vec_g = _mm_mul_ps(vec_g, mat1);
        vec_b = _mm_mul_ps(vec_b, mat2);

        /* crunch, crunch, crunch */
        vec_r  = _mm_add_ps(vec_r, _mm_add_ps(vec_g, vec_b));
        vec_r  = _mm_max_ps(min, vec_r);
        vec_r  = _mm_min_ps(max, vec_r);
        result = _mm_mul_ps(vec_r, scale);
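        /* after clamp-and-scale each lane of result lies in [0, PRECACHE_OUTPUT_SIZE - 1],
         * i.e. a valid (still fractional) index into the precached output tables */
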
        /* store calc'd output tables indices */
        _mm_store_si128((__m128i*)output, _mm_cvtps_epi32(result));
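        /* _mm_cvtps_epi32() converts using the current MXCSR rounding mode
         * (round-to-nearest by default); only the first three of the four stored lanes
         * are used as table indices below */
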
        /* load for next loop while store completes */
        vec_r = _mm_load_ss(&igtbl_r[src[0]]);
        vec_g = _mm_load_ss(&igtbl_g[src[1]]);
        vec_b = _mm_load_ss(&igtbl_b[src[2]]);
        src += 3;

        /* use calc'd indices to output RGB values */
        dest[0] = otdata_r[output[0]];
        dest[1] = otdata_g[output[1]];
        dest[2] = otdata_b[output[2]];
        dest += 3;
    }
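    /* Note the one-iteration stagger inside the loop: the dest[] writes consume the
     * indices computed for the current pixel while the gamma loads just issued already
     * belong to the next pixel, which helps hide the latency of the vector store. */
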
    /* handle final (maybe only) pixel */
    vec_r = _mm_shuffle_ps(vec_r, vec_r, 0);
    vec_g = _mm_shuffle_ps(vec_g, vec_g, 0);
    vec_b = _mm_shuffle_ps(vec_b, vec_b, 0);

    vec_r = _mm_mul_ps(vec_r, mat0);
    vec_g = _mm_mul_ps(vec_g, mat1);
    vec_b = _mm_mul_ps(vec_b, mat2);

    vec_r  = _mm_add_ps(vec_r, _mm_add_ps(vec_g, vec_b));
    vec_r  = _mm_max_ps(min, vec_r);
    vec_r  = _mm_min_ps(max, vec_r);
    result = _mm_mul_ps(vec_r, scale);

    _mm_store_si128((__m128i*)output, _mm_cvtps_epi32(result));

    dest[0] = otdata_r[output[0]];
    dest[1] = otdata_g[output[1]];
    dest[2] = otdata_b[output[2]];
}

void qcms_transform_data_rgba_out_lut_sse2(qcms_transform *transform,
                                           unsigned char *src,
                                           unsigned char *dest,
                                           size_t length)
{
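    /* This function mirrors qcms_transform_data_rgb_out_lut_sse2() above; the only
     * differences are the 4-byte-per-pixel stride and that the alpha component is
     * copied through unchanged. */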
    unsigned int i;
    float (*mat)[4] = transform->matrix;
    char input_back[32];
    /* Ensure we have a buffer that's 16 byte aligned regardless of the original
     * stack alignment. We can't use __attribute__((aligned(16))) because it
     * doesn't work on stack variables. gcc 4.4 does do the right thing on x86
     * but that's too new for us right now. For more info: gcc bug #16660 */
    float const * input = (float*)(((uintptr_t)&input_back[16]) & ~0xf);
    /* share input and output locations to save having to keep the
     * locations in separate registers */
    uint32_t const * output = (uint32_t*)input;

    /* deref *transform now to avoid it in loop */
    const float *igtbl_r = transform->input_gamma_table_r;
    const float *igtbl_g = transform->input_gamma_table_g;
    const float *igtbl_b = transform->input_gamma_table_b;

    /* deref *transform now to avoid it in loop */
    const uint8_t *otdata_r = &transform->output_table_r->data[0];
    const uint8_t *otdata_g = &transform->output_table_g->data[0];
    const uint8_t *otdata_b = &transform->output_table_b->data[0];

    /* input matrix values never change */
    const __m128 mat0 = _mm_load_ps(mat[0]);
    const __m128 mat1 = _mm_load_ps(mat[1]);
    const __m128 mat2 = _mm_load_ps(mat[2]);

    /* these values don't change, either */
    const __m128 max   = _mm_load_ps(clampMaxValueX4);
    const __m128 min   = _mm_setzero_ps();
    const __m128 scale = _mm_load_ps(floatScaleX4);

    /* working variables */
    __m128 vec_r, vec_g, vec_b, result;
    unsigned char alpha;

    /* CYA */
    if (!length)
        return;

    /* one pixel is handled outside of the loop */
    length--;

    /* setup for transforming 1st pixel */
    vec_r = _mm_load_ss(&igtbl_r[src[0]]);
    vec_g = _mm_load_ss(&igtbl_g[src[1]]);
    vec_b = _mm_load_ss(&igtbl_b[src[2]]);
    alpha = src[3];
    src += 4;

    /* transform all but final pixel */
    for (i=0; i<length; i++)
    {
        /* position values from gamma tables */
        vec_r = _mm_shuffle_ps(vec_r, vec_r, 0);
        vec_g = _mm_shuffle_ps(vec_g, vec_g, 0);
        vec_b = _mm_shuffle_ps(vec_b, vec_b, 0);

        vec_r = _mm_mul_ps(vec_r, mat0);
        vec_g = _mm_mul_ps(vec_g, mat1);
        vec_b = _mm_mul_ps(vec_b, mat2);

        /* store alpha for this pixel; load alpha for next */
        dest[3] = alpha;
        alpha   = src[3];

        /* crunch, crunch, crunch */
        vec_r  = _mm_add_ps(vec_r, _mm_add_ps(vec_g, vec_b));
        vec_r  = _mm_max_ps(min, vec_r);
        vec_r  = _mm_min_ps(max, vec_r);
        result = _mm_mul_ps(vec_r, scale);

        /* store calc'd output tables indices */
        _mm_store_si128((__m128i*)output, _mm_cvtps_epi32(result));

        /* load gamma values for next loop while store completes */
        vec_r = _mm_load_ss(&igtbl_r[src[0]]);
        vec_g = _mm_load_ss(&igtbl_g[src[1]]);
        vec_b = _mm_load_ss(&igtbl_b[src[2]]);
        src += 4;

        /* use calc'd indices to output RGB values */
        dest[0] = otdata_r[output[0]];
        dest[1] = otdata_g[output[1]];
        dest[2] = otdata_b[output[2]];
        dest += 4;
    }

    /* handle final (maybe only) pixel */
    vec_r = _mm_shuffle_ps(vec_r, vec_r, 0);
    vec_g = _mm_shuffle_ps(vec_g, vec_g, 0);
    vec_b = _mm_shuffle_ps(vec_b, vec_b, 0);

    vec_r = _mm_mul_ps(vec_r, mat0);
    vec_g = _mm_mul_ps(vec_g, mat1);
    vec_b = _mm_mul_ps(vec_b, mat2);

    dest[3] = alpha;

    vec_r  = _mm_add_ps(vec_r, _mm_add_ps(vec_g, vec_b));
    vec_r  = _mm_max_ps(min, vec_r);
    vec_r  = _mm_min_ps(max, vec_r);
    result = _mm_mul_ps(vec_r, scale);

    _mm_store_si128((__m128i*)output, _mm_cvtps_epi32(result));

    dest[0] = otdata_r[output[0]];
    dest[1] = otdata_g[output[1]];
    dest[2] = otdata_b[output[2]];
}