// Copyright 2011 Google Inc. All Rights Reserved.
//
// Use of this source code is governed by a BSD-style license
// that can be found in the COPYING file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
// -----------------------------------------------------------------------------
//
// NEON version of YUV to RGB upsampling functions.
//
// Author: mans@mansr.com (Mans Rullgard)
// Based on SSE code by: somnath@google.com (Somnath Banerjee)

#include "./dsp.h"

#if defined(WEBP_USE_NEON)

#include <assert.h>
#include <arm_neon.h>
#include <string.h>
#include "./neon.h"
#include "./yuv.h"

#ifdef FANCY_UPSAMPLING

//-----------------------------------------------------------------------------
// U/V upsampling

// Loads 9 pixels each from rows r1 and r2 and generates 16 pixels.
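//
// Given a 2x2 block of source chroma samples
//   [a b]
//   [c d]
// the macro computes, up to intermediate rounding, the four upsampled values
//   ([9a + 3b + 3c +  d   3a + 9b +  c + 3d] + [8 8]) / 16
//   ([3a +  b + 9c + 3d    a + 3b + 3c + 9d] + [8 8]) / 16
// by forming the diagonal averages diag1/diag2 with vshrn_n_u16(., 3) and
// then blending them with the originals via vrhadd_u8() (rounding halving
// add).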
#define UPSAMPLE_16PIXELS(r1, r2, out) { \
  uint8x8_t a = vld1_u8(r1); \
  uint8x8_t b = vld1_u8(r1 + 1); \
  uint8x8_t c = vld1_u8(r2); \
  uint8x8_t d = vld1_u8(r2 + 1); \
  \
  uint16x8_t al = vshll_n_u8(a, 1); \
  uint16x8_t bl = vshll_n_u8(b, 1); \
  uint16x8_t cl = vshll_n_u8(c, 1); \
  uint16x8_t dl = vshll_n_u8(d, 1); \
  \
  uint8x8_t diag1, diag2; \
  uint16x8_t sl; \
  \
  /* a + b + c + d */ \
  sl = vaddl_u8(a, b); \
  sl = vaddw_u8(sl, c); \
  sl = vaddw_u8(sl, d); \
  \
  al = vaddq_u16(sl, al); /* 3a + b + c + d */ \
  bl = vaddq_u16(sl, bl); /* a + 3b + c + d */ \
  \
  al = vaddq_u16(al, dl); /* 3a + b + c + 3d */ \
  bl = vaddq_u16(bl, cl); /* a + 3b + 3c + d */ \
  \
  diag2 = vshrn_n_u16(al, 3); \
  diag1 = vshrn_n_u16(bl, 3); \
  \
  a = vrhadd_u8(a, diag1); \
  b = vrhadd_u8(b, diag2); \
  c = vrhadd_u8(c, diag2); \
  d = vrhadd_u8(d, diag1); \
  \
  { \
    uint8x8x2_t a_b, c_d; \
    INIT_VECTOR2(a_b, a, b); \
    INIT_VECTOR2(c_d, c, d); \
    vst2_u8(out, a_b); \
    vst2_u8(out + 32, c_d); \
  } \
}

// Turn the macro into a function for reducing code-size when non-critical
static void Upsample16Pixels(const uint8_t *r1, const uint8_t *r2,
                             uint8_t *out) {
  UPSAMPLE_16PIXELS(r1, r2, out);
}

#define UPSAMPLE_LAST_BLOCK(tb, bb, num_pixels, out) { \
  uint8_t r1[9], r2[9]; \
  memcpy(r1, (tb), (num_pixels)); \
  memcpy(r2, (bb), (num_pixels)); \
  /* replicate last byte */ \
  memset(r1 + (num_pixels), r1[(num_pixels) - 1], 9 - (num_pixels)); \
  memset(r2 + (num_pixels), r2[(num_pixels) - 1], 9 - (num_pixels)); \
  Upsample16Pixels(r1, r2, out); \
}

//-----------------------------------------------------------------------------
// YUV->RGB conversion

static const int16_t kCoeffs[4] = { kYScale, kVToR, kUToG, kVToG };
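
// The per-pixel conversion done by CONVERT8 below is, up to rounding and
// saturation, the usual fixed-point YUV->RGB mapping (constants and YUV_FIX2
// come from yuv.h). With Y' = Y - 16, U' = U - 128 and V' = V - 128:
//   R = (kYScale * Y' + kVToR * V')              >> YUV_FIX2
//   G = (kYScale * Y' - kUToG * U' - kVToG * V') >> YUV_FIX2
//   B = (kYScale * Y' + kUToB * U')              >> YUV_FIX2
// kYScale, kVToR, kUToG and kVToG are loaded as the four lanes of cf16;
// kUToB is kept in a separate 32-bit vector (cf32) since it does not fit in
// a signed 16-bit lane.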

#define v255 vdup_n_u8(255)

#define STORE_Rgb(out, r, g, b) do { \
  uint8x8x3_t r_g_b; \
  INIT_VECTOR3(r_g_b, r, g, b); \
  vst3_u8(out, r_g_b); \
} while (0)

#define STORE_Bgr(out, r, g, b) do { \
  uint8x8x3_t b_g_r; \
  INIT_VECTOR3(b_g_r, b, g, r); \
  vst3_u8(out, b_g_r); \
} while (0)

#define STORE_Rgba(out, r, g, b) do { \
  uint8x8x4_t r_g_b_v255; \
  INIT_VECTOR4(r_g_b_v255, r, g, b, v255); \
  vst4_u8(out, r_g_b_v255); \
} while (0)

#define STORE_Bgra(out, r, g, b) do { \
  uint8x8x4_t b_g_r_v255; \
  INIT_VECTOR4(b_g_r_v255, b, g, r, v255); \
  vst4_u8(out, b_g_r_v255); \
} while (0)
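
// Layout of the chroma scratch area consumed by CONVERT8/CONVERT2RGB_8 and
// produced in NEON_UPSAMPLE_FUNC below: 16 upsampled U bytes followed by
// 16 upsampled V bytes for the top output row, with the same U/V pair for
// the bottom row starting 32 bytes further on (hence the (src_uv) + i + 16
// and (uv) + 32 offsets, and the 2 * 32 byte r_uv buffer).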

#define CONVERT8(FMT, XSTEP, N, src_y, src_uv, out, cur_x) { \
  int i; \
  for (i = 0; i < N; i += 8) { \
    const int off = ((cur_x) + i) * XSTEP; \
    uint8x8_t y = vld1_u8((src_y) + (cur_x) + i); \
    uint8x8_t u = vld1_u8((src_uv) + i); \
    uint8x8_t v = vld1_u8((src_uv) + i + 16); \
    const int16x8_t yy = vreinterpretq_s16_u16(vsubl_u8(y, u16)); \
    const int16x8_t uu = vreinterpretq_s16_u16(vsubl_u8(u, u128)); \
    const int16x8_t vv = vreinterpretq_s16_u16(vsubl_u8(v, u128)); \
    int32x4_t yl = vmull_lane_s16(vget_low_s16(yy), cf16, 0); \
    int32x4_t yh = vmull_lane_s16(vget_high_s16(yy), cf16, 0); \
    const int32x4_t rl = vmlal_lane_s16(yl, vget_low_s16(vv), cf16, 1); \
    const int32x4_t rh = vmlal_lane_s16(yh, vget_high_s16(vv), cf16, 1); \
    int32x4_t gl = vmlsl_lane_s16(yl, vget_low_s16(uu), cf16, 2); \
    int32x4_t gh = vmlsl_lane_s16(yh, vget_high_s16(uu), cf16, 2); \
    const int32x4_t bl = vmovl_s16(vget_low_s16(uu)); \
    const int32x4_t bh = vmovl_s16(vget_high_s16(uu)); \
    gl = vmlsl_lane_s16(gl, vget_low_s16(vv), cf16, 3); \
    gh = vmlsl_lane_s16(gh, vget_high_s16(vv), cf16, 3); \
    yl = vmlaq_lane_s32(yl, bl, cf32, 0); \
    yh = vmlaq_lane_s32(yh, bh, cf32, 0); \
    /* vrshrn_n_s32() already incorporates the rounding constant */ \
    y = vqmovun_s16(vcombine_s16(vrshrn_n_s32(rl, YUV_FIX2), \
                                 vrshrn_n_s32(rh, YUV_FIX2))); \
    u = vqmovun_s16(vcombine_s16(vrshrn_n_s32(gl, YUV_FIX2), \
                                 vrshrn_n_s32(gh, YUV_FIX2))); \
    v = vqmovun_s16(vcombine_s16(vrshrn_n_s32(yl, YUV_FIX2), \
                                 vrshrn_n_s32(yh, YUV_FIX2))); \
    STORE_ ## FMT(out + off, y, u, v); \
  } \
}
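
// Note that CONVERT8 reuses its input registers for the results: after the
// final narrowing shifts, y holds R, u holds G and v holds B (yl/yh having
// been recycled as the B accumulators), which is why STORE_##FMT is invoked
// with (y, u, v).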

#define CONVERT1(FUNC, XSTEP, N, src_y, src_uv, rgb, cur_x) { \
  int i; \
  for (i = 0; i < N; i++) { \
    const int off = ((cur_x) + i) * XSTEP; \
    const int y = src_y[(cur_x) + i]; \
    const int u = (src_uv)[i]; \
    const int v = (src_uv)[i + 16]; \
    FUNC(y, u, v, rgb + off); \
  } \
}

#define CONVERT2RGB_8(FMT, XSTEP, top_y, bottom_y, uv, \
                      top_dst, bottom_dst, cur_x, len) { \
  CONVERT8(FMT, XSTEP, len, top_y, uv, top_dst, cur_x) \
  if (bottom_y != NULL) { \
    CONVERT8(FMT, XSTEP, len, bottom_y, (uv) + 32, bottom_dst, cur_x) \
  } \
}

#define CONVERT2RGB_1(FUNC, XSTEP, top_y, bottom_y, uv, \
                      top_dst, bottom_dst, cur_x, len) { \
  CONVERT1(FUNC, XSTEP, len, top_y, uv, top_dst, cur_x); \
  if (bottom_y != NULL) { \
    CONVERT1(FUNC, XSTEP, len, bottom_y, (uv) + 32, bottom_dst, cur_x); \
  } \
}
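
// Sketch of the per-row logic implemented by the macro below, as derived from
// the code: a row of len luma pixels has uv_len = (len + 1) / 2 chroma
// samples. Pixel 0 is converted separately using the averaged "diag" chroma.
// Each main-loop block then consumes 8 chroma samples (but must be able to
// read 9, hence num_blocks = (uv_len - 1) >> 3) and emits 16 output pixels,
// so the scalar tail starts at last_pos = 1 + 16 * num_blocks and covers the
// leftover samples via UPSAMPLE_LAST_BLOCK, which pads by replicating the
// last byte.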

#define NEON_UPSAMPLE_FUNC(FUNC_NAME, FMT, XSTEP) \
static void FUNC_NAME(const uint8_t *top_y, const uint8_t *bottom_y, \
                      const uint8_t *top_u, const uint8_t *top_v, \
                      const uint8_t *cur_u, const uint8_t *cur_v, \
                      uint8_t *top_dst, uint8_t *bottom_dst, int len) { \
  int block; \
  /* 16 byte aligned array to cache reconstructed u and v */ \
  uint8_t uv_buf[2 * 32 + 15]; \
  uint8_t *const r_uv = (uint8_t*)((uintptr_t)(uv_buf + 15) & ~15); \
  const int uv_len = (len + 1) >> 1; \
  /* 9 pixels must be read-able for each block */ \
  const int num_blocks = (uv_len - 1) >> 3; \
  const int leftover = uv_len - num_blocks * 8; \
  const int last_pos = 1 + 16 * num_blocks; \
  \
  const int u_diag = ((top_u[0] + cur_u[0]) >> 1) + 1; \
  const int v_diag = ((top_v[0] + cur_v[0]) >> 1) + 1; \
  \
  const int16x4_t cf16 = vld1_s16(kCoeffs); \
  const int32x2_t cf32 = vdup_n_s32(kUToB); \
  const uint8x8_t u16 = vdup_n_u8(16); \
  const uint8x8_t u128 = vdup_n_u8(128); \
  \
  /* Treat the first pixel in regular way */ \
  assert(top_y != NULL); \
  { \
    const int u0 = (top_u[0] + u_diag) >> 1; \
    const int v0 = (top_v[0] + v_diag) >> 1; \
    VP8YuvTo ## FMT(top_y[0], u0, v0, top_dst); \
  } \
  if (bottom_y != NULL) { \
    const int u0 = (cur_u[0] + u_diag) >> 1; \
    const int v0 = (cur_v[0] + v_diag) >> 1; \
    VP8YuvTo ## FMT(bottom_y[0], u0, v0, bottom_dst); \
  } \
  \
  for (block = 0; block < num_blocks; ++block) { \
    UPSAMPLE_16PIXELS(top_u, cur_u, r_uv); \
    UPSAMPLE_16PIXELS(top_v, cur_v, r_uv + 16); \
    CONVERT2RGB_8(FMT, XSTEP, top_y, bottom_y, r_uv, \
                  top_dst, bottom_dst, 16 * block + 1, 16); \
    top_u += 8; \
    cur_u += 8; \
    top_v += 8; \
    cur_v += 8; \
  } \
  \
  UPSAMPLE_LAST_BLOCK(top_u, cur_u, leftover, r_uv); \
  UPSAMPLE_LAST_BLOCK(top_v, cur_v, leftover, r_uv + 16); \
  CONVERT2RGB_1(VP8YuvTo ## FMT, XSTEP, top_y, bottom_y, r_uv, \
                top_dst, bottom_dst, last_pos, len - last_pos); \
}

// NEON variants of the fancy upsampler.
NEON_UPSAMPLE_FUNC(UpsampleRgbLinePair,  Rgb,  3)
NEON_UPSAMPLE_FUNC(UpsampleBgrLinePair,  Bgr,  3)
NEON_UPSAMPLE_FUNC(UpsampleRgbaLinePair, Rgba, 4)
NEON_UPSAMPLE_FUNC(UpsampleBgraLinePair, Bgra, 4)
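
// Illustrative only (not part of this file): once registered in
// WebPUpsamplers[], a line-pair function is typically driven two luma rows
// at a time, roughly as
//   const WebPUpsampleLinePairFunc upsample = WebPUpsamplers[MODE_RGBA];
//   upsample(top_y, bottom_y, top_u, top_v, cur_u, cur_v,
//            top_dst, bottom_dst, width);
// with the row pointers and width supplied by the caller; the real driver
// lives elsewhere in the decoder.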

#endif  // FANCY_UPSAMPLING

#endif  // WEBP_USE_NEON

//------------------------------------------------------------------------------

extern void WebPInitUpsamplersNEON(void);

#ifdef FANCY_UPSAMPLING

extern WebPUpsampleLinePairFunc WebPUpsamplers[/* MODE_LAST */];

void WebPInitUpsamplersNEON(void) {
#if defined(WEBP_USE_NEON)
  WebPUpsamplers[MODE_RGB]  = UpsampleRgbLinePair;
  WebPUpsamplers[MODE_RGBA] = UpsampleRgbaLinePair;
  WebPUpsamplers[MODE_BGR]  = UpsampleBgrLinePair;
  WebPUpsamplers[MODE_BGRA] = UpsampleBgraLinePair;
  WebPUpsamplers[MODE_rgbA] = UpsampleRgbaLinePair;
  WebPUpsamplers[MODE_bgrA] = UpsampleBgraLinePair;
#endif  // WEBP_USE_NEON
}

#else

// this empty function is to avoid an empty .o
void WebPInitUpsamplersNEON(void) {}

#endif  // FANCY_UPSAMPLING