// Copyright 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "remoting/codec/video_encoder_vpx.h"

#include "base/command_line.h"
#include "base/logging.h"
#include "base/sys_info.h"
#include "remoting/base/util.h"
#include "remoting/proto/video.pb.h"
#include "third_party/libyuv/include/libyuv/convert_from_argb.h"
#include "third_party/webrtc/modules/desktop_capture/desktop_frame.h"
#include "third_party/webrtc/modules/desktop_capture/desktop_geometry.h"
#include "third_party/webrtc/modules/desktop_capture/desktop_region.h"

#define VPX_CODEC_DISABLE_COMPAT 1
#include "third_party/libvpx/source/libvpx/vpx/vpx_encoder.h"
#include "third_party/libvpx/source/libvpx/vpx/vp8cx.h"

namespace remoting {

// Name of command-line flag to enable VP9 to use I444 by default.
const char kEnableI444SwitchName[] = "enable-i444";

// Number of bytes in an RGBx pixel.
const int kBytesPerRgbPixel = 4;

// Defines the dimension of a macro block. This is used to compute the active
// map for the encoder.
const int kMacroBlockSize = 16;

// Magic encoder profile numbers for I420 and I444 input formats.
const int kVp9I420ProfileNumber = 0;
const int kVp9I444ProfileNumber = 1;

void SetCommonCodecParameters(const webrtc::DesktopSize& size,
                              vpx_codec_enc_cfg_t* config) {
  // Use millisecond granularity time base.
  config->g_timebase.num = 1;
  config->g_timebase.den = 1000;
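
  // With this time base, the |pts| value passed to vpx_codec_encode() is
  // interpreted directly as elapsed milliseconds, which is how Encode()
  // below computes its timestamps.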

  // Adjust default target bit-rate to account for actual desktop size.
  config->rc_target_bitrate = size.width() * size.height() *
      config->rc_target_bitrate / config->g_w / config->g_h;
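  // Illustrative example (assumed defaults, for exposition only): if
  // vpx_codec_enc_config_default() returned 256 kbit/s for a 320x240 frame,
  // a 1920x1080 desktop would be scaled to
  // 1920 * 1080 * 256 / 320 / 240 = 6912 kbit/s.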

  config->g_w = size.width();
  config->g_h = size.height();
  config->g_pass = VPX_RC_ONE_PASS;

  // Start emitting packets immediately.
  config->g_lag_in_frames = 0;

  // Since the transport layer is reliable, keyframes should not be necessary.
  // However, due to crbug.com/440223, decoding fails after 30,000 non-key
  // frames, so take the hit of an "unnecessary" key-frame every 10,000 frames.
  config->kf_min_dist = 10000;
  config->kf_max_dist = 10000;

  // Using 2 threads gives a great boost in performance for most systems with
  // adequate processing power. NB: Going to multiple threads on low-end
  // Windows systems can really hurt performance.
  // http://crbug.com/99179
  config->g_threads = (base::SysInfo::NumberOfProcessors() > 2) ? 2 : 1;
}

ScopedVpxCodec CreateVP8Codec(const webrtc::DesktopSize& size) {
  ScopedVpxCodec codec(new vpx_codec_ctx_t);

  // Configure the encoder.
  vpx_codec_enc_cfg_t config;
  const vpx_codec_iface_t* algo = vpx_codec_vp8_cx();
  vpx_codec_err_t ret = vpx_codec_enc_config_default(algo, &config, 0);
  if (ret != VPX_CODEC_OK)
    return ScopedVpxCodec();

  SetCommonCodecParameters(size, &config);

  // Value of 2 means using the real time profile. This is basically a
  // redundant option since we explicitly select real time mode when doing
  // encoding.
  config.g_profile = 2;

  // Clamping the quantizer constrains the worst-case quality and CPU usage.
  config.rc_min_quantizer = 20;
  config.rc_max_quantizer = 30;

  if (vpx_codec_enc_init(codec.get(), algo, &config, 0))
    return ScopedVpxCodec();

  // Value of 16 will have the smallest CPU load. This turns off subpixel
  // motion search.
  if (vpx_codec_control(codec.get(), VP8E_SET_CPUUSED, 16))
    return ScopedVpxCodec();

  // Use the lowest level of noise sensitivity so as to spend less time
  // on motion estimation and inter-prediction mode.
  if (vpx_codec_control(codec.get(), VP8E_SET_NOISE_SENSITIVITY, 0))
    return ScopedVpxCodec();

  return codec.Pass();
}

ScopedVpxCodec CreateVP9Codec(const webrtc::DesktopSize& size,
                              bool lossless_color,
                              bool lossless_encode) {
  ScopedVpxCodec codec(new vpx_codec_ctx_t);

  // Configure the encoder.
  vpx_codec_enc_cfg_t config;
  const vpx_codec_iface_t* algo = vpx_codec_vp9_cx();
  vpx_codec_err_t ret = vpx_codec_enc_config_default(algo, &config, 0);
  if (ret != VPX_CODEC_OK)
    return ScopedVpxCodec();

  SetCommonCodecParameters(size, &config);

  // Configure VP9 for I420 or I444 source frames.
  config.g_profile =
      lossless_color ? kVp9I444ProfileNumber : kVp9I420ProfileNumber;

  if (lossless_encode) {
    // Disable quantization entirely, putting the encoder in "lossless" mode.
    config.rc_min_quantizer = 0;
    config.rc_max_quantizer = 0;
  } else {
    // Lossy encode using the same settings as for VP8.
    config.rc_min_quantizer = 20;
    config.rc_max_quantizer = 30;
  }

  if (vpx_codec_enc_init(codec.get(), algo, &config, 0))
    return ScopedVpxCodec();

  // Request the lowest CPU usage that VP9 supports, which depends on whether
  // we are encoding lossy or lossless.
  // Note that this is configured via the same parameter as for VP8.
  int cpu_used = lossless_encode ? 5 : 6;
  if (vpx_codec_control(codec.get(), VP8E_SET_CPUUSED, cpu_used))
    return ScopedVpxCodec();

  // Use the lowest level of noise sensitivity so as to spend less time
  // on motion estimation and inter-prediction mode.
  if (vpx_codec_control(codec.get(), VP9E_SET_NOISE_SENSITIVITY, 0))
    return ScopedVpxCodec();

  return codec.Pass();
}

void CreateImage(bool use_i444,
                 const webrtc::DesktopSize& size,
                 scoped_ptr<vpx_image_t>* out_image,
                 scoped_ptr<uint8[]>* out_image_buffer) {
  DCHECK(!size.is_empty());

  scoped_ptr<vpx_image_t> image(new vpx_image_t());
  memset(image.get(), 0, sizeof(vpx_image_t));

  // libvpx seems to require both to be assigned.
  image->d_w = size.width();
  image->w = size.width();
  image->d_h = size.height();
  image->h = size.height();

  // libvpx should derive chroma shifts from |fmt| but currently has a bug:
  // https://code.google.com/p/webm/issues/detail?id=627
  if (use_i444) {
    image->fmt = VPX_IMG_FMT_I444;
    image->x_chroma_shift = 0;
    image->y_chroma_shift = 0;
  } else {
    image->fmt = VPX_IMG_FMT_YV12;
    image->x_chroma_shift = 1;
    image->y_chroma_shift = 1;
  }

  // libyuv's fast-path requires 16-byte aligned pointers and strides, so pad
  // the Y, U and V planes' strides to multiples of 16 bytes.
  const int y_stride = ((image->w - 1) & ~15) + 16;
  const int uv_unaligned_stride = y_stride >> image->x_chroma_shift;
  const int uv_stride = ((uv_unaligned_stride - 1) & ~15) + 16;

  // libvpx accesses the source image in macro blocks, and will over-read
  // if the image is not padded out to the next macroblock: crbug.com/119633.
  // Pad the Y, U and V planes' height out to compensate.
  // Assuming macroblocks are 16x16, aligning the planes' strides above also
  // macroblock aligned them.
  DCHECK_EQ(16, kMacroBlockSize);
  const int y_rows =
      ((image->h - 1) & ~(kMacroBlockSize - 1)) + kMacroBlockSize;
  const int uv_rows = y_rows >> image->y_chroma_shift;

  // Allocate a YUV buffer large enough for the aligned data & padding.
  const int buffer_size = y_stride * y_rows + 2 * uv_stride * uv_rows;
  scoped_ptr<uint8[]> image_buffer(new uint8[buffer_size]);
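  // Continuing the illustrative 1366x768 YV12 example: y_rows = 768 (already
  // a multiple of 16), uv_rows = 384, so
  // buffer_size = 1376 * 768 + 2 * 688 * 384 = 1585152 bytes.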

  // Reset image value to 128 so we just need to fill in the y plane.
  memset(image_buffer.get(), 128, buffer_size);
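  // A value of 128 is mid-range for Y and the neutral ("no colour") value for
  // the U and V planes, so any part of the buffer that is never written by
  // PrepareImage() encodes as plain grey.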

  // Fill in the information for |image_|.
  unsigned char* uchar_buffer =
      reinterpret_cast<unsigned char*>(image_buffer.get());
  image->planes[0] = uchar_buffer;
  image->planes[1] = image->planes[0] + y_stride * y_rows;
  image->planes[2] = image->planes[1] + uv_stride * uv_rows;
  image->stride[0] = y_stride;
  image->stride[1] = uv_stride;
  image->stride[2] = uv_stride;

  *out_image = image.Pass();
  *out_image_buffer = image_buffer.Pass();
}

scoped_ptr<VideoEncoderVpx> VideoEncoderVpx::CreateForVP8() {
  return make_scoped_ptr(new VideoEncoderVpx(false));
}

scoped_ptr<VideoEncoderVpx> VideoEncoderVpx::CreateForVP9() {
  return make_scoped_ptr(new VideoEncoderVpx(true));
}

VideoEncoderVpx::~VideoEncoderVpx() {}

void VideoEncoderVpx::SetLosslessEncode(bool want_lossless) {
  if (use_vp9_ && (want_lossless != lossless_encode_)) {
    lossless_encode_ = want_lossless;
    codec_.reset();  // Force encoder re-initialization.
  }
}

void VideoEncoderVpx::SetLosslessColor(bool want_lossless) {
  if (use_vp9_ && (want_lossless != lossless_color_)) {
    lossless_color_ = want_lossless;
    codec_.reset();  // Force encoder re-initialization.
  }
}

scoped_ptr<VideoPacket> VideoEncoderVpx::Encode(
    const webrtc::DesktopFrame& frame) {
  DCHECK_LE(32, frame.size().width());
  DCHECK_LE(32, frame.size().height());

  base::TimeTicks encode_start_time = base::TimeTicks::Now();

  if (!codec_ ||
      !frame.size().equals(webrtc::DesktopSize(image_->w, image_->h))) {
    bool ret = Initialize(frame.size());
    // TODO(hclam): Handle error better.
    CHECK(ret) << "Initialization of encoder failed";

    // Set now as the base for timestamp calculation.
    timestamp_base_ = encode_start_time;
  }

  // Convert the updated capture data ready for encode.
  webrtc::DesktopRegion updated_region;
  PrepareImage(frame, &updated_region);

  // Update active map based on updated region.
  PrepareActiveMap(updated_region);

  // Apply active map to the encoder.
  vpx_active_map_t act_map;
  act_map.rows = active_map_height_;
  act_map.cols = active_map_width_;
  act_map.active_map = active_map_.get();
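  // In libvpx's active map, a value of 1 marks a macroblock as active (to be
  // encoded) and 0 marks it as skipped, so only blocks touching the updated
  // region are re-encoded.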
  if (vpx_codec_control(codec_.get(), VP8E_SET_ACTIVEMAP, &act_map)) {
    LOG(ERROR) << "Unable to apply active map";
  }

  // Do the actual encoding.
  int timestamp = (encode_start_time - timestamp_base_).InMilliseconds();
  vpx_codec_err_t ret = vpx_codec_encode(
      codec_.get(), image_.get(), timestamp, 1, 0, VPX_DL_REALTIME);
  DCHECK_EQ(ret, VPX_CODEC_OK)
      << "Encoding error: " << vpx_codec_err_to_string(ret) << "\n"
      << "Details: " << vpx_codec_error(codec_.get()) << "\n"
      << vpx_codec_error_detail(codec_.get());

  // Read the encoded data.
  vpx_codec_iter_t iter = NULL;
  bool got_data = false;

  // TODO(hclam): Make sure we get exactly one frame from the packet.
  // TODO(hclam): We should provide the output buffer to avoid one copy.
  scoped_ptr<VideoPacket> packet(
      helper_.CreateVideoPacketWithUpdatedRegion(frame, updated_region));
  packet->mutable_format()->set_encoding(VideoPacketFormat::ENCODING_VP8);

  while (!got_data) {
    const vpx_codec_cx_pkt_t* vpx_packet =
        vpx_codec_get_cx_data(codec_.get(), &iter);
    if (!vpx_packet)
      continue;

    switch (vpx_packet->kind) {
      case VPX_CODEC_CX_FRAME_PKT:
        got_data = true;
        packet->set_data(vpx_packet->data.frame.buf,
                         vpx_packet->data.frame.sz);
        break;
      default:
        break;
    }
  }

  // Note the time taken to encode the pixel data.
  packet->set_encode_time_ms(
      (base::TimeTicks::Now() - encode_start_time).InMillisecondsRoundedUp());

  return packet.Pass();
}

VideoEncoderVpx::VideoEncoderVpx(bool use_vp9)
    : use_vp9_(use_vp9),
      lossless_encode_(false),
      lossless_color_(false),
      active_map_width_(0),
      active_map_height_(0) {
  // Use I444 colour space, by default, if specified on the command-line.
  if (CommandLine::ForCurrentProcess()->HasSwitch(kEnableI444SwitchName)) {
    SetLosslessColor(true);
  }
}

bool VideoEncoderVpx::Initialize(const webrtc::DesktopSize& size) {
  DCHECK(use_vp9_ || !lossless_color_);
  DCHECK(use_vp9_ || !lossless_encode_);

  // (Re)Create the VPX image structure and pixel buffer.
  CreateImage(lossless_color_, size, &image_, &image_buffer_);

  // Initialize active map.
  active_map_width_ = (image_->w + kMacroBlockSize - 1) / kMacroBlockSize;
  active_map_height_ = (image_->h + kMacroBlockSize - 1) / kMacroBlockSize;
  active_map_.reset(new uint8[active_map_width_ * active_map_height_]);
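  // For example (illustrative only), a 1366x768 frame yields an active map of
  // (1366 + 15) / 16 = 86 by (768 + 15) / 16 = 48 macroblocks.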

  // (Re)Initialize the codec.
  if (use_vp9_) {
    codec_ = CreateVP9Codec(size, lossless_color_, lossless_encode_);
  } else {
    codec_ = CreateVP8Codec(size);
  }

  return codec_;
}

void VideoEncoderVpx::PrepareImage(const webrtc::DesktopFrame& frame,
                                   webrtc::DesktopRegion* updated_region) {
  if (frame.updated_region().is_empty()) {
    updated_region->Clear();
    return;
  }

  // Align the region to macroblocks, to avoid encoding artefacts.
  // This also ensures that all rectangles have even-aligned top-left, which
  // is required for ConvertRGBToYUVWithRect() to work.
  std::vector<webrtc::DesktopRect> aligned_rects;
  for (webrtc::DesktopRegion::Iterator r(frame.updated_region());
       !r.IsAtEnd(); r.Advance()) {
    const webrtc::DesktopRect& rect = r.rect();
    aligned_rects.push_back(AlignRect(webrtc::DesktopRect::MakeLTRB(
        rect.left(), rect.top(), rect.right(), rect.bottom())));
  }
  DCHECK(!aligned_rects.empty());
  updated_region->Clear();
  updated_region->AddRects(&aligned_rects[0], aligned_rects.size());

  // Clip back to the screen dimensions, in case they're not macroblock
  // aligned. The conversion routines don't require even width & height,
  // so this is safe even if the source dimensions are not even.
  updated_region->IntersectWith(
      webrtc::DesktopRect::MakeWH(image_->w, image_->h));

  // Convert the updated region to YUV ready for encoding.
  const uint8* rgb_data = frame.data();
  const int rgb_stride = frame.stride();
  const int y_stride = image_->stride[0];
  DCHECK_EQ(image_->stride[1], image_->stride[2]);
  const int uv_stride = image_->stride[1];
  uint8* y_data = image_->planes[0];
  uint8* u_data = image_->planes[1];
  uint8* v_data = image_->planes[2];

  switch (image_->fmt) {
    case VPX_IMG_FMT_I444:
      for (webrtc::DesktopRegion::Iterator r(*updated_region); !r.IsAtEnd();
           r.Advance()) {
        const webrtc::DesktopRect& rect = r.rect();
        int rgb_offset = rgb_stride * rect.top() +
                         rect.left() * kBytesPerRgbPixel;
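        // In I444 the chroma planes are full resolution, and CreateImage()
        // gives them the same aligned stride as the Y plane when
        // x_chroma_shift is 0, so a single offset serves all three planes.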
        int yuv_offset = uv_stride * rect.top() + rect.left();
        libyuv::ARGBToI444(rgb_data + rgb_offset, rgb_stride,
                           y_data + yuv_offset, y_stride,
                           u_data + yuv_offset, uv_stride,
                           v_data + yuv_offset, uv_stride,
                           rect.width(), rect.height());
      }
      break;
    case VPX_IMG_FMT_YV12:
      for (webrtc::DesktopRegion::Iterator r(*updated_region); !r.IsAtEnd();
           r.Advance()) {
        const webrtc::DesktopRect& rect = r.rect();
        int rgb_offset = rgb_stride * rect.top() +
                         rect.left() * kBytesPerRgbPixel;
        int y_offset = y_stride * rect.top() + rect.left();
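        // Chroma is subsampled 2x horizontally and vertically in this format,
        // hence the divisions by 2; AlignRect() above guarantees rect.top()
        // and rect.left() are even, so nothing is lost to rounding.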
        int uv_offset = uv_stride * rect.top() / 2 + rect.left() / 2;
        libyuv::ARGBToI420(rgb_data + rgb_offset, rgb_stride,
                           y_data + y_offset, y_stride,
                           u_data + uv_offset, uv_stride,
                           v_data + uv_offset, uv_stride,
                           rect.width(), rect.height());
      }
      break;
  }
}

void VideoEncoderVpx::PrepareActiveMap(
    const webrtc::DesktopRegion& updated_region) {
  // Clear active map first.
  memset(active_map_.get(), 0, active_map_width_ * active_map_height_);

  // Mark updated areas active.
  for (webrtc::DesktopRegion::Iterator r(updated_region); !r.IsAtEnd();
       r.Advance()) {
    const webrtc::DesktopRect& rect = r.rect();
    int left = rect.left() / kMacroBlockSize;
    int right = (rect.right() - 1) / kMacroBlockSize;
    int top = rect.top() / kMacroBlockSize;
    int bottom = (rect.bottom() - 1) / kMacroBlockSize;
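    // For example (illustrative only), a rect spanning pixels (10, 20) to
    // (50, 60) exclusive covers macroblock columns 0..3 and rows 1..3:
    // left = 10 / 16 = 0, right = (50 - 1) / 16 = 3,
    // top = 20 / 16 = 1, bottom = (60 - 1) / 16 = 3.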
    DCHECK_LT(right, active_map_width_);
    DCHECK_LT(bottom, active_map_height_);

    uint8* map = active_map_.get() + top * active_map_width_;
    for (int y = top; y <= bottom; ++y) {
      for (int x = left; x <= right; ++x)
        map[x] = 1;
      map += active_map_width_;
    }
  }
}

}  // namespace remoting