remoting/codec/video_encoder_vpx.cc

// Copyright 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "remoting/codec/video_encoder_vpx.h"

#include "base/bind.h"
#include "base/command_line.h"
#include "base/logging.h"
#include "base/sys_info.h"
#include "remoting/base/util.h"
#include "remoting/proto/video.pb.h"
#include "third_party/libyuv/include/libyuv/convert_from_argb.h"
#include "third_party/webrtc/modules/desktop_capture/desktop_frame.h"
#include "third_party/webrtc/modules/desktop_capture/desktop_geometry.h"
#include "third_party/webrtc/modules/desktop_capture/desktop_region.h"

extern "C" {
#define VPX_CODEC_DISABLE_COMPAT 1
#include "third_party/libvpx/source/libvpx/vpx/vpx_encoder.h"
#include "third_party/libvpx/source/libvpx/vpx/vp8cx.h"
}

namespace remoting {

namespace {

// Name of command-line flag to enable VP9 to use I444 by default.
const char kEnableI444SwitchName[] = "enable-i444";

// Number of bytes in an RGBx pixel.
const int kBytesPerRgbPixel = 4;

// Defines the dimension of a macro block. This is used to compute the active
// map for the encoder.
const int kMacroBlockSize = 16;

// Magic encoder profile numbers for I420 and I444 input formats.
const int kVp9I420ProfileNumber = 0;
const int kVp9I444ProfileNumber = 1;

void SetCommonCodecParameters(vpx_codec_enc_cfg_t* config,
                              const webrtc::DesktopSize& size) {
  // Use millisecond granularity time base.
  config->g_timebase.num = 1;
  config->g_timebase.den = 1000;

  // Adjust default target bit-rate to account for actual desktop size.
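  // Note: this must stay ahead of the |g_w|/|g_h| assignments below, since it
  // scales the library's default bit-rate (chosen for the default frame size)
  // by the ratio of the requested pixel area to that default area.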
  config->rc_target_bitrate = size.width() * size.height() *
      config->rc_target_bitrate / config->g_w / config->g_h;

  config->g_w = size.width();
  config->g_h = size.height();
  config->g_pass = VPX_RC_ONE_PASS;

  // Start emitting packets immediately.
  config->g_lag_in_frames = 0;

  // Since the transport layer is reliable, keyframes should not be necessary.
  // However, due to crbug.com/440223, decoding fails after 30,000 non-key
  // frames, so take the hit of an "unnecessary" key-frame every 10,000 frames.
  config->kf_min_dist = 10000;
  config->kf_max_dist = 10000;

  // Using 2 threads gives a great boost in performance for most systems with
  // adequate processing power. NB: Going to multiple threads on low-end
  // Windows systems can really hurt performance.
  // http://crbug.com/99179
  config->g_threads = (base::SysInfo::NumberOfProcessors() > 2) ? 2 : 1;
}

void SetVp8CodecParameters(vpx_codec_enc_cfg_t* config,
                           const webrtc::DesktopSize& size) {
  SetCommonCodecParameters(config, size);

  // Value of 2 means using the real time profile. This is basically a
  // redundant option since we explicitly select real time mode when doing
  // encoding.
  config->g_profile = 2;

  // Clamping the quantizer constrains the worst-case quality and CPU usage.
  config->rc_min_quantizer = 20;
  config->rc_max_quantizer = 30;
}

void SetVp9CodecParameters(vpx_codec_enc_cfg_t* config,
                           const webrtc::DesktopSize& size,
                           bool lossless_color,
                           bool lossless_encode) {
  SetCommonCodecParameters(config, size);

  // Configure VP9 for I420 or I444 source frames.
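  // Profile 0 only supports 4:2:0 (I420) sampling; profile 1 is required for
  // the 4:4:4 (I444) sampling used when lossless color is requested.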
  config->g_profile =
      lossless_color ? kVp9I444ProfileNumber : kVp9I420ProfileNumber;

  if (lossless_encode) {
    // Disable quantization entirely, putting the encoder in "lossless" mode.
    config->rc_min_quantizer = 0;
    config->rc_max_quantizer = 0;
  } else {
    // Lossy encode using the same settings as for VP8.
    config->rc_min_quantizer = 20;
    config->rc_max_quantizer = 30;
  }
}

void SetVp8CodecOptions(vpx_codec_ctx_t* codec) {
  // CPUUSED of 16 will have the smallest CPU load. This turns off sub-pixel
  // motion search.
  vpx_codec_err_t ret = vpx_codec_control(codec, VP8E_SET_CPUUSED, 16);
  DCHECK_EQ(VPX_CODEC_OK, ret) << "Failed to set CPUUSED";

  // Use the lowest level of noise sensitivity so as to spend less time
  // on motion estimation and inter-prediction mode.
  ret = vpx_codec_control(codec, VP8E_SET_NOISE_SENSITIVITY, 0);
  DCHECK_EQ(VPX_CODEC_OK, ret) << "Failed to set noise sensitivity";
}

void SetVp9CodecOptions(vpx_codec_ctx_t* codec, bool lossless_encode) {
  // Request the lowest-CPU usage that VP9 supports, which depends on whether
  // we are encoding lossy or lossless.
  // Note that this is configured via the same parameter as for VP8.
  int cpu_used = lossless_encode ? 5 : 6;
  vpx_codec_err_t ret = vpx_codec_control(codec, VP8E_SET_CPUUSED, cpu_used);
  DCHECK_EQ(VPX_CODEC_OK, ret) << "Failed to set CPUUSED";

  // Use the lowest level of noise sensitivity so as to spend less time
  // on motion estimation and inter-prediction mode.
  ret = vpx_codec_control(codec, VP9E_SET_NOISE_SENSITIVITY, 0);
  DCHECK_EQ(VPX_CODEC_OK, ret) << "Failed to set noise sensitivity";

  // Configure the codec to tune it for screen media.
  ret = vpx_codec_control(
      codec, VP9E_SET_TUNE_CONTENT, VP9E_CONTENT_SCREEN);
  DCHECK_EQ(VPX_CODEC_OK, ret) << "Failed to set screen content mode";
}

void CreateImage(bool use_i444,
                 const webrtc::DesktopSize& size,
                 scoped_ptr<vpx_image_t>* out_image,
                 scoped_ptr<uint8[]>* out_image_buffer) {
  DCHECK(!size.is_empty());

  scoped_ptr<vpx_image_t> image(new vpx_image_t());
  memset(image.get(), 0, sizeof(vpx_image_t));

  // libvpx seems to require both to be assigned.
  image->d_w = size.width();
  image->w = size.width();
  image->d_h = size.height();
  image->h = size.height();

  // libvpx should derive chroma shifts from |fmt| but currently has a bug:
  // https://code.google.com/p/webm/issues/detail?id=627
  if (use_i444) {
    image->fmt = VPX_IMG_FMT_I444;
    image->x_chroma_shift = 0;
    image->y_chroma_shift = 0;
  } else {  // I420
    image->fmt = VPX_IMG_FMT_YV12;
    image->x_chroma_shift = 1;
    image->y_chroma_shift = 1;
  }

  // libyuv's fast-path requires 16-byte aligned pointers and strides, so pad
  // the Y, U and V planes' strides to multiples of 16 bytes.
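  // ((n - 1) & ~15) + 16 rounds n up to a multiple of 16; e.g. a 1366-pixel
  // wide I420 frame gets y_stride = 1376 and uv_stride = 688.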
  const int y_stride = ((image->w - 1) & ~15) + 16;
  const int uv_unaligned_stride = y_stride >> image->x_chroma_shift;
  const int uv_stride = ((uv_unaligned_stride - 1) & ~15) + 16;

  // libvpx accesses the source image in macro blocks, and will over-read
  // if the image is not padded out to the next macroblock: crbug.com/119633.
  // Pad the Y, U and V planes' height out to compensate.
  // Assuming macroblocks are 16x16, aligning the planes' strides above also
  // macroblock aligned them.
  static_assert(kMacroBlockSize == 16, "macroblock_size_not_16");
  const int y_rows = ((image->h - 1) & ~(kMacroBlockSize-1)) + kMacroBlockSize;
  const int uv_rows = y_rows >> image->y_chroma_shift;

  // Allocate a YUV buffer large enough for the aligned data & padding.
  const int buffer_size = y_stride * y_rows + 2 * uv_stride * uv_rows;
  scoped_ptr<uint8[]> image_buffer(new uint8[buffer_size]);

  // Reset image value to 128 so we just need to fill in the y plane.
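  // (128 is the neutral chroma value, so U/V bytes that are never overwritten
  // decode to grey rather than introducing a color cast.)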
  memset(image_buffer.get(), 128, buffer_size);

  // Fill in the information for |image|.
  unsigned char* uchar_buffer =
      reinterpret_cast<unsigned char*>(image_buffer.get());
  image->planes[0] = uchar_buffer;
  image->planes[1] = image->planes[0] + y_stride * y_rows;
  image->planes[2] = image->planes[1] + uv_stride * uv_rows;
  image->stride[0] = y_stride;
  image->stride[1] = uv_stride;
  image->stride[2] = uv_stride;

  *out_image = image.Pass();
  *out_image_buffer = image_buffer.Pass();
}

}  // namespace

// static
scoped_ptr<VideoEncoderVpx> VideoEncoderVpx::CreateForVP8() {
  return make_scoped_ptr(new VideoEncoderVpx(false));
}

// static
scoped_ptr<VideoEncoderVpx> VideoEncoderVpx::CreateForVP9() {
  return make_scoped_ptr(new VideoEncoderVpx(true));
}

VideoEncoderVpx::~VideoEncoderVpx() {}

void VideoEncoderVpx::SetLosslessEncode(bool want_lossless) {
  if (use_vp9_ && (want_lossless != lossless_encode_)) {
    lossless_encode_ = want_lossless;
    if (codec_)
      Configure(webrtc::DesktopSize(image_->w, image_->h));
  }
}

void VideoEncoderVpx::SetLosslessColor(bool want_lossless) {
  if (use_vp9_ && (want_lossless != lossless_color_)) {
    lossless_color_ = want_lossless;
    // TODO(wez): Switch to ConfigureCodec() path once libvpx supports it.
    // See https://code.google.com/p/webm/issues/detail?id=913.
    // if (codec_)
    //   Configure(webrtc::DesktopSize(image_->w, image_->h));
    codec_.reset();
  }
}

scoped_ptr<VideoPacket> VideoEncoderVpx::Encode(
    const webrtc::DesktopFrame& frame) {
  DCHECK_LE(32, frame.size().width());
  DCHECK_LE(32, frame.size().height());

  base::TimeTicks encode_start_time = base::TimeTicks::Now();

  // Create or reconfigure the codec to match the size of |frame|.
  if (!codec_ ||
      !frame.size().equals(webrtc::DesktopSize(image_->w, image_->h))) {
    Configure(frame.size());
  }

  // Convert the updated capture data ready for encode.
  webrtc::DesktopRegion updated_region;
  PrepareImage(frame, &updated_region);

  // Update active map based on updated region.
  PrepareActiveMap(updated_region);

  // Apply active map to the encoder.
  vpx_active_map_t act_map;
  act_map.rows = active_map_height_;
  act_map.cols = active_map_width_;
  act_map.active_map = active_map_.get();
  if (vpx_codec_control(codec_.get(), VP8E_SET_ACTIVEMAP, &act_map)) {
    LOG(ERROR) << "Unable to apply active map";
  }

  // Do the actual encoding.
  int timestamp = (encode_start_time - timestamp_base_).InMilliseconds();
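  // |timestamp| is in milliseconds because the codec uses a 1/1000 time base
  // (see SetCommonCodecParameters); the frame is given a nominal duration of
  // one timebase unit and encoded against the realtime deadline.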
  vpx_codec_err_t ret = vpx_codec_encode(
      codec_.get(), image_.get(), timestamp, 1, 0, VPX_DL_REALTIME);
  DCHECK_EQ(ret, VPX_CODEC_OK)
      << "Encoding error: " << vpx_codec_err_to_string(ret) << "\n"
      << "Details: " << vpx_codec_error(codec_.get()) << "\n"
      << vpx_codec_error_detail(codec_.get());

  // Read the encoded data.
  vpx_codec_iter_t iter = NULL;
  bool got_data = false;

  // TODO(hclam): Make sure we get exactly one frame from the packet.
  // TODO(hclam): We should provide the output buffer to avoid one copy.
  scoped_ptr<VideoPacket> packet(
      helper_.CreateVideoPacketWithUpdatedRegion(frame, updated_region));
  packet->mutable_format()->set_encoding(VideoPacketFormat::ENCODING_VP8);

  while (!got_data) {
    const vpx_codec_cx_pkt_t* vpx_packet =
        vpx_codec_get_cx_data(codec_.get(), &iter);
    if (!vpx_packet)
      continue;

    switch (vpx_packet->kind) {
      case VPX_CODEC_CX_FRAME_PKT:
        got_data = true;
        packet->set_data(vpx_packet->data.frame.buf, vpx_packet->data.frame.sz);
        break;
      default:
        break;
    }
  }

  // Note the time taken to encode the pixel data.
  packet->set_encode_time_ms(
      (base::TimeTicks::Now() - encode_start_time).InMillisecondsRoundedUp());

  return packet.Pass();
}

VideoEncoderVpx::VideoEncoderVpx(bool use_vp9)
    : use_vp9_(use_vp9),
      lossless_encode_(false),
      lossless_color_(false),
      active_map_width_(0),
      active_map_height_(0) {
  if (use_vp9_) {
    // Use I444 colour space, by default, if specified on the command-line.
    if (base::CommandLine::ForCurrentProcess()->HasSwitch(
            kEnableI444SwitchName)) {
      SetLosslessColor(true);
    }
  }
}

void VideoEncoderVpx::Configure(const webrtc::DesktopSize& size) {
  DCHECK(use_vp9_ || !lossless_color_);
  DCHECK(use_vp9_ || !lossless_encode_);

  // (Re)Create the VPX image structure and pixel buffer.
  CreateImage(lossless_color_, size, &image_, &image_buffer_);

  // Initialize active map.
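  // The map holds one byte per 16x16 macroblock; partial blocks at the right
  // and bottom edges are rounded up to a whole block.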
  active_map_width_ = (size.width() + kMacroBlockSize - 1) / kMacroBlockSize;
  active_map_height_ = (size.height() + kMacroBlockSize - 1) / kMacroBlockSize;
  active_map_.reset(new uint8[active_map_width_ * active_map_height_]);

  // TODO(wez): Remove this hack once VPX can handle frame size reconfiguration.
  // See https://code.google.com/p/webm/issues/detail?id=912.
  if (codec_) {
    // If the frame size has changed then force re-creation of the codec.
    if (codec_->config.enc->g_w != static_cast<unsigned int>(size.width()) ||
        codec_->config.enc->g_h != static_cast<unsigned int>(size.height())) {
      codec_.reset();
    }
  }

  // (Re)Set the base for frame timestamps if the codec is being (re)created.
  if (!codec_) {
    timestamp_base_ = base::TimeTicks::Now();
  }

  // Fetch a default configuration for the desired codec.
  const vpx_codec_iface_t* interface =
      use_vp9_ ? vpx_codec_vp9_cx() : vpx_codec_vp8_cx();
  vpx_codec_enc_cfg_t config;
  vpx_codec_err_t ret = vpx_codec_enc_config_default(interface, &config, 0);
  DCHECK_EQ(VPX_CODEC_OK, ret) << "Failed to fetch default configuration";

  // Customize the default configuration to our needs.
  if (use_vp9_) {
    SetVp9CodecParameters(&config, size, lossless_color_, lossless_encode_);
  } else {
    SetVp8CodecParameters(&config, size);
  }

  // Initialize or re-configure the codec with the custom configuration.
  if (!codec_) {
    codec_.reset(new vpx_codec_ctx_t);
    ret = vpx_codec_enc_init(codec_.get(), interface, &config, 0);
    CHECK_EQ(VPX_CODEC_OK, ret) << "Failed to initialize codec";
  } else {
    ret = vpx_codec_enc_config_set(codec_.get(), &config);
    CHECK_EQ(VPX_CODEC_OK, ret) << "Failed to reconfigure codec";
  }

  // Apply further customizations to the codec now it's initialized.
  if (use_vp9_) {
    SetVp9CodecOptions(codec_.get(), lossless_encode_);
  } else {
    SetVp8CodecOptions(codec_.get());
  }
}

void VideoEncoderVpx::PrepareImage(const webrtc::DesktopFrame& frame,
                                   webrtc::DesktopRegion* updated_region) {
  if (frame.updated_region().is_empty()) {
    updated_region->Clear();
    return;
  }

  // Pad each rectangle to prevent the block-artefact filters in libvpx from
  // introducing artefacts; VP9 includes up to 8px either side, and VP8 up to
  // 3px, so unchanged pixels up to that far out may still be affected by the
  // changes in the updated region, and so must be listed in the active map.
  // After padding we align each rectangle to 16x16 active-map macroblocks.
  // This implicitly ensures all rects have even top-left coords, which is
  // required by ConvertRGBToYUVWithRect().
  // TODO(wez): Do we still need 16x16 align, or is even alignment sufficient?
  updated_region->Clear();
  int padding = use_vp9_ ? 8 : 3;
  for (webrtc::DesktopRegion::Iterator r(frame.updated_region());
       !r.IsAtEnd(); r.Advance()) {
    const webrtc::DesktopRect& rect = r.rect();
    updated_region->AddRect(AlignRect(webrtc::DesktopRect::MakeLTRB(
        rect.left() - padding, rect.top() - padding, rect.right() + padding,
        rect.bottom() + padding)));
  }
  DCHECK(!updated_region->is_empty());

  // Clip back to the screen dimensions, in case they're not macroblock aligned.
  // The conversion routines don't require even width & height, so this is safe
  // even if the source dimensions are not even.
  updated_region->IntersectWith(
      webrtc::DesktopRect::MakeWH(image_->w, image_->h));

  // Convert the updated region to YUV ready for encoding.
  const uint8* rgb_data = frame.data();
  const int rgb_stride = frame.stride();
  const int y_stride = image_->stride[0];
  DCHECK_EQ(image_->stride[1], image_->stride[2]);
  const int uv_stride = image_->stride[1];
  uint8* y_data = image_->planes[0];
  uint8* u_data = image_->planes[1];
  uint8* v_data = image_->planes[2];

  switch (image_->fmt) {
    case VPX_IMG_FMT_I444:
      for (webrtc::DesktopRegion::Iterator r(*updated_region); !r.IsAtEnd();
           r.Advance()) {
        const webrtc::DesktopRect& rect = r.rect();
        int rgb_offset = rgb_stride * rect.top() +
                         rect.left() * kBytesPerRgbPixel;
        int yuv_offset = uv_stride * rect.top() + rect.left();
        libyuv::ARGBToI444(rgb_data + rgb_offset, rgb_stride,
                           y_data + yuv_offset, y_stride,
                           u_data + yuv_offset, uv_stride,
                           v_data + yuv_offset, uv_stride,
                           rect.width(), rect.height());
      }
      break;
    case VPX_IMG_FMT_YV12:
      for (webrtc::DesktopRegion::Iterator r(*updated_region); !r.IsAtEnd();
           r.Advance()) {
        const webrtc::DesktopRect& rect = r.rect();
        int rgb_offset = rgb_stride * rect.top() +
                         rect.left() * kBytesPerRgbPixel;
        int y_offset = y_stride * rect.top() + rect.left();
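        // The I420 chroma planes are subsampled 2x2, so the U/V offset uses
        // half the row and column; AlignRect() above guarantees even coords.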
        int uv_offset = uv_stride * rect.top() / 2 + rect.left() / 2;
        libyuv::ARGBToI420(rgb_data + rgb_offset, rgb_stride,
                           y_data + y_offset, y_stride,
                           u_data + uv_offset, uv_stride,
                           v_data + uv_offset, uv_stride,
                           rect.width(), rect.height());
      }
      break;
    default:
      NOTREACHED();
      break;
  }
}

void VideoEncoderVpx::PrepareActiveMap(
    const webrtc::DesktopRegion& updated_region) {
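  // The active map is a row-major grid with one byte per 16x16 macroblock;
  // blocks left at zero are treated as unchanged by the encoder.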
  // Clear active map first.
  memset(active_map_.get(), 0, active_map_width_ * active_map_height_);

  // Mark updated areas active.
  for (webrtc::DesktopRegion::Iterator r(updated_region); !r.IsAtEnd();
       r.Advance()) {
    const webrtc::DesktopRect& rect = r.rect();
    int left = rect.left() / kMacroBlockSize;
    int right = (rect.right() - 1) / kMacroBlockSize;
    int top = rect.top() / kMacroBlockSize;
    int bottom = (rect.bottom() - 1) / kMacroBlockSize;
    DCHECK_LT(right, active_map_width_);
    DCHECK_LT(bottom, active_map_height_);

    uint8* map = active_map_.get() + top * active_map_width_;
    for (int y = top; y <= bottom; ++y) {
      for (int x = left; x <= right; ++x)
        map[x] = 1;
      map += active_map_width_;
    }
  }
}

}  // namespace remoting