Add ICU message format support
[chromium-blink-merge.git] / content / renderer / pepper / video_decoder_shim.cc
blob512211778a819f102f02f9cc052a8cccb241bbd1
1 // Copyright 2014 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
#include "content/renderer/pepper/video_decoder_shim.h"

#include <GLES2/gl2.h>
#include <GLES2/gl2ext.h>
#include <GLES2/gl2extchromium.h>

#include <queue>

#include "base/bind.h"
#include "base/location.h"
#include "base/logging.h"
#include "base/numerics/safe_conversions.h"
#include "base/single_thread_task_runner.h"
#include "base/thread_task_runner_handle.h"
#include "cc/blink/context_provider_web_context.h"
#include "content/public/renderer/render_thread.h"
#include "content/renderer/pepper/pepper_video_decoder_host.h"
#include "content/renderer/render_thread_impl.h"
#include "gpu/command_buffer/client/gles2_implementation.h"
#include "media/base/decoder_buffer.h"
#include "media/base/limits.h"
#include "media/base/video_decoder.h"
#include "media/blink/skcanvas_video_renderer.h"
#include "media/filters/ffmpeg_video_decoder.h"
#include "media/filters/vpx_video_decoder.h"
#include "media/video/picture.h"
#include "media/video/video_decode_accelerator.h"
#include "ppapi/c/pp_errors.h"
#include "third_party/skia/include/gpu/GrTypes.h"
35 namespace content {
// GrContext state that the YUVConverter's raw GL calls may clobber; passed to
// InvalidateGrContext() so Skia knows its cached GL state is stale.
static const uint32_t kGrInvalidateState =
    kRenderTarget_GrGLBackendState | kTextureBinding_GrGLBackendState |
    kView_GrGLBackendState | kVertex_GrGLBackendState |
    kProgram_GrGLBackendState | kPixelStore_GrGLBackendState;
// YUV->RGB converter class using a shader and FBO.
class VideoDecoderShim::YUVConverter {
 public:
  YUVConverter(const scoped_refptr<cc_blink::ContextProviderWebContext>&);
  ~YUVConverter();
  bool Initialize();
  // Renders |frame|'s planes through the YUV->RGB shader into |tex_out|.
  void Convert(const scoped_refptr<media::VideoFrame>& frame, GLuint tex_out);

 private:
  GLuint CreateShader();
  GLuint CompileShader(const char* name, GLuint type, const char* code);
  GLuint CreateProgram(const char* name, GLuint vshader, GLuint fshader);
  GLuint CreateTexture();

  scoped_refptr<cc_blink::ContextProviderWebContext> context_provider_;
  gpu::gles2::GLES2Interface* gl_;
  GLuint frame_buffer_;
  GLuint vertex_buffer_;
  GLuint program_;

  // One source texture per plane; |a_texture_| holds either the real alpha
  // plane (YV12A) or a small constant full-alpha texture.
  GLuint y_texture_;
  GLuint u_texture_;
  GLuint v_texture_;
  GLuint a_texture_;

  // GL_RED_EXT when texture_rg is available, GL_LUMINANCE otherwise.
  GLuint internal_format_;
  GLuint format_;
  media::VideoPixelFormat video_format_;

  // Current texture dimensions; textures are re-allocated when these change.
  GLuint y_width_;
  GLuint y_height_;

  GLuint uv_width_;
  GLuint uv_height_;
  // Chroma subsampling factors derived from |video_format_| (420/422/444).
  uint32_t uv_height_divisor_;
  uint32_t uv_width_divisor_;

  GLint yuv_matrix_loc_;
  GLint yuv_adjust_loc_;

  DISALLOW_COPY_AND_ASSIGN(YUVConverter);
};
VideoDecoderShim::YUVConverter::YUVConverter(
    const scoped_refptr<cc_blink::ContextProviderWebContext>& context_provider)
    : context_provider_(context_provider),
      gl_(context_provider_->ContextGL()),
      frame_buffer_(0),
      vertex_buffer_(0),
      program_(0),
      y_texture_(0),
      u_texture_(0),
      v_texture_(0),
      a_texture_(0),
      internal_format_(0),
      format_(0),
      video_format_(media::PIXEL_FORMAT_UNKNOWN),
      // 2x2 matches the placeholder texture size allocated in CreateTexture().
      y_width_(2),
      y_height_(2),
      uv_width_(2),
      uv_height_(2),
      uv_height_divisor_(1),
      uv_width_divisor_(1),
      yuv_matrix_loc_(0),
      yuv_adjust_loc_(0) {
  DCHECK(gl_);
}
110 VideoDecoderShim::YUVConverter::~YUVConverter() {
111 if (y_texture_)
112 gl_->DeleteTextures(1, &y_texture_);
114 if (u_texture_)
115 gl_->DeleteTextures(1, &u_texture_);
117 if (v_texture_)
118 gl_->DeleteTextures(1, &v_texture_);
120 if (a_texture_)
121 gl_->DeleteTextures(1, &a_texture_);
123 if (frame_buffer_)
124 gl_->DeleteFramebuffers(1, &frame_buffer_);
126 if (vertex_buffer_)
127 gl_->DeleteBuffers(1, &vertex_buffer_);
129 if (program_)
130 gl_->DeleteProgram(program_);
133 GLuint VideoDecoderShim::YUVConverter::CreateTexture() {
134 GLuint tex = 0;
136 gl_->GenTextures(1, &tex);
137 gl_->BindTexture(GL_TEXTURE_2D, tex);
139 // Create texture with default size - will be resized upon first frame.
140 gl_->TexImage2D(GL_TEXTURE_2D, 0, internal_format_, 2, 2, 0, format_,
141 GL_UNSIGNED_BYTE, NULL);
143 gl_->TexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
144 gl_->TexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
145 gl_->TexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
146 gl_->TexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
148 gl_->BindTexture(GL_TEXTURE_2D, 0);
150 return tex;
153 GLuint VideoDecoderShim::YUVConverter::CompileShader(const char* name,
154 GLuint type,
155 const char* code) {
156 GLuint shader = gl_->CreateShader(type);
158 gl_->ShaderSource(shader, 1, (const GLchar**)&code, NULL);
159 gl_->CompileShader(shader);
161 #ifndef NDEBUG
162 GLint status = 0;
164 gl_->GetShaderiv(shader, GL_COMPILE_STATUS, &status);
165 if (status != GL_TRUE) {
166 GLint max_length = 0;
167 GLint actual_length = 0;
168 gl_->GetShaderiv(shader, GL_INFO_LOG_LENGTH, &max_length);
170 // The max_length includes the NULL character.
171 std::string error_log(max_length, 0);
172 gl_->GetShaderInfoLog(shader, max_length, &actual_length, &error_log[0]);
174 LOG(ERROR) << name << " shader compilation failed: " << error_log.c_str();
175 gl_->DeleteShader(shader);
176 return 0;
178 #endif
180 return shader;
183 GLuint VideoDecoderShim::YUVConverter::CreateProgram(const char* name,
184 GLuint vshader,
185 GLuint fshader) {
186 GLuint program = gl_->CreateProgram();
187 gl_->AttachShader(program, vshader);
188 gl_->AttachShader(program, fshader);
190 gl_->BindAttribLocation(program, 0, "position");
192 gl_->LinkProgram(program);
194 #ifndef NDEBUG
195 GLint status = 0;
197 gl_->GetProgramiv(program, GL_LINK_STATUS, &status);
198 if (status != GL_TRUE) {
199 GLint max_length = 0;
200 GLint actual_length = 0;
201 gl_->GetProgramiv(program, GL_INFO_LOG_LENGTH, &max_length);
203 // The max_length includes the NULL character.
204 std::string error_log(max_length, 0);
205 gl_->GetProgramInfoLog(program, max_length, &actual_length, &error_log[0]);
207 LOG(ERROR) << name << " program linking failed: " << error_log.c_str();
208 return 0;
210 #endif
212 return program;
// Builds the YUV->RGB program: compiles both shaders, links them, and binds
// the four plane samplers to texture units 0-3. Returns the program id, or 0
// on failure. Caches the yuv_matrix/yuv_adjust uniform locations.
GLuint VideoDecoderShim::YUVConverter::CreateShader() {
  // Pass-through quad; texcoords are derived from clip-space positions.
  const char* vert_shader =
      "precision mediump float;\n"
      "attribute vec2 position;\n"
      "varying vec2 texcoord;\n"
      "void main()\n"
      "{\n"
      "    gl_Position = vec4( position.xy, 0, 1 );\n"
      "    texcoord = position*0.5+0.5;\n"
      "}";

  // Samples Y/U/V/A planes, applies the color-space matrix and offset.
  const char* frag_shader =
      "precision mediump float;\n"
      "varying vec2 texcoord;\n"
      "uniform sampler2D y_sampler;\n"
      "uniform sampler2D u_sampler;\n"
      "uniform sampler2D v_sampler;\n"
      "uniform sampler2D a_sampler;\n"
      "uniform mat3 yuv_matrix;\n"
      "uniform vec3 yuv_adjust;\n"
      "void main()\n"
      "{\n"
      "  vec3 yuv = vec3(texture2D(y_sampler, texcoord).x,\n"
      "                  texture2D(u_sampler, texcoord).x,\n"
      "                  texture2D(v_sampler, texcoord).x) +\n"
      "             yuv_adjust;\n"
      "  gl_FragColor = vec4(yuv_matrix * yuv, texture2D(a_sampler, "
      "texcoord).x);\n"
      "}";

  GLuint vertex_shader =
      CompileShader("Vertex Shader", GL_VERTEX_SHADER, vert_shader);
  if (!vertex_shader) {
    return 0;
  }

  GLuint fragment_shader =
      CompileShader("Fragment Shader", GL_FRAGMENT_SHADER, frag_shader);
  if (!fragment_shader) {
    gl_->DeleteShader(vertex_shader);
    return 0;
  }

  GLuint program =
      CreateProgram("YUVConverter Program", vertex_shader, fragment_shader);

  // The shaders are owned by the program after linking; drop our references.
  gl_->DeleteShader(vertex_shader);
  gl_->DeleteShader(fragment_shader);

  if (!program) {
    return 0;
  }

  // Bind each sampler uniform to a fixed texture unit (Y=0, U=1, V=2, A=3).
  // Uniform1i requires the program to be current.
  gl_->UseProgram(program);

  GLint uniform_location;
  uniform_location = gl_->GetUniformLocation(program, "y_sampler");
  DCHECK(uniform_location != -1);
  gl_->Uniform1i(uniform_location, 0);

  uniform_location = gl_->GetUniformLocation(program, "u_sampler");
  DCHECK(uniform_location != -1);
  gl_->Uniform1i(uniform_location, 1);

  uniform_location = gl_->GetUniformLocation(program, "v_sampler");
  DCHECK(uniform_location != -1);
  gl_->Uniform1i(uniform_location, 2);

  uniform_location = gl_->GetUniformLocation(program, "a_sampler");
  DCHECK(uniform_location != -1);
  gl_->Uniform1i(uniform_location, 3);

  gl_->UseProgram(0);

  // Cache uniform locations; Convert() updates them when the format changes.
  yuv_matrix_loc_ = gl_->GetUniformLocation(program, "yuv_matrix");
  DCHECK(yuv_matrix_loc_ != -1);

  yuv_adjust_loc_ = gl_->GetUniformLocation(program, "yuv_adjust");
  DCHECK(yuv_adjust_loc_ != -1);

  return program;
}
// One-time GL setup: picks the plane texture format, allocates the FBO,
// plane textures and quad vertex buffer, and builds the shader program.
// Returns false if the shader fails to build or the context lacks the
// required 4 texture units.
bool VideoDecoderShim::YUVConverter::Initialize() {
  // If texture_rg extension is not available, use slower GL_LUMINANCE.
  if (context_provider_->ContextCapabilities().gpu.texture_rg) {
    internal_format_ = GL_RED_EXT;
    format_ = GL_RED_EXT;
  } else {
    internal_format_ = GL_LUMINANCE;
    format_ = GL_LUMINANCE;
  }

  if (context_provider_->ContextCapabilities().gpu.max_texture_image_units <
      4) {
    // We support YUVA textures and require 4 texture units in the fragment
    // stage.
    return false;
  }

  gl_->TraceBeginCHROMIUM("YUVConverter", "YUVConverterContext");
  gl_->GenFramebuffers(1, &frame_buffer_);

  y_texture_ = CreateTexture();
  u_texture_ = CreateTexture();
  v_texture_ = CreateTexture();
  a_texture_ = CreateTexture();

  // Vertex positions. Also converted to texcoords in vertex shader.
  GLfloat vertex_positions[] = {-1.f, -1.f, 1.f, -1.f, -1.f, 1.f, 1.f, 1.f};

  gl_->GenBuffers(1, &vertex_buffer_);
  gl_->BindBuffer(GL_ARRAY_BUFFER, vertex_buffer_);
  gl_->BufferData(GL_ARRAY_BUFFER, 2 * sizeof(GLfloat) * 4, vertex_positions,
                  GL_STATIC_DRAW);
  gl_->BindBuffer(GL_ARRAY_BUFFER, 0);

  program_ = CreateShader();

  gl_->TraceEndCHROMIUM();

  // We touched GL state behind Skia's back; invalidate its caches.
  context_provider_->InvalidateGrContext(kGrInvalidateState);

  return (program_ != 0);
}
// Uploads |frame|'s Y/U/V (and optional alpha) planes into the plane
// textures, then draws a fullscreen quad through the YUV->RGB program into
// |tex_out| via the FBO. On a format change it also re-selects the
// color-space matrix and chroma subsampling divisors.
void VideoDecoderShim::YUVConverter::Convert(
    const scoped_refptr<media::VideoFrame>& frame,
    GLuint tex_out) {
  // Left null unless the format changed; uniforms are only re-uploaded then.
  const float* yuv_matrix = 0;
  const float* yuv_adjust = 0;

  if (video_format_ != frame->format()) {
    // The constants below were taken from cc/output/gl_renderer.cc.
    // These values are magic numbers that are used in the transformation from
    // YUV to RGB color values. They are taken from the following webpage:
    // http://www.fourcc.org/fccyvrgb.php
    const float yuv_to_rgb_rec601[9] = {
        1.164f, 1.164f, 1.164f, 0.0f, -.391f, 2.018f, 1.596f, -.813f, 0.0f,
    };
    const float yuv_to_rgb_jpeg[9] = {
        1.f, 1.f, 1.f, 0.0f, -.34414f, 1.772f, 1.402f, -.71414f, 0.0f,
    };
    const float yuv_to_rgb_rec709[9] = {
        1.164f, 1.164f, 1.164f, 0.0f, -0.213f, 2.112f, 1.793f, -0.533f, 0.0f,
    };

    // These values map to 16, 128, and 128 respectively, and are computed
    // as a fraction over 256 (e.g. 16 / 256 = 0.0625).
    // They are used in the YUV to RGBA conversion formula:
    //   Y - 16   : Gives 16 values of head and footroom for overshooting
    //   U - 128  : Turns unsigned U into signed U [-128,127]
    //   V - 128  : Turns unsigned V into signed V [-128,127]
    const float yuv_adjust_constrained[3] = {
        -0.0625f, -0.5f, -0.5f,
    };
    // Same as above, but without the head and footroom.
    const float yuv_adjust_full[3] = {
        0.0f, -0.5f, -0.5f,
    };

    // Default to Rec601 unless the frame metadata names a color space.
    yuv_adjust = yuv_adjust_constrained;
    yuv_matrix = yuv_to_rgb_rec601;

    int result;
    if (frame->metadata()->GetInteger(media::VideoFrameMetadata::COLOR_SPACE,
                                      &result)) {
      if (result == media::COLOR_SPACE_JPEG) {
        yuv_matrix = yuv_to_rgb_jpeg;
        yuv_adjust = yuv_adjust_full;
      } else if (result == media::COLOR_SPACE_HD_REC709) {
        yuv_matrix = yuv_to_rgb_rec709;
      }
    }

    // Chroma plane subsampling for the new format.
    switch (frame->format()) {
      case media::PIXEL_FORMAT_YV12:  // 420
      case media::PIXEL_FORMAT_YV12A:
      case media::PIXEL_FORMAT_I420:
        uv_height_divisor_ = 2;
        uv_width_divisor_ = 2;
        break;
      case media::PIXEL_FORMAT_YV16:  // 422
        uv_width_divisor_ = 2;
        uv_height_divisor_ = 1;
        break;
      case media::PIXEL_FORMAT_YV24:  // 444
        uv_width_divisor_ = 1;
        uv_height_divisor_ = 1;
        break;

      default:
        NOTREACHED();
    }

    video_format_ = frame->format();

    // Zero these so everything is reset below.
    y_width_ = y_height_ = 0;
  }

  gl_->TraceBeginCHROMIUM("YUVConverter", "YUVConverterContext");

  uint32_t ywidth = frame->coded_size().width();
  uint32_t yheight = frame->coded_size().height();

  DCHECK_EQ(frame->stride(media::VideoFrame::kUPlane),
            frame->stride(media::VideoFrame::kVPlane));

  uint32_t ystride = frame->stride(media::VideoFrame::kYPlane);
  uint32_t uvstride = frame->stride(media::VideoFrame::kUPlane);

  // The following code assumes that extended GLES 2.0 state like
  // UNPACK_SKIP* (if available) are set to defaults.
  gl_->PixelStorei(GL_UNPACK_ALIGNMENT, 1);

  if (ywidth != y_width_ || yheight != y_height_) {
    y_width_ = ywidth;
    y_height_ = yheight;

    uv_width_ = y_width_ / uv_width_divisor_;
    uv_height_ = y_height_ / uv_height_divisor_;

    // Re-create to resize the textures and upload data.
    gl_->PixelStorei(GL_UNPACK_ROW_LENGTH, ystride);
    gl_->ActiveTexture(GL_TEXTURE0);
    gl_->BindTexture(GL_TEXTURE_2D, y_texture_);
    gl_->TexImage2D(GL_TEXTURE_2D, 0, internal_format_, y_width_, y_height_, 0,
                    format_, GL_UNSIGNED_BYTE,
                    frame->data(media::VideoFrame::kYPlane));

    if (video_format_ == media::PIXEL_FORMAT_YV12A) {
      DCHECK_EQ(frame->stride(media::VideoFrame::kYPlane),
                frame->stride(media::VideoFrame::kAPlane));
      gl_->ActiveTexture(GL_TEXTURE3);
      gl_->BindTexture(GL_TEXTURE_2D, a_texture_);
      gl_->TexImage2D(GL_TEXTURE_2D, 0, internal_format_, y_width_, y_height_,
                      0, format_, GL_UNSIGNED_BYTE,
                      frame->data(media::VideoFrame::kAPlane));
    } else {
      // if there is no alpha channel, then create a 2x2 texture with full
      // alpha.
      gl_->PixelStorei(GL_UNPACK_ROW_LENGTH, 0);
      const uint8_t alpha[4] = {0xff, 0xff, 0xff, 0xff};
      gl_->ActiveTexture(GL_TEXTURE3);
      gl_->BindTexture(GL_TEXTURE_2D, a_texture_);
      gl_->TexImage2D(GL_TEXTURE_2D, 0, internal_format_, 2, 2, 0, format_,
                      GL_UNSIGNED_BYTE, alpha);
    }

    gl_->PixelStorei(GL_UNPACK_ROW_LENGTH, uvstride);
    gl_->ActiveTexture(GL_TEXTURE1);
    gl_->BindTexture(GL_TEXTURE_2D, u_texture_);
    gl_->TexImage2D(GL_TEXTURE_2D, 0, internal_format_, uv_width_, uv_height_,
                    0, format_, GL_UNSIGNED_BYTE,
                    frame->data(media::VideoFrame::kUPlane));

    gl_->ActiveTexture(GL_TEXTURE2);
    gl_->BindTexture(GL_TEXTURE_2D, v_texture_);
    gl_->TexImage2D(GL_TEXTURE_2D, 0, internal_format_, uv_width_, uv_height_,
                    0, format_, GL_UNSIGNED_BYTE,
                    frame->data(media::VideoFrame::kVPlane));
  } else {
    // Same size as last frame: update in place with TexSubImage2D.
    // Bind textures and upload texture data
    gl_->PixelStorei(GL_UNPACK_ROW_LENGTH, ystride);
    gl_->ActiveTexture(GL_TEXTURE0);
    gl_->BindTexture(GL_TEXTURE_2D, y_texture_);
    gl_->TexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, y_width_, y_height_, format_,
                       GL_UNSIGNED_BYTE,
                       frame->data(media::VideoFrame::kYPlane));

    if (video_format_ == media::PIXEL_FORMAT_YV12A) {
      DCHECK_EQ(frame->stride(media::VideoFrame::kYPlane),
                frame->stride(media::VideoFrame::kAPlane));
      gl_->ActiveTexture(GL_TEXTURE3);
      gl_->BindTexture(GL_TEXTURE_2D, a_texture_);
      gl_->TexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, y_width_, y_height_, format_,
                         GL_UNSIGNED_BYTE,
                         frame->data(media::VideoFrame::kAPlane));
    } else {
      // No alpha plane: the constant full-alpha texture is still valid; just
      // bind it so unit 3 samples it.
      gl_->ActiveTexture(GL_TEXTURE3);
      gl_->BindTexture(GL_TEXTURE_2D, a_texture_);
    }

    gl_->PixelStorei(GL_UNPACK_ROW_LENGTH, uvstride);
    gl_->ActiveTexture(GL_TEXTURE1);
    gl_->BindTexture(GL_TEXTURE_2D, u_texture_);
    gl_->TexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, uv_width_, uv_height_, format_,
                       GL_UNSIGNED_BYTE,
                       frame->data(media::VideoFrame::kUPlane));

    gl_->ActiveTexture(GL_TEXTURE2);
    gl_->BindTexture(GL_TEXTURE_2D, v_texture_);
    gl_->TexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, uv_width_, uv_height_, format_,
                       GL_UNSIGNED_BYTE,
                       frame->data(media::VideoFrame::kVPlane));
  }

  // Render into the caller's texture through our FBO.
  gl_->BindFramebuffer(GL_FRAMEBUFFER, frame_buffer_);
  gl_->FramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D,
                            tex_out, 0);

#ifndef NDEBUG
  // We should probably check for framebuffer complete here, but that
  // will slow this method down so check only in debug mode.
  GLint status = gl_->CheckFramebufferStatus(GL_FRAMEBUFFER);
  if (status != GL_FRAMEBUFFER_COMPLETE) {
    return;
  }
#endif

  gl_->Viewport(0, 0, ywidth, yheight);

  gl_->UseProgram(program_);

  // Only re-upload color-space uniforms when the format changed above.
  if (yuv_matrix) {
    gl_->UniformMatrix3fv(yuv_matrix_loc_, 1, 0, yuv_matrix);
    gl_->Uniform3fv(yuv_adjust_loc_, 1, yuv_adjust);
  }

  gl_->BindBuffer(GL_ARRAY_BUFFER, vertex_buffer_);
  gl_->EnableVertexAttribArray(0);
  gl_->VertexAttribPointer(0, 2, GL_FLOAT, GL_FALSE, 2 * sizeof(GLfloat),
                           static_cast<const void*>(0));

  gl_->DrawArrays(GL_TRIANGLE_STRIP, 0, 4);

  // The YUVConverter shares the context with Skia and possibly other modules
  // that may make OpenGL calls. To be a "good OpenGL citizen" for other
  // (non-Skia) modules that may share this context we restore
  // buffer/texture/state bindings to OpenGL defaults here. If we were only
  // sharing the context with Skia this may not be necessary as we also
  // Invalidate the GrContext below so that Skia is aware that its state
  // caches need to be reset.

  gl_->BindBuffer(GL_ARRAY_BUFFER, 0);
  gl_->DisableVertexAttribArray(0);
  gl_->UseProgram(0);
  gl_->BindFramebuffer(GL_FRAMEBUFFER, 0);

  gl_->BindTexture(GL_TEXTURE_2D, 0);

  gl_->ActiveTexture(GL_TEXTURE2);
  gl_->BindTexture(GL_TEXTURE_2D, 0);

  gl_->ActiveTexture(GL_TEXTURE1);
  gl_->BindTexture(GL_TEXTURE_2D, 0);

  gl_->ActiveTexture(GL_TEXTURE0);
  gl_->BindTexture(GL_TEXTURE_2D, 0);
  gl_->PixelStorei(GL_UNPACK_ROW_LENGTH, 0);

  gl_->TraceEndCHROMIUM();

  context_provider_->InvalidateGrContext(kGrInvalidateState);
}
// A decode request queued for the media-thread decoder: the host-assigned id
// plus the bitstream data to decode.
struct VideoDecoderShim::PendingDecode {
  PendingDecode(uint32_t decode_id,
                const scoped_refptr<media::DecoderBuffer>& buffer);
  ~PendingDecode();

  const uint32_t decode_id;
  const scoped_refptr<media::DecoderBuffer> buffer;
};

VideoDecoderShim::PendingDecode::PendingDecode(
    uint32_t decode_id,
    const scoped_refptr<media::DecoderBuffer>& buffer)
    : decode_id(decode_id), buffer(buffer) {
}

VideoDecoderShim::PendingDecode::~PendingDecode() {
}
// A decoded frame travelling back to the main thread. |video_frame| is null
// for the end-of-stream marker (see DecoderImpl::OnOutputComplete).
struct VideoDecoderShim::PendingFrame {
  explicit PendingFrame(uint32_t decode_id);
  PendingFrame(uint32_t decode_id,
               const scoped_refptr<media::VideoFrame>& frame);
  ~PendingFrame();

  const uint32_t decode_id;
  scoped_refptr<media::VideoFrame> video_frame;

 private:
  // This could be expensive to copy, so guard against that.
  DISALLOW_COPY_AND_ASSIGN(PendingFrame);
};

VideoDecoderShim::PendingFrame::PendingFrame(uint32_t decode_id)
    : decode_id(decode_id) {
}

VideoDecoderShim::PendingFrame::PendingFrame(
    uint32_t decode_id,
    const scoped_refptr<media::VideoFrame>& frame)
    : decode_id(decode_id), video_frame(frame) {
}

VideoDecoderShim::PendingFrame::~PendingFrame() {
}
// DecoderImpl runs the underlying VideoDecoder on the media thread, receiving
// calls from the VideoDecodeShim on the main thread and sending results back.
// This class is constructed on the main thread, but used and destructed on the
// media thread.
class VideoDecoderShim::DecoderImpl {
 public:
  explicit DecoderImpl(const base::WeakPtr<VideoDecoderShim>& proxy);
  ~DecoderImpl();

  void Initialize(media::VideoDecoderConfig config);
  void Decode(uint32_t decode_id, scoped_refptr<media::DecoderBuffer> buffer);
  void Reset();
  void Stop();

 private:
  void OnInitDone(bool success);
  void DoDecode();
  void OnDecodeComplete(media::VideoDecoder::Status status);
  void OnOutputComplete(const scoped_refptr<media::VideoFrame>& frame);
  void OnResetComplete();

  // WeakPtr is bound to main_message_loop_. Use only in shim callbacks.
  base::WeakPtr<VideoDecoderShim> shim_;
  scoped_ptr<media::VideoDecoder> decoder_;
  // Task runner of the thread that constructed us; results are posted here.
  scoped_refptr<base::SingleThreadTaskRunner> main_task_runner_;
  // Queue of decodes waiting for the decoder.
  typedef std::queue<PendingDecode> PendingDecodeQueue;
  PendingDecodeQueue pending_decodes_;
  // True while a Decode() call is outstanding on |decoder_|.
  bool awaiting_decoder_;
  // VideoDecoder returns pictures without information about the decode buffer
  // that generated it, but VideoDecoder implementations used in this class
  // (media::FFmpegVideoDecoder and media::VpxVideoDecoder) always generate
  // corresponding frames before decode is finished. |decode_id_| is used to
  // store id of the current buffer while Decode() call is pending.
  uint32_t decode_id_;

  base::WeakPtrFactory<DecoderImpl> weak_ptr_factory_;
};
VideoDecoderShim::DecoderImpl::DecoderImpl(
    const base::WeakPtr<VideoDecoderShim>& proxy)
    : shim_(proxy),
      // Captures the constructing (main) thread's task runner for replies.
      main_task_runner_(base::ThreadTaskRunnerHandle::Get()),
      awaiting_decoder_(false),
      decode_id_(0),
      weak_ptr_factory_(this) {
}

VideoDecoderShim::DecoderImpl::~DecoderImpl() {
  // Stop() must have drained the queue before destruction.
  DCHECK(pending_decodes_.empty());
}
// Creates and initializes the software decoder for |config| on the media
// thread. Completion is reported asynchronously via OnInitDone().
void VideoDecoderShim::DecoderImpl::Initialize(
    media::VideoDecoderConfig config) {
  DCHECK(!decoder_);
#if !defined(MEDIA_DISABLE_LIBVPX)
  // VP9 uses libvpx; all other codecs fall through to FFmpeg.
  if (config.codec() == media::kCodecVP9) {
    decoder_.reset(
        new media::VpxVideoDecoder(base::ThreadTaskRunnerHandle::Get()));
  } else
#endif
  {
    scoped_ptr<media::FFmpegVideoDecoder> ffmpeg_video_decoder(
        new media::FFmpegVideoDecoder(base::ThreadTaskRunnerHandle::Get()));
    ffmpeg_video_decoder->set_decode_nalus(true);
    decoder_ = ffmpeg_video_decoder.Pass();
  }

  // VpxVideoDecoder and FFmpegVideoDecoder support only one pending Decode()
  // request.
  DCHECK_EQ(decoder_->GetMaxDecodeRequests(), 1);

  decoder_->Initialize(
      config, true /* low_delay */,
      base::Bind(&VideoDecoderShim::DecoderImpl::OnInitDone,
                 weak_ptr_factory_.GetWeakPtr()),
      base::Bind(&VideoDecoderShim::DecoderImpl::OnOutputComplete,
                 weak_ptr_factory_.GetWeakPtr()));
}
// Queues |buffer| for decoding. DoDecode() is a no-op while a decode is
// already outstanding, so requests are serialized.
void VideoDecoderShim::DecoderImpl::Decode(
    uint32_t decode_id,
    scoped_refptr<media::DecoderBuffer> buffer) {
  DCHECK(decoder_);
  pending_decodes_.push(PendingDecode(decode_id, buffer));
  DoDecode();
}
705 void VideoDecoderShim::DecoderImpl::Reset() {
706 DCHECK(decoder_);
707 // Abort all pending decodes.
708 while (!pending_decodes_.empty()) {
709 const PendingDecode& decode = pending_decodes_.front();
710 scoped_ptr<PendingFrame> pending_frame(new PendingFrame(decode.decode_id));
711 main_task_runner_->PostTask(
712 FROM_HERE, base::Bind(&VideoDecoderShim::OnDecodeComplete, shim_,
713 media::VideoDecoder::kAborted, decode.decode_id));
714 pending_decodes_.pop();
716 decoder_->Reset(base::Bind(&VideoDecoderShim::DecoderImpl::OnResetComplete,
717 weak_ptr_factory_.GetWeakPtr()));
720 void VideoDecoderShim::DecoderImpl::Stop() {
721 DCHECK(decoder_);
722 // Clear pending decodes now. We don't want OnDecodeComplete to call DoDecode
723 // again.
724 while (!pending_decodes_.empty())
725 pending_decodes_.pop();
726 decoder_.reset();
727 // This instance is deleted once we exit this scope.
// Translates decoder init success/failure into a PPAPI result and posts it,
// along with the texture pool size, back to the shim on the main thread.
void VideoDecoderShim::DecoderImpl::OnInitDone(bool success) {
  int32_t result = success ? PP_OK : PP_ERROR_NOTSUPPORTED;

  // Calculate how many textures the shim should create.
  uint32_t shim_texture_pool_size = media::limits::kMaxVideoFrames + 1;
  main_task_runner_->PostTask(
      FROM_HERE, base::Bind(&VideoDecoderShim::OnInitializeComplete, shim_,
                            result, shim_texture_pool_size));
}
// Starts the next queued decode if the decoder is idle. |decode_id_| records
// which buffer is in flight so OnOutputComplete can tag its frames.
void VideoDecoderShim::DecoderImpl::DoDecode() {
  if (pending_decodes_.empty() || awaiting_decoder_)
    return;

  awaiting_decoder_ = true;
  const PendingDecode& decode = pending_decodes_.front();
  decode_id_ = decode.decode_id;
  decoder_->Decode(decode.buffer,
                   base::Bind(&VideoDecoderShim::DecoderImpl::OnDecodeComplete,
                              weak_ptr_factory_.GetWeakPtr()));
  pending_decodes_.pop();
}
// Maps the decoder's status to a PPAPI result, reports it to the shim on the
// main thread, then kicks off the next queued decode.
void VideoDecoderShim::DecoderImpl::OnDecodeComplete(
    media::VideoDecoder::Status status) {
  DCHECK(awaiting_decoder_);
  awaiting_decoder_ = false;

  int32_t result;
  switch (status) {
    case media::VideoDecoder::kOk:
    case media::VideoDecoder::kAborted:
      result = PP_OK;
      break;
    case media::VideoDecoder::kDecodeError:
      result = PP_ERROR_RESOURCE_FAILED;
      break;
    default:
      NOTREACHED();
      result = PP_ERROR_FAILED;
      break;
  }

  main_task_runner_->PostTask(
      FROM_HERE, base::Bind(&VideoDecoderShim::OnDecodeComplete, shim_, result,
                            decode_id_));

  DoDecode();
}
// Wraps a decoded frame (or the end-of-stream marker, as a PendingFrame with
// no video_frame) and posts it to the shim on the main thread.
void VideoDecoderShim::DecoderImpl::OnOutputComplete(
    const scoped_refptr<media::VideoFrame>& frame) {
  // Software decoders are expected to generated frames only when a Decode()
  // call is pending.
  DCHECK(awaiting_decoder_);

  scoped_ptr<PendingFrame> pending_frame;
  if (!frame->metadata()->IsTrue(media::VideoFrameMetadata::END_OF_STREAM))
    pending_frame.reset(new PendingFrame(decode_id_, frame));
  else
    pending_frame.reset(new PendingFrame(decode_id_));

  main_task_runner_->PostTask(
      FROM_HERE, base::Bind(&VideoDecoderShim::OnOutputComplete, shim_,
                            base::Passed(&pending_frame)));
}
// Relays reset completion to the shim on the main thread.
void VideoDecoderShim::DecoderImpl::OnResetComplete() {
  main_task_runner_->PostTask(
      FROM_HERE, base::Bind(&VideoDecoderShim::OnResetComplete, shim_));
}
VideoDecoderShim::VideoDecoderShim(PepperVideoDecoderHost* host)
    : state_(UNINITIALIZED),
      host_(host),
      media_task_runner_(
          RenderThreadImpl::current()->GetMediaThreadTaskRunner()),
      context_provider_(
          RenderThreadImpl::current()->SharedMainThreadContextProvider()),
      texture_pool_size_(0),
      num_pending_decodes_(0),
      yuv_converter_(new YUVConverter(context_provider_)),
      weak_ptr_factory_(this) {
  DCHECK(host_);
  DCHECK(media_task_runner_.get());
  DCHECK(context_provider_.get());
  // DecoderImpl is constructed here (main thread) but runs on the media
  // thread; it replies through this weak pointer.
  decoder_impl_.reset(new DecoderImpl(weak_ptr_factory_.GetWeakPtr()));
}
VideoDecoderShim::~VideoDecoderShim() {
  DCHECK(RenderThreadImpl::current());
  // Delete any remaining textures.
  TextureIdMap::iterator it = texture_id_map_.begin();
  for (; it != texture_id_map_.end(); ++it)
    DeleteTexture(it->second);
  texture_id_map_.clear();

  FlushCommandBuffer();

  weak_ptr_factory_.InvalidateWeakPtrs();
  // No more callbacks from the delegate will be received now.

  // The callback now holds the only reference to the DecoderImpl, which will be
  // deleted when Stop completes.
  media_task_runner_->PostTask(
      FROM_HERE,
      base::Bind(&VideoDecoderShim::DecoderImpl::Stop,
                 base::Owned(decoder_impl_.release())));
}
// Maps |profile| to a codec, sets up the YUV converter, and posts decoder
// initialization to the media thread. Returns false only if the YUV
// converter cannot be set up; actual decoder init completes asynchronously
// via OnInitializeComplete().
bool VideoDecoderShim::Initialize(
    media::VideoCodecProfile profile,
    media::VideoDecodeAccelerator::Client* client) {
  DCHECK_EQ(client, host_);
  DCHECK(RenderThreadImpl::current());
  DCHECK_EQ(state_, UNINITIALIZED);
  media::VideoCodec codec = media::kUnknownVideoCodec;
  if (profile <= media::H264PROFILE_MAX)
    codec = media::kCodecH264;
  else if (profile <= media::VP8PROFILE_MAX)
    codec = media::kCodecVP8;
  else if (profile <= media::VP9PROFILE_MAX)
    codec = media::kCodecVP9;
  DCHECK_NE(codec, media::kUnknownVideoCodec);

  if (!yuv_converter_->Initialize()) {
    return false;
  }

  media::VideoDecoderConfig config(
      codec, profile, media::PIXEL_FORMAT_YV12, media::COLOR_SPACE_UNSPECIFIED,
      gfx::Size(32, 24),  // Small sizes that won't fail.
      gfx::Rect(32, 24), gfx::Size(32, 24),
      NULL /* extra_data */,  // TODO(bbudge) Verify this isn't needed.
      0 /* extra_data_size */, false /* decryption */);

  media_task_runner_->PostTask(
      FROM_HERE,
      base::Bind(&VideoDecoderShim::DecoderImpl::Initialize,
                 base::Unretained(decoder_impl_.get()),
                 config));
  // Return success, even though we are asynchronous, to mimic
  // media::VideoDecodeAccelerator.
  return true;
}
// Copies the bitstream out of shared memory and posts it to the media-thread
// decoder. |num_pending_decodes_| tracks outstanding work for flush logic.
void VideoDecoderShim::Decode(const media::BitstreamBuffer& bitstream_buffer) {
  DCHECK(RenderThreadImpl::current());
  DCHECK_EQ(state_, DECODING);

  // We need the address of the shared memory, so we can copy the buffer.
  const uint8_t* buffer = host_->DecodeIdToAddress(bitstream_buffer.id());
  DCHECK(buffer);

  media_task_runner_->PostTask(
      FROM_HERE,
      base::Bind(
          &VideoDecoderShim::DecoderImpl::Decode,
          base::Unretained(decoder_impl_.get()),
          bitstream_buffer.id(),
          media::DecoderBuffer::CopyFrom(buffer, bitstream_buffer.size())));
  num_pending_decodes_++;
}
// Imports the host-created textures (one per previously requested mailbox)
// into this context and marks them available, then tries to deliver any
// frames that were waiting for textures.
void VideoDecoderShim::AssignPictureBuffers(
    const std::vector<media::PictureBuffer>& buffers) {
  DCHECK(RenderThreadImpl::current());
  DCHECK_EQ(state_, DECODING);
  if (buffers.empty()) {
    NOTREACHED();
    return;
  }
  DCHECK_EQ(buffers.size(), pending_texture_mailboxes_.size());
  GLuint num_textures = base::checked_cast<GLuint>(buffers.size());
  std::vector<uint32_t> local_texture_ids(num_textures);
  gpu::gles2::GLES2Interface* gles2 = context_provider_->ContextGL();
  for (uint32_t i = 0; i < num_textures; i++) {
    local_texture_ids[i] = gles2->CreateAndConsumeTextureCHROMIUM(
        GL_TEXTURE_2D, pending_texture_mailboxes_[i].name);
    // Map the plugin texture id to the local texture id.
    uint32_t plugin_texture_id = buffers[i].texture_id();
    texture_id_map_[plugin_texture_id] = local_texture_ids[i];
    available_textures_.insert(plugin_texture_id);
  }
  pending_texture_mailboxes_.clear();
  SendPictures();
}
918 void VideoDecoderShim::ReusePictureBuffer(int32 picture_buffer_id) {
919 DCHECK(RenderThreadImpl::current());
920 uint32_t texture_id = static_cast<uint32_t>(picture_buffer_id);
921 if (textures_to_dismiss_.find(texture_id) != textures_to_dismiss_.end()) {
922 DismissTexture(texture_id);
923 } else if (texture_id_map_.find(texture_id) != texture_id_map_.end()) {
924 available_textures_.insert(texture_id);
925 SendPictures();
926 } else {
927 NOTREACHED();
// Enters FLUSHING; SendPictures() signals NotifyFlushDone() once all pending
// decodes and frames have drained.
void VideoDecoderShim::Flush() {
  DCHECK(RenderThreadImpl::current());
  DCHECK_EQ(state_, DECODING);
  state_ = FLUSHING;
}
// Enters RESETTING and asks the media-thread decoder to reset; completion
// arrives via OnResetComplete().
void VideoDecoderShim::Reset() {
  DCHECK(RenderThreadImpl::current());
  DCHECK_EQ(state_, DECODING);
  state_ = RESETTING;
  media_task_runner_->PostTask(
      FROM_HERE,
      base::Bind(&VideoDecoderShim::DecoderImpl::Reset,
                 base::Unretained(decoder_impl_.get())));
}
// Self-deletes; the shim mimics media::VideoDecodeAccelerator, whose clients
// call Destroy() rather than deleting directly.
void VideoDecoderShim::Destroy() {
  delete this;
}
// Called on the main thread with the media-thread init result; records the
// texture pool size on success and forwards the result to the host.
void VideoDecoderShim::OnInitializeComplete(int32_t result,
                                            uint32_t texture_pool_size) {
  DCHECK(RenderThreadImpl::current());
  DCHECK(host_);

  if (result == PP_OK) {
    state_ = DECODING;
    texture_pool_size_ = texture_pool_size;
  }

  host_->OnInitializeComplete(result);
}
// Records a finished decode. Hard failures become a host error; otherwise the
// completion may be withheld while frames are queued (see comment below).
void VideoDecoderShim::OnDecodeComplete(int32_t result, uint32_t decode_id) {
  DCHECK(RenderThreadImpl::current());
  DCHECK(host_);

  if (result == PP_ERROR_RESOURCE_FAILED) {
    host_->NotifyError(media::VideoDecodeAccelerator::PLATFORM_FAILURE);
    return;
  }

  num_pending_decodes_--;
  completed_decodes_.push(decode_id);

  // If frames are being queued because we're out of textures, don't notify
  // the host that decode has completed. This exerts "back pressure" to keep
  // the host from sending buffers that will cause pending_frames_ to grow.
  if (pending_frames_.empty())
    NotifyCompletedDecodes();
}
// Receives a decoded frame from the media thread. On a coded-size change the
// current texture pool is dismissed and a new one is requested from the host;
// the frame is then queued and delivery attempted. End-of-stream markers
// (null video_frame) are dropped here.
void VideoDecoderShim::OnOutputComplete(scoped_ptr<PendingFrame> frame) {
  DCHECK(RenderThreadImpl::current());
  DCHECK(host_);

  if (frame->video_frame) {
    if (texture_size_ != frame->video_frame->coded_size()) {
      // If the size has changed, all current textures must be dismissed. Add
      // all textures to |textures_to_dismiss_| and dismiss any that aren't in
      // use by the plugin. We will dismiss the rest as they are recycled.
      for (TextureIdMap::const_iterator it = texture_id_map_.begin();
           it != texture_id_map_.end();
           ++it) {
        textures_to_dismiss_.insert(it->first);
      }
      for (TextureIdSet::const_iterator it = available_textures_.begin();
           it != available_textures_.end();
           ++it) {
        DismissTexture(*it);
      }
      available_textures_.clear();
      FlushCommandBuffer();

      DCHECK(pending_texture_mailboxes_.empty());
      for (uint32_t i = 0; i < texture_pool_size_; i++)
        pending_texture_mailboxes_.push_back(gpu::Mailbox::Generate());

      host_->RequestTextures(texture_pool_size_,
                             frame->video_frame->coded_size(), GL_TEXTURE_2D,
                             pending_texture_mailboxes_);
      texture_size_ = frame->video_frame->coded_size();
    }

    pending_frames_.push(linked_ptr<PendingFrame>(frame.release()));
    SendPictures();
  }
}
// Converts queued frames into available plugin textures and hands them to the
// host. Also releases back-pressure and completes a pending flush once the
// frame queue drains.
void VideoDecoderShim::SendPictures() {
  DCHECK(RenderThreadImpl::current());
  DCHECK(host_);
  while (!pending_frames_.empty() && !available_textures_.empty()) {
    const linked_ptr<PendingFrame>& frame = pending_frames_.front();

    // Take any available plugin texture.
    TextureIdSet::iterator it = available_textures_.begin();
    uint32_t texture_id = *it;
    available_textures_.erase(it);

    uint32_t local_texture_id = texture_id_map_[texture_id];
    // Render the software frame into the plugin's texture.
    yuv_converter_->Convert(frame->video_frame, local_texture_id);

    host_->PictureReady(media::Picture(texture_id, frame->decode_id,
                                       frame->video_frame->visible_rect(),
                                       false));
    pending_frames_.pop();
  }

  FlushCommandBuffer();

  if (pending_frames_.empty()) {
    // If frames aren't backing up, notify the host of any completed decodes so
    // it can send more buffers.
    NotifyCompletedDecodes();

    if (state_ == FLUSHING && !num_pending_decodes_) {
      state_ = DECODING;
      host_->NotifyFlushDone();
    }
  }
}
// Finalizes a reset on the main thread: drops undelivered frames, reports any
// completed decodes, dismisses stale textures, and notifies the host.
void VideoDecoderShim::OnResetComplete() {
  DCHECK(RenderThreadImpl::current());
  DCHECK(host_);

  while (!pending_frames_.empty())
    pending_frames_.pop();
  NotifyCompletedDecodes();

  // Dismiss any old textures now.
  while (!textures_to_dismiss_.empty())
    DismissTexture(*textures_to_dismiss_.begin());

  state_ = DECODING;
  host_->NotifyResetDone();
}
1070 void VideoDecoderShim::NotifyCompletedDecodes() {
1071 while (!completed_decodes_.empty()) {
1072 host_->NotifyEndOfBitstreamBuffer(completed_decodes_.front());
1073 completed_decodes_.pop();
// Fully retires a plugin texture: removes it from the dismissal set, deletes
// the local GL copy, drops the id mapping, and tells the host.
void VideoDecoderShim::DismissTexture(uint32_t texture_id) {
  DCHECK(host_);
  textures_to_dismiss_.erase(texture_id);
  DCHECK(texture_id_map_.find(texture_id) != texture_id_map_.end());
  DeleteTexture(texture_id_map_[texture_id]);
  texture_id_map_.erase(texture_id);
  host_->DismissPictureBuffer(texture_id);
}
1086 void VideoDecoderShim::DeleteTexture(uint32_t texture_id) {
1087 gpu::gles2::GLES2Interface* gles2 = context_provider_->ContextGL();
1088 gles2->DeleteTextures(1, &texture_id);
1091 void VideoDecoderShim::FlushCommandBuffer() {
1092 context_provider_->ContextGL()->Flush();
1095 } // namespace content