// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "content/renderer/pepper/video_decoder_shim.h"

#include <GLES2/gl2ext.h>
#include <GLES2/gl2extchromium.h>

#include "base/bind.h"
#include "base/location.h"
#include "base/logging.h"
#include "base/numerics/safe_conversions.h"
#include "base/single_thread_task_runner.h"
#include "base/thread_task_runner_handle.h"
#include "cc/blink/context_provider_web_context.h"
#include "content/public/renderer/render_thread.h"
#include "content/renderer/pepper/pepper_video_decoder_host.h"
#include "content/renderer/render_thread_impl.h"
#include "gpu/command_buffer/client/gles2_implementation.h"
#include "media/base/decoder_buffer.h"
#include "media/base/limits.h"
#include "media/base/video_decoder.h"
#include "media/blink/skcanvas_video_renderer.h"
#include "media/filters/ffmpeg_video_decoder.h"
#include "media/filters/vpx_video_decoder.h"
#include "media/video/picture.h"
#include "media/video/video_decode_accelerator.h"
#include "ppapi/c/pp_errors.h"
#include "third_party/skia/include/gpu/GrTypes.h"

namespace content {

namespace {

// GrContext state that the YUVConverter may touch and that Skia must
// therefore re-validate after a conversion.
static const uint32_t kGrInvalidateState =
    kRenderTarget_GrGLBackendState | kTextureBinding_GrGLBackendState |
    kView_GrGLBackendState | kVertex_GrGLBackendState |
    kProgram_GrGLBackendState | kPixelStore_GrGLBackendState;

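// Returns true if the shim's software decoders can handle |codec|: VP9 via
// libvpx when it is compiled in, anything else via FFmpeg.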
bool IsCodecSupported(media::VideoCodec codec) {
#if !defined(MEDIA_DISABLE_LIBVPX)
  if (codec == media::kCodecVP9)
    return true;
#endif

  return media::FFmpegVideoDecoder::IsCodecSupported(codec);
}

}  // namespace

// YUV->RGB converter class using a shader and FBO.
class VideoDecoderShim::YUVConverter {
 public:
  YUVConverter(const scoped_refptr<cc_blink::ContextProviderWebContext>&);
  ~YUVConverter();
  bool Initialize();
  void Convert(const scoped_refptr<media::VideoFrame>& frame, GLuint tex_out);

 private:
  GLuint CreateShader();
  GLuint CompileShader(const char* name, GLuint type, const char* code);
  GLuint CreateProgram(const char* name, GLuint vshader, GLuint fshader);
  GLuint CreateTexture();

  scoped_refptr<cc_blink::ContextProviderWebContext> context_provider_;
  gpu::gles2::GLES2Interface* gl_;

  GLuint frame_buffer_;
  GLuint vertex_buffer_;
  GLuint program_;

  GLuint y_texture_;
  GLuint u_texture_;
  GLuint v_texture_;
  GLuint a_texture_;

  GLuint internal_format_;
  GLuint format_;
  media::VideoPixelFormat video_format_;

  uint32_t y_width_;
  uint32_t y_height_;
  uint32_t uv_width_;
  uint32_t uv_height_;
  uint32_t uv_height_divisor_;
  uint32_t uv_width_divisor_;

  GLint yuv_matrix_loc_;
  GLint yuv_adjust_loc_;

  DISALLOW_COPY_AND_ASSIGN(YUVConverter);
};

VideoDecoderShim::YUVConverter::YUVConverter(
    const scoped_refptr<cc_blink::ContextProviderWebContext>& context_provider)
    : context_provider_(context_provider),
      gl_(context_provider_->ContextGL()),
      frame_buffer_(0),
      vertex_buffer_(0),
      program_(0),
      y_texture_(0),
      u_texture_(0),
      v_texture_(0),
      a_texture_(0),
      internal_format_(0),
      format_(0),
      video_format_(media::PIXEL_FORMAT_UNKNOWN),
      y_width_(0),
      y_height_(0),
      uv_width_(0),
      uv_height_(0),
      uv_height_divisor_(1),
      uv_width_divisor_(1),
      yuv_matrix_loc_(0),
      yuv_adjust_loc_(0) {
}

VideoDecoderShim::YUVConverter::~YUVConverter() {
  if (y_texture_)
    gl_->DeleteTextures(1, &y_texture_);

  if (u_texture_)
    gl_->DeleteTextures(1, &u_texture_);

  if (v_texture_)
    gl_->DeleteTextures(1, &v_texture_);

  if (a_texture_)
    gl_->DeleteTextures(1, &a_texture_);

  if (frame_buffer_)
    gl_->DeleteFramebuffers(1, &frame_buffer_);

  if (vertex_buffer_)
    gl_->DeleteBuffers(1, &vertex_buffer_);

  if (program_)
    gl_->DeleteProgram(program_);
}

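// Creates a 2D texture with linear filtering and edge clamping. The texture
// is allocated at a placeholder 2x2 size and resized when the first frame is
// converted.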
GLuint VideoDecoderShim::YUVConverter::CreateTexture() {
  GLuint tex = 0;

  gl_->GenTextures(1, &tex);
  gl_->BindTexture(GL_TEXTURE_2D, tex);

  // Create texture with default size - will be resized upon first frame.
  gl_->TexImage2D(GL_TEXTURE_2D, 0, internal_format_, 2, 2, 0, format_,
                  GL_UNSIGNED_BYTE, NULL);

  gl_->TexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
  gl_->TexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
  gl_->TexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
  gl_->TexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);

  gl_->BindTexture(GL_TEXTURE_2D, 0);

  return tex;
}

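// Compiles a single shader stage, logging the GL info log and returning 0 on
// failure (checked in debug builds only).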
GLuint VideoDecoderShim::YUVConverter::CompileShader(const char* name,
                                                     GLuint type,
                                                     const char* code) {
  GLuint shader = gl_->CreateShader(type);

  gl_->ShaderSource(shader, 1, (const GLchar**)&code, NULL);
  gl_->CompileShader(shader);

#ifndef NDEBUG
  GLint status = 0;

  gl_->GetShaderiv(shader, GL_COMPILE_STATUS, &status);
  if (status != GL_TRUE) {
    GLint max_length = 0;
    GLint actual_length = 0;
    gl_->GetShaderiv(shader, GL_INFO_LOG_LENGTH, &max_length);

    // The max_length includes the NULL character.
    std::string error_log(max_length, 0);
    gl_->GetShaderInfoLog(shader, max_length, &actual_length, &error_log[0]);

    LOG(ERROR) << name << " shader compilation failed: " << error_log.c_str();
    gl_->DeleteShader(shader);
    return 0;
  }
#endif

  return shader;
}

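// Links |vshader| and |fshader| into a program with the quad position bound
// to attribute 0. Returns 0 on link failure (checked in debug builds only).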
GLuint VideoDecoderShim::YUVConverter::CreateProgram(const char* name,
                                                     GLuint vshader,
                                                     GLuint fshader) {
  GLuint program = gl_->CreateProgram();
  gl_->AttachShader(program, vshader);
  gl_->AttachShader(program, fshader);

  gl_->BindAttribLocation(program, 0, "position");

  gl_->LinkProgram(program);

#ifndef NDEBUG
  GLint status = 0;

  gl_->GetProgramiv(program, GL_LINK_STATUS, &status);
  if (status != GL_TRUE) {
    GLint max_length = 0;
    GLint actual_length = 0;
    gl_->GetProgramiv(program, GL_INFO_LOG_LENGTH, &max_length);

    // The max_length includes the NULL character.
    std::string error_log(max_length, 0);
    gl_->GetProgramInfoLog(program, max_length, &actual_length, &error_log[0]);

    LOG(ERROR) << name << " program linking failed: " << error_log.c_str();
    return 0;
  }
#endif

  return program;
}

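// Builds the YUV->RGBA program and points the y/u/v/a samplers at texture
// units 0-3. Also caches the yuv_matrix/yuv_adjust uniform locations.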
GLuint VideoDecoderShim::YUVConverter::CreateShader() {
  const char* vert_shader =
      "precision mediump float;\n"
      "attribute vec2 position;\n"
      "varying vec2 texcoord;\n"
      "void main()\n"
      "{\n"
      "    gl_Position = vec4( position.xy, 0, 1 );\n"
      "    texcoord = position*0.5+0.5;\n"
      "}";

  const char* frag_shader =
      "precision mediump float;\n"
      "varying vec2 texcoord;\n"
      "uniform sampler2D y_sampler;\n"
      "uniform sampler2D u_sampler;\n"
      "uniform sampler2D v_sampler;\n"
      "uniform sampler2D a_sampler;\n"
      "uniform mat3 yuv_matrix;\n"
      "uniform vec3 yuv_adjust;\n"
      "void main()\n"
      "{\n"
      "    vec3 yuv = vec3(texture2D(y_sampler, texcoord).x,\n"
      "                    texture2D(u_sampler, texcoord).x,\n"
      "                    texture2D(v_sampler, texcoord).x) +\n"
      "               yuv_adjust;\n"
      "    gl_FragColor = vec4(yuv_matrix * yuv, texture2D(a_sampler, "
      "texcoord).x);\n"
      "}";

  GLuint vertex_shader =
      CompileShader("Vertex Shader", GL_VERTEX_SHADER, vert_shader);
  if (!vertex_shader) {
    return 0;
  }

  GLuint fragment_shader =
      CompileShader("Fragment Shader", GL_FRAGMENT_SHADER, frag_shader);
  if (!fragment_shader) {
    gl_->DeleteShader(vertex_shader);
    return 0;
  }

  GLuint program =
      CreateProgram("YUVConverter Program", vertex_shader, fragment_shader);

  gl_->DeleteShader(vertex_shader);
  gl_->DeleteShader(fragment_shader);

  if (!program) {
    return 0;
  }

  gl_->UseProgram(program);

  GLint uniform_location;
  uniform_location = gl_->GetUniformLocation(program, "y_sampler");
  DCHECK(uniform_location != -1);
  gl_->Uniform1i(uniform_location, 0);

  uniform_location = gl_->GetUniformLocation(program, "u_sampler");
  DCHECK(uniform_location != -1);
  gl_->Uniform1i(uniform_location, 1);

  uniform_location = gl_->GetUniformLocation(program, "v_sampler");
  DCHECK(uniform_location != -1);
  gl_->Uniform1i(uniform_location, 2);

  uniform_location = gl_->GetUniformLocation(program, "a_sampler");
  DCHECK(uniform_location != -1);
  gl_->Uniform1i(uniform_location, 3);

  gl_->UseProgram(0);

  yuv_matrix_loc_ = gl_->GetUniformLocation(program, "yuv_matrix");
  DCHECK(yuv_matrix_loc_ != -1);

  yuv_adjust_loc_ = gl_->GetUniformLocation(program, "yuv_adjust");
  DCHECK(yuv_adjust_loc_ != -1);

  return program;
}

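// One-time GL setup: framebuffer, plane textures, the full-screen quad vertex
// buffer and the conversion program. Returns false if the context cannot
// support the converter.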
bool VideoDecoderShim::YUVConverter::Initialize() {
  // If texture_rg extension is not available, use slower GL_LUMINANCE.
  if (context_provider_->ContextCapabilities().gpu.texture_rg) {
    internal_format_ = GL_RED_EXT;
    format_ = GL_RED_EXT;
  } else {
    internal_format_ = GL_LUMINANCE;
    format_ = GL_LUMINANCE;
  }

  if (context_provider_->ContextCapabilities().gpu.max_texture_image_units <
      4) {
    // We support YUVA textures and require 4 texture units in the fragment
    // stage.
    return false;
  }

  gl_->TraceBeginCHROMIUM("YUVConverter", "YUVConverterContext");
  gl_->GenFramebuffers(1, &frame_buffer_);

  y_texture_ = CreateTexture();
  u_texture_ = CreateTexture();
  v_texture_ = CreateTexture();
  a_texture_ = CreateTexture();

  // Vertex positions. Also converted to texcoords in vertex shader.
  GLfloat vertex_positions[] = {-1.f, -1.f, 1.f, -1.f, -1.f, 1.f, 1.f, 1.f};

  gl_->GenBuffers(1, &vertex_buffer_);
  gl_->BindBuffer(GL_ARRAY_BUFFER, vertex_buffer_);
  gl_->BufferData(GL_ARRAY_BUFFER, 2 * sizeof(GLfloat) * 4, vertex_positions,
                  GL_STATIC_DRAW);
  gl_->BindBuffer(GL_ARRAY_BUFFER, 0);

  program_ = CreateShader();

  gl_->TraceEndCHROMIUM();

  context_provider_->InvalidateGrContext(kGrInvalidateState);

  return (program_ != 0);
}

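// Uploads |frame|'s Y/U/V (and optional alpha) planes and renders a
// full-screen quad into |tex_out| through the conversion program. GL state is
// restored to defaults and the GrContext is invalidated before returning.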
void VideoDecoderShim::YUVConverter::Convert(
    const scoped_refptr<media::VideoFrame>& frame,
    GLuint tex_out) {
  const float* yuv_matrix = 0;
  const float* yuv_adjust = 0;

  if (video_format_ != frame->format()) {
    // The constants below were taken from cc/output/gl_renderer.cc.
    // These values are magic numbers that are used in the transformation from
    // YUV to RGB color values. They are taken from the following webpage:
    // http://www.fourcc.org/fccyvrgb.php
    const float yuv_to_rgb_rec601[9] = {
        1.164f, 1.164f, 1.164f, 0.0f, -.391f, 2.018f, 1.596f, -.813f, 0.0f,
    };
    const float yuv_to_rgb_jpeg[9] = {
        1.f, 1.f, 1.f, 0.0f, -.34414f, 1.772f, 1.402f, -.71414f, 0.0f,
    };
    const float yuv_to_rgb_rec709[9] = {
        1.164f, 1.164f, 1.164f, 0.0f, -0.213f, 2.112f, 1.793f, -0.533f, 0.0f,
    };

    // These values map to 16, 128, and 128 respectively, and are computed
    // as a fraction over 256 (e.g. 16 / 256 = 0.0625).
    // They are used in the YUV to RGBA conversion formula:
    //   Y - 16   : Gives 16 values of head and footroom for overshooting
    //   U - 128  : Turns unsigned U into signed U [-128,127]
    //   V - 128  : Turns unsigned V into signed V [-128,127]
    const float yuv_adjust_constrained[3] = {
        -0.0625f, -0.5f, -0.5f,
    };
    // Same as above, but without the head and footroom.
    const float yuv_adjust_full[3] = {
        0.0f, -0.5f, -0.5f,
    };

    yuv_adjust = yuv_adjust_constrained;
    yuv_matrix = yuv_to_rgb_rec601;

    int result;
    if (frame->metadata()->GetInteger(media::VideoFrameMetadata::COLOR_SPACE,
                                      &result)) {
      if (result == media::COLOR_SPACE_JPEG) {
        yuv_matrix = yuv_to_rgb_jpeg;
        yuv_adjust = yuv_adjust_full;
      } else if (result == media::COLOR_SPACE_HD_REC709) {
        yuv_matrix = yuv_to_rgb_rec709;
      }
    }

    switch (frame->format()) {
      case media::PIXEL_FORMAT_YV12:  // 420
      case media::PIXEL_FORMAT_YV12A:
      case media::PIXEL_FORMAT_I420:
        uv_height_divisor_ = 2;
        uv_width_divisor_ = 2;
        break;
      case media::PIXEL_FORMAT_YV16:  // 422
        uv_width_divisor_ = 2;
        uv_height_divisor_ = 1;
        break;
      case media::PIXEL_FORMAT_YV24:  // 444
        uv_width_divisor_ = 1;
        uv_height_divisor_ = 1;
        break;
      default:
        NOTREACHED();
    }

    video_format_ = frame->format();

    // Zero these so everything is reset below.
    y_width_ = y_height_ = 0;
  }

  gl_->TraceBeginCHROMIUM("YUVConverter", "YUVConverterContext");

  uint32_t ywidth = frame->coded_size().width();
  uint32_t yheight = frame->coded_size().height();

  DCHECK_EQ(frame->stride(media::VideoFrame::kUPlane),
            frame->stride(media::VideoFrame::kVPlane));

  uint32_t ystride = frame->stride(media::VideoFrame::kYPlane);
  uint32_t uvstride = frame->stride(media::VideoFrame::kUPlane);

  // The following code assumes that extended GLES 2.0 state like
  // UNPACK_SKIP* (if available) are set to defaults.
  gl_->PixelStorei(GL_UNPACK_ALIGNMENT, 1);

  if (ywidth != y_width_ || yheight != y_height_) {
    y_width_ = ywidth;
    y_height_ = yheight;

    uv_width_ = y_width_ / uv_width_divisor_;
    uv_height_ = y_height_ / uv_height_divisor_;

    // Re-create to resize the textures and upload data.
    gl_->PixelStorei(GL_UNPACK_ROW_LENGTH, ystride);
    gl_->ActiveTexture(GL_TEXTURE0);
    gl_->BindTexture(GL_TEXTURE_2D, y_texture_);
    gl_->TexImage2D(GL_TEXTURE_2D, 0, internal_format_, y_width_, y_height_, 0,
                    format_, GL_UNSIGNED_BYTE,
                    frame->data(media::VideoFrame::kYPlane));

    if (video_format_ == media::PIXEL_FORMAT_YV12A) {
      DCHECK_EQ(frame->stride(media::VideoFrame::kYPlane),
                frame->stride(media::VideoFrame::kAPlane));
      gl_->ActiveTexture(GL_TEXTURE3);
      gl_->BindTexture(GL_TEXTURE_2D, a_texture_);
      gl_->TexImage2D(GL_TEXTURE_2D, 0, internal_format_, y_width_, y_height_,
                      0, format_, GL_UNSIGNED_BYTE,
                      frame->data(media::VideoFrame::kAPlane));
    } else {
      // if there is no alpha channel, then create a 2x2 texture with full
      // alpha.
      gl_->PixelStorei(GL_UNPACK_ROW_LENGTH, 0);
      const uint8_t alpha[4] = {0xff, 0xff, 0xff, 0xff};
      gl_->ActiveTexture(GL_TEXTURE3);
      gl_->BindTexture(GL_TEXTURE_2D, a_texture_);
      gl_->TexImage2D(GL_TEXTURE_2D, 0, internal_format_, 2, 2, 0, format_,
                      GL_UNSIGNED_BYTE, alpha);
    }

    gl_->PixelStorei(GL_UNPACK_ROW_LENGTH, uvstride);
    gl_->ActiveTexture(GL_TEXTURE1);
    gl_->BindTexture(GL_TEXTURE_2D, u_texture_);
    gl_->TexImage2D(GL_TEXTURE_2D, 0, internal_format_, uv_width_, uv_height_,
                    0, format_, GL_UNSIGNED_BYTE,
                    frame->data(media::VideoFrame::kUPlane));

    gl_->ActiveTexture(GL_TEXTURE2);
    gl_->BindTexture(GL_TEXTURE_2D, v_texture_);
    gl_->TexImage2D(GL_TEXTURE_2D, 0, internal_format_, uv_width_, uv_height_,
                    0, format_, GL_UNSIGNED_BYTE,
                    frame->data(media::VideoFrame::kVPlane));
  } else {
    // Bind textures and upload texture data
    gl_->PixelStorei(GL_UNPACK_ROW_LENGTH, ystride);
    gl_->ActiveTexture(GL_TEXTURE0);
    gl_->BindTexture(GL_TEXTURE_2D, y_texture_);
    gl_->TexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, y_width_, y_height_, format_,
                       GL_UNSIGNED_BYTE,
                       frame->data(media::VideoFrame::kYPlane));

    if (video_format_ == media::PIXEL_FORMAT_YV12A) {
      DCHECK_EQ(frame->stride(media::VideoFrame::kYPlane),
                frame->stride(media::VideoFrame::kAPlane));
      gl_->ActiveTexture(GL_TEXTURE3);
      gl_->BindTexture(GL_TEXTURE_2D, a_texture_);
      gl_->TexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, y_width_, y_height_, format_,
                         GL_UNSIGNED_BYTE,
                         frame->data(media::VideoFrame::kAPlane));
    } else {
      gl_->ActiveTexture(GL_TEXTURE3);
      gl_->BindTexture(GL_TEXTURE_2D, a_texture_);
    }

    gl_->PixelStorei(GL_UNPACK_ROW_LENGTH, uvstride);
    gl_->ActiveTexture(GL_TEXTURE1);
    gl_->BindTexture(GL_TEXTURE_2D, u_texture_);
    gl_->TexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, uv_width_, uv_height_, format_,
                       GL_UNSIGNED_BYTE,
                       frame->data(media::VideoFrame::kUPlane));

    gl_->ActiveTexture(GL_TEXTURE2);
    gl_->BindTexture(GL_TEXTURE_2D, v_texture_);
    gl_->TexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, uv_width_, uv_height_, format_,
                       GL_UNSIGNED_BYTE,
                       frame->data(media::VideoFrame::kVPlane));
  }

  gl_->BindFramebuffer(GL_FRAMEBUFFER, frame_buffer_);
  gl_->FramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D,
                            tex_out, 0);

#ifndef NDEBUG
  // We should probably check for framebuffer complete here, but that
  // will slow this method down so check only in debug mode.
  GLint status = gl_->CheckFramebufferStatus(GL_FRAMEBUFFER);
  if (status != GL_FRAMEBUFFER_COMPLETE) {
    return;
  }
#endif

  gl_->Viewport(0, 0, ywidth, yheight);

  gl_->UseProgram(program_);

  if (yuv_matrix) {
    gl_->UniformMatrix3fv(yuv_matrix_loc_, 1, 0, yuv_matrix);
    gl_->Uniform3fv(yuv_adjust_loc_, 1, yuv_adjust);
  }

  gl_->BindBuffer(GL_ARRAY_BUFFER, vertex_buffer_);
  gl_->EnableVertexAttribArray(0);
  gl_->VertexAttribPointer(0, 2, GL_FLOAT, GL_FALSE, 2 * sizeof(GLfloat),
                           static_cast<const void*>(0));

  gl_->DrawArrays(GL_TRIANGLE_STRIP, 0, 4);

  // The YUVConverter shares the context with Skia and possibly other modules
  // that may make OpenGL calls. To be a "good OpenGL citizen" for other
  // (non-Skia) modules that may share this context we restore
  // buffer/texture/state bindings to OpenGL defaults here. If we were only
  // sharing the context with Skia this may not be necessary as we also
  // Invalidate the GrContext below so that Skia is aware that its state
  // caches need to be reset.

  gl_->BindBuffer(GL_ARRAY_BUFFER, 0);
  gl_->DisableVertexAttribArray(0);

  gl_->BindFramebuffer(GL_FRAMEBUFFER, 0);

  gl_->BindTexture(GL_TEXTURE_2D, 0);

  gl_->ActiveTexture(GL_TEXTURE2);
  gl_->BindTexture(GL_TEXTURE_2D, 0);

  gl_->ActiveTexture(GL_TEXTURE1);
  gl_->BindTexture(GL_TEXTURE_2D, 0);

  gl_->ActiveTexture(GL_TEXTURE0);
  gl_->BindTexture(GL_TEXTURE_2D, 0);
  gl_->PixelStorei(GL_UNPACK_ROW_LENGTH, 0);

  gl_->TraceEndCHROMIUM();

  context_provider_->InvalidateGrContext(kGrInvalidateState);
}

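// A decode request queued for the media thread: the plugin's decode id plus
// the copied bitstream data.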
struct VideoDecoderShim::PendingDecode {
  PendingDecode(uint32_t decode_id,
                const scoped_refptr<media::DecoderBuffer>& buffer);
  ~PendingDecode();

  const uint32_t decode_id;
  const scoped_refptr<media::DecoderBuffer> buffer;
};

VideoDecoderShim::PendingDecode::PendingDecode(
    uint32_t decode_id,
    const scoped_refptr<media::DecoderBuffer>& buffer)
    : decode_id(decode_id), buffer(buffer) {
}

VideoDecoderShim::PendingDecode::~PendingDecode() {
}

struct VideoDecoderShim::PendingFrame {
  explicit PendingFrame(uint32_t decode_id);
  PendingFrame(uint32_t decode_id,
               const scoped_refptr<media::VideoFrame>& frame);
  ~PendingFrame();

  const uint32_t decode_id;
  scoped_refptr<media::VideoFrame> video_frame;

 private:
  // This could be expensive to copy, so guard against that.
  DISALLOW_COPY_AND_ASSIGN(PendingFrame);
};

VideoDecoderShim::PendingFrame::PendingFrame(uint32_t decode_id)
    : decode_id(decode_id) {
}

VideoDecoderShim::PendingFrame::PendingFrame(
    uint32_t decode_id,
    const scoped_refptr<media::VideoFrame>& frame)
    : decode_id(decode_id), video_frame(frame) {
}

VideoDecoderShim::PendingFrame::~PendingFrame() {
}

// DecoderImpl runs the underlying VideoDecoder on the media thread, receiving
// calls from the VideoDecodeShim on the main thread and sending results back.
// This class is constructed on the main thread, but used and destructed on the
// media thread.
class VideoDecoderShim::DecoderImpl {
 public:
  explicit DecoderImpl(const base::WeakPtr<VideoDecoderShim>& proxy);
  ~DecoderImpl();

  void Initialize(media::VideoDecoderConfig config);
  void Decode(uint32_t decode_id, scoped_refptr<media::DecoderBuffer> buffer);
  void Reset();
  void Stop();

 private:
  void OnInitDone(bool success);
  void DoDecode();
  void OnDecodeComplete(media::VideoDecoder::Status status);
  void OnOutputComplete(const scoped_refptr<media::VideoFrame>& frame);
  void OnResetComplete();

  // WeakPtr is bound to main_message_loop_. Use only in shim callbacks.
  base::WeakPtr<VideoDecoderShim> shim_;
  scoped_ptr<media::VideoDecoder> decoder_;
  bool initialized_ = false;
  scoped_refptr<base::SingleThreadTaskRunner> main_task_runner_;
  // Queue of decodes waiting for the decoder.
  typedef std::queue<PendingDecode> PendingDecodeQueue;
  PendingDecodeQueue pending_decodes_;
  bool awaiting_decoder_ = false;
  // VideoDecoder returns pictures without information about the decode buffer
  // that generated it, but VideoDecoder implementations used in this class
  // (media::FFmpegVideoDecoder and media::VpxVideoDecoder) always generate
  // corresponding frames before decode is finished. |decode_id_| is used to
  // store id of the current buffer while Decode() call is pending.
  uint32_t decode_id_ = 0;

  base::WeakPtrFactory<DecoderImpl> weak_ptr_factory_;
};

VideoDecoderShim::DecoderImpl::DecoderImpl(
    const base::WeakPtr<VideoDecoderShim>& proxy)
    : shim_(proxy),
      main_task_runner_(base::ThreadTaskRunnerHandle::Get()),
      weak_ptr_factory_(this) {
}

VideoDecoderShim::DecoderImpl::~DecoderImpl() {
  DCHECK(pending_decodes_.empty());
}

void VideoDecoderShim::DecoderImpl::Initialize(
    media::VideoDecoderConfig config) {
#if !defined(MEDIA_DISABLE_LIBVPX)
  if (config.codec() == media::kCodecVP9) {
    decoder_.reset(
        new media::VpxVideoDecoder(base::ThreadTaskRunnerHandle::Get()));
  } else
#endif  // !defined(MEDIA_DISABLE_LIBVPX)
  {
    scoped_ptr<media::FFmpegVideoDecoder> ffmpeg_video_decoder(
        new media::FFmpegVideoDecoder(base::ThreadTaskRunnerHandle::Get()));
    ffmpeg_video_decoder->set_decode_nalus(true);
    decoder_ = ffmpeg_video_decoder.Pass();
  }

  // VpxVideoDecoder and FFmpegVideoDecoder support only one pending Decode()
  // request.
  DCHECK_EQ(decoder_->GetMaxDecodeRequests(), 1);

  decoder_->Initialize(
      config, true /* low_delay */,
      base::Bind(&VideoDecoderShim::DecoderImpl::OnInitDone,
                 weak_ptr_factory_.GetWeakPtr()),
      base::Bind(&VideoDecoderShim::DecoderImpl::OnOutputComplete,
                 weak_ptr_factory_.GetWeakPtr()));
}

void VideoDecoderShim::DecoderImpl::Decode(
    uint32_t decode_id,
    scoped_refptr<media::DecoderBuffer> buffer) {
  DCHECK(decoder_);
  pending_decodes_.push(PendingDecode(decode_id, buffer));

  DoDecode();
}

void VideoDecoderShim::DecoderImpl::Reset() {
  DCHECK(decoder_);
  // Abort all pending decodes.
  while (!pending_decodes_.empty()) {
    const PendingDecode& decode = pending_decodes_.front();
    scoped_ptr<PendingFrame> pending_frame(new PendingFrame(decode.decode_id));
    main_task_runner_->PostTask(
        FROM_HERE, base::Bind(&VideoDecoderShim::OnDecodeComplete, shim_,
                              media::VideoDecoder::kAborted, decode.decode_id));
    pending_decodes_.pop();
  }
  // Don't need to call Reset() if the |decoder_| hasn't been initialized.
  if (!initialized_) {
    OnResetComplete();
    return;
  }

  decoder_->Reset(base::Bind(&VideoDecoderShim::DecoderImpl::OnResetComplete,
                             weak_ptr_factory_.GetWeakPtr()));
}

void VideoDecoderShim::DecoderImpl::Stop() {
  DCHECK(decoder_);
  // Clear pending decodes now. We don't want OnDecodeComplete to call DoDecode
  // again.
  while (!pending_decodes_.empty())
    pending_decodes_.pop();
  decoder_.reset();
  // This instance is deleted once we exit this scope.
}

void VideoDecoderShim::DecoderImpl::OnInitDone(bool success) {
  if (!success) {
    main_task_runner_->PostTask(
        FROM_HERE, base::Bind(&VideoDecoderShim::OnInitializeFailed, shim_));
    return;
  }

  initialized_ = true;
  DoDecode();
}

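// Feeds the next queued buffer to |decoder_|. Only one Decode() is kept in
// flight at a time.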
void VideoDecoderShim::DecoderImpl::DoDecode() {
  if (!initialized_ || pending_decodes_.empty() || awaiting_decoder_)
    return;

  awaiting_decoder_ = true;
  const PendingDecode& decode = pending_decodes_.front();
  decode_id_ = decode.decode_id;
  decoder_->Decode(decode.buffer,
                   base::Bind(&VideoDecoderShim::DecoderImpl::OnDecodeComplete,
                              weak_ptr_factory_.GetWeakPtr()));
  pending_decodes_.pop();
}

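// Translates the decoder status into a Pepper error code, reports it to the
// shim on the main thread, then tries to start the next decode.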
void VideoDecoderShim::DecoderImpl::OnDecodeComplete(
    media::VideoDecoder::Status status) {
  DCHECK(awaiting_decoder_);
  awaiting_decoder_ = false;

  int32_t result;
  switch (status) {
    case media::VideoDecoder::kOk:
    case media::VideoDecoder::kAborted:
      result = PP_OK;
      break;
    case media::VideoDecoder::kDecodeError:
      result = PP_ERROR_RESOURCE_FAILED;
      break;
    default:
      NOTREACHED();
      result = PP_ERROR_FAILED;
      break;
  }

  main_task_runner_->PostTask(
      FROM_HERE, base::Bind(&VideoDecoderShim::OnDecodeComplete, shim_, result,
                            decode_id_));

  DoDecode();
}

void VideoDecoderShim::DecoderImpl::OnOutputComplete(
    const scoped_refptr<media::VideoFrame>& frame) {
  // Software decoders are expected to generated frames only when a Decode()
  // call is pending.
  DCHECK(awaiting_decoder_);

  scoped_ptr<PendingFrame> pending_frame;
  if (!frame->metadata()->IsTrue(media::VideoFrameMetadata::END_OF_STREAM))
    pending_frame.reset(new PendingFrame(decode_id_, frame));
  else
    pending_frame.reset(new PendingFrame(decode_id_));

  main_task_runner_->PostTask(
      FROM_HERE, base::Bind(&VideoDecoderShim::OnOutputComplete, shim_,
                            base::Passed(&pending_frame)));
}

void VideoDecoderShim::DecoderImpl::OnResetComplete() {
  main_task_runner_->PostTask(
      FROM_HERE, base::Bind(&VideoDecoderShim::OnResetComplete, shim_));
}

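// VideoDecoderShim lives on the main (render) thread and bridges the Pepper
// host to DecoderImpl, which runs on the media thread.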
VideoDecoderShim::VideoDecoderShim(
    PepperVideoDecoderHost* host, uint32_t texture_pool_size)
    : state_(UNINITIALIZED),
      host_(host),
      media_task_runner_(
          RenderThreadImpl::current()->GetMediaThreadTaskRunner()),
      context_provider_(
          RenderThreadImpl::current()->SharedMainThreadContextProvider()),
      texture_pool_size_(texture_pool_size),
      num_pending_decodes_(0),
      yuv_converter_(new YUVConverter(context_provider_)),
      weak_ptr_factory_(this) {
  DCHECK(host_);
  DCHECK(media_task_runner_.get());
  DCHECK(context_provider_.get());
  decoder_impl_.reset(new DecoderImpl(weak_ptr_factory_.GetWeakPtr()));
}

VideoDecoderShim::~VideoDecoderShim() {
  DCHECK(RenderThreadImpl::current());
  // Delete any remaining textures.
  TextureIdMap::iterator it = texture_id_map_.begin();
  for (; it != texture_id_map_.end(); ++it)
    DeleteTexture(it->second);
  texture_id_map_.clear();

  FlushCommandBuffer();

  weak_ptr_factory_.InvalidateWeakPtrs();
  // No more callbacks from the delegate will be received now.

  // The callback now holds the only reference to the DecoderImpl, which will be
  // deleted when Stop completes.
  media_task_runner_->PostTask(
      FROM_HERE,
      base::Bind(&VideoDecoderShim::DecoderImpl::Stop,
                 base::Owned(decoder_impl_.release())));
}

bool VideoDecoderShim::Initialize(
    media::VideoCodecProfile profile,
    media::VideoDecodeAccelerator::Client* client) {
  DCHECK_EQ(client, host_);
  DCHECK(RenderThreadImpl::current());
  DCHECK_EQ(state_, UNINITIALIZED);
  media::VideoCodec codec = media::kUnknownVideoCodec;
  if (profile <= media::H264PROFILE_MAX)
    codec = media::kCodecH264;
  else if (profile <= media::VP8PROFILE_MAX)
    codec = media::kCodecVP8;
  else if (profile <= media::VP9PROFILE_MAX)
    codec = media::kCodecVP9;
  DCHECK_NE(codec, media::kUnknownVideoCodec);

  if (!IsCodecSupported(codec))
    return false;

  if (!yuv_converter_->Initialize())
    return false;

  media::VideoDecoderConfig config(
      codec, profile, media::PIXEL_FORMAT_YV12, media::COLOR_SPACE_UNSPECIFIED,
      gfx::Size(32, 24),  // Small sizes that won't fail.
      gfx::Rect(32, 24), gfx::Size(32, 24),
      NULL /* extra_data */,  // TODO(bbudge) Verify this isn't needed.
      0 /* extra_data_size */, false /* decryption */);

  media_task_runner_->PostTask(
      FROM_HERE,
      base::Bind(&VideoDecoderShim::DecoderImpl::Initialize,
                 base::Unretained(decoder_impl_.get()),
                 config));

  state_ = DECODING;

  // Return success, even though we are asynchronous, to mimic
  // media::VideoDecodeAccelerator.
  return true;
}

void VideoDecoderShim::Decode(const media::BitstreamBuffer& bitstream_buffer) {
  DCHECK(RenderThreadImpl::current());
  DCHECK_EQ(state_, DECODING);

  // We need the address of the shared memory, so we can copy the buffer.
  const uint8_t* buffer = host_->DecodeIdToAddress(bitstream_buffer.id());
  DCHECK(buffer);

  media_task_runner_->PostTask(
      FROM_HERE,
      base::Bind(
          &VideoDecoderShim::DecoderImpl::Decode,
          base::Unretained(decoder_impl_.get()),
          bitstream_buffer.id(),
          media::DecoderBuffer::CopyFrom(buffer, bitstream_buffer.size())));
  num_pending_decodes_++;
}

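// Consumes the mailboxes requested from the host into local texture ids and
// marks the plugin's textures as available for output.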
void VideoDecoderShim::AssignPictureBuffers(
    const std::vector<media::PictureBuffer>& buffers) {
  DCHECK(RenderThreadImpl::current());
  DCHECK_NE(state_, UNINITIALIZED);
  if (buffers.empty()) {
    NOTREACHED();
    return;
  }
  DCHECK_EQ(buffers.size(), pending_texture_mailboxes_.size());
  GLuint num_textures = base::checked_cast<GLuint>(buffers.size());
  std::vector<uint32_t> local_texture_ids(num_textures);
  gpu::gles2::GLES2Interface* gles2 = context_provider_->ContextGL();
  for (uint32_t i = 0; i < num_textures; i++) {
    local_texture_ids[i] = gles2->CreateAndConsumeTextureCHROMIUM(
        GL_TEXTURE_2D, pending_texture_mailboxes_[i].name);
    // Map the plugin texture id to the local texture id.
    uint32_t plugin_texture_id = buffers[i].texture_id();
    texture_id_map_[plugin_texture_id] = local_texture_ids[i];
    available_textures_.insert(plugin_texture_id);
  }
  pending_texture_mailboxes_.clear();

  SendPictures();
}

void VideoDecoderShim::ReusePictureBuffer(int32 picture_buffer_id) {
  DCHECK(RenderThreadImpl::current());
  uint32_t texture_id = static_cast<uint32_t>(picture_buffer_id);
  if (textures_to_dismiss_.find(texture_id) != textures_to_dismiss_.end()) {
    DismissTexture(texture_id);
  } else if (texture_id_map_.find(texture_id) != texture_id_map_.end()) {
    available_textures_.insert(texture_id);
  } else {
    NOTREACHED();
  }

  SendPictures();
}

void VideoDecoderShim::Flush() {
  DCHECK(RenderThreadImpl::current());
  DCHECK_EQ(state_, DECODING);
  state_ = FLUSHING;
}

void VideoDecoderShim::Reset() {
  DCHECK(RenderThreadImpl::current());
  DCHECK_EQ(state_, DECODING);
  state_ = RESETTING;
  media_task_runner_->PostTask(
      FROM_HERE,
      base::Bind(&VideoDecoderShim::DecoderImpl::Reset,
                 base::Unretained(decoder_impl_.get())));
}

void VideoDecoderShim::Destroy() {
  delete this;
}

void VideoDecoderShim::OnInitializeFailed() {
  DCHECK(RenderThreadImpl::current());
  DCHECK(host_);

  host_->NotifyError(media::VideoDecodeAccelerator::PLATFORM_FAILURE);
}

void VideoDecoderShim::OnDecodeComplete(int32_t result, uint32_t decode_id) {
  DCHECK(RenderThreadImpl::current());
  DCHECK(host_);

  if (result == PP_ERROR_RESOURCE_FAILED) {
    host_->NotifyError(media::VideoDecodeAccelerator::PLATFORM_FAILURE);
    return;
  }

  num_pending_decodes_--;
  completed_decodes_.push(decode_id);

  // If frames are being queued because we're out of textures, don't notify
  // the host that decode has completed. This exerts "back pressure" to keep
  // the host from sending buffers that will cause pending_frames_ to grow.
  if (pending_frames_.empty())
    NotifyCompletedDecodes();
}

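// Queues a decoded frame for output (end-of-stream frames carry no picture).
// If the coded size changed, the current texture pool is dismissed and a new
// one is requested from the host.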
void VideoDecoderShim::OnOutputComplete(scoped_ptr<PendingFrame> frame) {
  DCHECK(RenderThreadImpl::current());
  DCHECK(host_);

  if (frame->video_frame) {
    if (texture_size_ != frame->video_frame->coded_size()) {
      // If the size has changed, all current textures must be dismissed. Add
      // all textures to |textures_to_dismiss_| and dismiss any that aren't in
      // use by the plugin. We will dismiss the rest as they are recycled.
      for (TextureIdMap::const_iterator it = texture_id_map_.begin();
           it != texture_id_map_.end();
           ++it) {
        textures_to_dismiss_.insert(it->first);
      }
      for (TextureIdSet::const_iterator it = available_textures_.begin();
           it != available_textures_.end();
           ++it) {
        DismissTexture(*it);
      }
      available_textures_.clear();
      FlushCommandBuffer();

      DCHECK(pending_texture_mailboxes_.empty());
      for (uint32_t i = 0; i < texture_pool_size_; i++)
        pending_texture_mailboxes_.push_back(gpu::Mailbox::Generate());

      host_->RequestTextures(texture_pool_size_,
                             frame->video_frame->coded_size(), GL_TEXTURE_2D,
                             pending_texture_mailboxes_);
      texture_size_ = frame->video_frame->coded_size();
    }

    pending_frames_.push(linked_ptr<PendingFrame>(frame.release()));

    SendPictures();
  }
}

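// Pairs queued frames with available textures, converts them to RGBA and
// hands the resulting pictures to the host. Also completes a pending flush
// once everything has drained.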
void VideoDecoderShim::SendPictures() {
  DCHECK(RenderThreadImpl::current());
  DCHECK(host_);
  while (!pending_frames_.empty() && !available_textures_.empty()) {
    const linked_ptr<PendingFrame>& frame = pending_frames_.front();

    TextureIdSet::iterator it = available_textures_.begin();
    uint32_t texture_id = *it;
    available_textures_.erase(it);

    uint32_t local_texture_id = texture_id_map_[texture_id];

    yuv_converter_->Convert(frame->video_frame, local_texture_id);

    host_->PictureReady(media::Picture(texture_id, frame->decode_id,
                                       frame->video_frame->visible_rect(),
                                       false /* allow_overlay */));
    pending_frames_.pop();
  }

  FlushCommandBuffer();

  if (pending_frames_.empty()) {
    // If frames aren't backing up, notify the host of any completed decodes so
    // it can send more buffers.
    NotifyCompletedDecodes();

    if (state_ == FLUSHING && !num_pending_decodes_) {
      state_ = DECODING;
      host_->NotifyFlushDone();
    }
  }
}

void VideoDecoderShim::OnResetComplete() {
  DCHECK(RenderThreadImpl::current());
  DCHECK(host_);

  while (!pending_frames_.empty())
    pending_frames_.pop();
  NotifyCompletedDecodes();

  // Dismiss any old textures now.
  while (!textures_to_dismiss_.empty())
    DismissTexture(*textures_to_dismiss_.begin());

  state_ = DECODING;
  host_->NotifyResetDone();
}

void VideoDecoderShim::NotifyCompletedDecodes() {
  while (!completed_decodes_.empty()) {
    host_->NotifyEndOfBitstreamBuffer(completed_decodes_.front());
    completed_decodes_.pop();
  }
}

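// Deletes the local texture backing |texture_id| and tells the host to drop
// the corresponding picture buffer.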
void VideoDecoderShim::DismissTexture(uint32_t texture_id) {
  DCHECK(host_);
  textures_to_dismiss_.erase(texture_id);
  DCHECK(texture_id_map_.find(texture_id) != texture_id_map_.end());
  DeleteTexture(texture_id_map_[texture_id]);
  texture_id_map_.erase(texture_id);
  host_->DismissPictureBuffer(texture_id);
}

void VideoDecoderShim::DeleteTexture(uint32_t texture_id) {
  gpu::gles2::GLES2Interface* gles2 = context_provider_->ContextGL();
  gles2->DeleteTextures(1, &texture_id);
}

void VideoDecoderShim::FlushCommandBuffer() {
  context_provider_->ContextGL()->Flush();
}

}  // namespace content