// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "content/common/gpu/media/gpu_video_decode_accelerator.h"

#include "base/command_line.h"
#include "base/logging.h"
#include "base/message_loop/message_loop_proxy.h"
#include "base/stl_util.h"
#include "content/common/gpu/gpu_channel.h"
#include "content/common/gpu/gpu_messages.h"
#include "content/public/common/content_switches.h"
#include "gpu/command_buffer/common/command_buffer.h"
#include "ipc/ipc_message_macros.h"
#include "ipc/ipc_message_utils.h"
#include "ipc/message_filter.h"
#include "media/base/limits.h"
#include "ui/gl/gl_context.h"
#include "ui/gl/gl_surface_egl.h"

#if defined(OS_WIN)
#include "base/win/windows_version.h"
#include "content/common/gpu/media/dxva_video_decode_accelerator.h"
#elif defined(OS_MACOSX)
#include "content/common/gpu/media/vt_video_decode_accelerator.h"
#elif defined(OS_CHROMEOS) && defined(ARCH_CPU_ARMEL) && defined(USE_X11)
#include "content/common/gpu/media/v4l2_video_decode_accelerator.h"
#include "content/common/gpu/media/v4l2_video_device.h"
#elif defined(OS_CHROMEOS) && defined(ARCH_CPU_X86_FAMILY) && defined(USE_X11)
#include "content/common/gpu/media/vaapi_video_decode_accelerator.h"
#include "ui/gl/gl_context_glx.h"
#include "ui/gl/gl_implementation.h"
#elif defined(USE_OZONE)
#include "media/ozone/media_ozone_platform.h"
#elif defined(OS_ANDROID)
#include "content/common/gpu/media/android_video_decode_accelerator.h"
#endif

#include "ui/gfx/size.h"

namespace content {

static bool MakeDecoderContextCurrent(
    const base::WeakPtr<GpuCommandBufferStub> stub) {
  if (!stub) {
    DLOG(ERROR) << "Stub is gone; won't MakeCurrent().";
    return false;
  }
  if (!stub->decoder()->MakeCurrent()) {
    DLOG(ERROR) << "Failed to MakeCurrent()";
    return false;
  }
  return true;
}

// DebugAutoLock works like AutoLock but only acquires the lock when
// DCHECK is on.
#if DCHECK_IS_ON
typedef base::AutoLock DebugAutoLock;
#else
class DebugAutoLock {
 public:
  explicit DebugAutoLock(base::Lock&) {}
};
#endif

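// Filter that runs on the GPU process IO thread and forwards
// AcceleratedVideoDecoderMsg_Decode straight to the owning
// GpuVideoDecodeAccelerator, bypassing the child (main) thread. It is only
// installed when the VDA reports CanDecodeOnIOThread().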
class GpuVideoDecodeAccelerator::MessageFilter : public IPC::MessageFilter {
 public:
  MessageFilter(GpuVideoDecodeAccelerator* owner, int32 host_route_id)
      : owner_(owner), host_route_id_(host_route_id) {}

  void OnChannelError() override { sender_ = NULL; }

  void OnChannelClosing() override { sender_ = NULL; }

  void OnFilterAdded(IPC::Sender* sender) override { sender_ = sender; }

  void OnFilterRemoved() override {
    // This will delete |owner_| and |this|.
    owner_->OnFilterRemoved();
  }

  bool OnMessageReceived(const IPC::Message& msg) override {
    if (msg.routing_id() != host_route_id_)
      return false;

    IPC_BEGIN_MESSAGE_MAP(MessageFilter, msg)
      IPC_MESSAGE_FORWARD(AcceleratedVideoDecoderMsg_Decode, owner_,
                          GpuVideoDecodeAccelerator::OnDecode)
      IPC_MESSAGE_UNHANDLED(return false;)
    IPC_END_MESSAGE_MAP()
    return true;
  }

  bool SendOnIOThread(IPC::Message* message) {
    DCHECK(!message->is_sync());
    if (!sender_) {
      delete message;
      return false;
    }
    return sender_->Send(message);
  }

 protected:
  ~MessageFilter() override {}

 private:
  GpuVideoDecodeAccelerator* owner_;
  int32 host_route_id_;
  // The sender to which this filter was added.
  IPC::Sender* sender_;
};

GpuVideoDecodeAccelerator::GpuVideoDecodeAccelerator(
    int32 host_route_id,
    GpuCommandBufferStub* stub,
    const scoped_refptr<base::MessageLoopProxy>& io_message_loop)
    : host_route_id_(host_route_id),
      stub_(stub),
      texture_target_(0),
      filter_removed_(true, false),
      io_message_loop_(io_message_loop),
      weak_factory_for_io_(this) {
  DCHECK(stub_);
  stub_->AddDestructionObserver(this);
  child_message_loop_ = base::MessageLoopProxy::current();
  make_context_current_ =
      base::Bind(&MakeDecoderContextCurrent, stub_->AsWeakPtr());
}

GpuVideoDecodeAccelerator::~GpuVideoDecodeAccelerator() {
  // This class can only be self-deleted from OnWillDestroyStub(), which means
  // the VDA has already been destroyed in there.
  DCHECK(!video_decode_accelerator_);
}

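// Routed messages for this decoder arrive here on the child thread. When the
// IO-thread MessageFilter is installed, Decode messages are intercepted there
// instead and never reach this handler.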
bool GpuVideoDecodeAccelerator::OnMessageReceived(const IPC::Message& msg) {
  if (!video_decode_accelerator_)
    return false;

  bool handled = true;
  IPC_BEGIN_MESSAGE_MAP(GpuVideoDecodeAccelerator, msg)
    IPC_MESSAGE_HANDLER(AcceleratedVideoDecoderMsg_Decode, OnDecode)
    IPC_MESSAGE_HANDLER(AcceleratedVideoDecoderMsg_AssignPictureBuffers,
                        OnAssignPictureBuffers)
    IPC_MESSAGE_HANDLER(AcceleratedVideoDecoderMsg_ReusePictureBuffer,
                        OnReusePictureBuffer)
    IPC_MESSAGE_HANDLER(AcceleratedVideoDecoderMsg_Flush, OnFlush)
    IPC_MESSAGE_HANDLER(AcceleratedVideoDecoderMsg_Reset, OnReset)
    IPC_MESSAGE_HANDLER(AcceleratedVideoDecoderMsg_Destroy, OnDestroy)
    IPC_MESSAGE_UNHANDLED(handled = false)
  IPC_END_MESSAGE_MAP()
  return handled;
}

void GpuVideoDecodeAccelerator::ProvidePictureBuffers(
    uint32 requested_num_of_buffers,
    const gfx::Size& dimensions,
    uint32 texture_target) {
  if (dimensions.width() > media::limits::kMaxDimension ||
      dimensions.height() > media::limits::kMaxDimension ||
      dimensions.GetArea() > media::limits::kMaxCanvas) {
    NotifyError(media::VideoDecodeAccelerator::PLATFORM_FAILURE);
    return;
  }
  if (!Send(new AcceleratedVideoDecoderHostMsg_ProvidePictureBuffers(
          host_route_id_,
          requested_num_of_buffers,
          dimensions,
          texture_target))) {
    DLOG(ERROR) << "Send(AcceleratedVideoDecoderHostMsg_ProvidePictureBuffers) "
                << "failed";
  }
  texture_dimensions_ = dimensions;
  texture_target_ = texture_target;
}

void GpuVideoDecodeAccelerator::DismissPictureBuffer(
    int32 picture_buffer_id) {
  // Notify client that picture buffer is now unused.
  if (!Send(new AcceleratedVideoDecoderHostMsg_DismissPictureBuffer(
          host_route_id_, picture_buffer_id))) {
    DLOG(ERROR) << "Send(AcceleratedVideoDecoderHostMsg_DismissPictureBuffer) "
                << "failed";
  }
  DebugAutoLock auto_lock(debug_uncleared_textures_lock_);
  uncleared_textures_.erase(picture_buffer_id);
}

void GpuVideoDecodeAccelerator::PictureReady(
    const media::Picture& picture) {
  // VDA may call PictureReady on the IO thread, but SetTextureCleared must run
  // on the child thread. The VDA is responsible for calling PictureReady on
  // the child thread the first time a picture buffer is delivered.
  if (child_message_loop_->BelongsToCurrentThread()) {
    SetTextureCleared(picture);
  } else {
    DCHECK(io_message_loop_->BelongsToCurrentThread());
    DebugAutoLock auto_lock(debug_uncleared_textures_lock_);
    DCHECK_EQ(0u, uncleared_textures_.count(picture.picture_buffer_id()));
  }

  if (!Send(new AcceleratedVideoDecoderHostMsg_PictureReady(
          host_route_id_,
          picture.picture_buffer_id(),
          picture.bitstream_buffer_id(),
          picture.visible_rect()))) {
    DLOG(ERROR) << "Send(AcceleratedVideoDecoderHostMsg_PictureReady) failed";
  }
}

void GpuVideoDecodeAccelerator::NotifyError(
    media::VideoDecodeAccelerator::Error error) {
  if (!Send(new AcceleratedVideoDecoderHostMsg_ErrorNotification(
          host_route_id_, error))) {
    DLOG(ERROR) << "Send(AcceleratedVideoDecoderHostMsg_ErrorNotification) "
                << "failed";
  }
}

void GpuVideoDecodeAccelerator::Initialize(
    const media::VideoCodecProfile profile,
    IPC::Message* init_done_msg) {
  DCHECK(!video_decode_accelerator_.get());

  if (!stub_->channel()->AddRoute(host_route_id_, this)) {
    DLOG(ERROR) << "GpuVideoDecodeAccelerator::Initialize(): "
                   "failed to add route";
    SendCreateDecoderReply(init_done_msg, false);
    return;
  }

  // Ensure we will be able to get a GL context at all before initializing
  // the VDA.
  if (!make_context_current_.Run()) {
    SendCreateDecoderReply(init_done_msg, false);
    return;
  }

#if defined(OS_WIN)
  if (base::win::GetVersion() < base::win::VERSION_WIN7) {
    NOTIMPLEMENTED() << "HW video decode acceleration not available.";
    SendCreateDecoderReply(init_done_msg, false);
    return;
  }
  DVLOG(0) << "Initializing DXVA HW decoder for windows.";
  video_decode_accelerator_.reset(
      new DXVAVideoDecodeAccelerator(make_context_current_));
#elif defined(OS_MACOSX)
  video_decode_accelerator_.reset(new VTVideoDecodeAccelerator(
      static_cast<CGLContextObj>(
          stub_->decoder()->GetGLContext()->GetHandle()),
      make_context_current_));
#elif defined(OS_CHROMEOS) && defined(ARCH_CPU_ARMEL) && defined(USE_X11)
  scoped_ptr<V4L2Device> device = V4L2Device::Create(V4L2Device::kDecoder);
  if (!device.get()) {
    SendCreateDecoderReply(init_done_msg, false);
    return;
  }
  video_decode_accelerator_.reset(new V4L2VideoDecodeAccelerator(
      gfx::GLSurfaceEGL::GetHardwareDisplay(),
      stub_->decoder()->GetGLContext()->GetHandle(),
      weak_factory_for_io_.GetWeakPtr(),
      make_context_current_,
      device.Pass(),
      io_message_loop_));
#elif defined(OS_CHROMEOS) && defined(ARCH_CPU_X86_FAMILY) && defined(USE_X11)
  if (gfx::GetGLImplementation() != gfx::kGLImplementationDesktopGL) {
    VLOG(1) << "HW video decode acceleration not available without "
               "DesktopGL (GLX).";
    SendCreateDecoderReply(init_done_msg, false);
    return;
  }
  gfx::GLContextGLX* glx_context =
      static_cast<gfx::GLContextGLX*>(stub_->decoder()->GetGLContext());
  video_decode_accelerator_.reset(new VaapiVideoDecodeAccelerator(
      glx_context->display(), make_context_current_));
#elif defined(USE_OZONE)
  media::MediaOzonePlatform* platform =
      media::MediaOzonePlatform::GetInstance();
  video_decode_accelerator_.reset(platform->CreateVideoDecodeAccelerator(
      make_context_current_));
  if (!video_decode_accelerator_) {
    SendCreateDecoderReply(init_done_msg, false);
    return;
  }
#elif defined(OS_ANDROID)
  video_decode_accelerator_.reset(new AndroidVideoDecodeAccelerator(
      stub_->decoder()->AsWeakPtr(),
      make_context_current_));
#else
  NOTIMPLEMENTED() << "HW video decode acceleration not available.";
  SendCreateDecoderReply(init_done_msg, false);
  return;
#endif

  if (video_decode_accelerator_->CanDecodeOnIOThread()) {
    filter_ = new MessageFilter(this, host_route_id_);
    stub_->channel()->AddFilter(filter_.get());
  }

  if (!video_decode_accelerator_->Initialize(profile, this)) {
    SendCreateDecoderReply(init_done_msg, false);
    return;
  }

  SendCreateDecoderReply(init_done_msg, true);
}

// Runs on IO thread if video_decode_accelerator_->CanDecodeOnIOThread() is
// true, otherwise on the main thread.
void GpuVideoDecodeAccelerator::OnDecode(
    base::SharedMemoryHandle handle, int32 id, uint32 size) {
  DCHECK(video_decode_accelerator_.get());
  if (id < 0) {
    DLOG(ERROR) << "BitstreamBuffer id " << id << " out of range";
    if (child_message_loop_->BelongsToCurrentThread()) {
      NotifyError(media::VideoDecodeAccelerator::INVALID_ARGUMENT);
    } else {
      child_message_loop_->PostTask(
          FROM_HERE,
          base::Bind(&GpuVideoDecodeAccelerator::NotifyError,
                     base::Unretained(this),
                     media::VideoDecodeAccelerator::INVALID_ARGUMENT));
    }
    return;
  }
  video_decode_accelerator_->Decode(media::BitstreamBuffer(id, handle, size));
}

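// Validates the renderer-supplied buffer/texture id pairs, resolves them to
// service-side textures, and hands the resulting PictureBuffers to the VDA.
// The texture refs are remembered in |uncleared_textures_| until the first
// PictureReady marks them cleared.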
void GpuVideoDecodeAccelerator::OnAssignPictureBuffers(
    const std::vector<int32>& buffer_ids,
    const std::vector<uint32>& texture_ids) {
  if (buffer_ids.size() != texture_ids.size()) {
    NotifyError(media::VideoDecodeAccelerator::INVALID_ARGUMENT);
    return;
  }

  gpu::gles2::GLES2Decoder* command_decoder = stub_->decoder();
  gpu::gles2::TextureManager* texture_manager =
      command_decoder->GetContextGroup()->texture_manager();

  std::vector<media::PictureBuffer> buffers;
  std::vector<scoped_refptr<gpu::gles2::TextureRef> > textures;
  for (uint32 i = 0; i < buffer_ids.size(); ++i) {
    if (buffer_ids[i] < 0) {
      DLOG(ERROR) << "Buffer id " << buffer_ids[i] << " out of range";
      NotifyError(media::VideoDecodeAccelerator::INVALID_ARGUMENT);
      return;
    }
    gpu::gles2::TextureRef* texture_ref = texture_manager->GetTexture(
        texture_ids[i]);
    if (!texture_ref) {
      DLOG(ERROR) << "Failed to find texture id " << texture_ids[i];
      NotifyError(media::VideoDecodeAccelerator::INVALID_ARGUMENT);
      return;
    }
    gpu::gles2::Texture* info = texture_ref->texture();
    if (info->target() != texture_target_) {
      DLOG(ERROR) << "Texture target mismatch for texture id "
                  << texture_ids[i];
      NotifyError(media::VideoDecodeAccelerator::INVALID_ARGUMENT);
      return;
    }
    if (texture_target_ == GL_TEXTURE_EXTERNAL_OES ||
        texture_target_ == GL_TEXTURE_RECTANGLE) {
      // These textures have their dimensions defined by the underlying
      // storage. Use |texture_dimensions_| for this size.
      texture_manager->SetLevelInfo(texture_ref,
                                    texture_target_,
                                    0,
                                    GL_RGBA,
                                    texture_dimensions_.width(),
                                    texture_dimensions_.height(),
                                    1,
                                    0,
                                    GL_RGBA,
                                    GL_UNSIGNED_BYTE,
                                    false);
    } else {
      // For other targets, texture dimensions should already be defined.
      GLsizei width = 0, height = 0;
      info->GetLevelSize(texture_target_, 0, &width, &height);
      if (width != texture_dimensions_.width() ||
          height != texture_dimensions_.height()) {
        DLOG(ERROR) << "Size mismatch for texture id " << texture_ids[i];
        NotifyError(media::VideoDecodeAccelerator::INVALID_ARGUMENT);
        return;
      }
    }
    uint32 service_texture_id;
    if (!command_decoder->GetServiceTextureId(
            texture_ids[i], &service_texture_id)) {
      DLOG(ERROR) << "Failed to translate texture!";
      NotifyError(media::VideoDecodeAccelerator::PLATFORM_FAILURE);
      return;
    }
    buffers.push_back(media::PictureBuffer(
        buffer_ids[i], texture_dimensions_, service_texture_id));
    textures.push_back(texture_ref);
  }
  video_decode_accelerator_->AssignPictureBuffers(buffers);
  DebugAutoLock auto_lock(debug_uncleared_textures_lock_);
  for (uint32 i = 0; i < buffer_ids.size(); ++i)
    uncleared_textures_[buffer_ids[i]] = textures[i];
}

void GpuVideoDecodeAccelerator::OnReusePictureBuffer(
    int32 picture_buffer_id) {
  DCHECK(video_decode_accelerator_.get());
  video_decode_accelerator_->ReusePictureBuffer(picture_buffer_id);
}

void GpuVideoDecodeAccelerator::OnFlush() {
  DCHECK(video_decode_accelerator_.get());
  video_decode_accelerator_->Flush();
}

void GpuVideoDecodeAccelerator::OnReset() {
  DCHECK(video_decode_accelerator_.get());
  video_decode_accelerator_->Reset();
}

void GpuVideoDecodeAccelerator::OnDestroy() {
  DCHECK(video_decode_accelerator_.get());
  OnWillDestroyStub();
}

void GpuVideoDecodeAccelerator::OnFilterRemoved() {
  // We're destroying; cancel all callbacks.
  weak_factory_for_io_.InvalidateWeakPtrs();
  filter_removed_.Signal();
}

void GpuVideoDecodeAccelerator::NotifyEndOfBitstreamBuffer(
    int32 bitstream_buffer_id) {
  if (!Send(new AcceleratedVideoDecoderHostMsg_BitstreamBufferProcessed(
          host_route_id_, bitstream_buffer_id))) {
    DLOG(ERROR)
        << "Send(AcceleratedVideoDecoderHostMsg_BitstreamBufferProcessed) "
        << "failed";
  }
}

void GpuVideoDecodeAccelerator::NotifyFlushDone() {
  if (!Send(new AcceleratedVideoDecoderHostMsg_FlushDone(host_route_id_)))
    DLOG(ERROR) << "Send(AcceleratedVideoDecoderHostMsg_FlushDone) failed";
}

void GpuVideoDecodeAccelerator::NotifyResetDone() {
  if (!Send(new AcceleratedVideoDecoderHostMsg_ResetDone(host_route_id_)))
    DLOG(ERROR) << "Send(AcceleratedVideoDecoderHostMsg_ResetDone) failed";
}

void GpuVideoDecodeAccelerator::OnWillDestroyStub() {
  // The stub is going away, so we have to stop and destroy VDA here, before
  // returning, because the VDA may need the GL context to run and/or do its
  // cleanup. We cannot destroy the VDA before the IO thread message filter is
  // removed however, since we cannot service incoming messages with VDA gone.
  // We cannot simply check for existence of VDA on IO thread though, because
  // we don't want to synchronize the IO thread with the ChildThread.
  // So we have to wait for the RemoveFilter callback here instead and remove
  // the VDA after it arrives and before returning.
  if (filter_.get()) {
    stub_->channel()->RemoveFilter(filter_.get());
    filter_removed_.Wait();
  }

  stub_->channel()->RemoveRoute(host_route_id_);
  stub_->RemoveDestructionObserver(this);

  video_decode_accelerator_.reset();
  delete this;
}

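// Marks the texture backing |picture| as cleared so the command buffer decoder
// does not clear it again; called on the child thread the first time a picture
// buffer is delivered.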
void GpuVideoDecodeAccelerator::SetTextureCleared(
    const media::Picture& picture) {
  DCHECK(child_message_loop_->BelongsToCurrentThread());
  DebugAutoLock auto_lock(debug_uncleared_textures_lock_);
  std::map<int32, scoped_refptr<gpu::gles2::TextureRef> >::iterator it;
  it = uncleared_textures_.find(picture.picture_buffer_id());
  if (it == uncleared_textures_.end())
    return;  // The texture has already been cleared.

  scoped_refptr<gpu::gles2::TextureRef> texture_ref = it->second;
  GLenum target = texture_ref->texture()->target();
  gpu::gles2::TextureManager* texture_manager =
      stub_->decoder()->GetContextGroup()->texture_manager();
  DCHECK(!texture_ref->texture()->IsLevelCleared(target, 0));
  texture_manager->SetLevelCleared(texture_ref.get(), target, 0, true);
  uncleared_textures_.erase(it);
}

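// Messages are sent through the IO-thread filter when called on the IO thread,
// and through the stub's channel otherwise.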
bool GpuVideoDecodeAccelerator::Send(IPC::Message* message) {
  if (filter_.get() && io_message_loop_->BelongsToCurrentThread())
    return filter_->SendOnIOThread(message);
  DCHECK(child_message_loop_->BelongsToCurrentThread());
  return stub_->channel()->Send(message);
}

void GpuVideoDecodeAccelerator::SendCreateDecoderReply(IPC::Message* message,
                                                       bool succeeded) {
  GpuCommandBufferMsg_CreateVideoDecoder::WriteReplyParams(message, succeeded);
  Send(message);
}

}  // namespace content