1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
5 #include "content/common/gpu/media/gpu_video_decode_accelerator.h"
10 #include "base/command_line.h"
11 #include "base/location.h"
12 #include "base/logging.h"
13 #include "base/memory/ref_counted.h"
14 #include "base/single_thread_task_runner.h"
15 #include "base/stl_util.h"
16 #include "base/thread_task_runner_handle.h"
18 #include "content/common/gpu/gpu_channel.h"
19 #include "content/common/gpu/gpu_messages.h"
20 #include "content/common/gpu/media/gpu_video_accelerator_util.h"
21 #include "content/public/common/content_switches.h"
22 #include "gpu/command_buffer/common/command_buffer.h"
23 #include "ipc/ipc_message_macros.h"
24 #include "ipc/ipc_message_utils.h"
25 #include "ipc/message_filter.h"
26 #include "media/base/limits.h"
27 #include "ui/gl/gl_context.h"
28 #include "ui/gl/gl_image.h"
29 #include "ui/gl/gl_surface_egl.h"
32 #include "base/win/windows_version.h"
33 #include "content/common/gpu/media/dxva_video_decode_accelerator.h"
34 #elif defined(OS_MACOSX)
35 #include "content/common/gpu/media/vt_video_decode_accelerator.h"
36 #elif defined(OS_CHROMEOS)
37 #if defined(USE_V4L2_CODEC)
38 #include "content/common/gpu/media/v4l2_device.h"
39 #include "content/common/gpu/media/v4l2_slice_video_decode_accelerator.h"
40 #include "content/common/gpu/media/v4l2_video_decode_accelerator.h"
42 #if defined(ARCH_CPU_X86_FAMILY)
43 #include "content/common/gpu/media/vaapi_video_decode_accelerator.h"
44 #include "ui/gl/gl_implementation.h"
46 #elif defined(USE_OZONE)
47 #include "media/ozone/media_ozone_platform.h"
48 #elif defined(OS_ANDROID)
49 #include "content/common/gpu/media/android_copying_backing_strategy.h"
50 #include "content/common/gpu/media/android_video_decode_accelerator.h"
53 #include "ui/gfx/geometry/size.h"
// Makes the GL context of |stub|'s GLES2 decoder current on the calling
// thread so a VDA can issue GL work.  |stub| is held weakly, so it may
// already be gone by the time this runs.
// NOTE(review): this chunk has dropped source lines (embedded original line
// numbers skip 58 -> 60 and 65 -> 72); the stub null-check branch and the
// return statements are not visible here -- recover from version control
// before editing.
57 static bool MakeDecoderContextCurrent(
58 const base::WeakPtr
<GpuCommandBufferStub
> stub
) {
60 DLOG(ERROR
) << "Stub is gone; won't MakeCurrent().";
64 if (!stub
->decoder()->MakeCurrent()) {
65 DLOG(ERROR
) << "Failed to MakeCurrent()";
// DebugAutoLock: one configuration aliases base::AutoLock, the other is the
// no-op lock whose constructor fragment appears below.  The preprocessor
// directives selecting between them are not visible in this chunk.
// NOTE(review): lines missing here (embedded numbering skips 72 -> 75 -> 79);
// confirm which build flag guards each branch before editing.
72 // DebugAutoLock works like AutoLock but only acquires the lock when
75 typedef base::AutoLock DebugAutoLock
;
79 explicit DebugAutoLock(base::Lock
&) {}
// IPC filter installed on the GPU channel so AcceleratedVideoDecoderMsg_Decode
// messages for |host_route_id_| can be handled on the IO thread, bypassing the
// main thread.  |owner_| outlives the filter; OnFilterRemoved() tears both
// down.
// NOTE(review): lines missing in this chunk (embedded numbering skips, e.g.
// 96 -> 99 and 107 -> 111); access specifiers, some returns, and the closing
// brace are not visible here.
83 class GpuVideoDecodeAccelerator::MessageFilter
: public IPC::MessageFilter
{
85 MessageFilter(GpuVideoDecodeAccelerator
* owner
, int32 host_route_id
)
86 : owner_(owner
), host_route_id_(host_route_id
) {}
// Drop |sender_| when the channel dies so SendOnIOThread() cannot use a dead
// channel.
88 void OnChannelError() override
{ sender_
= NULL
; }
90 void OnChannelClosing() override
{ sender_
= NULL
; }
92 void OnFilterAdded(IPC::Sender
* sender
) override
{ sender_
= sender
; }
94 void OnFilterRemoved() override
{
95 // This will delete |owner_| and |this|.
96 owner_
->OnFilterRemoved();
// Forwards only Decode messages addressed to our route to |owner_|; anything
// else is left unhandled for the main-thread dispatcher.
99 bool OnMessageReceived(const IPC::Message
& msg
) override
{
100 if (msg
.routing_id() != host_route_id_
)
103 IPC_BEGIN_MESSAGE_MAP(MessageFilter
, msg
)
104 IPC_MESSAGE_FORWARD(AcceleratedVideoDecoderMsg_Decode
, owner_
,
105 GpuVideoDecodeAccelerator::OnDecode
)
106 IPC_MESSAGE_UNHANDLED(return false;)
107 IPC_END_MESSAGE_MAP()
// Sends |message| from the IO thread via |sender_|.  Sync messages are
// disallowed.
111 bool SendOnIOThread(IPC::Message
* message
) {
112 DCHECK(!message
->is_sync());
117 return sender_
->Send(message
);
121 ~MessageFilter() override
{}
124 GpuVideoDecodeAccelerator
* const owner_
;
125 const int32 host_route_id_
;
126 // The sender to which this filter was added.
127 IPC::Sender
* sender_
;
// Constructor: binds this object to its command-buffer |stub| and the IO task
// runner, registers as a destruction observer on the stub, and builds the
// make-context-current callback around the stub's weak pointer.
// NOTE(review): lines missing in this chunk (embedded numbering skips
// 130 -> 132 and 134 -> 137); the |host_route_id| parameter declaration and
// some member initializers are not visible here.
130 GpuVideoDecodeAccelerator::GpuVideoDecodeAccelerator(
132 GpuCommandBufferStub
* stub
,
133 const scoped_refptr
<base::SingleThreadTaskRunner
>& io_task_runner
)
134 : host_route_id_(host_route_id
),
137 filter_removed_(true, false),
138 child_task_runner_(base::ThreadTaskRunnerHandle::Get()),
139 io_task_runner_(io_task_runner
),
140 weak_factory_for_io_(this) {
142 stub_
->AddDestructionObserver(this);
143 make_context_current_
=
144 base::Bind(&MakeDecoderContextCurrent
, stub_
->AsWeakPtr());
// Destructor: by this point the VDA must already have been destroyed in
// OnWillDestroyStub(), which is the only path that self-deletes this object.
147 GpuVideoDecodeAccelerator::~GpuVideoDecodeAccelerator() {
148 // This class can only be self-deleted from OnWillDestroyStub(), which means
149 // the VDA has already been destroyed in there.
150 DCHECK(!video_decode_accelerator_
);
// Dispatches incoming AcceleratedVideoDecoderMsg_* IPCs to the On*() handler
// methods.  All handlers require a live VDA.
// NOTE(review): lines missing (embedded numbering skips 154 -> 158 and
// 168 -> 172); the early-return body and the final |handled| return are not
// visible here.
153 bool GpuVideoDecodeAccelerator::OnMessageReceived(const IPC::Message
& msg
) {
154 if (!video_decode_accelerator_
)
158 IPC_BEGIN_MESSAGE_MAP(GpuVideoDecodeAccelerator
, msg
)
159 IPC_MESSAGE_HANDLER(AcceleratedVideoDecoderMsg_Decode
, OnDecode
)
160 IPC_MESSAGE_HANDLER(AcceleratedVideoDecoderMsg_AssignPictureBuffers
,
161 OnAssignPictureBuffers
)
162 IPC_MESSAGE_HANDLER(AcceleratedVideoDecoderMsg_ReusePictureBuffer
,
163 OnReusePictureBuffer
)
164 IPC_MESSAGE_HANDLER(AcceleratedVideoDecoderMsg_Flush
, OnFlush
)
165 IPC_MESSAGE_HANDLER(AcceleratedVideoDecoderMsg_Reset
, OnReset
)
166 IPC_MESSAGE_HANDLER(AcceleratedVideoDecoderMsg_Destroy
, OnDestroy
)
167 IPC_MESSAGE_UNHANDLED(handled
= false)
168 IPC_END_MESSAGE_MAP()
// VDA client callback: asks the host to allocate |requested_num_of_buffers|
// textures of |dimensions| for |texture_target|.  Sizes beyond media::limits
// are rejected as PLATFORM_FAILURE to avoid absurd allocations.  The
// dimensions and target are remembered for later validation in
// OnAssignPictureBuffers().
// NOTE(review): lines missing (embedded numbering skips 179 -> 182 and
// 184 -> 187); part of the Send() argument list and the early returns are not
// visible here.
172 void GpuVideoDecodeAccelerator::ProvidePictureBuffers(
173 uint32 requested_num_of_buffers
,
174 const gfx::Size
& dimensions
,
175 uint32 texture_target
) {
176 if (dimensions
.width() > media::limits::kMaxDimension
||
177 dimensions
.height() > media::limits::kMaxDimension
||
178 dimensions
.GetArea() > media::limits::kMaxCanvas
) {
179 NotifyError(media::VideoDecodeAccelerator::PLATFORM_FAILURE
);
182 if (!Send(new AcceleratedVideoDecoderHostMsg_ProvidePictureBuffers(
184 requested_num_of_buffers
,
187 DLOG(ERROR
) << "Send(AcceleratedVideoDecoderHostMsg_ProvidePictureBuffers) "
190 texture_dimensions_
= dimensions
;
191 texture_target_
= texture_target
;
// VDA client callback: tells the host that |picture_buffer_id| is no longer
// in use and drops it from the uncleared-texture bookkeeping (guarded by
// DebugAutoLock because the map is also touched from the IO thread path).
// NOTE(review): lines missing (embedded numbering skips 199 -> 202); the rest
// of the DLOG message and a closing brace are not visible here.
194 void GpuVideoDecodeAccelerator::DismissPictureBuffer(
195 int32 picture_buffer_id
) {
196 // Notify client that picture buffer is now unused.
197 if (!Send(new AcceleratedVideoDecoderHostMsg_DismissPictureBuffer(
198 host_route_id_
, picture_buffer_id
))) {
199 DLOG(ERROR
) << "Send(AcceleratedVideoDecoderHostMsg_DismissPictureBuffer) "
202 DebugAutoLock
auto_lock(debug_uncleared_textures_lock_
);
203 uncleared_textures_
.erase(picture_buffer_id
);
// VDA client callback: forwards a decoded picture to the host.  On the child
// thread it first marks the backing texture cleared; on the IO thread it only
// asserts the texture was already cleared on first delivery.
// NOTE(review): lines missing (embedded numbering skips 212 -> 214 and
// 216 -> 219); the else branch structure and closing braces are not visible
// here.
206 void GpuVideoDecodeAccelerator::PictureReady(
207 const media::Picture
& picture
) {
208 // VDA may call PictureReady on IO thread. SetTextureCleared should run on
209 // the child thread. VDA is responsible to call PictureReady on the child
210 // thread when a picture buffer is delivered the first time.
211 if (child_task_runner_
->BelongsToCurrentThread()) {
212 SetTextureCleared(picture
);
214 DCHECK(io_task_runner_
->BelongsToCurrentThread());
215 DebugAutoLock
auto_lock(debug_uncleared_textures_lock_
);
216 DCHECK_EQ(0u, uncleared_textures_
.count(picture
.picture_buffer_id()));
219 if (!Send(new AcceleratedVideoDecoderHostMsg_PictureReady(
220 host_route_id_
, picture
.picture_buffer_id(),
221 picture
.bitstream_buffer_id(), picture
.visible_rect(),
222 picture
.allow_overlay()))) {
223 DLOG(ERROR
) << "Send(AcceleratedVideoDecoderHostMsg_PictureReady) failed";
// VDA client callback: reports a decode error to the host over IPC; failure
// to send is only logged.
// NOTE(review): the tail of the DLOG message and closing braces are not
// visible in this chunk (embedded numbering stops at 231).
227 void GpuVideoDecodeAccelerator::NotifyError(
228 media::VideoDecodeAccelerator::Error error
) {
229 if (!Send(new AcceleratedVideoDecoderHostMsg_ErrorNotification(
230 host_route_id_
, error
))) {
231 DLOG(ERROR
) << "Send(AcceleratedVideoDecoderHostMsg_ErrorNotification) "
// Creates and initializes a platform VDA for |profile|, replying to
// |init_done_msg| with success/failure.  Tries each Create*VDA() factory in
// priority order; on success, installs the IO-thread MessageFilter when the
// VDA can decode off the main thread.
// NOTE(review): lines missing throughout (embedded numbering skips, e.g.
// 243 -> 247, 250 -> 255, 270 -> 273, 277 -> 280); early returns, the loop
// body's continue/break structure, and closing braces are not visible here.
236 void GpuVideoDecodeAccelerator::Initialize(
237 const media::VideoCodecProfile profile
,
238 IPC::Message
* init_done_msg
) {
239 DCHECK(!video_decode_accelerator_
.get());
241 if (!stub_
->channel()->AddRoute(host_route_id_
, this)) {
242 DLOG(ERROR
) << "Initialize(): failed to add route";
243 SendCreateDecoderReply(init_done_msg
, false);
247 // Ensure we will be able to get a GL context at all before initializing
249 if (!make_context_current_
.Run()) {
250 SendCreateDecoderReply(init_done_msg
, false);
255 // Array of Create..VDA() function pointers, maybe applicable to the current
256 // platform. This list is ordered by priority of use and it should be the
257 // same as the order of querying supported profiles of VDAs.
258 const GpuVideoDecodeAccelerator::CreateVDAFp create_vda_fps
[] = {
259 &GpuVideoDecodeAccelerator::CreateDXVAVDA
,
260 &GpuVideoDecodeAccelerator::CreateV4L2VDA
,
261 &GpuVideoDecodeAccelerator::CreateV4L2SliceVDA
,
262 &GpuVideoDecodeAccelerator::CreateVaapiVDA
,
263 &GpuVideoDecodeAccelerator::CreateVTVDA
,
264 &GpuVideoDecodeAccelerator::CreateOzoneVDA
,
265 &GpuVideoDecodeAccelerator::CreateAndroidVDA
};
// Try each factory until one yields a VDA that initializes for |profile|.
267 for (const auto& create_vda_function
: create_vda_fps
) {
268 video_decode_accelerator_
= (this->*create_vda_function
)();
269 if (!video_decode_accelerator_
||
270 !video_decode_accelerator_
->Initialize(profile
, this))
// If the VDA supports IO-thread decode, route Decode IPCs there via the
// channel filter.
273 if (video_decode_accelerator_
->CanDecodeOnIOThread()) {
274 filter_
= new MessageFilter(this, host_route_id_
);
275 stub_
->channel()->AddFilter(filter_
.get());
277 SendCreateDecoderReply(init_done_msg
, true);
// No factory succeeded: report failure to the client.
280 video_decode_accelerator_
.reset();
281 LOG(ERROR
) << "HW video decode not available for profile " << profile
;
282 SendCreateDecoderReply(init_done_msg
, false);
// Factory: builds the Windows DXVA-backed VDA (Win7+ only); returns null on
// other configurations.
// NOTE(review): the surrounding #if defined(OS_WIN)/#endif guard lines are
// not visible in this chunk (embedded numbering skips 287 -> 289 and
// 294 -> 297).
285 scoped_ptr
<media::VideoDecodeAccelerator
>
286 GpuVideoDecodeAccelerator::CreateDXVAVDA() {
287 scoped_ptr
<media::VideoDecodeAccelerator
> decoder
;
289 if (base::win::GetVersion() >= base::win::VERSION_WIN7
) {
290 DVLOG(0) << "Initializing DXVA HW decoder for windows.";
291 decoder
.reset(new DXVAVideoDecodeAccelerator(make_context_current_
,
292 stub_
->decoder()->GetGLContext()));
294 NOTIMPLEMENTED() << "HW video decode acceleration not available.";
297 return decoder
.Pass();
// Factory: builds the Chrome OS V4L2 VDA when a decoder device exists;
// returns null elsewhere.
// NOTE(review): lines missing (embedded numbering skips 304 -> 306 and
// 310 -> 315); the device null-check, remaining constructor arguments, and
// the #endif are not visible here.
300 scoped_ptr
<media::VideoDecodeAccelerator
>
301 GpuVideoDecodeAccelerator::CreateV4L2VDA() {
302 scoped_ptr
<media::VideoDecodeAccelerator
> decoder
;
303 #if defined(OS_CHROMEOS) && defined(USE_V4L2_CODEC)
304 scoped_refptr
<V4L2Device
> device
= V4L2Device::Create(V4L2Device::kDecoder
);
306 decoder
.reset(new V4L2VideoDecodeAccelerator(
307 gfx::GLSurfaceEGL::GetHardwareDisplay(),
308 stub_
->decoder()->GetGLContext()->GetHandle(),
309 weak_factory_for_io_
.GetWeakPtr(),
310 make_context_current_
,
315 return decoder
.Pass();
// Factory: builds the Chrome OS V4L2 slice-based VDA when a decoder device
// exists; returns null elsewhere.
// NOTE(review): lines missing (embedded numbering skips 322 -> 324 and
// 329 -> 333); the device null-check, remaining constructor arguments, and
// the #endif are not visible here.
318 scoped_ptr
<media::VideoDecodeAccelerator
>
319 GpuVideoDecodeAccelerator::CreateV4L2SliceVDA() {
320 scoped_ptr
<media::VideoDecodeAccelerator
> decoder
;
321 #if defined(OS_CHROMEOS) && defined(USE_V4L2_CODEC)
322 scoped_refptr
<V4L2Device
> device
= V4L2Device::Create(V4L2Device::kDecoder
);
324 decoder
.reset(new V4L2SliceVideoDecodeAccelerator(
326 gfx::GLSurfaceEGL::GetHardwareDisplay(),
327 stub_
->decoder()->GetGLContext()->GetHandle(),
328 weak_factory_for_io_
.GetWeakPtr(),
329 make_context_current_
,
333 return decoder
.Pass();
// Attaches |image| to level 0 of the texture identified by
// |client_texture_id| in the stub's texture manager, for |texture_target|.
// NOTE(review): a line is missing between the GetTexture call and
// SetLevelImage (embedded numbering skips 342 -> 344); presumably a null
// check on |ref| -- confirm against version control.
336 void GpuVideoDecodeAccelerator::BindImage(uint32 client_texture_id
,
337 uint32 texture_target
,
338 scoped_refptr
<gfx::GLImage
> image
) {
339 gpu::gles2::GLES2Decoder
* command_decoder
= stub_
->decoder();
340 gpu::gles2::TextureManager
* texture_manager
=
341 command_decoder
->GetContextGroup()->texture_manager();
342 gpu::gles2::TextureRef
* ref
= texture_manager
->GetTexture(client_texture_id
);
344 texture_manager
->SetLevelImage(ref
, texture_target
, 0, image
.get());
// Factory: builds the VA-API VDA on x86 Chrome OS, wiring BindImage as the
// image-binding callback; returns null elsewhere.
// NOTE(review): the #endif closing the conditional is not visible here
// (embedded numbering skips 353 -> 355).
347 scoped_ptr
<media::VideoDecodeAccelerator
>
348 GpuVideoDecodeAccelerator::CreateVaapiVDA() {
349 scoped_ptr
<media::VideoDecodeAccelerator
> decoder
;
350 #if defined(OS_CHROMEOS) && defined(ARCH_CPU_X86_FAMILY)
351 decoder
.reset(new VaapiVideoDecodeAccelerator(
352 make_context_current_
, base::Bind(&GpuVideoDecodeAccelerator::BindImage
,
353 base::Unretained(this))));
355 return decoder
.Pass();
// Factory: builds the Mac VideoToolbox VDA, wiring BindImage as the
// image-binding callback; returns null on other platforms.
// NOTE(review): the #endif closing the conditional is not visible here
// (embedded numbering skips 364 -> 366).
358 scoped_ptr
<media::VideoDecodeAccelerator
>
359 GpuVideoDecodeAccelerator::CreateVTVDA() {
360 scoped_ptr
<media::VideoDecodeAccelerator
> decoder
;
361 #if defined(OS_MACOSX)
362 decoder
.reset(new VTVideoDecodeAccelerator(
363 make_context_current_
, base::Bind(&GpuVideoDecodeAccelerator::BindImage
,
364 base::Unretained(this))));
366 return decoder
.Pass();
// Factory: on non-ChromeOS Ozone builds, delegates VDA creation to the
// MediaOzonePlatform; returns null elsewhere.
// NOTE(review): the #endif closing the conditional is not visible here
// (embedded numbering skips 375 -> 377).
369 scoped_ptr
<media::VideoDecodeAccelerator
>
370 GpuVideoDecodeAccelerator::CreateOzoneVDA() {
371 scoped_ptr
<media::VideoDecodeAccelerator
> decoder
;
372 #if !defined(OS_CHROMEOS) && defined(USE_OZONE)
373 media::MediaOzonePlatform
* platform
=
374 media::MediaOzonePlatform::GetInstance();
375 decoder
.reset(platform
->CreateVideoDecodeAccelerator(make_context_current_
));
377 return decoder
.Pass();
// Factory: builds the Android VDA with the copying backing strategy; returns
// null on other platforms.
// NOTE(review): the #endif closing the conditional is not visible here
// (embedded numbering skips 386 -> 388).
380 scoped_ptr
<media::VideoDecodeAccelerator
>
381 GpuVideoDecodeAccelerator::CreateAndroidVDA() {
382 scoped_ptr
<media::VideoDecodeAccelerator
> decoder
;
383 #if defined(OS_ANDROID)
384 decoder
.reset(new AndroidVideoDecodeAccelerator(
385 stub_
->decoder()->AsWeakPtr(), make_context_current_
,
386 make_scoped_ptr(new AndroidCopyingBackingStrategy())));
388 return decoder
.Pass();
// Collects the supported decode profiles from every platform VDA, in the same
// priority order as Initialize() tries them, and converts them to the GPU IPC
// representation.  Returns empty when accelerated decode is disabled by
// command-line switch.
// NOTE(review): several preprocessor lines are missing from this chunk --
// the #if guard before line 403, the #endif lines between the ChromeOS
// sub-branches (numbering skips 410 -> 412 and 414 -> 416), and the final
// #endif (skip 419 -> 421).  Recover the exact guard structure from version
// control before editing.
392 gpu::VideoDecodeAcceleratorSupportedProfiles
393 GpuVideoDecodeAccelerator::GetSupportedProfiles() {
394 media::VideoDecodeAccelerator::SupportedProfiles profiles
;
395 const base::CommandLine
* cmd_line
= base::CommandLine::ForCurrentProcess();
396 if (cmd_line
->HasSwitch(switches::kDisableAcceleratedVideoDecode
))
397 return gpu::VideoDecodeAcceleratorSupportedProfiles();
399 // Query supported profiles for each VDA. The order of querying VDAs should
400 // be the same as the order of initializing VDAs. Then the returned profile
401 // can be initialized by corresponding VDA successfully.
403 profiles
= DXVAVideoDecodeAccelerator::GetSupportedProfiles();
404 #elif defined(OS_CHROMEOS)
405 media::VideoDecodeAccelerator::SupportedProfiles vda_profiles
;
406 #if defined(USE_V4L2_CODEC)
407 vda_profiles
= V4L2VideoDecodeAccelerator::GetSupportedProfiles();
408 GpuVideoAcceleratorUtil::InsertUniqueDecodeProfiles(vda_profiles
, &profiles
);
409 vda_profiles
= V4L2SliceVideoDecodeAccelerator::GetSupportedProfiles();
410 GpuVideoAcceleratorUtil::InsertUniqueDecodeProfiles(vda_profiles
, &profiles
);
412 #if defined(ARCH_CPU_X86_FAMILY)
413 vda_profiles
= VaapiVideoDecodeAccelerator::GetSupportedProfiles();
414 GpuVideoAcceleratorUtil::InsertUniqueDecodeProfiles(vda_profiles
, &profiles
);
416 #elif defined(OS_MACOSX)
417 profiles
= VTVideoDecodeAccelerator::GetSupportedProfiles();
418 #elif defined(OS_ANDROID)
419 profiles
= AndroidVideoDecodeAccelerator::GetSupportedProfiles();
421 return GpuVideoAcceleratorUtil::ConvertMediaToGpuDecodeProfiles(profiles
);
// Handles a Decode IPC: validates the bitstream buffer id and hands the
// buffer to the VDA.  On a bad id, reports INVALID_ARGUMENT -- directly when
// on the child thread, otherwise posted to it.
// NOTE(review): lines missing (embedded numbering skips 427 -> 430,
// 431 -> 433, 435 -> 437, 441 -> 445); the |id|/|size| parameters, the
// id-range check condition, and the early return are not visible here.
424 // Runs on IO thread if video_decode_accelerator_->CanDecodeOnIOThread() is
425 // true, otherwise on the main thread.
426 void GpuVideoDecodeAccelerator::OnDecode(
427 base::SharedMemoryHandle handle
,
430 base::TimeDelta presentation_timestamp
) {
431 DCHECK(video_decode_accelerator_
.get());
433 DLOG(ERROR
) << "BitstreamBuffer id " << id
<< " out of range";
434 if (child_task_runner_
->BelongsToCurrentThread()) {
435 NotifyError(media::VideoDecodeAccelerator::INVALID_ARGUMENT
);
437 child_task_runner_
->PostTask(
439 base::Bind(&GpuVideoDecodeAccelerator::NotifyError
,
440 base::Unretained(this),
441 media::VideoDecodeAccelerator::INVALID_ARGUMENT
));
445 video_decode_accelerator_
->Decode(
446 media::BitstreamBuffer(id
, handle
, size
, presentation_timestamp
));
// Handles AssignPictureBuffers: validates each (buffer id, texture id) pair
// against the stub's texture manager and the dimensions/target remembered in
// ProvidePictureBuffers(), builds media::PictureBuffers, hands them to the
// VDA, and records the TextureRefs as not-yet-cleared.
// NOTE(review): many lines are missing from this chunk (embedded numbering
// skips, e.g. 453 -> 457, 466 -> 469, 469 -> 472, 497 -> 501, 506 -> 510,
// 511 -> 513); early returns, the texture_ref null check, the |format|
// declaration, and trailing arguments are not visible here.
449 void GpuVideoDecodeAccelerator::OnAssignPictureBuffers(
450 const std::vector
<int32
>& buffer_ids
,
451 const std::vector
<uint32
>& texture_ids
) {
452 if (buffer_ids
.size() != texture_ids
.size()) {
453 NotifyError(media::VideoDecodeAccelerator::INVALID_ARGUMENT
);
457 gpu::gles2::GLES2Decoder
* command_decoder
= stub_
->decoder();
458 gpu::gles2::TextureManager
* texture_manager
=
459 command_decoder
->GetContextGroup()->texture_manager();
461 std::vector
<media::PictureBuffer
> buffers
;
462 std::vector
<scoped_refptr
<gpu::gles2::TextureRef
> > textures
;
// Validate every buffer/texture pair before assigning any of them.
463 for (uint32 i
= 0; i
< buffer_ids
.size(); ++i
) {
464 if (buffer_ids
[i
] < 0) {
465 DLOG(ERROR
) << "Buffer id " << buffer_ids
[i
] << " out of range";
466 NotifyError(media::VideoDecodeAccelerator::INVALID_ARGUMENT
);
469 gpu::gles2::TextureRef
* texture_ref
= texture_manager
->GetTexture(
472 DLOG(ERROR
) << "Failed to find texture id " << texture_ids
[i
];
473 NotifyError(media::VideoDecodeAccelerator::INVALID_ARGUMENT
);
476 gpu::gles2::Texture
* info
= texture_ref
->texture();
477 if (info
->target() != texture_target_
) {
478 DLOG(ERROR
) << "Texture target mismatch for texture id "
480 NotifyError(media::VideoDecodeAccelerator::INVALID_ARGUMENT
);
483 if (texture_target_
== GL_TEXTURE_EXTERNAL_OES
||
484 texture_target_
== GL_TEXTURE_RECTANGLE_ARB
) {
485 // These textures have their dimensions defined by the underlying storage.
486 // Use |texture_dimensions_| for this size.
487 texture_manager
->SetLevelInfo(
488 texture_ref
, texture_target_
, 0, GL_RGBA
, texture_dimensions_
.width(),
489 texture_dimensions_
.height(), 1, 0, GL_RGBA
, 0, gfx::Rect());
491 // For other targets, texture dimensions should already be defined.
492 GLsizei width
= 0, height
= 0;
493 info
->GetLevelSize(texture_target_
, 0, &width
, &height
, nullptr);
494 if (width
!= texture_dimensions_
.width() ||
495 height
!= texture_dimensions_
.height()) {
496 DLOG(ERROR
) << "Size mismatch for texture id " << texture_ids
[i
];
497 NotifyError(media::VideoDecodeAccelerator::INVALID_ARGUMENT
);
501 // TODO(dshwang): after moving to D3D11, remove this. crbug.com/438691
503 video_decode_accelerator_
.get()->GetSurfaceInternalFormat();
504 if (format
!= GL_RGBA
) {
505 texture_manager
->SetLevelInfo(texture_ref
, texture_target_
, 0, format
,
506 width
, height
, 1, 0, format
, 0,
510 buffers
.push_back(media::PictureBuffer(buffer_ids
[i
], texture_dimensions_
,
511 texture_ref
->service_id(),
513 textures
.push_back(texture_ref
);
515 video_decode_accelerator_
->AssignPictureBuffers(buffers
);
// Track the textures as uncleared until SetTextureCleared() runs for them.
516 DebugAutoLock
auto_lock(debug_uncleared_textures_lock_
);
517 for (uint32 i
= 0; i
< buffer_ids
.size(); ++i
)
518 uncleared_textures_
[buffer_ids
[i
]] = textures
[i
];
// Handles ReusePictureBuffer: returns |picture_buffer_id| to the VDA for
// reuse.
521 void GpuVideoDecodeAccelerator::OnReusePictureBuffer(
522 int32 picture_buffer_id
) {
523 DCHECK(video_decode_accelerator_
.get());
524 video_decode_accelerator_
->ReusePictureBuffer(picture_buffer_id
);
// Handles Flush: forwards to the VDA; completion is reported via
// NotifyFlushDone().
527 void GpuVideoDecodeAccelerator::OnFlush() {
528 DCHECK(video_decode_accelerator_
.get());
529 video_decode_accelerator_
->Flush();
// Handles Reset: forwards to the VDA; completion is reported via
// NotifyResetDone().
532 void GpuVideoDecodeAccelerator::OnReset() {
533 DCHECK(video_decode_accelerator_
.get());
534 video_decode_accelerator_
->Reset();
// Handles Destroy.
// NOTE(review): the body after the DCHECK is missing from this chunk
// (embedded numbering skips 538 -> 542); the actual teardown call is not
// visible here.
537 void GpuVideoDecodeAccelerator::OnDestroy() {
538 DCHECK(video_decode_accelerator_
.get());
// Called (on the IO thread, via MessageFilter) once the channel filter has
// been removed: invalidates IO-thread weak pointers and signals
// |filter_removed_| so OnWillDestroyStub() can proceed with VDA teardown.
542 void GpuVideoDecodeAccelerator::OnFilterRemoved() {
543 // We're destroying; cancel all callbacks.
544 weak_factory_for_io_
.InvalidateWeakPtrs();
545 filter_removed_
.Signal();
// VDA client callback: tells the host that |bitstream_buffer_id| has been
// fully consumed; failure to send is only logged.
// NOTE(review): the DLOG(ERROR) head of the log statement and closing braces
// are not visible here (embedded numbering skips 551 -> 553).
548 void GpuVideoDecodeAccelerator::NotifyEndOfBitstreamBuffer(
549 int32 bitstream_buffer_id
) {
550 if (!Send(new AcceleratedVideoDecoderHostMsg_BitstreamBufferProcessed(
551 host_route_id_
, bitstream_buffer_id
))) {
553 << "Send(AcceleratedVideoDecoderHostMsg_BitstreamBufferProcessed) "
// VDA client callback: reports flush completion to the host.
558 void GpuVideoDecodeAccelerator::NotifyFlushDone() {
559 if (!Send(new AcceleratedVideoDecoderHostMsg_FlushDone(host_route_id_
)))
560 DLOG(ERROR
) << "Send(AcceleratedVideoDecoderHostMsg_FlushDone) failed";
// VDA client callback: reports reset completion to the host.
563 void GpuVideoDecodeAccelerator::NotifyResetDone() {
564 if (!Send(new AcceleratedVideoDecoderHostMsg_ResetDone(host_route_id_
)))
565 DLOG(ERROR
) << "Send(AcceleratedVideoDecoderHostMsg_ResetDone) failed";
// Stub-destruction observer: removes the IO-thread filter, blocks until the
// filter removal is acknowledged (so no IO-thread messages race the
// teardown), then unroutes, unregisters, and destroys the VDA while the GL
// context is presumably still usable.
// NOTE(review): lines missing (embedded numbering skips 576 -> 578,
// 579 -> 582, 583 -> 585); a conditional around the filter removal and the
// self-delete at the end are not visible here.
568 void GpuVideoDecodeAccelerator::OnWillDestroyStub() {
569 // The stub is going away, so we have to stop and destroy VDA here, before
570 // returning, because the VDA may need the GL context to run and/or do its
571 // cleanup. We cannot destroy the VDA before the IO thread message filter is
572 // removed however, since we cannot service incoming messages with VDA gone.
573 // We cannot simply check for existence of VDA on IO thread though, because
574 // we don't want to synchronize the IO thread with the ChildThread.
575 // So we have to wait for the RemoveFilter callback here instead and remove
576 // the VDA after it arrives and before returning.
578 stub_
->channel()->RemoveFilter(filter_
.get());
579 filter_removed_
.Wait();
582 stub_
->channel()->RemoveRoute(host_route_id_
);
583 stub_
->RemoveDestructionObserver(this);
585 video_decode_accelerator_
.reset();
// Marks the texture backing |picture|'s buffer as cleared in the texture
// manager on first delivery, then forgets it.  No-op when the buffer is not
// in the uncleared-texture map.  Child thread only.
589 void GpuVideoDecodeAccelerator::SetTextureCleared(
590 const media::Picture
& picture
) {
591 DCHECK(child_task_runner_
->BelongsToCurrentThread());
592 DebugAutoLock
auto_lock(debug_uncleared_textures_lock_
);
593 std::map
<int32
, scoped_refptr
<gpu::gles2::TextureRef
> >::iterator it
;
594 it
= uncleared_textures_
.find(picture
.picture_buffer_id());
595 if (it
== uncleared_textures_
.end())
596 return; // the texture has been cleared
598 scoped_refptr
<gpu::gles2::TextureRef
> texture_ref
= it
->second
;
599 GLenum target
= texture_ref
->texture()->target();
600 gpu::gles2::TextureManager
* texture_manager
=
601 stub_
->decoder()->GetContextGroup()->texture_manager();
602 DCHECK(!texture_ref
->texture()->IsLevelCleared(target
, 0));
603 texture_manager
->SetLevelCleared(texture_ref
.get(), target
, 0, true);
604 uncleared_textures_
.erase(it
);
// Sends |message| on the channel: via the IO-thread filter when called on the
// IO thread (and a filter is installed), otherwise directly on the child
// thread.  Takes ownership of |message| per IPC convention.
607 bool GpuVideoDecodeAccelerator::Send(IPC::Message
* message
) {
608 if (filter_
.get() && io_task_runner_
->BelongsToCurrentThread())
609 return filter_
->SendOnIOThread(message
);
610 DCHECK(child_task_runner_
->BelongsToCurrentThread());
611 return stub_
->channel()->Send(message
);
// Writes |succeeded| into the CreateVideoDecoder reply |message| and sends
// it back to the caller.
// NOTE(review): lines missing (embedded numbering skips 614 -> 616 and
// 616 -> 620); the |succeeded| parameter declaration and the Send(message)
// call are not visible here.
614 void GpuVideoDecodeAccelerator::SendCreateDecoderReply(IPC::Message
* message
,
616 GpuCommandBufferMsg_CreateVideoDecoder::WriteReplyParams(message
, succeeded
);
620 } // namespace content