// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "content/common/gpu/media/gpu_video_decode_accelerator.h"

#include "base/command_line.h"
#include "base/location.h"
#include "base/logging.h"
#include "base/memory/ref_counted.h"
#include "base/single_thread_task_runner.h"
#include "base/stl_util.h"
#include "base/thread_task_runner_handle.h"
#include "content/common/gpu/gpu_channel.h"
#include "content/common/gpu/gpu_messages.h"
#include "content/common/gpu/media/gpu_video_accelerator_util.h"
#include "content/public/common/content_switches.h"
#include "gpu/command_buffer/common/command_buffer.h"
#include "ipc/ipc_message_macros.h"
#include "ipc/ipc_message_utils.h"
#include "ipc/message_filter.h"
#include "media/base/limits.h"
#include "ui/gl/gl_context.h"
#include "ui/gl/gl_image.h"
#include "ui/gl/gl_surface_egl.h"

#if defined(OS_WIN)
#include "base/win/windows_version.h"
#include "content/common/gpu/media/dxva_video_decode_accelerator.h"
#elif defined(OS_MACOSX)
#include "content/common/gpu/media/vt_video_decode_accelerator.h"
#elif defined(OS_CHROMEOS)
#if defined(USE_V4L2_CODEC)
#include "content/common/gpu/media/v4l2_device.h"
#include "content/common/gpu/media/v4l2_slice_video_decode_accelerator.h"
#include "content/common/gpu/media/v4l2_video_decode_accelerator.h"
#endif
#if defined(ARCH_CPU_X86_FAMILY)
#include "content/common/gpu/media/vaapi_video_decode_accelerator.h"
#include "ui/gl/gl_implementation.h"
#endif
#elif defined(USE_OZONE)
#include "media/ozone/media_ozone_platform.h"
#elif defined(OS_ANDROID)
#include "content/common/gpu/media/android_video_decode_accelerator.h"
#endif

#include "ui/gfx/geometry/size.h"

namespace content {

static bool MakeDecoderContextCurrent(
    const base::WeakPtr<GpuCommandBufferStub> stub) {
  if (!stub) {
    DLOG(ERROR) << "Stub is gone; won't MakeCurrent().";
    return false;
  }

  if (!stub->decoder()->MakeCurrent()) {
    DLOG(ERROR) << "Failed to MakeCurrent()";
    return false;
  }

  return true;
}

// DebugAutoLock works like AutoLock but only acquires the lock when
// DCHECK is on.
#if DCHECK_IS_ON()
typedef base::AutoLock DebugAutoLock;
#else
class DebugAutoLock {
 public:
  explicit DebugAutoLock(base::Lock&) {}
};
#endif
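
// Note on DebugAutoLock: it is used below to guard |uncleared_textures_|,
// e.g.
//   DebugAutoLock auto_lock(debug_uncleared_textures_lock_);
//   uncleared_textures_.erase(picture_buffer_id);
// The map is only touched from the IO thread inside DCHECK'd code (see
// PictureReady()), which appears to be why the lock collapses to a no-op when
// DCHECKs are off.
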
class GpuVideoDecodeAccelerator::MessageFilter : public IPC::MessageFilter {
 public:
  MessageFilter(GpuVideoDecodeAccelerator* owner, int32 host_route_id)
      : owner_(owner), host_route_id_(host_route_id) {}

  void OnChannelError() override { sender_ = NULL; }

  void OnChannelClosing() override { sender_ = NULL; }

  void OnFilterAdded(IPC::Sender* sender) override { sender_ = sender; }

  void OnFilterRemoved() override {
    // This will delete |owner_| and |this|.
    owner_->OnFilterRemoved();
  }

  bool OnMessageReceived(const IPC::Message& msg) override {
    if (msg.routing_id() != host_route_id_)
      return false;

    IPC_BEGIN_MESSAGE_MAP(MessageFilter, msg)
      IPC_MESSAGE_FORWARD(AcceleratedVideoDecoderMsg_Decode, owner_,
                          GpuVideoDecodeAccelerator::OnDecode)
      IPC_MESSAGE_UNHANDLED(return false;)
    IPC_END_MESSAGE_MAP()
    return true;
  }

  bool SendOnIOThread(IPC::Message* message) {
    DCHECK(!message->is_sync());
    if (!sender_) {
      delete message;
      return false;
    }
    return sender_->Send(message);
  }

 protected:
  ~MessageFilter() override {}

 private:
  GpuVideoDecodeAccelerator* const owner_;
  const int32 host_route_id_;
  // The sender to which this filter was added.
  IPC::Sender* sender_;
};

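// Note on MessageFilter: it is installed only when the VDA reports
// CanDecodeOnIOThread() (see Initialize()):
//   filter_ = new MessageFilter(this, host_route_id_);
//   stub_->channel()->AddFilter(filter_.get());
// The filter forwards AcceleratedVideoDecoderMsg_Decode to |owner_| on the IO
// thread; any other message returns false above and is dispatched to
// GpuVideoDecodeAccelerator::OnMessageReceived() on the child thread instead.
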
GpuVideoDecodeAccelerator::GpuVideoDecodeAccelerator(
    int32 host_route_id,
    GpuCommandBufferStub* stub,
    const scoped_refptr<base::SingleThreadTaskRunner>& io_task_runner)
    : host_route_id_(host_route_id),
      stub_(stub),
      texture_target_(0),
      filter_removed_(true, false),
      child_task_runner_(base::ThreadTaskRunnerHandle::Get()),
      io_task_runner_(io_task_runner),
      weak_factory_for_io_(this) {
  DCHECK(stub_);
  stub_->AddDestructionObserver(this);
  make_context_current_ =
      base::Bind(&MakeDecoderContextCurrent, stub_->AsWeakPtr());
}

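// Threading overview, as implied by the members initialized above:
// |child_task_runner_| is the GPU child thread this object lives on,
// |io_task_runner_| is the IPC IO thread used for the optional fast Decode
// path, and |weak_factory_for_io_| produces the weak pointers handed to VDAs
// that call back from the IO thread. |make_context_current_| wraps
// MakeDecoderContextCurrent() around a weak pointer to the stub so a VDA can
// attempt to make the GL context current without keeping the stub alive.
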
GpuVideoDecodeAccelerator::~GpuVideoDecodeAccelerator() {
  // This class can only be self-deleted from OnWillDestroyStub(), which means
  // the VDA has already been destroyed in there.
  DCHECK(!video_decode_accelerator_);
}

bool GpuVideoDecodeAccelerator::OnMessageReceived(const IPC::Message& msg) {
  if (!video_decode_accelerator_)
    return false;

  bool handled = true;
  IPC_BEGIN_MESSAGE_MAP(GpuVideoDecodeAccelerator, msg)
    IPC_MESSAGE_HANDLER(AcceleratedVideoDecoderMsg_Decode, OnDecode)
    IPC_MESSAGE_HANDLER(AcceleratedVideoDecoderMsg_AssignPictureBuffers,
                        OnAssignPictureBuffers)
    IPC_MESSAGE_HANDLER(AcceleratedVideoDecoderMsg_ReusePictureBuffer,
                        OnReusePictureBuffer)
    IPC_MESSAGE_HANDLER(AcceleratedVideoDecoderMsg_Flush, OnFlush)
    IPC_MESSAGE_HANDLER(AcceleratedVideoDecoderMsg_Reset, OnReset)
    IPC_MESSAGE_HANDLER(AcceleratedVideoDecoderMsg_Destroy, OnDestroy)
    IPC_MESSAGE_UNHANDLED(handled = false)
  IPC_END_MESSAGE_MAP()
  return handled;
}

void GpuVideoDecodeAccelerator::ProvidePictureBuffers(
    uint32 requested_num_of_buffers,
    const gfx::Size& dimensions,
    uint32 texture_target) {
  if (dimensions.width() > media::limits::kMaxDimension ||
      dimensions.height() > media::limits::kMaxDimension ||
      dimensions.GetArea() > media::limits::kMaxCanvas) {
    NotifyError(media::VideoDecodeAccelerator::PLATFORM_FAILURE);
    return;
  }
  if (!Send(new AcceleratedVideoDecoderHostMsg_ProvidePictureBuffers(
          host_route_id_,
          requested_num_of_buffers,
          dimensions,
          texture_target))) {
    DLOG(ERROR) << "Send(AcceleratedVideoDecoderHostMsg_ProvidePictureBuffers) "
                << "failed";
  }
  texture_dimensions_ = dimensions;
  texture_target_ = texture_target;
}

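// Note: |texture_dimensions_| and |texture_target_| cached above are used by
// OnAssignPictureBuffers() to validate the textures supplied by the client
// and, for GL_TEXTURE_EXTERNAL_OES / GL_TEXTURE_RECTANGLE_ARB targets, to
// define their level-0 info.
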
void GpuVideoDecodeAccelerator::DismissPictureBuffer(
    int32 picture_buffer_id) {
  // Notify the client that the picture buffer is now unused.
  if (!Send(new AcceleratedVideoDecoderHostMsg_DismissPictureBuffer(
          host_route_id_, picture_buffer_id))) {
    DLOG(ERROR) << "Send(AcceleratedVideoDecoderHostMsg_DismissPictureBuffer) "
                << "failed";
  }
  DebugAutoLock auto_lock(debug_uncleared_textures_lock_);
  uncleared_textures_.erase(picture_buffer_id);
}

void GpuVideoDecodeAccelerator::PictureReady(
    const media::Picture& picture) {
  // The VDA may call PictureReady on the IO thread. SetTextureCleared should
  // run on the child thread, so the VDA is responsible for calling
  // PictureReady on the child thread the first time a picture buffer is
  // delivered.
  if (child_task_runner_->BelongsToCurrentThread()) {
    SetTextureCleared(picture);
  } else {
    DCHECK(io_task_runner_->BelongsToCurrentThread());
    DebugAutoLock auto_lock(debug_uncleared_textures_lock_);
    DCHECK_EQ(0u, uncleared_textures_.count(picture.picture_buffer_id()));
  }

  if (!Send(new AcceleratedVideoDecoderHostMsg_PictureReady(
          host_route_id_, picture.picture_buffer_id(),
          picture.bitstream_buffer_id(), picture.visible_rect(),
          picture.allow_overlay()))) {
    DLOG(ERROR) << "Send(AcceleratedVideoDecoderHostMsg_PictureReady) failed";
  }
}

void GpuVideoDecodeAccelerator::NotifyError(
    media::VideoDecodeAccelerator::Error error) {
  if (!Send(new AcceleratedVideoDecoderHostMsg_ErrorNotification(
          host_route_id_, error))) {
    DLOG(ERROR) << "Send(AcceleratedVideoDecoderHostMsg_ErrorNotification) "
                << "failed";
  }
}

void GpuVideoDecodeAccelerator::Initialize(
    const media::VideoCodecProfile profile,
    IPC::Message* init_done_msg) {
  DCHECK(!video_decode_accelerator_.get());

  if (!stub_->channel()->AddRoute(host_route_id_, this)) {
    DLOG(ERROR) << "Initialize(): failed to add route";
    SendCreateDecoderReply(init_done_msg, false);
  }

#if !defined(OS_WIN)
  // Ensure we will be able to get a GL context at all before initializing
  // non-Windows VDAs.
  if (!make_context_current_.Run()) {
    SendCreateDecoderReply(init_done_msg, false);
    return;
  }
#endif

  // Array of Create..VDA() function pointers, each possibly applicable to the
  // current platform. This list is ordered by priority of use and it should
  // match the order in which VDA supported profiles are queried in
  // GetSupportedProfiles().
  const GpuVideoDecodeAccelerator::CreateVDAFp create_vda_fps[] = {
      &GpuVideoDecodeAccelerator::CreateDXVAVDA,
      &GpuVideoDecodeAccelerator::CreateV4L2VDA,
      &GpuVideoDecodeAccelerator::CreateV4L2SliceVDA,
      &GpuVideoDecodeAccelerator::CreateVaapiVDA,
      &GpuVideoDecodeAccelerator::CreateVTVDA,
      &GpuVideoDecodeAccelerator::CreateOzoneVDA,
      &GpuVideoDecodeAccelerator::CreateAndroidVDA};

  for (const auto& create_vda_function : create_vda_fps) {
    video_decode_accelerator_ = (this->*create_vda_function)();
    if (!video_decode_accelerator_ ||
        !video_decode_accelerator_->Initialize(profile, this))
      continue;

    if (video_decode_accelerator_->CanDecodeOnIOThread()) {
      filter_ = new MessageFilter(this, host_route_id_);
      stub_->channel()->AddFilter(filter_.get());
    }
    SendCreateDecoderReply(init_done_msg, true);
    return;
  }
  video_decode_accelerator_.reset();
  LOG(ERROR) << "HW video decode not available for profile " << profile;
  SendCreateDecoderReply(init_done_msg, false);
}

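// Note: each Create*VDA() below is compiled to return an empty scoped_ptr on
// platforms where the corresponding decoder is unavailable, so the loop in
// Initialize() simply skips it and tries the next candidate.
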
scoped_ptr<media::VideoDecodeAccelerator>
GpuVideoDecodeAccelerator::CreateDXVAVDA() {
  scoped_ptr<media::VideoDecodeAccelerator> decoder;
#if defined(OS_WIN)
  if (base::win::GetVersion() >= base::win::VERSION_WIN7) {
    DVLOG(0) << "Initializing DXVA HW decoder for Windows.";
    decoder.reset(new DXVAVideoDecodeAccelerator(make_context_current_,
        stub_->decoder()->GetGLContext()));
  } else {
    NOTIMPLEMENTED() << "HW video decode acceleration not available.";
  }
#endif
  return decoder.Pass();
}

scoped_ptr<media::VideoDecodeAccelerator>
GpuVideoDecodeAccelerator::CreateV4L2VDA() {
  scoped_ptr<media::VideoDecodeAccelerator> decoder;
#if defined(OS_CHROMEOS) && defined(USE_V4L2_CODEC)
  scoped_refptr<V4L2Device> device = V4L2Device::Create(V4L2Device::kDecoder);
  if (device.get()) {
    decoder.reset(new V4L2VideoDecodeAccelerator(
        gfx::GLSurfaceEGL::GetHardwareDisplay(),
        stub_->decoder()->GetGLContext()->GetHandle(),
        weak_factory_for_io_.GetWeakPtr(),
        make_context_current_,
        device,
        io_task_runner_));
  }
#endif
  return decoder.Pass();
}

scoped_ptr<media::VideoDecodeAccelerator>
GpuVideoDecodeAccelerator::CreateV4L2SliceVDA() {
  scoped_ptr<media::VideoDecodeAccelerator> decoder;
#if defined(OS_CHROMEOS) && defined(USE_V4L2_CODEC)
  scoped_refptr<V4L2Device> device = V4L2Device::Create(V4L2Device::kDecoder);
  if (device.get()) {
    decoder.reset(new V4L2SliceVideoDecodeAccelerator(
        device,
        gfx::GLSurfaceEGL::GetHardwareDisplay(),
        stub_->decoder()->GetGLContext()->GetHandle(),
        weak_factory_for_io_.GetWeakPtr(),
        make_context_current_,
        io_task_runner_));
  }
#endif
  return decoder.Pass();
}

void GpuVideoDecodeAccelerator::BindImage(uint32 client_texture_id,
                                          uint32 texture_target,
                                          scoped_refptr<gfx::GLImage> image) {
  gpu::gles2::GLES2Decoder* command_decoder = stub_->decoder();
  gpu::gles2::TextureManager* texture_manager =
      command_decoder->GetContextGroup()->texture_manager();
  gpu::gles2::TextureRef* ref = texture_manager->GetTexture(client_texture_id);
  if (ref)
    texture_manager->SetLevelImage(ref, texture_target, 0, image.get());
}

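// Note: BindImage() is only handed to VaapiVideoDecodeAccelerator (see
// CreateVaapiVDA()) via base::Bind with base::Unretained(this); this is
// presumably safe because the VDA is reset in OnWillDestroyStub() before this
// object deletes itself.
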
scoped_ptr<media::VideoDecodeAccelerator>
GpuVideoDecodeAccelerator::CreateVaapiVDA() {
  scoped_ptr<media::VideoDecodeAccelerator> decoder;
#if defined(OS_CHROMEOS) && defined(ARCH_CPU_X86_FAMILY)
  decoder.reset(new VaapiVideoDecodeAccelerator(
      make_context_current_, base::Bind(&GpuVideoDecodeAccelerator::BindImage,
                                        base::Unretained(this))));
#endif
  return decoder.Pass();
}

scoped_ptr<media::VideoDecodeAccelerator>
GpuVideoDecodeAccelerator::CreateVTVDA() {
  scoped_ptr<media::VideoDecodeAccelerator> decoder;
#if defined(OS_MACOSX)
  decoder.reset(new VTVideoDecodeAccelerator(
      static_cast<CGLContextObj>(stub_->decoder()->GetGLContext()->GetHandle()),
      make_context_current_));
#endif
  return decoder.Pass();
}

scoped_ptr<media::VideoDecodeAccelerator>
GpuVideoDecodeAccelerator::CreateOzoneVDA() {
  scoped_ptr<media::VideoDecodeAccelerator> decoder;
#if !defined(OS_CHROMEOS) && defined(USE_OZONE)
  media::MediaOzonePlatform* platform =
      media::MediaOzonePlatform::GetInstance();
  decoder.reset(platform->CreateVideoDecodeAccelerator(make_context_current_));
#endif
  return decoder.Pass();
}

scoped_ptr<media::VideoDecodeAccelerator>
GpuVideoDecodeAccelerator::CreateAndroidVDA() {
  scoped_ptr<media::VideoDecodeAccelerator> decoder;
#if defined(OS_ANDROID)
  decoder.reset(new AndroidVideoDecodeAccelerator(
      stub_->decoder()->AsWeakPtr(),
      make_context_current_));
#endif
  return decoder.Pass();
}

gpu::VideoDecodeAcceleratorSupportedProfiles
GpuVideoDecodeAccelerator::GetSupportedProfiles() {
  media::VideoDecodeAccelerator::SupportedProfiles profiles;
  const base::CommandLine* cmd_line = base::CommandLine::ForCurrentProcess();
  if (cmd_line->HasSwitch(switches::kDisableAcceleratedVideoDecode))
    return gpu::VideoDecodeAcceleratorSupportedProfiles();

  // Query supported profiles for each VDA. The order of querying VDAs should
  // be the same as the order of initializing VDAs, so that each returned
  // profile can be successfully initialized by its corresponding VDA.
#if defined(OS_WIN)
  profiles = DXVAVideoDecodeAccelerator::GetSupportedProfiles();
#elif defined(OS_CHROMEOS)
  media::VideoDecodeAccelerator::SupportedProfiles vda_profiles;
#if defined(USE_V4L2_CODEC)
  vda_profiles = V4L2VideoDecodeAccelerator::GetSupportedProfiles();
  GpuVideoAcceleratorUtil::InsertUniqueDecodeProfiles(vda_profiles, &profiles);
  vda_profiles = V4L2SliceVideoDecodeAccelerator::GetSupportedProfiles();
  GpuVideoAcceleratorUtil::InsertUniqueDecodeProfiles(vda_profiles, &profiles);
#endif
#if defined(ARCH_CPU_X86_FAMILY)
  vda_profiles = VaapiVideoDecodeAccelerator::GetSupportedProfiles();
  GpuVideoAcceleratorUtil::InsertUniqueDecodeProfiles(vda_profiles, &profiles);
#endif
#elif defined(OS_MACOSX)
  profiles = VTVideoDecodeAccelerator::GetSupportedProfiles();
#elif defined(OS_ANDROID)
  profiles = AndroidVideoDecodeAccelerator::GetSupportedProfiles();
#endif
  return GpuVideoAcceleratorUtil::ConvertMediaToGpuDecodeProfiles(profiles);
}

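// Note: the per-platform query order above must stay in sync with
// |create_vda_fps| in Initialize(), as the comments in both places require,
// so that a profile reported here is one the matching VDA can actually
// initialize.
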
// Runs on IO thread if video_decode_accelerator_->CanDecodeOnIOThread() is
// true, otherwise on the main thread.
void GpuVideoDecodeAccelerator::OnDecode(
    base::SharedMemoryHandle handle, int32 id, uint32 size) {
  DCHECK(video_decode_accelerator_.get());
  if (id < 0) {
    DLOG(ERROR) << "BitstreamBuffer id " << id << " out of range";
    if (child_task_runner_->BelongsToCurrentThread()) {
      NotifyError(media::VideoDecodeAccelerator::INVALID_ARGUMENT);
    } else {
      child_task_runner_->PostTask(
          FROM_HERE,
          base::Bind(&GpuVideoDecodeAccelerator::NotifyError,
                     base::Unretained(this),
                     media::VideoDecodeAccelerator::INVALID_ARGUMENT));
    }
    return;
  }
  video_decode_accelerator_->Decode(media::BitstreamBuffer(id, handle, size));
}

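// Note: when OnDecode() runs on the IO thread (via MessageFilter), the
// INVALID_ARGUMENT error above is reported by posting NotifyError() back to
// |child_task_runner_| rather than calling it directly on the IO thread.
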
void GpuVideoDecodeAccelerator::OnAssignPictureBuffers(
    const std::vector<int32>& buffer_ids,
    const std::vector<uint32>& texture_ids) {
  if (buffer_ids.size() != texture_ids.size()) {
    NotifyError(media::VideoDecodeAccelerator::INVALID_ARGUMENT);
    return;
  }

  gpu::gles2::GLES2Decoder* command_decoder = stub_->decoder();
  gpu::gles2::TextureManager* texture_manager =
      command_decoder->GetContextGroup()->texture_manager();

  std::vector<media::PictureBuffer> buffers;
  std::vector<scoped_refptr<gpu::gles2::TextureRef> > textures;
  for (uint32 i = 0; i < buffer_ids.size(); ++i) {
    if (buffer_ids[i] < 0) {
      DLOG(ERROR) << "Buffer id " << buffer_ids[i] << " out of range";
      NotifyError(media::VideoDecodeAccelerator::INVALID_ARGUMENT);
      return;
    }
    gpu::gles2::TextureRef* texture_ref = texture_manager->GetTexture(
        texture_ids[i]);
    if (!texture_ref) {
      DLOG(ERROR) << "Failed to find texture id " << texture_ids[i];
      NotifyError(media::VideoDecodeAccelerator::INVALID_ARGUMENT);
      return;
    }
    gpu::gles2::Texture* info = texture_ref->texture();
    if (info->target() != texture_target_) {
      DLOG(ERROR) << "Texture target mismatch for texture id "
                  << texture_ids[i];
      NotifyError(media::VideoDecodeAccelerator::INVALID_ARGUMENT);
      return;
    }
    if (texture_target_ == GL_TEXTURE_EXTERNAL_OES ||
        texture_target_ == GL_TEXTURE_RECTANGLE_ARB) {
      // These textures have their dimensions defined by the underlying storage.
      // Use |texture_dimensions_| for this size.
      texture_manager->SetLevelInfo(
          texture_ref, texture_target_, 0, GL_RGBA, texture_dimensions_.width(),
          texture_dimensions_.height(), 1, 0, GL_RGBA, 0, gfx::Rect());
    } else {
      // For other targets, texture dimensions should already be defined.
      GLsizei width = 0, height = 0;
      info->GetLevelSize(texture_target_, 0, &width, &height, nullptr);
      if (width != texture_dimensions_.width() ||
          height != texture_dimensions_.height()) {
        DLOG(ERROR) << "Size mismatch for texture id " << texture_ids[i];
        NotifyError(media::VideoDecodeAccelerator::INVALID_ARGUMENT);
        return;
      }

      // TODO(dshwang): after moving to D3D11, remove this. crbug.com/438691
      GLenum format =
          video_decode_accelerator_.get()->GetSurfaceInternalFormat();
      if (format != GL_RGBA) {
        texture_manager->SetLevelInfo(texture_ref, texture_target_, 0, format,
                                      width, height, 1, 0, format, 0,
                                      gfx::Rect());
      }
    }
    buffers.push_back(media::PictureBuffer(buffer_ids[i], texture_dimensions_,
                                           texture_ref->service_id(),
                                           texture_ids[i]));
    textures.push_back(texture_ref);
  }
  video_decode_accelerator_->AssignPictureBuffers(buffers);
  DebugAutoLock auto_lock(debug_uncleared_textures_lock_);
  for (uint32 i = 0; i < buffer_ids.size(); ++i)
    uncleared_textures_[buffer_ids[i]] = textures[i];
}

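// Note: OnAssignPictureBuffers() validates every entry before handing the set
// to the VDA: matching id/texture vector sizes, non-negative buffer ids, a
// texture that exists and matches |texture_target_|, and, for targets with
// client-defined storage, level-0 dimensions equal to |texture_dimensions_|.
// The TextureRefs are also recorded in |uncleared_textures_| so that the
// first PictureReady() for each buffer can mark its texture cleared.
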
void GpuVideoDecodeAccelerator::OnReusePictureBuffer(
    int32 picture_buffer_id) {
  DCHECK(video_decode_accelerator_.get());
  video_decode_accelerator_->ReusePictureBuffer(picture_buffer_id);
}

void GpuVideoDecodeAccelerator::OnFlush() {
  DCHECK(video_decode_accelerator_.get());
  video_decode_accelerator_->Flush();
}

void GpuVideoDecodeAccelerator::OnReset() {
  DCHECK(video_decode_accelerator_.get());
  video_decode_accelerator_->Reset();
}

void GpuVideoDecodeAccelerator::OnDestroy() {
  DCHECK(video_decode_accelerator_.get());
  OnWillDestroyStub();
}

void GpuVideoDecodeAccelerator::OnFilterRemoved() {
  // We're destroying; cancel all callbacks.
  weak_factory_for_io_.InvalidateWeakPtrs();
  filter_removed_.Signal();
}

void GpuVideoDecodeAccelerator::NotifyEndOfBitstreamBuffer(
    int32 bitstream_buffer_id) {
  if (!Send(new AcceleratedVideoDecoderHostMsg_BitstreamBufferProcessed(
          host_route_id_, bitstream_buffer_id))) {
    DLOG(ERROR)
        << "Send(AcceleratedVideoDecoderHostMsg_BitstreamBufferProcessed) "
        << "failed";
  }
}

void GpuVideoDecodeAccelerator::NotifyFlushDone() {
  if (!Send(new AcceleratedVideoDecoderHostMsg_FlushDone(host_route_id_)))
    DLOG(ERROR) << "Send(AcceleratedVideoDecoderHostMsg_FlushDone) failed";
}

void GpuVideoDecodeAccelerator::NotifyResetDone() {
  if (!Send(new AcceleratedVideoDecoderHostMsg_ResetDone(host_route_id_)))
    DLOG(ERROR) << "Send(AcceleratedVideoDecoderHostMsg_ResetDone) failed";
}

void GpuVideoDecodeAccelerator::OnWillDestroyStub() {
  // The stub is going away, so we have to stop and destroy the VDA here,
  // before returning, because the VDA may need the GL context to run and/or
  // do its cleanup. We cannot destroy the VDA before the IO thread message
  // filter is removed however, since we cannot service incoming messages with
  // the VDA gone. We cannot simply check for the existence of the VDA on the
  // IO thread though, because we don't want to synchronize the IO thread with
  // the ChildThread. So we have to wait for the RemoveFilter callback here
  // instead and remove the VDA after it arrives and before returning.
  if (filter_.get()) {
    stub_->channel()->RemoveFilter(filter_.get());
    filter_removed_.Wait();
  }

  stub_->channel()->RemoveRoute(host_route_id_);
  stub_->RemoveDestructionObserver(this);

  video_decode_accelerator_.reset();
  delete this;
}

void GpuVideoDecodeAccelerator::SetTextureCleared(
    const media::Picture& picture) {
  DCHECK(child_task_runner_->BelongsToCurrentThread());
  DebugAutoLock auto_lock(debug_uncleared_textures_lock_);
  std::map<int32, scoped_refptr<gpu::gles2::TextureRef> >::iterator it;
  it = uncleared_textures_.find(picture.picture_buffer_id());
  if (it == uncleared_textures_.end())
    return;  // the texture has been cleared

  scoped_refptr<gpu::gles2::TextureRef> texture_ref = it->second;
  GLenum target = texture_ref->texture()->target();
  gpu::gles2::TextureManager* texture_manager =
      stub_->decoder()->GetContextGroup()->texture_manager();
  DCHECK(!texture_ref->texture()->IsLevelCleared(target, 0));
  texture_manager->SetLevelCleared(texture_ref.get(), target, 0, true);
  uncleared_textures_.erase(it);
}

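// Note: SetTextureCleared() runs on the child thread the first time the
// decoder delivers a given picture buffer (see PictureReady()); once level 0
// is marked cleared the buffer is dropped from |uncleared_textures_| and
// later deliveries skip this path.
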
bool GpuVideoDecodeAccelerator::Send(IPC::Message* message) {
  if (filter_.get() && io_task_runner_->BelongsToCurrentThread())
    return filter_->SendOnIOThread(message);
  DCHECK(child_task_runner_->BelongsToCurrentThread());
  return stub_->channel()->Send(message);
}

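// Note: Send() picks the transport by thread: messages sent from the IO
// thread (with the filter installed) go through MessageFilter::SendOnIOThread,
// while everything else must be on the child thread and goes through the
// stub's GPU channel.
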
void GpuVideoDecodeAccelerator::SendCreateDecoderReply(IPC::Message* message,
                                                       bool succeeded) {
  GpuCommandBufferMsg_CreateVideoDecoder::WriteReplyParams(message, succeeded);
  Send(message);
}

}  // namespace content