// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

5 #include "content/common/gpu/media/gpu_video_decode_accelerator.h"
10 #include "base/command_line.h"
11 #include "base/logging.h"
12 #include "base/memory/ref_counted.h"
13 #include "base/message_loop/message_loop_proxy.h"
14 #include "base/stl_util.h"
16 #include "content/common/gpu/gpu_channel.h"
17 #include "content/common/gpu/gpu_messages.h"
18 #include "content/common/gpu/media/gpu_video_accelerator_util.h"
19 #include "content/public/common/content_switches.h"
20 #include "gpu/command_buffer/common/command_buffer.h"
21 #include "ipc/ipc_message_macros.h"
22 #include "ipc/ipc_message_utils.h"
23 #include "ipc/message_filter.h"
24 #include "media/base/limits.h"
25 #include "ui/gl/gl_context.h"
26 #include "ui/gl/gl_image.h"
27 #include "ui/gl/gl_surface_egl.h"
30 #include "base/win/windows_version.h"
31 #include "content/common/gpu/media/dxva_video_decode_accelerator.h"
32 #elif defined(OS_MACOSX)
33 #include "content/common/gpu/media/vt_video_decode_accelerator.h"
34 #elif defined(OS_CHROMEOS)
35 #if defined(USE_V4L2_CODEC)
36 #include "content/common/gpu/media/v4l2_device.h"
37 #include "content/common/gpu/media/v4l2_slice_video_decode_accelerator.h"
38 #include "content/common/gpu/media/v4l2_video_decode_accelerator.h"
40 #if defined(ARCH_CPU_X86_FAMILY)
41 #include "content/common/gpu/media/vaapi_video_decode_accelerator.h"
42 #include "ui/gl/gl_implementation.h"
44 #elif defined(USE_OZONE)
45 #include "media/ozone/media_ozone_platform.h"
46 #elif defined(OS_ANDROID)
47 #include "content/common/gpu/media/android_video_decode_accelerator.h"
50 #include "ui/gfx/geometry/size.h"
static bool MakeDecoderContextCurrent(
    const base::WeakPtr<GpuCommandBufferStub> stub) {
  if (!stub) {
    DLOG(ERROR) << "Stub is gone; won't MakeCurrent().";
    return false;
  }

  if (!stub->decoder()->MakeCurrent()) {
    DLOG(ERROR) << "Failed to MakeCurrent()";
    return false;
  }

  return true;
}

// DebugAutoLock works like AutoLock but only acquires the lock when
// DCHECK is on.
#if DCHECK_IS_ON()
typedef base::AutoLock DebugAutoLock;
#else
class DebugAutoLock {
 public:
  explicit DebugAutoLock(base::Lock&) {}
};
#endif

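// MessageFilter is attached to the GPU channel on the IO thread when the
// chosen VDA reports CanDecodeOnIOThread(). It forwards
// AcceleratedVideoDecoderMsg_Decode messages addressed to |host_route_id_|
// directly to its owner, so decode requests can be serviced without a hop to
// the child thread.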
class GpuVideoDecodeAccelerator::MessageFilter : public IPC::MessageFilter {
 public:
  MessageFilter(GpuVideoDecodeAccelerator* owner, int32 host_route_id)
      : owner_(owner), host_route_id_(host_route_id) {}

  void OnChannelError() override { sender_ = NULL; }

  void OnChannelClosing() override { sender_ = NULL; }

  void OnFilterAdded(IPC::Sender* sender) override { sender_ = sender; }

  void OnFilterRemoved() override {
    // This will delete |owner_| and |this|.
    owner_->OnFilterRemoved();
  }

  bool OnMessageReceived(const IPC::Message& msg) override {
    if (msg.routing_id() != host_route_id_)
      return false;

    IPC_BEGIN_MESSAGE_MAP(MessageFilter, msg)
      IPC_MESSAGE_FORWARD(AcceleratedVideoDecoderMsg_Decode, owner_,
                          GpuVideoDecodeAccelerator::OnDecode)
      IPC_MESSAGE_UNHANDLED(return false;)
    IPC_END_MESSAGE_MAP()
    return true;
  }

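  // Sends |message| from the IO thread. |sender_| may be NULL if the channel
  // has already gone away.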
  bool SendOnIOThread(IPC::Message* message) {
    DCHECK(!message->is_sync());
    if (!sender_) {
      delete message;
      return false;
    }
    return sender_->Send(message);
  }

 protected:
  ~MessageFilter() override {}

 private:
  GpuVideoDecodeAccelerator* const owner_;
  const int32 host_route_id_;
  // The sender to which this filter was added.
  IPC::Sender* sender_;
};

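// Constructed on the child thread, whose message loop is captured in
// |child_message_loop_|. |io_message_loop_| is only used when the chosen VDA
// can decode on the IO thread.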
GpuVideoDecodeAccelerator::GpuVideoDecodeAccelerator(
    int32 host_route_id,
    GpuCommandBufferStub* stub,
    const scoped_refptr<base::MessageLoopProxy>& io_message_loop)
    : host_route_id_(host_route_id),
      stub_(stub),
      texture_target_(0),
      filter_removed_(true, false),
      child_message_loop_(base::MessageLoopProxy::current()),
      io_message_loop_(io_message_loop),
      weak_factory_for_io_(this) {
  DCHECK(stub_);
  stub_->AddDestructionObserver(this);
  make_context_current_ =
      base::Bind(&MakeDecoderContextCurrent, stub_->AsWeakPtr());
}

GpuVideoDecodeAccelerator::~GpuVideoDecodeAccelerator() {
  // This class can only be self-deleted from OnWillDestroyStub(), which means
  // the VDA has already been destroyed in there.
  DCHECK(!video_decode_accelerator_);
}

bool GpuVideoDecodeAccelerator::OnMessageReceived(const IPC::Message& msg) {
  if (!video_decode_accelerator_)
    return false;

  bool handled = true;
  IPC_BEGIN_MESSAGE_MAP(GpuVideoDecodeAccelerator, msg)
    IPC_MESSAGE_HANDLER(AcceleratedVideoDecoderMsg_Decode, OnDecode)
    IPC_MESSAGE_HANDLER(AcceleratedVideoDecoderMsg_AssignPictureBuffers,
                        OnAssignPictureBuffers)
    IPC_MESSAGE_HANDLER(AcceleratedVideoDecoderMsg_ReusePictureBuffer,
                        OnReusePictureBuffer)
    IPC_MESSAGE_HANDLER(AcceleratedVideoDecoderMsg_Flush, OnFlush)
    IPC_MESSAGE_HANDLER(AcceleratedVideoDecoderMsg_Reset, OnReset)
    IPC_MESSAGE_HANDLER(AcceleratedVideoDecoderMsg_Destroy, OnDestroy)
    IPC_MESSAGE_UNHANDLED(handled = false)
  IPC_END_MESSAGE_MAP()
  return handled;
}

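// media::VideoDecodeAccelerator::Client implementation: asks the host to
// provide |requested_num_of_buffers| picture buffers of |dimensions| for
// |texture_target|, after checking the dimensions against media::limits.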
void GpuVideoDecodeAccelerator::ProvidePictureBuffers(
    uint32 requested_num_of_buffers,
    const gfx::Size& dimensions,
    uint32 texture_target) {
  if (dimensions.width() > media::limits::kMaxDimension ||
      dimensions.height() > media::limits::kMaxDimension ||
      dimensions.GetArea() > media::limits::kMaxCanvas) {
    NotifyError(media::VideoDecodeAccelerator::PLATFORM_FAILURE);
    return;
  }
  if (!Send(new AcceleratedVideoDecoderHostMsg_ProvidePictureBuffers(
          host_route_id_,
          requested_num_of_buffers,
          dimensions,
          texture_target))) {
    DLOG(ERROR) << "Send(AcceleratedVideoDecoderHostMsg_ProvidePictureBuffers) "
                << "failed";
  }
  texture_dimensions_ = dimensions;
  texture_target_ = texture_target;
}

void GpuVideoDecodeAccelerator::DismissPictureBuffer(
    int32 picture_buffer_id) {
  // Notify the client that the picture buffer is now unused.
  if (!Send(new AcceleratedVideoDecoderHostMsg_DismissPictureBuffer(
          host_route_id_, picture_buffer_id))) {
    DLOG(ERROR) << "Send(AcceleratedVideoDecoderHostMsg_DismissPictureBuffer) "
                << "failed";
  }
  DebugAutoLock auto_lock(debug_uncleared_textures_lock_);
  uncleared_textures_.erase(picture_buffer_id);
}

void GpuVideoDecodeAccelerator::PictureReady(
    const media::Picture& picture) {
  // The VDA may call PictureReady on the IO thread. SetTextureCleared should
  // run on the child thread. The VDA is responsible for calling PictureReady
  // on the child thread the first time a picture buffer is delivered.
  if (child_message_loop_->BelongsToCurrentThread()) {
    SetTextureCleared(picture);
  } else {
    DCHECK(io_message_loop_->BelongsToCurrentThread());
    DebugAutoLock auto_lock(debug_uncleared_textures_lock_);
    DCHECK_EQ(0u, uncleared_textures_.count(picture.picture_buffer_id()));
  }

  if (!Send(new AcceleratedVideoDecoderHostMsg_PictureReady(
          host_route_id_, picture.picture_buffer_id(),
          picture.bitstream_buffer_id(), picture.visible_rect(),
          picture.allow_overlay()))) {
    DLOG(ERROR) << "Send(AcceleratedVideoDecoderHostMsg_PictureReady) failed";
  }
}

void GpuVideoDecodeAccelerator::NotifyError(
    media::VideoDecodeAccelerator::Error error) {
  if (!Send(new AcceleratedVideoDecoderHostMsg_ErrorNotification(
          host_route_id_, error))) {
    DLOG(ERROR) << "Send(AcceleratedVideoDecoderHostMsg_ErrorNotification) "
                << "failed";
  }
}

void GpuVideoDecodeAccelerator::Initialize(
    const media::VideoCodecProfile profile,
    IPC::Message* init_done_msg) {
  DCHECK(!video_decode_accelerator_.get());

  if (!stub_->channel()->AddRoute(host_route_id_, this)) {
    DLOG(ERROR) << "Initialize(): failed to add route";
    SendCreateDecoderReply(init_done_msg, false);
    return;
  }

  // Ensure we will be able to get a GL context at all before initializing
  // the VDA.
  if (!make_context_current_.Run()) {
    SendCreateDecoderReply(init_done_msg, false);
    return;
  }

  // Array of Create..VDA() function pointers, each possibly applicable to the
  // current platform. This list is ordered by priority of use, and it should
  // match the order in which the VDAs' supported profiles are queried.
  const GpuVideoDecodeAccelerator::CreateVDAFp create_vda_fps[] = {
      &GpuVideoDecodeAccelerator::CreateDXVAVDA,
      &GpuVideoDecodeAccelerator::CreateV4L2VDA,
      &GpuVideoDecodeAccelerator::CreateV4L2SliceVDA,
      &GpuVideoDecodeAccelerator::CreateVaapiVDA,
      &GpuVideoDecodeAccelerator::CreateVTVDA,
      &GpuVideoDecodeAccelerator::CreateOzoneVDA,
      &GpuVideoDecodeAccelerator::CreateAndroidVDA};

  for (const auto& create_vda_function : create_vda_fps) {
    video_decode_accelerator_ = (this->*create_vda_function)();
    if (!video_decode_accelerator_ ||
        !video_decode_accelerator_->Initialize(profile, this))
      continue;

    if (video_decode_accelerator_->CanDecodeOnIOThread()) {
      filter_ = new MessageFilter(this, host_route_id_);
      stub_->channel()->AddFilter(filter_.get());
    }
    SendCreateDecoderReply(init_done_msg, true);
    return;
  }

  video_decode_accelerator_.reset();
  LOG(ERROR) << "HW video decode not available for profile " << profile;
  SendCreateDecoderReply(init_done_msg, false);
}

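// Each Create*VDA() helper below returns a NULL scoped_ptr when the
// corresponding decoder is compiled out for this platform, letting
// Initialize() fall through to the next candidate.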
scoped_ptr<media::VideoDecodeAccelerator>
GpuVideoDecodeAccelerator::CreateDXVAVDA() {
  scoped_ptr<media::VideoDecodeAccelerator> decoder;
#if defined(OS_WIN)
  if (base::win::GetVersion() >= base::win::VERSION_WIN7) {
    DVLOG(0) << "Initializing DXVA HW decoder for Windows.";
    decoder.reset(new DXVAVideoDecodeAccelerator(make_context_current_,
        stub_->decoder()->GetGLContext()));
  } else {
    NOTIMPLEMENTED() << "HW video decode acceleration not available.";
  }
#endif
  return decoder.Pass();
}

scoped_ptr<media::VideoDecodeAccelerator>
GpuVideoDecodeAccelerator::CreateV4L2VDA() {
  scoped_ptr<media::VideoDecodeAccelerator> decoder;
#if defined(OS_CHROMEOS) && defined(USE_V4L2_CODEC)
  scoped_refptr<V4L2Device> device = V4L2Device::Create(V4L2Device::kDecoder);
  if (device.get()) {
    decoder.reset(new V4L2VideoDecodeAccelerator(
        gfx::GLSurfaceEGL::GetHardwareDisplay(),
        stub_->decoder()->GetGLContext()->GetHandle(),
        weak_factory_for_io_.GetWeakPtr(),
        make_context_current_,
        device,
        io_message_loop_));
  }
#endif
  return decoder.Pass();
}

scoped_ptr<media::VideoDecodeAccelerator>
GpuVideoDecodeAccelerator::CreateV4L2SliceVDA() {
  scoped_ptr<media::VideoDecodeAccelerator> decoder;
#if defined(OS_CHROMEOS) && defined(USE_V4L2_CODEC)
  scoped_refptr<V4L2Device> device = V4L2Device::Create(V4L2Device::kDecoder);
  if (device.get()) {
    decoder.reset(new V4L2SliceVideoDecodeAccelerator(
        device,
        gfx::GLSurfaceEGL::GetHardwareDisplay(),
        stub_->decoder()->GetGLContext()->GetHandle(),
        weak_factory_for_io_.GetWeakPtr(),
        make_context_current_,
        io_message_loop_));
  }
#endif
  return decoder.Pass();
}

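// Binds |image| to level 0 of the client texture. Passed as a callback to
// VaapiVideoDecodeAccelerator in CreateVaapiVDA() below.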
void GpuVideoDecodeAccelerator::BindImage(uint32 client_texture_id,
                                          uint32 texture_target,
                                          scoped_refptr<gfx::GLImage> image) {
  gpu::gles2::GLES2Decoder* command_decoder = stub_->decoder();
  gpu::gles2::TextureManager* texture_manager =
      command_decoder->GetContextGroup()->texture_manager();
  gpu::gles2::TextureRef* ref = texture_manager->GetTexture(client_texture_id);
  if (ref)
    texture_manager->SetLevelImage(ref, texture_target, 0, image.get());
}

scoped_ptr<media::VideoDecodeAccelerator>
GpuVideoDecodeAccelerator::CreateVaapiVDA() {
  scoped_ptr<media::VideoDecodeAccelerator> decoder;
#if defined(OS_CHROMEOS) && defined(ARCH_CPU_X86_FAMILY)
  decoder.reset(new VaapiVideoDecodeAccelerator(
      make_context_current_, base::Bind(&GpuVideoDecodeAccelerator::BindImage,
                                        base::Unretained(this))));
#endif
  return decoder.Pass();
}

scoped_ptr<media::VideoDecodeAccelerator>
GpuVideoDecodeAccelerator::CreateVTVDA() {
  scoped_ptr<media::VideoDecodeAccelerator> decoder;
#if defined(OS_MACOSX)
  decoder.reset(new VTVideoDecodeAccelerator(
      static_cast<CGLContextObj>(stub_->decoder()->GetGLContext()->GetHandle()),
      make_context_current_));
#endif
  return decoder.Pass();
}

scoped_ptr<media::VideoDecodeAccelerator>
GpuVideoDecodeAccelerator::CreateOzoneVDA() {
  scoped_ptr<media::VideoDecodeAccelerator> decoder;
#if !defined(OS_CHROMEOS) && defined(USE_OZONE)
  media::MediaOzonePlatform* platform =
      media::MediaOzonePlatform::GetInstance();
  decoder.reset(platform->CreateVideoDecodeAccelerator(make_context_current_));
#endif
  return decoder.Pass();
}

scoped_ptr<media::VideoDecodeAccelerator>
GpuVideoDecodeAccelerator::CreateAndroidVDA() {
  scoped_ptr<media::VideoDecodeAccelerator> decoder;
#if defined(OS_ANDROID)
  decoder.reset(new AndroidVideoDecodeAccelerator(
      stub_->decoder()->AsWeakPtr(),
      make_context_current_));
#endif
  return decoder.Pass();
}

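// Collects the supported profiles of every VDA compiled in for this platform,
// without instantiating any decoder, and converts them to the GPU-side
// representation.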
gpu::VideoDecodeAcceleratorSupportedProfiles
GpuVideoDecodeAccelerator::GetSupportedProfiles() {
  media::VideoDecodeAccelerator::SupportedProfiles profiles;
  const base::CommandLine* cmd_line = base::CommandLine::ForCurrentProcess();
  if (cmd_line->HasSwitch(switches::kDisableAcceleratedVideoDecode))
    return gpu::VideoDecodeAcceleratorSupportedProfiles();

  // Query supported profiles for each VDA. The order of querying VDAs should
  // match the order in which they are initialized, so that each returned
  // profile can be successfully initialized by the corresponding VDA.
#if defined(OS_WIN)
  profiles = DXVAVideoDecodeAccelerator::GetSupportedProfiles();
#elif defined(OS_CHROMEOS)
  media::VideoDecodeAccelerator::SupportedProfiles vda_profiles;
#if defined(USE_V4L2_CODEC)
  vda_profiles = V4L2VideoDecodeAccelerator::GetSupportedProfiles();
  GpuVideoAcceleratorUtil::InsertUniqueDecodeProfiles(vda_profiles, &profiles);
  vda_profiles = V4L2SliceVideoDecodeAccelerator::GetSupportedProfiles();
  GpuVideoAcceleratorUtil::InsertUniqueDecodeProfiles(vda_profiles, &profiles);
#endif
#if defined(ARCH_CPU_X86_FAMILY)
  vda_profiles = VaapiVideoDecodeAccelerator::GetSupportedProfiles();
  GpuVideoAcceleratorUtil::InsertUniqueDecodeProfiles(vda_profiles, &profiles);
#endif
#elif defined(OS_MACOSX)
  profiles = VTVideoDecodeAccelerator::GetSupportedProfiles();
#elif defined(OS_ANDROID)
  profiles = AndroidVideoDecodeAccelerator::GetSupportedProfiles();
#endif
  return GpuVideoAcceleratorUtil::ConvertMediaToGpuDecodeProfiles(profiles);
}

// Runs on the IO thread if video_decode_accelerator_->CanDecodeOnIOThread() is
// true, otherwise on the main thread.
void GpuVideoDecodeAccelerator::OnDecode(
    base::SharedMemoryHandle handle, int32 id, uint32 size) {
  DCHECK(video_decode_accelerator_.get());
  if (id < 0) {
    DLOG(ERROR) << "BitstreamBuffer id " << id << " out of range";
    if (child_message_loop_->BelongsToCurrentThread()) {
      NotifyError(media::VideoDecodeAccelerator::INVALID_ARGUMENT);
    } else {
      child_message_loop_->PostTask(
          FROM_HERE,
          base::Bind(&GpuVideoDecodeAccelerator::NotifyError,
                     base::Unretained(this),
                     media::VideoDecodeAccelerator::INVALID_ARGUMENT));
    }
    return;
  }
  video_decode_accelerator_->Decode(media::BitstreamBuffer(id, handle, size));
}

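// Validates the buffer ids and textures supplied by the client, records the
// expected level info on each texture, and hands the resulting
// media::PictureBuffers to the VDA. The TextureRefs are kept in
// |uncleared_textures_| until the first picture using them is delivered.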
void GpuVideoDecodeAccelerator::OnAssignPictureBuffers(
    const std::vector<int32>& buffer_ids,
    const std::vector<uint32>& texture_ids) {
  if (buffer_ids.size() != texture_ids.size()) {
    NotifyError(media::VideoDecodeAccelerator::INVALID_ARGUMENT);
    return;
  }

  gpu::gles2::GLES2Decoder* command_decoder = stub_->decoder();
  gpu::gles2::TextureManager* texture_manager =
      command_decoder->GetContextGroup()->texture_manager();

  std::vector<media::PictureBuffer> buffers;
  std::vector<scoped_refptr<gpu::gles2::TextureRef> > textures;
  for (uint32 i = 0; i < buffer_ids.size(); ++i) {
    if (buffer_ids[i] < 0) {
      DLOG(ERROR) << "Buffer id " << buffer_ids[i] << " out of range";
      NotifyError(media::VideoDecodeAccelerator::INVALID_ARGUMENT);
      return;
    }
    gpu::gles2::TextureRef* texture_ref = texture_manager->GetTexture(
        texture_ids[i]);
    if (!texture_ref) {
      DLOG(ERROR) << "Failed to find texture id " << texture_ids[i];
      NotifyError(media::VideoDecodeAccelerator::INVALID_ARGUMENT);
      return;
    }
    gpu::gles2::Texture* info = texture_ref->texture();
    if (info->target() != texture_target_) {
      DLOG(ERROR) << "Texture target mismatch for texture id "
                  << texture_ids[i];
      NotifyError(media::VideoDecodeAccelerator::INVALID_ARGUMENT);
      return;
    }
    if (texture_target_ == GL_TEXTURE_EXTERNAL_OES ||
        texture_target_ == GL_TEXTURE_RECTANGLE_ARB) {
      // These textures have their dimensions defined by the underlying
      // storage. Use |texture_dimensions_| for this size.
      texture_manager->SetLevelInfo(texture_ref,
                                    texture_target_,
                                    0,
                                    GL_RGBA,
                                    texture_dimensions_.width(),
                                    texture_dimensions_.height(),
                                    1,
                                    0,
                                    GL_RGBA,
                                    GL_UNSIGNED_BYTE,
                                    false);
    } else {
      // For other targets, texture dimensions should already be defined.
      GLsizei width = 0, height = 0;
      info->GetLevelSize(texture_target_, 0, &width, &height);
      if (width != texture_dimensions_.width() ||
          height != texture_dimensions_.height()) {
        DLOG(ERROR) << "Size mismatch for texture id " << texture_ids[i];
        NotifyError(media::VideoDecodeAccelerator::INVALID_ARGUMENT);
        return;
      }

      // TODO(dshwang): after moving to D3D11, remove this. crbug.com/438691
      GLenum format =
          video_decode_accelerator_.get()->GetSurfaceInternalFormat();
      if (format != GL_RGBA) {
        texture_manager->SetLevelInfo(texture_ref, texture_target_, 0, format,
                                      width, height, 1, 0, format, 0, false);
      }
    }
    buffers.push_back(media::PictureBuffer(buffer_ids[i], texture_dimensions_,
                                           texture_ref->service_id(),
                                           texture_ids[i]));
    textures.push_back(texture_ref);
  }
  video_decode_accelerator_->AssignPictureBuffers(buffers);
  DebugAutoLock auto_lock(debug_uncleared_textures_lock_);
  for (uint32 i = 0; i < buffer_ids.size(); ++i)
    uncleared_textures_[buffer_ids[i]] = textures[i];
}

void GpuVideoDecodeAccelerator::OnReusePictureBuffer(
    int32 picture_buffer_id) {
  DCHECK(video_decode_accelerator_.get());
  video_decode_accelerator_->ReusePictureBuffer(picture_buffer_id);
}

void GpuVideoDecodeAccelerator::OnFlush() {
  DCHECK(video_decode_accelerator_.get());
  video_decode_accelerator_->Flush();
}

void GpuVideoDecodeAccelerator::OnReset() {
  DCHECK(video_decode_accelerator_.get());
  video_decode_accelerator_->Reset();
}

void GpuVideoDecodeAccelerator::OnDestroy() {
  DCHECK(video_decode_accelerator_.get());
  OnWillDestroyStub();
}

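// Runs on the IO thread, invoked from MessageFilter::OnFilterRemoved();
// cancels pending IO-thread callbacks and unblocks the Wait() in
// OnWillDestroyStub().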
void GpuVideoDecodeAccelerator::OnFilterRemoved() {
  // We're destroying; cancel all callbacks.
  weak_factory_for_io_.InvalidateWeakPtrs();
  filter_removed_.Signal();
}

void GpuVideoDecodeAccelerator::NotifyEndOfBitstreamBuffer(
    int32 bitstream_buffer_id) {
  if (!Send(new AcceleratedVideoDecoderHostMsg_BitstreamBufferProcessed(
          host_route_id_, bitstream_buffer_id))) {
    DLOG(ERROR)
        << "Send(AcceleratedVideoDecoderHostMsg_BitstreamBufferProcessed) "
        << "failed";
  }
}

void GpuVideoDecodeAccelerator::NotifyFlushDone() {
  if (!Send(new AcceleratedVideoDecoderHostMsg_FlushDone(host_route_id_)))
    DLOG(ERROR) << "Send(AcceleratedVideoDecoderHostMsg_FlushDone) failed";
}

void GpuVideoDecodeAccelerator::NotifyResetDone() {
  if (!Send(new AcceleratedVideoDecoderHostMsg_ResetDone(host_route_id_)))
    DLOG(ERROR) << "Send(AcceleratedVideoDecoderHostMsg_ResetDone) failed";
}

void GpuVideoDecodeAccelerator::OnWillDestroyStub() {
  // The stub is going away, so we have to stop and destroy the VDA here
  // before returning, because the VDA may need the GL context to run and/or
  // do its cleanup. However, we cannot destroy the VDA before the IO thread
  // message filter is removed, since we cannot service incoming messages once
  // the VDA is gone. We also cannot simply check for the existence of the VDA
  // on the IO thread, because we don't want to synchronize the IO thread with
  // the ChildThread. So we wait for the RemoveFilter callback here and destroy
  // the VDA after it arrives, before returning.
  if (filter_.get()) {
    stub_->channel()->RemoveFilter(filter_.get());
    filter_removed_.Wait();
  }

  stub_->channel()->RemoveRoute(host_route_id_);
  stub_->RemoveDestructionObserver(this);

  video_decode_accelerator_.reset();
  delete this;
}

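// Runs on the child thread the first time a picture buffer is delivered (see
// PictureReady()). Marks level 0 of the backing texture as cleared in the
// TextureManager and drops our reference to it.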
void GpuVideoDecodeAccelerator::SetTextureCleared(
    const media::Picture& picture) {
  DCHECK(child_message_loop_->BelongsToCurrentThread());
  DebugAutoLock auto_lock(debug_uncleared_textures_lock_);
  std::map<int32, scoped_refptr<gpu::gles2::TextureRef> >::iterator it;
  it = uncleared_textures_.find(picture.picture_buffer_id());
  if (it == uncleared_textures_.end())
    return;  // the texture has been cleared

  scoped_refptr<gpu::gles2::TextureRef> texture_ref = it->second;
  GLenum target = texture_ref->texture()->target();
  gpu::gles2::TextureManager* texture_manager =
      stub_->decoder()->GetContextGroup()->texture_manager();
  DCHECK(!texture_ref->texture()->IsLevelCleared(target, 0));
  texture_manager->SetLevelCleared(texture_ref.get(), target, 0, true);
  uncleared_textures_.erase(it);
}

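// Sends |message| via the IO-thread filter when called on the IO thread,
// otherwise via the GPU channel on the child thread.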
bool GpuVideoDecodeAccelerator::Send(IPC::Message* message) {
  if (filter_.get() && io_message_loop_->BelongsToCurrentThread())
    return filter_->SendOnIOThread(message);
  DCHECK(child_message_loop_->BelongsToCurrentThread());
  return stub_->channel()->Send(message);
}

void GpuVideoDecodeAccelerator::SendCreateDecoderReply(IPC::Message* message,
                                                       bool succeeded) {
  GpuCommandBufferMsg_CreateVideoDecoder::WriteReplyParams(message, succeeded);
  Send(message);
}

}  // namespace content