// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "content/common/gpu/media/dxva_video_decode_accelerator.h"

#if !defined(OS_WIN)
#error This file should only be built on Windows.
#endif  // !defined(OS_WIN)

#include <ks.h>
#include <codecapi.h>
#include <mfapi.h>
#include <mferror.h>
#include <wmcodecdsp.h>

#include "base/bind.h"
#include "base/callback.h"
#include "base/command_line.h"
#include "base/debug/trace_event.h"
#include "base/logging.h"
#include "base/memory/scoped_handle.h"
#include "base/memory/scoped_ptr.h"
#include "base/memory/shared_memory.h"
#include "base/message_loop/message_loop.h"
#include "base/win/windows_version.h"
#include "media/video/video_decode_accelerator.h"
#include "ui/gl/gl_bindings.h"
#include "ui/gl/gl_surface_egl.h"
#include "ui/gl/gl_switches.h"

namespace content {

// We only request 5 picture buffers from the client which are used to hold the
// decoded samples. These buffers are then reused when the client tells us that
// it is done with the buffer.
static const int kNumPictureBuffers = 5;

bool DXVAVideoDecodeAccelerator::pre_sandbox_init_done_ = false;
uint32 DXVAVideoDecodeAccelerator::dev_manager_reset_token_ = 0;
IDirect3DDeviceManager9* DXVAVideoDecodeAccelerator::device_manager_ = NULL;
IDirect3DDevice9Ex* DXVAVideoDecodeAccelerator::device_ = NULL;
IDirect3DQuery9* DXVAVideoDecodeAccelerator::query_ = NULL;
IDirect3D9Ex* DXVAVideoDecodeAccelerator::d3d9_ = NULL;

#define RETURN_ON_FAILURE(result, log, ret)  \
  do {                                       \
    if (!(result)) {                         \
      DLOG(ERROR) << log;                    \
      return ret;                            \
    }                                        \
  } while (0)

#define RETURN_ON_HR_FAILURE(result, log, ret)  \
  RETURN_ON_FAILURE(SUCCEEDED(result),  \
                    log << ", HRESULT: 0x" << std::hex << result,  \
                    ret);

#define RETURN_AND_NOTIFY_ON_FAILURE(result, log, error_code, ret)  \
  do {                                                              \
    if (!(result)) {                                                \
      DVLOG(1) << log;                                              \
      StopOnError(error_code);                                      \
      return ret;                                                   \
    }                                                               \
  } while (0)

#define RETURN_AND_NOTIFY_ON_HR_FAILURE(result, log, error_code, ret)  \
  RETURN_AND_NOTIFY_ON_FAILURE(SUCCEEDED(result),  \
                               log << ", HRESULT: 0x" << std::hex << result,  \
                               error_code, ret);
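
// Typical usage, mirroring the call sites below: check a result, log on
// failure, and bail out with the caller's failure value (left empty for void
// functions), e.g.
//   HRESULT hr = MFCreateSample(sample.Receive());
//   RETURN_ON_HR_FAILURE(hr, "MFCreateSample failed", NULL);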

// Maximum number of iterations we allow before aborting the attempt to flush
// the batched queries to the driver, at which point we give up and allow
// torn/corrupt frames to be rendered.
enum { kMaxIterationsForD3DFlush = 10 };

static IMFSample* CreateEmptySample() {
  base::win::ScopedComPtr<IMFSample> sample;
  HRESULT hr = MFCreateSample(sample.Receive());
  RETURN_ON_HR_FAILURE(hr, "MFCreateSample failed", NULL);
  return sample.Detach();
}

// Creates a Media Foundation sample with one buffer of length |buffer_length|
// on a |align|-byte boundary. Alignment must be a perfect power of 2 or 0.
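// Note that MFCreateAlignedMemoryBuffer takes its alignment argument as
// (alignment - 1), e.g. MF_16_BYTE_ALIGNMENT is 15, which is why the call
// below passes |align - 1|.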
static IMFSample* CreateEmptySampleWithBuffer(int buffer_length, int align) {
  CHECK_GT(buffer_length, 0);

  base::win::ScopedComPtr<IMFSample> sample;
  sample.Attach(CreateEmptySample());

  base::win::ScopedComPtr<IMFMediaBuffer> buffer;
  HRESULT hr = E_FAIL;
  if (align == 0) {
    // Note that MFCreateMemoryBuffer is the same as MFCreateAlignedMemoryBuffer
    // with the align argument being 0.
    hr = MFCreateMemoryBuffer(buffer_length, buffer.Receive());
  } else {
    hr = MFCreateAlignedMemoryBuffer(buffer_length,
                                     align - 1,
                                     buffer.Receive());
  }
  RETURN_ON_HR_FAILURE(hr, "Failed to create memory buffer for sample", NULL);

  hr = sample->AddBuffer(buffer);
  RETURN_ON_HR_FAILURE(hr, "Failed to add buffer to sample", NULL);

  return sample.Detach();
}

// Creates a Media Foundation sample with one buffer containing a copy of the
// given Annex B stream data.
// If duration and sample time are not known, provide 0.
// |min_size| specifies the minimum size of the buffer (might be required by
// the decoder for input). If no alignment is required, provide 0.
static IMFSample* CreateInputSample(const uint8* stream, int size,
                                    int min_size, int alignment) {
  CHECK(stream);
  CHECK_GT(size, 0);
  base::win::ScopedComPtr<IMFSample> sample;
  sample.Attach(CreateEmptySampleWithBuffer(std::max(min_size, size),
                                            alignment));
  RETURN_ON_FAILURE(sample, "Failed to create empty sample", NULL);

  base::win::ScopedComPtr<IMFMediaBuffer> buffer;
  HRESULT hr = sample->GetBufferByIndex(0, buffer.Receive());
  RETURN_ON_HR_FAILURE(hr, "Failed to get buffer from sample", NULL);

  DWORD max_length = 0;
  DWORD current_length = 0;
  uint8* destination = NULL;
  hr = buffer->Lock(&destination, &max_length, &current_length);
  RETURN_ON_HR_FAILURE(hr, "Failed to lock buffer", NULL);

  CHECK_EQ(current_length, 0u);
  CHECK_GE(static_cast<int>(max_length), size);
  memcpy(destination, stream, size);

  hr = buffer->Unlock();
  RETURN_ON_HR_FAILURE(hr, "Failed to unlock buffer", NULL);

  hr = buffer->SetCurrentLength(size);
  RETURN_ON_HR_FAILURE(hr, "Failed to set buffer length", NULL);

  return sample.Detach();
}

static IMFSample* CreateSampleFromInputBuffer(
    const media::BitstreamBuffer& bitstream_buffer,
    DWORD stream_size,
    DWORD alignment) {
  base::SharedMemory shm(bitstream_buffer.handle(), true);
  RETURN_ON_FAILURE(shm.Map(bitstream_buffer.size()),
                    "Failed in base::SharedMemory::Map", NULL);

  return CreateInputSample(reinterpret_cast<const uint8*>(shm.memory()),
                           bitstream_buffer.size(),
                           stream_size,
                           alignment);
}

// Maintains information about a DXVA picture buffer, i.e. whether it is
// available for rendering, the texture information, etc.
struct DXVAVideoDecodeAccelerator::DXVAPictureBuffer {
 public:
  static linked_ptr<DXVAPictureBuffer> Create(
      const media::PictureBuffer& buffer, EGLConfig egl_config);
  ~DXVAPictureBuffer();

  void ReusePictureBuffer();
  // Copies the output sample data to the picture buffer provided by the
  // client.
  // The dest_surface parameter contains the decoded bits.
  bool CopyOutputSampleDataToPictureBuffer(IDirect3DSurface9* dest_surface);

  bool available() const {
    return available_;
  }

  void set_available(bool available) {
    available_ = available;
  }

  int id() const {
    return picture_buffer_.id();
  }

  gfx::Size size() const {
    return picture_buffer_.size();
  }

 private:
  explicit DXVAPictureBuffer(const media::PictureBuffer& buffer);

  bool available_;
  media::PictureBuffer picture_buffer_;
  EGLSurface decoding_surface_;
  base::win::ScopedComPtr<IDirect3DTexture9> decoding_texture_;

  DISALLOW_COPY_AND_ASSIGN(DXVAPictureBuffer);
};

// static
linked_ptr<DXVAVideoDecodeAccelerator::DXVAPictureBuffer>
DXVAVideoDecodeAccelerator::DXVAPictureBuffer::Create(
    const media::PictureBuffer& buffer, EGLConfig egl_config) {
  linked_ptr<DXVAPictureBuffer> picture_buffer(new DXVAPictureBuffer(buffer));

  EGLDisplay egl_display = gfx::GLSurfaceEGL::GetHardwareDisplay();

  EGLint attrib_list[] = {
    EGL_WIDTH, buffer.size().width(),
    EGL_HEIGHT, buffer.size().height(),
    EGL_TEXTURE_FORMAT, EGL_TEXTURE_RGB,
    EGL_TEXTURE_TARGET, EGL_TEXTURE_2D,
    EGL_NONE
  };

  picture_buffer->decoding_surface_ = eglCreatePbufferSurface(
      egl_display,
      egl_config,
      attrib_list);
  RETURN_ON_FAILURE(picture_buffer->decoding_surface_,
                    "Failed to create surface",
                    linked_ptr<DXVAPictureBuffer>(NULL));

  HANDLE share_handle = NULL;
  EGLBoolean ret = eglQuerySurfacePointerANGLE(
      egl_display,
      picture_buffer->decoding_surface_,
      EGL_D3D_TEXTURE_2D_SHARE_HANDLE_ANGLE,
      &share_handle);

  RETURN_ON_FAILURE(share_handle && ret == EGL_TRUE,
                    "Failed to query ANGLE surface pointer",
                    linked_ptr<DXVAPictureBuffer>(NULL));

  HRESULT hr = DXVAVideoDecodeAccelerator::device_->CreateTexture(
      buffer.size().width(),
      buffer.size().height(),
      1,  // Levels: a single level, no mipmaps.
      D3DUSAGE_RENDERTARGET,
      D3DFMT_X8R8G8B8,
      D3DPOOL_DEFAULT,
      picture_buffer->decoding_texture_.Receive(),
      &share_handle);

  RETURN_ON_HR_FAILURE(hr, "Failed to create texture",
                       linked_ptr<DXVAPictureBuffer>(NULL));
  return picture_buffer;
}

DXVAVideoDecodeAccelerator::DXVAPictureBuffer::DXVAPictureBuffer(
    const media::PictureBuffer& buffer)
    : available_(true),
      picture_buffer_(buffer),
      decoding_surface_(NULL) {
}

DXVAVideoDecodeAccelerator::DXVAPictureBuffer::~DXVAPictureBuffer() {
  if (decoding_surface_) {
    EGLDisplay egl_display = gfx::GLSurfaceEGL::GetHardwareDisplay();

    eglReleaseTexImage(
        egl_display,
        decoding_surface_,
        EGL_BACK_BUFFER);

    eglDestroySurface(
        egl_display,
        decoding_surface_);
    decoding_surface_ = NULL;
  }
}

void DXVAVideoDecodeAccelerator::DXVAPictureBuffer::ReusePictureBuffer() {
  DCHECK(decoding_surface_);
  EGLDisplay egl_display = gfx::GLSurfaceEGL::GetHardwareDisplay();
  eglReleaseTexImage(
      egl_display,
      decoding_surface_,
      EGL_BACK_BUFFER);
  set_available(true);
}

bool DXVAVideoDecodeAccelerator::DXVAPictureBuffer::
    CopyOutputSampleDataToPictureBuffer(IDirect3DSurface9* dest_surface) {
  DCHECK(dest_surface);

  D3DSURFACE_DESC surface_desc;
  HRESULT hr = dest_surface->GetDesc(&surface_desc);
  RETURN_ON_HR_FAILURE(hr, "Failed to get surface description", false);

  D3DSURFACE_DESC texture_desc;
  decoding_texture_->GetLevelDesc(0, &texture_desc);

  if (texture_desc.Width != surface_desc.Width ||
      texture_desc.Height != surface_desc.Height) {
    NOTREACHED() << "Decode surface of different dimension than texture";
    return false;
  }

  hr = d3d9_->CheckDeviceFormatConversion(D3DADAPTER_DEFAULT,
                                          D3DDEVTYPE_HAL,
                                          surface_desc.Format,
                                          D3DFMT_X8R8G8B8);
  bool device_supports_format_conversion = (hr == S_OK);

  RETURN_ON_FAILURE(device_supports_format_conversion,
                    "Device does not support format conversion",
                    false);

  // This function currently executes in the context of IPC handlers in the
  // GPU process which ensures that there is always an OpenGL context.
  GLint current_texture = 0;
  glGetIntegerv(GL_TEXTURE_BINDING_2D, &current_texture);

  glBindTexture(GL_TEXTURE_2D, picture_buffer_.texture_id());

  glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);

  base::win::ScopedComPtr<IDirect3DSurface9> d3d_surface;
  hr = decoding_texture_->GetSurfaceLevel(0, d3d_surface.Receive());
  RETURN_ON_HR_FAILURE(hr, "Failed to get surface from texture", false);

  hr = device_->StretchRect(dest_surface,
                            NULL,
                            d3d_surface,
                            NULL,
                            D3DTEXF_NONE);
  RETURN_ON_HR_FAILURE(hr, "Colorspace conversion via StretchRect failed",
                       false);

  // Ideally, this should be done immediately before the draw call that uses
  // the texture. Flush it once here though.
  hr = query_->Issue(D3DISSUE_END);
  RETURN_ON_HR_FAILURE(hr, "Failed to issue END", false);

  // The DXVA decoder has its own device which it uses for decoding. ANGLE
  // has its own device which we don't have access to.
  // The above code attempts to copy the decoded picture into a surface
  // which is owned by ANGLE. As there are multiple devices involved in
  // this, the StretchRect call above is not synchronous.
  // We attempt to flush the batched operations to ensure that the picture is
  // copied to the surface owned by ANGLE.
  // We need to do this in a loop and call flush multiple times.
  // We have seen the GetData call for flushing the command buffer fail to
  // return success occasionally on multi core machines, leading to an
  // infinite loop.
  // The workaround is to place an upper limit (kMaxIterationsForD3DFlush) on
  // the number of iterations we wait for the flush to finish.
  int iterations = 0;
  while ((query_->GetData(NULL, 0, D3DGETDATA_FLUSH) == S_FALSE) &&
         ++iterations < kMaxIterationsForD3DFlush) {
    Sleep(1);  // Poor-man's Yield().
  }
  EGLDisplay egl_display = gfx::GLSurfaceEGL::GetHardwareDisplay();
  eglBindTexImage(
      egl_display,
      decoding_surface_,
      EGL_BACK_BUFFER);
  glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
  glBindTexture(GL_TEXTURE_2D, current_texture);
  return true;
}

DXVAVideoDecodeAccelerator::PendingSampleInfo::PendingSampleInfo(
    int32 buffer_id, IMFSample* sample)
    : input_buffer_id(buffer_id) {
  output_sample.Attach(sample);
}

DXVAVideoDecodeAccelerator::PendingSampleInfo::~PendingSampleInfo() {}

// static
void DXVAVideoDecodeAccelerator::PreSandboxInitialization() {
  // Should be called only once during program startup.
  DCHECK(!pre_sandbox_init_done_);

  static const wchar_t* kDecodingDlls[] = {
    L"d3d9.dll",
    L"dxva2.dll",
    L"mf.dll",
    L"mfplat.dll",
    L"msmpeg2vdec.dll",
  };

  for (int i = 0; i < arraysize(kDecodingDlls); ++i) {
    if (!::LoadLibrary(kDecodingDlls[i])) {
      DLOG(ERROR) << "Failed to load decoder dll: " << kDecodingDlls[i]
                  << ", Error: " << ::GetLastError();
      return;
    }
  }

  RETURN_ON_FAILURE(CreateD3DDevManager(),
                    "Failed to initialize D3D device and manager",);

  if (base::win::GetVersion() == base::win::VERSION_WIN8) {
    // On Windows 8+ mf.dll forwards to mfcore.dll, which does not exist on
    // Windows 7. Loading mfcore.dll fails on Windows 8.1 from inside the
    // sandbox.
    if (!LoadLibrary(L"mfcore.dll")) {
      DLOG(ERROR) << "Failed to load mfcore.dll, Error: " << ::GetLastError();
      return;
    }
    // MFStartup needs to be called once outside the sandbox. It fails on
    // Windows 8.1 with E_NOTIMPL if it is called for the first time inside
    // the sandbox.
    RETURN_ON_HR_FAILURE(MFStartup(MF_VERSION, MFSTARTUP_FULL),
                         "MFStartup failed.",);
  }

  pre_sandbox_init_done_ = true;
}

// static
bool DXVAVideoDecodeAccelerator::CreateD3DDevManager() {
  HRESULT hr = Direct3DCreate9Ex(D3D_SDK_VERSION, &d3d9_);
  RETURN_ON_HR_FAILURE(hr, "Direct3DCreate9Ex failed", false);

  D3DPRESENT_PARAMETERS present_params = {0};
  present_params.BackBufferWidth = 1;
  present_params.BackBufferHeight = 1;
  present_params.BackBufferFormat = D3DFMT_UNKNOWN;
  present_params.BackBufferCount = 1;
  present_params.SwapEffect = D3DSWAPEFFECT_DISCARD;
  present_params.hDeviceWindow = ::GetShellWindow();
  present_params.Windowed = TRUE;
  present_params.Flags = D3DPRESENTFLAG_VIDEO;
  present_params.FullScreen_RefreshRateInHz = 0;
  present_params.PresentationInterval = 0;

  hr = d3d9_->CreateDeviceEx(D3DADAPTER_DEFAULT,
                             D3DDEVTYPE_HAL,
                             ::GetShellWindow(),
                             D3DCREATE_FPU_PRESERVE |
                             D3DCREATE_SOFTWARE_VERTEXPROCESSING |
                             D3DCREATE_DISABLE_PSGP_THREADING |
                             D3DCREATE_MULTITHREADED,
                             &present_params,
                             NULL,
                             &device_);
  RETURN_ON_HR_FAILURE(hr, "Failed to create D3D device", false);

  hr = DXVA2CreateDirect3DDeviceManager9(&dev_manager_reset_token_,
                                         &device_manager_);
  RETURN_ON_HR_FAILURE(hr, "DXVA2CreateDirect3DDeviceManager9 failed", false);

  hr = device_manager_->ResetDevice(device_, dev_manager_reset_token_);
  RETURN_ON_HR_FAILURE(hr, "Failed to reset device", false);

  hr = device_->CreateQuery(D3DQUERYTYPE_EVENT, &query_);
  RETURN_ON_HR_FAILURE(hr, "Failed to create D3D device query", false);
  // Ensure query_ API works (to avoid an infinite loop later in
  // CopyOutputSampleDataToPictureBuffer).
  hr = query_->Issue(D3DISSUE_END);
  RETURN_ON_HR_FAILURE(hr, "Failed to issue END test query", false);
  return true;
}

DXVAVideoDecodeAccelerator::DXVAVideoDecodeAccelerator(
    media::VideoDecodeAccelerator::Client* client,
    const base::Callback<bool(void)>& make_context_current)
    : client_(client),
      egl_config_(NULL),
      state_(kUninitialized),
      pictures_requested_(false),
      inputs_before_decode_(0),
      make_context_current_(make_context_current) {
  memset(&input_stream_info_, 0, sizeof(input_stream_info_));
  memset(&output_stream_info_, 0, sizeof(output_stream_info_));
}

DXVAVideoDecodeAccelerator::~DXVAVideoDecodeAccelerator() {
  client_ = NULL;
}

bool DXVAVideoDecodeAccelerator::Initialize(media::VideoCodecProfile profile) {
  DCHECK(CalledOnValidThread());

  // TODO(ananta)
  // H264PROFILE_HIGH video decoding is janky at times. Needs more
  // investigation.
  if (profile != media::H264PROFILE_BASELINE &&
      profile != media::H264PROFILE_MAIN &&
      profile != media::H264PROFILE_HIGH) {
    RETURN_AND_NOTIFY_ON_FAILURE(false,
        "Unsupported h264 profile", PLATFORM_FAILURE, false);
  }

  RETURN_AND_NOTIFY_ON_FAILURE(pre_sandbox_init_done_,
      "PreSandbox initialization not completed", PLATFORM_FAILURE, false);

  RETURN_AND_NOTIFY_ON_FAILURE(
      gfx::g_driver_egl.ext.b_EGL_ANGLE_surface_d3d_texture_2d_share_handle,
      "EGL_ANGLE_surface_d3d_texture_2d_share_handle unavailable",
      PLATFORM_FAILURE,
      false);

  RETURN_AND_NOTIFY_ON_FAILURE((state_ == kUninitialized),
      "Initialize: invalid state: " << state_, ILLEGAL_STATE, false);

  HRESULT hr = MFStartup(MF_VERSION, MFSTARTUP_FULL);
  RETURN_AND_NOTIFY_ON_HR_FAILURE(hr, "MFStartup failed.", PLATFORM_FAILURE,
                                  false);

  RETURN_AND_NOTIFY_ON_FAILURE(InitDecoder(),
      "Failed to initialize decoder", PLATFORM_FAILURE, false);

  RETURN_AND_NOTIFY_ON_FAILURE(GetStreamsInfoAndBufferReqs(),
      "Failed to get input/output stream info.", PLATFORM_FAILURE, false);

  RETURN_AND_NOTIFY_ON_FAILURE(
      SendMFTMessage(MFT_MESSAGE_NOTIFY_BEGIN_STREAMING, 0),
      "Send MFT_MESSAGE_NOTIFY_BEGIN_STREAMING notification failed",
      PLATFORM_FAILURE, false);

  RETURN_AND_NOTIFY_ON_FAILURE(
      SendMFTMessage(MFT_MESSAGE_NOTIFY_START_OF_STREAM, 0),
      "Send MFT_MESSAGE_NOTIFY_START_OF_STREAM notification failed",
      PLATFORM_FAILURE, false);

  state_ = kNormal;
  base::MessageLoop::current()->PostTask(FROM_HERE, base::Bind(
      &DXVAVideoDecodeAccelerator::NotifyInitializeDone,
      base::AsWeakPtr(this)));
  return true;
}

void DXVAVideoDecodeAccelerator::Decode(
    const media::BitstreamBuffer& bitstream_buffer) {
  DCHECK(CalledOnValidThread());

  RETURN_AND_NOTIFY_ON_FAILURE((state_ == kNormal || state_ == kStopped ||
                                state_ == kFlushing),
      "Invalid state: " << state_, ILLEGAL_STATE,);

  base::win::ScopedComPtr<IMFSample> sample;
  sample.Attach(CreateSampleFromInputBuffer(bitstream_buffer,
                                            input_stream_info_.cbSize,
                                            input_stream_info_.cbAlignment));
  RETURN_AND_NOTIFY_ON_FAILURE(sample, "Failed to create input sample",
                               PLATFORM_FAILURE,);

  RETURN_AND_NOTIFY_ON_HR_FAILURE(sample->SetSampleTime(bitstream_buffer.id()),
      "Failed to associate input buffer id with sample", PLATFORM_FAILURE,);

  DecodeInternal(sample);
}

void DXVAVideoDecodeAccelerator::AssignPictureBuffers(
    const std::vector<media::PictureBuffer>& buffers) {
  DCHECK(CalledOnValidThread());

  RETURN_AND_NOTIFY_ON_FAILURE((state_ != kUninitialized),
      "Invalid state: " << state_, ILLEGAL_STATE,);
  RETURN_AND_NOTIFY_ON_FAILURE((kNumPictureBuffers == buffers.size()),
      "Failed to provide requested picture buffers. (Got " << buffers.size() <<
      ", requested " << kNumPictureBuffers << ")", INVALID_ARGUMENT,);

  // Copy the picture buffers provided by the client to the available list,
  // and mark these buffers as available for use.
  for (size_t buffer_index = 0; buffer_index < buffers.size();
       ++buffer_index) {
    linked_ptr<DXVAPictureBuffer> picture_buffer =
        DXVAPictureBuffer::Create(buffers[buffer_index], egl_config_);
    RETURN_AND_NOTIFY_ON_FAILURE(picture_buffer.get(),
        "Failed to allocate picture buffer", PLATFORM_FAILURE,);

    bool inserted = output_picture_buffers_.insert(std::make_pair(
        buffers[buffer_index].id(), picture_buffer)).second;
    DCHECK(inserted);
  }
  ProcessPendingSamples();
  if (state_ == kFlushing && pending_output_samples_.empty())
    FlushInternal();
}

void DXVAVideoDecodeAccelerator::ReusePictureBuffer(
    int32 picture_buffer_id) {
  DCHECK(CalledOnValidThread());

  RETURN_AND_NOTIFY_ON_FAILURE((state_ != kUninitialized),
      "Invalid state: " << state_, ILLEGAL_STATE,);

  if (output_picture_buffers_.empty())
    return;

  OutputBuffers::iterator it = output_picture_buffers_.find(picture_buffer_id);
  RETURN_AND_NOTIFY_ON_FAILURE(it != output_picture_buffers_.end(),
      "Invalid picture id: " << picture_buffer_id, INVALID_ARGUMENT,);

  it->second->ReusePictureBuffer();
  ProcessPendingSamples();

  if (state_ == kFlushing && pending_output_samples_.empty())
    FlushInternal();
}

void DXVAVideoDecodeAccelerator::Flush() {
  DCHECK(CalledOnValidThread());

  DVLOG(1) << "DXVAVideoDecodeAccelerator::Flush";

  RETURN_AND_NOTIFY_ON_FAILURE((state_ == kNormal || state_ == kStopped),
      "Unexpected decoder state: " << state_, ILLEGAL_STATE,);

  state_ = kFlushing;

  RETURN_AND_NOTIFY_ON_FAILURE(SendMFTMessage(MFT_MESSAGE_COMMAND_DRAIN, 0),
      "Failed to send drain message", PLATFORM_FAILURE,);

  if (!pending_output_samples_.empty())
    return;

  FlushInternal();
}

void DXVAVideoDecodeAccelerator::Reset() {
  DCHECK(CalledOnValidThread());

  DVLOG(1) << "DXVAVideoDecodeAccelerator::Reset";

  RETURN_AND_NOTIFY_ON_FAILURE((state_ == kNormal || state_ == kStopped),
      "Reset: invalid state: " << state_, ILLEGAL_STATE,);

  state_ = kResetting;

  pending_output_samples_.clear();

  NotifyInputBuffersDropped();

  RETURN_AND_NOTIFY_ON_FAILURE(SendMFTMessage(MFT_MESSAGE_COMMAND_FLUSH, 0),
      "Reset: Failed to send message.", PLATFORM_FAILURE,);

  base::MessageLoop::current()->PostTask(FROM_HERE, base::Bind(
      &DXVAVideoDecodeAccelerator::NotifyResetDone, base::AsWeakPtr(this)));

  state_ = DXVAVideoDecodeAccelerator::kNormal;
}

void DXVAVideoDecodeAccelerator::Destroy() {
  DCHECK(CalledOnValidThread());
  Invalidate();
  delete this;
}

bool DXVAVideoDecodeAccelerator::InitDecoder() {
  // We cannot use CoCreateInstance to instantiate the decoder object as that
  // fails in the sandbox. We mimic the steps CoCreateInstance uses to
  // instantiate the object.
  HMODULE decoder_dll = ::GetModuleHandle(L"msmpeg2vdec.dll");
  RETURN_ON_FAILURE(decoder_dll,
                    "msmpeg2vdec.dll required for decoding is not loaded",
                    false);

  typedef HRESULT (WINAPI* GetClassObject)(const CLSID& clsid,
                                           const IID& iid,
                                           void** object);

  GetClassObject get_class_object = reinterpret_cast<GetClassObject>(
      GetProcAddress(decoder_dll, "DllGetClassObject"));
  RETURN_ON_FAILURE(get_class_object,
                    "Failed to get DllGetClassObject pointer", false);

  base::win::ScopedComPtr<IClassFactory> factory;
  HRESULT hr = get_class_object(__uuidof(CMSH264DecoderMFT),
                                __uuidof(IClassFactory),
                                reinterpret_cast<void**>(factory.Receive()));
  RETURN_ON_HR_FAILURE(hr, "DllGetClassObject for decoder failed", false);

  hr = factory->CreateInstance(NULL, __uuidof(IMFTransform),
                               reinterpret_cast<void**>(decoder_.Receive()));
  RETURN_ON_HR_FAILURE(hr, "Failed to create decoder instance", false);

  RETURN_ON_FAILURE(CheckDecoderDxvaSupport(),
                    "Failed to check decoder DXVA support", false);

  hr = decoder_->ProcessMessage(
      MFT_MESSAGE_SET_D3D_MANAGER,
      reinterpret_cast<ULONG_PTR>(device_manager_));
  RETURN_ON_HR_FAILURE(hr, "Failed to pass D3D manager to decoder", false);

  EGLDisplay egl_display = gfx::GLSurfaceEGL::GetHardwareDisplay();

  EGLint config_attribs[] = {
    EGL_BUFFER_SIZE, 32,
    EGL_RED_SIZE, 8,
    EGL_GREEN_SIZE, 8,
    EGL_BLUE_SIZE, 8,
    EGL_SURFACE_TYPE, EGL_PBUFFER_BIT,
    EGL_ALPHA_SIZE, 0,
    EGL_NONE
  };

  EGLint num_configs;

  if (!eglChooseConfig(
      egl_display,
      config_attribs,
      &egl_config_,
      1,  // config_size: we only need a single matching config.
      &num_configs))
    return false;

  return SetDecoderMediaTypes();
}

bool DXVAVideoDecodeAccelerator::CheckDecoderDxvaSupport() {
  base::win::ScopedComPtr<IMFAttributes> attributes;
  HRESULT hr = decoder_->GetAttributes(attributes.Receive());
  RETURN_ON_HR_FAILURE(hr, "Failed to get decoder attributes", false);

  UINT32 dxva = 0;
  hr = attributes->GetUINT32(MF_SA_D3D_AWARE, &dxva);
  RETURN_ON_HR_FAILURE(hr, "Failed to check if decoder supports DXVA", false);

  hr = attributes->SetUINT32(CODECAPI_AVDecVideoAcceleration_H264, TRUE);
  RETURN_ON_HR_FAILURE(hr, "Failed to enable DXVA H/W decoding", false);
  return true;
}

bool DXVAVideoDecodeAccelerator::SetDecoderMediaTypes() {
  RETURN_ON_FAILURE(SetDecoderInputMediaType(),
                    "Failed to set decoder input media type", false);
  return SetDecoderOutputMediaType(MFVideoFormat_NV12);
}

bool DXVAVideoDecodeAccelerator::SetDecoderInputMediaType() {
  base::win::ScopedComPtr<IMFMediaType> media_type;
  HRESULT hr = MFCreateMediaType(media_type.Receive());
  RETURN_ON_HR_FAILURE(hr, "MFCreateMediaType failed", false);

  hr = media_type->SetGUID(MF_MT_MAJOR_TYPE, MFMediaType_Video);
  RETURN_ON_HR_FAILURE(hr, "Failed to set major input type", false);

  hr = media_type->SetGUID(MF_MT_SUBTYPE, MFVideoFormat_H264);
  RETURN_ON_HR_FAILURE(hr, "Failed to set subtype", false);

  // Not sure about this. MSDN recommends setting this value on the input
  // media type.
  hr = media_type->SetUINT32(MF_MT_INTERLACE_MODE,
                             MFVideoInterlace_MixedInterlaceOrProgressive);
  RETURN_ON_HR_FAILURE(hr, "Failed to set interlace mode", false);

  hr = decoder_->SetInputType(0, media_type, 0);  // No flags
  RETURN_ON_HR_FAILURE(hr, "Failed to set decoder input type", false);
  return true;
}

bool DXVAVideoDecodeAccelerator::SetDecoderOutputMediaType(
    const GUID& subtype) {
  base::win::ScopedComPtr<IMFMediaType> out_media_type;

  for (uint32 i = 0;
       SUCCEEDED(decoder_->GetOutputAvailableType(0, i,
                                                  out_media_type.Receive()));
       ++i) {
    GUID out_subtype = {0};
    HRESULT hr = out_media_type->GetGUID(MF_MT_SUBTYPE, &out_subtype);
    RETURN_ON_HR_FAILURE(hr, "Failed to get output media subtype", false);

    if (out_subtype == subtype) {
      hr = decoder_->SetOutputType(0, out_media_type, 0);  // No flags
      RETURN_ON_HR_FAILURE(hr, "Failed to set decoder output type", false);
      return true;
    }
    out_media_type.Release();
  }
  return false;
}

bool DXVAVideoDecodeAccelerator::SendMFTMessage(MFT_MESSAGE_TYPE msg,
                                                int32 param) {
  HRESULT hr = decoder_->ProcessMessage(msg, param);
  return SUCCEEDED(hr);
}

// Gets the minimum buffer sizes for input and output samples. The MFT will not
// allocate buffers for either input or output, so we have to do it ourselves
// and make sure they're the correct size. We only provide decoding if DXVA is
// enabled.
bool DXVAVideoDecodeAccelerator::GetStreamsInfoAndBufferReqs() {
  HRESULT hr = decoder_->GetInputStreamInfo(0, &input_stream_info_);
  RETURN_ON_HR_FAILURE(hr, "Failed to get input stream info", false);

  hr = decoder_->GetOutputStreamInfo(0, &output_stream_info_);
  RETURN_ON_HR_FAILURE(hr, "Failed to get decoder output stream info", false);

  DVLOG(1) << "Input stream info: ";
  DVLOG(1) << "Max latency: " << input_stream_info_.hnsMaxLatency;
  // There should be three flags, one for requiring a whole frame be in a
  // single sample, one for requiring there be one buffer only in a single
  // sample, and one that specifies a fixed sample size (as in cbSize).
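  // These correspond to MFT_INPUT_STREAM_WHOLE_SAMPLES,
  // MFT_INPUT_STREAM_SINGLE_SAMPLE_PER_BUFFER and
  // MFT_INPUT_STREAM_FIXED_SAMPLE_SIZE (0x1 | 0x2 | 0x4), matching the 0x7
  // checked below.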
  CHECK_EQ(input_stream_info_.dwFlags, 0x7u);

  DVLOG(1) << "Min buffer size: " << input_stream_info_.cbSize;
  DVLOG(1) << "Max lookahead: " << input_stream_info_.cbMaxLookahead;
  DVLOG(1) << "Alignment: " << input_stream_info_.cbAlignment;

  DVLOG(1) << "Output stream info: ";
  // The flags here should be the same and mean the same thing, except when
  // DXVA is enabled, there is an extra 0x100 flag meaning the decoder will
  // allocate its own samples.
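  // That extra flag corresponds to MFT_OUTPUT_STREAM_PROVIDES_SAMPLES, which
  // is why 0x107 is expected below.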
  DVLOG(1) << "Flags: "
           << std::hex << std::showbase << output_stream_info_.dwFlags;
  CHECK_EQ(output_stream_info_.dwFlags, 0x107u);
  DVLOG(1) << "Min buffer size: " << output_stream_info_.cbSize;
  DVLOG(1) << "Alignment: " << output_stream_info_.cbAlignment;
  return true;
}

void DXVAVideoDecodeAccelerator::DoDecode() {
  // This function is also called from FlushInternal in a loop which could
  // result in the state transitioning to kStopped due to no decoded output.
  RETURN_AND_NOTIFY_ON_FAILURE((state_ == kNormal || state_ == kFlushing ||
                                state_ == kStopped),
      "DoDecode: not in normal/flushing/stopped state", ILLEGAL_STATE,);

  MFT_OUTPUT_DATA_BUFFER output_data_buffer = {0};
  DWORD status = 0;

  HRESULT hr = decoder_->ProcessOutput(0,  // No flags
                                       1,  // # of out streams to pull from
                                       &output_data_buffer,
                                       &status);
  IMFCollection* events = output_data_buffer.pEvents;
  if (events != NULL) {
    VLOG(1) << "Got events from ProcessOutput, but discarding";
    events->Release();
  }
  if (FAILED(hr)) {
    // A stream change needs further ProcessInput calls to get back decoder
    // output which is why we need to set the state to stopped.
    if (hr == MF_E_TRANSFORM_STREAM_CHANGE) {
      if (!SetDecoderOutputMediaType(MFVideoFormat_NV12)) {
        // Decoder didn't let us set NV12 output format. Not sure as to why
        // this can happen. Give up in disgust.
        NOTREACHED() << "Failed to set decoder output media type to NV12";
        state_ = kStopped;
      } else {
        DVLOG(1) << "Received output format change from the decoder."
                    " Recursively invoking DoDecode";
        DoDecode();
      }
      return;
    } else if (hr == MF_E_TRANSFORM_NEED_MORE_INPUT) {
      // No more output from the decoder. Stop playback.
      state_ = kStopped;
      return;
    } else {
      NOTREACHED() << "Unhandled error in DoDecode()";
      return;
    }
  }
  TRACE_EVENT_END_ETW("DXVAVideoDecodeAccelerator.Decoding", this, "");

  TRACE_COUNTER1("DXVA Decoding", "TotalPacketsBeforeDecode",
                 inputs_before_decode_);

  inputs_before_decode_ = 0;

  RETURN_AND_NOTIFY_ON_FAILURE(ProcessOutputSample(output_data_buffer.pSample),
      "Failed to process output sample.", PLATFORM_FAILURE,);
}

bool DXVAVideoDecodeAccelerator::ProcessOutputSample(IMFSample* sample) {
  RETURN_ON_FAILURE(sample, "Decode succeeded with NULL output sample", false);

  base::win::ScopedComPtr<IMFMediaBuffer> output_buffer;
  HRESULT hr = sample->GetBufferByIndex(0, output_buffer.Receive());
  RETURN_ON_HR_FAILURE(hr, "Failed to get buffer from output sample", false);

  base::win::ScopedComPtr<IDirect3DSurface9> surface;
  hr = MFGetService(output_buffer, MR_BUFFER_SERVICE,
                    IID_PPV_ARGS(surface.Receive()));
  RETURN_ON_HR_FAILURE(hr, "Failed to get D3D surface from output sample",
                       false);

  LONGLONG input_buffer_id = 0;
  RETURN_ON_HR_FAILURE(sample->GetSampleTime(&input_buffer_id),
                       "Failed to get input buffer id associated with sample",
                       false);

  pending_output_samples_.push_back(
      PendingSampleInfo(input_buffer_id, sample));

  // If we have available picture buffers to copy the output data then use the
  // first one and then flag it as not being available for use.
  if (output_picture_buffers_.size()) {
    ProcessPendingSamples();
    return true;
  }
  if (pictures_requested_) {
    DVLOG(1) << "Waiting for picture slots from the client.";
    return true;
  }

  // We only read the surface description, which contains its width/height,
  // when we need to request picture buffers from the client. Once we have
  // them, they are reused.
  D3DSURFACE_DESC surface_desc;
  hr = surface->GetDesc(&surface_desc);
  RETURN_ON_HR_FAILURE(hr, "Failed to get surface description", false);

  // Go ahead and request picture buffers.
  base::MessageLoop::current()->PostTask(FROM_HERE, base::Bind(
      &DXVAVideoDecodeAccelerator::RequestPictureBuffers,
      base::AsWeakPtr(this), surface_desc.Width, surface_desc.Height));

  pictures_requested_ = true;
  return true;
}

void DXVAVideoDecodeAccelerator::ProcessPendingSamples() {
  RETURN_AND_NOTIFY_ON_FAILURE(make_context_current_.Run(),
      "Failed to make context current", PLATFORM_FAILURE,);

  OutputBuffers::iterator index;

  for (index = output_picture_buffers_.begin();
       index != output_picture_buffers_.end() &&
       !pending_output_samples_.empty();
       ++index) {
    if (index->second->available()) {
      PendingSampleInfo sample_info = pending_output_samples_.front();

      base::win::ScopedComPtr<IMFMediaBuffer> output_buffer;
      HRESULT hr = sample_info.output_sample->GetBufferByIndex(
          0, output_buffer.Receive());
      RETURN_AND_NOTIFY_ON_HR_FAILURE(
          hr, "Failed to get buffer from output sample", PLATFORM_FAILURE,);

      base::win::ScopedComPtr<IDirect3DSurface9> surface;
      hr = MFGetService(output_buffer, MR_BUFFER_SERVICE,
                        IID_PPV_ARGS(surface.Receive()));
      RETURN_AND_NOTIFY_ON_HR_FAILURE(
          hr, "Failed to get D3D surface from output sample",
          PLATFORM_FAILURE,);

      D3DSURFACE_DESC surface_desc;
      hr = surface->GetDesc(&surface_desc);
      RETURN_AND_NOTIFY_ON_HR_FAILURE(
          hr, "Failed to get surface description", PLATFORM_FAILURE,);

      if (surface_desc.Width !=
              static_cast<uint32>(index->second->size().width()) ||
          surface_desc.Height !=
              static_cast<uint32>(index->second->size().height())) {
        HandleResolutionChanged(surface_desc.Width, surface_desc.Height);
        return;
      }

      RETURN_AND_NOTIFY_ON_FAILURE(
          index->second->CopyOutputSampleDataToPictureBuffer(surface),
          "Failed to copy output sample", PLATFORM_FAILURE,);

      media::Picture output_picture(index->second->id(),
                                    sample_info.input_buffer_id);
      base::MessageLoop::current()->PostTask(FROM_HERE, base::Bind(
          &DXVAVideoDecodeAccelerator::NotifyPictureReady,
          base::AsWeakPtr(this), output_picture));

      index->second->set_available(false);
      pending_output_samples_.pop_front();
    }
  }

  if (!pending_input_buffers_.empty() && pending_output_samples_.empty()) {
    base::MessageLoop::current()->PostTask(FROM_HERE, base::Bind(
        &DXVAVideoDecodeAccelerator::DecodePendingInputBuffers,
        base::AsWeakPtr(this)));
  }
}

void DXVAVideoDecodeAccelerator::StopOnError(
    media::VideoDecodeAccelerator::Error error) {
  DCHECK(CalledOnValidThread());

  if (client_)
    client_->NotifyError(error);
  client_ = NULL;

  if (state_ != kUninitialized) {
    Invalidate();
  }
}

void DXVAVideoDecodeAccelerator::Invalidate() {
  if (state_ == kUninitialized)
    return;
  output_picture_buffers_.clear();
  pending_output_samples_.clear();
  pending_input_buffers_.clear();
  decoder_.Release();
  MFShutdown();
  state_ = kUninitialized;
}

void DXVAVideoDecodeAccelerator::NotifyInitializeDone() {
  if (client_)
    client_->NotifyInitializeDone();
}

void DXVAVideoDecodeAccelerator::NotifyInputBufferRead(int input_buffer_id) {
  if (client_)
    client_->NotifyEndOfBitstreamBuffer(input_buffer_id);
}

void DXVAVideoDecodeAccelerator::NotifyFlushDone() {
  if (client_)
    client_->NotifyFlushDone();
}

void DXVAVideoDecodeAccelerator::NotifyResetDone() {
  if (client_)
    client_->NotifyResetDone();
}

void DXVAVideoDecodeAccelerator::RequestPictureBuffers(int width, int height) {
  // This task could execute after the decoder has been torn down.
  if (state_ != kUninitialized && client_) {
    client_->ProvidePictureBuffers(
        kNumPictureBuffers,
        gfx::Size(width, height),
        GL_TEXTURE_2D);
  }
}

void DXVAVideoDecodeAccelerator::NotifyPictureReady(
    const media::Picture& picture) {
  // This task could execute after the decoder has been torn down.
  if (state_ != kUninitialized && client_)
    client_->PictureReady(picture);
}

void DXVAVideoDecodeAccelerator::NotifyInputBuffersDropped() {
  if (!client_ || !pending_output_samples_.empty())
    return;

  for (PendingInputs::iterator it = pending_input_buffers_.begin();
       it != pending_input_buffers_.end(); ++it) {
    LONGLONG input_buffer_id = 0;
    RETURN_ON_HR_FAILURE((*it)->GetSampleTime(&input_buffer_id),
                         "Failed to get buffer id associated with sample",);
    client_->NotifyEndOfBitstreamBuffer(input_buffer_id);
  }
  pending_input_buffers_.clear();
}

void DXVAVideoDecodeAccelerator::DecodePendingInputBuffers() {
  RETURN_AND_NOTIFY_ON_FAILURE((state_ != kUninitialized),
      "Invalid state: " << state_, ILLEGAL_STATE,);

  if (pending_input_buffers_.empty() || !pending_output_samples_.empty())
    return;

  PendingInputs pending_input_buffers_copy;
  std::swap(pending_input_buffers_, pending_input_buffers_copy);

  for (PendingInputs::iterator it = pending_input_buffers_copy.begin();
       it != pending_input_buffers_copy.end(); ++it) {
    DecodeInternal(*it);
  }
}

void DXVAVideoDecodeAccelerator::FlushInternal() {
  // The DoDecode function sets the state to kStopped when the decoder returns
  // MF_E_TRANSFORM_NEED_MORE_INPUT.
  // The MFT decoder can buffer up to 30 frames worth of input before returning
  // an output frame. This loop here attempts to retrieve as many output frames
  // as possible from the buffered set.
  while (state_ != kStopped) {
    DoDecode();
    if (!pending_output_samples_.empty())
      return;
  }

  base::MessageLoop::current()->PostTask(FROM_HERE, base::Bind(
      &DXVAVideoDecodeAccelerator::NotifyFlushDone, base::AsWeakPtr(this)));

  state_ = kNormal;
}

void DXVAVideoDecodeAccelerator::DecodeInternal(
    const base::win::ScopedComPtr<IMFSample>& sample) {
  DCHECK(CalledOnValidThread());

  if (state_ == kUninitialized)
    return;

  if (!pending_output_samples_.empty() || !pending_input_buffers_.empty()) {
    pending_input_buffers_.push_back(sample);
    return;
  }

  if (!inputs_before_decode_) {
    TRACE_EVENT_BEGIN_ETW("DXVAVideoDecodeAccelerator.Decoding", this, "");
  }
  inputs_before_decode_++;

  HRESULT hr = decoder_->ProcessInput(0, sample, 0);
  // As per MSDN, if the decoder returns MF_E_NOTACCEPTING it means that it
  // has enough data to produce one or more output samples. In this case the
  // recommended options are to
  // 1. Generate new output by calling IMFTransform::ProcessOutput until it
  //    returns MF_E_TRANSFORM_NEED_MORE_INPUT.
  // 2. Flush the input data.
  // We implement the first option, i.e. to retrieve the output sample and then
  // process the input again. Failure in either of these steps is treated as a
  // decoder failure.
  if (hr == MF_E_NOTACCEPTING) {
    DoDecode();
    RETURN_AND_NOTIFY_ON_FAILURE((state_ == kStopped || state_ == kNormal),
        "Failed to process output. Unexpected decoder state: " << state_,
        PLATFORM_FAILURE,);
    hr = decoder_->ProcessInput(0, sample, 0);
    // If we continue to get the MF_E_NOTACCEPTING error we do the following:
    // 1. Add the input sample to the pending queue.
    // 2. If we don't have any output samples we post the
    //    DecodePendingInputBuffers task to process the pending input samples.
    //    If we have an output sample then the above task is posted when the
    //    output samples are sent to the client.
    // This is because we only support 1 pending output sample at any
    // given time due to the limitation with the Microsoft Media Foundation
    // decoder where it recycles the output Decoder surfaces.
    if (hr == MF_E_NOTACCEPTING) {
      pending_input_buffers_.push_back(sample);
      if (pending_output_samples_.empty()) {
        base::MessageLoop::current()->PostTask(FROM_HERE, base::Bind(
            &DXVAVideoDecodeAccelerator::DecodePendingInputBuffers,
            base::AsWeakPtr(this)));
      }
      return;
    }
  }
  RETURN_AND_NOTIFY_ON_HR_FAILURE(hr, "Failed to process input sample",
                                  PLATFORM_FAILURE,);

  DoDecode();

  RETURN_AND_NOTIFY_ON_FAILURE((state_ == kStopped || state_ == kNormal),
      "Failed to process output. Unexpected decoder state: " << state_,
      ILLEGAL_STATE,);

  LONGLONG input_buffer_id = 0;
  RETURN_ON_HR_FAILURE(sample->GetSampleTime(&input_buffer_id),
      "Failed to get input buffer id associated with sample",);
  // The Microsoft Media Foundation decoder internally buffers up to 30 frames
  // before returning a decoded frame. We need to inform the client that this
  // input buffer is processed as it may stop sending us further input.
  // Note: This may break clients which expect every input buffer to be
  // associated with a decoded output buffer.
  // TODO(ananta)
  // Do some more investigation into whether it is possible to get the MFT
  // decoder to emit an output packet for every input packet.
  // http://code.google.com/p/chromium/issues/detail?id=108121
  // http://code.google.com/p/chromium/issues/detail?id=150925
  base::MessageLoop::current()->PostTask(FROM_HERE, base::Bind(
      &DXVAVideoDecodeAccelerator::NotifyInputBufferRead,
      base::AsWeakPtr(this), input_buffer_id));
}

void DXVAVideoDecodeAccelerator::HandleResolutionChanged(int width,
                                                         int height) {
  base::MessageLoop::current()->PostTask(FROM_HERE, base::Bind(
      &DXVAVideoDecodeAccelerator::DismissStaleBuffers,
      base::AsWeakPtr(this), output_picture_buffers_));

  base::MessageLoop::current()->PostTask(FROM_HERE, base::Bind(
      &DXVAVideoDecodeAccelerator::RequestPictureBuffers,
      base::AsWeakPtr(this), width, height));

  output_picture_buffers_.clear();
}

void DXVAVideoDecodeAccelerator::DismissStaleBuffers(
    const OutputBuffers& picture_buffers) {
  OutputBuffers::const_iterator index;

  for (index = picture_buffers.begin();
       index != picture_buffers.end();
       ++index) {
    DVLOG(1) << "Dismissing picture id: " << index->second->id();
    client_->DismissPictureBuffer(index->second->id());
  }
}

}  // namespace content