Supervised user whitelists: Cleanup
[chromium-blink-merge.git] / content / common / gpu / media / vaapi_video_decode_accelerator.cc
blob060c87d93926b4f6f132495dbb06509dd4248876
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
5 #include "content/common/gpu/media/vaapi_video_decode_accelerator.h"
7 #include "base/bind.h"
8 #include "base/logging.h"
9 #include "base/metrics/histogram.h"
10 #include "base/stl_util.h"
11 #include "base/strings/string_util.h"
12 #include "base/synchronization/waitable_event.h"
13 #include "base/trace_event/trace_event.h"
14 #include "content/common/gpu/gpu_channel.h"
15 #include "content/common/gpu/media/accelerated_video_decoder.h"
16 #include "content/common/gpu/media/h264_decoder.h"
17 #include "content/common/gpu/media/vaapi_picture.h"
18 #include "media/base/bind_to_current_loop.h"
19 #include "media/video/picture.h"
20 #include "ui/gl/gl_bindings.h"
21 #include "ui/gl/gl_image.h"
23 namespace content {
25 namespace {
26 // UMA errors that the VaapiVideoDecodeAccelerator class reports.
// Failure buckets for the Media.VAVDA.DecoderFailure UMA histogram.
// VAVDA_DECODER_FAILURES_MAX must remain last: it is passed as the
// histogram's exclusive upper bound in ReportToUMA().
27 enum VAVDADecoderFailure {
28 VAAPI_ERROR = 0,
29 VAVDA_DECODER_FAILURES_MAX,
// Records |failure| into the Media.VAVDA.DecoderFailure enumeration
// histogram.
33 static void ReportToUMA(VAVDADecoderFailure failure) {
34 UMA_HISTOGRAM_ENUMERATION("Media.VAVDA.DecoderFailure", failure,
35 VAVDA_DECODER_FAILURES_MAX);
// If |result| is false: logs |log|, reports |error_code| to the client via
// NotifyError(), and returns |ret| from the enclosing function (|ret| is
// left empty for void functions). do/while(0) makes the macro behave as a
// single statement after `if`/`else`.
38 #define RETURN_AND_NOTIFY_ON_FAILURE(result, log, error_code, ret) \
39 do { \
40 if (!(result)) { \
41 LOG(ERROR) << log; \
42 NotifyError(error_code); \
43 return ret; \
44 } \
45 } while (0)
// Pairs a decoded VA surface with the id of the bitstream buffer it was
// decoded from. RefCountedThreadSafe because references are held across the
// decoder thread and the message-loop thread.
47 class VaapiVideoDecodeAccelerator::VaapiDecodeSurface
48 : public base::RefCountedThreadSafe<VaapiDecodeSurface> {
49 public:
50 VaapiDecodeSurface(int32 bitstream_id,
51 const scoped_refptr<VASurface>& va_surface);
53 int32 bitstream_id() const { return bitstream_id_; }
54 scoped_refptr<VASurface> va_surface() { return va_surface_; }
56 private:
57 friend class base::RefCountedThreadSafe<VaapiDecodeSurface>;
58 ~VaapiDecodeSurface();
60 int32 bitstream_id_;
61 scoped_refptr<VASurface> va_surface_;
// Binds |bitstream_id| to the VA surface that holds its decoded pixels.
64 VaapiVideoDecodeAccelerator::VaapiDecodeSurface::VaapiDecodeSurface(
65 int32 bitstream_id,
66 const scoped_refptr<VASurface>& va_surface)
67 : bitstream_id_(bitstream_id), va_surface_(va_surface) {
// Destructor is private; instances die via RefCountedThreadSafe release.
70 VaapiVideoDecodeAccelerator::VaapiDecodeSurface::~VaapiDecodeSurface() {
// H264Picture subclass that carries the VaapiDecodeSurface into which this
// picture is decoded; retrieved by the accelerator via dec_surface().
73 class VaapiH264Picture : public H264Picture {
74 public:
75 VaapiH264Picture(const scoped_refptr<
76 VaapiVideoDecodeAccelerator::VaapiDecodeSurface>& dec_surface);
78 VaapiH264Picture* AsVaapiH264Picture() override { return this; }
79 scoped_refptr<VaapiVideoDecodeAccelerator::VaapiDecodeSurface> dec_surface() {
80 return dec_surface_;
83 private:
84 ~VaapiH264Picture() override;
86 scoped_refptr<VaapiVideoDecodeAccelerator::VaapiDecodeSurface> dec_surface_;
88 DISALLOW_COPY_AND_ASSIGN(VaapiH264Picture);
// Takes a reference on |dec_surface| for the lifetime of this picture.
91 VaapiH264Picture::VaapiH264Picture(const scoped_refptr<
92 VaapiVideoDecodeAccelerator::VaapiDecodeSurface>& dec_surface)
93 : dec_surface_(dec_surface) {
96 VaapiH264Picture::~VaapiH264Picture() {
// H264Decoder::H264Accelerator implementation that translates parsed H.264
// metadata/slices into VA-API parameter buffers and submits them through
// |vaapi_wrapper_| on behalf of |vaapi_dec_|. Holds raw, non-owning
// pointers to both.
99 class VaapiVideoDecodeAccelerator::VaapiH264Accelerator
100 : public H264Decoder::H264Accelerator {
101 public:
102 VaapiH264Accelerator(VaapiVideoDecodeAccelerator* vaapi_dec,
103 VaapiWrapper* vaapi_wrapper);
104 ~VaapiH264Accelerator() override;
106 // H264Decoder::H264Accelerator implementation.
107 scoped_refptr<H264Picture> CreateH264Picture() override;
109 bool SubmitFrameMetadata(const media::H264SPS* sps,
110 const media::H264PPS* pps,
111 const H264DPB& dpb,
112 const H264Picture::Vector& ref_pic_listp0,
113 const H264Picture::Vector& ref_pic_listb0,
114 const H264Picture::Vector& ref_pic_listb1,
115 const scoped_refptr<H264Picture>& pic) override;
117 bool SubmitSlice(const media::H264PPS* pps,
118 const media::H264SliceHeader* slice_hdr,
119 const H264Picture::Vector& ref_pic_list0,
120 const H264Picture::Vector& ref_pic_list1,
121 const scoped_refptr<H264Picture>& pic,
122 const uint8_t* data,
123 size_t size) override;
125 bool SubmitDecode(const scoped_refptr<H264Picture>& pic) override;
126 bool OutputPicture(const scoped_refptr<H264Picture>& pic) override;
128 void Reset() override;
130 private:
// Extracts the VaapiDecodeSurface carried by a VaapiH264Picture.
131 scoped_refptr<VaapiDecodeSurface> H264PictureToVaapiDecodeSurface(
132 const scoped_refptr<H264Picture>& pic);
134 void FillVAPicture(VAPictureH264* va_pic, scoped_refptr<H264Picture> pic);
// Fills up to |num_pics| entries of |va_pics| from the DPB; returns the
// number of entries written.
135 int FillVARefFramesFromDPB(const H264DPB& dpb,
136 VAPictureH264* va_pics,
137 int num_pics);
139 VaapiWrapper* vaapi_wrapper_;
140 VaapiVideoDecodeAccelerator* vaapi_dec_;
142 DISALLOW_COPY_AND_ASSIGN(VaapiH264Accelerator);
// InputBuffer starts out empty; its |shm|, |id| and |size| are filled in by
// MapAndQueueNewInputBuffer().
145 VaapiVideoDecodeAccelerator::InputBuffer::InputBuffer() : id(0), size(0) {
148 VaapiVideoDecodeAccelerator::InputBuffer::~InputBuffer() {
// Reports |error| to the client and schedules teardown. Callable from either
// thread: when invoked off |message_loop_| (i.e. on the decoder thread) it
// re-posts itself to |message_loop_| and returns.
151 void VaapiVideoDecodeAccelerator::NotifyError(Error error) {
152 if (message_loop_ != base::MessageLoop::current()) {
153 DCHECK(decoder_thread_proxy_->BelongsToCurrentThread());
154 message_loop_->PostTask(FROM_HERE, base::Bind(
155 &VaapiVideoDecodeAccelerator::NotifyError, weak_this_, error));
156 return;
159 // Post Cleanup() as a task so we don't recursively acquire lock_.
160 message_loop_->PostTask(FROM_HERE, base::Bind(
161 &VaapiVideoDecodeAccelerator::Cleanup, weak_this_));
163 LOG(ERROR) << "Notifying of error " << error;
164 if (client_) {
// Resetting the factory invalidates |client_| so no further client
// callbacks are issued after the error notification.
165 client_->NotifyError(error);
166 client_ptr_factory_.reset();
170 VaapiPicture* VaapiVideoDecodeAccelerator::PictureById(
171 int32 picture_buffer_id) {
172 Pictures::iterator it = pictures_.find(picture_buffer_id);
173 if (it == pictures_.end()) {
174 LOG(ERROR) << "Picture id " << picture_buffer_id << " does not exist";
175 return NULL;
178 return it->second.get();
// Constructed on the ChildThread message loop, which becomes
// |message_loop_|. |weak_this_| is handed to cross-thread callbacks, and the
// surface-release callback is bound back to this loop via BindToCurrentLoop
// so RecycleVASurfaceID always runs on |message_loop_|.
181 VaapiVideoDecodeAccelerator::VaapiVideoDecodeAccelerator(
182 const base::Callback<bool(void)>& make_context_current,
183 const base::Callback<void(uint32, uint32, scoped_refptr<gfx::GLImage>)>&
184 bind_image)
185 : make_context_current_(make_context_current),
186 state_(kUninitialized),
187 input_ready_(&lock_),
188 surfaces_available_(&lock_),
189 message_loop_(base::MessageLoop::current()),
190 decoder_thread_("VaapiDecoderThread"),
191 num_frames_at_client_(0),
192 num_stream_bufs_at_decoder_(0),
193 finish_flush_pending_(false),
194 awaiting_va_surfaces_recycle_(false),
195 requested_num_pics_(0),
196 bind_image_(bind_image),
197 weak_this_factory_(this) {
198 weak_this_ = weak_this_factory_.GetWeakPtr();
199 va_surface_release_cb_ = media::BindToCurrentLoop(
200 base::Bind(&VaapiVideoDecodeAccelerator::RecycleVASurfaceID, weak_this_));
// Destruction must happen on the creating message loop (checked below).
203 VaapiVideoDecodeAccelerator::~VaapiVideoDecodeAccelerator() {
204 DCHECK_EQ(message_loop_, base::MessageLoop::current());
// Sets up VAAPI for |profile| (H.264 only), starts the decoder thread and
// moves to kIdle. Returns false on any unsupported configuration without
// calling NotifyError().
207 bool VaapiVideoDecodeAccelerator::Initialize(media::VideoCodecProfile profile,
208 Client* client) {
209 DCHECK_EQ(message_loop_, base::MessageLoop::current());
211 client_ptr_factory_.reset(new base::WeakPtrFactory<Client>(client));
212 client_ = client_ptr_factory_->GetWeakPtr();
214 base::AutoLock auto_lock(lock_);
215 DCHECK_EQ(state_, kUninitialized);
216 DVLOG(2) << "Initializing VAVDA, profile: " << profile;
// Desktop GL (GLX) is required on X11; EGL/GLES2 is required on Ozone.
218 #if defined(USE_X11)
219 if (gfx::GetGLImplementation() != gfx::kGLImplementationDesktopGL) {
220 DVLOG(1) << "HW video decode acceleration not available without "
221 "DesktopGL (GLX).";
222 return false;
224 #elif defined(USE_OZONE)
225 if (gfx::GetGLImplementation() != gfx::kGLImplementationEGLGLES2) {
226 DVLOG(1) << "HW video decode acceleration not available without "
227 << "EGLGLES2.";
228 return false;
230 #endif  // USE_X11
232 vaapi_wrapper_ = VaapiWrapper::CreateForVideoCodec(
233 VaapiWrapper::kDecode, profile, base::Bind(&ReportToUMA, VAAPI_ERROR));
235 if (!vaapi_wrapper_.get()) {
236 DVLOG(1) << "Failed initializing VAAPI for profile " << profile;
237 return false;
// Only H.264 profiles are accepted by this accelerator.
240 if (!(profile >= media::H264PROFILE_MIN &&
241 profile <= media::H264PROFILE_MAX)) {
242 DLOG(ERROR) << "Unsupported profile " << profile;
243 return false;
246 h264_accelerator_.reset(new VaapiH264Accelerator(this, vaapi_wrapper_.get()));
247 decoder_.reset(new H264Decoder(h264_accelerator_.get()));
249 CHECK(decoder_thread_.Start());
250 decoder_thread_proxy_ = decoder_thread_.message_loop_proxy();
252 state_ = kIdle;
253 return true;
// Downloads |va_surface|'s decoded content into |picture|'s buffer and
// notifies the client that the picture (for bitstream buffer |input_id|) is
// ready. Runs on |message_loop_|.
256 void VaapiVideoDecodeAccelerator::OutputPicture(
257 const scoped_refptr<VASurface>& va_surface,
258 int32 input_id,
259 VaapiPicture* picture) {
260 DCHECK_EQ(message_loop_, base::MessageLoop::current());
262 int32 output_id = picture->picture_buffer_id();
264 TRACE_EVENT2("Video Decoder", "VAVDA::OutputSurface",
265 "input_id", input_id,
266 "output_id", output_id);
268 DVLOG(3) << "Outputting VASurface " << va_surface->id()
269 << " into pixmap bound to picture buffer id " << output_id;
271 RETURN_AND_NOTIFY_ON_FAILURE(picture->DownloadFromSurface(va_surface),
272 "Failed putting surface into pixmap",
273 PLATFORM_FAILURE, );
275 // Notify the client a picture is ready to be displayed.
276 ++num_frames_at_client_;
277 TRACE_COUNTER1("Video Decoder", "Textures at client", num_frames_at_client_);
278 DVLOG(4) << "Notifying output picture id " << output_id
279 << " for input "<< input_id << " is ready";
280 // TODO(posciak): Use visible size from decoder here instead
281 // (crbug.com/402760).
282 if (client_)
283 client_->PictureReady(media::Picture(output_id, input_id,
284 gfx::Rect(picture->size()),
285 picture->AllowOverlay()));
// Outputs one pending picture if both a queued output callback and a free
// picture buffer are available; when the output queue drains during a flush,
// completes the flush.
288 void VaapiVideoDecodeAccelerator::TryOutputSurface() {
289 DCHECK_EQ(message_loop_, base::MessageLoop::current());
291 // Handle Destroy() arriving while pictures are queued for output.
292 if (!client_)
293 return;
295 if (pending_output_cbs_.empty() || output_buffers_.empty())
296 return;
298 OutputCB output_cb = pending_output_cbs_.front();
299 pending_output_cbs_.pop();
301 VaapiPicture* picture = PictureById(output_buffers_.front());
302 DCHECK(picture);
303 output_buffers_.pop();
305 output_cb.Run(picture);
307 if (finish_flush_pending_ && pending_output_cbs_.empty())
308 FinishFlush();
// Maps |bitstream_buffer|'s shared memory read-only, wraps it into an
// InputBuffer and queues it for the decoder thread, signalling
// |input_ready_| to wake a waiting GetInputBuffer_Locked().
311 void VaapiVideoDecodeAccelerator::MapAndQueueNewInputBuffer(
312 const media::BitstreamBuffer& bitstream_buffer) {
313 DCHECK_EQ(message_loop_, base::MessageLoop::current());
314 TRACE_EVENT1("Video Decoder", "MapAndQueueNewInputBuffer", "input_id",
315 bitstream_buffer.id());
317 DVLOG(4) << "Mapping new input buffer id: " << bitstream_buffer.id()
318 << " size: " << (int)bitstream_buffer.size();
// The |true| marks the mapping read-only.
320 scoped_ptr<base::SharedMemory> shm(
321 new base::SharedMemory(bitstream_buffer.handle(), true));
322 RETURN_AND_NOTIFY_ON_FAILURE(shm->Map(bitstream_buffer.size()),
323 "Failed to map input buffer", UNREADABLE_INPUT,);
325 base::AutoLock auto_lock(lock_);
327 // Set up a new input buffer and queue it for later.
328 linked_ptr<InputBuffer> input_buffer(new InputBuffer());
329 input_buffer->shm.reset(shm.release());
330 input_buffer->id = bitstream_buffer.id();
331 input_buffer->size = bitstream_buffer.size();
333 ++num_stream_bufs_at_decoder_;
334 TRACE_COUNTER1("Video Decoder", "Stream buffers at decoder",
335 num_stream_bufs_at_decoder_);
337 input_buffers_.push(input_buffer);
338 input_ready_.Signal();
// Dequeues the next input buffer into |curr_input_buffer_| and points
// |decoder_| at its data, blocking on |input_ready_| while the queue is
// empty in kDecoding/kIdle. Returns false when decoding should stop
// (reset/destroy, or a flush with nothing left queued). Caller holds
// |lock_|; runs on the decoder thread.
341 bool VaapiVideoDecodeAccelerator::GetInputBuffer_Locked() {
342 DCHECK(decoder_thread_proxy_->BelongsToCurrentThread());
343 lock_.AssertAcquired();
345 if (curr_input_buffer_.get())
346 return true;
348 // Will only wait if it is expected that in current state new buffers will
349 // be queued from the client via Decode(). The state can change during wait.
350 while (input_buffers_.empty() && (state_ == kDecoding || state_ == kIdle)) {
351 input_ready_.Wait();
354 // We could have got woken up in a different state or never got to sleep
355 // due to current state; check for that.
356 switch (state_) {
357 case kFlushing:
358 // Here we are only interested in finishing up decoding buffers that are
359 // already queued up. Otherwise will stop decoding.
360 if (input_buffers_.empty())
361 return false;
362 // else fallthrough
363 case kDecoding:
364 case kIdle:
365 DCHECK(!input_buffers_.empty());
367 curr_input_buffer_ = input_buffers_.front();
368 input_buffers_.pop();
370 DVLOG(4) << "New current bitstream buffer, id: "
371 << curr_input_buffer_->id
372 << " size: " << curr_input_buffer_->size;
374 decoder_->SetStream(
375 static_cast<uint8*>(curr_input_buffer_->shm->memory()),
376 curr_input_buffer_->size);
377 return true;
379 default:
380 // We got woken up due to being destroyed/reset, ignore any already
381 // queued inputs.
382 return false;
// Drops |curr_input_buffer_| and notifies the client (on |message_loop_|)
// that its bitstream buffer has been consumed. Caller holds |lock_|; runs on
// the decoder thread.
386 void VaapiVideoDecodeAccelerator::ReturnCurrInputBuffer_Locked() {
387 lock_.AssertAcquired();
388 DCHECK(decoder_thread_proxy_->BelongsToCurrentThread());
389 DCHECK(curr_input_buffer_.get());
391 int32 id = curr_input_buffer_->id;
392 curr_input_buffer_.reset();
393 DVLOG(4) << "End of input buffer " << id;
394 message_loop_->PostTask(FROM_HERE, base::Bind(
395 &Client::NotifyEndOfBitstreamBuffer, client_, id));
397 --num_stream_bufs_at_decoder_;
398 TRACE_COUNTER1("Video Decoder", "Stream buffers at decoder",
399 num_stream_bufs_at_decoder_);
402 // TODO(posciak): refactor the whole class to remove sleeping in wait for
403 // surfaces, and reschedule DecodeTask instead.
// Blocks on |surfaces_available_| until a VA surface is free or the state
// leaves the set in which decoding may continue. Returns true iff decoding
// should proceed. Caller holds |lock_|; runs on the decoder thread.
404 bool VaapiVideoDecodeAccelerator::WaitForSurfaces_Locked() {
405 lock_.AssertAcquired();
406 DCHECK(decoder_thread_proxy_->BelongsToCurrentThread());
408 while (available_va_surfaces_.empty() &&
409 (state_ == kDecoding || state_ == kFlushing || state_ == kIdle)) {
410 surfaces_available_.Wait();
// Re-check the state: the wait can end because of a state change rather
// than a surface becoming available.
413 if (state_ != kDecoding && state_ != kFlushing && state_ != kIdle)
414 return false;
416 return true;
// Decoder-thread main loop: feeds queued input to |decoder_| until it
// requests new surfaces, runs out of data/surfaces, or hits an error.
419 void VaapiVideoDecodeAccelerator::DecodeTask() {
420 DCHECK(decoder_thread_proxy_->BelongsToCurrentThread());
421 TRACE_EVENT0("Video Decoder", "VAVDA::DecodeTask");
422 base::AutoLock auto_lock(lock_);
424 if (state_ != kDecoding)
425 return;
427 // Main decode task.
428 DVLOG(4) << "Decode task";
430 // Try to decode what stream data is (still) in the decoder until we run out
431 // of it.
432 while (GetInputBuffer_Locked()) {
433 DCHECK(curr_input_buffer_.get());
435 AcceleratedVideoDecoder::DecodeResult res;
437 // We are OK releasing the lock here, as decoder never calls our methods
438 // directly and we will reacquire the lock before looking at state again.
439 // This is the main decode function of the decoder and while keeping
440 // the lock for its duration would be fine, it would defeat the purpose
441 // of having a separate decoder thread.
442 base::AutoUnlock auto_unlock(lock_);
443 res = decoder_->Decode();
446 switch (res) {
447 case AcceleratedVideoDecoder::kAllocateNewSurfaces:
448 DVLOG(1) << "Decoder requesting a new set of surfaces";
449 message_loop_->PostTask(FROM_HERE, base::Bind(
450 &VaapiVideoDecodeAccelerator::InitiateSurfaceSetChange, weak_this_,
451 decoder_->GetRequiredNumOfPictures(),
452 decoder_->GetPicSize()));
453 // We'll get rescheduled once ProvidePictureBuffers() finishes.
454 return;
456 case AcceleratedVideoDecoder::kRanOutOfStreamData:
457 ReturnCurrInputBuffer_Locked();
458 break;
460 case AcceleratedVideoDecoder::kRanOutOfSurfaces:
461 // No more output buffers in the decoder, try getting more or go to
462 // sleep waiting for them.
463 if (!WaitForSurfaces_Locked())
464 return;
466 break;
468 case AcceleratedVideoDecoder::kDecodeError:
469 RETURN_AND_NOTIFY_ON_FAILURE(false, "Error decoding stream",
470 PLATFORM_FAILURE, );
471 return;
// Begins replacing the current surface set with |num_pics| surfaces of
// |size|: records the new requirements and starts the recycle wait cycle.
// Runs on |message_loop_|.
476 void VaapiVideoDecodeAccelerator::InitiateSurfaceSetChange(size_t num_pics,
477 gfx::Size size) {
478 DCHECK_EQ(message_loop_, base::MessageLoop::current());
479 DCHECK(!awaiting_va_surfaces_recycle_);
481 // At this point decoder has stopped running and has already posted onto our
482 // loop any remaining output request callbacks, which executed before we got
483 // here. Some of them might have been pended though, because we might not
484 // have had enough TFPictures to output surfaces to. Initiate a wait cycle,
485 // which will wait for client to return enough PictureBuffers to us, so that
486 // we can finish all pending output callbacks, releasing associated surfaces.
487 DVLOG(1) << "Initiating surface set change";
488 awaiting_va_surfaces_recycle_ = true;
490 requested_num_pics_ = num_pics;
491 requested_pic_size_ = size;
493 TryFinishSurfaceSetChange();
// Completes a surface set change once every surface has been returned:
// destroys the old VA surfaces, dismisses all PictureBuffers and requests a
// new set from the client. Re-posts itself while surfaces are still out.
496 void VaapiVideoDecodeAccelerator::TryFinishSurfaceSetChange() {
497 DCHECK_EQ(message_loop_, base::MessageLoop::current());
499 if (!awaiting_va_surfaces_recycle_)
500 return;
502 if (!pending_output_cbs_.empty() ||
503 pictures_.size() != available_va_surfaces_.size()) {
504 // Either:
505 // 1. Not all pending pending output callbacks have been executed yet.
506 // Wait for the client to return enough pictures and retry later.
507 // 2. The above happened and all surface release callbacks have been posted
508 // as the result, but not all have executed yet. Post ourselves after them
509 // to let them release surfaces.
510 DVLOG(2) << "Awaiting pending output/surface release callbacks to finish";
511 message_loop_->PostTask(FROM_HERE, base::Bind(
512 &VaapiVideoDecodeAccelerator::TryFinishSurfaceSetChange, weak_this_));
513 return;
516 // All surfaces released, destroy them and dismiss all PictureBuffers.
517 awaiting_va_surfaces_recycle_ = false;
518 available_va_surfaces_.clear();
519 vaapi_wrapper_->DestroySurfaces();
521 for (Pictures::iterator iter = pictures_.begin(); iter != pictures_.end();
522 ++iter) {
523 DVLOG(2) << "Dismissing picture id: " << iter->first;
524 if (client_)
525 client_->DismissPictureBuffer(iter->first);
527 pictures_.clear();
529 // And ask for a new set as requested.
530 DVLOG(1) << "Requesting " << requested_num_pics_ << " pictures of size: "
531 << requested_pic_size_.ToString();
533 message_loop_->PostTask(
534 FROM_HERE,
535 base::Bind(&Client::ProvidePictureBuffers, client_, requested_num_pics_,
536 requested_pic_size_, VaapiPicture::GetGLTextureTarget()));
// Client entry point: maps and queues |bitstream_buffer|, then (re)starts
// the decoder thread when idle. Accepts buffers during kResetting so the
// client can queue after-seek data early.
539 void VaapiVideoDecodeAccelerator::Decode(
540 const media::BitstreamBuffer& bitstream_buffer) {
541 DCHECK_EQ(message_loop_, base::MessageLoop::current());
543 TRACE_EVENT1("Video Decoder", "VAVDA::Decode", "Buffer id",
544 bitstream_buffer.id());
546 // We got a new input buffer from the client, map it and queue for later use.
547 MapAndQueueNewInputBuffer(bitstream_buffer);
549 base::AutoLock auto_lock(lock_);
550 switch (state_) {
551 case kIdle:
552 state_ = kDecoding;
553 decoder_thread_proxy_->PostTask(FROM_HERE, base::Bind(
554 &VaapiVideoDecodeAccelerator::DecodeTask,
555 base::Unretained(this)));
556 break;
558 case kDecoding:
559 // Decoder already running, fallthrough.
560 case kResetting:
561 // When resetting, allow accumulating bitstream buffers, so that
562 // the client can queue after-seek-buffers while we are finishing with
563 // the before-seek one.
564 break;
566 default:
567 RETURN_AND_NOTIFY_ON_FAILURE(false,
568 "Decode request from client in invalid state: " << state_,
569 PLATFORM_FAILURE, );
570 break;
// Returns |va_surface_id| to the free pool and wakes a decoder-thread waiter
// in WaitForSurfaces_Locked(). Always runs on |message_loop_| (bound via
// BindToCurrentLoop in the constructor).
574 void VaapiVideoDecodeAccelerator::RecycleVASurfaceID(
575 VASurfaceID va_surface_id) {
576 DCHECK_EQ(message_loop_, base::MessageLoop::current());
577 base::AutoLock auto_lock(lock_);
579 available_va_surfaces_.push_back(va_surface_id);
580 surfaces_available_.Signal();
583 void VaapiVideoDecodeAccelerator::AssignPictureBuffers(
584 const std::vector<media::PictureBuffer>& buffers) {
585 DCHECK_EQ(message_loop_, base::MessageLoop::current());
587 base::AutoLock auto_lock(lock_);
588 DCHECK(pictures_.empty());
590 while (!output_buffers_.empty())
591 output_buffers_.pop();
593 RETURN_AND_NOTIFY_ON_FAILURE(
594 buffers.size() == requested_num_pics_,
595 "Got an invalid number of picture buffers. (Got " << buffers.size()
596 << ", requested " << requested_num_pics_ << ")", INVALID_ARGUMENT, );
597 DCHECK(requested_pic_size_ == buffers[0].size());
599 std::vector<VASurfaceID> va_surface_ids;
600 RETURN_AND_NOTIFY_ON_FAILURE(
601 vaapi_wrapper_->CreateSurfaces(requested_pic_size_,
602 buffers.size(),
603 &va_surface_ids),
604 "Failed creating VA Surfaces", PLATFORM_FAILURE, );
605 DCHECK_EQ(va_surface_ids.size(), buffers.size());
607 for (size_t i = 0; i < buffers.size(); ++i) {
608 DVLOG(2) << "Assigning picture id: " << buffers[i].id()
609 << " to texture id: " << buffers[i].texture_id()
610 << " VASurfaceID: " << va_surface_ids[i];
612 linked_ptr<VaapiPicture> picture(VaapiPicture::CreatePicture(
613 vaapi_wrapper_.get(), make_context_current_, buffers[i].id(),
614 buffers[i].texture_id(), requested_pic_size_));
616 scoped_refptr<gfx::GLImage> image = picture->GetImageToBind();
617 if (image) {
618 bind_image_.Run(buffers[i].internal_texture_id(),
619 VaapiPicture::GetGLTextureTarget(), image);
622 RETURN_AND_NOTIFY_ON_FAILURE(
623 picture.get(), "Failed assigning picture buffer to a texture.",
624 PLATFORM_FAILURE, );
626 bool inserted =
627 pictures_.insert(std::make_pair(buffers[i].id(), picture)).second;
628 DCHECK(inserted);
630 output_buffers_.push(buffers[i].id());
631 available_va_surfaces_.push_back(va_surface_ids[i]);
632 surfaces_available_.Signal();
635 state_ = kDecoding;
636 decoder_thread_proxy_->PostTask(FROM_HERE, base::Bind(
637 &VaapiVideoDecodeAccelerator::DecodeTask, base::Unretained(this)));
// Client returned |picture_buffer_id|; make it available for output again
// and try to satisfy a pending output immediately.
640 void VaapiVideoDecodeAccelerator::ReusePictureBuffer(int32 picture_buffer_id) {
641 DCHECK_EQ(message_loop_, base::MessageLoop::current());
642 TRACE_EVENT1("Video Decoder", "VAVDA::ReusePictureBuffer", "Picture id",
643 picture_buffer_id);
645 --num_frames_at_client_;
646 TRACE_COUNTER1("Video Decoder", "Textures at client", num_frames_at_client_);
648 output_buffers_.push(picture_buffer_id);
649 TryOutputSurface();
// Runs on the decoder thread after all pre-flush decode tasks: drains
// |decoder_|, resets it to idle, and posts FinishFlush() to |message_loop_|.
652 void VaapiVideoDecodeAccelerator::FlushTask() {
653 DCHECK(decoder_thread_proxy_->BelongsToCurrentThread());
654 DVLOG(1) << "Flush task";
656 // First flush all the pictures that haven't been outputted, notifying the
657 // client to output them.
658 bool res = decoder_->Flush();
659 RETURN_AND_NOTIFY_ON_FAILURE(res, "Failed flushing the decoder.",
660 PLATFORM_FAILURE, );
662 // Put the decoder in idle state, ready to resume.
663 decoder_->Reset();
665 message_loop_->PostTask(FROM_HERE, base::Bind(
666 &VaapiVideoDecodeAccelerator::FinishFlush, weak_this_));
// Client flush request: switch to kFlushing, queue FlushTask() behind any
// outstanding decode work, and wake both condition-variable waiters so the
// decoder thread can observe the state change.
669 void VaapiVideoDecodeAccelerator::Flush() {
670 DCHECK_EQ(message_loop_, base::MessageLoop::current());
671 DVLOG(1) << "Got flush request";
673 base::AutoLock auto_lock(lock_);
674 state_ = kFlushing;
675 // Queue a flush task after all existing decoding tasks to clean up.
676 decoder_thread_proxy_->PostTask(FROM_HERE, base::Bind(
677 &VaapiVideoDecodeAccelerator::FlushTask, base::Unretained(this)));
679 input_ready_.Signal();
680 surfaces_available_.Signal();
// Completes a flush on |message_loop_| once all pending outputs have been
// delivered; otherwise sets |finish_flush_pending_| so TryOutputSurface()
// re-invokes us when the output queue drains.
683 void VaapiVideoDecodeAccelerator::FinishFlush() {
684 DCHECK_EQ(message_loop_, base::MessageLoop::current());
686 finish_flush_pending_ = false;
688 base::AutoLock auto_lock(lock_);
689 if (state_ != kFlushing) {
690 DCHECK_EQ(state_, kDestroying);
691 return;  // We could've gotten destroyed already.
694 // Still waiting for textures from client to finish outputting all pending
695 // frames. Try again later.
696 if (!pending_output_cbs_.empty()) {
697 finish_flush_pending_ = true;
698 return;
701 state_ = kIdle;
703 message_loop_->PostTask(FROM_HERE, base::Bind(
704 &Client::NotifyFlushDone, client_));
706 DVLOG(1) << "Flush finished";
// Runs on the decoder thread after all pre-reset decode tasks: resets
// |decoder_|, returns any in-flight input buffer, and posts FinishReset().
709 void VaapiVideoDecodeAccelerator::ResetTask() {
710 DCHECK(decoder_thread_proxy_->BelongsToCurrentThread());
711 DVLOG(1) << "ResetTask";
713 // All the decoding tasks from before the reset request from client are done
714 // by now, as this task was scheduled after them and client is expected not
715 // to call Decode() after Reset() and before NotifyResetDone.
716 decoder_->Reset();
718 base::AutoLock auto_lock(lock_);
720 // Return current input buffer, if present.
721 if (curr_input_buffer_.get())
722 ReturnCurrInputBuffer_Locked();
724 // And let client know that we are done with reset.
725 message_loop_->PostTask(FROM_HERE, base::Bind(
726 &VaapiVideoDecodeAccelerator::FinishReset, weak_this_));
// Client reset request: switch to kResetting, return all still-queued input
// buffers to the client, queue ResetTask() behind outstanding decode work,
// and wake both condition-variable waiters.
729 void VaapiVideoDecodeAccelerator::Reset() {
730 DCHECK_EQ(message_loop_, base::MessageLoop::current());
731 DVLOG(1) << "Got reset request";
733 // This will make any new decode tasks exit early.
734 base::AutoLock auto_lock(lock_);
735 state_ = kResetting;
736 finish_flush_pending_ = false;
738 // Drop all remaining input buffers, if present.
739 while (!input_buffers_.empty()) {
740 message_loop_->PostTask(FROM_HERE, base::Bind(
741 &Client::NotifyEndOfBitstreamBuffer, client_,
742 input_buffers_.front()->id));
743 input_buffers_.pop();
746 decoder_thread_proxy_->PostTask(FROM_HERE, base::Bind(
747 &VaapiVideoDecodeAccelerator::ResetTask, base::Unretained(this)));
749 input_ready_.Signal();
750 surfaces_available_.Signal();
// Completes a reset on |message_loop_|: drops pending outputs, defers to any
// in-progress surface set change, notifies the client, and restarts decoding
// if input arrived during the reset.
753 void VaapiVideoDecodeAccelerator::FinishReset() {
754 DCHECK_EQ(message_loop_, base::MessageLoop::current());
755 DVLOG(1) << "FinishReset";
756 base::AutoLock auto_lock(lock_);
758 if (state_ != kResetting) {
759 DCHECK(state_ == kDestroying || state_ == kUninitialized) << state_;
760 return;  // We could've gotten destroyed already.
763 // Drop pending outputs.
764 while (!pending_output_cbs_.empty())
765 pending_output_cbs_.pop();
767 if (awaiting_va_surfaces_recycle_) {
768 // Decoder requested a new surface set while we were waiting for it to
769 // finish the last DecodeTask, running at the time of Reset().
770 // Let the surface set change finish first before resetting.
771 message_loop_->PostTask(FROM_HERE, base::Bind(
772 &VaapiVideoDecodeAccelerator::FinishReset, weak_this_));
773 return;
776 num_stream_bufs_at_decoder_ = 0;
777 state_ = kIdle;
779 message_loop_->PostTask(FROM_HERE, base::Bind(
780 &Client::NotifyResetDone, client_));
782 // The client might have given us new buffers via Decode() while we were
783 // resetting and might be waiting for our move, and not call Decode() anymore
784 // until we return something. Post a DecodeTask() so that we won't
785 // sleep forever waiting for Decode() in that case. Having two of them
786 // in the pipe is harmless, the additional one will return as soon as it sees
787 // that we are back in kDecoding state.
788 if (!input_buffers_.empty()) {
789 state_ = kDecoding;
790 decoder_thread_proxy_->PostTask(FROM_HERE, base::Bind(
791 &VaapiVideoDecodeAccelerator::DecodeTask,
792 base::Unretained(this)));
795 DVLOG(1) << "Reset finished";
// Tears the decoder down on |message_loop_|: invalidates client/weak
// pointers, wakes all decoder-thread waiters, and joins the decoder thread
// (with |lock_| temporarily released to avoid deadlocking its tasks).
// No-ops when already uninitialized or destroying.
798 void VaapiVideoDecodeAccelerator::Cleanup() {
799 DCHECK_EQ(message_loop_, base::MessageLoop::current());
801 base::AutoLock auto_lock(lock_);
802 if (state_ == kUninitialized || state_ == kDestroying)
803 return;
805 DVLOG(1) << "Destroying VAVDA";
806 state_ = kDestroying;
808 client_ptr_factory_.reset();
809 weak_this_factory_.InvalidateWeakPtrs();
811 // Signal all potential waiters on the decoder_thread_, let them early-exit,
812 // as we've just moved to the kDestroying state, and wait for all tasks
813 // to finish.
814 input_ready_.Signal();
815 surfaces_available_.Signal();
817 base::AutoUnlock auto_unlock(lock_);
818 decoder_thread_.Stop();
821 state_ = kUninitialized;
// Cleans up and then self-deletes; the object must not be used afterwards.
824 void VaapiVideoDecodeAccelerator::Destroy() {
825 DCHECK_EQ(message_loop_, base::MessageLoop::current());
826 Cleanup();
827 delete this;
// This implementation does not support calling Decode() on the IO thread.
830 bool VaapiVideoDecodeAccelerator::CanDecodeOnIOThread() {
831 return false;
834 bool VaapiVideoDecodeAccelerator::DecodeSurface(
835 const scoped_refptr<VaapiDecodeSurface>& dec_surface) {
836 if (!vaapi_wrapper_->ExecuteAndDestroyPendingBuffers(
837 dec_surface->va_surface()->id())) {
838 DVLOG(1) << "Failed decoding picture";
839 return false;
842 return true;
// Called when |dec_surface| has been decoded and is ready for output.
// Bounces to |message_loop_| if called from the decoder thread, then queues
// an OutputPicture callback — unless we are resetting or being destroyed, in
// which case the output is dropped.
845 void VaapiVideoDecodeAccelerator::SurfaceReady(
846 const scoped_refptr<VaapiDecodeSurface>& dec_surface) {
847 if (message_loop_ != base::MessageLoop::current()) {
848 message_loop_->PostTask(
849 FROM_HERE, base::Bind(&VaapiVideoDecodeAccelerator::SurfaceReady,
850 weak_this_, dec_surface));
851 return;
854 DCHECK(!awaiting_va_surfaces_recycle_);
857 base::AutoLock auto_lock(lock_);
858 // Drop any requests to output if we are resetting or being destroyed.
859 if (state_ == kResetting || state_ == kDestroying)
860 return;
863 pending_output_cbs_.push(
864 base::Bind(&VaapiVideoDecodeAccelerator::OutputPicture, weak_this_,
865 dec_surface->va_surface(), dec_surface->bitstream_id()));
867 TryOutputSurface();
// Decoder-thread factory for decode surfaces. Pops a free VA surface id and
// wraps it (with |va_surface_release_cb_| as the recycle callback) together
// with the current bitstream buffer id. Returns nullptr when no VA surface
// is currently free.
870 scoped_refptr<VaapiVideoDecodeAccelerator::VaapiDecodeSurface>
871 VaapiVideoDecodeAccelerator::CreateSurface() {
872 DCHECK(decoder_thread_proxy_->BelongsToCurrentThread());
873 base::AutoLock auto_lock(lock_);
875 if (available_va_surfaces_.empty())
876 return nullptr;
878 DCHECK(!awaiting_va_surfaces_recycle_);
879 scoped_refptr<VASurface> va_surface(
880 new VASurface(available_va_surfaces_.front(), requested_pic_size_,
881 va_surface_release_cb_));
882 available_va_surfaces_.pop_front();
884 scoped_refptr<VaapiDecodeSurface> dec_surface =
885 new VaapiDecodeSurface(curr_input_buffer_->id, va_surface);
887 return dec_surface;
// Stores raw, non-owning pointers to the owning decoder and its
// VaapiWrapper; both must be non-null.
890 VaapiVideoDecodeAccelerator::VaapiH264Accelerator::VaapiH264Accelerator(
891 VaapiVideoDecodeAccelerator* vaapi_dec,
892 VaapiWrapper* vaapi_wrapper)
893 : vaapi_wrapper_(vaapi_wrapper), vaapi_dec_(vaapi_dec) {
894 DCHECK(vaapi_wrapper_);
895 DCHECK(vaapi_dec_);
898 VaapiVideoDecodeAccelerator::VaapiH264Accelerator::~VaapiH264Accelerator() {
901 scoped_refptr<H264Picture>
902 VaapiVideoDecodeAccelerator::VaapiH264Accelerator::CreateH264Picture() {
903 scoped_refptr<VaapiDecodeSurface> va_surface = vaapi_dec_->CreateSurface();
904 if (!va_surface)
905 return nullptr;
907 return new VaapiH264Picture(va_surface);
910 // Fill |va_pic| with default/neutral values.
// VA_INVALID_ID / VA_PICTURE_H264_INVALID mark the entry as an unused slot.
911 static void InitVAPicture(VAPictureH264* va_pic) {
912 memset(va_pic, 0, sizeof(*va_pic));
913 va_pic->picture_id = VA_INVALID_ID;
914 va_pic->flags = VA_PICTURE_H264_INVALID;
917 bool VaapiVideoDecodeAccelerator::VaapiH264Accelerator::SubmitFrameMetadata(
918 const media::H264SPS* sps,
919 const media::H264PPS* pps,
920 const H264DPB& dpb,
921 const H264Picture::Vector& ref_pic_listp0,
922 const H264Picture::Vector& ref_pic_listb0,
923 const H264Picture::Vector& ref_pic_listb1,
924 const scoped_refptr<H264Picture>& pic) {
925 VAPictureParameterBufferH264 pic_param;
926 memset(&pic_param, 0, sizeof(pic_param));
928 #define FROM_SPS_TO_PP(a) pic_param.a = sps->a;
929 #define FROM_SPS_TO_PP2(a, b) pic_param.b = sps->a;
930 FROM_SPS_TO_PP2(pic_width_in_mbs_minus1, picture_width_in_mbs_minus1);
931 // This assumes non-interlaced video
932 FROM_SPS_TO_PP2(pic_height_in_map_units_minus1, picture_height_in_mbs_minus1);
933 FROM_SPS_TO_PP(bit_depth_luma_minus8);
934 FROM_SPS_TO_PP(bit_depth_chroma_minus8);
935 #undef FROM_SPS_TO_PP
936 #undef FROM_SPS_TO_PP2
938 #define FROM_SPS_TO_PP_SF(a) pic_param.seq_fields.bits.a = sps->a;
939 #define FROM_SPS_TO_PP_SF2(a, b) pic_param.seq_fields.bits.b = sps->a;
940 FROM_SPS_TO_PP_SF(chroma_format_idc);
941 FROM_SPS_TO_PP_SF2(separate_colour_plane_flag,
942 residual_colour_transform_flag);
943 FROM_SPS_TO_PP_SF(gaps_in_frame_num_value_allowed_flag);
944 FROM_SPS_TO_PP_SF(frame_mbs_only_flag);
945 FROM_SPS_TO_PP_SF(mb_adaptive_frame_field_flag);
946 FROM_SPS_TO_PP_SF(direct_8x8_inference_flag);
947 pic_param.seq_fields.bits.MinLumaBiPredSize8x8 = (sps->level_idc >= 31);
948 FROM_SPS_TO_PP_SF(log2_max_frame_num_minus4);
949 FROM_SPS_TO_PP_SF(pic_order_cnt_type);
950 FROM_SPS_TO_PP_SF(log2_max_pic_order_cnt_lsb_minus4);
951 FROM_SPS_TO_PP_SF(delta_pic_order_always_zero_flag);
952 #undef FROM_SPS_TO_PP_SF
953 #undef FROM_SPS_TO_PP_SF2
955 #define FROM_PPS_TO_PP(a) pic_param.a = pps->a;
956 FROM_PPS_TO_PP(num_slice_groups_minus1);
957 pic_param.slice_group_map_type = 0;
958 pic_param.slice_group_change_rate_minus1 = 0;
959 FROM_PPS_TO_PP(pic_init_qp_minus26);
960 FROM_PPS_TO_PP(pic_init_qs_minus26);
961 FROM_PPS_TO_PP(chroma_qp_index_offset);
962 FROM_PPS_TO_PP(second_chroma_qp_index_offset);
963 #undef FROM_PPS_TO_PP
965 #define FROM_PPS_TO_PP_PF(a) pic_param.pic_fields.bits.a = pps->a;
966 #define FROM_PPS_TO_PP_PF2(a, b) pic_param.pic_fields.bits.b = pps->a;
967 FROM_PPS_TO_PP_PF(entropy_coding_mode_flag);
968 FROM_PPS_TO_PP_PF(weighted_pred_flag);
969 FROM_PPS_TO_PP_PF(weighted_bipred_idc);
970 FROM_PPS_TO_PP_PF(transform_8x8_mode_flag);
972 pic_param.pic_fields.bits.field_pic_flag = 0;
973 FROM_PPS_TO_PP_PF(constrained_intra_pred_flag);
974 FROM_PPS_TO_PP_PF2(bottom_field_pic_order_in_frame_present_flag,
975 pic_order_present_flag);
976 FROM_PPS_TO_PP_PF(deblocking_filter_control_present_flag);
977 FROM_PPS_TO_PP_PF(redundant_pic_cnt_present_flag);
978 pic_param.pic_fields.bits.reference_pic_flag = pic->ref;
979 #undef FROM_PPS_TO_PP_PF
980 #undef FROM_PPS_TO_PP_PF2
982 pic_param.frame_num = pic->frame_num;
984 InitVAPicture(&pic_param.CurrPic);
985 FillVAPicture(&pic_param.CurrPic, pic);
987 // Init reference pictures' array.
988 for (int i = 0; i < 16; ++i)
989 InitVAPicture(&pic_param.ReferenceFrames[i]);
991 // And fill it with picture info from DPB.
992 FillVARefFramesFromDPB(dpb, pic_param.ReferenceFrames,
993 arraysize(pic_param.ReferenceFrames));
995 pic_param.num_ref_frames = sps->max_num_ref_frames;
997 if (!vaapi_wrapper_->SubmitBuffer(VAPictureParameterBufferType,
998 sizeof(pic_param),
999 &pic_param))
1000 return false;
1002 VAIQMatrixBufferH264 iq_matrix_buf;
1003 memset(&iq_matrix_buf, 0, sizeof(iq_matrix_buf));
1005 if (pps->pic_scaling_matrix_present_flag) {
1006 for (int i = 0; i < 6; ++i) {
1007 for (int j = 0; j < 16; ++j)
1008 iq_matrix_buf.ScalingList4x4[i][j] = pps->scaling_list4x4[i][j];
1011 for (int i = 0; i < 2; ++i) {
1012 for (int j = 0; j < 64; ++j)
1013 iq_matrix_buf.ScalingList8x8[i][j] = pps->scaling_list8x8[i][j];
1015 } else {
1016 for (int i = 0; i < 6; ++i) {
1017 for (int j = 0; j < 16; ++j)
1018 iq_matrix_buf.ScalingList4x4[i][j] = sps->scaling_list4x4[i][j];
1021 for (int i = 0; i < 2; ++i) {
1022 for (int j = 0; j < 64; ++j)
1023 iq_matrix_buf.ScalingList8x8[i][j] = sps->scaling_list8x8[i][j];
1027 return vaapi_wrapper_->SubmitBuffer(VAIQMatrixBufferType,
1028 sizeof(iq_matrix_buf),
1029 &iq_matrix_buf);
1032 bool VaapiVideoDecodeAccelerator::VaapiH264Accelerator::SubmitSlice(
1033 const media::H264PPS* pps,
1034 const media::H264SliceHeader* slice_hdr,
1035 const H264Picture::Vector& ref_pic_list0,
1036 const H264Picture::Vector& ref_pic_list1,
1037 const scoped_refptr<H264Picture>& pic,
1038 const uint8_t* data,
1039 size_t size) {
1040 VASliceParameterBufferH264 slice_param;
1041 memset(&slice_param, 0, sizeof(slice_param));
1043 slice_param.slice_data_size = slice_hdr->nalu_size;
1044 slice_param.slice_data_offset = 0;
1045 slice_param.slice_data_flag = VA_SLICE_DATA_FLAG_ALL;
1046 slice_param.slice_data_bit_offset = slice_hdr->header_bit_size;
1048 #define SHDRToSP(a) slice_param.a = slice_hdr->a;
1049 SHDRToSP(first_mb_in_slice);
1050 slice_param.slice_type = slice_hdr->slice_type % 5;
1051 SHDRToSP(direct_spatial_mv_pred_flag);
1053 // TODO posciak: make sure parser sets those even when override flags
1054 // in slice header is off.
1055 SHDRToSP(num_ref_idx_l0_active_minus1);
1056 SHDRToSP(num_ref_idx_l1_active_minus1);
1057 SHDRToSP(cabac_init_idc);
1058 SHDRToSP(slice_qp_delta);
1059 SHDRToSP(disable_deblocking_filter_idc);
1060 SHDRToSP(slice_alpha_c0_offset_div2);
1061 SHDRToSP(slice_beta_offset_div2);
1063 if (((slice_hdr->IsPSlice() || slice_hdr->IsSPSlice()) &&
1064 pps->weighted_pred_flag) ||
1065 (slice_hdr->IsBSlice() && pps->weighted_bipred_idc == 1)) {
1066 SHDRToSP(luma_log2_weight_denom);
1067 SHDRToSP(chroma_log2_weight_denom);
1069 SHDRToSP(luma_weight_l0_flag);
1070 SHDRToSP(luma_weight_l1_flag);
1072 SHDRToSP(chroma_weight_l0_flag);
1073 SHDRToSP(chroma_weight_l1_flag);
1075 for (int i = 0; i <= slice_param.num_ref_idx_l0_active_minus1; ++i) {
1076 slice_param.luma_weight_l0[i] =
1077 slice_hdr->pred_weight_table_l0.luma_weight[i];
1078 slice_param.luma_offset_l0[i] =
1079 slice_hdr->pred_weight_table_l0.luma_offset[i];
1081 for (int j = 0; j < 2; ++j) {
1082 slice_param.chroma_weight_l0[i][j] =
1083 slice_hdr->pred_weight_table_l0.chroma_weight[i][j];
1084 slice_param.chroma_offset_l0[i][j] =
1085 slice_hdr->pred_weight_table_l0.chroma_offset[i][j];
1089 if (slice_hdr->IsBSlice()) {
1090 for (int i = 0; i <= slice_param.num_ref_idx_l1_active_minus1; ++i) {
1091 slice_param.luma_weight_l1[i] =
1092 slice_hdr->pred_weight_table_l1.luma_weight[i];
1093 slice_param.luma_offset_l1[i] =
1094 slice_hdr->pred_weight_table_l1.luma_offset[i];
1096 for (int j = 0; j < 2; ++j) {
1097 slice_param.chroma_weight_l1[i][j] =
1098 slice_hdr->pred_weight_table_l1.chroma_weight[i][j];
1099 slice_param.chroma_offset_l1[i][j] =
1100 slice_hdr->pred_weight_table_l1.chroma_offset[i][j];
1106 static_assert(
1107 arraysize(slice_param.RefPicList0) == arraysize(slice_param.RefPicList1),
1108 "Invalid RefPicList sizes");
1110 for (size_t i = 0; i < arraysize(slice_param.RefPicList0); ++i) {
1111 InitVAPicture(&slice_param.RefPicList0[i]);
1112 InitVAPicture(&slice_param.RefPicList1[i]);
1115 for (size_t i = 0;
1116 i < ref_pic_list0.size() && i < arraysize(slice_param.RefPicList0);
1117 ++i) {
1118 if (ref_pic_list0[i])
1119 FillVAPicture(&slice_param.RefPicList0[i], ref_pic_list0[i]);
1121 for (size_t i = 0;
1122 i < ref_pic_list1.size() && i < arraysize(slice_param.RefPicList1);
1123 ++i) {
1124 if (ref_pic_list1[i])
1125 FillVAPicture(&slice_param.RefPicList1[i], ref_pic_list1[i]);
1128 if (!vaapi_wrapper_->SubmitBuffer(VASliceParameterBufferType,
1129 sizeof(slice_param),
1130 &slice_param))
1131 return false;
1133 // Can't help it, blame libva...
1134 void* non_const_ptr = const_cast<uint8*>(data);
1135 return vaapi_wrapper_->SubmitBuffer(VASliceDataBufferType, size,
1136 non_const_ptr);
1139 bool VaapiVideoDecodeAccelerator::VaapiH264Accelerator::SubmitDecode(
1140 const scoped_refptr<H264Picture>& pic) {
1141 DVLOG(4) << "Decoding POC " << pic->pic_order_cnt;
1142 scoped_refptr<VaapiDecodeSurface> dec_surface =
1143 H264PictureToVaapiDecodeSurface(pic);
1145 return vaapi_dec_->DecodeSurface(dec_surface);
1148 bool VaapiVideoDecodeAccelerator::VaapiH264Accelerator::OutputPicture(
1149 const scoped_refptr<H264Picture>& pic) {
1150 scoped_refptr<VaapiDecodeSurface> dec_surface =
1151 H264PictureToVaapiDecodeSurface(pic);
1153 vaapi_dec_->SurfaceReady(dec_surface);
1155 return true;
// Discards any buffers already queued on the VA-API wrapper but not yet
// executed, e.g. when the decoder is reset mid-frame.
void VaapiVideoDecodeAccelerator::VaapiH264Accelerator::Reset() {
  vaapi_wrapper_->DestroyPendingBuffers();
}
// Downcasts |pic| to the VA-API-backed picture type and returns its decode
// surface. Every picture handed to this accelerator is created by it as a
// VaapiH264Picture, so a failed downcast is a logic error (hence CHECK).
scoped_refptr<VaapiVideoDecodeAccelerator::VaapiDecodeSurface>
VaapiVideoDecodeAccelerator::VaapiH264Accelerator::
    H264PictureToVaapiDecodeSurface(const scoped_refptr<H264Picture>& pic) {
  VaapiH264Picture* vaapi_pic = pic->AsVaapiH264Picture();
  CHECK(vaapi_pic);
  return vaapi_pic->dec_surface();
}
1170 void VaapiVideoDecodeAccelerator::VaapiH264Accelerator::FillVAPicture(
1171 VAPictureH264* va_pic,
1172 scoped_refptr<H264Picture> pic) {
1173 scoped_refptr<VaapiDecodeSurface> dec_surface =
1174 H264PictureToVaapiDecodeSurface(pic);
1176 va_pic->picture_id = dec_surface->va_surface()->id();
1177 va_pic->frame_idx = pic->frame_num;
1178 va_pic->flags = 0;
1180 switch (pic->field) {
1181 case H264Picture::FIELD_NONE:
1182 break;
1183 case H264Picture::FIELD_TOP:
1184 va_pic->flags |= VA_PICTURE_H264_TOP_FIELD;
1185 break;
1186 case H264Picture::FIELD_BOTTOM:
1187 va_pic->flags |= VA_PICTURE_H264_BOTTOM_FIELD;
1188 break;
1191 if (pic->ref) {
1192 va_pic->flags |= pic->long_term ? VA_PICTURE_H264_LONG_TERM_REFERENCE
1193 : VA_PICTURE_H264_SHORT_TERM_REFERENCE;
1196 va_pic->TopFieldOrderCnt = pic->top_field_order_cnt;
1197 va_pic->BottomFieldOrderCnt = pic->bottom_field_order_cnt;
1200 int VaapiVideoDecodeAccelerator::VaapiH264Accelerator::FillVARefFramesFromDPB(
1201 const H264DPB& dpb,
1202 VAPictureH264* va_pics,
1203 int num_pics) {
1204 H264Picture::Vector::const_reverse_iterator rit;
1205 int i;
1207 // Return reference frames in reverse order of insertion.
1208 // Libva does not document this, but other implementations (e.g. mplayer)
1209 // do it this way as well.
1210 for (rit = dpb.rbegin(), i = 0; rit != dpb.rend() && i < num_pics; ++rit) {
1211 if ((*rit)->ref)
1212 FillVAPicture(&va_pics[i++], *rit);
1215 return i;
// static
// Returns the decode profiles supported by the VA-API driver/hardware,
// as reported by VaapiWrapper.
media::VideoDecodeAccelerator::SupportedProfiles
VaapiVideoDecodeAccelerator::GetSupportedProfiles() {
  return VaapiWrapper::GetSupportedDecodeProfiles();
}
1224 } // namespace content