1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
5 #include "content/common/gpu/media/vaapi_video_decode_accelerator.h"
8 #include "base/logging.h"
9 #include "base/metrics/histogram.h"
10 #include "base/stl_util.h"
11 #include "base/strings/string_util.h"
12 #include "base/synchronization/waitable_event.h"
13 #include "base/trace_event/trace_event.h"
14 #include "content/common/gpu/gpu_channel.h"
15 #include "content/common/gpu/media/accelerated_video_decoder.h"
16 #include "content/common/gpu/media/h264_decoder.h"
17 #include "content/common/gpu/media/vaapi_picture.h"
18 #include "content/common/gpu/media/vp8_decoder.h"
19 #include "content/common/gpu/media/vp9_decoder.h"
20 #include "media/base/bind_to_current_loop.h"
21 #include "media/video/picture.h"
22 #include "third_party/libva/va/va_dec_vp8.h"
23 #include "ui/gl/gl_bindings.h"
24 #include "ui/gl/gl_image.h"
// NOTE(review): this extraction is missing interior lines (the embedded
// original line numbers jump, e.g. 32->36 and 41->45), so enumerators,
// braces and parts of the macro body are not visible here.
29 // UMA errors that the VaapiVideoDecodeAccelerator class reports.
// Enumeration recorded to UMA; VAVDA_DECODER_FAILURES_MAX is the histogram
// boundary value passed to UMA_HISTOGRAM_ENUMERATION below.
30 enum VAVDADecoderFailure
{
32 VAVDA_DECODER_FAILURES_MAX
,
// Records a decoder |failure| to the "Media.VAVDA.DecoderFailure" UMA
// enumeration histogram.
36 static void ReportToUMA(VAVDADecoderFailure failure
) {
37 UMA_HISTOGRAM_ENUMERATION("Media.VAVDA.DecoderFailure", failure
,
38 VAVDA_DECODER_FAILURES_MAX
);
// Error-path helper macro: on failure it calls NotifyError(error_code) and
// returns |ret| from the enclosing function. Only part of the macro body is
// visible in this extraction (the result check and logging lines are
// missing) -- confirm against the full file before editing.
41 #define RETURN_AND_NOTIFY_ON_FAILURE(result, log, error_code, ret) \
45 NotifyError(error_code); \
// A decode surface: pairs the id of the bitstream buffer the surface was
// decoded from with the VASurface holding the decoded data. Ref-counted and
// thread-safe so it can be shared between the decoder thread and the main
// message loop. NOTE(review): interior lines are missing from this
// extraction (access specifiers, member bitstream_id_, closing braces).
50 class VaapiVideoDecodeAccelerator::VaapiDecodeSurface
51 : public base::RefCountedThreadSafe
<VaapiDecodeSurface
> {
53 VaapiDecodeSurface(int32 bitstream_id
,
54 const scoped_refptr
<VASurface
>& va_surface
);
// Id of the bitstream buffer this surface was produced from.
56 int32
bitstream_id() const { return bitstream_id_
; }
// The underlying VA surface holding the decoded picture data.
57 scoped_refptr
<VASurface
> va_surface() { return va_surface_
; }
60 friend class base::RefCountedThreadSafe
<VaapiDecodeSurface
>;
61 ~VaapiDecodeSurface();
64 scoped_refptr
<VASurface
> va_surface_
;
// Constructor: stores the bitstream id and takes a reference to the
// VASurface.
67 VaapiVideoDecodeAccelerator::VaapiDecodeSurface::VaapiDecodeSurface(
69 const scoped_refptr
<VASurface
>& va_surface
)
70 : bitstream_id_(bitstream_id
), va_surface_(va_surface
) {
73 VaapiVideoDecodeAccelerator::VaapiDecodeSurface::~VaapiDecodeSurface() {
// H264Picture subclass that carries the VaapiDecodeSurface the picture is
// decoded into; AsVaapiH264Picture() provides a checked downcast hook.
76 class VaapiH264Picture
: public H264Picture
{
78 VaapiH264Picture(const scoped_refptr
<
79 VaapiVideoDecodeAccelerator::VaapiDecodeSurface
>& dec_surface
);
81 VaapiH264Picture
* AsVaapiH264Picture() override
{ return this; }
82 scoped_refptr
<VaapiVideoDecodeAccelerator::VaapiDecodeSurface
> dec_surface() {
87 ~VaapiH264Picture() override
;
89 scoped_refptr
<VaapiVideoDecodeAccelerator::VaapiDecodeSurface
> dec_surface_
;
91 DISALLOW_COPY_AND_ASSIGN(VaapiH264Picture
);
94 VaapiH264Picture::VaapiH264Picture(const scoped_refptr
<
95 VaapiVideoDecodeAccelerator::VaapiDecodeSurface
>& dec_surface
)
96 : dec_surface_(dec_surface
) {
99 VaapiH264Picture::~VaapiH264Picture() {
// H264Decoder::H264Accelerator implementation backed by VA-API: translates
// parsed H.264 stream metadata/slices into VA buffers submitted via the
// VaapiWrapper, and hands decoded surfaces back to the owning
// VaapiVideoDecodeAccelerator. NOTE(review): access specifiers and some
// parameter lines are missing from this extraction.
102 class VaapiVideoDecodeAccelerator::VaapiH264Accelerator
103 : public H264Decoder::H264Accelerator
{
105 VaapiH264Accelerator(VaapiVideoDecodeAccelerator
* vaapi_dec
,
106 VaapiWrapper
* vaapi_wrapper
);
107 ~VaapiH264Accelerator() override
;
109 // H264Decoder::H264Accelerator implementation.
110 scoped_refptr
<H264Picture
> CreateH264Picture() override
;
112 bool SubmitFrameMetadata(const media::H264SPS
* sps
,
113 const media::H264PPS
* pps
,
115 const H264Picture::Vector
& ref_pic_listp0
,
116 const H264Picture::Vector
& ref_pic_listb0
,
117 const H264Picture::Vector
& ref_pic_listb1
,
118 const scoped_refptr
<H264Picture
>& pic
) override
;
120 bool SubmitSlice(const media::H264PPS
* pps
,
121 const media::H264SliceHeader
* slice_hdr
,
122 const H264Picture::Vector
& ref_pic_list0
,
123 const H264Picture::Vector
& ref_pic_list1
,
124 const scoped_refptr
<H264Picture
>& pic
,
126 size_t size
) override
;
128 bool SubmitDecode(const scoped_refptr
<H264Picture
>& pic
) override
;
129 bool OutputPicture(const scoped_refptr
<H264Picture
>& pic
) override
;
131 void Reset() override
;
// Internal helpers: map an H264Picture back to its VaapiDecodeSurface and
// convert pictures / the DPB into VAPictureH264 structures for submission.
134 scoped_refptr
<VaapiDecodeSurface
> H264PictureToVaapiDecodeSurface(
135 const scoped_refptr
<H264Picture
>& pic
);
137 void FillVAPicture(VAPictureH264
* va_pic
, scoped_refptr
<H264Picture
> pic
);
138 int FillVARefFramesFromDPB(const H264DPB
& dpb
,
139 VAPictureH264
* va_pics
,
// Non-owning back-pointers; both outlive this accelerator (owned by the
// VaapiVideoDecodeAccelerator) -- presumably, confirm against full file.
142 VaapiWrapper
* vaapi_wrapper_
;
143 VaapiVideoDecodeAccelerator
* vaapi_dec_
;
145 DISALLOW_COPY_AND_ASSIGN(VaapiH264Accelerator
);
// VP8Picture subclass carrying the VaapiDecodeSurface it is decoded into;
// mirrors VaapiH264Picture above. NOTE(review): access specifiers and some
// closing lines are missing from this extraction.
148 class VaapiVP8Picture
: public VP8Picture
{
150 VaapiVP8Picture(const scoped_refptr
<
151 VaapiVideoDecodeAccelerator::VaapiDecodeSurface
>& dec_surface
);
153 VaapiVP8Picture
* AsVaapiVP8Picture() override
{ return this; }
154 scoped_refptr
<VaapiVideoDecodeAccelerator::VaapiDecodeSurface
> dec_surface() {
159 ~VaapiVP8Picture() override
;
161 scoped_refptr
<VaapiVideoDecodeAccelerator::VaapiDecodeSurface
> dec_surface_
;
163 DISALLOW_COPY_AND_ASSIGN(VaapiVP8Picture
);
166 VaapiVP8Picture::VaapiVP8Picture(const scoped_refptr
<
167 VaapiVideoDecodeAccelerator::VaapiDecodeSurface
>& dec_surface
)
168 : dec_surface_(dec_surface
) {
171 VaapiVP8Picture::~VaapiVP8Picture() {
// VP8Decoder::VP8Accelerator implementation backed by VA-API; submits VP8
// frame headers and reference frames for decode and outputs finished
// surfaces via the owning VaapiVideoDecodeAccelerator.
174 class VaapiVideoDecodeAccelerator::VaapiVP8Accelerator
175 : public VP8Decoder::VP8Accelerator
{
177 VaapiVP8Accelerator(VaapiVideoDecodeAccelerator
* vaapi_dec
,
178 VaapiWrapper
* vaapi_wrapper
);
179 ~VaapiVP8Accelerator() override
;
181 // VP8Decoder::VP8Accelerator implementation.
182 scoped_refptr
<VP8Picture
> CreateVP8Picture() override
;
184 bool SubmitDecode(const scoped_refptr
<VP8Picture
>& pic
,
185 const media::Vp8FrameHeader
* frame_hdr
,
186 const scoped_refptr
<VP8Picture
>& last_frame
,
187 const scoped_refptr
<VP8Picture
>& golden_frame
,
188 const scoped_refptr
<VP8Picture
>& alt_frame
) override
;
190 bool OutputPicture(const scoped_refptr
<VP8Picture
>& pic
) override
;
// Helper: maps a VP8Picture back to its VaapiDecodeSurface.
193 scoped_refptr
<VaapiDecodeSurface
> VP8PictureToVaapiDecodeSurface(
194 const scoped_refptr
<VP8Picture
>& pic
);
// Non-owning back-pointers, same ownership pattern as VaapiH264Accelerator.
196 VaapiWrapper
* vaapi_wrapper_
;
197 VaapiVideoDecodeAccelerator
* vaapi_dec_
;
199 DISALLOW_COPY_AND_ASSIGN(VaapiVP8Accelerator
);
// VP9Picture subclass carrying the VaapiDecodeSurface it is decoded into;
// mirrors the H264/VP8 picture wrappers above. NOTE(review): constructor
// declaration lines and access specifiers are missing from this extraction.
202 class VaapiVP9Picture
: public VP9Picture
{
205 const scoped_refptr
<VaapiVideoDecodeAccelerator::VaapiDecodeSurface
>&
208 VaapiVP9Picture
* AsVaapiVP9Picture() override
{ return this; }
209 scoped_refptr
<VaapiVideoDecodeAccelerator::VaapiDecodeSurface
> dec_surface() {
214 ~VaapiVP9Picture() override
;
216 scoped_refptr
<VaapiVideoDecodeAccelerator::VaapiDecodeSurface
> dec_surface_
;
218 DISALLOW_COPY_AND_ASSIGN(VaapiVP9Picture
);
221 VaapiVP9Picture::VaapiVP9Picture(
222 const scoped_refptr
<VaapiVideoDecodeAccelerator::VaapiDecodeSurface
>&
224 : dec_surface_(dec_surface
) {}
226 VaapiVP9Picture::~VaapiVP9Picture() {}
// VP9Decoder::VP9Accelerator implementation backed by VA-API; submits VP9
// frame data (segmentation, loop filter, reference pictures) for decode.
228 class VaapiVideoDecodeAccelerator::VaapiVP9Accelerator
229 : public VP9Decoder::VP9Accelerator
{
231 VaapiVP9Accelerator(VaapiVideoDecodeAccelerator
* vaapi_dec
,
232 VaapiWrapper
* vaapi_wrapper
);
233 ~VaapiVP9Accelerator() override
;
235 // VP9Decoder::VP9Accelerator implementation.
236 scoped_refptr
<VP9Picture
> CreateVP9Picture() override
;
// NOTE(review): the enclosing method name for the parameter list below
// (original line 238, likely SubmitDecode) is missing from this extraction.
239 const scoped_refptr
<VP9Picture
>& pic
,
240 const media::Vp9Segmentation
& seg
,
241 const media::Vp9LoopFilter
& lf
,
242 const std::vector
<scoped_refptr
<VP9Picture
>>& ref_pictures
) override
;
244 bool OutputPicture(const scoped_refptr
<VP9Picture
>& pic
) override
;
// Helper: maps a VP9Picture back to its VaapiDecodeSurface.
247 scoped_refptr
<VaapiDecodeSurface
> VP9PictureToVaapiDecodeSurface(
248 const scoped_refptr
<VP9Picture
>& pic
);
// Non-owning back-pointers, same ownership pattern as the other
// accelerators in this file.
250 VaapiWrapper
* vaapi_wrapper_
;
251 VaapiVideoDecodeAccelerator
* vaapi_dec_
;
253 DISALLOW_COPY_AND_ASSIGN(VaapiVP9Accelerator
);
// InputBuffer: plain holder for a mapped bitstream buffer; id and size are
// zero-initialized here.
256 VaapiVideoDecodeAccelerator::InputBuffer::InputBuffer() : id(0), size(0) {
259 VaapiVideoDecodeAccelerator::InputBuffer::~InputBuffer() {
// Reports |error| to the client and schedules Cleanup(). If invoked off the
// main message loop (i.e. from the decoder thread, per the DCHECK), it
// re-posts itself to |message_loop_| and returns. Cleanup() is posted as a
// task rather than called directly to avoid recursively acquiring lock_.
// NOTE(review): some interior lines (braces/returns) are missing from this
// extraction.
262 void VaapiVideoDecodeAccelerator::NotifyError(Error error
) {
263 if (message_loop_
!= base::MessageLoop::current()) {
264 DCHECK(decoder_thread_task_runner_
->BelongsToCurrentThread());
265 message_loop_
->PostTask(FROM_HERE
, base::Bind(
266 &VaapiVideoDecodeAccelerator::NotifyError
, weak_this_
, error
));
270 // Post Cleanup() as a task so we don't recursively acquire lock_.
271 message_loop_
->PostTask(FROM_HERE
, base::Bind(
272 &VaapiVideoDecodeAccelerator::Cleanup
, weak_this_
));
274 LOG(ERROR
) << "Notifying of error " << error
;
276 client_
->NotifyError(error
);
277 client_ptr_factory_
.reset();
// Looks up the VaapiPicture for |picture_buffer_id| in pictures_; logs an
// error when the id is unknown (the not-found return value is on a line
// missing from this extraction). Returns a non-owning pointer.
281 VaapiPicture
* VaapiVideoDecodeAccelerator::PictureById(
282 int32 picture_buffer_id
) {
283 Pictures::iterator it
= pictures_
.find(picture_buffer_id
);
284 if (it
== pictures_
.end()) {
285 LOG(ERROR
) << "Picture id " << picture_buffer_id
<< " does not exist";
289 return it
->second
.get();
// Constructor: captures the current message loop as the "main" loop, starts
// in kUninitialized state, and ties both condition variables
// (input_ready_, surfaces_available_) to lock_. A weak pointer is taken
// here and the VASurface release callback is bound to the current loop so
// surface recycling always happens on the main loop.
// NOTE(review): one parameter line (bind_image, original line 295) is
// missing from this extraction.
292 VaapiVideoDecodeAccelerator::VaapiVideoDecodeAccelerator(
293 const base::Callback
<bool(void)>& make_context_current
,
294 const base::Callback
<void(uint32
, uint32
, scoped_refptr
<gfx::GLImage
>)>&
296 : make_context_current_(make_context_current
),
297 state_(kUninitialized
),
298 input_ready_(&lock_
),
299 surfaces_available_(&lock_
),
300 message_loop_(base::MessageLoop::current()),
301 decoder_thread_("VaapiDecoderThread"),
302 num_frames_at_client_(0),
303 num_stream_bufs_at_decoder_(0),
304 finish_flush_pending_(false),
305 awaiting_va_surfaces_recycle_(false),
306 requested_num_pics_(0),
307 bind_image_(bind_image
),
308 weak_this_factory_(this) {
309 weak_this_
= weak_this_factory_
.GetWeakPtr();
310 va_surface_release_cb_
= media::BindToCurrentLoop(
311 base::Bind(&VaapiVideoDecodeAccelerator::RecycleVASurfaceID
, weak_this_
));
// Destructor must run on the main message loop (same thread the object was
// created on).
314 VaapiVideoDecodeAccelerator::~VaapiVideoDecodeAccelerator() {
315 DCHECK_EQ(message_loop_
, base::MessageLoop::current());
// One-time initialization, on the main message loop: verifies the GL
// implementation matches the platform (desktop GL, or EGL/GLES2 under
// Ozone), creates the VaapiWrapper for |profile| (reporting failures to
// UMA), instantiates the codec-specific accelerator/decoder pair (H.264,
// VP8 or VP9 based on the profile range), and starts the decoder thread.
// NOTE(review): several interior lines (returns, the opening #if of the
// preprocessor block, state transition) are missing from this extraction.
318 bool VaapiVideoDecodeAccelerator::Initialize(media::VideoCodecProfile profile
,
320 DCHECK_EQ(message_loop_
, base::MessageLoop::current());
322 client_ptr_factory_
.reset(new base::WeakPtrFactory
<Client
>(client
));
323 client_
= client_ptr_factory_
->GetWeakPtr();
325 base::AutoLock
auto_lock(lock_
);
326 DCHECK_EQ(state_
, kUninitialized
);
327 DVLOG(2) << "Initializing VAVDA, profile: " << profile
;
330 if (gfx::GetGLImplementation() != gfx::kGLImplementationDesktopGL
) {
331 DVLOG(1) << "HW video decode acceleration not available without "
335 #elif defined(USE_OZONE)
336 if (gfx::GetGLImplementation() != gfx::kGLImplementationEGLGLES2
) {
337 DVLOG(1) << "HW video decode acceleration not available without "
343 vaapi_wrapper_
= VaapiWrapper::CreateForVideoCodec(
344 VaapiWrapper::kDecode
, profile
, base::Bind(&ReportToUMA
, VAAPI_ERROR
));
346 if (!vaapi_wrapper_
.get()) {
347 DVLOG(1) << "Failed initializing VAAPI for profile " << profile
;
// Pick the codec-specific accelerator + decoder by profile range.
351 if (profile
>= media::H264PROFILE_MIN
&& profile
<= media::H264PROFILE_MAX
) {
352 h264_accelerator_
.reset(
353 new VaapiH264Accelerator(this, vaapi_wrapper_
.get()));
354 decoder_
.reset(new H264Decoder(h264_accelerator_
.get()));
355 } else if (profile
>= media::VP8PROFILE_MIN
&&
356 profile
<= media::VP8PROFILE_MAX
) {
357 vp8_accelerator_
.reset(new VaapiVP8Accelerator(this, vaapi_wrapper_
.get()));
358 decoder_
.reset(new VP8Decoder(vp8_accelerator_
.get()));
359 } else if (profile
>= media::VP9PROFILE_MIN
&&
360 profile
<= media::VP9PROFILE_MAX
) {
361 vp9_accelerator_
.reset(new VaapiVP9Accelerator(this, vaapi_wrapper_
.get()));
362 decoder_
.reset(new VP9Decoder(vp9_accelerator_
.get()));
364 DLOG(ERROR
) << "Unsupported profile " << profile
;
// CHECK (not DCHECK): failure to start the decoder thread is fatal even in
// release builds.
368 CHECK(decoder_thread_
.Start());
369 decoder_thread_task_runner_
= decoder_thread_
.task_runner();
// Downloads |va_surface| into the VaapiPicture (texture/pixmap) and
// notifies the client the picture is ready. Runs on the main message loop.
// Bumps the textures-at-client trace counter. NOTE(review): interior lines
// (e.g. input_id parameter, macro arguments) are missing from this
// extraction.
375 void VaapiVideoDecodeAccelerator::OutputPicture(
376 const scoped_refptr
<VASurface
>& va_surface
,
378 VaapiPicture
* picture
) {
379 DCHECK_EQ(message_loop_
, base::MessageLoop::current());
381 int32 output_id
= picture
->picture_buffer_id();
383 TRACE_EVENT2("Video Decoder", "VAVDA::OutputSurface",
384 "input_id", input_id
,
385 "output_id", output_id
);
387 DVLOG(3) << "Outputting VASurface " << va_surface
->id()
388 << " into pixmap bound to picture buffer id " << output_id
;
390 RETURN_AND_NOTIFY_ON_FAILURE(picture
->DownloadFromSurface(va_surface
),
391 "Failed putting surface into pixmap",
394 // Notify the client a picture is ready to be displayed.
395 ++num_frames_at_client_
;
396 TRACE_COUNTER1("Video Decoder", "Textures at client", num_frames_at_client_
);
397 DVLOG(4) << "Notifying output picture id " << output_id
398 << " for input "<< input_id
<< " is ready";
399 // TODO(posciak): Use visible size from decoder here instead
400 // (crbug.com/402760).
402 client_
->PictureReady(media::Picture(output_id
, input_id
,
403 gfx::Rect(picture
->size()),
404 picture
->AllowOverlay()));
// Pairs one pending output callback with one free output buffer, if both
// are available, and runs the callback. Called on the main loop. When a
// flush is pending and the last output drains, flush completion follows
// (the call is on a line missing from this extraction).
407 void VaapiVideoDecodeAccelerator::TryOutputSurface() {
408 DCHECK_EQ(message_loop_
, base::MessageLoop::current());
410 // Handle Destroy() arriving while pictures are queued for output.
414 if (pending_output_cbs_
.empty() || output_buffers_
.empty())
417 OutputCB output_cb
= pending_output_cbs_
.front();
418 pending_output_cbs_
.pop();
420 VaapiPicture
* picture
= PictureById(output_buffers_
.front());
422 output_buffers_
.pop();
424 output_cb
.Run(picture
);
426 if (finish_flush_pending_
&& pending_output_cbs_
.empty())
// Maps |bitstream_buffer|'s shared memory read-only, wraps it in an
// InputBuffer, queues it under lock_ for the decoder thread, and signals
// input_ready_. Runs on the main message loop; on map failure reports
// UNREADABLE_INPUT via the error macro. Also updates the
// stream-buffers-at-decoder trace counter.
430 void VaapiVideoDecodeAccelerator::MapAndQueueNewInputBuffer(
431 const media::BitstreamBuffer
& bitstream_buffer
) {
432 DCHECK_EQ(message_loop_
, base::MessageLoop::current());
433 TRACE_EVENT1("Video Decoder", "MapAndQueueNewInputBuffer", "input_id",
434 bitstream_buffer
.id());
436 DVLOG(4) << "Mapping new input buffer id: " << bitstream_buffer
.id()
437 << " size: " << (int)bitstream_buffer
.size();
// The 'true' argument maps the shared memory segment read-only.
439 scoped_ptr
<base::SharedMemory
> shm(
440 new base::SharedMemory(bitstream_buffer
.handle(), true));
441 RETURN_AND_NOTIFY_ON_FAILURE(shm
->Map(bitstream_buffer
.size()),
442 "Failed to map input buffer", UNREADABLE_INPUT
,);
444 base::AutoLock
auto_lock(lock_
);
446 // Set up a new input buffer and queue it for later.
447 linked_ptr
<InputBuffer
> input_buffer(new InputBuffer());
448 input_buffer
->shm
.reset(shm
.release());
449 input_buffer
->id
= bitstream_buffer
.id();
450 input_buffer
->size
= bitstream_buffer
.size();
452 ++num_stream_bufs_at_decoder_
;
453 TRACE_COUNTER1("Video Decoder", "Stream buffers at decoder",
454 num_stream_bufs_at_decoder_
);
456 input_buffers_
.push(input_buffer
);
// Wake the decoder thread, which may be blocked in GetInputBuffer_Locked().
457 input_ready_
.Signal();
// Decoder-thread helper: ensures curr_input_buffer_ is set. If a buffer is
// already current, keeps it; otherwise waits on input_ready_ while the
// queue is empty and the state still allows new input (kDecoding/kIdle),
// then pops the front buffer and feeds its memory to the decoder via
// SetStream (partially visible below). Caller must hold lock_.
// NOTE(review): several interior lines (waits, returns, state checks) are
// missing from this extraction.
460 bool VaapiVideoDecodeAccelerator::GetInputBuffer_Locked() {
461 DCHECK(decoder_thread_task_runner_
->BelongsToCurrentThread());
462 lock_
.AssertAcquired();
464 if (curr_input_buffer_
.get())
467 // Will only wait if it is expected that in current state new buffers will
468 // be queued from the client via Decode(). The state can change during wait.
469 while (input_buffers_
.empty() && (state_
== kDecoding
|| state_
== kIdle
)) {
473 // We could have got woken up in a different state or never got to sleep
474 // due to current state; check for that.
477 // Here we are only interested in finishing up decoding buffers that are
478 // already queued up. Otherwise will stop decoding.
479 if (input_buffers_
.empty())
484 DCHECK(!input_buffers_
.empty());
486 curr_input_buffer_
= input_buffers_
.front();
487 input_buffers_
.pop();
489 DVLOG(4) << "New current bitstream buffer, id: "
490 << curr_input_buffer_
->id
491 << " size: " << curr_input_buffer_
->size
;
494 static_cast<uint8
*>(curr_input_buffer_
->shm
->memory()),
495 curr_input_buffer_
->size
);
499 // We got woken up due to being destroyed/reset, ignore any already
// Releases the current input buffer: posts NotifyEndOfBitstreamBuffer for
// its id back to the client on the main loop and decrements the
// stream-buffers-at-decoder counter. Caller must hold lock_; runs on the
// decoder thread.
505 void VaapiVideoDecodeAccelerator::ReturnCurrInputBuffer_Locked() {
506 lock_
.AssertAcquired();
507 DCHECK(decoder_thread_task_runner_
->BelongsToCurrentThread());
508 DCHECK(curr_input_buffer_
.get());
510 int32 id
= curr_input_buffer_
->id
;
511 curr_input_buffer_
.reset();
512 DVLOG(4) << "End of input buffer " << id
;
513 message_loop_
->PostTask(FROM_HERE
, base::Bind(
514 &Client::NotifyEndOfBitstreamBuffer
, client_
, id
));
516 --num_stream_bufs_at_decoder_
;
517 TRACE_COUNTER1("Video Decoder", "Stream buffers at decoder",
518 num_stream_bufs_at_decoder_
);
521 // TODO(posciak): refactor the whole class to remove sleeping in wait for
522 // surfaces, and reschedule DecodeTask instead.
// Blocks the decoder thread on surfaces_available_ until a free VA surface
// exists or the state leaves kDecoding/kFlushing/kIdle (e.g. on reset or
// destroy, whose Signal() calls wake this wait). Caller must hold lock_.
523 bool VaapiVideoDecodeAccelerator::WaitForSurfaces_Locked() {
524 lock_
.AssertAcquired();
525 DCHECK(decoder_thread_task_runner_
->BelongsToCurrentThread());
527 while (available_va_surfaces_
.empty() &&
528 (state_
== kDecoding
|| state_
== kFlushing
|| state_
== kIdle
)) {
529 surfaces_available_
.Wait();
532 if (state_
!= kDecoding
&& state_
!= kFlushing
&& state_
!= kIdle
)
// Main decoder-thread loop: pulls input buffers and runs decoder_->Decode()
// until input runs out, surfaces run out, a surface-set change is
// requested, or an error occurs. lock_ is deliberately dropped around
// Decode() (see comment below) since the decoder calls back into this class
// only indirectly. NOTE(review): the switch statement header and several
// returns are on lines missing from this extraction.
538 void VaapiVideoDecodeAccelerator::DecodeTask() {
539 DCHECK(decoder_thread_task_runner_
->BelongsToCurrentThread());
540 TRACE_EVENT0("Video Decoder", "VAVDA::DecodeTask");
541 base::AutoLock
auto_lock(lock_
);
543 if (state_
!= kDecoding
)
547 DVLOG(4) << "Decode task";
549 // Try to decode what stream data is (still) in the decoder until we run out
551 while (GetInputBuffer_Locked()) {
552 DCHECK(curr_input_buffer_
.get());
554 AcceleratedVideoDecoder::DecodeResult res
;
556 // We are OK releasing the lock here, as decoder never calls our methods
557 // directly and we will reacquire the lock before looking at state again.
558 // This is the main decode function of the decoder and while keeping
559 // the lock for its duration would be fine, it would defeat the purpose
560 // of having a separate decoder thread.
561 base::AutoUnlock
auto_unlock(lock_
);
562 res
= decoder_
->Decode();
// Decoder needs a new surface set; hand off to the main loop and stop --
// decoding resumes once ProvidePictureBuffers() completes.
566 case AcceleratedVideoDecoder::kAllocateNewSurfaces
:
567 DVLOG(1) << "Decoder requesting a new set of surfaces";
568 message_loop_
->PostTask(FROM_HERE
, base::Bind(
569 &VaapiVideoDecodeAccelerator::InitiateSurfaceSetChange
, weak_this_
,
570 decoder_
->GetRequiredNumOfPictures(),
571 decoder_
->GetPicSize()));
572 // We'll get rescheduled once ProvidePictureBuffers() finishes.
575 case AcceleratedVideoDecoder::kRanOutOfStreamData
:
576 ReturnCurrInputBuffer_Locked();
579 case AcceleratedVideoDecoder::kRanOutOfSurfaces
:
580 // No more output buffers in the decoder, try getting more or go to
581 // sleep waiting for them.
582 if (!WaitForSurfaces_Locked())
587 case AcceleratedVideoDecoder::kDecodeError
:
588 RETURN_AND_NOTIFY_ON_FAILURE(false, "Error decoding stream",
// Main-loop entry point for a surface set change: records the requested
// picture count/size and begins waiting for the client to return all
// outstanding pictures (see TryFinishSurfaceSetChange).
595 void VaapiVideoDecodeAccelerator::InitiateSurfaceSetChange(size_t num_pics
,
597 DCHECK_EQ(message_loop_
, base::MessageLoop::current());
598 DCHECK(!awaiting_va_surfaces_recycle_
);
600 // At this point decoder has stopped running and has already posted onto our
601 // loop any remaining output request callbacks, which executed before we got
602 // here. Some of them might have been pended though, because we might not
603 // have had enough TFPictures to output surfaces to. Initiate a wait cycle,
604 // which will wait for client to return enough PictureBuffers to us, so that
605 // we can finish all pending output callbacks, releasing associated surfaces.
606 DVLOG(1) << "Initiating surface set change";
607 awaiting_va_surfaces_recycle_
= true;
609 requested_num_pics_
= num_pics
;
610 requested_pic_size_
= size
;
612 TryFinishSurfaceSetChange();
// Completes a surface set change once every outstanding surface has been
// recycled: destroys the old VA surfaces, dismisses all current
// PictureBuffers, then asks the client for a new set. If surfaces are still
// outstanding, re-posts itself to run after the pending release callbacks.
// Runs on the main message loop.
615 void VaapiVideoDecodeAccelerator::TryFinishSurfaceSetChange() {
616 DCHECK_EQ(message_loop_
, base::MessageLoop::current());
618 if (!awaiting_va_surfaces_recycle_
)
// All surfaces are back only when every picture's surface is in
// available_va_surfaces_ and no output callbacks remain queued.
621 if (!pending_output_cbs_
.empty() ||
622 pictures_
.size() != available_va_surfaces_
.size()) {
624 // 1. Not all pending pending output callbacks have been executed yet.
625 // Wait for the client to return enough pictures and retry later.
626 // 2. The above happened and all surface release callbacks have been posted
627 // as the result, but not all have executed yet. Post ourselves after them
628 // to let them release surfaces.
629 DVLOG(2) << "Awaiting pending output/surface release callbacks to finish";
630 message_loop_
->PostTask(FROM_HERE
, base::Bind(
631 &VaapiVideoDecodeAccelerator::TryFinishSurfaceSetChange
, weak_this_
));
635 // All surfaces released, destroy them and dismiss all PictureBuffers.
636 awaiting_va_surfaces_recycle_
= false;
637 available_va_surfaces_
.clear();
638 vaapi_wrapper_
->DestroySurfaces();
640 for (Pictures::iterator iter
= pictures_
.begin(); iter
!= pictures_
.end();
642 DVLOG(2) << "Dismissing picture id: " << iter
->first
;
644 client_
->DismissPictureBuffer(iter
->first
);
648 // And ask for a new set as requested.
649 DVLOG(1) << "Requesting " << requested_num_pics_
<< " pictures of size: "
650 << requested_pic_size_
.ToString();
652 message_loop_
->PostTask(
654 base::Bind(&Client::ProvidePictureBuffers
, client_
, requested_num_pics_
,
655 requested_pic_size_
, VaapiPicture::GetGLTextureTarget()));
// Client entry point: maps and queues |bitstream_buffer|, then kicks a
// DecodeTask on the decoder thread when the state allows it. When the state
// is invalid for a decode request, the error macro reports to the client.
// Runs on the main message loop. NOTE(review): the switch on state_ is on
// lines missing from this extraction.
658 void VaapiVideoDecodeAccelerator::Decode(
659 const media::BitstreamBuffer
& bitstream_buffer
) {
660 DCHECK_EQ(message_loop_
, base::MessageLoop::current());
662 TRACE_EVENT1("Video Decoder", "VAVDA::Decode", "Buffer id",
663 bitstream_buffer
.id());
665 // We got a new input buffer from the client, map it and queue for later use.
666 MapAndQueueNewInputBuffer(bitstream_buffer
);
668 base::AutoLock
auto_lock(lock_
);
// base::Unretained is safe here: decoder_thread_ is stopped before |this|
// is destroyed (see Cleanup()) -- presumably; confirm against full file.
672 decoder_thread_task_runner_
->PostTask(
673 FROM_HERE
, base::Bind(&VaapiVideoDecodeAccelerator::DecodeTask
,
674 base::Unretained(this)));
678 // Decoder already running, fallthrough.
680 // When resetting, allow accumulating bitstream buffers, so that
681 // the client can queue after-seek-buffers while we are finishing with
682 // the before-seek one.
686 RETURN_AND_NOTIFY_ON_FAILURE(false,
687 "Decode request from client in invalid state: " << state_
,
// VASurface release callback (bound to the main loop in the constructor):
// returns |va_surface_id| to the free pool and wakes the decoder thread,
// which may be blocked in WaitForSurfaces_Locked().
693 void VaapiVideoDecodeAccelerator::RecycleVASurfaceID(
694 VASurfaceID va_surface_id
) {
695 DCHECK_EQ(message_loop_
, base::MessageLoop::current());
696 base::AutoLock
auto_lock(lock_
);
698 available_va_surfaces_
.push_back(va_surface_id
);
699 surfaces_available_
.Signal();
// Client's response to ProvidePictureBuffers(): creates one VA surface and
// one VaapiPicture per buffer, binds GL images where the picture provides
// one, seeds the output-buffer and free-surface pools, and restarts
// decoding. Validates the buffer count against requested_num_pics_ and the
// size against requested_pic_size_. Runs on the main message loop.
// NOTE(review): some interior lines (state change, loop closers) are
// missing from this extraction.
702 void VaapiVideoDecodeAccelerator::AssignPictureBuffers(
703 const std::vector
<media::PictureBuffer
>& buffers
) {
704 DCHECK_EQ(message_loop_
, base::MessageLoop::current());
706 base::AutoLock
auto_lock(lock_
);
707 DCHECK(pictures_
.empty());
709 while (!output_buffers_
.empty())
710 output_buffers_
.pop();
712 RETURN_AND_NOTIFY_ON_FAILURE(
713 buffers
.size() >= requested_num_pics_
,
714 "Got an invalid number of picture buffers. (Got " << buffers
.size()
715 << ", requested " << requested_num_pics_
<< ")", INVALID_ARGUMENT
, );
716 DCHECK(requested_pic_size_
== buffers
[0].size());
718 std::vector
<VASurfaceID
> va_surface_ids
;
719 RETURN_AND_NOTIFY_ON_FAILURE(
720 vaapi_wrapper_
->CreateSurfaces(VA_RT_FORMAT_YUV420
, requested_pic_size_
,
721 buffers
.size(), &va_surface_ids
),
722 "Failed creating VA Surfaces", PLATFORM_FAILURE
, );
723 DCHECK_EQ(va_surface_ids
.size(), buffers
.size());
725 for (size_t i
= 0; i
< buffers
.size(); ++i
) {
726 DVLOG(2) << "Assigning picture id: " << buffers
[i
].id()
727 << " to texture id: " << buffers
[i
].texture_id()
728 << " VASurfaceID: " << va_surface_ids
[i
];
730 linked_ptr
<VaapiPicture
> picture(VaapiPicture::CreatePicture(
731 vaapi_wrapper_
.get(), make_context_current_
, buffers
[i
].id(),
732 buffers
[i
].texture_id(), requested_pic_size_
));
734 scoped_refptr
<gfx::GLImage
> image
= picture
->GetImageToBind();
736 bind_image_
.Run(buffers
[i
].internal_texture_id(),
737 VaapiPicture::GetGLTextureTarget(), image
);
740 RETURN_AND_NOTIFY_ON_FAILURE(
741 picture
.get(), "Failed assigning picture buffer to a texture.",
745 pictures_
.insert(std::make_pair(buffers
[i
].id(), picture
)).second
;
748 output_buffers_
.push(buffers
[i
].id());
749 available_va_surfaces_
.push_back(va_surface_ids
[i
]);
750 surfaces_available_
.Signal();
// Resume decoding now that surfaces exist; base::Unretained is safe while
// decoder_thread_ is owned by |this| -- presumably, confirm in full file.
754 decoder_thread_task_runner_
->PostTask(
755 FROM_HERE
, base::Bind(&VaapiVideoDecodeAccelerator::DecodeTask
,
756 base::Unretained(this)));
// Client returns a picture buffer: decrement the textures-at-client
// counter and put the buffer back in the output pool. Runs on the main
// message loop. NOTE(review): the TryOutputSurface() call that normally
// follows is on a line missing from this extraction.
759 void VaapiVideoDecodeAccelerator::ReusePictureBuffer(int32 picture_buffer_id
) {
760 DCHECK_EQ(message_loop_
, base::MessageLoop::current());
761 TRACE_EVENT1("Video Decoder", "VAVDA::ReusePictureBuffer", "Picture id",
764 --num_frames_at_client_
;
765 TRACE_COUNTER1("Video Decoder", "Textures at client", num_frames_at_client_
);
767 output_buffers_
.push(picture_buffer_id
);
// Decoder-thread half of Flush(): drains the decoder of all undisplayed
// pictures, then posts FinishFlush back to the main loop.
771 void VaapiVideoDecodeAccelerator::FlushTask() {
772 DCHECK(decoder_thread_task_runner_
->BelongsToCurrentThread());
773 DVLOG(1) << "Flush task";
775 // First flush all the pictures that haven't been outputted, notifying the
776 // client to output them.
777 bool res
= decoder_
->Flush();
778 RETURN_AND_NOTIFY_ON_FAILURE(res
, "Failed flushing the decoder.",
781 // Put the decoder in idle state, ready to resume.
784 message_loop_
->PostTask(FROM_HERE
, base::Bind(
785 &VaapiVideoDecodeAccelerator::FinishFlush
, weak_this_
));
// Client entry point, main loop: queue FlushTask after all pending decode
// tasks, then signal both condition variables so a decoder thread blocked
// on input or surfaces re-checks state and exits its wait.
788 void VaapiVideoDecodeAccelerator::Flush() {
789 DCHECK_EQ(message_loop_
, base::MessageLoop::current());
790 DVLOG(1) << "Got flush request";
792 base::AutoLock
auto_lock(lock_
);
794 // Queue a flush task after all existing decoding tasks to clean up.
795 decoder_thread_task_runner_
->PostTask(
796 FROM_HERE
, base::Bind(&VaapiVideoDecodeAccelerator::FlushTask
,
797 base::Unretained(this)));
799 input_ready_
.Signal();
800 surfaces_available_
.Signal();
// Main-loop completion of a flush: once all pending outputs have drained,
// notifies the client via NotifyFlushDone. If outputs are still pending,
// sets finish_flush_pending_ so TryOutputSurface() re-triggers completion.
// Bails out when already destroyed (state_ == kDestroying).
803 void VaapiVideoDecodeAccelerator::FinishFlush() {
804 DCHECK_EQ(message_loop_
, base::MessageLoop::current());
806 finish_flush_pending_
= false;
808 base::AutoLock
auto_lock(lock_
);
809 if (state_
!= kFlushing
) {
810 DCHECK_EQ(state_
, kDestroying
);
811 return; // We could've gotten destroyed already.
814 // Still waiting for textures from client to finish outputting all pending
815 // frames. Try again later.
816 if (!pending_output_cbs_
.empty()) {
817 finish_flush_pending_
= true;
823 message_loop_
->PostTask(FROM_HERE
, base::Bind(
824 &Client::NotifyFlushDone
, client_
));
826 DVLOG(1) << "Flush finished";
// Decoder-thread half of Reset(): runs after all prior decode tasks,
// returns the in-flight input buffer (if any) to the client, and posts
// FinishReset back to the main loop. NOTE(review): the decoder_->Reset()
// call suggested by the surrounding comments is on lines missing from this
// extraction.
829 void VaapiVideoDecodeAccelerator::ResetTask() {
830 DCHECK(decoder_thread_task_runner_
->BelongsToCurrentThread());
831 DVLOG(1) << "ResetTask";
833 // All the decoding tasks from before the reset request from client are done
834 // by now, as this task was scheduled after them and client is expected not
835 // to call Decode() after Reset() and before NotifyResetDone.
838 base::AutoLock
auto_lock(lock_
);
840 // Return current input buffer, if present.
841 if (curr_input_buffer_
.get())
842 ReturnCurrInputBuffer_Locked();
844 // And let client know that we are done with reset.
845 message_loop_
->PostTask(FROM_HERE
, base::Bind(
846 &VaapiVideoDecodeAccelerator::FinishReset
, weak_this_
));
// Client entry point, main loop: cancels any pending flush completion,
// returns all queued-but-undecoded input buffers to the client, schedules
// ResetTask on the decoder thread, and wakes any blocked decoder-thread
// waits so they observe the state change.
849 void VaapiVideoDecodeAccelerator::Reset() {
850 DCHECK_EQ(message_loop_
, base::MessageLoop::current());
851 DVLOG(1) << "Got reset request";
853 // This will make any new decode tasks exit early.
854 base::AutoLock
auto_lock(lock_
);
856 finish_flush_pending_
= false;
858 // Drop all remaining input buffers, if present.
859 while (!input_buffers_
.empty()) {
860 message_loop_
->PostTask(FROM_HERE
, base::Bind(
861 &Client::NotifyEndOfBitstreamBuffer
, client_
,
862 input_buffers_
.front()->id
));
863 input_buffers_
.pop();
866 decoder_thread_task_runner_
->PostTask(
867 FROM_HERE
, base::Bind(&VaapiVideoDecodeAccelerator::ResetTask
,
868 base::Unretained(this)));
870 input_ready_
.Signal();
871 surfaces_available_
.Signal();
// Main-loop completion of a reset: drops pending output callbacks, defers
// itself while a surface set change is still in flight, zeroes the
// buffers-at-decoder counter, notifies the client via NotifyResetDone, and
// reschedules DecodeTask if input arrived during the reset (see comment
// below). Bails out if destroyed or never initialized.
874 void VaapiVideoDecodeAccelerator::FinishReset() {
875 DCHECK_EQ(message_loop_
, base::MessageLoop::current());
876 DVLOG(1) << "FinishReset";
877 base::AutoLock
auto_lock(lock_
);
879 if (state_
!= kResetting
) {
880 DCHECK(state_
== kDestroying
|| state_
== kUninitialized
) << state_
;
881 return; // We could've gotten destroyed already.
884 // Drop pending outputs.
885 while (!pending_output_cbs_
.empty())
886 pending_output_cbs_
.pop();
888 if (awaiting_va_surfaces_recycle_
) {
889 // Decoder requested a new surface set while we were waiting for it to
890 // finish the last DecodeTask, running at the time of Reset().
891 // Let the surface set change finish first before resetting.
892 message_loop_
->PostTask(FROM_HERE
, base::Bind(
893 &VaapiVideoDecodeAccelerator::FinishReset
, weak_this_
));
897 num_stream_bufs_at_decoder_
= 0;
900 message_loop_
->PostTask(FROM_HERE
, base::Bind(
901 &Client::NotifyResetDone
, client_
));
903 // The client might have given us new buffers via Decode() while we were
904 // resetting and might be waiting for our move, and not call Decode() anymore
905 // until we return something. Post a DecodeTask() so that we won't
906 // sleep forever waiting for Decode() in that case. Having two of them
907 // in the pipe is harmless, the additional one will return as soon as it sees
908 // that we are back in kDecoding state.
909 if (!input_buffers_
.empty()) {
911 decoder_thread_task_runner_
->PostTask(
912 FROM_HERE
, base::Bind(&VaapiVideoDecodeAccelerator::DecodeTask
,
913 base::Unretained(this)));
916 DVLOG(1) << "Reset finished";
// Tears down the decoder, main loop only: moves to kDestroying, invalidates
// client weak pointers, wakes any blocked decoder-thread waits so they
// early-exit, stops the decoder thread with lock_ released (AutoUnlock) to
// avoid deadlocking with tasks that take lock_, then returns the state to
// kUninitialized. Idempotent: no-op when already uninitialized/destroying.
919 void VaapiVideoDecodeAccelerator::Cleanup() {
920 DCHECK_EQ(message_loop_
, base::MessageLoop::current());
922 base::AutoLock
auto_lock(lock_
);
923 if (state_
== kUninitialized
|| state_
== kDestroying
)
926 DVLOG(1) << "Destroying VAVDA";
927 state_
= kDestroying
;
929 client_ptr_factory_
.reset();
930 weak_this_factory_
.InvalidateWeakPtrs();
932 // Signal all potential waiters on the decoder_thread_, let them early-exit,
933 // as we've just moved to the kDestroying state, and wait for all tasks
935 input_ready_
.Signal();
936 surfaces_available_
.Signal();
938 base::AutoUnlock
auto_unlock(lock_
);
939 decoder_thread_
.Stop();
942 state_
= kUninitialized
;
// Public destruction entry point; the Cleanup()/delete calls are on lines
// missing from this extraction.
945 void VaapiVideoDecodeAccelerator::Destroy() {
946 DCHECK_EQ(message_loop_
, base::MessageLoop::current());
// Body not visible in this extraction; the return value line is missing.
951 bool VaapiVideoDecodeAccelerator::CanDecodeOnIOThread() {
// Executes all VA buffers queued for |dec_surface|'s surface and destroys
// them; logs and (per the visible control flow) fails on error.
955 bool VaapiVideoDecodeAccelerator::DecodeSurface(
956 const scoped_refptr
<VaapiDecodeSurface
>& dec_surface
) {
957 if (!vaapi_wrapper_
->ExecuteAndDestroyPendingBuffers(
958 dec_surface
->va_surface()->id())) {
959 DVLOG(1) << "Failed decoding picture";
// Called when a decoded surface is ready for output, in decode order.
// Trampolines itself to the main loop if called from the decoder thread.
// Queues an OutputPicture callback (paired with a free output buffer later
// by TryOutputSurface); drops the request while resetting or destroying.
966 void VaapiVideoDecodeAccelerator::SurfaceReady(
967 const scoped_refptr
<VaapiDecodeSurface
>& dec_surface
) {
968 if (message_loop_
!= base::MessageLoop::current()) {
969 message_loop_
->PostTask(
970 FROM_HERE
, base::Bind(&VaapiVideoDecodeAccelerator::SurfaceReady
,
971 weak_this_
, dec_surface
));
975 DCHECK(!awaiting_va_surfaces_recycle_
);
978 base::AutoLock
auto_lock(lock_
);
979 // Drop any requests to output if we are resetting or being destroyed.
980 if (state_
== kResetting
|| state_
== kDestroying
)
984 pending_output_cbs_
.push(
985 base::Bind(&VaapiVideoDecodeAccelerator::OutputPicture
, weak_this_
,
986 dec_surface
->va_surface(), dec_surface
->bitstream_id()));
// Decoder-thread factory for decode surfaces: wraps the next free
// VASurfaceID in a VASurface whose release callback recycles the id back to
// the pool, and pairs it with the current input buffer's bitstream id.
// Returns on empty pool (the early-return value is on a line missing from
// this extraction).
991 scoped_refptr
<VaapiVideoDecodeAccelerator::VaapiDecodeSurface
>
992 VaapiVideoDecodeAccelerator::CreateSurface() {
993 DCHECK(decoder_thread_task_runner_
->BelongsToCurrentThread());
994 base::AutoLock
auto_lock(lock_
);
996 if (available_va_surfaces_
.empty())
999 DCHECK(!awaiting_va_surfaces_recycle_
);
1000 scoped_refptr
<VASurface
> va_surface(
1001 new VASurface(available_va_surfaces_
.front(), requested_pic_size_
,
1002 va_surface_release_cb_
));
1003 available_va_surfaces_
.pop_front();
1005 scoped_refptr
<VaapiDecodeSurface
> dec_surface
=
1006 new VaapiDecodeSurface(curr_input_buffer_
->id
, va_surface
);
// VaapiH264Accelerator constructor: stores non-owning pointers to the
// wrapper and the owning decoder; both must be non-null (DCHECK on the
// wrapper is visible, the vaapi_dec_ check may be on a missing line).
1011 VaapiVideoDecodeAccelerator::VaapiH264Accelerator::VaapiH264Accelerator(
1012 VaapiVideoDecodeAccelerator
* vaapi_dec
,
1013 VaapiWrapper
* vaapi_wrapper
)
1014 : vaapi_wrapper_(vaapi_wrapper
), vaapi_dec_(vaapi_dec
) {
1015 DCHECK(vaapi_wrapper_
);
1019 VaapiVideoDecodeAccelerator::VaapiH264Accelerator::~VaapiH264Accelerator() {
// Allocates a new H264 picture backed by a free VA decode surface; the
// null-surface failure path is on lines missing from this extraction.
1022 scoped_refptr
<H264Picture
>
1023 VaapiVideoDecodeAccelerator::VaapiH264Accelerator::CreateH264Picture() {
1024 scoped_refptr
<VaapiDecodeSurface
> va_surface
= vaapi_dec_
->CreateSurface();
1028 return new VaapiH264Picture(va_surface
);
1031 // Fill |va_pic| with default/neutral values.
// Zeroes the struct, then marks it invalid: picture_id = VA_INVALID_ID and
// flags = VA_PICTURE_H264_INVALID, per the libva VAPictureH264 convention
// for unused reference slots.
1032 static void InitVAPicture(VAPictureH264
* va_pic
) {
1033 memset(va_pic
, 0, sizeof(*va_pic
));
1034 va_pic
->picture_id
= VA_INVALID_ID
;
1035 va_pic
->flags
= VA_PICTURE_H264_INVALID
;
1038 bool VaapiVideoDecodeAccelerator::VaapiH264Accelerator::SubmitFrameMetadata(
1039 const media::H264SPS
* sps
,
1040 const media::H264PPS
* pps
,
1042 const H264Picture::Vector
& ref_pic_listp0
,
1043 const H264Picture::Vector
& ref_pic_listb0
,
1044 const H264Picture::Vector
& ref_pic_listb1
,
1045 const scoped_refptr
<H264Picture
>& pic
) {
1046 VAPictureParameterBufferH264 pic_param
;
1047 memset(&pic_param
, 0, sizeof(pic_param
));
1049 #define FROM_SPS_TO_PP(a) pic_param.a = sps->a
1050 #define FROM_SPS_TO_PP2(a, b) pic_param.b = sps->a
1051 FROM_SPS_TO_PP2(pic_width_in_mbs_minus1
, picture_width_in_mbs_minus1
);
1052 // This assumes non-interlaced video
1053 FROM_SPS_TO_PP2(pic_height_in_map_units_minus1
, picture_height_in_mbs_minus1
);
1054 FROM_SPS_TO_PP(bit_depth_luma_minus8
);
1055 FROM_SPS_TO_PP(bit_depth_chroma_minus8
);
1056 #undef FROM_SPS_TO_PP
1057 #undef FROM_SPS_TO_PP2
1059 #define FROM_SPS_TO_PP_SF(a) pic_param.seq_fields.bits.a = sps->a
1060 #define FROM_SPS_TO_PP_SF2(a, b) pic_param.seq_fields.bits.b = sps->a
1061 FROM_SPS_TO_PP_SF(chroma_format_idc
);
1062 FROM_SPS_TO_PP_SF2(separate_colour_plane_flag
,
1063 residual_colour_transform_flag
);
1064 FROM_SPS_TO_PP_SF(gaps_in_frame_num_value_allowed_flag
);
1065 FROM_SPS_TO_PP_SF(frame_mbs_only_flag
);
1066 FROM_SPS_TO_PP_SF(mb_adaptive_frame_field_flag
);
1067 FROM_SPS_TO_PP_SF(direct_8x8_inference_flag
);
1068 pic_param
.seq_fields
.bits
.MinLumaBiPredSize8x8
= (sps
->level_idc
>= 31);
1069 FROM_SPS_TO_PP_SF(log2_max_frame_num_minus4
);
1070 FROM_SPS_TO_PP_SF(pic_order_cnt_type
);
1071 FROM_SPS_TO_PP_SF(log2_max_pic_order_cnt_lsb_minus4
);
1072 FROM_SPS_TO_PP_SF(delta_pic_order_always_zero_flag
);
1073 #undef FROM_SPS_TO_PP_SF
1074 #undef FROM_SPS_TO_PP_SF2
1076 #define FROM_PPS_TO_PP(a) pic_param.a = pps->a
1077 FROM_PPS_TO_PP(num_slice_groups_minus1
);
1078 pic_param
.slice_group_map_type
= 0;
1079 pic_param
.slice_group_change_rate_minus1
= 0;
1080 FROM_PPS_TO_PP(pic_init_qp_minus26
);
1081 FROM_PPS_TO_PP(pic_init_qs_minus26
);
1082 FROM_PPS_TO_PP(chroma_qp_index_offset
);
1083 FROM_PPS_TO_PP(second_chroma_qp_index_offset
);
1084 #undef FROM_PPS_TO_PP
1086 #define FROM_PPS_TO_PP_PF(a) pic_param.pic_fields.bits.a = pps->a
1087 #define FROM_PPS_TO_PP_PF2(a, b) pic_param.pic_fields.bits.b = pps->a
1088 FROM_PPS_TO_PP_PF(entropy_coding_mode_flag
);
1089 FROM_PPS_TO_PP_PF(weighted_pred_flag
);
1090 FROM_PPS_TO_PP_PF(weighted_bipred_idc
);
1091 FROM_PPS_TO_PP_PF(transform_8x8_mode_flag
);
1093 pic_param
.pic_fields
.bits
.field_pic_flag
= 0;
1094 FROM_PPS_TO_PP_PF(constrained_intra_pred_flag
);
1095 FROM_PPS_TO_PP_PF2(bottom_field_pic_order_in_frame_present_flag
,
1096 pic_order_present_flag
);
1097 FROM_PPS_TO_PP_PF(deblocking_filter_control_present_flag
);
1098 FROM_PPS_TO_PP_PF(redundant_pic_cnt_present_flag
);
1099 pic_param
.pic_fields
.bits
.reference_pic_flag
= pic
->ref
;
1100 #undef FROM_PPS_TO_PP_PF
1101 #undef FROM_PPS_TO_PP_PF2
1103 pic_param
.frame_num
= pic
->frame_num
;
1105 InitVAPicture(&pic_param
.CurrPic
);
1106 FillVAPicture(&pic_param
.CurrPic
, pic
);
1108 // Init reference pictures' array.
1109 for (int i
= 0; i
< 16; ++i
)
1110 InitVAPicture(&pic_param
.ReferenceFrames
[i
]);
1112 // And fill it with picture info from DPB.
1113 FillVARefFramesFromDPB(dpb
, pic_param
.ReferenceFrames
,
1114 arraysize(pic_param
.ReferenceFrames
));
1116 pic_param
.num_ref_frames
= sps
->max_num_ref_frames
;
1118 if (!vaapi_wrapper_
->SubmitBuffer(VAPictureParameterBufferType
,
1123 VAIQMatrixBufferH264 iq_matrix_buf
;
1124 memset(&iq_matrix_buf
, 0, sizeof(iq_matrix_buf
));
1126 if (pps
->pic_scaling_matrix_present_flag
) {
1127 for (int i
= 0; i
< 6; ++i
) {
1128 for (int j
= 0; j
< 16; ++j
)
1129 iq_matrix_buf
.ScalingList4x4
[i
][j
] = pps
->scaling_list4x4
[i
][j
];
1132 for (int i
= 0; i
< 2; ++i
) {
1133 for (int j
= 0; j
< 64; ++j
)
1134 iq_matrix_buf
.ScalingList8x8
[i
][j
] = pps
->scaling_list8x8
[i
][j
];
1137 for (int i
= 0; i
< 6; ++i
) {
1138 for (int j
= 0; j
< 16; ++j
)
1139 iq_matrix_buf
.ScalingList4x4
[i
][j
] = sps
->scaling_list4x4
[i
][j
];
1142 for (int i
= 0; i
< 2; ++i
) {
1143 for (int j
= 0; j
< 64; ++j
)
1144 iq_matrix_buf
.ScalingList8x8
[i
][j
] = sps
->scaling_list8x8
[i
][j
];
1148 return vaapi_wrapper_
->SubmitBuffer(VAIQMatrixBufferType
,
1149 sizeof(iq_matrix_buf
),
1153 bool VaapiVideoDecodeAccelerator::VaapiH264Accelerator::SubmitSlice(
1154 const media::H264PPS
* pps
,
1155 const media::H264SliceHeader
* slice_hdr
,
1156 const H264Picture::Vector
& ref_pic_list0
,
1157 const H264Picture::Vector
& ref_pic_list1
,
1158 const scoped_refptr
<H264Picture
>& pic
,
1159 const uint8_t* data
,
1161 VASliceParameterBufferH264 slice_param
;
1162 memset(&slice_param
, 0, sizeof(slice_param
));
1164 slice_param
.slice_data_size
= slice_hdr
->nalu_size
;
1165 slice_param
.slice_data_offset
= 0;
1166 slice_param
.slice_data_flag
= VA_SLICE_DATA_FLAG_ALL
;
1167 slice_param
.slice_data_bit_offset
= slice_hdr
->header_bit_size
;
1169 #define SHDRToSP(a) slice_param.a = slice_hdr->a
1170 SHDRToSP(first_mb_in_slice
);
1171 slice_param
.slice_type
= slice_hdr
->slice_type
% 5;
1172 SHDRToSP(direct_spatial_mv_pred_flag
);
1174 // TODO posciak: make sure parser sets those even when override flags
1175 // in slice header is off.
1176 SHDRToSP(num_ref_idx_l0_active_minus1
);
1177 SHDRToSP(num_ref_idx_l1_active_minus1
);
1178 SHDRToSP(cabac_init_idc
);
1179 SHDRToSP(slice_qp_delta
);
1180 SHDRToSP(disable_deblocking_filter_idc
);
1181 SHDRToSP(slice_alpha_c0_offset_div2
);
1182 SHDRToSP(slice_beta_offset_div2
);
1184 if (((slice_hdr
->IsPSlice() || slice_hdr
->IsSPSlice()) &&
1185 pps
->weighted_pred_flag
) ||
1186 (slice_hdr
->IsBSlice() && pps
->weighted_bipred_idc
== 1)) {
1187 SHDRToSP(luma_log2_weight_denom
);
1188 SHDRToSP(chroma_log2_weight_denom
);
1190 SHDRToSP(luma_weight_l0_flag
);
1191 SHDRToSP(luma_weight_l1_flag
);
1193 SHDRToSP(chroma_weight_l0_flag
);
1194 SHDRToSP(chroma_weight_l1_flag
);
1196 for (int i
= 0; i
<= slice_param
.num_ref_idx_l0_active_minus1
; ++i
) {
1197 slice_param
.luma_weight_l0
[i
] =
1198 slice_hdr
->pred_weight_table_l0
.luma_weight
[i
];
1199 slice_param
.luma_offset_l0
[i
] =
1200 slice_hdr
->pred_weight_table_l0
.luma_offset
[i
];
1202 for (int j
= 0; j
< 2; ++j
) {
1203 slice_param
.chroma_weight_l0
[i
][j
] =
1204 slice_hdr
->pred_weight_table_l0
.chroma_weight
[i
][j
];
1205 slice_param
.chroma_offset_l0
[i
][j
] =
1206 slice_hdr
->pred_weight_table_l0
.chroma_offset
[i
][j
];
1210 if (slice_hdr
->IsBSlice()) {
1211 for (int i
= 0; i
<= slice_param
.num_ref_idx_l1_active_minus1
; ++i
) {
1212 slice_param
.luma_weight_l1
[i
] =
1213 slice_hdr
->pred_weight_table_l1
.luma_weight
[i
];
1214 slice_param
.luma_offset_l1
[i
] =
1215 slice_hdr
->pred_weight_table_l1
.luma_offset
[i
];
1217 for (int j
= 0; j
< 2; ++j
) {
1218 slice_param
.chroma_weight_l1
[i
][j
] =
1219 slice_hdr
->pred_weight_table_l1
.chroma_weight
[i
][j
];
1220 slice_param
.chroma_offset_l1
[i
][j
] =
1221 slice_hdr
->pred_weight_table_l1
.chroma_offset
[i
][j
];
1228 arraysize(slice_param
.RefPicList0
) == arraysize(slice_param
.RefPicList1
),
1229 "Invalid RefPicList sizes");
1231 for (size_t i
= 0; i
< arraysize(slice_param
.RefPicList0
); ++i
) {
1232 InitVAPicture(&slice_param
.RefPicList0
[i
]);
1233 InitVAPicture(&slice_param
.RefPicList1
[i
]);
1237 i
< ref_pic_list0
.size() && i
< arraysize(slice_param
.RefPicList0
);
1239 if (ref_pic_list0
[i
])
1240 FillVAPicture(&slice_param
.RefPicList0
[i
], ref_pic_list0
[i
]);
1243 i
< ref_pic_list1
.size() && i
< arraysize(slice_param
.RefPicList1
);
1245 if (ref_pic_list1
[i
])
1246 FillVAPicture(&slice_param
.RefPicList1
[i
], ref_pic_list1
[i
]);
1249 if (!vaapi_wrapper_
->SubmitBuffer(VASliceParameterBufferType
,
1250 sizeof(slice_param
),
1254 // Can't help it, blame libva...
1255 void* non_const_ptr
= const_cast<uint8
*>(data
);
1256 return vaapi_wrapper_
->SubmitBuffer(VASliceDataBufferType
, size
,
1260 bool VaapiVideoDecodeAccelerator::VaapiH264Accelerator::SubmitDecode(
1261 const scoped_refptr
<H264Picture
>& pic
) {
1262 DVLOG(4) << "Decoding POC " << pic
->pic_order_cnt
;
1263 scoped_refptr
<VaapiDecodeSurface
> dec_surface
=
1264 H264PictureToVaapiDecodeSurface(pic
);
1266 return vaapi_dec_
->DecodeSurface(dec_surface
);
1269 bool VaapiVideoDecodeAccelerator::VaapiH264Accelerator::OutputPicture(
1270 const scoped_refptr
<H264Picture
>& pic
) {
1271 scoped_refptr
<VaapiDecodeSurface
> dec_surface
=
1272 H264PictureToVaapiDecodeSurface(pic
);
1274 vaapi_dec_
->SurfaceReady(dec_surface
);
1279 void VaapiVideoDecodeAccelerator::VaapiH264Accelerator::Reset() {
1280 vaapi_wrapper_
->DestroyPendingBuffers();
1283 scoped_refptr
<VaapiVideoDecodeAccelerator::VaapiDecodeSurface
>
1284 VaapiVideoDecodeAccelerator::VaapiH264Accelerator::
1285 H264PictureToVaapiDecodeSurface(const scoped_refptr
<H264Picture
>& pic
) {
1286 VaapiH264Picture
* vaapi_pic
= pic
->AsVaapiH264Picture();
1288 return vaapi_pic
->dec_surface();
1291 void VaapiVideoDecodeAccelerator::VaapiH264Accelerator::FillVAPicture(
1292 VAPictureH264
* va_pic
,
1293 scoped_refptr
<H264Picture
> pic
) {
1294 scoped_refptr
<VaapiDecodeSurface
> dec_surface
=
1295 H264PictureToVaapiDecodeSurface(pic
);
1297 va_pic
->picture_id
= dec_surface
->va_surface()->id();
1298 va_pic
->frame_idx
= pic
->frame_num
;
1301 switch (pic
->field
) {
1302 case H264Picture::FIELD_NONE
:
1304 case H264Picture::FIELD_TOP
:
1305 va_pic
->flags
|= VA_PICTURE_H264_TOP_FIELD
;
1307 case H264Picture::FIELD_BOTTOM
:
1308 va_pic
->flags
|= VA_PICTURE_H264_BOTTOM_FIELD
;
1313 va_pic
->flags
|= pic
->long_term
? VA_PICTURE_H264_LONG_TERM_REFERENCE
1314 : VA_PICTURE_H264_SHORT_TERM_REFERENCE
;
1317 va_pic
->TopFieldOrderCnt
= pic
->top_field_order_cnt
;
1318 va_pic
->BottomFieldOrderCnt
= pic
->bottom_field_order_cnt
;
1321 int VaapiVideoDecodeAccelerator::VaapiH264Accelerator::FillVARefFramesFromDPB(
1323 VAPictureH264
* va_pics
,
1325 H264Picture::Vector::const_reverse_iterator rit
;
1328 // Return reference frames in reverse order of insertion.
1329 // Libva does not document this, but other implementations (e.g. mplayer)
1330 // do it this way as well.
1331 for (rit
= dpb
.rbegin(), i
= 0; rit
!= dpb
.rend() && i
< num_pics
; ++rit
) {
1333 FillVAPicture(&va_pics
[i
++], *rit
);
1339 VaapiVideoDecodeAccelerator::VaapiVP8Accelerator::VaapiVP8Accelerator(
1340 VaapiVideoDecodeAccelerator
* vaapi_dec
,
1341 VaapiWrapper
* vaapi_wrapper
)
1342 : vaapi_wrapper_(vaapi_wrapper
), vaapi_dec_(vaapi_dec
) {
1343 DCHECK(vaapi_wrapper_
);
1347 VaapiVideoDecodeAccelerator::VaapiVP8Accelerator::~VaapiVP8Accelerator() {
1350 scoped_refptr
<VP8Picture
>
1351 VaapiVideoDecodeAccelerator::VaapiVP8Accelerator::CreateVP8Picture() {
1352 scoped_refptr
<VaapiDecodeSurface
> va_surface
= vaapi_dec_
->CreateSurface();
1356 return new VaapiVP8Picture(va_surface
);
// memcpy() one fixed-size array into another, statically verifying at compile
// time that both arrays have the same size. Wrapped in do { } while (0) so it
// behaves as a single statement (safe in unbraced if/else).
#define ARRAY_MEMCPY_CHECKED(to, from)                               \
  do {                                                               \
    static_assert(sizeof(to) == sizeof(from),                        \
                  #from " and " #to " arrays must be of same size"); \
    memcpy(to, from, sizeof(to));                                    \
  } while (0)
1366 bool VaapiVideoDecodeAccelerator::VaapiVP8Accelerator::SubmitDecode(
1367 const scoped_refptr
<VP8Picture
>& pic
,
1368 const media::Vp8FrameHeader
* frame_hdr
,
1369 const scoped_refptr
<VP8Picture
>& last_frame
,
1370 const scoped_refptr
<VP8Picture
>& golden_frame
,
1371 const scoped_refptr
<VP8Picture
>& alt_frame
) {
1372 VAIQMatrixBufferVP8 iq_matrix_buf
;
1373 memset(&iq_matrix_buf
, 0, sizeof(VAIQMatrixBufferVP8
));
1375 const media::Vp8SegmentationHeader
& sgmnt_hdr
= frame_hdr
->segmentation_hdr
;
1376 const media::Vp8QuantizationHeader
& quant_hdr
= frame_hdr
->quantization_hdr
;
1378 arraysize(iq_matrix_buf
.quantization_index
) == media::kMaxMBSegments
,
1379 "incorrect quantization matrix size");
1380 for (size_t i
= 0; i
< media::kMaxMBSegments
; ++i
) {
1381 int q
= quant_hdr
.y_ac_qi
;
1383 if (sgmnt_hdr
.segmentation_enabled
) {
1384 if (sgmnt_hdr
.segment_feature_mode
==
1385 media::Vp8SegmentationHeader::FEATURE_MODE_ABSOLUTE
)
1386 q
= sgmnt_hdr
.quantizer_update_value
[i
];
1388 q
+= sgmnt_hdr
.quantizer_update_value
[i
];
1391 #define CLAMP_Q(q) std::min(std::max(q, 0), 127)
1392 static_assert(arraysize(iq_matrix_buf
.quantization_index
[i
]) == 6,
1393 "incorrect quantization matrix size");
1394 iq_matrix_buf
.quantization_index
[i
][0] = CLAMP_Q(q
);
1395 iq_matrix_buf
.quantization_index
[i
][1] = CLAMP_Q(q
+ quant_hdr
.y_dc_delta
);
1396 iq_matrix_buf
.quantization_index
[i
][2] = CLAMP_Q(q
+ quant_hdr
.y2_dc_delta
);
1397 iq_matrix_buf
.quantization_index
[i
][3] = CLAMP_Q(q
+ quant_hdr
.y2_ac_delta
);
1398 iq_matrix_buf
.quantization_index
[i
][4] = CLAMP_Q(q
+ quant_hdr
.uv_dc_delta
);
1399 iq_matrix_buf
.quantization_index
[i
][5] = CLAMP_Q(q
+ quant_hdr
.uv_ac_delta
);
1403 if (!vaapi_wrapper_
->SubmitBuffer(VAIQMatrixBufferType
,
1404 sizeof(VAIQMatrixBufferVP8
),
1408 VAProbabilityDataBufferVP8 prob_buf
;
1409 memset(&prob_buf
, 0, sizeof(VAProbabilityDataBufferVP8
));
1411 const media::Vp8EntropyHeader
& entr_hdr
= frame_hdr
->entropy_hdr
;
1412 ARRAY_MEMCPY_CHECKED(prob_buf
.dct_coeff_probs
, entr_hdr
.coeff_probs
);
1414 if (!vaapi_wrapper_
->SubmitBuffer(VAProbabilityBufferType
,
1415 sizeof(VAProbabilityDataBufferVP8
),
1419 VAPictureParameterBufferVP8 pic_param
;
1420 memset(&pic_param
, 0, sizeof(VAPictureParameterBufferVP8
));
1421 pic_param
.frame_width
= frame_hdr
->width
;
1422 pic_param
.frame_height
= frame_hdr
->height
;
1425 scoped_refptr
<VaapiDecodeSurface
> last_frame_surface
=
1426 VP8PictureToVaapiDecodeSurface(last_frame
);
1427 pic_param
.last_ref_frame
= last_frame_surface
->va_surface()->id();
1429 pic_param
.last_ref_frame
= VA_INVALID_SURFACE
;
1433 scoped_refptr
<VaapiDecodeSurface
> golden_frame_surface
=
1434 VP8PictureToVaapiDecodeSurface(golden_frame
);
1435 pic_param
.golden_ref_frame
= golden_frame_surface
->va_surface()->id();
1437 pic_param
.golden_ref_frame
= VA_INVALID_SURFACE
;
1441 scoped_refptr
<VaapiDecodeSurface
> alt_frame_surface
=
1442 VP8PictureToVaapiDecodeSurface(alt_frame
);
1443 pic_param
.alt_ref_frame
= alt_frame_surface
->va_surface()->id();
1445 pic_param
.alt_ref_frame
= VA_INVALID_SURFACE
;
1448 pic_param
.out_of_loop_frame
= VA_INVALID_SURFACE
;
1450 const media::Vp8LoopFilterHeader
& lf_hdr
= frame_hdr
->loopfilter_hdr
;
1452 #define FHDR_TO_PP_PF(a, b) pic_param.pic_fields.bits.a = (b)
1453 FHDR_TO_PP_PF(key_frame
, frame_hdr
->IsKeyframe() ? 0 : 1);
1454 FHDR_TO_PP_PF(version
, frame_hdr
->version
);
1455 FHDR_TO_PP_PF(segmentation_enabled
, sgmnt_hdr
.segmentation_enabled
);
1456 FHDR_TO_PP_PF(update_mb_segmentation_map
,
1457 sgmnt_hdr
.update_mb_segmentation_map
);
1458 FHDR_TO_PP_PF(update_segment_feature_data
,
1459 sgmnt_hdr
.update_segment_feature_data
);
1460 FHDR_TO_PP_PF(filter_type
, lf_hdr
.type
);
1461 FHDR_TO_PP_PF(sharpness_level
, lf_hdr
.sharpness_level
);
1462 FHDR_TO_PP_PF(loop_filter_adj_enable
, lf_hdr
.loop_filter_adj_enable
);
1463 FHDR_TO_PP_PF(mode_ref_lf_delta_update
, lf_hdr
.mode_ref_lf_delta_update
);
1464 FHDR_TO_PP_PF(sign_bias_golden
, frame_hdr
->sign_bias_golden
);
1465 FHDR_TO_PP_PF(sign_bias_alternate
, frame_hdr
->sign_bias_alternate
);
1466 FHDR_TO_PP_PF(mb_no_coeff_skip
, frame_hdr
->mb_no_skip_coeff
);
1467 FHDR_TO_PP_PF(loop_filter_disable
, lf_hdr
.level
== 0);
1468 #undef FHDR_TO_PP_PF
1470 ARRAY_MEMCPY_CHECKED(pic_param
.mb_segment_tree_probs
, sgmnt_hdr
.segment_prob
);
1472 static_assert(arraysize(sgmnt_hdr
.lf_update_value
) ==
1473 arraysize(pic_param
.loop_filter_level
),
1474 "loop filter level arrays mismatch");
1475 for (size_t i
= 0; i
< arraysize(sgmnt_hdr
.lf_update_value
); ++i
) {
1476 int lf_level
= lf_hdr
.level
;
1477 if (sgmnt_hdr
.segmentation_enabled
) {
1478 if (sgmnt_hdr
.segment_feature_mode
==
1479 media::Vp8SegmentationHeader::FEATURE_MODE_ABSOLUTE
)
1480 lf_level
= sgmnt_hdr
.lf_update_value
[i
];
1482 lf_level
+= sgmnt_hdr
.lf_update_value
[i
];
1485 // Clamp to [0..63] range.
1486 lf_level
= std::min(std::max(lf_level
, 0), 63);
1487 pic_param
.loop_filter_level
[i
] = lf_level
;
1490 static_assert(arraysize(lf_hdr
.ref_frame_delta
) ==
1491 arraysize(pic_param
.loop_filter_deltas_ref_frame
) &&
1492 arraysize(lf_hdr
.mb_mode_delta
) ==
1493 arraysize(pic_param
.loop_filter_deltas_mode
) &&
1494 arraysize(lf_hdr
.ref_frame_delta
) ==
1495 arraysize(lf_hdr
.mb_mode_delta
),
1496 "loop filter deltas arrays size mismatch");
1497 for (size_t i
= 0; i
< arraysize(lf_hdr
.ref_frame_delta
); ++i
) {
1498 pic_param
.loop_filter_deltas_ref_frame
[i
] = lf_hdr
.ref_frame_delta
[i
];
1499 pic_param
.loop_filter_deltas_mode
[i
] = lf_hdr
.mb_mode_delta
[i
];
1502 #define FHDR_TO_PP(a) pic_param.a = frame_hdr->a
1503 FHDR_TO_PP(prob_skip_false
);
1504 FHDR_TO_PP(prob_intra
);
1505 FHDR_TO_PP(prob_last
);
1506 FHDR_TO_PP(prob_gf
);
1509 ARRAY_MEMCPY_CHECKED(pic_param
.y_mode_probs
, entr_hdr
.y_mode_probs
);
1510 ARRAY_MEMCPY_CHECKED(pic_param
.uv_mode_probs
, entr_hdr
.uv_mode_probs
);
1511 ARRAY_MEMCPY_CHECKED(pic_param
.mv_probs
, entr_hdr
.mv_probs
);
1513 pic_param
.bool_coder_ctx
.range
= frame_hdr
->bool_dec_range
;
1514 pic_param
.bool_coder_ctx
.value
= frame_hdr
->bool_dec_value
;
1515 pic_param
.bool_coder_ctx
.count
= frame_hdr
->bool_dec_count
;
1517 if (!vaapi_wrapper_
->SubmitBuffer(VAPictureParameterBufferType
,
1518 sizeof(pic_param
), &pic_param
))
1521 VASliceParameterBufferVP8 slice_param
;
1522 memset(&slice_param
, 0, sizeof(slice_param
));
1523 slice_param
.slice_data_size
= frame_hdr
->frame_size
;
1524 slice_param
.slice_data_offset
= frame_hdr
->first_part_offset
;
1525 slice_param
.slice_data_flag
= VA_SLICE_DATA_FLAG_ALL
;
1526 slice_param
.macroblock_offset
= frame_hdr
->macroblock_bit_offset
;
1527 // Number of DCT partitions plus control partition.
1528 slice_param
.num_of_partitions
= frame_hdr
->num_of_dct_partitions
+ 1;
1530 // Per VAAPI, this size only includes the size of the macroblock data in
1531 // the first partition (in bytes), so we have to subtract the header size.
1532 slice_param
.partition_size
[0] =
1533 frame_hdr
->first_part_size
- ((frame_hdr
->macroblock_bit_offset
+ 7) / 8);
1535 for (size_t i
= 0; i
< frame_hdr
->num_of_dct_partitions
; ++i
)
1536 slice_param
.partition_size
[i
+ 1] = frame_hdr
->dct_partition_sizes
[i
];
1538 if (!vaapi_wrapper_
->SubmitBuffer(VASliceParameterBufferType
,
1539 sizeof(VASliceParameterBufferVP8
),
1543 void* non_const_ptr
= const_cast<uint8
*>(frame_hdr
->data
);
1544 if (!vaapi_wrapper_
->SubmitBuffer(VASliceDataBufferType
,
1545 frame_hdr
->frame_size
,
1549 scoped_refptr
<VaapiDecodeSurface
> dec_surface
=
1550 VP8PictureToVaapiDecodeSurface(pic
);
1552 return vaapi_dec_
->DecodeSurface(dec_surface
);
1555 bool VaapiVideoDecodeAccelerator::VaapiVP8Accelerator::OutputPicture(
1556 const scoped_refptr
<VP8Picture
>& pic
) {
1557 scoped_refptr
<VaapiDecodeSurface
> dec_surface
=
1558 VP8PictureToVaapiDecodeSurface(pic
);
1560 vaapi_dec_
->SurfaceReady(dec_surface
);
1564 scoped_refptr
<VaapiVideoDecodeAccelerator::VaapiDecodeSurface
>
1565 VaapiVideoDecodeAccelerator::VaapiVP8Accelerator::
1566 VP8PictureToVaapiDecodeSurface(const scoped_refptr
<VP8Picture
>& pic
) {
1567 VaapiVP8Picture
* vaapi_pic
= pic
->AsVaapiVP8Picture();
1569 return vaapi_pic
->dec_surface();
1572 VaapiVideoDecodeAccelerator::VaapiVP9Accelerator::VaapiVP9Accelerator(
1573 VaapiVideoDecodeAccelerator
* vaapi_dec
,
1574 VaapiWrapper
* vaapi_wrapper
)
1575 : vaapi_wrapper_(vaapi_wrapper
), vaapi_dec_(vaapi_dec
) {
1576 DCHECK(vaapi_wrapper_
);
1580 VaapiVideoDecodeAccelerator::VaapiVP9Accelerator::~VaapiVP9Accelerator() {}
1582 scoped_refptr
<VP9Picture
>
1583 VaapiVideoDecodeAccelerator::VaapiVP9Accelerator::CreateVP9Picture() {
1584 scoped_refptr
<VaapiDecodeSurface
> va_surface
= vaapi_dec_
->CreateSurface();
1588 return new VaapiVP9Picture(va_surface
);
1591 bool VaapiVideoDecodeAccelerator::VaapiVP9Accelerator::SubmitDecode(
1592 const scoped_refptr
<VP9Picture
>& pic
,
1593 const media::Vp9Segmentation
& seg
,
1594 const media::Vp9LoopFilter
& lf
,
1595 const std::vector
<scoped_refptr
<VP9Picture
>>& ref_pictures
) {
1596 VADecPictureParameterBufferVP9 pic_param
;
1597 memset(&pic_param
, 0, sizeof(pic_param
));
1599 const media::Vp9FrameHeader
* frame_hdr
= pic
->frame_hdr
.get();
1602 if (frame_hdr
->profile
!= 0) {
1603 DVLOG(1) << "Unsupported profile" << frame_hdr
->profile
;
1607 pic_param
.frame_width
= base::checked_cast
<uint16_t>(frame_hdr
->width
);
1608 pic_param
.frame_height
= base::checked_cast
<uint16_t>(frame_hdr
->height
);
1610 CHECK_EQ(ref_pictures
.size(), arraysize(pic_param
.reference_frames
));
1611 for (size_t i
= 0; i
< arraysize(pic_param
.reference_frames
); ++i
) {
1612 VASurfaceID va_surface_id
;
1613 if (ref_pictures
[i
]) {
1614 scoped_refptr
<VaapiDecodeSurface
> surface
=
1615 VP9PictureToVaapiDecodeSurface(ref_pictures
[i
]);
1616 va_surface_id
= surface
->va_surface()->id();
1618 va_surface_id
= VA_INVALID_SURFACE
;
1621 pic_param
.reference_frames
[i
] = va_surface_id
;
1624 #define FHDR_TO_PP_PF1(a) pic_param.pic_fields.bits.a = frame_hdr->a
1625 #define FHDR_TO_PP_PF2(a, b) pic_param.pic_fields.bits.a = b
1626 FHDR_TO_PP_PF2(subsampling_x
, frame_hdr
->subsampling_x
== 1);
1627 FHDR_TO_PP_PF2(subsampling_y
, frame_hdr
->subsampling_y
== 1);
1628 FHDR_TO_PP_PF2(frame_type
, frame_hdr
->IsKeyframe() ? 0 : 1);
1629 FHDR_TO_PP_PF1(show_frame
);
1630 FHDR_TO_PP_PF1(error_resilient_mode
);
1631 FHDR_TO_PP_PF1(intra_only
);
1632 FHDR_TO_PP_PF1(allow_high_precision_mv
);
1633 FHDR_TO_PP_PF2(mcomp_filter_type
, frame_hdr
->interp_filter
);
1634 FHDR_TO_PP_PF1(frame_parallel_decoding_mode
);
1635 FHDR_TO_PP_PF2(reset_frame_context
, frame_hdr
->reset_context
);
1636 FHDR_TO_PP_PF1(refresh_frame_context
);
1637 FHDR_TO_PP_PF1(frame_context_idx
);
1638 FHDR_TO_PP_PF2(segmentation_enabled
, seg
.enabled
);
1639 FHDR_TO_PP_PF2(segmentation_temporal_update
, seg
.temporal_update
);
1640 FHDR_TO_PP_PF2(segmentation_update_map
, seg
.update_map
);
1641 FHDR_TO_PP_PF2(last_ref_frame
, frame_hdr
->frame_refs
[0]);
1642 FHDR_TO_PP_PF2(last_ref_frame_sign_bias
, frame_hdr
->ref_sign_biases
[0]);
1643 FHDR_TO_PP_PF2(golden_ref_frame
, frame_hdr
->frame_refs
[1]);
1644 FHDR_TO_PP_PF2(golden_ref_frame_sign_bias
, frame_hdr
->ref_sign_biases
[1]);
1645 FHDR_TO_PP_PF2(alt_ref_frame
, frame_hdr
->frame_refs
[2]);
1646 FHDR_TO_PP_PF2(alt_ref_frame_sign_bias
, frame_hdr
->ref_sign_biases
[2]);
1647 FHDR_TO_PP_PF2(lossless_flag
, frame_hdr
->quant_params
.IsLossless());
1648 #undef FHDR_TO_PP_PF2
1649 #undef FHDR_TO_PP_PF1
1651 pic_param
.filter_level
= lf
.filter_level
;
1652 pic_param
.sharpness_level
= lf
.sharpness_level
;
1653 pic_param
.log2_tile_rows
= frame_hdr
->log2_tile_rows
;
1654 pic_param
.log2_tile_columns
= frame_hdr
->log2_tile_cols
;
1655 pic_param
.frame_header_length_in_bytes
= frame_hdr
->uncompressed_header_size
;
1656 pic_param
.first_partition_size
= frame_hdr
->first_partition_size
;
1658 ARRAY_MEMCPY_CHECKED(pic_param
.mb_segment_tree_probs
, seg
.tree_probs
);
1659 ARRAY_MEMCPY_CHECKED(pic_param
.segment_pred_probs
, seg
.pred_probs
);
1661 pic_param
.profile
= frame_hdr
->profile
;
1663 if (!vaapi_wrapper_
->SubmitBuffer(VAPictureParameterBufferType
,
1664 sizeof(pic_param
), &pic_param
))
1667 VASliceParameterBufferVP9 slice_param
;
1668 memset(&slice_param
, 0, sizeof(slice_param
));
1669 slice_param
.slice_data_size
= frame_hdr
->frame_size
;
1670 slice_param
.slice_data_offset
= 0;
1671 slice_param
.slice_data_flag
= VA_SLICE_DATA_FLAG_ALL
;
1673 static_assert(arraysize(media::Vp9Segmentation::feature_enabled
) ==
1674 arraysize(slice_param
.seg_param
),
1675 "seg_param array of incorrect size");
1676 for (size_t i
= 0; i
< arraysize(slice_param
.seg_param
); ++i
) {
1677 VASegmentParameterVP9
& seg_param
= slice_param
.seg_param
[i
];
1678 #define SEG_TO_SP_SF(a, b) seg_param.segment_flags.fields.a = b
1680 segment_reference_enabled
,
1681 seg
.FeatureEnabled(i
, media::Vp9Segmentation::SEG_LVL_REF_FRAME
));
1682 SEG_TO_SP_SF(segment_reference
,
1683 seg
.FeatureData(i
, media::Vp9Segmentation::SEG_LVL_REF_FRAME
));
1684 SEG_TO_SP_SF(segment_reference_skipped
,
1685 seg
.FeatureEnabled(i
, media::Vp9Segmentation::SEG_LVL_SKIP
));
1688 ARRAY_MEMCPY_CHECKED(seg_param
.filter_level
, lf
.lvl
[i
]);
1690 seg_param
.luma_dc_quant_scale
= seg
.y_dequant
[i
][0];
1691 seg_param
.luma_ac_quant_scale
= seg
.y_dequant
[i
][1];
1692 seg_param
.chroma_dc_quant_scale
= seg
.uv_dequant
[i
][0];
1693 seg_param
.chroma_ac_quant_scale
= seg
.uv_dequant
[i
][1];
1696 if (!vaapi_wrapper_
->SubmitBuffer(VASliceParameterBufferType
,
1697 sizeof(slice_param
), &slice_param
))
1700 void* non_const_ptr
= const_cast<uint8
*>(frame_hdr
->data
);
1701 if (!vaapi_wrapper_
->SubmitBuffer(VASliceDataBufferType
,
1702 frame_hdr
->frame_size
, non_const_ptr
))
1705 scoped_refptr
<VaapiDecodeSurface
> dec_surface
=
1706 VP9PictureToVaapiDecodeSurface(pic
);
1708 return vaapi_dec_
->DecodeSurface(dec_surface
);
1711 bool VaapiVideoDecodeAccelerator::VaapiVP9Accelerator::OutputPicture(
1712 const scoped_refptr
<VP9Picture
>& pic
) {
1713 scoped_refptr
<VaapiDecodeSurface
> dec_surface
=
1714 VP9PictureToVaapiDecodeSurface(pic
);
1716 vaapi_dec_
->SurfaceReady(dec_surface
);
1720 scoped_refptr
<VaapiVideoDecodeAccelerator::VaapiDecodeSurface
>
1721 VaapiVideoDecodeAccelerator::VaapiVP9Accelerator::
1722 VP9PictureToVaapiDecodeSurface(const scoped_refptr
<VP9Picture
>& pic
) {
1723 VaapiVP9Picture
* vaapi_pic
= pic
->AsVaapiVP9Picture();
1725 return vaapi_pic
->dec_surface();
1729 media::VideoDecodeAccelerator::SupportedProfiles
1730 VaapiVideoDecodeAccelerator::GetSupportedProfiles() {
1731 return VaapiWrapper::GetSupportedDecodeProfiles();
1734 } // namespace content