// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include <linux/videodev2.h>
#include <sys/eventfd.h>

#include "base/bind.h"
#include "base/bind_helpers.h"
#include "base/callback.h"
#include "base/callback_helpers.h"
#include "base/command_line.h"
#include "base/numerics/safe_conversions.h"
#include "base/strings/stringprintf.h"
#include "content/common/gpu/media/v4l2_slice_video_decode_accelerator.h"
#include "media/base/bind_to_current_loop.h"
#include "media/base/media_switches.h"
#include "ui/gl/scoped_binders.h"

#define LOGF(level) LOG(level) << __FUNCTION__ << "(): "
#define DVLOGF(level) DVLOG(level) << __FUNCTION__ << "(): "

#define NOTIFY_ERROR(x)                          \
  do {                                           \
    LOG(ERROR) << "Setting error state:" << x;   \
    SetErrorState(x);                            \
  } while (0)

#define IOCTL_OR_ERROR_RETURN_VALUE(type, arg, value)                 \
  do {                                                                \
    if (device_->Ioctl(type, arg) != 0) {                             \
      PLOG(ERROR) << __FUNCTION__ << "(): ioctl() failed: " << #type; \
      return value;                                                   \
    }                                                                 \
  } while (0)

#define IOCTL_OR_ERROR_RETURN(type, arg) \
  IOCTL_OR_ERROR_RETURN_VALUE(type, arg, ((void)0))

#define IOCTL_OR_ERROR_RETURN_FALSE(type, arg) \
  IOCTL_OR_ERROR_RETURN_VALUE(type, arg, false)

#define IOCTL_OR_LOG_ERROR(type, arg)                                 \
  do {                                                                \
    if (device_->Ioctl(type, arg) != 0)                               \
      PLOG(ERROR) << __FUNCTION__ << "(): ioctl() failed: " << #type; \
  } while (0)
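
// The IOCTL_* and NOTIFY_ERROR helpers above expect a |device_| member and a
// SetErrorState() method in the enclosing scope. A typical call site later in
// this file looks like:
//   IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_QUERYCAP, &caps);
// which logs the failed ioctl and returns (false, in this case) from the
// calling function on error.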

class V4L2SliceVideoDecodeAccelerator::V4L2DecodeSurface
    : public base::RefCounted<V4L2DecodeSurface> {
 public:
  using ReleaseCB = base::Callback<void(int)>;

  V4L2DecodeSurface(int32 bitstream_id,
                    int input_record,
                    int output_record,
                    const ReleaseCB& release_cb);

  // Mark the surface as decoded. This will also release all references, as
  // they are not needed anymore.
  void SetDecoded();
  bool decoded() const { return decoded_; }

  int32 bitstream_id() const { return bitstream_id_; }
  int input_record() const { return input_record_; }
  int output_record() const { return output_record_; }
  uint32_t config_store() const { return config_store_; }

  // Take references to each reference surface and keep them until the
  // target surface is decoded.
  void SetReferenceSurfaces(
      const std::vector<scoped_refptr<V4L2DecodeSurface>>& ref_surfaces);

  std::string ToString() const;

 private:
  friend class base::RefCounted<V4L2DecodeSurface>;
  ~V4L2DecodeSurface();

  int32 bitstream_id_;
  int input_record_;
  int output_record_;
  uint32_t config_store_;

  bool decoded_;
  ReleaseCB release_cb_;

  std::vector<scoped_refptr<V4L2DecodeSurface>> reference_surfaces_;

  DISALLOW_COPY_AND_ASSIGN(V4L2DecodeSurface);
};

V4L2SliceVideoDecodeAccelerator::V4L2DecodeSurface::V4L2DecodeSurface(
    int32 bitstream_id,
    int input_record,
    int output_record,
    const ReleaseCB& release_cb)
    : bitstream_id_(bitstream_id),
      input_record_(input_record),
      output_record_(output_record),
      config_store_(input_record + 1),
      decoded_(false),
      release_cb_(release_cb) {
}

V4L2SliceVideoDecodeAccelerator::V4L2DecodeSurface::~V4L2DecodeSurface() {
  DVLOGF(5) << "Releasing output record id=" << output_record_;
  release_cb_.Run(output_record_);
}
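
// Note: |config_store_| is derived from the input buffer index. Config store
// ids must be non-zero (EnqueueInputRecord() below DCHECKs config_store > 0),
// which is why the constructor uses |input_record + 1| rather than the raw
// index.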

void V4L2SliceVideoDecodeAccelerator::V4L2DecodeSurface::SetReferenceSurfaces(
    const std::vector<scoped_refptr<V4L2DecodeSurface>>& ref_surfaces) {
  DCHECK(reference_surfaces_.empty());
  reference_surfaces_ = ref_surfaces;
}

void V4L2SliceVideoDecodeAccelerator::V4L2DecodeSurface::SetDecoded() {
  decoded_ = true;

  // We can now drop references to all reference surfaces for this surface
  // as we are done with decoding.
  reference_surfaces_.clear();
}

std::string V4L2SliceVideoDecodeAccelerator::V4L2DecodeSurface::ToString()
    const {
  std::string out;
  base::StringAppendF(&out, "Buffer %d -> %d. ", input_record_, output_record_);
  base::StringAppendF(&out, "Reference surfaces:");
  for (const auto& ref : reference_surfaces_) {
    DCHECK_NE(ref->output_record(), output_record_);
    base::StringAppendF(&out, " %d", ref->output_record());
  }
  return out;
}

V4L2SliceVideoDecodeAccelerator::InputRecord::InputRecord()
    : input_id(-1), address(nullptr), length(0), bytes_used(0),
      at_device(false) {
}

V4L2SliceVideoDecodeAccelerator::OutputRecord::OutputRecord()
    : at_device(false),
      at_client(false),
      egl_image(EGL_NO_IMAGE_KHR),
      egl_sync(EGL_NO_SYNC_KHR),
      picture_id(-1),
      cleared(false) {
}

struct V4L2SliceVideoDecodeAccelerator::BitstreamBufferRef {
  BitstreamBufferRef(
      base::WeakPtr<VideoDecodeAccelerator::Client>& client,
      const scoped_refptr<base::SingleThreadTaskRunner>& client_task_runner,
      base::SharedMemory* shm,
      size_t size,
      int32 input_id);
  ~BitstreamBufferRef();
  const base::WeakPtr<VideoDecodeAccelerator::Client> client;
  const scoped_refptr<base::SingleThreadTaskRunner> client_task_runner;
  const scoped_ptr<base::SharedMemory> shm;
  const size_t size;
  const int32 input_id;
};

V4L2SliceVideoDecodeAccelerator::BitstreamBufferRef::BitstreamBufferRef(
    base::WeakPtr<VideoDecodeAccelerator::Client>& client,
    const scoped_refptr<base::SingleThreadTaskRunner>& client_task_runner,
    base::SharedMemory* shm,
    size_t size,
    int32 input_id)
    : client(client),
      client_task_runner(client_task_runner),
      shm(shm),
      size(size),
      input_id(input_id) {
}

V4L2SliceVideoDecodeAccelerator::BitstreamBufferRef::~BitstreamBufferRef() {
  if (input_id >= 0) {
    DVLOGF(5) << "returning input_id: " << input_id;
    client_task_runner->PostTask(
        FROM_HERE,
        base::Bind(&VideoDecodeAccelerator::Client::NotifyEndOfBitstreamBuffer,
                   client, input_id));
  }
}

struct V4L2SliceVideoDecodeAccelerator::EGLSyncKHRRef {
  EGLSyncKHRRef(EGLDisplay egl_display, EGLSyncKHR egl_sync);
  ~EGLSyncKHRRef();
  EGLDisplay const egl_display;
  EGLSyncKHR egl_sync;
};

V4L2SliceVideoDecodeAccelerator::EGLSyncKHRRef::EGLSyncKHRRef(
    EGLDisplay egl_display,
    EGLSyncKHR egl_sync)
    : egl_display(egl_display), egl_sync(egl_sync) {
}

V4L2SliceVideoDecodeAccelerator::EGLSyncKHRRef::~EGLSyncKHRRef() {
  // We don't check for eglDestroySyncKHR failures, because if we get here
  // with a valid sync object, something went wrong and we are getting
  // destroyed anyway.
  if (egl_sync != EGL_NO_SYNC_KHR)
    eglDestroySyncKHR(egl_display, egl_sync);
}

struct V4L2SliceVideoDecodeAccelerator::PictureRecord {
  PictureRecord(bool cleared, const media::Picture& picture);
  ~PictureRecord();
  bool cleared;  // Whether the texture is cleared and safe to render from.
  media::Picture picture;  // The decoded picture.
};

V4L2SliceVideoDecodeAccelerator::PictureRecord::PictureRecord(
    bool cleared,
    const media::Picture& picture)
    : cleared(cleared), picture(picture) {
}

V4L2SliceVideoDecodeAccelerator::PictureRecord::~PictureRecord() {
}

class V4L2SliceVideoDecodeAccelerator::V4L2H264Accelerator
    : public H264Decoder::H264Accelerator {
 public:
  V4L2H264Accelerator(V4L2SliceVideoDecodeAccelerator* v4l2_dec);
  ~V4L2H264Accelerator() override;

  // H264Decoder::H264Accelerator implementation.
  scoped_refptr<H264Picture> CreateH264Picture() override;

  bool SubmitFrameMetadata(const media::H264SPS* sps,
                           const media::H264PPS* pps,
                           const H264DPB& dpb,
                           const H264Picture::Vector& ref_pic_listp0,
                           const H264Picture::Vector& ref_pic_listb0,
                           const H264Picture::Vector& ref_pic_listb1,
                           const scoped_refptr<H264Picture>& pic) override;

  bool SubmitSlice(const media::H264PPS* pps,
                   const media::H264SliceHeader* slice_hdr,
                   const H264Picture::Vector& ref_pic_list0,
                   const H264Picture::Vector& ref_pic_list1,
                   const scoped_refptr<H264Picture>& pic,
                   const uint8_t* data,
                   size_t size) override;

  bool SubmitDecode(const scoped_refptr<H264Picture>& pic) override;
  bool OutputPicture(const scoped_refptr<H264Picture>& pic) override;

  void Reset() override;

 private:
  // Max size of reference list.
  static const size_t kDPBIndicesListSize = 32;
  void H264PictureListToDPBIndicesList(const H264Picture::Vector& src_pic_list,
                                       uint8_t dst_list[kDPBIndicesListSize]);

  void H264DPBToV4L2DPB(
      const H264DPB& dpb,
      std::vector<scoped_refptr<V4L2DecodeSurface>>* ref_surfaces);

  scoped_refptr<V4L2DecodeSurface> H264PictureToV4L2DecodeSurface(
      const scoped_refptr<H264Picture>& pic);

  size_t num_slices_;
  V4L2SliceVideoDecodeAccelerator* v4l2_dec_;

  // TODO(posciak): This should be queried from hardware once supported.
  static const size_t kMaxSlices = 16;
  struct v4l2_ctrl_h264_slice_param v4l2_slice_params_[kMaxSlices];
  struct v4l2_ctrl_h264_decode_param v4l2_decode_param_;

  DISALLOW_COPY_AND_ASSIGN(V4L2H264Accelerator);
};

class V4L2SliceVideoDecodeAccelerator::V4L2VP8Accelerator
    : public VP8Decoder::VP8Accelerator {
 public:
  V4L2VP8Accelerator(V4L2SliceVideoDecodeAccelerator* v4l2_dec);
  ~V4L2VP8Accelerator() override;

  // VP8Decoder::VP8Accelerator implementation.
  scoped_refptr<VP8Picture> CreateVP8Picture() override;

  bool SubmitDecode(const scoped_refptr<VP8Picture>& pic,
                    const media::Vp8FrameHeader* frame_hdr,
                    const scoped_refptr<VP8Picture>& last_frame,
                    const scoped_refptr<VP8Picture>& golden_frame,
                    const scoped_refptr<VP8Picture>& alt_frame) override;

  bool OutputPicture(const scoped_refptr<VP8Picture>& pic) override;

 private:
  scoped_refptr<V4L2DecodeSurface> VP8PictureToV4L2DecodeSurface(
      const scoped_refptr<VP8Picture>& pic);

  V4L2SliceVideoDecodeAccelerator* v4l2_dec_;

  DISALLOW_COPY_AND_ASSIGN(V4L2VP8Accelerator);
};

// Codec-specific subclasses of software decoder picture classes.
// This allows us to keep decoders oblivious of our implementation details.
class V4L2H264Picture : public H264Picture {
 public:
  V4L2H264Picture(const scoped_refptr<
      V4L2SliceVideoDecodeAccelerator::V4L2DecodeSurface>& dec_surface);

  V4L2H264Picture* AsV4L2H264Picture() override { return this; }
  scoped_refptr<V4L2SliceVideoDecodeAccelerator::V4L2DecodeSurface>
      dec_surface() { return dec_surface_; }

 private:
  ~V4L2H264Picture() override;

  scoped_refptr<V4L2SliceVideoDecodeAccelerator::V4L2DecodeSurface>
      dec_surface_;

  DISALLOW_COPY_AND_ASSIGN(V4L2H264Picture);
};

V4L2H264Picture::V4L2H264Picture(const scoped_refptr<
    V4L2SliceVideoDecodeAccelerator::V4L2DecodeSurface>& dec_surface)
    : dec_surface_(dec_surface) {
}

V4L2H264Picture::~V4L2H264Picture() {
}

class V4L2VP8Picture : public VP8Picture {
 public:
  V4L2VP8Picture(const scoped_refptr<
      V4L2SliceVideoDecodeAccelerator::V4L2DecodeSurface>& dec_surface);

  V4L2VP8Picture* AsV4L2VP8Picture() override { return this; }
  scoped_refptr<V4L2SliceVideoDecodeAccelerator::V4L2DecodeSurface>
      dec_surface() { return dec_surface_; }

 private:
  ~V4L2VP8Picture() override;

  scoped_refptr<V4L2SliceVideoDecodeAccelerator::V4L2DecodeSurface>
      dec_surface_;

  DISALLOW_COPY_AND_ASSIGN(V4L2VP8Picture);
};

V4L2VP8Picture::V4L2VP8Picture(const scoped_refptr<
    V4L2SliceVideoDecodeAccelerator::V4L2DecodeSurface>& dec_surface)
    : dec_surface_(dec_surface) {
}

V4L2VP8Picture::~V4L2VP8Picture() {
}

V4L2SliceVideoDecodeAccelerator::V4L2SliceVideoDecodeAccelerator(
    const scoped_refptr<V4L2Device>& device,
    EGLDisplay egl_display,
    EGLContext egl_context,
    const base::WeakPtr<Client>& io_client,
    const base::Callback<bool(void)>& make_context_current,
    const scoped_refptr<base::SingleThreadTaskRunner>& io_task_runner)
    : input_planes_count_(0),
      output_planes_count_(0),
      child_task_runner_(base::ThreadTaskRunnerHandle::Get()),
      io_task_runner_(io_task_runner),
      io_client_(io_client),
      device_(device),
      decoder_thread_("V4L2SliceVideoDecodeAcceleratorThread"),
      device_poll_thread_("V4L2SliceVideoDecodeAcceleratorDevicePollThread"),
      input_streamon_(false),
      input_buffer_queued_count_(0),
      output_streamon_(false),
      output_buffer_queued_count_(0),
      video_profile_(media::VIDEO_CODEC_PROFILE_UNKNOWN),
      output_format_fourcc_(0),
      state_(kUninitialized),
      decoder_flushing_(false),
      decoder_resetting_(false),
      surface_set_change_pending_(false),
      picture_clearing_count_(0),
      pictures_assigned_(false, false),
      make_context_current_(make_context_current),
      egl_display_(egl_display),
      egl_context_(egl_context),
      weak_this_factory_(this) {
  weak_this_ = weak_this_factory_.GetWeakPtr();
}

V4L2SliceVideoDecodeAccelerator::~V4L2SliceVideoDecodeAccelerator() {
  DCHECK(child_task_runner_->BelongsToCurrentThread());
  DCHECK(!decoder_thread_.IsRunning());
  DCHECK(!device_poll_thread_.IsRunning());

  DCHECK(input_buffer_map_.empty());
  DCHECK(output_buffer_map_.empty());
}

void V4L2SliceVideoDecodeAccelerator::NotifyError(Error error) {
  if (!child_task_runner_->BelongsToCurrentThread()) {
    child_task_runner_->PostTask(
        FROM_HERE, base::Bind(&V4L2SliceVideoDecodeAccelerator::NotifyError,
                              weak_this_, error));
    return;
  }

  if (client_) {
    client_->NotifyError(error);
    client_ptr_factory_.reset();
  }
}

bool V4L2SliceVideoDecodeAccelerator::Initialize(
    media::VideoCodecProfile profile,
    VideoDecodeAccelerator::Client* client) {
  DVLOGF(3) << "profile: " << profile;
  DCHECK(child_task_runner_->BelongsToCurrentThread());
  DCHECK_EQ(state_, kUninitialized);

  client_ptr_factory_.reset(
      new base::WeakPtrFactory<VideoDecodeAccelerator::Client>(client));
  client_ = client_ptr_factory_->GetWeakPtr();

  video_profile_ = profile;

  if (video_profile_ >= media::H264PROFILE_MIN &&
      video_profile_ <= media::H264PROFILE_MAX) {
    h264_accelerator_.reset(new V4L2H264Accelerator(this));
    decoder_.reset(new H264Decoder(h264_accelerator_.get()));
  } else if (video_profile_ >= media::VP8PROFILE_MIN &&
             video_profile_ <= media::VP8PROFILE_MAX) {
    vp8_accelerator_.reset(new V4L2VP8Accelerator(this));
    decoder_.reset(new VP8Decoder(vp8_accelerator_.get()));
  } else {
    DLOG(ERROR) << "Unsupported profile " << video_profile_;
    return false;
  }

  // TODO(posciak): This needs to be queried once supported.
  input_planes_count_ = 1;
  output_planes_count_ = 1;

  if (egl_display_ == EGL_NO_DISPLAY) {
    LOG(ERROR) << "Initialize(): could not get EGLDisplay";
    return false;
  }

  // We need the context to be initialized to query extensions.
  if (!make_context_current_.Run()) {
    LOG(ERROR) << "Initialize(): could not make context current";
    return false;
  }

  if (!gfx::g_driver_egl.ext.b_EGL_KHR_fence_sync) {
    LOG(ERROR) << "Initialize(): context does not have EGL_KHR_fence_sync";
    return false;
  }

  // Capabilities check.
  struct v4l2_capability caps;
  const __u32 kCapsRequired =
      V4L2_CAP_VIDEO_CAPTURE_MPLANE |
      V4L2_CAP_VIDEO_OUTPUT_MPLANE |
      V4L2_CAP_STREAMING;
  IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_QUERYCAP, &caps);
  if ((caps.capabilities & kCapsRequired) != kCapsRequired) {
    DLOG(ERROR) << "Initialize(): ioctl() failed: VIDIOC_QUERYCAP"
                   ", caps check failed: 0x" << std::hex << caps.capabilities;
    return false;
  }

  if (!SetupFormats())
    return false;

  if (!decoder_thread_.Start()) {
    DLOG(ERROR) << "Initialize(): device thread failed to start";
    return false;
  }
  decoder_thread_task_runner_ = decoder_thread_.task_runner();

  state_ = kInitialized;

  // InitializeTask will NOTIFY_ERROR on failure.
  decoder_thread_task_runner_->PostTask(
      FROM_HERE, base::Bind(&V4L2SliceVideoDecodeAccelerator::InitializeTask,
                            base::Unretained(this)));

  DVLOGF(1) << "V4L2SliceVideoDecodeAccelerator initialized";
  return true;
}

void V4L2SliceVideoDecodeAccelerator::InitializeTask() {
  DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
  DCHECK_EQ(state_, kInitialized);

  if (!CreateInputBuffers())
    NOTIFY_ERROR(PLATFORM_FAILURE);

  // Output buffers will be created once decoder gives us information
  // about their size and required count.
  state_ = kDecoding;
}

void V4L2SliceVideoDecodeAccelerator::Destroy() {
  DCHECK(child_task_runner_->BelongsToCurrentThread());

  if (decoder_thread_.IsRunning()) {
    decoder_thread_task_runner_->PostTask(
        FROM_HERE, base::Bind(&V4L2SliceVideoDecodeAccelerator::DestroyTask,
                              base::Unretained(this)));

    // Wake up decoder thread in case we are waiting in CreateOutputBuffers
    // for client to provide pictures. Since this is Destroy, we won't be
    // getting them anymore (AssignPictureBuffers won't be called).
    pictures_assigned_.Signal();

    // Wait for tasks to finish/early-exit.
    decoder_thread_.Stop();
  }

  delete this;
  DVLOGF(3) << "Destroyed";
}

void V4L2SliceVideoDecodeAccelerator::DestroyTask() {
  DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());

  decoder_current_bitstream_buffer_.reset();
  while (!decoder_input_queue_.empty())
    decoder_input_queue_.pop();

  // Stop streaming and the device_poll_thread_.
  StopDevicePoll(false);

  DestroyInputBuffers();
  DestroyOutputs(false);

  DCHECK(surfaces_at_device_.empty());
  DCHECK(surfaces_at_display_.empty());
  DCHECK(decoder_display_queue_.empty());
}

bool V4L2SliceVideoDecodeAccelerator::SetupFormats() {
  DCHECK_EQ(state_, kUninitialized);

  __u32 input_format_fourcc =
      V4L2Device::VideoCodecProfileToV4L2PixFmt(video_profile_, true);
  if (!input_format_fourcc) {
    return false;
  }

  size_t input_size;
  gfx::Size max_resolution, min_resolution;
  device_->GetSupportedResolution(input_format_fourcc, &min_resolution,
                                  &max_resolution);
  if (max_resolution.width() > 1920 && max_resolution.height() > 1088)
    input_size = kInputBufferMaxSizeFor4k;
  else
    input_size = kInputBufferMaxSizeFor1080p;

  struct v4l2_format format;
  memset(&format, 0, sizeof(format));
  format.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
  format.fmt.pix_mp.pixelformat = input_format_fourcc;
  format.fmt.pix_mp.plane_fmt[0].sizeimage = input_size;
  format.fmt.pix_mp.num_planes = input_planes_count_;
  IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_S_FMT, &format);

  // We have to set up the format for output, because the driver may not allow
  // changing it once we start streaming; whether it can support our chosen
  // output format or not may depend on the input format.
  struct v4l2_fmtdesc fmtdesc;
  memset(&fmtdesc, 0, sizeof(fmtdesc));
  fmtdesc.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
  output_format_fourcc_ = 0;
  while (device_->Ioctl(VIDIOC_ENUM_FMT, &fmtdesc) == 0) {
    if (device_->CanCreateEGLImageFrom(fmtdesc.pixelformat)) {
      output_format_fourcc_ = fmtdesc.pixelformat;
      break;
    }
    ++fmtdesc.index;
  }

  if (output_format_fourcc_ == 0) {
    LOG(ERROR) << "Could not find a usable output format";
    return false;
  }

  // Only set fourcc for output; resolution, etc., will come from the
  // driver once it extracts it from the stream.
  memset(&format, 0, sizeof(format));
  format.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
  format.fmt.pix_mp.pixelformat = output_format_fourcc_;
  format.fmt.pix_mp.num_planes = output_planes_count_;
  IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_S_FMT, &format);

  return true;
}

bool V4L2SliceVideoDecodeAccelerator::CreateInputBuffers() {
  DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
  DCHECK(!input_streamon_);
  DCHECK(input_buffer_map_.empty());

  struct v4l2_requestbuffers reqbufs;
  memset(&reqbufs, 0, sizeof(reqbufs));
  reqbufs.count = kNumInputBuffers;
  reqbufs.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
  reqbufs.memory = V4L2_MEMORY_MMAP;
  IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_REQBUFS, &reqbufs);
  if (reqbufs.count < kNumInputBuffers) {
    PLOG(ERROR) << "Could not allocate enough input buffers";
    return false;
  }
  input_buffer_map_.resize(reqbufs.count);
  for (size_t i = 0; i < input_buffer_map_.size(); ++i) {
    free_input_buffers_.push_back(i);

    // Query for the MEMORY_MMAP pointer.
    struct v4l2_plane planes[VIDEO_MAX_PLANES];
    struct v4l2_buffer buffer;
    memset(&buffer, 0, sizeof(buffer));
    memset(planes, 0, sizeof(planes));
    buffer.index = i;
    buffer.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
    buffer.memory = V4L2_MEMORY_MMAP;
    buffer.m.planes = planes;
    buffer.length = input_planes_count_;
    IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_QUERYBUF, &buffer);
    void* address = device_->Mmap(nullptr,
                                  buffer.m.planes[0].length,
                                  PROT_READ | PROT_WRITE,
                                  MAP_SHARED,
                                  buffer.m.planes[0].m.mem_offset);
    if (address == MAP_FAILED) {
      PLOG(ERROR) << "CreateInputBuffers(): mmap() failed";
      return false;
    }
    input_buffer_map_[i].address = address;
    input_buffer_map_[i].length = buffer.m.planes[0].length;
  }

  return true;
}

bool V4L2SliceVideoDecodeAccelerator::CreateOutputBuffers() {
  DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
  DCHECK(!output_streamon_);
  DCHECK(output_buffer_map_.empty());
  DCHECK(surfaces_at_display_.empty());
  DCHECK(surfaces_at_device_.empty());

  visible_size_ = decoder_->GetPicSize();
  size_t num_pictures = decoder_->GetRequiredNumOfPictures();

  DCHECK_GT(num_pictures, 0u);
  DCHECK(!visible_size_.IsEmpty());

  struct v4l2_format format;
  memset(&format, 0, sizeof(format));
  format.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
  format.fmt.pix_mp.pixelformat = output_format_fourcc_;
  format.fmt.pix_mp.width = visible_size_.width();
  format.fmt.pix_mp.height = visible_size_.height();
  format.fmt.pix_mp.num_planes = input_planes_count_;

  if (device_->Ioctl(VIDIOC_S_FMT, &format) != 0) {
    PLOG(ERROR) << "Failed setting format to: " << output_format_fourcc_;
    NOTIFY_ERROR(PLATFORM_FAILURE);
    return false;
  }

  coded_size_.SetSize(base::checked_cast<int>(format.fmt.pix_mp.width),
                      base::checked_cast<int>(format.fmt.pix_mp.height));
  DCHECK_EQ(coded_size_.width() % 16, 0);
  DCHECK_EQ(coded_size_.height() % 16, 0);

  if (!gfx::Rect(coded_size_).Contains(gfx::Rect(visible_size_))) {
    LOG(ERROR) << "Got invalid adjusted coded size: " << coded_size_.ToString();
    return false;
  }

  struct v4l2_requestbuffers reqbufs;
  memset(&reqbufs, 0, sizeof(reqbufs));
  reqbufs.count = num_pictures;
  reqbufs.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
  reqbufs.memory = V4L2_MEMORY_MMAP;
  IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_REQBUFS, &reqbufs);

  if (reqbufs.count < num_pictures) {
    PLOG(ERROR) << "Could not allocate enough output buffers";
    return false;
  }

  output_buffer_map_.resize(reqbufs.count);

  DVLOGF(3) << "buffer_count=" << output_buffer_map_.size()
            << ", visible size=" << visible_size_.ToString()
            << ", coded size=" << coded_size_.ToString();

  child_task_runner_->PostTask(
      FROM_HERE,
      base::Bind(&VideoDecodeAccelerator::Client::ProvidePictureBuffers,
                 client_, output_buffer_map_.size(), coded_size_,
                 device_->GetTextureTarget()));

  // Wait for the client to call AssignPictureBuffers() on the Child thread.
  // We do this, because if we continue decoding without finishing buffer
  // allocation, we may end up Resetting before AssignPictureBuffers arrives,
  // resulting in unnecessary complications and subtle bugs.
  pictures_assigned_.Wait();

  return true;
}

void V4L2SliceVideoDecodeAccelerator::DestroyInputBuffers() {
  DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread() ||
         !decoder_thread_.IsRunning());
  DCHECK(!input_streamon_);

  for (auto& input_record : input_buffer_map_) {
    if (input_record.address != nullptr)
      device_->Munmap(input_record.address, input_record.length);
  }

  struct v4l2_requestbuffers reqbufs;
  memset(&reqbufs, 0, sizeof(reqbufs));
  reqbufs.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
  reqbufs.memory = V4L2_MEMORY_MMAP;
  IOCTL_OR_LOG_ERROR(VIDIOC_REQBUFS, &reqbufs);

  input_buffer_map_.clear();
  free_input_buffers_.clear();
}

void V4L2SliceVideoDecodeAccelerator::DismissPictures(
    std::vector<int32> picture_buffer_ids,
    base::WaitableEvent* done) {
  DCHECK(child_task_runner_->BelongsToCurrentThread());

  for (auto picture_buffer_id : picture_buffer_ids) {
    DVLOGF(1) << "dismissing PictureBuffer id=" << picture_buffer_id;
    client_->DismissPictureBuffer(picture_buffer_id);
  }

  done->Signal();
}

void V4L2SliceVideoDecodeAccelerator::DevicePollTask(bool poll_device) {
  DCHECK_EQ(device_poll_thread_.message_loop(), base::MessageLoop::current());

  bool event_pending;
  if (!device_->Poll(poll_device, &event_pending)) {
    NOTIFY_ERROR(PLATFORM_FAILURE);
    return;
  }

  // All processing should happen on ServiceDeviceTask(), since we shouldn't
  // touch decoder state from this thread.
  decoder_thread_task_runner_->PostTask(
      FROM_HERE, base::Bind(&V4L2SliceVideoDecodeAccelerator::ServiceDeviceTask,
                            base::Unretained(this)));
}

void V4L2SliceVideoDecodeAccelerator::ServiceDeviceTask() {
  DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());

  // ServiceDeviceTask() should only ever be scheduled from DevicePollTask().
  Dequeue();
  SchedulePollIfNeeded();
}

void V4L2SliceVideoDecodeAccelerator::SchedulePollIfNeeded() {
  DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());

  if (!device_poll_thread_.IsRunning()) {
    DVLOGF(2) << "Device poll thread stopped, will not schedule poll";
    return;
  }

  DCHECK(input_streamon_ || output_streamon_);

  if (input_buffer_queued_count_ + output_buffer_queued_count_ == 0) {
    DVLOGF(4) << "No buffers queued, will not schedule poll";
    return;
  }

  DVLOGF(4) << "Scheduling device poll task";

  device_poll_thread_.message_loop()->PostTask(
      FROM_HERE, base::Bind(&V4L2SliceVideoDecodeAccelerator::DevicePollTask,
                            base::Unretained(this), true));

  DVLOGF(2) << "buffer counts: "
            << "INPUT[" << decoder_input_queue_.size() << "]"
            << " => DEVICE["
            << free_input_buffers_.size() << "+"
            << input_buffer_queued_count_ << "/"
            << input_buffer_map_.size() << "]->["
            << free_output_buffers_.size() << "+"
            << output_buffer_queued_count_ << "/"
            << output_buffer_map_.size() << "]"
            << " => DISPLAYQ[" << decoder_display_queue_.size() << "]"
            << " => CLIENT[" << surfaces_at_display_.size() << "]";
}
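
// The polling cycle: SchedulePollIfNeeded() posts DevicePollTask() to
// |device_poll_thread_|, which blocks in V4L2Device::Poll() and, once the
// device has work ready, posts ServiceDeviceTask() back to the decoder thread;
// ServiceDeviceTask() dequeues finished buffers and schedules another poll
// while any buffers remain queued at the device.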

void V4L2SliceVideoDecodeAccelerator::Enqueue(
    const scoped_refptr<V4L2DecodeSurface>& dec_surface) {
  DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());

  const int old_inputs_queued = input_buffer_queued_count_;
  const int old_outputs_queued = output_buffer_queued_count_;

  if (!EnqueueInputRecord(dec_surface->input_record(),
                          dec_surface->config_store())) {
    DVLOGF(1) << "Failed queueing an input buffer";
    NOTIFY_ERROR(PLATFORM_FAILURE);
    return;
  }

  if (!EnqueueOutputRecord(dec_surface->output_record())) {
    DVLOGF(1) << "Failed queueing an output buffer";
    NOTIFY_ERROR(PLATFORM_FAILURE);
    return;
  }

  const bool inserted =
      surfaces_at_device_.insert(std::make_pair(dec_surface->output_record(),
                                                dec_surface)).second;
  DCHECK(inserted);

  if (old_inputs_queued == 0 && old_outputs_queued == 0)
    SchedulePollIfNeeded();
}
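
// Each decode surface pairs one input (OUTPUT queue) buffer with one output
// (CAPTURE queue) buffer, queued together in Enqueue() above;
// |surfaces_at_device_| is keyed by the output record index so that Dequeue()
// can map a dequeued CAPTURE buffer back to its surface.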

void V4L2SliceVideoDecodeAccelerator::Dequeue() {
  DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());

  struct v4l2_buffer dqbuf;
  struct v4l2_plane planes[VIDEO_MAX_PLANES];
  while (input_buffer_queued_count_ > 0) {
    DCHECK(input_streamon_);
    memset(&dqbuf, 0, sizeof(dqbuf));
    memset(&planes, 0, sizeof(planes));
    dqbuf.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
    dqbuf.memory = V4L2_MEMORY_USERPTR;
    dqbuf.m.planes = planes;
    dqbuf.length = input_planes_count_;
    if (device_->Ioctl(VIDIOC_DQBUF, &dqbuf) != 0) {
      if (errno == EAGAIN) {
        // EAGAIN if we're just out of buffers to dequeue.
        break;
      }
      PLOG(ERROR) << "ioctl() failed: VIDIOC_DQBUF";
      NOTIFY_ERROR(PLATFORM_FAILURE);
      return;
    }
    InputRecord& input_record = input_buffer_map_[dqbuf.index];
    DCHECK(input_record.at_device);
    input_record.at_device = false;
    ReuseInputBuffer(dqbuf.index);
    input_buffer_queued_count_--;
    DVLOGF(4) << "Dequeued input=" << dqbuf.index
              << " count: " << input_buffer_queued_count_;
  }

  while (output_buffer_queued_count_ > 0) {
    DCHECK(output_streamon_);
    memset(&dqbuf, 0, sizeof(dqbuf));
    memset(&planes, 0, sizeof(planes));
    dqbuf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
    dqbuf.memory = V4L2_MEMORY_MMAP;
    dqbuf.m.planes = planes;
    dqbuf.length = output_planes_count_;
    if (device_->Ioctl(VIDIOC_DQBUF, &dqbuf) != 0) {
      if (errno == EAGAIN) {
        // EAGAIN if we're just out of buffers to dequeue.
        break;
      }
      PLOG(ERROR) << "ioctl() failed: VIDIOC_DQBUF";
      NOTIFY_ERROR(PLATFORM_FAILURE);
      return;
    }
    OutputRecord& output_record = output_buffer_map_[dqbuf.index];
    DCHECK(output_record.at_device);
    output_record.at_device = false;
    output_buffer_queued_count_--;
    DVLOGF(3) << "Dequeued output=" << dqbuf.index
              << " count " << output_buffer_queued_count_;

    V4L2DecodeSurfaceByOutputId::iterator it =
        surfaces_at_device_.find(dqbuf.index);
    if (it == surfaces_at_device_.end()) {
      DLOG(ERROR) << "Got invalid surface from device.";
      NOTIFY_ERROR(PLATFORM_FAILURE);
      return;
    }

    it->second->SetDecoded();
    surfaces_at_device_.erase(it);
  }

  // A frame was decoded, see if we can output it.
  TryOutputSurfaces();

  ProcessPendingEventsIfNeeded();
}
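
// Dequeuing a CAPTURE buffer marks its surface as decoded and releases the
// device's reference to it; since pending flush/reset/surface-set-change
// events only complete once |surfaces_at_device_| is empty, Dequeue() ends by
// calling ProcessPendingEventsIfNeeded().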

void V4L2SliceVideoDecodeAccelerator::ProcessPendingEventsIfNeeded() {
  // Process pending events, if any, in the correct order.
  // We always first process the surface set change, as it is an internal
  // event from the decoder and interleaving it with external requests would
  // put the decoder in an undefined state.
  FinishSurfaceSetChangeIfNeeded();

  // Process external (client) requests.
  FinishFlushIfNeeded();
  FinishResetIfNeeded();
}

void V4L2SliceVideoDecodeAccelerator::ReuseInputBuffer(int index) {
  DVLOGF(4) << "Reusing input buffer, index=" << index;
  DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());

  DCHECK_LT(index, static_cast<int>(input_buffer_map_.size()));
  InputRecord& input_record = input_buffer_map_[index];

  DCHECK(!input_record.at_device);
  input_record.input_id = -1;
  input_record.bytes_used = 0;

  DCHECK_EQ(std::count(free_input_buffers_.begin(), free_input_buffers_.end(),
                       index), 0);
  free_input_buffers_.push_back(index);
}

void V4L2SliceVideoDecodeAccelerator::ReuseOutputBuffer(int index) {
  DVLOGF(4) << "Reusing output buffer, index=" << index;
  DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());

  DCHECK_LT(index, static_cast<int>(output_buffer_map_.size()));
  OutputRecord& output_record = output_buffer_map_[index];
  DCHECK(!output_record.at_device);
  DCHECK(!output_record.at_client);

  DCHECK_EQ(std::count(free_output_buffers_.begin(), free_output_buffers_.end(),
                       index), 0);
  free_output_buffers_.push_back(index);

  ScheduleDecodeBufferTaskIfNeeded();
}

bool V4L2SliceVideoDecodeAccelerator::EnqueueInputRecord(
    int index,
    uint32_t config_store) {
  DCHECK_LT(index, static_cast<int>(input_buffer_map_.size()));
  DCHECK_GT(config_store, 0u);

  // Enqueue an input (VIDEO_OUTPUT) buffer for an input video frame.
  InputRecord& input_record = input_buffer_map_[index];
  DCHECK(!input_record.at_device);
  struct v4l2_buffer qbuf;
  struct v4l2_plane qbuf_planes[VIDEO_MAX_PLANES];
  memset(&qbuf, 0, sizeof(qbuf));
  memset(qbuf_planes, 0, sizeof(qbuf_planes));
  qbuf.index = index;
  qbuf.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
  qbuf.memory = V4L2_MEMORY_MMAP;
  qbuf.m.planes = qbuf_planes;
  qbuf.m.planes[0].bytesused = input_record.bytes_used;
  qbuf.length = input_planes_count_;
  qbuf.config_store = config_store;
  IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_QBUF, &qbuf);
  input_record.at_device = true;
  input_buffer_queued_count_++;
  DVLOGF(4) << "Enqueued input=" << qbuf.index
            << " count: " << input_buffer_queued_count_;

  return true;
}

bool V4L2SliceVideoDecodeAccelerator::EnqueueOutputRecord(int index) {
  DCHECK_LT(index, static_cast<int>(output_buffer_map_.size()));

  // Enqueue an output (VIDEO_CAPTURE) buffer.
  OutputRecord& output_record = output_buffer_map_[index];
  DCHECK(!output_record.at_device);
  DCHECK(!output_record.at_client);
  DCHECK_NE(output_record.egl_image, EGL_NO_IMAGE_KHR);
  DCHECK_NE(output_record.picture_id, -1);

  if (output_record.egl_sync != EGL_NO_SYNC_KHR) {
    // If we have to wait for completion, wait. Note that
    // free_output_buffers_ is a FIFO queue, so we always wait on the
    // buffer that has been in the queue the longest.
    if (eglClientWaitSyncKHR(egl_display_, output_record.egl_sync, 0,
                             EGL_FOREVER_KHR) == EGL_FALSE) {
      // This will cause tearing, but is safe otherwise.
      DVLOGF(1) << "eglClientWaitSyncKHR failed!";
    }
    if (eglDestroySyncKHR(egl_display_, output_record.egl_sync) != EGL_TRUE) {
      LOGF(ERROR) << "eglDestroySyncKHR failed!";
      NOTIFY_ERROR(PLATFORM_FAILURE);
      return false;
    }
    output_record.egl_sync = EGL_NO_SYNC_KHR;
  }

  struct v4l2_buffer qbuf;
  struct v4l2_plane qbuf_planes[VIDEO_MAX_PLANES];
  memset(&qbuf, 0, sizeof(qbuf));
  memset(qbuf_planes, 0, sizeof(qbuf_planes));
  qbuf.index = index;
  qbuf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
  qbuf.memory = V4L2_MEMORY_MMAP;
  qbuf.m.planes = qbuf_planes;
  qbuf.length = output_planes_count_;
  IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_QBUF, &qbuf);
  output_record.at_device = true;
  output_buffer_queued_count_++;
  DVLOGF(4) << "Enqueued output=" << qbuf.index
            << " count: " << output_buffer_queued_count_;

  return true;
}

bool V4L2SliceVideoDecodeAccelerator::StartDevicePoll() {
  DVLOGF(3) << "Starting device poll";
  DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
  DCHECK(!device_poll_thread_.IsRunning());

  // Start up the device poll thread and schedule its first DevicePollTask().
  if (!device_poll_thread_.Start()) {
    DLOG(ERROR) << "StartDevicePoll(): Device thread failed to start";
    NOTIFY_ERROR(PLATFORM_FAILURE);
    return false;
  }
  if (!input_streamon_) {
    __u32 type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
    IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_STREAMON, &type);
    input_streamon_ = true;
  }

  if (!output_streamon_) {
    __u32 type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
    IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_STREAMON, &type);
    output_streamon_ = true;
  }

  device_poll_thread_.message_loop()->PostTask(
      FROM_HERE, base::Bind(&V4L2SliceVideoDecodeAccelerator::DevicePollTask,
                            base::Unretained(this), true));

  return true;
}

bool V4L2SliceVideoDecodeAccelerator::StopDevicePoll(bool keep_input_state) {
  DVLOGF(3) << "Stopping device poll";
  if (decoder_thread_.IsRunning())
    DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());

  // Signal the DevicePollTask() to stop, and stop the device poll thread.
  if (!device_->SetDevicePollInterrupt()) {
    PLOG(ERROR) << "SetDevicePollInterrupt(): failed";
    NOTIFY_ERROR(PLATFORM_FAILURE);
    return false;
  }
  device_poll_thread_.Stop();
  DVLOGF(3) << "Device poll thread stopped";

  // Clear the interrupt now, to be sure.
  if (!device_->ClearDevicePollInterrupt()) {
    NOTIFY_ERROR(PLATFORM_FAILURE);
    return false;
  }

  if (!keep_input_state) {
    if (input_streamon_) {
      __u32 type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
      IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_STREAMOFF, &type);
    }
    input_streamon_ = false;
  }

  if (output_streamon_) {
    __u32 type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
    IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_STREAMOFF, &type);
  }
  output_streamon_ = false;

  if (!keep_input_state) {
    for (size_t i = 0; i < input_buffer_map_.size(); ++i) {
      InputRecord& input_record = input_buffer_map_[i];
      if (input_record.at_device) {
        input_record.at_device = false;
        ReuseInputBuffer(i);
        input_buffer_queued_count_--;
      }
    }
    DCHECK_EQ(input_buffer_queued_count_, 0);
  }

  // STREAMOFF makes the driver drop all buffers without decoding and DQBUFing,
  // so we mark them all as at_device = false and clear surfaces_at_device_.
  for (size_t i = 0; i < output_buffer_map_.size(); ++i) {
    OutputRecord& output_record = output_buffer_map_[i];
    if (output_record.at_device) {
      output_record.at_device = false;
      output_buffer_queued_count_--;
    }
  }
  surfaces_at_device_.clear();
  DCHECK_EQ(output_buffer_queued_count_, 0);

  // Drop all surfaces that were awaiting decode before being displayed,
  // since we've just cancelled all outstanding decodes.
  while (!decoder_display_queue_.empty())
    decoder_display_queue_.pop();

  DVLOGF(3) << "Device poll stopped";
  return true;
}

void V4L2SliceVideoDecodeAccelerator::Decode(
    const media::BitstreamBuffer& bitstream_buffer) {
  DVLOGF(3) << "input_id=" << bitstream_buffer.id()
            << ", size=" << bitstream_buffer.size();
  DCHECK(io_task_runner_->BelongsToCurrentThread());

  decoder_thread_task_runner_->PostTask(
      FROM_HERE, base::Bind(&V4L2SliceVideoDecodeAccelerator::DecodeTask,
                            base::Unretained(this), bitstream_buffer));
}

void V4L2SliceVideoDecodeAccelerator::DecodeTask(
    const media::BitstreamBuffer& bitstream_buffer) {
  DVLOGF(3) << "input_id=" << bitstream_buffer.id()
            << " size=" << bitstream_buffer.size();
  DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());

  scoped_ptr<BitstreamBufferRef> bitstream_record(new BitstreamBufferRef(
      io_client_, io_task_runner_,
      new base::SharedMemory(bitstream_buffer.handle(), true),
      bitstream_buffer.size(), bitstream_buffer.id()));
  if (!bitstream_record->shm->Map(bitstream_buffer.size())) {
    LOGF(ERROR) << "Could not map bitstream_buffer";
    NOTIFY_ERROR(UNREADABLE_INPUT);
    return;
  }
  DVLOGF(3) << "mapped at=" << bitstream_record->shm->memory();

  decoder_input_queue_.push(
      linked_ptr<BitstreamBufferRef>(bitstream_record.release()));

  ScheduleDecodeBufferTaskIfNeeded();
}

bool V4L2SliceVideoDecodeAccelerator::TrySetNewBistreamBuffer() {
  DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
  DCHECK(!decoder_current_bitstream_buffer_);

  if (decoder_input_queue_.empty())
    return false;

  decoder_current_bitstream_buffer_.reset(
      decoder_input_queue_.front().release());
  decoder_input_queue_.pop();

  if (decoder_current_bitstream_buffer_->input_id == kFlushBufferId) {
    // This is a buffer we queued for ourselves to trigger flush at this time.
    InitiateFlush();
    return false;
  }

  const uint8_t* const data = reinterpret_cast<const uint8_t*>(
      decoder_current_bitstream_buffer_->shm->memory());
  const size_t data_size = decoder_current_bitstream_buffer_->size;
  decoder_->SetStream(data, data_size);

  return true;
}

void V4L2SliceVideoDecodeAccelerator::ScheduleDecodeBufferTaskIfNeeded() {
  DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
  if (state_ == kDecoding) {
    decoder_thread_task_runner_->PostTask(
        FROM_HERE,
        base::Bind(&V4L2SliceVideoDecodeAccelerator::DecodeBufferTask,
                   base::Unretained(this)));
  }
}

void V4L2SliceVideoDecodeAccelerator::DecodeBufferTask() {
  DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());

  if (state_ != kDecoding) {
    DVLOGF(3) << "Early exit, not in kDecoding";
    return;
  }

  while (true) {
    AcceleratedVideoDecoder::DecodeResult res;
    res = decoder_->Decode();
    switch (res) {
      case AcceleratedVideoDecoder::kAllocateNewSurfaces:
        DVLOGF(2) << "Decoder requesting a new set of surfaces";
        InitiateSurfaceSetChange();
        return;

      case AcceleratedVideoDecoder::kRanOutOfStreamData:
        decoder_current_bitstream_buffer_.reset();
        if (!TrySetNewBistreamBuffer())
          return;
        break;

      case AcceleratedVideoDecoder::kRanOutOfSurfaces:
        // No more surfaces for the decoder, we'll come back once we have more.
        DVLOGF(4) << "Ran out of surfaces";
        return;

      case AcceleratedVideoDecoder::kDecodeError:
        DVLOGF(1) << "Error decoding stream";
        NOTIFY_ERROR(PLATFORM_FAILURE);
        return;
    }
  }
}

void V4L2SliceVideoDecodeAccelerator::InitiateSurfaceSetChange() {
  DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());

  DCHECK_EQ(state_, kDecoding);
  state_ = kIdle;

  DCHECK(!surface_set_change_pending_);
  surface_set_change_pending_ = true;

  FinishSurfaceSetChangeIfNeeded();
}

void V4L2SliceVideoDecodeAccelerator::FinishSurfaceSetChangeIfNeeded() {
  DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());

  if (!surface_set_change_pending_ || !surfaces_at_device_.empty())
    return;

  DCHECK_EQ(state_, kIdle);
  DCHECK(decoder_display_queue_.empty());
  // All output buffers should've been returned from decoder and device by now.
  // The only remaining owner of surfaces may be display (client), and we will
  // dismiss them when destroying output buffers below.
  DCHECK_EQ(free_output_buffers_.size() + surfaces_at_display_.size(),
            output_buffer_map_.size());

  // Keep input queue running while we switch outputs.
  if (!StopDevicePoll(true)) {
    NOTIFY_ERROR(PLATFORM_FAILURE);
    return;
  }

  // This will return only once all buffers are dismissed and destroyed.
  // This does not wait until they are displayed however, as display retains
  // references to the buffers bound to textures and will release them
  // after displaying.
  if (!DestroyOutputs(true)) {
    NOTIFY_ERROR(PLATFORM_FAILURE);
    return;
  }

  if (!CreateOutputBuffers()) {
    NOTIFY_ERROR(PLATFORM_FAILURE);
    return;
  }

  if (!StartDevicePoll()) {
    NOTIFY_ERROR(PLATFORM_FAILURE);
    return;
  }

  DVLOGF(3) << "Surface set change finished";

  surface_set_change_pending_ = false;
  state_ = kDecoding;
  ScheduleDecodeBufferTaskIfNeeded();
}

bool V4L2SliceVideoDecodeAccelerator::DestroyOutputs(bool dismiss) {
  DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
  std::vector<EGLImageKHR> egl_images_to_destroy;
  std::vector<int32> picture_buffers_to_dismiss;

  if (output_buffer_map_.empty())
    return true;

  for (auto output_record : output_buffer_map_) {
    DCHECK(!output_record.at_device);

    if (output_record.egl_sync != EGL_NO_SYNC_KHR) {
      if (eglDestroySyncKHR(egl_display_, output_record.egl_sync) != EGL_TRUE)
        DVLOGF(1) << "eglDestroySyncKHR failed.";
    }

    if (output_record.egl_image != EGL_NO_IMAGE_KHR) {
      child_task_runner_->PostTask(
          FROM_HERE,
          base::Bind(base::IgnoreResult(&V4L2Device::DestroyEGLImage), device_,
                     egl_display_, output_record.egl_image));
    }

    picture_buffers_to_dismiss.push_back(output_record.picture_id);
  }

  if (dismiss) {
    DVLOGF(2) << "Scheduling picture dismissal";
    base::WaitableEvent done(false, false);
    child_task_runner_->PostTask(
        FROM_HERE, base::Bind(&V4L2SliceVideoDecodeAccelerator::DismissPictures,
                              weak_this_, picture_buffers_to_dismiss, &done));
    done.Wait();
  }

  // At this point client can't call ReusePictureBuffer on any of the pictures
  // anymore, so it's safe to destroy.
  return DestroyOutputBuffers();
}

bool V4L2SliceVideoDecodeAccelerator::DestroyOutputBuffers() {
  DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread() ||
         !decoder_thread_.IsRunning());
  DCHECK(!output_streamon_);
  DCHECK(surfaces_at_device_.empty());
  DCHECK(decoder_display_queue_.empty());
  DCHECK_EQ(surfaces_at_display_.size() + free_output_buffers_.size(),
            output_buffer_map_.size());

  if (output_buffer_map_.empty())
    return true;

  // It's ok to do this, client will retain references to textures, but we are
  // not interested in reusing the surfaces anymore.
  // This will prevent us from reusing old surfaces in case we have some
  // ReusePictureBuffer() pending on ChildThread already. It's ok to ignore
  // them, because we have already dismissed them (in DestroyOutputs()).
  for (const auto& surface_at_display : surfaces_at_display_) {
    size_t index = surface_at_display.second->output_record();
    DCHECK_LT(index, output_buffer_map_.size());
    OutputRecord& output_record = output_buffer_map_[index];
    DCHECK(output_record.at_client);
    output_record.at_client = false;
    free_output_buffers_.push_back(index);
  }
  surfaces_at_display_.clear();
  DCHECK_EQ(free_output_buffers_.size(), output_buffer_map_.size());

  free_output_buffers_.clear();
  output_buffer_map_.clear();

  struct v4l2_requestbuffers reqbufs;
  memset(&reqbufs, 0, sizeof(reqbufs));
  reqbufs.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
  reqbufs.memory = V4L2_MEMORY_MMAP;
  IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_REQBUFS, &reqbufs);

  return true;
}

void V4L2SliceVideoDecodeAccelerator::AssignPictureBuffers(
    const std::vector<media::PictureBuffer>& buffers) {
  DCHECK(child_task_runner_->BelongsToCurrentThread());

  if (buffers.size() != output_buffer_map_.size()) {
    DLOG(ERROR) << "Failed to provide requested picture buffers. "
                << "(Got " << buffers.size()
                << ", requested " << output_buffer_map_.size() << ")";
    NOTIFY_ERROR(INVALID_ARGUMENT);
    return;
  }

  if (!make_context_current_.Run()) {
    DLOG(ERROR) << "could not make context current";
    NOTIFY_ERROR(PLATFORM_FAILURE);
    return;
  }

  gfx::ScopedTextureBinder bind_restore(GL_TEXTURE_EXTERNAL_OES, 0);

  // It's safe to manipulate all the buffer state here, because the decoder
  // thread is waiting on pictures_assigned_.
  DCHECK(free_output_buffers_.empty());
  for (size_t i = 0; i < output_buffer_map_.size(); ++i) {
    DCHECK(buffers[i].size() == coded_size_);

    OutputRecord& output_record = output_buffer_map_[i];
    DCHECK(!output_record.at_device);
    DCHECK(!output_record.at_client);
    DCHECK_EQ(output_record.egl_image, EGL_NO_IMAGE_KHR);
    DCHECK_EQ(output_record.egl_sync, EGL_NO_SYNC_KHR);
    DCHECK_EQ(output_record.picture_id, -1);
    DCHECK_EQ(output_record.cleared, false);

    EGLImageKHR egl_image = device_->CreateEGLImage(egl_display_,
                                                    egl_context_,
                                                    buffers[i].texture_id(),
                                                    coded_size_,
                                                    i,
                                                    output_format_fourcc_,
                                                    output_planes_count_);
    if (egl_image == EGL_NO_IMAGE_KHR) {
      LOGF(ERROR) << "Could not create EGLImageKHR";
      // Ownership of EGLImages allocated in previous iterations of this loop
      // has been transferred to output_buffer_map_. After we error-out here
      // the destructor will handle their cleanup.
      NOTIFY_ERROR(PLATFORM_FAILURE);
      return;
    }

    output_record.egl_image = egl_image;
    output_record.picture_id = buffers[i].id();
    free_output_buffers_.push_back(i);
    DVLOGF(3) << "buffer[" << i << "]: picture_id=" << output_record.picture_id;
  }

  pictures_assigned_.Signal();
}

void V4L2SliceVideoDecodeAccelerator::ReusePictureBuffer(
    int32 picture_buffer_id) {
  DCHECK(child_task_runner_->BelongsToCurrentThread());
  DVLOGF(4) << "picture_buffer_id=" << picture_buffer_id;

  if (!make_context_current_.Run()) {
    LOGF(ERROR) << "could not make context current";
    NOTIFY_ERROR(PLATFORM_FAILURE);
    return;
  }

  EGLSyncKHR egl_sync =
      eglCreateSyncKHR(egl_display_, EGL_SYNC_FENCE_KHR, NULL);
  if (egl_sync == EGL_NO_SYNC_KHR) {
    LOGF(ERROR) << "eglCreateSyncKHR() failed";
    NOTIFY_ERROR(PLATFORM_FAILURE);
    return;
  }

  scoped_ptr<EGLSyncKHRRef> egl_sync_ref(
      new EGLSyncKHRRef(egl_display_, egl_sync));
  decoder_thread_task_runner_->PostTask(
      FROM_HERE,
      base::Bind(&V4L2SliceVideoDecodeAccelerator::ReusePictureBufferTask,
                 base::Unretained(this), picture_buffer_id,
                 base::Passed(&egl_sync_ref)));
}

void V4L2SliceVideoDecodeAccelerator::ReusePictureBufferTask(
    int32 picture_buffer_id,
    scoped_ptr<EGLSyncKHRRef> egl_sync_ref) {
  DVLOGF(3) << "picture_buffer_id=" << picture_buffer_id;
  DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());

  V4L2DecodeSurfaceByPictureBufferId::iterator it =
      surfaces_at_display_.find(picture_buffer_id);
  if (it == surfaces_at_display_.end()) {
    // It's possible that we've already posted a DismissPictureBuffer for this
    // picture, but it has not yet executed when this ReusePictureBuffer was
    // posted to us by the client. In that case just ignore this (we've already
    // dismissed it and accounted for that) and let the sync object get
    // destroyed.
    DVLOGF(3) << "got picture id=" << picture_buffer_id
              << " not in use (anymore?).";
    return;
  }

  OutputRecord& output_record = output_buffer_map_[it->second->output_record()];
  if (output_record.at_device || !output_record.at_client) {
    DVLOGF(1) << "picture_buffer_id not reusable";
    NOTIFY_ERROR(INVALID_ARGUMENT);
    return;
  }

  DCHECK_EQ(output_record.egl_sync, EGL_NO_SYNC_KHR);
  DCHECK(!output_record.at_device);
  output_record.at_client = false;
  output_record.egl_sync = egl_sync_ref->egl_sync;
  // Take ownership of the EGLSync.
  egl_sync_ref->egl_sync = EGL_NO_SYNC_KHR;
  surfaces_at_display_.erase(it);
}

void V4L2SliceVideoDecodeAccelerator::Flush() {
  DCHECK(child_task_runner_->BelongsToCurrentThread());

  decoder_thread_task_runner_->PostTask(
      FROM_HERE, base::Bind(&V4L2SliceVideoDecodeAccelerator::FlushTask,
                            base::Unretained(this)));
}

void V4L2SliceVideoDecodeAccelerator::FlushTask() {
  DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());

  if (!decoder_input_queue_.empty()) {
    // We are not done with pending inputs, so queue an empty buffer,
    // which - when reached - will trigger flush sequence.
    decoder_input_queue_.push(
        linked_ptr<BitstreamBufferRef>(new BitstreamBufferRef(
            io_client_, io_task_runner_, nullptr, 0, kFlushBufferId)));
    return;
  }

  // No more inputs pending, so just finish flushing here.
  InitiateFlush();
}

void V4L2SliceVideoDecodeAccelerator::InitiateFlush() {
  DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());

  DCHECK(!decoder_flushing_);
  DCHECK_EQ(state_, kDecoding);
  state_ = kIdle;

  // This will trigger output for all remaining surfaces in the decoder.
  // However, not all of them may be decoded yet (they would be queued
  // in hardware then).
  if (!decoder_->Flush()) {
    DVLOGF(1) << "Failed flushing the decoder.";
    NOTIFY_ERROR(PLATFORM_FAILURE);
    return;
  }

  // Put the decoder in an idle state, ready to resume.
  decoder_->Reset();

  decoder_flushing_ = true;

  decoder_thread_task_runner_->PostTask(
      FROM_HERE,
      base::Bind(&V4L2SliceVideoDecodeAccelerator::FinishFlushIfNeeded,
                 base::Unretained(this)));
}

void V4L2SliceVideoDecodeAccelerator::FinishFlushIfNeeded() {
  DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());

  if (!decoder_flushing_ || !surfaces_at_device_.empty())
    return;

  DCHECK_EQ(state_, kIdle);

  // At this point, all remaining surfaces are decoded and dequeued, and since
  // we have already scheduled output for them in InitiateFlush(), their
  // respective PictureReady calls have been posted (or they have been queued on
  // pending_picture_ready_). So at this time, once we SendPictureReady(),
  // we will have all remaining PictureReady() posted to the client and we
  // can post NotifyFlushDone().
  DCHECK(decoder_display_queue_.empty());

  // Decoder should have already returned all surfaces and all surfaces are
  // out of hardware. There can be no other owners of input buffers.
  DCHECK_EQ(free_input_buffers_.size(), input_buffer_map_.size());

  SendPictureReady();

  child_task_runner_->PostTask(FROM_HERE,
                               base::Bind(&Client::NotifyFlushDone, client_));

  decoder_flushing_ = false;

  DVLOGF(3) << "Flush finished";
  state_ = kDecoding;
  ScheduleDecodeBufferTaskIfNeeded();
}

void V4L2SliceVideoDecodeAccelerator::Reset() {
  DCHECK(child_task_runner_->BelongsToCurrentThread());

  decoder_thread_task_runner_->PostTask(
      FROM_HERE, base::Bind(&V4L2SliceVideoDecodeAccelerator::ResetTask,
                            base::Unretained(this)));
}

void V4L2SliceVideoDecodeAccelerator::ResetTask() {
  DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());

  if (decoder_resetting_) {
    // This is a bug in the client, multiple Reset()s before NotifyResetDone()
    // are not allowed.
    NOTREACHED() << "Client should not be requesting multiple Reset()s";
    return;
  }

  DCHECK_EQ(state_, kDecoding);
  state_ = kIdle;

  // Put the decoder in an idle state, ready to resume.
  decoder_->Reset();

  decoder_resetting_ = true;

  // Drop all remaining inputs.
  decoder_current_bitstream_buffer_.reset();
  while (!decoder_input_queue_.empty())
    decoder_input_queue_.pop();

  FinishResetIfNeeded();
}

void V4L2SliceVideoDecodeAccelerator::FinishResetIfNeeded() {
  DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());

  if (!decoder_resetting_ || !surfaces_at_device_.empty())
    return;

  DCHECK_EQ(state_, kIdle);
  DCHECK(!decoder_flushing_);
  SendPictureReady();

  // Drop any pending outputs.
  while (!decoder_display_queue_.empty())
    decoder_display_queue_.pop();

  // At this point we can have no input buffers in the decoder, because we
  // Reset()ed it in ResetTask(), and have not scheduled any new Decode()s
  // having been in kIdle since. We don't have any surfaces in the HW either -
  // we just checked that surfaces_at_device_.empty(), and inputs are tied
  // to surfaces. Since there can be no other owners of input buffers, we can
  // simply mark them all as available.
  DCHECK_EQ(input_buffer_queued_count_, 0);
  free_input_buffers_.clear();
  for (size_t i = 0; i < input_buffer_map_.size(); ++i) {
    DCHECK(!input_buffer_map_[i].at_device);
    ReuseInputBuffer(i);
  }

  decoder_resetting_ = false;

  child_task_runner_->PostTask(FROM_HERE,
                               base::Bind(&Client::NotifyResetDone, client_));

  DVLOGF(3) << "Reset finished";

  state_ = kDecoding;
  ScheduleDecodeBufferTaskIfNeeded();
}

void V4L2SliceVideoDecodeAccelerator::SetErrorState(Error error) {
  // We can touch state_ only if this is the decoder thread or the
  // decoder thread isn't running.
  if (decoder_thread_.IsRunning() &&
      !decoder_thread_task_runner_->BelongsToCurrentThread()) {
    decoder_thread_task_runner_->PostTask(
        FROM_HERE, base::Bind(&V4L2SliceVideoDecodeAccelerator::SetErrorState,
                              base::Unretained(this), error));
    return;
  }

  // Post NotifyError only if we are already initialized, as the API does
  // not allow doing so before that.
  if (state_ != kError && state_ != kUninitialized)
    NotifyError(error);

  state_ = kError;
}

V4L2SliceVideoDecodeAccelerator::V4L2H264Accelerator::V4L2H264Accelerator(
    V4L2SliceVideoDecodeAccelerator* v4l2_dec)
    : num_slices_(0), v4l2_dec_(v4l2_dec) {
}

V4L2SliceVideoDecodeAccelerator::V4L2H264Accelerator::~V4L2H264Accelerator() {
}

scoped_refptr<H264Picture>
V4L2SliceVideoDecodeAccelerator::V4L2H264Accelerator::CreateH264Picture() {
  scoped_refptr<V4L2DecodeSurface> dec_surface = v4l2_dec_->CreateSurface();
  if (!dec_surface)
    return nullptr;

  return new V4L2H264Picture(dec_surface);
}

void V4L2SliceVideoDecodeAccelerator::V4L2H264Accelerator::
    H264PictureListToDPBIndicesList(const H264Picture::Vector& src_pic_list,
                                    uint8_t dst_list[kDPBIndicesListSize]) {
  size_t i;
  for (i = 0; i < src_pic_list.size() && i < kDPBIndicesListSize; ++i) {
    const scoped_refptr<H264Picture>& pic = src_pic_list[i];
    dst_list[i] = pic ? pic->dpb_position : VIDEO_MAX_FRAME;
  }

  while (i < kDPBIndicesListSize)
    dst_list[i++] = VIDEO_MAX_FRAME;
}
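
// Reference picture lists are handed to the driver as fixed-size arrays of DPB
// indices; entries beyond the actual list length are padded with
// VIDEO_MAX_FRAME to mark them as unused.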

void V4L2SliceVideoDecodeAccelerator::V4L2H264Accelerator::H264DPBToV4L2DPB(
    const H264DPB& dpb,
    std::vector<scoped_refptr<V4L2DecodeSurface>>* ref_surfaces) {
  memset(v4l2_decode_param_.dpb, 0, sizeof(v4l2_decode_param_.dpb));
  size_t i = 0;
  for (const auto& pic : dpb) {
    if (i >= arraysize(v4l2_decode_param_.dpb)) {
      DVLOG(1) << "Invalid DPB size";
      break;
    }
    struct v4l2_h264_dpb_entry& entry = v4l2_decode_param_.dpb[i++];
    scoped_refptr<V4L2DecodeSurface> dec_surface =
        H264PictureToV4L2DecodeSurface(pic);
    entry.buf_index = dec_surface->output_record();
    entry.frame_num = pic->frame_num;
    entry.pic_num = pic->pic_num;
    entry.top_field_order_cnt = pic->top_field_order_cnt;
    entry.bottom_field_order_cnt = pic->bottom_field_order_cnt;
    entry.flags = (pic->ref ? V4L2_H264_DPB_ENTRY_FLAG_ACTIVE : 0) |
                  (pic->long_term ? V4L2_H264_DPB_ENTRY_FLAG_LONG_TERM : 0);

    ref_surfaces->push_back(dec_surface);
  }
}
1774 const media::H264SPS
* sps
,
1775 const media::H264PPS
* pps
,
1777 const H264Picture::Vector
& ref_pic_listp0
,
1778 const H264Picture::Vector
& ref_pic_listb0
,
1779 const H264Picture::Vector
& ref_pic_listb1
,
1780 const scoped_refptr
<H264Picture
>& pic
) {
1781 struct v4l2_ext_control ctrl
;
1782 std::vector
<struct v4l2_ext_control
> ctrls
;
1784 struct v4l2_ctrl_h264_sps v4l2_sps
;
1785 memset(&v4l2_sps
, 0, sizeof(v4l2_sps
));
1786 v4l2_sps
.constraint_set_flags
=
1787 sps
->constraint_set0_flag
? V4L2_H264_SPS_CONSTRAINT_SET0_FLAG
: 0 |
1788 sps
->constraint_set1_flag
? V4L2_H264_SPS_CONSTRAINT_SET1_FLAG
: 0 |
1789 sps
->constraint_set2_flag
? V4L2_H264_SPS_CONSTRAINT_SET2_FLAG
: 0 |
1790 sps
->constraint_set3_flag
? V4L2_H264_SPS_CONSTRAINT_SET3_FLAG
: 0 |
1791 sps
->constraint_set4_flag
? V4L2_H264_SPS_CONSTRAINT_SET4_FLAG
: 0 |
1792 sps
->constraint_set5_flag
? V4L2_H264_SPS_CONSTRAINT_SET5_FLAG
: 0;
1793 #define SPS_TO_V4L2SPS(a) v4l2_sps.a = sps->a
1794 SPS_TO_V4L2SPS(profile_idc
);
1795 SPS_TO_V4L2SPS(level_idc
);
1796 SPS_TO_V4L2SPS(seq_parameter_set_id
);
1797 SPS_TO_V4L2SPS(chroma_format_idc
);
1798 SPS_TO_V4L2SPS(bit_depth_luma_minus8
);
1799 SPS_TO_V4L2SPS(bit_depth_chroma_minus8
);
1800 SPS_TO_V4L2SPS(log2_max_frame_num_minus4
);
1801 SPS_TO_V4L2SPS(pic_order_cnt_type
);
1802 SPS_TO_V4L2SPS(log2_max_pic_order_cnt_lsb_minus4
);
1803 SPS_TO_V4L2SPS(offset_for_non_ref_pic
);
1804 SPS_TO_V4L2SPS(offset_for_top_to_bottom_field
);
1805 SPS_TO_V4L2SPS(num_ref_frames_in_pic_order_cnt_cycle
);
1807 static_assert(arraysize(v4l2_sps
.offset_for_ref_frame
) ==
1808 arraysize(sps
->offset_for_ref_frame
),
1809 "offset_for_ref_frame arrays must be same size");
1810 for (size_t i
= 0; i
< arraysize(v4l2_sps
.offset_for_ref_frame
); ++i
)
1811 v4l2_sps
.offset_for_ref_frame
[i
] = sps
->offset_for_ref_frame
[i
];
1812 SPS_TO_V4L2SPS(max_num_ref_frames
);
1813 SPS_TO_V4L2SPS(pic_width_in_mbs_minus1
);
1814 SPS_TO_V4L2SPS(pic_height_in_map_units_minus1
);
1815 #undef SPS_TO_V4L2SPS
1817 #define SET_V4L2_SPS_FLAG_IF(cond, flag) \
1818 v4l2_sps.flags |= ((sps->cond) ? (flag) : 0)
1819 SET_V4L2_SPS_FLAG_IF(separate_colour_plane_flag
,
1820 V4L2_H264_SPS_FLAG_SEPARATE_COLOUR_PLANE
);
1821 SET_V4L2_SPS_FLAG_IF(qpprime_y_zero_transform_bypass_flag
,
1822 V4L2_H264_SPS_FLAG_QPPRIME_Y_ZERO_TRANSFORM_BYPASS
);
1823 SET_V4L2_SPS_FLAG_IF(delta_pic_order_always_zero_flag
,
1824 V4L2_H264_SPS_FLAG_DELTA_PIC_ORDER_ALWAYS_ZERO
);
1825 SET_V4L2_SPS_FLAG_IF(gaps_in_frame_num_value_allowed_flag
,
1826 V4L2_H264_SPS_FLAG_GAPS_IN_FRAME_NUM_VALUE_ALLOWED
);
1827 SET_V4L2_SPS_FLAG_IF(frame_mbs_only_flag
, V4L2_H264_SPS_FLAG_FRAME_MBS_ONLY
);
1828 SET_V4L2_SPS_FLAG_IF(mb_adaptive_frame_field_flag
,
1829 V4L2_H264_SPS_FLAG_MB_ADAPTIVE_FRAME_FIELD
);
1830 SET_V4L2_SPS_FLAG_IF(direct_8x8_inference_flag
,
1831 V4L2_H264_SPS_FLAG_DIRECT_8X8_INFERENCE
);
1833 memset(&ctrl
, 0, sizeof(ctrl
));
1834 ctrl
.id
= V4L2_CID_MPEG_VIDEO_H264_SPS
;
1835 ctrl
.size
= sizeof(v4l2_sps
);
1836 ctrl
.p_h264_sps
= &v4l2_sps
;
1837 ctrls
.push_back(ctrl
);
1839 struct v4l2_ctrl_h264_pps v4l2_pps
;
1840 memset(&v4l2_pps
, 0, sizeof(v4l2_pps
));
1841 #define PPS_TO_V4L2PPS(a) v4l2_pps.a = pps->a
1842 PPS_TO_V4L2PPS(pic_parameter_set_id
);
1843 PPS_TO_V4L2PPS(seq_parameter_set_id
);
1844 PPS_TO_V4L2PPS(num_slice_groups_minus1
);
1845 PPS_TO_V4L2PPS(num_ref_idx_l0_default_active_minus1
);
1846 PPS_TO_V4L2PPS(num_ref_idx_l1_default_active_minus1
);
1847 PPS_TO_V4L2PPS(weighted_bipred_idc
);
1848 PPS_TO_V4L2PPS(pic_init_qp_minus26
);
1849 PPS_TO_V4L2PPS(pic_init_qs_minus26
);
1850 PPS_TO_V4L2PPS(chroma_qp_index_offset
);
1851 PPS_TO_V4L2PPS(second_chroma_qp_index_offset
);
1852 #undef PPS_TO_V4L2PPS
1854 #define SET_V4L2_PPS_FLAG_IF(cond, flag) \
1855 v4l2_pps.flags |= ((pps->cond) ? (flag) : 0)
1856 SET_V4L2_PPS_FLAG_IF(entropy_coding_mode_flag
,
1857 V4L2_H264_PPS_FLAG_ENTROPY_CODING_MODE
);
1858 SET_V4L2_PPS_FLAG_IF(
1859 bottom_field_pic_order_in_frame_present_flag
,
1860 V4L2_H264_PPS_FLAG_BOTTOM_FIELD_PIC_ORDER_IN_FRAME_PRESENT
);
1861 SET_V4L2_PPS_FLAG_IF(weighted_pred_flag
, V4L2_H264_PPS_FLAG_WEIGHTED_PRED
);
1862 SET_V4L2_PPS_FLAG_IF(deblocking_filter_control_present_flag
,
1863 V4L2_H264_PPS_FLAG_DEBLOCKING_FILTER_CONTROL_PRESENT
);
1864 SET_V4L2_PPS_FLAG_IF(constrained_intra_pred_flag
,
1865 V4L2_H264_PPS_FLAG_CONSTRAINED_INTRA_PRED
);
1866 SET_V4L2_PPS_FLAG_IF(redundant_pic_cnt_present_flag
,
1867 V4L2_H264_PPS_FLAG_REDUNDANT_PIC_CNT_PRESENT
);
1868 SET_V4L2_PPS_FLAG_IF(transform_8x8_mode_flag
,
1869 V4L2_H264_PPS_FLAG_TRANSFORM_8X8_MODE
);
1870 SET_V4L2_PPS_FLAG_IF(pic_scaling_matrix_present_flag
,
1871 V4L2_H264_PPS_FLAG_PIC_SCALING_MATRIX_PRESENT
);
1872 #undef SET_V4L2_PPS_FLAG_IF
1873 memset(&ctrl
, 0, sizeof(ctrl
));
1874 ctrl
.id
= V4L2_CID_MPEG_VIDEO_H264_PPS
;
1875 ctrl
.size
= sizeof(v4l2_pps
);
1876 ctrl
.p_h264_pps
= &v4l2_pps
;
1877 ctrls
.push_back(ctrl
);
1879 struct v4l2_ctrl_h264_scaling_matrix v4l2_scaling_matrix
;
1880 memset(&v4l2_scaling_matrix
, 0, sizeof(v4l2_scaling_matrix
));
1881 static_assert(arraysize(v4l2_scaling_matrix
.scaling_list_4x4
) <=
1882 arraysize(pps
->scaling_list4x4
) &&
1883 arraysize(v4l2_scaling_matrix
.scaling_list_4x4
[0]) <=
1884 arraysize(pps
->scaling_list4x4
[0]) &&
1885 arraysize(v4l2_scaling_matrix
.scaling_list_8x8
) <=
1886 arraysize(pps
->scaling_list8x8
) &&
1887 arraysize(v4l2_scaling_matrix
.scaling_list_8x8
[0]) <=
1888 arraysize(pps
->scaling_list8x8
[0]),
1889 "scaling_lists must be of correct size");
1890 for (size_t i
= 0; i
< arraysize(v4l2_scaling_matrix
.scaling_list_4x4
); ++i
) {
1891 for (size_t j
= 0; j
< arraysize(v4l2_scaling_matrix
.scaling_list_4x4
[i
]);
1893 v4l2_scaling_matrix
.scaling_list_4x4
[i
][j
] = pps
->scaling_list4x4
[i
][j
];
1896 for (size_t i
= 0; i
< arraysize(v4l2_scaling_matrix
.scaling_list_8x8
); ++i
) {
1897 for (size_t j
= 0; j
< arraysize(v4l2_scaling_matrix
.scaling_list_8x8
[i
]);
1899 v4l2_scaling_matrix
.scaling_list_8x8
[i
][j
] = pps
->scaling_list8x8
[i
][j
];
1902 memset(&ctrl
, 0, sizeof(ctrl
));
1903 ctrl
.id
= V4L2_CID_MPEG_VIDEO_H264_SCALING_MATRIX
;
1904 ctrl
.size
= sizeof(v4l2_scaling_matrix
);
1905 ctrl
.p_h264_scal_mtrx
= &v4l2_scaling_matrix
;
1906 ctrls
.push_back(ctrl
);
1908 scoped_refptr
<V4L2DecodeSurface
> dec_surface
=
1909 H264PictureToV4L2DecodeSurface(pic
);
1911 struct v4l2_ext_controls ext_ctrls
;
1912 memset(&ext_ctrls
, 0, sizeof(ext_ctrls
));
1913 ext_ctrls
.count
= ctrls
.size();
1914 ext_ctrls
.controls
= &ctrls
[0];
1915 ext_ctrls
.config_store
= dec_surface
->config_store();
1916 v4l2_dec_
->SubmitExtControls(&ext_ctrls
);
1918 H264PictureListToDPBIndicesList(ref_pic_listp0
,
1919 v4l2_decode_param_
.ref_pic_list_p0
);
1920 H264PictureListToDPBIndicesList(ref_pic_listb0
,
1921 v4l2_decode_param_
.ref_pic_list_b0
);
1922 H264PictureListToDPBIndicesList(ref_pic_listb1
,
1923 v4l2_decode_param_
.ref_pic_list_b1
);
1925 std::vector
<scoped_refptr
<V4L2DecodeSurface
>> ref_surfaces
;
1926 H264DPBToV4L2DPB(dpb
, &ref_surfaces
);
1927 dec_surface
->SetReferenceSurfaces(ref_surfaces
);
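// Fills one v4l2_ctrl_h264_slice_param entry from the parsed slice header,
// including prediction weight tables and per-slice reference lists, and
// appends the slice data (with an Annex-B start code 00 00 01 prepended, see
// the TODO below) to the surface's input buffer.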
bool V4L2SliceVideoDecodeAccelerator::V4L2H264Accelerator::SubmitSlice(
    const media::H264PPS* pps,
    const media::H264SliceHeader* slice_hdr,
    const H264Picture::Vector& ref_pic_list0,
    const H264Picture::Vector& ref_pic_list1,
    const scoped_refptr<H264Picture>& pic,
    const uint8_t* data,
    size_t size) {
  if (num_slices_ == kMaxSlices) {
    LOGF(ERROR) << "Over limit of supported slices per frame";
    return false;
  }

  struct v4l2_ctrl_h264_slice_param& v4l2_slice_param =
      v4l2_slice_params_[num_slices_++];
  memset(&v4l2_slice_param, 0, sizeof(v4l2_slice_param));

  v4l2_slice_param.size = size;
#define SHDR_TO_V4L2SPARM(a) v4l2_slice_param.a = slice_hdr->a
  SHDR_TO_V4L2SPARM(header_bit_size);
  SHDR_TO_V4L2SPARM(first_mb_in_slice);
  SHDR_TO_V4L2SPARM(slice_type);
  SHDR_TO_V4L2SPARM(pic_parameter_set_id);
  SHDR_TO_V4L2SPARM(colour_plane_id);
  SHDR_TO_V4L2SPARM(frame_num);
  SHDR_TO_V4L2SPARM(idr_pic_id);
  SHDR_TO_V4L2SPARM(pic_order_cnt_lsb);
  SHDR_TO_V4L2SPARM(delta_pic_order_cnt_bottom);
  SHDR_TO_V4L2SPARM(delta_pic_order_cnt0);
  SHDR_TO_V4L2SPARM(delta_pic_order_cnt1);
  SHDR_TO_V4L2SPARM(redundant_pic_cnt);
  SHDR_TO_V4L2SPARM(dec_ref_pic_marking_bit_size);
  SHDR_TO_V4L2SPARM(cabac_init_idc);
  SHDR_TO_V4L2SPARM(slice_qp_delta);
  SHDR_TO_V4L2SPARM(slice_qs_delta);
  SHDR_TO_V4L2SPARM(disable_deblocking_filter_idc);
  SHDR_TO_V4L2SPARM(slice_alpha_c0_offset_div2);
  SHDR_TO_V4L2SPARM(slice_beta_offset_div2);
  SHDR_TO_V4L2SPARM(num_ref_idx_l0_active_minus1);
  SHDR_TO_V4L2SPARM(num_ref_idx_l1_active_minus1);
  SHDR_TO_V4L2SPARM(pic_order_cnt_bit_size);
#undef SHDR_TO_V4L2SPARM

#define SET_V4L2_SPARM_FLAG_IF(cond, flag) \
  v4l2_slice_param.flags |= ((slice_hdr->cond) ? (flag) : 0)
  SET_V4L2_SPARM_FLAG_IF(field_pic_flag, V4L2_SLICE_FLAG_FIELD_PIC);
  SET_V4L2_SPARM_FLAG_IF(bottom_field_flag, V4L2_SLICE_FLAG_BOTTOM_FIELD);
  SET_V4L2_SPARM_FLAG_IF(direct_spatial_mv_pred_flag,
                         V4L2_SLICE_FLAG_DIRECT_SPATIAL_MV_PRED);
  SET_V4L2_SPARM_FLAG_IF(sp_for_switch_flag, V4L2_SLICE_FLAG_SP_FOR_SWITCH);
#undef SET_V4L2_SPARM_FLAG_IF

  struct v4l2_h264_pred_weight_table* pred_weight_table =
      &v4l2_slice_param.pred_weight_table;

  if (((slice_hdr->IsPSlice() || slice_hdr->IsSPSlice()) &&
       pps->weighted_pred_flag) ||
      (slice_hdr->IsBSlice() && pps->weighted_bipred_idc == 1)) {
    pred_weight_table->luma_log2_weight_denom =
        slice_hdr->luma_log2_weight_denom;
    pred_weight_table->chroma_log2_weight_denom =
        slice_hdr->chroma_log2_weight_denom;

    struct v4l2_h264_weight_factors* factorsl0 =
        &pred_weight_table->weight_factors[0];

    for (int i = 0; i < 32; ++i) {
      factorsl0->luma_weight[i] =
          slice_hdr->pred_weight_table_l0.luma_weight[i];
      factorsl0->luma_offset[i] =
          slice_hdr->pred_weight_table_l0.luma_offset[i];

      for (int j = 0; j < 2; ++j) {
        factorsl0->chroma_weight[i][j] =
            slice_hdr->pred_weight_table_l0.chroma_weight[i][j];
        factorsl0->chroma_offset[i][j] =
            slice_hdr->pred_weight_table_l0.chroma_offset[i][j];
      }
    }

    if (slice_hdr->IsBSlice()) {
      struct v4l2_h264_weight_factors* factorsl1 =
          &pred_weight_table->weight_factors[1];

      for (int i = 0; i < 32; ++i) {
        factorsl1->luma_weight[i] =
            slice_hdr->pred_weight_table_l1.luma_weight[i];
        factorsl1->luma_offset[i] =
            slice_hdr->pred_weight_table_l1.luma_offset[i];

        for (int j = 0; j < 2; ++j) {
          factorsl1->chroma_weight[i][j] =
              slice_hdr->pred_weight_table_l1.chroma_weight[i][j];
          factorsl1->chroma_offset[i][j] =
              slice_hdr->pred_weight_table_l1.chroma_offset[i][j];
        }
      }
    }
  }

  H264PictureListToDPBIndicesList(ref_pic_list0,
                                  v4l2_slice_param.ref_pic_list0);
  H264PictureListToDPBIndicesList(ref_pic_list1,
                                  v4l2_slice_param.ref_pic_list1);

  scoped_refptr<V4L2DecodeSurface> dec_surface =
      H264PictureToV4L2DecodeSurface(pic);

  v4l2_decode_param_.nal_ref_idc = slice_hdr->nal_ref_idc;

  // TODO(posciak): Don't add start code back here, but have it passed from
  // the parser.
  size_t data_copy_size = size + 3;
  scoped_ptr<uint8_t[]> data_copy(new uint8_t[data_copy_size]);
  memset(data_copy.get(), 0, data_copy_size);
  data_copy[2] = 0x01;
  memcpy(data_copy.get() + 3, data, size);
  return v4l2_dec_->SubmitSlice(dec_surface->input_record(), data_copy.get(),
                                data_copy_size);
}

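// Appends |size| bytes from |data| to the input buffer identified by |index|,
// failing if the data would not fit in the remaining space.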
bool V4L2SliceVideoDecodeAccelerator::SubmitSlice(int index,
                                                  const uint8_t* data,
                                                  size_t size) {
  DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());

  InputRecord& input_record = input_buffer_map_[index];

  if (input_record.bytes_used + size > input_record.length) {
    DVLOGF(1) << "Input buffer too small";
    return false;
  }

  memcpy(static_cast<uint8_t*>(input_record.address) + input_record.bytes_used,
         data, size);
  input_record.bytes_used += size;

  return true;
}

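// Issues VIDIOC_S_EXT_CTRLS for controls that have already been tagged with a
// non-zero config_store value; the DCHECK below enforces the tagging, since
// untagged controls would presumably apply to the device globally rather than
// to a specific decode.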
bool V4L2SliceVideoDecodeAccelerator::SubmitExtControls(
    struct v4l2_ext_controls* ext_ctrls) {
  DCHECK_GT(ext_ctrls->config_store, 0u);
  IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_S_EXT_CTRLS, ext_ctrls);
  return true;
}

bool V4L2SliceVideoDecodeAccelerator::V4L2H264Accelerator::SubmitDecode(
    const scoped_refptr<H264Picture>& pic) {
  scoped_refptr<V4L2DecodeSurface> dec_surface =
      H264PictureToV4L2DecodeSurface(pic);

  v4l2_decode_param_.num_slices = num_slices_;
  v4l2_decode_param_.idr_pic_flag = pic->idr;
  v4l2_decode_param_.top_field_order_cnt = pic->top_field_order_cnt;
  v4l2_decode_param_.bottom_field_order_cnt = pic->bottom_field_order_cnt;

  struct v4l2_ext_control ctrl;
  std::vector<struct v4l2_ext_control> ctrls;

  memset(&ctrl, 0, sizeof(ctrl));
  ctrl.id = V4L2_CID_MPEG_VIDEO_H264_SLICE_PARAM;
  ctrl.size = sizeof(v4l2_slice_params_);
  ctrl.p_h264_slice_param = v4l2_slice_params_;
  ctrls.push_back(ctrl);

  memset(&ctrl, 0, sizeof(ctrl));
  ctrl.id = V4L2_CID_MPEG_VIDEO_H264_DECODE_PARAM;
  ctrl.size = sizeof(v4l2_decode_param_);
  ctrl.p_h264_decode_param = &v4l2_decode_param_;
  ctrls.push_back(ctrl);

  struct v4l2_ext_controls ext_ctrls;
  memset(&ext_ctrls, 0, sizeof(ext_ctrls));
  ext_ctrls.count = ctrls.size();
  ext_ctrls.controls = &ctrls[0];
  ext_ctrls.config_store = dec_surface->config_store();
  v4l2_dec_->SubmitExtControls(&ext_ctrls);

  v4l2_dec_->DecodeSurface(dec_surface);
  return true;
}

bool V4L2SliceVideoDecodeAccelerator::V4L2H264Accelerator::OutputPicture(
    const scoped_refptr<H264Picture>& pic) {
  scoped_refptr<V4L2DecodeSurface> dec_surface =
      H264PictureToV4L2DecodeSurface(pic);
  v4l2_dec_->SurfaceReady(dec_surface);
  return true;
}

void V4L2SliceVideoDecodeAccelerator::V4L2H264Accelerator::Reset() {
  num_slices_ = 0;
  memset(&v4l2_decode_param_, 0, sizeof(v4l2_decode_param_));
  memset(&v4l2_slice_params_, 0, sizeof(v4l2_slice_params_));
}

scoped_refptr<V4L2SliceVideoDecodeAccelerator::V4L2DecodeSurface>
V4L2SliceVideoDecodeAccelerator::V4L2H264Accelerator::
    H264PictureToV4L2DecodeSurface(const scoped_refptr<H264Picture>& pic) {
  V4L2H264Picture* v4l2_pic = pic->AsV4L2H264Picture();
  return v4l2_pic->dec_surface();
}

V4L2SliceVideoDecodeAccelerator::V4L2VP8Accelerator::V4L2VP8Accelerator(
    V4L2SliceVideoDecodeAccelerator* v4l2_dec)
    : v4l2_dec_(v4l2_dec) {
}

V4L2SliceVideoDecodeAccelerator::V4L2VP8Accelerator::~V4L2VP8Accelerator() {
}

scoped_refptr<VP8Picture>
V4L2SliceVideoDecodeAccelerator::V4L2VP8Accelerator::CreateVP8Picture() {
  scoped_refptr<V4L2DecodeSurface> dec_surface = v4l2_dec_->CreateSurface();
  if (!dec_surface)
    return nullptr;

  return new V4L2VP8Picture(dec_surface);
}

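// Copies a fixed-size array into another while statically asserting that both
// have exactly the same size, e.g.
//   ARRAY_MEMCPY_CHECKED(v4l2_entropy_hdr->mv_probs, vp8_entropy_hdr.mv_probs);
// fails to compile if the destination and source array sizes ever diverge.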
#define ARRAY_MEMCPY_CHECKED(to, from)                               \
  do {                                                               \
    static_assert(sizeof(to) == sizeof(from),                        \
                  #from " and " #to " arrays must be of same size"); \
    memcpy(to, from, sizeof(to));                                    \
  } while (0)

static void FillV4L2SegmentationHeader(
    const media::Vp8SegmentationHeader& vp8_sgmnt_hdr,
    struct v4l2_vp8_sgmnt_hdr* v4l2_sgmnt_hdr) {
#define SET_V4L2_SGMNT_HDR_FLAG_IF(cond, flag) \
  v4l2_sgmnt_hdr->flags |= ((vp8_sgmnt_hdr.cond) ? (flag) : 0)
  SET_V4L2_SGMNT_HDR_FLAG_IF(segmentation_enabled,
                             V4L2_VP8_SEGMNT_HDR_FLAG_ENABLED);
  SET_V4L2_SGMNT_HDR_FLAG_IF(update_mb_segmentation_map,
                             V4L2_VP8_SEGMNT_HDR_FLAG_UPDATE_MAP);
  SET_V4L2_SGMNT_HDR_FLAG_IF(update_segment_feature_data,
                             V4L2_VP8_SEGMNT_HDR_FLAG_UPDATE_FEATURE_DATA);
#undef SET_V4L2_SGMNT_HDR_FLAG_IF
  v4l2_sgmnt_hdr->segment_feature_mode = vp8_sgmnt_hdr.segment_feature_mode;

  ARRAY_MEMCPY_CHECKED(v4l2_sgmnt_hdr->quant_update,
                       vp8_sgmnt_hdr.quantizer_update_value);
  ARRAY_MEMCPY_CHECKED(v4l2_sgmnt_hdr->lf_update,
                       vp8_sgmnt_hdr.lf_update_value);
  ARRAY_MEMCPY_CHECKED(v4l2_sgmnt_hdr->segment_probs,
                       vp8_sgmnt_hdr.segment_prob);
}

static void FillV4L2LoopfilterHeader(
    const media::Vp8LoopFilterHeader& vp8_loopfilter_hdr,
    struct v4l2_vp8_loopfilter_hdr* v4l2_lf_hdr) {
#define SET_V4L2_LF_HDR_FLAG_IF(cond, flag) \
  v4l2_lf_hdr->flags |= ((vp8_loopfilter_hdr.cond) ? (flag) : 0)
  SET_V4L2_LF_HDR_FLAG_IF(loop_filter_adj_enable, V4L2_VP8_LF_HDR_ADJ_ENABLE);
  SET_V4L2_LF_HDR_FLAG_IF(mode_ref_lf_delta_update,
                          V4L2_VP8_LF_HDR_DELTA_UPDATE);
#undef SET_V4L2_LF_HDR_FLAG_IF

#define LF_HDR_TO_V4L2_LF_HDR(a) v4l2_lf_hdr->a = vp8_loopfilter_hdr.a;
  LF_HDR_TO_V4L2_LF_HDR(type);
  LF_HDR_TO_V4L2_LF_HDR(level);
  LF_HDR_TO_V4L2_LF_HDR(sharpness_level);
#undef LF_HDR_TO_V4L2_LF_HDR

  ARRAY_MEMCPY_CHECKED(v4l2_lf_hdr->ref_frm_delta_magnitude,
                       vp8_loopfilter_hdr.ref_frame_delta);
  ARRAY_MEMCPY_CHECKED(v4l2_lf_hdr->mb_mode_delta_magnitude,
                       vp8_loopfilter_hdr.mb_mode_delta);
}

static void FillV4L2QuantizationHeader(
    const media::Vp8QuantizationHeader& vp8_quant_hdr,
    struct v4l2_vp8_quantization_hdr* v4l2_quant_hdr) {
  v4l2_quant_hdr->y_ac_qi = vp8_quant_hdr.y_ac_qi;
  v4l2_quant_hdr->y_dc_delta = vp8_quant_hdr.y_dc_delta;
  v4l2_quant_hdr->y2_dc_delta = vp8_quant_hdr.y2_dc_delta;
  v4l2_quant_hdr->y2_ac_delta = vp8_quant_hdr.y2_ac_delta;
  v4l2_quant_hdr->uv_dc_delta = vp8_quant_hdr.uv_dc_delta;
  v4l2_quant_hdr->uv_ac_delta = vp8_quant_hdr.uv_ac_delta;
}

static void FillV4L2EntropyHeader(
    const media::Vp8EntropyHeader& vp8_entropy_hdr,
    struct v4l2_vp8_entropy_hdr* v4l2_entropy_hdr) {
  ARRAY_MEMCPY_CHECKED(v4l2_entropy_hdr->coeff_probs,
                       vp8_entropy_hdr.coeff_probs);
  ARRAY_MEMCPY_CHECKED(v4l2_entropy_hdr->y_mode_probs,
                       vp8_entropy_hdr.y_mode_probs);
  ARRAY_MEMCPY_CHECKED(v4l2_entropy_hdr->uv_mode_probs,
                       vp8_entropy_hdr.uv_mode_probs);
  ARRAY_MEMCPY_CHECKED(v4l2_entropy_hdr->mv_probs,
                       vp8_entropy_hdr.mv_probs);
}

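// Builds a single v4l2_ctrl_vp8_frame_hdr from the parsed VP8 frame header
// (including segmentation, loop filter, quantization and entropy data),
// points the last/golden/alt reference fields at the output records of the
// corresponding surfaces (VIDEO_MAX_FRAME when a reference is absent), and
// submits the control, the frame data and the decode request for the surface.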
bool V4L2SliceVideoDecodeAccelerator::V4L2VP8Accelerator::SubmitDecode(
    const scoped_refptr<VP8Picture>& pic,
    const media::Vp8FrameHeader* frame_hdr,
    const scoped_refptr<VP8Picture>& last_frame,
    const scoped_refptr<VP8Picture>& golden_frame,
    const scoped_refptr<VP8Picture>& alt_frame) {
  struct v4l2_ctrl_vp8_frame_hdr v4l2_frame_hdr;
  memset(&v4l2_frame_hdr, 0, sizeof(v4l2_frame_hdr));

#define FHDR_TO_V4L2_FHDR(a) v4l2_frame_hdr.a = frame_hdr->a
  FHDR_TO_V4L2_FHDR(key_frame);
  FHDR_TO_V4L2_FHDR(version);
  FHDR_TO_V4L2_FHDR(width);
  FHDR_TO_V4L2_FHDR(horizontal_scale);
  FHDR_TO_V4L2_FHDR(height);
  FHDR_TO_V4L2_FHDR(vertical_scale);
  FHDR_TO_V4L2_FHDR(sign_bias_golden);
  FHDR_TO_V4L2_FHDR(sign_bias_alternate);
  FHDR_TO_V4L2_FHDR(prob_skip_false);
  FHDR_TO_V4L2_FHDR(prob_intra);
  FHDR_TO_V4L2_FHDR(prob_last);
  FHDR_TO_V4L2_FHDR(prob_gf);
  FHDR_TO_V4L2_FHDR(bool_dec_range);
  FHDR_TO_V4L2_FHDR(bool_dec_value);
  FHDR_TO_V4L2_FHDR(bool_dec_count);
#undef FHDR_TO_V4L2_FHDR

#define SET_V4L2_FRM_HDR_FLAG_IF(cond, flag) \
  v4l2_frame_hdr.flags |= ((frame_hdr->cond) ? (flag) : 0)
  SET_V4L2_FRM_HDR_FLAG_IF(is_experimental,
                           V4L2_VP8_FRAME_HDR_FLAG_EXPERIMENTAL);
  SET_V4L2_FRM_HDR_FLAG_IF(show_frame, V4L2_VP8_FRAME_HDR_FLAG_SHOW_FRAME);
  SET_V4L2_FRM_HDR_FLAG_IF(mb_no_skip_coeff,
                           V4L2_VP8_FRAME_HDR_FLAG_MB_NO_SKIP_COEFF);
#undef SET_V4L2_FRM_HDR_FLAG_IF

  FillV4L2SegmentationHeader(frame_hdr->segmentation_hdr,
                             &v4l2_frame_hdr.sgmnt_hdr);

  FillV4L2LoopfilterHeader(frame_hdr->loopfilter_hdr, &v4l2_frame_hdr.lf_hdr);

  FillV4L2QuantizationHeader(frame_hdr->quantization_hdr,
                             &v4l2_frame_hdr.quant_hdr);

  FillV4L2EntropyHeader(frame_hdr->entropy_hdr, &v4l2_frame_hdr.entropy_hdr);

  v4l2_frame_hdr.first_part_size =
      base::checked_cast<__u32>(frame_hdr->first_part_size);
  v4l2_frame_hdr.first_part_offset =
      base::checked_cast<__u32>(frame_hdr->first_part_offset);
  v4l2_frame_hdr.macroblock_bit_offset =
      base::checked_cast<__u32>(frame_hdr->macroblock_bit_offset);
  v4l2_frame_hdr.num_dct_parts = frame_hdr->num_of_dct_partitions;

  static_assert(arraysize(v4l2_frame_hdr.dct_part_sizes) ==
                    arraysize(frame_hdr->dct_partition_sizes),
                "DCT partition size arrays must have equal number of elements");
  for (size_t i = 0; i < frame_hdr->num_of_dct_partitions &&
                     i < arraysize(v4l2_frame_hdr.dct_part_sizes); ++i)
    v4l2_frame_hdr.dct_part_sizes[i] = frame_hdr->dct_partition_sizes[i];

  scoped_refptr<V4L2DecodeSurface> dec_surface =
      VP8PictureToV4L2DecodeSurface(pic);
  std::vector<scoped_refptr<V4L2DecodeSurface>> ref_surfaces;

  if (last_frame) {
    scoped_refptr<V4L2DecodeSurface> last_frame_surface =
        VP8PictureToV4L2DecodeSurface(last_frame);
    v4l2_frame_hdr.last_frame = last_frame_surface->output_record();
    ref_surfaces.push_back(last_frame_surface);
  } else {
    v4l2_frame_hdr.last_frame = VIDEO_MAX_FRAME;
  }

  if (golden_frame) {
    scoped_refptr<V4L2DecodeSurface> golden_frame_surface =
        VP8PictureToV4L2DecodeSurface(golden_frame);
    v4l2_frame_hdr.golden_frame = golden_frame_surface->output_record();
    ref_surfaces.push_back(golden_frame_surface);
  } else {
    v4l2_frame_hdr.golden_frame = VIDEO_MAX_FRAME;
  }

  if (alt_frame) {
    scoped_refptr<V4L2DecodeSurface> alt_frame_surface =
        VP8PictureToV4L2DecodeSurface(alt_frame);
    v4l2_frame_hdr.alt_frame = alt_frame_surface->output_record();
    ref_surfaces.push_back(alt_frame_surface);
  } else {
    v4l2_frame_hdr.alt_frame = VIDEO_MAX_FRAME;
  }

  struct v4l2_ext_control ctrl;
  memset(&ctrl, 0, sizeof(ctrl));
  ctrl.id = V4L2_CID_MPEG_VIDEO_VP8_FRAME_HDR;
  ctrl.size = sizeof(v4l2_frame_hdr);
  ctrl.p_vp8_frame_hdr = &v4l2_frame_hdr;

  struct v4l2_ext_controls ext_ctrls;
  memset(&ext_ctrls, 0, sizeof(ext_ctrls));
  ext_ctrls.count = 1;
  ext_ctrls.controls = &ctrl;
  ext_ctrls.config_store = dec_surface->config_store();

  if (!v4l2_dec_->SubmitExtControls(&ext_ctrls))
    return false;

  dec_surface->SetReferenceSurfaces(ref_surfaces);

  if (!v4l2_dec_->SubmitSlice(dec_surface->input_record(), frame_hdr->data,
                              frame_hdr->frame_size))
    return false;

  v4l2_dec_->DecodeSurface(dec_surface);
  return true;
}

bool V4L2SliceVideoDecodeAccelerator::V4L2VP8Accelerator::OutputPicture(
    const scoped_refptr<VP8Picture>& pic) {
  scoped_refptr<V4L2DecodeSurface> dec_surface =
      VP8PictureToV4L2DecodeSurface(pic);

  v4l2_dec_->SurfaceReady(dec_surface);
  return true;
}

scoped_refptr<V4L2SliceVideoDecodeAccelerator::V4L2DecodeSurface>
V4L2SliceVideoDecodeAccelerator::V4L2VP8Accelerator::
    VP8PictureToV4L2DecodeSurface(const scoped_refptr<VP8Picture>& pic) {
  V4L2VP8Picture* v4l2_pic = pic->AsV4L2VP8Picture();
  return v4l2_pic->dec_surface();
}

void V4L2SliceVideoDecodeAccelerator::DecodeSurface(
    const scoped_refptr<V4L2DecodeSurface>& dec_surface) {
  DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());

  DVLOGF(3) << "Submitting decode for surface: " << dec_surface->ToString();
  Enqueue(dec_surface);
}

void V4L2SliceVideoDecodeAccelerator::SurfaceReady(
    const scoped_refptr<V4L2DecodeSurface>& dec_surface) {
  DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());

  decoder_display_queue_.push(dec_surface);
  TryOutputSurfaces();
}

void V4L2SliceVideoDecodeAccelerator::TryOutputSurfaces() {
  while (!decoder_display_queue_.empty()) {
    scoped_refptr<V4L2DecodeSurface> dec_surface =
        decoder_display_queue_.front();

    if (!dec_surface->decoded())
      break;

    decoder_display_queue_.pop();
    OutputSurface(dec_surface);
  }
}

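// Hands a decoded surface to the client: marks its output record as being at
// the client, remembers it in |surfaces_at_display_| (presumably until the
// client returns the corresponding picture buffer), and queues a Picture for
// SendPictureReady().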
void V4L2SliceVideoDecodeAccelerator::OutputSurface(
    const scoped_refptr<V4L2DecodeSurface>& dec_surface) {
  DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());

  OutputRecord& output_record =
      output_buffer_map_[dec_surface->output_record()];

  bool inserted =
      surfaces_at_display_.insert(std::make_pair(output_record.picture_id,
                                                 dec_surface)).second;
  DCHECK(inserted);

  DCHECK(!output_record.at_client);
  DCHECK(!output_record.at_device);
  DCHECK_NE(output_record.egl_image, EGL_NO_IMAGE_KHR);
  DCHECK_NE(output_record.picture_id, -1);
  output_record.at_client = true;

  media::Picture picture(output_record.picture_id, dec_surface->bitstream_id(),
                         gfx::Rect(visible_size_), false);
  DVLOGF(3) << dec_surface->ToString()
            << ", bitstream_id: " << picture.bitstream_buffer_id()
            << ", picture_id: " << picture.picture_buffer_id();
  pending_picture_ready_.push(PictureRecord(output_record.cleared, picture));
  output_record.cleared = true;
}

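// Pairs a free input buffer with a free output buffer to form a new
// V4L2DecodeSurface for the bitstream buffer currently being decoded; returns
// nullptr when either free-buffer list is empty. The surface is created with a
// callback to ReuseOutputBuffer(), presumably so its output buffer can be
// recycled once the surface is no longer referenced.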
scoped_refptr<V4L2SliceVideoDecodeAccelerator::V4L2DecodeSurface>
V4L2SliceVideoDecodeAccelerator::CreateSurface() {
  DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
  DCHECK_EQ(state_, kDecoding);

  if (free_input_buffers_.empty() || free_output_buffers_.empty())
    return nullptr;

  int input = free_input_buffers_.front();
  free_input_buffers_.pop_front();
  int output = free_output_buffers_.front();
  free_output_buffers_.pop_front();

  InputRecord& input_record = input_buffer_map_[input];
  DCHECK_EQ(input_record.bytes_used, 0u);
  DCHECK_EQ(input_record.input_id, -1);
  DCHECK(decoder_current_bitstream_buffer_ != nullptr);
  input_record.input_id = decoder_current_bitstream_buffer_->input_id;

  scoped_refptr<V4L2DecodeSurface> dec_surface = new V4L2DecodeSurface(
      decoder_current_bitstream_buffer_->input_id, input, output,
      base::Bind(&V4L2SliceVideoDecodeAccelerator::ReuseOutputBuffer,
                 base::Unretained(this)));

  DVLOGF(4) << "Created surface " << input << " -> " << output;
  return dec_surface;
}

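// Drains |pending_picture_ready_| in order. Cleared pictures go straight to
// the IO thread; uncleared ones are routed through the child thread (where
// clearing happens) and counted in |picture_clearing_count_|, so that cleared
// pictures queued behind them are not delivered out of order.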
void V4L2SliceVideoDecodeAccelerator::SendPictureReady() {
  DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
  bool resetting_or_flushing = (decoder_resetting_ || decoder_flushing_);
  while (!pending_picture_ready_.empty()) {
    bool cleared = pending_picture_ready_.front().cleared;
    const media::Picture& picture = pending_picture_ready_.front().picture;
    if (cleared && picture_clearing_count_ == 0) {
      DVLOGF(4) << "Posting picture ready to IO for: "
                << picture.picture_buffer_id();
      // This picture is cleared. Post it to the IO thread to reduce latency.
      // This should be the case after all pictures are cleared at the
      // beginning.
      io_task_runner_->PostTask(
          FROM_HERE, base::Bind(&Client::PictureReady, io_client_, picture));
      pending_picture_ready_.pop();
    } else if (!cleared || resetting_or_flushing) {
      DVLOGF(3) << "cleared=" << pending_picture_ready_.front().cleared
                << ", decoder_resetting_=" << decoder_resetting_
                << ", decoder_flushing_=" << decoder_flushing_
                << ", picture_clearing_count_=" << picture_clearing_count_;
      DVLOGF(4) << "Posting picture ready to GPU for: "
                << picture.picture_buffer_id();
      // If the picture is not cleared, post it to the child thread, because it
      // has to be cleared there. A picture only needs to be cleared once. If
      // the decoder is resetting or flushing, send all pictures to ensure
      // every PictureReady arrives before the reset or flush is done.
      child_task_runner_->PostTaskAndReply(
          FROM_HERE, base::Bind(&Client::PictureReady, client_, picture),
          // Unretained is safe. If Client::PictureReady gets to run, |this| is
          // alive. Destroy() will wait for the decoder thread to finish.
          base::Bind(&V4L2SliceVideoDecodeAccelerator::PictureCleared,
                     base::Unretained(this)));
      picture_clearing_count_++;
      pending_picture_ready_.pop();
    } else {
      // This picture is cleared. But some pictures are about to be cleared on
      // the child thread. To preserve the order, do not send this until those
      // pictures are cleared.
      break;
    }
  }
}

void V4L2SliceVideoDecodeAccelerator::PictureCleared() {
  DVLOGF(3) << "clearing count=" << picture_clearing_count_;
  DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
  DCHECK_GT(picture_clearing_count_, 0);
  picture_clearing_count_--;
  SendPictureReady();
}

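// Capability queries: whether Decode() may be called on the IO thread, and
// which codec profiles the underlying V4L2 device supports for the
// slice-based H.264 and frame-based VP8 input formats.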
bool V4L2SliceVideoDecodeAccelerator::CanDecodeOnIOThread() {
  return true;
}

media::VideoDecodeAccelerator::SupportedProfiles
V4L2SliceVideoDecodeAccelerator::GetSupportedProfiles() {
  scoped_refptr<V4L2Device> device = V4L2Device::Create(V4L2Device::kDecoder);
  if (!device)
    return SupportedProfiles();

  const uint32_t supported_formats[] = {
      V4L2_PIX_FMT_H264_SLICE, V4L2_PIX_FMT_VP8_FRAME};
  return device->GetSupportedDecodeProfiles(arraysize(supported_formats),
                                            supported_formats);
}

}  // namespace content