// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include <linux/videodev2.h>
#include <sys/eventfd.h>

#include "base/bind.h"
#include "base/bind_helpers.h"
#include "base/callback.h"
#include "base/callback_helpers.h"
#include "base/command_line.h"
#include "base/numerics/safe_conversions.h"
#include "base/strings/stringprintf.h"
#include "content/common/gpu/media/v4l2_slice_video_decode_accelerator.h"
#include "media/base/bind_to_current_loop.h"
#include "media/base/media_switches.h"
#include "ui/gl/scoped_binders.h"
#define LOGF(level) LOG(level) << __FUNCTION__ << "(): "
#define DVLOGF(level) DVLOG(level) << __FUNCTION__ << "(): "

#define NOTIFY_ERROR(x)                          \
  do {                                           \
    LOG(ERROR) << "Setting error state:" << x;   \
    SetErrorState(x);                            \
  } while (0)

#define IOCTL_OR_ERROR_RETURN_VALUE(type, arg, value, type_str)          \
  do {                                                                   \
    if (device_->Ioctl(type, arg) != 0) {                                \
      PLOG(ERROR) << __FUNCTION__ << "(): ioctl() failed: " << type_str; \
      NOTIFY_ERROR(PLATFORM_FAILURE);                                    \
      return value;                                                      \
    }                                                                    \
  } while (0)

#define IOCTL_OR_ERROR_RETURN(type, arg) \
  IOCTL_OR_ERROR_RETURN_VALUE(type, arg, ((void)0), #type)

#define IOCTL_OR_ERROR_RETURN_FALSE(type, arg) \
  IOCTL_OR_ERROR_RETURN_VALUE(type, arg, false, #type)

#define IOCTL_OR_LOG_ERROR(type, arg)                                 \
  do {                                                                \
    if (device_->Ioctl(type, arg) != 0)                               \
      PLOG(ERROR) << __FUNCTION__ << "(): ioctl() failed: " << #type; \
  } while (0)
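// For reference, a call like IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_QBUF, &qbuf)
// inside a member function expands (modulo the do { } while (0) wrapper) to
// roughly:
//
//   if (device_->Ioctl(VIDIOC_QBUF, &qbuf) != 0) {
//     PLOG(ERROR) << __FUNCTION__ << "(): ioctl() failed: " << "VIDIOC_QBUF";
//     NOTIFY_ERROR(PLATFORM_FAILURE);
//     return false;
//   }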
class V4L2SliceVideoDecodeAccelerator::V4L2DecodeSurface
    : public base::RefCounted<V4L2DecodeSurface> {
 public:
  using ReleaseCB = base::Callback<void(int)>;

  V4L2DecodeSurface(int32 bitstream_id,
                    int input_record,
                    int output_record,
                    const ReleaseCB& release_cb);

  // Mark the surface as decoded. This will also release all references, as
  // they are not needed anymore.
  void SetDecoded();
  bool decoded() const { return decoded_; }

  int32 bitstream_id() const { return bitstream_id_; }
  int input_record() const { return input_record_; }
  int output_record() const { return output_record_; }
  uint32_t config_store() const { return config_store_; }

  // Take references to each reference surface and keep them until the
  // target surface is decoded.
  void SetReferenceSurfaces(
      const std::vector<scoped_refptr<V4L2DecodeSurface>>& ref_surfaces);

  std::string ToString() const;

 private:
  friend class base::RefCounted<V4L2DecodeSurface>;
  ~V4L2DecodeSurface();

  int32 bitstream_id_;
  int input_record_;
  int output_record_;
  uint32_t config_store_;
  bool decoded_;
  ReleaseCB release_cb_;

  std::vector<scoped_refptr<V4L2DecodeSurface>> reference_surfaces_;

  DISALLOW_COPY_AND_ASSIGN(V4L2DecodeSurface);
};
V4L2SliceVideoDecodeAccelerator::V4L2DecodeSurface::V4L2DecodeSurface(
    int32 bitstream_id,
    int input_record,
    int output_record,
    const ReleaseCB& release_cb)
    : bitstream_id_(bitstream_id),
      input_record_(input_record),
      output_record_(output_record),
      config_store_(input_record + 1),
      decoded_(false),
      release_cb_(release_cb) {
}

V4L2SliceVideoDecodeAccelerator::V4L2DecodeSurface::~V4L2DecodeSurface() {
  DVLOGF(5) << "Releasing output record id=" << output_record_;
  release_cb_.Run(output_record_);
}
void V4L2SliceVideoDecodeAccelerator::V4L2DecodeSurface::SetReferenceSurfaces(
    const std::vector<scoped_refptr<V4L2DecodeSurface>>& ref_surfaces) {
  DCHECK(reference_surfaces_.empty());
  reference_surfaces_ = ref_surfaces;
}

void V4L2SliceVideoDecodeAccelerator::V4L2DecodeSurface::SetDecoded() {
  decoded_ = true;

  // We can now drop references to all reference surfaces for this surface
  // as we are done with decoding.
  reference_surfaces_.clear();
}
std::string V4L2SliceVideoDecodeAccelerator::V4L2DecodeSurface::ToString()
    const {
  std::string out;
  base::StringAppendF(&out, "Buffer %d -> %d. ", input_record_, output_record_);
  base::StringAppendF(&out, "Reference surfaces:");
  for (const auto& ref : reference_surfaces_) {
    DCHECK_NE(ref->output_record(), output_record_);
    base::StringAppendF(&out, " %d", ref->output_record());
  }
  return out;
}
V4L2SliceVideoDecodeAccelerator::InputRecord::InputRecord()
    : input_id(-1),
      address(nullptr),
      length(0),
      bytes_used(0),
      at_device(false) {
}

V4L2SliceVideoDecodeAccelerator::OutputRecord::OutputRecord()
    : at_device(false),
      at_client(false),
      egl_image(EGL_NO_IMAGE_KHR),
      egl_sync(EGL_NO_SYNC_KHR),
      picture_id(-1),
      cleared(false) {
}
struct V4L2SliceVideoDecodeAccelerator::BitstreamBufferRef {
  BitstreamBufferRef(
      base::WeakPtr<VideoDecodeAccelerator::Client>& client,
      const scoped_refptr<base::SingleThreadTaskRunner>& client_task_runner,
      base::SharedMemory* shm,
      size_t size,
      int32 input_id);
  ~BitstreamBufferRef();
  const base::WeakPtr<VideoDecodeAccelerator::Client> client;
  const scoped_refptr<base::SingleThreadTaskRunner> client_task_runner;
  const scoped_ptr<base::SharedMemory> shm;
  const size_t size;
  const int32 input_id;
};
V4L2SliceVideoDecodeAccelerator::BitstreamBufferRef::BitstreamBufferRef(
    base::WeakPtr<VideoDecodeAccelerator::Client>& client,
    const scoped_refptr<base::SingleThreadTaskRunner>& client_task_runner,
    base::SharedMemory* shm,
    size_t size,
    int32 input_id)
    : client(client),
      client_task_runner(client_task_runner),
      shm(shm),
      size(size),
      input_id(input_id) {
}

V4L2SliceVideoDecodeAccelerator::BitstreamBufferRef::~BitstreamBufferRef() {
  if (input_id >= 0) {
    DVLOGF(5) << "returning input_id: " << input_id;
    client_task_runner->PostTask(
        FROM_HERE,
        base::Bind(&VideoDecodeAccelerator::Client::NotifyEndOfBitstreamBuffer,
                   client, input_id));
  }
}
struct V4L2SliceVideoDecodeAccelerator::EGLSyncKHRRef {
  EGLSyncKHRRef(EGLDisplay egl_display, EGLSyncKHR egl_sync);
  ~EGLSyncKHRRef();
  EGLDisplay const egl_display;
  EGLSyncKHR egl_sync;
};

V4L2SliceVideoDecodeAccelerator::EGLSyncKHRRef::EGLSyncKHRRef(
    EGLDisplay egl_display,
    EGLSyncKHR egl_sync)
    : egl_display(egl_display), egl_sync(egl_sync) {
}

V4L2SliceVideoDecodeAccelerator::EGLSyncKHRRef::~EGLSyncKHRRef() {
  // We don't check for eglDestroySyncKHR failures, because if we get here
  // with a valid sync object, something went wrong and we are getting
  // destroyed anyway.
  if (egl_sync != EGL_NO_SYNC_KHR)
    eglDestroySyncKHR(egl_display, egl_sync);
}
struct V4L2SliceVideoDecodeAccelerator::PictureRecord {
  PictureRecord(bool cleared, const media::Picture& picture);
  ~PictureRecord();
  bool cleared;  // Whether the texture is cleared and safe to render from.
  media::Picture picture;  // The decoded picture.
};

V4L2SliceVideoDecodeAccelerator::PictureRecord::PictureRecord(
    bool cleared,
    const media::Picture& picture)
    : cleared(cleared), picture(picture) {
}

V4L2SliceVideoDecodeAccelerator::PictureRecord::~PictureRecord() {
}
class V4L2SliceVideoDecodeAccelerator::V4L2H264Accelerator
    : public H264Decoder::H264Accelerator {
 public:
  V4L2H264Accelerator(V4L2SliceVideoDecodeAccelerator* v4l2_dec);
  ~V4L2H264Accelerator() override;

  // H264Decoder::H264Accelerator implementation.
  scoped_refptr<H264Picture> CreateH264Picture() override;

  bool SubmitFrameMetadata(const media::H264SPS* sps,
                           const media::H264PPS* pps,
                           const H264DPB& dpb,
                           const H264Picture::Vector& ref_pic_listp0,
                           const H264Picture::Vector& ref_pic_listb0,
                           const H264Picture::Vector& ref_pic_listb1,
                           const scoped_refptr<H264Picture>& pic) override;

  bool SubmitSlice(const media::H264PPS* pps,
                   const media::H264SliceHeader* slice_hdr,
                   const H264Picture::Vector& ref_pic_list0,
                   const H264Picture::Vector& ref_pic_list1,
                   const scoped_refptr<H264Picture>& pic,
                   const uint8_t* data,
                   size_t size) override;

  bool SubmitDecode(const scoped_refptr<H264Picture>& pic) override;
  bool OutputPicture(const scoped_refptr<H264Picture>& pic) override;

  void Reset() override;

 private:
  // Max size of reference list.
  static const size_t kDPBIndicesListSize = 32;
  void H264PictureListToDPBIndicesList(const H264Picture::Vector& src_pic_list,
                                       uint8_t dst_list[kDPBIndicesListSize]);

  void H264DPBToV4L2DPB(
      const H264DPB& dpb,
      std::vector<scoped_refptr<V4L2DecodeSurface>>* ref_surfaces);

  scoped_refptr<V4L2DecodeSurface> H264PictureToV4L2DecodeSurface(
      const scoped_refptr<H264Picture>& pic);

  size_t num_slices_;
  V4L2SliceVideoDecodeAccelerator* v4l2_dec_;

  // TODO(posciak): This should be queried from hardware once supported.
  static const size_t kMaxSlices = 16;
  struct v4l2_ctrl_h264_slice_param v4l2_slice_params_[kMaxSlices];
  struct v4l2_ctrl_h264_decode_param v4l2_decode_param_;

  DISALLOW_COPY_AND_ASSIGN(V4L2H264Accelerator);
};
class V4L2SliceVideoDecodeAccelerator::V4L2VP8Accelerator
    : public VP8Decoder::VP8Accelerator {
 public:
  V4L2VP8Accelerator(V4L2SliceVideoDecodeAccelerator* v4l2_dec);
  ~V4L2VP8Accelerator() override;

  // VP8Decoder::VP8Accelerator implementation.
  scoped_refptr<VP8Picture> CreateVP8Picture() override;

  bool SubmitDecode(const scoped_refptr<VP8Picture>& pic,
                    const media::Vp8FrameHeader* frame_hdr,
                    const scoped_refptr<VP8Picture>& last_frame,
                    const scoped_refptr<VP8Picture>& golden_frame,
                    const scoped_refptr<VP8Picture>& alt_frame) override;

  bool OutputPicture(const scoped_refptr<VP8Picture>& pic) override;

 private:
  scoped_refptr<V4L2DecodeSurface> VP8PictureToV4L2DecodeSurface(
      const scoped_refptr<VP8Picture>& pic);

  V4L2SliceVideoDecodeAccelerator* v4l2_dec_;

  DISALLOW_COPY_AND_ASSIGN(V4L2VP8Accelerator);
};
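// V4L2VP8Accelerator plays the analogous role for VP8Decoder: SubmitDecode()
// receives the parsed frame header plus the last/golden/alt reference frames
// and is expected to pack them into the corresponding V4L2 controls before
// the surface is queued.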
// Codec-specific subclasses of software decoder picture classes.
// This allows us to keep decoders oblivious of our implementation details.
class V4L2H264Picture : public H264Picture {
 public:
  V4L2H264Picture(const scoped_refptr<
      V4L2SliceVideoDecodeAccelerator::V4L2DecodeSurface>& dec_surface);

  V4L2H264Picture* AsV4L2H264Picture() override { return this; }
  scoped_refptr<V4L2SliceVideoDecodeAccelerator::V4L2DecodeSurface>
      dec_surface() { return dec_surface_; }

 private:
  ~V4L2H264Picture() override;

  scoped_refptr<V4L2SliceVideoDecodeAccelerator::V4L2DecodeSurface>
      dec_surface_;

  DISALLOW_COPY_AND_ASSIGN(V4L2H264Picture);
};

V4L2H264Picture::V4L2H264Picture(const scoped_refptr<
    V4L2SliceVideoDecodeAccelerator::V4L2DecodeSurface>& dec_surface)
    : dec_surface_(dec_surface) {
}

V4L2H264Picture::~V4L2H264Picture() {
}
class V4L2VP8Picture : public VP8Picture {
 public:
  V4L2VP8Picture(const scoped_refptr<
      V4L2SliceVideoDecodeAccelerator::V4L2DecodeSurface>& dec_surface);

  V4L2VP8Picture* AsV4L2VP8Picture() override { return this; }
  scoped_refptr<V4L2SliceVideoDecodeAccelerator::V4L2DecodeSurface>
      dec_surface() { return dec_surface_; }

 private:
  ~V4L2VP8Picture() override;

  scoped_refptr<V4L2SliceVideoDecodeAccelerator::V4L2DecodeSurface>
      dec_surface_;

  DISALLOW_COPY_AND_ASSIGN(V4L2VP8Picture);
};

V4L2VP8Picture::V4L2VP8Picture(const scoped_refptr<
    V4L2SliceVideoDecodeAccelerator::V4L2DecodeSurface>& dec_surface)
    : dec_surface_(dec_surface) {
}

V4L2VP8Picture::~V4L2VP8Picture() {
}
V4L2SliceVideoDecodeAccelerator::V4L2SliceVideoDecodeAccelerator(
    const scoped_refptr<V4L2Device>& device,
    EGLDisplay egl_display,
    EGLContext egl_context,
    const base::WeakPtr<Client>& io_client,
    const base::Callback<bool(void)>& make_context_current,
    const scoped_refptr<base::SingleThreadTaskRunner>& io_task_runner)
    : input_planes_count_(0),
      output_planes_count_(0),
      child_task_runner_(base::ThreadTaskRunnerHandle::Get()),
      io_task_runner_(io_task_runner),
      io_client_(io_client),
      device_(device),
      decoder_thread_("V4L2SliceVideoDecodeAcceleratorThread"),
      device_poll_thread_("V4L2SliceVideoDecodeAcceleratorDevicePollThread"),
      input_streamon_(false),
      input_buffer_queued_count_(0),
      output_streamon_(false),
      output_buffer_queued_count_(0),
      video_profile_(media::VIDEO_CODEC_PROFILE_UNKNOWN),
      output_format_fourcc_(0),
      state_(kUninitialized),
      decoder_flushing_(false),
      decoder_resetting_(false),
      surface_set_change_pending_(false),
      picture_clearing_count_(0),
      pictures_assigned_(false, false),
      make_context_current_(make_context_current),
      egl_display_(egl_display),
      egl_context_(egl_context),
      weak_this_factory_(this) {
  weak_this_ = weak_this_factory_.GetWeakPtr();
}
V4L2SliceVideoDecodeAccelerator::~V4L2SliceVideoDecodeAccelerator() {
  DCHECK(child_task_runner_->BelongsToCurrentThread());
  DCHECK(!decoder_thread_.IsRunning());
  DCHECK(!device_poll_thread_.IsRunning());

  DCHECK(input_buffer_map_.empty());
  DCHECK(output_buffer_map_.empty());
}
void V4L2SliceVideoDecodeAccelerator::NotifyError(Error error) {
  if (!child_task_runner_->BelongsToCurrentThread()) {
    child_task_runner_->PostTask(
        FROM_HERE, base::Bind(&V4L2SliceVideoDecodeAccelerator::NotifyError,
                              weak_this_, error));
    return;
  }

  if (client_) {
    client_->NotifyError(error);
    client_ptr_factory_.reset();
  }
}
bool V4L2SliceVideoDecodeAccelerator::Initialize(
    media::VideoCodecProfile profile,
    VideoDecodeAccelerator::Client* client) {
  DVLOGF(3) << "profile: " << profile;
  DCHECK(child_task_runner_->BelongsToCurrentThread());
  DCHECK_EQ(state_, kUninitialized);

  client_ptr_factory_.reset(
      new base::WeakPtrFactory<VideoDecodeAccelerator::Client>(client));
  client_ = client_ptr_factory_->GetWeakPtr();

  video_profile_ = profile;

  if (video_profile_ >= media::H264PROFILE_MIN &&
      video_profile_ <= media::H264PROFILE_MAX) {
    h264_accelerator_.reset(new V4L2H264Accelerator(this));
    decoder_.reset(new H264Decoder(h264_accelerator_.get()));
  } else if (video_profile_ >= media::VP8PROFILE_MIN &&
             video_profile_ <= media::VP8PROFILE_MAX) {
    vp8_accelerator_.reset(new V4L2VP8Accelerator(this));
    decoder_.reset(new VP8Decoder(vp8_accelerator_.get()));
  } else {
    DLOG(ERROR) << "Unsupported profile " << video_profile_;
    return false;
  }

  // TODO(posciak): This needs to be queried once supported.
  input_planes_count_ = 1;
  output_planes_count_ = 1;

  if (egl_display_ == EGL_NO_DISPLAY) {
    LOG(ERROR) << "Initialize(): could not get EGLDisplay";
    return false;
  }

  // We need the context to be initialized to query extensions.
  if (!make_context_current_.Run()) {
    LOG(ERROR) << "Initialize(): could not make context current";
    return false;
  }

  if (!gfx::g_driver_egl.ext.b_EGL_KHR_fence_sync) {
    LOG(ERROR) << "Initialize(): context does not have EGL_KHR_fence_sync";
    return false;
  }

  // Capabilities check.
  struct v4l2_capability caps;
  const __u32 kCapsRequired =
      V4L2_CAP_VIDEO_CAPTURE_MPLANE |
      V4L2_CAP_VIDEO_OUTPUT_MPLANE |
      V4L2_CAP_STREAMING;
  IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_QUERYCAP, &caps);
  if ((caps.capabilities & kCapsRequired) != kCapsRequired) {
    DLOG(ERROR) << "Initialize(): ioctl() failed: VIDIOC_QUERYCAP"
                   ", caps check failed: 0x" << std::hex << caps.capabilities;
    return false;
  }

  if (!SetupFormats())
    return false;

  if (!decoder_thread_.Start()) {
    DLOG(ERROR) << "Initialize(): device thread failed to start";
    return false;
  }
  decoder_thread_task_runner_ = decoder_thread_.task_runner();

  state_ = kInitialized;

  // InitializeTask will NOTIFY_ERROR on failure.
  decoder_thread_task_runner_->PostTask(
      FROM_HERE, base::Bind(&V4L2SliceVideoDecodeAccelerator::InitializeTask,
                            base::Unretained(this)));

  DVLOGF(1) << "V4L2SliceVideoDecodeAccelerator initialized";
  return true;
}
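// From here on, all decoding state lives on decoder_thread_; the public
// entry points below (Decode(), Flush(), Reset(), Destroy()) only post tasks
// to decoder_thread_task_runner_, while DevicePollTask() runs on
// device_poll_thread_ and bounces its results back via ServiceDeviceTask().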
void V4L2SliceVideoDecodeAccelerator::InitializeTask() {
  DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
  DCHECK_EQ(state_, kInitialized);

  if (!CreateInputBuffers())
    NOTIFY_ERROR(PLATFORM_FAILURE);

  // Output buffers will be created once decoder gives us information
  // about their size and required count.
  state_ = kDecoding;
}
void V4L2SliceVideoDecodeAccelerator::Destroy() {
  DCHECK(child_task_runner_->BelongsToCurrentThread());

  if (decoder_thread_.IsRunning()) {
    decoder_thread_task_runner_->PostTask(
        FROM_HERE, base::Bind(&V4L2SliceVideoDecodeAccelerator::DestroyTask,
                              base::Unretained(this)));

    // Wake up decoder thread in case we are waiting in CreateOutputBuffers
    // for client to provide pictures. Since this is Destroy, we won't be
    // getting them anymore (AssignPictureBuffers won't be called).
    pictures_assigned_.Signal();

    // Wait for tasks to finish/early-exit.
    decoder_thread_.Stop();
  }

  delete this;
  DVLOGF(3) << "Destroyed";
}
void V4L2SliceVideoDecodeAccelerator::DestroyTask() {
  DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());

  decoder_current_bitstream_buffer_.reset();
  while (!decoder_input_queue_.empty())
    decoder_input_queue_.pop();

  // Stop streaming and the device_poll_thread_.
  StopDevicePoll(false);

  DestroyInputBuffers();
  DestroyOutputs(false);

  DCHECK(surfaces_at_device_.empty());
  DCHECK(surfaces_at_display_.empty());
  DCHECK(decoder_display_queue_.empty());
}
bool V4L2SliceVideoDecodeAccelerator::SetupFormats() {
  DCHECK_EQ(state_, kUninitialized);

  __u32 input_format_fourcc =
      V4L2Device::VideoCodecProfileToV4L2PixFmt(video_profile_, true);
  if (!input_format_fourcc) {
    return false;
  }

  size_t input_size;
  gfx::Size max_resolution, min_resolution;
  device_->GetSupportedResolution(input_format_fourcc, &min_resolution,
                                  &max_resolution);
  if (max_resolution.width() > 1920 && max_resolution.height() > 1088)
    input_size = kInputBufferMaxSizeFor4k;
  else
    input_size = kInputBufferMaxSizeFor1080p;

  struct v4l2_fmtdesc fmtdesc;
  memset(&fmtdesc, 0, sizeof(fmtdesc));
  fmtdesc.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
  bool is_format_supported = false;
  while (device_->Ioctl(VIDIOC_ENUM_FMT, &fmtdesc) == 0) {
    if (fmtdesc.pixelformat == input_format_fourcc) {
      is_format_supported = true;
      break;
    }
    ++fmtdesc.index;
  }

  if (!is_format_supported) {
    DVLOG(1) << "Input fourcc " << input_format_fourcc
             << " not supported by device.";
    return false;
  }

  struct v4l2_format format;
  memset(&format, 0, sizeof(format));
  format.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
  format.fmt.pix_mp.pixelformat = input_format_fourcc;
  format.fmt.pix_mp.plane_fmt[0].sizeimage = input_size;
  format.fmt.pix_mp.num_planes = input_planes_count_;
  IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_S_FMT, &format);

  // We have to set up the format for output, because the driver may not allow
  // changing it once we start streaming; whether it can support our chosen
  // output format or not may depend on the input format.
  memset(&fmtdesc, 0, sizeof(fmtdesc));
  fmtdesc.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
  output_format_fourcc_ = 0;
  while (device_->Ioctl(VIDIOC_ENUM_FMT, &fmtdesc) == 0) {
    if (device_->CanCreateEGLImageFrom(fmtdesc.pixelformat)) {
      output_format_fourcc_ = fmtdesc.pixelformat;
      break;
    }
    ++fmtdesc.index;
  }

  if (output_format_fourcc_ == 0) {
    LOG(ERROR) << "Could not find a usable output format";
    return false;
  }

  // Only set fourcc for output; resolution, etc., will come from the
  // driver once it extracts it from the stream.
  memset(&format, 0, sizeof(format));
  format.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
  format.fmt.pix_mp.pixelformat = output_format_fourcc_;
  format.fmt.pix_mp.num_planes = output_planes_count_;
  IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_S_FMT, &format);

  return true;
}
bool V4L2SliceVideoDecodeAccelerator::CreateInputBuffers() {
  DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
  DCHECK(!input_streamon_);
  DCHECK(input_buffer_map_.empty());

  struct v4l2_requestbuffers reqbufs;
  memset(&reqbufs, 0, sizeof(reqbufs));
  reqbufs.count = kNumInputBuffers;
  reqbufs.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
  reqbufs.memory = V4L2_MEMORY_MMAP;
  IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_REQBUFS, &reqbufs);
  if (reqbufs.count < kNumInputBuffers) {
    PLOG(ERROR) << "Could not allocate enough output buffers";
    return false;
  }
  input_buffer_map_.resize(reqbufs.count);
  for (size_t i = 0; i < input_buffer_map_.size(); ++i) {
    free_input_buffers_.push_back(i);

    // Query for the MEMORY_MMAP pointer.
    struct v4l2_plane planes[VIDEO_MAX_PLANES];
    struct v4l2_buffer buffer;
    memset(&buffer, 0, sizeof(buffer));
    memset(planes, 0, sizeof(planes));
    buffer.index = i;
    buffer.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
    buffer.memory = V4L2_MEMORY_MMAP;
    buffer.m.planes = planes;
    buffer.length = input_planes_count_;
    IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_QUERYBUF, &buffer);
    void* address = device_->Mmap(nullptr,
                                  buffer.m.planes[0].length,
                                  PROT_READ | PROT_WRITE,
                                  MAP_SHARED,
                                  buffer.m.planes[0].m.mem_offset);
    if (address == MAP_FAILED) {
      PLOG(ERROR) << "CreateInputBuffers(): mmap() failed";
      return false;
    }
    input_buffer_map_[i].address = address;
    input_buffer_map_[i].length = buffer.m.planes[0].length;
  }

  return true;
}
bool V4L2SliceVideoDecodeAccelerator::CreateOutputBuffers() {
  DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
  DCHECK(!output_streamon_);
  DCHECK(output_buffer_map_.empty());
  DCHECK(surfaces_at_display_.empty());
  DCHECK(surfaces_at_device_.empty());

  visible_size_ = decoder_->GetPicSize();
  size_t num_pictures = decoder_->GetRequiredNumOfPictures();

  DCHECK_GT(num_pictures, 0u);
  DCHECK(!visible_size_.IsEmpty());

  struct v4l2_format format;
  memset(&format, 0, sizeof(format));
  format.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
  format.fmt.pix_mp.pixelformat = output_format_fourcc_;
  format.fmt.pix_mp.width = visible_size_.width();
  format.fmt.pix_mp.height = visible_size_.height();
  format.fmt.pix_mp.num_planes = input_planes_count_;

  if (device_->Ioctl(VIDIOC_S_FMT, &format) != 0) {
    PLOG(ERROR) << "Failed setting format to: " << output_format_fourcc_;
    NOTIFY_ERROR(PLATFORM_FAILURE);
    return false;
  }

  coded_size_.SetSize(base::checked_cast<int>(format.fmt.pix_mp.width),
                      base::checked_cast<int>(format.fmt.pix_mp.height));
  DCHECK_EQ(coded_size_.width() % 16, 0);
  DCHECK_EQ(coded_size_.height() % 16, 0);

  if (!gfx::Rect(coded_size_).Contains(gfx::Rect(visible_size_))) {
    LOG(ERROR) << "Got invalid adjusted coded size: " << coded_size_.ToString();
    return false;
  }

  DVLOGF(3) << "buffer_count=" << num_pictures
            << ", visible size=" << visible_size_.ToString()
            << ", coded size=" << coded_size_.ToString();

  child_task_runner_->PostTask(
      FROM_HERE,
      base::Bind(&VideoDecodeAccelerator::Client::ProvidePictureBuffers,
                 client_, num_pictures, coded_size_,
                 device_->GetTextureTarget()));

  // Wait for the client to call AssignPictureBuffers() on the Child thread.
  // We do this, because if we continue decoding without finishing buffer
  // allocation, we may end up Resetting before AssignPictureBuffers arrives,
  // resulting in unnecessary complications and subtle bugs.
  pictures_assigned_.Wait();

  return true;
}
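// CreateOutputBuffers() and AssignPictureBuffers() form a handshake: the
// decoder thread blocks on pictures_assigned_ above, and the child thread
// signals it after REQBUFS and EGLImage setup complete (or from Destroy() if
// the client will never provide buffers).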
void V4L2SliceVideoDecodeAccelerator::DestroyInputBuffers() {
  DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread() ||
         !decoder_thread_.IsRunning());
  DCHECK(!input_streamon_);

  for (auto& input_record : input_buffer_map_) {
    if (input_record.address != nullptr)
      device_->Munmap(input_record.address, input_record.length);
  }

  struct v4l2_requestbuffers reqbufs;
  memset(&reqbufs, 0, sizeof(reqbufs));
  reqbufs.count = 0;
  reqbufs.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
  reqbufs.memory = V4L2_MEMORY_MMAP;
  IOCTL_OR_LOG_ERROR(VIDIOC_REQBUFS, &reqbufs);

  input_buffer_map_.clear();
  free_input_buffers_.clear();
}
void V4L2SliceVideoDecodeAccelerator::DismissPictures(
    std::vector<int32> picture_buffer_ids,
    base::WaitableEvent* done) {
  DCHECK(child_task_runner_->BelongsToCurrentThread());

  for (auto picture_buffer_id : picture_buffer_ids) {
    DVLOGF(1) << "dismissing PictureBuffer id=" << picture_buffer_id;
    client_->DismissPictureBuffer(picture_buffer_id);
  }

  done->Signal();
}
void V4L2SliceVideoDecodeAccelerator::DevicePollTask(bool poll_device) {
  DCHECK_EQ(device_poll_thread_.message_loop(), base::MessageLoop::current());

  bool event_pending;
  if (!device_->Poll(poll_device, &event_pending)) {
    NOTIFY_ERROR(PLATFORM_FAILURE);
    return;
  }

  // All processing should happen on ServiceDeviceTask(), since we shouldn't
  // touch decoder state from this thread.
  decoder_thread_task_runner_->PostTask(
      FROM_HERE, base::Bind(&V4L2SliceVideoDecodeAccelerator::ServiceDeviceTask,
                            base::Unretained(this)));
}
void V4L2SliceVideoDecodeAccelerator::ServiceDeviceTask() {
  DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());

  // ServiceDeviceTask() should only ever be scheduled from DevicePollTask().

  Dequeue();
  SchedulePollIfNeeded();
}
void V4L2SliceVideoDecodeAccelerator::SchedulePollIfNeeded() {
  DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());

  if (!device_poll_thread_.IsRunning()) {
    DVLOGF(2) << "Device poll thread stopped, will not schedule poll";
    return;
  }

  DCHECK(input_streamon_ || output_streamon_);

  if (input_buffer_queued_count_ + output_buffer_queued_count_ == 0) {
    DVLOGF(4) << "No buffers queued, will not schedule poll";
    return;
  }

  DVLOGF(4) << "Scheduling device poll task";

  device_poll_thread_.message_loop()->PostTask(
      FROM_HERE, base::Bind(&V4L2SliceVideoDecodeAccelerator::DevicePollTask,
                            base::Unretained(this), true));

  DVLOGF(2) << "buffer counts: "
            << "INPUT[" << decoder_input_queue_.size() << "]"
            << " => DEVICE["
            << free_input_buffers_.size() << "+"
            << input_buffer_queued_count_ << "/"
            << input_buffer_map_.size() << "]->["
            << free_output_buffers_.size() << "+"
            << output_buffer_queued_count_ << "/"
            << output_buffer_map_.size() << "]"
            << " => DISPLAYQ[" << decoder_display_queue_.size() << "]"
            << " => CLIENT[" << surfaces_at_display_.size() << "]";
}
void V4L2SliceVideoDecodeAccelerator::Enqueue(
    const scoped_refptr<V4L2DecodeSurface>& dec_surface) {
  DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());

  const int old_inputs_queued = input_buffer_queued_count_;
  const int old_outputs_queued = output_buffer_queued_count_;

  if (!EnqueueInputRecord(dec_surface->input_record(),
                          dec_surface->config_store())) {
    DVLOGF(1) << "Failed queueing an input buffer";
    NOTIFY_ERROR(PLATFORM_FAILURE);
    return;
  }

  if (!EnqueueOutputRecord(dec_surface->output_record())) {
    DVLOGF(1) << "Failed queueing an output buffer";
    NOTIFY_ERROR(PLATFORM_FAILURE);
    return;
  }

  bool inserted =
      surfaces_at_device_.insert(std::make_pair(dec_surface->output_record(),
                                                dec_surface)).second;
  DCHECK(inserted);

  if (old_inputs_queued == 0 && old_outputs_queued == 0)
    SchedulePollIfNeeded();
}
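// Each surface is enqueued as a pair: its input (OUTPUT) buffer carrying the
// bitstream slice data and its output (CAPTURE) buffer receiving the decoded
// frame, keyed in surfaces_at_device_ by the output record index so Dequeue()
// can find it again.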
void V4L2SliceVideoDecodeAccelerator::Dequeue() {
  DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());

  struct v4l2_buffer dqbuf;
  struct v4l2_plane planes[VIDEO_MAX_PLANES];
  while (input_buffer_queued_count_ > 0) {
    DCHECK(input_streamon_);
    memset(&dqbuf, 0, sizeof(dqbuf));
    memset(&planes, 0, sizeof(planes));
    dqbuf.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
    dqbuf.memory = V4L2_MEMORY_USERPTR;
    dqbuf.m.planes = planes;
    dqbuf.length = input_planes_count_;
    if (device_->Ioctl(VIDIOC_DQBUF, &dqbuf) != 0) {
      if (errno == EAGAIN) {
        // EAGAIN if we're just out of buffers to dequeue.
        break;
      }
      PLOG(ERROR) << "ioctl() failed: VIDIOC_DQBUF";
      NOTIFY_ERROR(PLATFORM_FAILURE);
      return;
    }
    InputRecord& input_record = input_buffer_map_[dqbuf.index];
    DCHECK(input_record.at_device);
    input_record.at_device = false;
    ReuseInputBuffer(dqbuf.index);
    input_buffer_queued_count_--;
    DVLOGF(4) << "Dequeued input=" << dqbuf.index
              << " count: " << input_buffer_queued_count_;
  }

  while (output_buffer_queued_count_ > 0) {
    DCHECK(output_streamon_);
    memset(&dqbuf, 0, sizeof(dqbuf));
    memset(&planes, 0, sizeof(planes));
    dqbuf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
    dqbuf.memory = V4L2_MEMORY_MMAP;
    dqbuf.m.planes = planes;
    dqbuf.length = output_planes_count_;
    if (device_->Ioctl(VIDIOC_DQBUF, &dqbuf) != 0) {
      if (errno == EAGAIN) {
        // EAGAIN if we're just out of buffers to dequeue.
        break;
      }
      PLOG(ERROR) << "ioctl() failed: VIDIOC_DQBUF";
      NOTIFY_ERROR(PLATFORM_FAILURE);
      return;
    }
    OutputRecord& output_record = output_buffer_map_[dqbuf.index];
    DCHECK(output_record.at_device);
    output_record.at_device = false;
    output_buffer_queued_count_--;
    DVLOGF(3) << "Dequeued output=" << dqbuf.index
              << " count " << output_buffer_queued_count_;

    V4L2DecodeSurfaceByOutputId::iterator it =
        surfaces_at_device_.find(dqbuf.index);
    if (it == surfaces_at_device_.end()) {
      DLOG(ERROR) << "Got invalid surface from device.";
      NOTIFY_ERROR(PLATFORM_FAILURE);
      return;
    }

    it->second->SetDecoded();
    surfaces_at_device_.erase(it);
  }

  // A frame was decoded, see if we can output it.
  TryOutputSurfaces();

  ProcessPendingEventsIfNeeded();
}
void V4L2SliceVideoDecodeAccelerator::ProcessPendingEventsIfNeeded() {
  // Process pending events, if any, in the correct order.
  // We always first process the surface set change, as it is an internal
  // event from the decoder and interleaving it with external requests would
  // put the decoder in an undefined state.
  FinishSurfaceSetChangeIfNeeded();

  // Process external (client) requests.
  FinishFlushIfNeeded();
  FinishResetIfNeeded();
}
void V4L2SliceVideoDecodeAccelerator::ReuseInputBuffer(int index) {
  DVLOGF(4) << "Reusing input buffer, index=" << index;
  DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());

  DCHECK_LT(index, static_cast<int>(input_buffer_map_.size()));
  InputRecord& input_record = input_buffer_map_[index];

  DCHECK(!input_record.at_device);
  input_record.input_id = -1;
  input_record.bytes_used = 0;

  DCHECK_EQ(std::count(free_input_buffers_.begin(), free_input_buffers_.end(),
                       index), 0);
  free_input_buffers_.push_back(index);
}
void V4L2SliceVideoDecodeAccelerator::ReuseOutputBuffer(int index) {
  DVLOGF(4) << "Reusing output buffer, index=" << index;
  DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());

  DCHECK_LT(index, static_cast<int>(output_buffer_map_.size()));
  OutputRecord& output_record = output_buffer_map_[index];
  DCHECK(!output_record.at_device);
  DCHECK(!output_record.at_client);

  DCHECK_EQ(std::count(free_output_buffers_.begin(), free_output_buffers_.end(),
                       index), 0);
  free_output_buffers_.push_back(index);

  ScheduleDecodeBufferTaskIfNeeded();
}
bool V4L2SliceVideoDecodeAccelerator::EnqueueInputRecord(
    int index,
    uint32_t config_store) {
  DCHECK_LT(index, static_cast<int>(input_buffer_map_.size()));
  DCHECK_GT(config_store, 0u);

  // Enqueue an input (VIDEO_OUTPUT) buffer for an input video frame.
  InputRecord& input_record = input_buffer_map_[index];
  DCHECK(!input_record.at_device);
  struct v4l2_buffer qbuf;
  struct v4l2_plane qbuf_planes[VIDEO_MAX_PLANES];
  memset(&qbuf, 0, sizeof(qbuf));
  memset(qbuf_planes, 0, sizeof(qbuf_planes));
  qbuf.index = index;
  qbuf.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
  qbuf.memory = V4L2_MEMORY_MMAP;
  qbuf.m.planes = qbuf_planes;
  qbuf.m.planes[0].bytesused = input_record.bytes_used;
  qbuf.length = input_planes_count_;
  qbuf.config_store = config_store;
  IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_QBUF, &qbuf);
  input_record.at_device = true;
  input_buffer_queued_count_++;
  DVLOGF(4) << "Enqueued input=" << qbuf.index
            << " count: " << input_buffer_queued_count_;

  return true;
}
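// The config_store value attached to the queued input buffer is intended to
// let the slice-API driver associate this bitstream chunk with the per-frame
// state (SPS/PPS/slice/decode parameters) submitted as extended controls for
// the same surface. V4L2DecodeSurface computes it as input_record + 1 so that
// it is always non-zero (see the DCHECK_GT above).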
bool V4L2SliceVideoDecodeAccelerator::EnqueueOutputRecord(int index) {
  DCHECK_LT(index, static_cast<int>(output_buffer_map_.size()));

  // Enqueue an output (VIDEO_CAPTURE) buffer.
  OutputRecord& output_record = output_buffer_map_[index];
  DCHECK(!output_record.at_device);
  DCHECK(!output_record.at_client);
  DCHECK_NE(output_record.egl_image, EGL_NO_IMAGE_KHR);
  DCHECK_NE(output_record.picture_id, -1);

  if (output_record.egl_sync != EGL_NO_SYNC_KHR) {
    // If we have to wait for completion, wait. Note that
    // free_output_buffers_ is a FIFO queue, so we always wait on the
    // buffer that has been in the queue the longest.
    if (eglClientWaitSyncKHR(egl_display_, output_record.egl_sync, 0,
                             EGL_FOREVER_KHR) == EGL_FALSE) {
      // This will cause tearing, but is safe otherwise.
      DVLOGF(1) << "eglClientWaitSyncKHR failed!";
    }

    if (eglDestroySyncKHR(egl_display_, output_record.egl_sync) != EGL_TRUE) {
      LOGF(ERROR) << "eglDestroySyncKHR failed!";
      NOTIFY_ERROR(PLATFORM_FAILURE);
      return false;
    }

    output_record.egl_sync = EGL_NO_SYNC_KHR;
  }

  struct v4l2_buffer qbuf;
  struct v4l2_plane qbuf_planes[VIDEO_MAX_PLANES];
  memset(&qbuf, 0, sizeof(qbuf));
  memset(qbuf_planes, 0, sizeof(qbuf_planes));
  qbuf.index = index;
  qbuf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
  qbuf.memory = V4L2_MEMORY_MMAP;
  qbuf.m.planes = qbuf_planes;
  qbuf.length = output_planes_count_;
  IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_QBUF, &qbuf);
  output_record.at_device = true;
  output_buffer_queued_count_++;
  DVLOGF(4) << "Enqueued output=" << qbuf.index
            << " count: " << output_buffer_queued_count_;

  return true;
}
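// Waiting on (and then destroying) the EGLSync here ensures the client-side
// GL commands reading the texture for this picture have completed before the
// buffer is handed back to the hardware; the sync object itself is created in
// ReusePictureBuffer() when the client returns the picture.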
bool V4L2SliceVideoDecodeAccelerator::StartDevicePoll() {
  DVLOGF(3) << "Starting device poll";
  DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
  DCHECK(!device_poll_thread_.IsRunning());

  // Start up the device poll thread and schedule its first DevicePollTask().
  if (!device_poll_thread_.Start()) {
    DLOG(ERROR) << "StartDevicePoll(): Device thread failed to start";
    NOTIFY_ERROR(PLATFORM_FAILURE);
    return false;
  }

  if (!input_streamon_) {
    __u32 type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
    IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_STREAMON, &type);
    input_streamon_ = true;
  }

  if (!output_streamon_) {
    __u32 type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
    IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_STREAMON, &type);
    output_streamon_ = true;
  }

  device_poll_thread_.message_loop()->PostTask(
      FROM_HERE, base::Bind(&V4L2SliceVideoDecodeAccelerator::DevicePollTask,
                            base::Unretained(this), true));

  return true;
}
bool V4L2SliceVideoDecodeAccelerator::StopDevicePoll(bool keep_input_state) {
  DVLOGF(3) << "Stopping device poll";
  if (decoder_thread_.IsRunning())
    DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());

  // Signal the DevicePollTask() to stop, and stop the device poll thread.
  if (!device_->SetDevicePollInterrupt()) {
    PLOG(ERROR) << "SetDevicePollInterrupt(): failed";
    NOTIFY_ERROR(PLATFORM_FAILURE);
    return false;
  }
  device_poll_thread_.Stop();
  DVLOGF(3) << "Device poll thread stopped";

  // Clear the interrupt now, to be sure.
  if (!device_->ClearDevicePollInterrupt()) {
    NOTIFY_ERROR(PLATFORM_FAILURE);
    return false;
  }

  if (!keep_input_state) {
    if (input_streamon_) {
      __u32 type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
      IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_STREAMOFF, &type);
    }
    input_streamon_ = false;
  }

  if (output_streamon_) {
    __u32 type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
    IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_STREAMOFF, &type);
  }
  output_streamon_ = false;

  if (!keep_input_state) {
    for (size_t i = 0; i < input_buffer_map_.size(); ++i) {
      InputRecord& input_record = input_buffer_map_[i];
      if (input_record.at_device) {
        input_record.at_device = false;
        ReuseInputBuffer(i);
        input_buffer_queued_count_--;
      }
    }
    DCHECK_EQ(input_buffer_queued_count_, 0);
  }

  // STREAMOFF makes the driver drop all buffers without decoding and DQBUFing,
  // so we mark them all as at_device = false and clear surfaces_at_device_.
  for (size_t i = 0; i < output_buffer_map_.size(); ++i) {
    OutputRecord& output_record = output_buffer_map_[i];
    if (output_record.at_device) {
      output_record.at_device = false;
      output_buffer_queued_count_--;
    }
  }
  surfaces_at_device_.clear();
  DCHECK_EQ(output_buffer_queued_count_, 0);

  // Drop all surfaces that were awaiting decode before being displayed,
  // since we've just cancelled all outstanding decodes.
  while (!decoder_display_queue_.empty())
    decoder_display_queue_.pop();

  DVLOGF(3) << "Device poll stopped";
  return true;
}
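// StopDevicePoll(true) is used during a surface set change to leave the input
// (OUTPUT) queue streaming while only the CAPTURE side is torn down;
// StopDevicePoll(false) performs a full stop, recycling any input buffers
// still marked as queued.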
void V4L2SliceVideoDecodeAccelerator::Decode(
    const media::BitstreamBuffer& bitstream_buffer) {
  DVLOGF(3) << "input_id=" << bitstream_buffer.id()
            << ", size=" << bitstream_buffer.size();
  DCHECK(io_task_runner_->BelongsToCurrentThread());

  decoder_thread_task_runner_->PostTask(
      FROM_HERE, base::Bind(&V4L2SliceVideoDecodeAccelerator::DecodeTask,
                            base::Unretained(this), bitstream_buffer));
}
void V4L2SliceVideoDecodeAccelerator::DecodeTask(
    const media::BitstreamBuffer& bitstream_buffer) {
  DVLOGF(3) << "input_id=" << bitstream_buffer.id()
            << " size=" << bitstream_buffer.size();
  DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());

  scoped_ptr<BitstreamBufferRef> bitstream_record(new BitstreamBufferRef(
      io_client_, io_task_runner_,
      new base::SharedMemory(bitstream_buffer.handle(), true),
      bitstream_buffer.size(), bitstream_buffer.id()));
  if (!bitstream_record->shm->Map(bitstream_buffer.size())) {
    LOGF(ERROR) << "Could not map bitstream_buffer";
    NOTIFY_ERROR(UNREADABLE_INPUT);
    return;
  }
  DVLOGF(3) << "mapped at=" << bitstream_record->shm->memory();

  decoder_input_queue_.push(
      linked_ptr<BitstreamBufferRef>(bitstream_record.release()));

  ScheduleDecodeBufferTaskIfNeeded();
}
bool V4L2SliceVideoDecodeAccelerator::TrySetNewBistreamBuffer() {
  DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
  DCHECK(!decoder_current_bitstream_buffer_);

  if (decoder_input_queue_.empty())
    return false;

  decoder_current_bitstream_buffer_.reset(
      decoder_input_queue_.front().release());
  decoder_input_queue_.pop();

  if (decoder_current_bitstream_buffer_->input_id == kFlushBufferId) {
    // This is a buffer we queued for ourselves to trigger flush at this time.
    InitiateFlush();
    return false;
  }

  const uint8_t* const data = reinterpret_cast<const uint8_t*>(
      decoder_current_bitstream_buffer_->shm->memory());
  const size_t data_size = decoder_current_bitstream_buffer_->size;
  decoder_->SetStream(data, data_size);

  return true;
}
void V4L2SliceVideoDecodeAccelerator::ScheduleDecodeBufferTaskIfNeeded() {
  DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
  if (state_ == kDecoding) {
    decoder_thread_task_runner_->PostTask(
        FROM_HERE,
        base::Bind(&V4L2SliceVideoDecodeAccelerator::DecodeBufferTask,
                   base::Unretained(this)));
  }
}
void V4L2SliceVideoDecodeAccelerator::DecodeBufferTask() {
  DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());

  if (state_ != kDecoding) {
    DVLOGF(3) << "Early exit, not in kDecoding";
    return;
  }

  while (true) {
    AcceleratedVideoDecoder::DecodeResult res;
    res = decoder_->Decode();
    switch (res) {
      case AcceleratedVideoDecoder::kAllocateNewSurfaces:
        DVLOGF(2) << "Decoder requesting a new set of surfaces";
        InitiateSurfaceSetChange();
        return;

      case AcceleratedVideoDecoder::kRanOutOfStreamData:
        decoder_current_bitstream_buffer_.reset();
        if (!TrySetNewBistreamBuffer())
          return;

        break;

      case AcceleratedVideoDecoder::kRanOutOfSurfaces:
        // No more surfaces for the decoder, we'll come back once we have more.
        DVLOGF(4) << "Ran out of surfaces";
        return;

      case AcceleratedVideoDecoder::kDecodeError:
        DVLOGF(1) << "Error decoding stream";
        NOTIFY_ERROR(PLATFORM_FAILURE);
        return;
    }
  }
}
1276 DCHECK(decoder_thread_task_runner_
->BelongsToCurrentThread());
1278 DCHECK_EQ(state_
, kDecoding
);
1281 DCHECK(!surface_set_change_pending_
);
1282 surface_set_change_pending_
= true;
1284 FinishSurfaceSetChangeIfNeeded();
void V4L2SliceVideoDecodeAccelerator::FinishSurfaceSetChangeIfNeeded() {
  DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());

  if (!surface_set_change_pending_ || !surfaces_at_device_.empty())
    return;

  DCHECK_EQ(state_, kIdle);
  DCHECK(decoder_display_queue_.empty());
  // All output buffers should've been returned from decoder and device by now.
  // The only remaining owner of surfaces may be display (client), and we will
  // dismiss them when destroying output buffers below.
  DCHECK_EQ(free_output_buffers_.size() + surfaces_at_display_.size(),
            output_buffer_map_.size());

  // Keep input queue running while we switch outputs.
  if (!StopDevicePoll(true)) {
    NOTIFY_ERROR(PLATFORM_FAILURE);
    return;
  }

  // This will return only once all buffers are dismissed and destroyed.
  // This does not wait until they are displayed however, as display retains
  // references to the buffers bound to textures and will release them
  // after displaying.
  if (!DestroyOutputs(true)) {
    NOTIFY_ERROR(PLATFORM_FAILURE);
    return;
  }

  if (!CreateOutputBuffers()) {
    NOTIFY_ERROR(PLATFORM_FAILURE);
    return;
  }

  if (!StartDevicePoll()) {
    NOTIFY_ERROR(PLATFORM_FAILURE);
    return;
  }

  DVLOGF(3) << "Surface set change finished";

  surface_set_change_pending_ = false;
  state_ = kDecoding;
  ScheduleDecodeBufferTaskIfNeeded();
}
bool V4L2SliceVideoDecodeAccelerator::DestroyOutputs(bool dismiss) {
  DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
  std::vector<EGLImageKHR> egl_images_to_destroy;
  std::vector<int32> picture_buffers_to_dismiss;

  if (output_buffer_map_.empty())
    return true;

  for (auto output_record : output_buffer_map_) {
    DCHECK(!output_record.at_device);

    if (output_record.egl_sync != EGL_NO_SYNC_KHR) {
      if (eglDestroySyncKHR(egl_display_, output_record.egl_sync) != EGL_TRUE)
        DVLOGF(1) << "eglDestroySyncKHR failed.";
    }

    if (output_record.egl_image != EGL_NO_IMAGE_KHR) {
      child_task_runner_->PostTask(
          FROM_HERE,
          base::Bind(base::IgnoreResult(&V4L2Device::DestroyEGLImage), device_,
                     egl_display_, output_record.egl_image));
    }

    picture_buffers_to_dismiss.push_back(output_record.picture_id);
  }

  if (dismiss) {
    DVLOGF(2) << "Scheduling picture dismissal";
    base::WaitableEvent done(false, false);
    child_task_runner_->PostTask(
        FROM_HERE, base::Bind(&V4L2SliceVideoDecodeAccelerator::DismissPictures,
                              weak_this_, picture_buffers_to_dismiss, &done));
    done.Wait();
  }

  // At this point client can't call ReusePictureBuffer on any of the pictures
  // anymore, so it's safe to destroy.
  return DestroyOutputBuffers();
}
bool V4L2SliceVideoDecodeAccelerator::DestroyOutputBuffers() {
  DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread() ||
         !decoder_thread_.IsRunning());
  DCHECK(!output_streamon_);
  DCHECK(surfaces_at_device_.empty());
  DCHECK(decoder_display_queue_.empty());
  DCHECK_EQ(surfaces_at_display_.size() + free_output_buffers_.size(),
            output_buffer_map_.size());

  if (output_buffer_map_.empty())
    return true;

  // It's ok to do this, client will retain references to textures, but we are
  // not interested in reusing the surfaces anymore.
  // This will prevent us from reusing old surfaces in case we have some
  // ReusePictureBuffer() pending on ChildThread already. It's ok to ignore
  // them, because we have already dismissed them (in DestroyOutputs()).
  for (const auto& surface_at_display : surfaces_at_display_) {
    size_t index = surface_at_display.second->output_record();
    DCHECK_LT(index, output_buffer_map_.size());
    OutputRecord& output_record = output_buffer_map_[index];
    DCHECK(output_record.at_client);
    output_record.at_client = false;
  }
  surfaces_at_display_.clear();
  DCHECK_EQ(free_output_buffers_.size(), output_buffer_map_.size());

  free_output_buffers_.clear();
  output_buffer_map_.clear();

  struct v4l2_requestbuffers reqbufs;
  memset(&reqbufs, 0, sizeof(reqbufs));
  reqbufs.count = 0;
  reqbufs.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
  reqbufs.memory = V4L2_MEMORY_MMAP;
  IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_REQBUFS, &reqbufs);

  return true;
}
void V4L2SliceVideoDecodeAccelerator::AssignPictureBuffers(
    const std::vector<media::PictureBuffer>& buffers) {
  DCHECK(child_task_runner_->BelongsToCurrentThread());

  const uint32_t req_buffer_count = decoder_->GetRequiredNumOfPictures();

  if (buffers.size() < req_buffer_count) {
    DLOG(ERROR) << "Failed to provide requested picture buffers. "
                << "(Got " << buffers.size()
                << ", requested " << req_buffer_count << ")";
    NOTIFY_ERROR(INVALID_ARGUMENT);
    return;
  }

  if (!make_context_current_.Run()) {
    DLOG(ERROR) << "could not make context current";
    NOTIFY_ERROR(PLATFORM_FAILURE);
    return;
  }

  gfx::ScopedTextureBinder bind_restore(GL_TEXTURE_EXTERNAL_OES, 0);

  // It's safe to manipulate all the buffer state here, because the decoder
  // thread is waiting on pictures_assigned_.

  // Allocate the output buffers.
  struct v4l2_requestbuffers reqbufs;
  memset(&reqbufs, 0, sizeof(reqbufs));
  reqbufs.count = buffers.size();
  reqbufs.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
  reqbufs.memory = V4L2_MEMORY_MMAP;
  IOCTL_OR_ERROR_RETURN(VIDIOC_REQBUFS, &reqbufs);

  if (reqbufs.count != buffers.size()) {
    DLOG(ERROR) << "Could not allocate enough output buffers";
    NOTIFY_ERROR(PLATFORM_FAILURE);
    return;
  }

  output_buffer_map_.resize(buffers.size());

  DCHECK(free_output_buffers_.empty());
  for (size_t i = 0; i < output_buffer_map_.size(); ++i) {
    DCHECK(buffers[i].size() == coded_size_);

    OutputRecord& output_record = output_buffer_map_[i];
    DCHECK(!output_record.at_device);
    DCHECK(!output_record.at_client);
    DCHECK_EQ(output_record.egl_image, EGL_NO_IMAGE_KHR);
    DCHECK_EQ(output_record.egl_sync, EGL_NO_SYNC_KHR);
    DCHECK_EQ(output_record.picture_id, -1);
    DCHECK_EQ(output_record.cleared, false);

    EGLImageKHR egl_image = device_->CreateEGLImage(egl_display_,
                                                    egl_context_,
                                                    buffers[i].texture_id(),
                                                    coded_size_,
                                                    i,
                                                    output_format_fourcc_,
                                                    output_planes_count_);
    if (egl_image == EGL_NO_IMAGE_KHR) {
      LOGF(ERROR) << "Could not create EGLImageKHR";
      // Ownership of EGLImages allocated in previous iterations of this loop
      // has been transferred to output_buffer_map_. After we error-out here
      // the destructor will handle their cleanup.
      NOTIFY_ERROR(PLATFORM_FAILURE);
      return;
    }

    output_record.egl_image = egl_image;
    output_record.picture_id = buffers[i].id();
    free_output_buffers_.push_back(i);
    DVLOGF(3) << "buffer[" << i << "]: picture_id=" << output_record.picture_id;
  }

  pictures_assigned_.Signal();
}
void V4L2SliceVideoDecodeAccelerator::ReusePictureBuffer(
    int32 picture_buffer_id) {
  DCHECK(child_task_runner_->BelongsToCurrentThread());
  DVLOGF(4) << "picture_buffer_id=" << picture_buffer_id;

  if (!make_context_current_.Run()) {
    LOGF(ERROR) << "could not make context current";
    NOTIFY_ERROR(PLATFORM_FAILURE);
    return;
  }

  EGLSyncKHR egl_sync =
      eglCreateSyncKHR(egl_display_, EGL_SYNC_FENCE_KHR, NULL);
  if (egl_sync == EGL_NO_SYNC_KHR) {
    LOGF(ERROR) << "eglCreateSyncKHR() failed";
    NOTIFY_ERROR(PLATFORM_FAILURE);
    return;
  }

  scoped_ptr<EGLSyncKHRRef> egl_sync_ref(
      new EGLSyncKHRRef(egl_display_, egl_sync));
  decoder_thread_task_runner_->PostTask(
      FROM_HERE,
      base::Bind(&V4L2SliceVideoDecodeAccelerator::ReusePictureBufferTask,
                 base::Unretained(this), picture_buffer_id,
                 base::Passed(&egl_sync_ref)));
}
void V4L2SliceVideoDecodeAccelerator::ReusePictureBufferTask(
    int32 picture_buffer_id,
    scoped_ptr<EGLSyncKHRRef> egl_sync_ref) {
  DVLOGF(3) << "picture_buffer_id=" << picture_buffer_id;
  DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());

  V4L2DecodeSurfaceByPictureBufferId::iterator it =
      surfaces_at_display_.find(picture_buffer_id);
  if (it == surfaces_at_display_.end()) {
    // It's possible that we've already posted a DismissPictureBuffer for this
    // picture, but it has not yet executed when this ReusePictureBuffer was
    // posted to us by the client. In that case just ignore this (we've already
    // dismissed it and accounted for that) and let the sync object get
    // destroyed.
    DVLOGF(3) << "got picture id=" << picture_buffer_id
              << " not in use (anymore?).";
    return;
  }

  OutputRecord& output_record = output_buffer_map_[it->second->output_record()];
  if (output_record.at_device || !output_record.at_client) {
    DVLOGF(1) << "picture_buffer_id not reusable";
    NOTIFY_ERROR(INVALID_ARGUMENT);
    return;
  }

  DCHECK_EQ(output_record.egl_sync, EGL_NO_SYNC_KHR);
  DCHECK(!output_record.at_device);
  output_record.at_client = false;
  output_record.egl_sync = egl_sync_ref->egl_sync;
  // Take ownership of the EGLSync.
  egl_sync_ref->egl_sync = EGL_NO_SYNC_KHR;
  surfaces_at_display_.erase(it);
}
void V4L2SliceVideoDecodeAccelerator::Flush() {
  DCHECK(child_task_runner_->BelongsToCurrentThread());

  decoder_thread_task_runner_->PostTask(
      FROM_HERE, base::Bind(&V4L2SliceVideoDecodeAccelerator::FlushTask,
                            base::Unretained(this)));
}
void V4L2SliceVideoDecodeAccelerator::FlushTask() {
  DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());

  if (!decoder_input_queue_.empty()) {
    // We are not done with pending inputs, so queue an empty buffer,
    // which - when reached - will trigger flush sequence.
    decoder_input_queue_.push(
        linked_ptr<BitstreamBufferRef>(new BitstreamBufferRef(
            io_client_, io_task_runner_, nullptr, 0, kFlushBufferId)));
    return;
  }

  // No more inputs pending, so just finish flushing here.
  InitiateFlush();
}
void V4L2SliceVideoDecodeAccelerator::InitiateFlush() {
  DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());

  DCHECK(!decoder_flushing_);
  DCHECK_EQ(state_, kDecoding);
  state_ = kIdle;

  // This will trigger output for all remaining surfaces in the decoder.
  // However, not all of them may be decoded yet (they would be queued
  // in hardware then).
  if (!decoder_->Flush()) {
    DVLOGF(1) << "Failed flushing the decoder.";
    NOTIFY_ERROR(PLATFORM_FAILURE);
    return;
  }

  // Put the decoder in an idle state, ready to resume.
  decoder_->Reset();

  decoder_flushing_ = true;

  decoder_thread_task_runner_->PostTask(
      FROM_HERE,
      base::Bind(&V4L2SliceVideoDecodeAccelerator::FinishFlushIfNeeded,
                 base::Unretained(this)));
}
void V4L2SliceVideoDecodeAccelerator::FinishFlushIfNeeded() {
  DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());

  if (!decoder_flushing_ || !surfaces_at_device_.empty())
    return;

  DCHECK_EQ(state_, kIdle);

  // At this point, all remaining surfaces are decoded and dequeued, and since
  // we have already scheduled output for them in InitiateFlush(), their
  // respective PictureReady calls have been posted (or they have been queued on
  // pending_picture_ready_). So at this time, once we SendPictureReady(),
  // we will have all remaining PictureReady() posted to the client and we
  // can post NotifyFlushDone().
  DCHECK(decoder_display_queue_.empty());

  // Decoder should have already returned all surfaces and all surfaces are
  // out of hardware. There can be no other owners of input buffers.
  DCHECK_EQ(free_input_buffers_.size(), input_buffer_map_.size());

  SendPictureReady();

  child_task_runner_->PostTask(FROM_HERE,
                               base::Bind(&Client::NotifyFlushDone, client_));

  decoder_flushing_ = false;

  DVLOGF(3) << "Flush finished";
  state_ = kDecoding;
  ScheduleDecodeBufferTaskIfNeeded();
}
void V4L2SliceVideoDecodeAccelerator::Reset() {
  DCHECK(child_task_runner_->BelongsToCurrentThread());

  decoder_thread_task_runner_->PostTask(
      FROM_HERE, base::Bind(&V4L2SliceVideoDecodeAccelerator::ResetTask,
                            base::Unretained(this)));
}
void V4L2SliceVideoDecodeAccelerator::ResetTask() {
  DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());

  if (decoder_resetting_) {
    // This is a bug in the client, multiple Reset()s before NotifyResetDone()
    // are not allowed.
    NOTREACHED() << "Client should not be requesting multiple Reset()s";
    return;
  }

  DCHECK_EQ(state_, kDecoding);
  state_ = kIdle;

  // Put the decoder in an idle state, ready to resume.
  decoder_->Reset();

  decoder_resetting_ = true;

  // Drop all remaining inputs.
  decoder_current_bitstream_buffer_.reset();
  while (!decoder_input_queue_.empty())
    decoder_input_queue_.pop();

  FinishResetIfNeeded();
}
void V4L2SliceVideoDecodeAccelerator::FinishResetIfNeeded() {
  DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());

  if (!decoder_resetting_ || !surfaces_at_device_.empty())
    return;

  DCHECK_EQ(state_, kIdle);
  DCHECK(!decoder_flushing_);

  // Drop any pending outputs.
  while (!decoder_display_queue_.empty())
    decoder_display_queue_.pop();

  // At this point we can have no input buffers in the decoder, because we
  // Reset()ed it in ResetTask(), and have not scheduled any new Decode()s
  // having been in kIdle since. We don't have any surfaces in the HW either -
  // we just checked that surfaces_at_device_.empty(), and inputs are tied
  // to surfaces. Since there can be no other owners of input buffers, we can
  // simply mark them all as available.
  DCHECK_EQ(input_buffer_queued_count_, 0);
  free_input_buffers_.clear();
  for (size_t i = 0; i < input_buffer_map_.size(); ++i) {
    DCHECK(!input_buffer_map_[i].at_device);
    ReuseInputBuffer(i);
  }

  decoder_resetting_ = false;

  child_task_runner_->PostTask(FROM_HERE,
                               base::Bind(&Client::NotifyResetDone, client_));

  DVLOGF(3) << "Reset finished";

  state_ = kDecoding;
  ScheduleDecodeBufferTaskIfNeeded();
}
void V4L2SliceVideoDecodeAccelerator::SetErrorState(Error error) {
  // We can touch state_ only if this is the decoder thread or the
  // decoder thread isn't running.
  if (decoder_thread_.IsRunning() &&
      !decoder_thread_task_runner_->BelongsToCurrentThread()) {
    decoder_thread_task_runner_->PostTask(
        FROM_HERE, base::Bind(&V4L2SliceVideoDecodeAccelerator::SetErrorState,
                              base::Unretained(this), error));
    return;
  }

  // Post NotifyError only if we are already initialized, as the API does
  // not allow doing so before that.
  if (state_ != kError && state_ != kUninitialized)
    NotifyError(error);

  state_ = kError;
}
V4L2SliceVideoDecodeAccelerator::V4L2H264Accelerator::V4L2H264Accelerator(
    V4L2SliceVideoDecodeAccelerator* v4l2_dec)
    : num_slices_(0), v4l2_dec_(v4l2_dec) {
}

V4L2SliceVideoDecodeAccelerator::V4L2H264Accelerator::~V4L2H264Accelerator() {
}

scoped_refptr<H264Picture>
V4L2SliceVideoDecodeAccelerator::V4L2H264Accelerator::CreateH264Picture() {
  scoped_refptr<V4L2DecodeSurface> dec_surface = v4l2_dec_->CreateSurface();
  if (!dec_surface)
    return nullptr;

  return new V4L2H264Picture(dec_surface);
}
void V4L2SliceVideoDecodeAccelerator::V4L2H264Accelerator::
    H264PictureListToDPBIndicesList(const H264Picture::Vector& src_pic_list,
                                    uint8_t dst_list[kDPBIndicesListSize]) {
  size_t i;
  for (i = 0; i < src_pic_list.size() && i < kDPBIndicesListSize; ++i) {
    const scoped_refptr<H264Picture>& pic = src_pic_list[i];
    dst_list[i] = pic ? pic->dpb_position : VIDEO_MAX_FRAME;
  }

  while (i < kDPBIndicesListSize)
    dst_list[i++] = VIDEO_MAX_FRAME;
}
void V4L2SliceVideoDecodeAccelerator::V4L2H264Accelerator::H264DPBToV4L2DPB(
    const H264DPB& dpb,
    std::vector<scoped_refptr<V4L2DecodeSurface>>* ref_surfaces) {
  memset(v4l2_decode_param_.dpb, 0, sizeof(v4l2_decode_param_.dpb));
  size_t i = 0;
  for (const auto& pic : dpb) {
    if (i >= arraysize(v4l2_decode_param_.dpb)) {
      DVLOG(1) << "Invalid DPB size";
      break;
    }
    struct v4l2_h264_dpb_entry& entry = v4l2_decode_param_.dpb[i++];
    scoped_refptr<V4L2DecodeSurface> dec_surface =
        H264PictureToV4L2DecodeSurface(pic);
    entry.buf_index = dec_surface->output_record();
    entry.frame_num = pic->frame_num;
    entry.pic_num = pic->pic_num;
    entry.top_field_order_cnt = pic->top_field_order_cnt;
    entry.bottom_field_order_cnt = pic->bottom_field_order_cnt;
    entry.flags = (pic->ref ? V4L2_H264_DPB_ENTRY_FLAG_ACTIVE : 0) |
                  (pic->long_term ? V4L2_H264_DPB_ENTRY_FLAG_LONG_TERM : 0);

    ref_surfaces->push_back(dec_surface);
  }
}
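// Besides filling the v4l2_decode_param_ DPB entries, this also collects the
// V4L2DecodeSurfaces backing the reference pictures into |ref_surfaces|; the
// caller can then pin them via SetReferenceSurfaces() so their buffers stay
// valid until the current surface is decoded.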
bool V4L2SliceVideoDecodeAccelerator::V4L2H264Accelerator::SubmitFrameMetadata(
    const media::H264SPS* sps,
    const media::H264PPS* pps,
    const H264DPB& dpb,
    const H264Picture::Vector& ref_pic_listp0,
    const H264Picture::Vector& ref_pic_listb0,
    const H264Picture::Vector& ref_pic_listb1,
    const scoped_refptr<H264Picture>& pic) {
  struct v4l2_ext_control ctrl;
  std::vector<struct v4l2_ext_control> ctrls;

  struct v4l2_ctrl_h264_sps v4l2_sps;
  memset(&v4l2_sps, 0, sizeof(v4l2_sps));
  v4l2_sps.constraint_set_flags =
      (sps->constraint_set0_flag ? V4L2_H264_SPS_CONSTRAINT_SET0_FLAG : 0) |
      (sps->constraint_set1_flag ? V4L2_H264_SPS_CONSTRAINT_SET1_FLAG : 0) |
      (sps->constraint_set2_flag ? V4L2_H264_SPS_CONSTRAINT_SET2_FLAG : 0) |
      (sps->constraint_set3_flag ? V4L2_H264_SPS_CONSTRAINT_SET3_FLAG : 0) |
      (sps->constraint_set4_flag ? V4L2_H264_SPS_CONSTRAINT_SET4_FLAG : 0) |
      (sps->constraint_set5_flag ? V4L2_H264_SPS_CONSTRAINT_SET5_FLAG : 0);
1815 #define SPS_TO_V4L2SPS(a) v4l2_sps.a = sps->a
1816 SPS_TO_V4L2SPS(profile_idc
);
1817 SPS_TO_V4L2SPS(level_idc
);
1818 SPS_TO_V4L2SPS(seq_parameter_set_id
);
1819 SPS_TO_V4L2SPS(chroma_format_idc
);
1820 SPS_TO_V4L2SPS(bit_depth_luma_minus8
);
1821 SPS_TO_V4L2SPS(bit_depth_chroma_minus8
);
1822 SPS_TO_V4L2SPS(log2_max_frame_num_minus4
);
1823 SPS_TO_V4L2SPS(pic_order_cnt_type
);
1824 SPS_TO_V4L2SPS(log2_max_pic_order_cnt_lsb_minus4
);
1825 SPS_TO_V4L2SPS(offset_for_non_ref_pic
);
1826 SPS_TO_V4L2SPS(offset_for_top_to_bottom_field
);
1827 SPS_TO_V4L2SPS(num_ref_frames_in_pic_order_cnt_cycle
);
1829 static_assert(arraysize(v4l2_sps
.offset_for_ref_frame
) ==
1830 arraysize(sps
->offset_for_ref_frame
),
1831 "offset_for_ref_frame arrays must be same size");
1832 for (size_t i
= 0; i
< arraysize(v4l2_sps
.offset_for_ref_frame
); ++i
)
1833 v4l2_sps
.offset_for_ref_frame
[i
] = sps
->offset_for_ref_frame
[i
];
1834 SPS_TO_V4L2SPS(max_num_ref_frames
);
1835 SPS_TO_V4L2SPS(pic_width_in_mbs_minus1
);
1836 SPS_TO_V4L2SPS(pic_height_in_map_units_minus1
);
1837 #undef SPS_TO_V4L2SPS
1839 #define SET_V4L2_SPS_FLAG_IF(cond, flag) \
1840 v4l2_sps.flags |= ((sps->cond) ? (flag) : 0)
1841 SET_V4L2_SPS_FLAG_IF(separate_colour_plane_flag
,
1842 V4L2_H264_SPS_FLAG_SEPARATE_COLOUR_PLANE
);
1843 SET_V4L2_SPS_FLAG_IF(qpprime_y_zero_transform_bypass_flag
,
1844 V4L2_H264_SPS_FLAG_QPPRIME_Y_ZERO_TRANSFORM_BYPASS
);
1845 SET_V4L2_SPS_FLAG_IF(delta_pic_order_always_zero_flag
,
1846 V4L2_H264_SPS_FLAG_DELTA_PIC_ORDER_ALWAYS_ZERO
);
1847 SET_V4L2_SPS_FLAG_IF(gaps_in_frame_num_value_allowed_flag
,
1848 V4L2_H264_SPS_FLAG_GAPS_IN_FRAME_NUM_VALUE_ALLOWED
);
1849 SET_V4L2_SPS_FLAG_IF(frame_mbs_only_flag
, V4L2_H264_SPS_FLAG_FRAME_MBS_ONLY
);
1850 SET_V4L2_SPS_FLAG_IF(mb_adaptive_frame_field_flag
,
1851 V4L2_H264_SPS_FLAG_MB_ADAPTIVE_FRAME_FIELD
);
1852 SET_V4L2_SPS_FLAG_IF(direct_8x8_inference_flag
,
1853 V4L2_H264_SPS_FLAG_DIRECT_8X8_INFERENCE
);
1855 memset(&ctrl
, 0, sizeof(ctrl
));
1856 ctrl
.id
= V4L2_CID_MPEG_VIDEO_H264_SPS
;
1857 ctrl
.size
= sizeof(v4l2_sps
);
1858 ctrl
.p_h264_sps
= &v4l2_sps
;
1859 ctrls
.push_back(ctrl
);
1861 struct v4l2_ctrl_h264_pps v4l2_pps
;
1862 memset(&v4l2_pps
, 0, sizeof(v4l2_pps
));
1863 #define PPS_TO_V4L2PPS(a) v4l2_pps.a = pps->a
1864 PPS_TO_V4L2PPS(pic_parameter_set_id
);
1865 PPS_TO_V4L2PPS(seq_parameter_set_id
);
1866 PPS_TO_V4L2PPS(num_slice_groups_minus1
);
1867 PPS_TO_V4L2PPS(num_ref_idx_l0_default_active_minus1
);
1868 PPS_TO_V4L2PPS(num_ref_idx_l1_default_active_minus1
);
1869 PPS_TO_V4L2PPS(weighted_bipred_idc
);
1870 PPS_TO_V4L2PPS(pic_init_qp_minus26
);
1871 PPS_TO_V4L2PPS(pic_init_qs_minus26
);
1872 PPS_TO_V4L2PPS(chroma_qp_index_offset
);
1873 PPS_TO_V4L2PPS(second_chroma_qp_index_offset
);
1874 #undef PPS_TO_V4L2PPS
1876 #define SET_V4L2_PPS_FLAG_IF(cond, flag) \
1877 v4l2_pps.flags |= ((pps->cond) ? (flag) : 0)
1878 SET_V4L2_PPS_FLAG_IF(entropy_coding_mode_flag
,
1879 V4L2_H264_PPS_FLAG_ENTROPY_CODING_MODE
);
1880 SET_V4L2_PPS_FLAG_IF(
1881 bottom_field_pic_order_in_frame_present_flag
,
1882 V4L2_H264_PPS_FLAG_BOTTOM_FIELD_PIC_ORDER_IN_FRAME_PRESENT
);
1883 SET_V4L2_PPS_FLAG_IF(weighted_pred_flag
, V4L2_H264_PPS_FLAG_WEIGHTED_PRED
);
1884 SET_V4L2_PPS_FLAG_IF(deblocking_filter_control_present_flag
,
1885 V4L2_H264_PPS_FLAG_DEBLOCKING_FILTER_CONTROL_PRESENT
);
1886 SET_V4L2_PPS_FLAG_IF(constrained_intra_pred_flag
,
1887 V4L2_H264_PPS_FLAG_CONSTRAINED_INTRA_PRED
);
1888 SET_V4L2_PPS_FLAG_IF(redundant_pic_cnt_present_flag
,
1889 V4L2_H264_PPS_FLAG_REDUNDANT_PIC_CNT_PRESENT
);
1890 SET_V4L2_PPS_FLAG_IF(transform_8x8_mode_flag
,
1891 V4L2_H264_PPS_FLAG_TRANSFORM_8X8_MODE
);
1892 SET_V4L2_PPS_FLAG_IF(pic_scaling_matrix_present_flag
,
1893 V4L2_H264_PPS_FLAG_PIC_SCALING_MATRIX_PRESENT
);
1894 #undef SET_V4L2_PPS_FLAG_IF
1895 memset(&ctrl
, 0, sizeof(ctrl
));
1896 ctrl
.id
= V4L2_CID_MPEG_VIDEO_H264_PPS
;
1897 ctrl
.size
= sizeof(v4l2_pps
);
1898 ctrl
.p_h264_pps
= &v4l2_pps
;
1899 ctrls
.push_back(ctrl
);
1901 struct v4l2_ctrl_h264_scaling_matrix v4l2_scaling_matrix
;
1902 memset(&v4l2_scaling_matrix
, 0, sizeof(v4l2_scaling_matrix
));
1903 static_assert(arraysize(v4l2_scaling_matrix
.scaling_list_4x4
) <=
1904 arraysize(pps
->scaling_list4x4
) &&
1905 arraysize(v4l2_scaling_matrix
.scaling_list_4x4
[0]) <=
1906 arraysize(pps
->scaling_list4x4
[0]) &&
1907 arraysize(v4l2_scaling_matrix
.scaling_list_8x8
) <=
1908 arraysize(pps
->scaling_list8x8
) &&
1909 arraysize(v4l2_scaling_matrix
.scaling_list_8x8
[0]) <=
1910 arraysize(pps
->scaling_list8x8
[0]),
1911 "scaling_lists must be of correct size");
1912 for (size_t i
= 0; i
< arraysize(v4l2_scaling_matrix
.scaling_list_4x4
); ++i
) {
1913 for (size_t j
= 0; j
< arraysize(v4l2_scaling_matrix
.scaling_list_4x4
[i
]);
1915 v4l2_scaling_matrix
.scaling_list_4x4
[i
][j
] = pps
->scaling_list4x4
[i
][j
];
1918 for (size_t i
= 0; i
< arraysize(v4l2_scaling_matrix
.scaling_list_8x8
); ++i
) {
1919 for (size_t j
= 0; j
< arraysize(v4l2_scaling_matrix
.scaling_list_8x8
[i
]);
1921 v4l2_scaling_matrix
.scaling_list_8x8
[i
][j
] = pps
->scaling_list8x8
[i
][j
];
1924 memset(&ctrl
, 0, sizeof(ctrl
));
1925 ctrl
.id
= V4L2_CID_MPEG_VIDEO_H264_SCALING_MATRIX
;
1926 ctrl
.size
= sizeof(v4l2_scaling_matrix
);
1927 ctrl
.p_h264_scal_mtrx
= &v4l2_scaling_matrix
;
1928 ctrls
.push_back(ctrl
);
1930 scoped_refptr
<V4L2DecodeSurface
> dec_surface
=
1931 H264PictureToV4L2DecodeSurface(pic
);
1933 struct v4l2_ext_controls ext_ctrls
;
1934 memset(&ext_ctrls
, 0, sizeof(ext_ctrls
));
1935 ext_ctrls
.count
= ctrls
.size();
1936 ext_ctrls
.controls
= &ctrls
[0];
1937 ext_ctrls
.config_store
= dec_surface
->config_store();
1938 v4l2_dec_
->SubmitExtControls(&ext_ctrls
);
1940 H264PictureListToDPBIndicesList(ref_pic_listp0
,
1941 v4l2_decode_param_
.ref_pic_list_p0
);
1942 H264PictureListToDPBIndicesList(ref_pic_listb0
,
1943 v4l2_decode_param_
.ref_pic_list_b0
);
1944 H264PictureListToDPBIndicesList(ref_pic_listb1
,
1945 v4l2_decode_param_
.ref_pic_list_b1
);
1947 std::vector
<scoped_refptr
<V4L2DecodeSurface
>> ref_surfaces
;
1948 H264DPBToV4L2DPB(dpb
, &ref_surfaces
);
1949 dec_surface
->SetReferenceSurfaces(ref_surfaces
);
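// Fill a v4l2_ctrl_h264_slice_param for this slice (including prediction
// weight tables and reference picture lists) and append the slice data,
// prefixed with an Annex-B start code, to the input buffer.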
bool V4L2SliceVideoDecodeAccelerator::V4L2H264Accelerator::SubmitSlice(
    const media::H264PPS* pps,
    const media::H264SliceHeader* slice_hdr,
    const H264Picture::Vector& ref_pic_list0,
    const H264Picture::Vector& ref_pic_list1,
    const scoped_refptr<H264Picture>& pic,
    const uint8_t* data,
    size_t size) {
  if (num_slices_ == kMaxSlices) {
    LOGF(ERROR) << "Over limit of supported slices per frame";
    return false;
  }

  struct v4l2_ctrl_h264_slice_param& v4l2_slice_param =
      v4l2_slice_params_[num_slices_++];
  memset(&v4l2_slice_param, 0, sizeof(v4l2_slice_param));

  v4l2_slice_param.size = size;
#define SHDR_TO_V4L2SPARM(a) v4l2_slice_param.a = slice_hdr->a
  SHDR_TO_V4L2SPARM(header_bit_size);
  SHDR_TO_V4L2SPARM(first_mb_in_slice);
  SHDR_TO_V4L2SPARM(slice_type);
  SHDR_TO_V4L2SPARM(pic_parameter_set_id);
  SHDR_TO_V4L2SPARM(colour_plane_id);
  SHDR_TO_V4L2SPARM(frame_num);
  SHDR_TO_V4L2SPARM(idr_pic_id);
  SHDR_TO_V4L2SPARM(pic_order_cnt_lsb);
  SHDR_TO_V4L2SPARM(delta_pic_order_cnt_bottom);
  SHDR_TO_V4L2SPARM(delta_pic_order_cnt0);
  SHDR_TO_V4L2SPARM(delta_pic_order_cnt1);
  SHDR_TO_V4L2SPARM(redundant_pic_cnt);
  SHDR_TO_V4L2SPARM(dec_ref_pic_marking_bit_size);
  SHDR_TO_V4L2SPARM(cabac_init_idc);
  SHDR_TO_V4L2SPARM(slice_qp_delta);
  SHDR_TO_V4L2SPARM(slice_qs_delta);
  SHDR_TO_V4L2SPARM(disable_deblocking_filter_idc);
  SHDR_TO_V4L2SPARM(slice_alpha_c0_offset_div2);
  SHDR_TO_V4L2SPARM(slice_beta_offset_div2);
  SHDR_TO_V4L2SPARM(num_ref_idx_l0_active_minus1);
  SHDR_TO_V4L2SPARM(num_ref_idx_l1_active_minus1);
  SHDR_TO_V4L2SPARM(pic_order_cnt_bit_size);
#undef SHDR_TO_V4L2SPARM

#define SET_V4L2_SPARM_FLAG_IF(cond, flag) \
  v4l2_slice_param.flags |= ((slice_hdr->cond) ? (flag) : 0)
  SET_V4L2_SPARM_FLAG_IF(field_pic_flag, V4L2_SLICE_FLAG_FIELD_PIC);
  SET_V4L2_SPARM_FLAG_IF(bottom_field_flag, V4L2_SLICE_FLAG_BOTTOM_FIELD);
  SET_V4L2_SPARM_FLAG_IF(direct_spatial_mv_pred_flag,
                         V4L2_SLICE_FLAG_DIRECT_SPATIAL_MV_PRED);
  SET_V4L2_SPARM_FLAG_IF(sp_for_switch_flag, V4L2_SLICE_FLAG_SP_FOR_SWITCH);
#undef SET_V4L2_SPARM_FLAG_IF

  struct v4l2_h264_pred_weight_table* pred_weight_table =
      &v4l2_slice_param.pred_weight_table;

  if (((slice_hdr->IsPSlice() || slice_hdr->IsSPSlice()) &&
       pps->weighted_pred_flag) ||
      (slice_hdr->IsBSlice() && pps->weighted_bipred_idc == 1)) {
    pred_weight_table->luma_log2_weight_denom =
        slice_hdr->luma_log2_weight_denom;
    pred_weight_table->chroma_log2_weight_denom =
        slice_hdr->chroma_log2_weight_denom;

    struct v4l2_h264_weight_factors* factorsl0 =
        &pred_weight_table->weight_factors[0];

    for (int i = 0; i < 32; ++i) {
      factorsl0->luma_weight[i] =
          slice_hdr->pred_weight_table_l0.luma_weight[i];
      factorsl0->luma_offset[i] =
          slice_hdr->pred_weight_table_l0.luma_offset[i];

      for (int j = 0; j < 2; ++j) {
        factorsl0->chroma_weight[i][j] =
            slice_hdr->pred_weight_table_l0.chroma_weight[i][j];
        factorsl0->chroma_offset[i][j] =
            slice_hdr->pred_weight_table_l0.chroma_offset[i][j];
      }
    }

    if (slice_hdr->IsBSlice()) {
      struct v4l2_h264_weight_factors* factorsl1 =
          &pred_weight_table->weight_factors[1];

      for (int i = 0; i < 32; ++i) {
        factorsl1->luma_weight[i] =
            slice_hdr->pred_weight_table_l1.luma_weight[i];
        factorsl1->luma_offset[i] =
            slice_hdr->pred_weight_table_l1.luma_offset[i];

        for (int j = 0; j < 2; ++j) {
          factorsl1->chroma_weight[i][j] =
              slice_hdr->pred_weight_table_l1.chroma_weight[i][j];
          factorsl1->chroma_offset[i][j] =
              slice_hdr->pred_weight_table_l1.chroma_offset[i][j];
        }
      }
    }
  }

  H264PictureListToDPBIndicesList(ref_pic_list0,
                                  v4l2_slice_param.ref_pic_list0);
  H264PictureListToDPBIndicesList(ref_pic_list1,
                                  v4l2_slice_param.ref_pic_list1);

  scoped_refptr<V4L2DecodeSurface> dec_surface =
      H264PictureToV4L2DecodeSurface(pic);

  v4l2_decode_param_.nal_ref_idc = slice_hdr->nal_ref_idc;

  // TODO(posciak): Don't add start code back here, but have it passed from
  // the parser.
  size_t data_copy_size = size + 3;
  scoped_ptr<uint8_t[]> data_copy(new uint8_t[data_copy_size]);
  memset(data_copy.get(), 0, data_copy_size);
  data_copy[2] = 0x01;
  memcpy(data_copy.get() + 3, data, size);
  return v4l2_dec_->SubmitSlice(dec_surface->input_record(), data_copy.get(),
                                data_copy_size);
}

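// Append |size| bytes from |data| to the input buffer at |index|, failing if
// the buffer would overflow.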
bool V4L2SliceVideoDecodeAccelerator::SubmitSlice(int index,
                                                  const uint8_t* data,
                                                  size_t size) {
  DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());

  InputRecord& input_record = input_buffer_map_[index];

  if (input_record.bytes_used + size > input_record.length) {
    DVLOGF(1) << "Input buffer too small";
    return false;
  }

  memcpy(static_cast<uint8_t*>(input_record.address) + input_record.bytes_used,
         data, size);
  input_record.bytes_used += size;

  return true;
}

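// Submit a set of extended controls to the driver via VIDIOC_S_EXT_CTRLS.
// The controls are associated with a particular frame via config_store.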
bool V4L2SliceVideoDecodeAccelerator::SubmitExtControls(
    struct v4l2_ext_controls* ext_ctrls) {
  DCHECK_GT(ext_ctrls->config_store, 0u);
  IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_S_EXT_CTRLS, ext_ctrls);
  return true;
}

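// Submit the accumulated slice and decode parameters for this picture and
// queue its surface for decoding.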
bool V4L2SliceVideoDecodeAccelerator::V4L2H264Accelerator::SubmitDecode(
    const scoped_refptr<H264Picture>& pic) {
  scoped_refptr<V4L2DecodeSurface> dec_surface =
      H264PictureToV4L2DecodeSurface(pic);

  v4l2_decode_param_.num_slices = num_slices_;
  v4l2_decode_param_.idr_pic_flag = pic->idr;
  v4l2_decode_param_.top_field_order_cnt = pic->top_field_order_cnt;
  v4l2_decode_param_.bottom_field_order_cnt = pic->bottom_field_order_cnt;

  struct v4l2_ext_control ctrl;
  std::vector<struct v4l2_ext_control> ctrls;

  memset(&ctrl, 0, sizeof(ctrl));
  ctrl.id = V4L2_CID_MPEG_VIDEO_H264_SLICE_PARAM;
  ctrl.size = sizeof(v4l2_slice_params_);
  ctrl.p_h264_slice_param = v4l2_slice_params_;
  ctrls.push_back(ctrl);

  memset(&ctrl, 0, sizeof(ctrl));
  ctrl.id = V4L2_CID_MPEG_VIDEO_H264_DECODE_PARAM;
  ctrl.size = sizeof(v4l2_decode_param_);
  ctrl.p_h264_decode_param = &v4l2_decode_param_;
  ctrls.push_back(ctrl);

  struct v4l2_ext_controls ext_ctrls;
  memset(&ext_ctrls, 0, sizeof(ext_ctrls));
  ext_ctrls.count = ctrls.size();
  ext_ctrls.controls = &ctrls[0];
  ext_ctrls.config_store = dec_surface->config_store();
  v4l2_dec_->SubmitExtControls(&ext_ctrls);

  Reset();

  v4l2_dec_->DecodeSurface(dec_surface);

  return true;
}

bool V4L2SliceVideoDecodeAccelerator::V4L2H264Accelerator::OutputPicture(
    const scoped_refptr<H264Picture>& pic) {
  scoped_refptr<V4L2DecodeSurface> dec_surface =
      H264PictureToV4L2DecodeSurface(pic);
  v4l2_dec_->SurfaceReady(dec_surface);
  return true;
}

void V4L2SliceVideoDecodeAccelerator::V4L2H264Accelerator::Reset() {
  num_slices_ = 0;
  memset(&v4l2_decode_param_, 0, sizeof(v4l2_decode_param_));
  memset(&v4l2_slice_params_, 0, sizeof(v4l2_slice_params_));
}

scoped_refptr<V4L2SliceVideoDecodeAccelerator::V4L2DecodeSurface>
V4L2SliceVideoDecodeAccelerator::V4L2H264Accelerator::
    H264PictureToV4L2DecodeSurface(const scoped_refptr<H264Picture>& pic) {
  V4L2H264Picture* v4l2_pic = pic->AsV4L2H264Picture();
  return v4l2_pic->dec_surface();
}

V4L2SliceVideoDecodeAccelerator::V4L2VP8Accelerator::V4L2VP8Accelerator(
    V4L2SliceVideoDecodeAccelerator* v4l2_dec)
    : v4l2_dec_(v4l2_dec) {
  DCHECK(v4l2_dec_);
}

V4L2SliceVideoDecodeAccelerator::V4L2VP8Accelerator::~V4L2VP8Accelerator() {
}

scoped_refptr<VP8Picture>
V4L2SliceVideoDecodeAccelerator::V4L2VP8Accelerator::CreateVP8Picture() {
  scoped_refptr<V4L2DecodeSurface> dec_surface = v4l2_dec_->CreateSurface();
  if (!dec_surface)
    return nullptr;

  return new V4L2VP8Picture(dec_surface);
}

#define ARRAY_MEMCPY_CHECKED(to, from)                               \
  do {                                                               \
    static_assert(sizeof(to) == sizeof(from),                        \
                  #from " and " #to " arrays must be of same size"); \
    memcpy(to, from, sizeof(to));                                    \
  } while (0)

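// Helpers translating parsed VP8 frame header substructures into their V4L2
// control counterparts.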
static void FillV4L2SegmentationHeader(
    const media::Vp8SegmentationHeader& vp8_sgmnt_hdr,
    struct v4l2_vp8_sgmnt_hdr* v4l2_sgmnt_hdr) {
#define SET_V4L2_SGMNT_HDR_FLAG_IF(cond, flag) \
  v4l2_sgmnt_hdr->flags |= ((vp8_sgmnt_hdr.cond) ? (flag) : 0)
  SET_V4L2_SGMNT_HDR_FLAG_IF(segmentation_enabled,
                             V4L2_VP8_SEGMNT_HDR_FLAG_ENABLED);
  SET_V4L2_SGMNT_HDR_FLAG_IF(update_mb_segmentation_map,
                             V4L2_VP8_SEGMNT_HDR_FLAG_UPDATE_MAP);
  SET_V4L2_SGMNT_HDR_FLAG_IF(update_segment_feature_data,
                             V4L2_VP8_SEGMNT_HDR_FLAG_UPDATE_FEATURE_DATA);
#undef SET_V4L2_SGMNT_HDR_FLAG_IF
  v4l2_sgmnt_hdr->segment_feature_mode = vp8_sgmnt_hdr.segment_feature_mode;

  ARRAY_MEMCPY_CHECKED(v4l2_sgmnt_hdr->quant_update,
                       vp8_sgmnt_hdr.quantizer_update_value);
  ARRAY_MEMCPY_CHECKED(v4l2_sgmnt_hdr->lf_update,
                       vp8_sgmnt_hdr.lf_update_value);
  ARRAY_MEMCPY_CHECKED(v4l2_sgmnt_hdr->segment_probs,
                       vp8_sgmnt_hdr.segment_prob);
}

static void FillV4L2LoopfilterHeader(
    const media::Vp8LoopFilterHeader& vp8_loopfilter_hdr,
    struct v4l2_vp8_loopfilter_hdr* v4l2_lf_hdr) {
#define SET_V4L2_LF_HDR_FLAG_IF(cond, flag) \
  v4l2_lf_hdr->flags |= ((vp8_loopfilter_hdr.cond) ? (flag) : 0)
  SET_V4L2_LF_HDR_FLAG_IF(loop_filter_adj_enable, V4L2_VP8_LF_HDR_ADJ_ENABLE);
  SET_V4L2_LF_HDR_FLAG_IF(mode_ref_lf_delta_update,
                          V4L2_VP8_LF_HDR_DELTA_UPDATE);
#undef SET_V4L2_LF_HDR_FLAG_IF

#define LF_HDR_TO_V4L2_LF_HDR(a) v4l2_lf_hdr->a = vp8_loopfilter_hdr.a
  LF_HDR_TO_V4L2_LF_HDR(type);
  LF_HDR_TO_V4L2_LF_HDR(level);
  LF_HDR_TO_V4L2_LF_HDR(sharpness_level);
#undef LF_HDR_TO_V4L2_LF_HDR

  ARRAY_MEMCPY_CHECKED(v4l2_lf_hdr->ref_frm_delta_magnitude,
                       vp8_loopfilter_hdr.ref_frame_delta);
  ARRAY_MEMCPY_CHECKED(v4l2_lf_hdr->mb_mode_delta_magnitude,
                       vp8_loopfilter_hdr.mb_mode_delta);
}

static void FillV4L2QuantizationHeader(
    const media::Vp8QuantizationHeader& vp8_quant_hdr,
    struct v4l2_vp8_quantization_hdr* v4l2_quant_hdr) {
  v4l2_quant_hdr->y_ac_qi = vp8_quant_hdr.y_ac_qi;
  v4l2_quant_hdr->y_dc_delta = vp8_quant_hdr.y_dc_delta;
  v4l2_quant_hdr->y2_dc_delta = vp8_quant_hdr.y2_dc_delta;
  v4l2_quant_hdr->y2_ac_delta = vp8_quant_hdr.y2_ac_delta;
  v4l2_quant_hdr->uv_dc_delta = vp8_quant_hdr.uv_dc_delta;
  v4l2_quant_hdr->uv_ac_delta = vp8_quant_hdr.uv_ac_delta;
}

static void FillV4L2EntropyHeader(
    const media::Vp8EntropyHeader& vp8_entropy_hdr,
    struct v4l2_vp8_entropy_hdr* v4l2_entropy_hdr) {
  ARRAY_MEMCPY_CHECKED(v4l2_entropy_hdr->coeff_probs,
                       vp8_entropy_hdr.coeff_probs);
  ARRAY_MEMCPY_CHECKED(v4l2_entropy_hdr->y_mode_probs,
                       vp8_entropy_hdr.y_mode_probs);
  ARRAY_MEMCPY_CHECKED(v4l2_entropy_hdr->uv_mode_probs,
                       vp8_entropy_hdr.uv_mode_probs);
  ARRAY_MEMCPY_CHECKED(v4l2_entropy_hdr->mv_probs,
                       vp8_entropy_hdr.mv_probs);
}

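// Build a v4l2_ctrl_vp8_frame_hdr from the parsed frame header, point it at
// the surfaces backing the last/golden/alt reference frames, submit it for
// this surface's config store and queue the frame data for decoding.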
bool V4L2SliceVideoDecodeAccelerator::V4L2VP8Accelerator::SubmitDecode(
    const scoped_refptr<VP8Picture>& pic,
    const media::Vp8FrameHeader* frame_hdr,
    const scoped_refptr<VP8Picture>& last_frame,
    const scoped_refptr<VP8Picture>& golden_frame,
    const scoped_refptr<VP8Picture>& alt_frame) {
  struct v4l2_ctrl_vp8_frame_hdr v4l2_frame_hdr;
  memset(&v4l2_frame_hdr, 0, sizeof(v4l2_frame_hdr));

#define FHDR_TO_V4L2_FHDR(a) v4l2_frame_hdr.a = frame_hdr->a
  FHDR_TO_V4L2_FHDR(key_frame);
  FHDR_TO_V4L2_FHDR(version);
  FHDR_TO_V4L2_FHDR(width);
  FHDR_TO_V4L2_FHDR(horizontal_scale);
  FHDR_TO_V4L2_FHDR(height);
  FHDR_TO_V4L2_FHDR(vertical_scale);
  FHDR_TO_V4L2_FHDR(sign_bias_golden);
  FHDR_TO_V4L2_FHDR(sign_bias_alternate);
  FHDR_TO_V4L2_FHDR(prob_skip_false);
  FHDR_TO_V4L2_FHDR(prob_intra);
  FHDR_TO_V4L2_FHDR(prob_last);
  FHDR_TO_V4L2_FHDR(prob_gf);
  FHDR_TO_V4L2_FHDR(bool_dec_range);
  FHDR_TO_V4L2_FHDR(bool_dec_value);
  FHDR_TO_V4L2_FHDR(bool_dec_count);
#undef FHDR_TO_V4L2_FHDR

#define SET_V4L2_FRM_HDR_FLAG_IF(cond, flag) \
  v4l2_frame_hdr.flags |= ((frame_hdr->cond) ? (flag) : 0)
  SET_V4L2_FRM_HDR_FLAG_IF(is_experimental,
                           V4L2_VP8_FRAME_HDR_FLAG_EXPERIMENTAL);
  SET_V4L2_FRM_HDR_FLAG_IF(show_frame, V4L2_VP8_FRAME_HDR_FLAG_SHOW_FRAME);
  SET_V4L2_FRM_HDR_FLAG_IF(mb_no_skip_coeff,
                           V4L2_VP8_FRAME_HDR_FLAG_MB_NO_SKIP_COEFF);
#undef SET_V4L2_FRM_HDR_FLAG_IF

  FillV4L2SegmentationHeader(frame_hdr->segmentation_hdr,
                             &v4l2_frame_hdr.sgmnt_hdr);

  FillV4L2LoopfilterHeader(frame_hdr->loopfilter_hdr, &v4l2_frame_hdr.lf_hdr);

  FillV4L2QuantizationHeader(frame_hdr->quantization_hdr,
                             &v4l2_frame_hdr.quant_hdr);

  FillV4L2EntropyHeader(frame_hdr->entropy_hdr, &v4l2_frame_hdr.entropy_hdr);

  v4l2_frame_hdr.first_part_size =
      base::checked_cast<__u32>(frame_hdr->first_part_size);
  v4l2_frame_hdr.first_part_offset =
      base::checked_cast<__u32>(frame_hdr->first_part_offset);
  v4l2_frame_hdr.macroblock_bit_offset =
      base::checked_cast<__u32>(frame_hdr->macroblock_bit_offset);
  v4l2_frame_hdr.num_dct_parts = frame_hdr->num_of_dct_partitions;

  static_assert(arraysize(v4l2_frame_hdr.dct_part_sizes) ==
                    arraysize(frame_hdr->dct_partition_sizes),
                "DCT partition size arrays must have equal number of elements");
  for (size_t i = 0; i < frame_hdr->num_of_dct_partitions &&
                     i < arraysize(v4l2_frame_hdr.dct_part_sizes);
       ++i)
    v4l2_frame_hdr.dct_part_sizes[i] = frame_hdr->dct_partition_sizes[i];

  scoped_refptr<V4L2DecodeSurface> dec_surface =
      VP8PictureToV4L2DecodeSurface(pic);
  std::vector<scoped_refptr<V4L2DecodeSurface>> ref_surfaces;

  if (last_frame) {
    scoped_refptr<V4L2DecodeSurface> last_frame_surface =
        VP8PictureToV4L2DecodeSurface(last_frame);
    v4l2_frame_hdr.last_frame = last_frame_surface->output_record();
    ref_surfaces.push_back(last_frame_surface);
  } else {
    v4l2_frame_hdr.last_frame = VIDEO_MAX_FRAME;
  }

  if (golden_frame) {
    scoped_refptr<V4L2DecodeSurface> golden_frame_surface =
        VP8PictureToV4L2DecodeSurface(golden_frame);
    v4l2_frame_hdr.golden_frame = golden_frame_surface->output_record();
    ref_surfaces.push_back(golden_frame_surface);
  } else {
    v4l2_frame_hdr.golden_frame = VIDEO_MAX_FRAME;
  }

  if (alt_frame) {
    scoped_refptr<V4L2DecodeSurface> alt_frame_surface =
        VP8PictureToV4L2DecodeSurface(alt_frame);
    v4l2_frame_hdr.alt_frame = alt_frame_surface->output_record();
    ref_surfaces.push_back(alt_frame_surface);
  } else {
    v4l2_frame_hdr.alt_frame = VIDEO_MAX_FRAME;
  }

  struct v4l2_ext_control ctrl;
  memset(&ctrl, 0, sizeof(ctrl));
  ctrl.id = V4L2_CID_MPEG_VIDEO_VP8_FRAME_HDR;
  ctrl.size = sizeof(v4l2_frame_hdr);
  ctrl.p_vp8_frame_hdr = &v4l2_frame_hdr;

  struct v4l2_ext_controls ext_ctrls;
  memset(&ext_ctrls, 0, sizeof(ext_ctrls));
  ext_ctrls.count = 1;
  ext_ctrls.controls = &ctrl;
  ext_ctrls.config_store = dec_surface->config_store();

  if (!v4l2_dec_->SubmitExtControls(&ext_ctrls))
    return false;

  dec_surface->SetReferenceSurfaces(ref_surfaces);

  if (!v4l2_dec_->SubmitSlice(dec_surface->input_record(), frame_hdr->data,
                              frame_hdr->frame_size))
    return false;

  v4l2_dec_->DecodeSurface(dec_surface);

  return true;
}

bool V4L2SliceVideoDecodeAccelerator::V4L2VP8Accelerator::OutputPicture(
    const scoped_refptr<VP8Picture>& pic) {
  scoped_refptr<V4L2DecodeSurface> dec_surface =
      VP8PictureToV4L2DecodeSurface(pic);

  v4l2_dec_->SurfaceReady(dec_surface);

  return true;
}

scoped_refptr<V4L2SliceVideoDecodeAccelerator::V4L2DecodeSurface>
V4L2SliceVideoDecodeAccelerator::V4L2VP8Accelerator::
    VP8PictureToV4L2DecodeSurface(const scoped_refptr<VP8Picture>& pic) {
  V4L2VP8Picture* v4l2_pic = pic->AsV4L2VP8Picture();
  return v4l2_pic->dec_surface();
}

void V4L2SliceVideoDecodeAccelerator::DecodeSurface(
    const scoped_refptr<V4L2DecodeSurface>& dec_surface) {
  DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());

  DVLOGF(3) << "Submitting decode for surface: " << dec_surface->ToString();
  Enqueue(dec_surface);
}

void V4L2SliceVideoDecodeAccelerator::SurfaceReady(
    const scoped_refptr<V4L2DecodeSurface>& dec_surface) {
  DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());

  decoder_display_queue_.push(dec_surface);
  TryOutputSurfaces();
}

void V4L2SliceVideoDecodeAccelerator::TryOutputSurfaces() {
  while (!decoder_display_queue_.empty()) {
    scoped_refptr<V4L2DecodeSurface> dec_surface =
        decoder_display_queue_.front();

    if (!dec_surface->decoded())
      break;

    decoder_display_queue_.pop();
    OutputSurface(dec_surface);
  }
}

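// Hand the decoded picture backing |dec_surface| off to the client, keeping
// the surface referenced until the client returns its picture buffer.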
void V4L2SliceVideoDecodeAccelerator::OutputSurface(
    const scoped_refptr<V4L2DecodeSurface>& dec_surface) {
  DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());

  OutputRecord& output_record =
      output_buffer_map_[dec_surface->output_record()];

  bool inserted =
      surfaces_at_display_.insert(std::make_pair(output_record.picture_id,
                                                 dec_surface)).second;
  DCHECK(inserted);

  DCHECK(!output_record.at_client);
  DCHECK(!output_record.at_device);
  DCHECK_NE(output_record.egl_image, EGL_NO_IMAGE_KHR);
  DCHECK_NE(output_record.picture_id, -1);
  output_record.at_client = true;

  media::Picture picture(output_record.picture_id, dec_surface->bitstream_id(),
                         gfx::Rect(visible_size_), false);
  DVLOGF(3) << dec_surface->ToString()
            << ", bitstream_id: " << picture.bitstream_buffer_id()
            << ", picture_id: " << picture.picture_buffer_id();
  pending_picture_ready_.push(PictureRecord(output_record.cleared, picture));
  SendPictureReady();
  output_record.cleared = true;
}

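// Create a new decode surface from a free input/output buffer pair, or return
// nullptr if none are currently available.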
scoped_refptr<V4L2SliceVideoDecodeAccelerator::V4L2DecodeSurface>
V4L2SliceVideoDecodeAccelerator::CreateSurface() {
  DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
  DCHECK_EQ(state_, kDecoding);

  if (free_input_buffers_.empty() || free_output_buffers_.empty())
    return nullptr;

  int input = free_input_buffers_.front();
  free_input_buffers_.pop_front();
  int output = free_output_buffers_.front();
  free_output_buffers_.pop_front();

  InputRecord& input_record = input_buffer_map_[input];
  DCHECK_EQ(input_record.bytes_used, 0u);
  DCHECK_EQ(input_record.input_id, -1);
  DCHECK(decoder_current_bitstream_buffer_ != nullptr);
  input_record.input_id = decoder_current_bitstream_buffer_->input_id;

  scoped_refptr<V4L2DecodeSurface> dec_surface = new V4L2DecodeSurface(
      decoder_current_bitstream_buffer_->input_id, input, output,
      base::Bind(&V4L2SliceVideoDecodeAccelerator::ReuseOutputBuffer,
                 base::Unretained(this)));

  DVLOGF(4) << "Created surface " << input << " -> " << output;
  return dec_surface;
}

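// Send pictures from pending_picture_ready_ to the client, preserving the
// order between pictures that still need clearing and those already cleared.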
void V4L2SliceVideoDecodeAccelerator::SendPictureReady() {
  DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
  bool resetting_or_flushing = (decoder_resetting_ || decoder_flushing_);
  while (!pending_picture_ready_.empty()) {
    bool cleared = pending_picture_ready_.front().cleared;
    const media::Picture& picture = pending_picture_ready_.front().picture;
    if (cleared && picture_clearing_count_ == 0) {
      DVLOGF(4) << "Posting picture ready to IO for: "
                << picture.picture_buffer_id();
      // This picture is already cleared. Post it to the IO thread to reduce
      // latency. This should be the common case once all pictures have been
      // cleared at the beginning.
      io_task_runner_->PostTask(
          FROM_HERE, base::Bind(&Client::PictureReady, io_client_, picture));
      pending_picture_ready_.pop();
    } else if (!cleared || resetting_or_flushing) {
      DVLOGF(3) << "cleared=" << pending_picture_ready_.front().cleared
                << ", decoder_resetting_=" << decoder_resetting_
                << ", decoder_flushing_=" << decoder_flushing_
                << ", picture_clearing_count_=" << picture_clearing_count_;
      DVLOGF(4) << "Posting picture ready to GPU for: "
                << picture.picture_buffer_id();
      // If the picture is not cleared, post it to the child thread, because it
      // has to be cleared there. A picture only needs to be cleared once. If
      // the decoder is resetting or flushing, send all pictures to ensure
      // every PictureReady arrives before the reset or flush is done.
      child_task_runner_->PostTaskAndReply(
          FROM_HERE, base::Bind(&Client::PictureReady, client_, picture),
          // Unretained is safe: if Client::PictureReady gets to run, |this| is
          // still alive, as Destroy() waits for the decoder thread to finish.
          base::Bind(&V4L2SliceVideoDecodeAccelerator::PictureCleared,
                     base::Unretained(this)));
      picture_clearing_count_++;
      pending_picture_ready_.pop();
    } else {
      // This picture is cleared, but some pictures are still about to be
      // cleared on the child thread. To preserve the order, do not send this
      // one until those pictures have been cleared.
      break;
    }
  }
}

void V4L2SliceVideoDecodeAccelerator::PictureCleared() {
  DVLOGF(3) << "clearing count=" << picture_clearing_count_;
  DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
  DCHECK_GT(picture_clearing_count_, 0);
  picture_clearing_count_--;
  SendPictureReady();
}

bool V4L2SliceVideoDecodeAccelerator::CanDecodeOnIOThread() {
  return true;
}

media::VideoDecodeAccelerator::SupportedProfiles
V4L2SliceVideoDecodeAccelerator::GetSupportedProfiles() {
  scoped_refptr<V4L2Device> device = V4L2Device::Create(V4L2Device::kDecoder);
  if (!device)
    return SupportedProfiles();

  const uint32_t supported_formats[] = {
      V4L2_PIX_FMT_H264_SLICE, V4L2_PIX_FMT_VP8_FRAME};
  return device->GetSupportedDecodeProfiles(arraysize(supported_formats),
                                            supported_formats);
}

}  // namespace content