// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include <errno.h>
#include <linux/videodev2.h>
#include <string.h>
#include <sys/eventfd.h>
#include <sys/mman.h>

#include "base/bind.h"
#include "base/bind_helpers.h"
#include "base/callback.h"
#include "base/callback_helpers.h"
#include "base/command_line.h"
#include "base/message_loop/message_loop_proxy.h"
#include "base/numerics/safe_conversions.h"
#include "base/strings/stringprintf.h"
#include "content/common/gpu/media/v4l2_slice_video_decode_accelerator.h"
#include "media/base/bind_to_current_loop.h"
#include "media/base/media_switches.h"
#include "ui/gl/scoped_binders.h"

#define LOGF(level) LOG(level) << __FUNCTION__ << "(): "
#define DVLOGF(level) DVLOG(level) << __FUNCTION__ << "(): "

#define NOTIFY_ERROR(x)                          \
  do {                                           \
    LOG(ERROR) << "Setting error state:" << x;   \
    SetErrorState(x);                            \
  } while (0)

#define IOCTL_OR_ERROR_RETURN_VALUE(type, arg, value)                 \
  do {                                                                \
    if (device_->Ioctl(type, arg) != 0) {                             \
      PLOG(ERROR) << __FUNCTION__ << "(): ioctl() failed: " << #type; \
      return value;                                                   \
    }                                                                 \
  } while (0)

#define IOCTL_OR_ERROR_RETURN(type, arg) \
  IOCTL_OR_ERROR_RETURN_VALUE(type, arg, ((void)0))

#define IOCTL_OR_ERROR_RETURN_FALSE(type, arg) \
  IOCTL_OR_ERROR_RETURN_VALUE(type, arg, false)

#define IOCTL_OR_LOG_ERROR(type, arg)                                 \
  do {                                                                \
    if (device_->Ioctl(type, arg) != 0)                               \
      PLOG(ERROR) << __FUNCTION__ << "(): ioctl() failed: " << #type; \
  } while (0)
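
// Usage sketch (illustrative): the IOCTL_* helpers above expect a |device_|
// member in the enclosing class, e.g.
//
//   struct v4l2_requestbuffers reqbufs;
//   memset(&reqbufs, 0, sizeof(reqbufs));
//   IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_REQBUFS, &reqbufs);
//
// logs via PLOG and returns false from the calling function if the ioctl
// fails.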
class V4L2SliceVideoDecodeAccelerator::V4L2DecodeSurface
    : public base::RefCounted<V4L2DecodeSurface> {
 public:
  using ReleaseCB = base::Callback<void(int)>;

  V4L2DecodeSurface(int32 bitstream_id,
                    int input_record,
                    int output_record,
                    const ReleaseCB& release_cb);

  // Mark the surface as decoded. This will also release all references, as
  // they are not needed anymore.
  void SetDecoded();
  bool decoded() const { return decoded_; }

  int32 bitstream_id() const { return bitstream_id_; }
  int input_record() const { return input_record_; }
  int output_record() const { return output_record_; }
  uint32_t config_store() const { return config_store_; }

  // Take references to each reference surface and keep them until the
  // target surface is decoded.
  void SetReferenceSurfaces(
      const std::vector<scoped_refptr<V4L2DecodeSurface>>& ref_surfaces);

  std::string ToString() const;

 private:
  friend class base::RefCounted<V4L2DecodeSurface>;
  ~V4L2DecodeSurface();

  int32 bitstream_id_;
  int input_record_;
  int output_record_;
  uint32_t config_store_;
  bool decoded_;
  ReleaseCB release_cb_;

  std::vector<scoped_refptr<V4L2DecodeSurface>> reference_surfaces_;

  DISALLOW_COPY_AND_ASSIGN(V4L2DecodeSurface);
};
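
// Lifetime note: a V4L2DecodeSurface holds references to all of its reference
// surfaces until SetDecoded() is called, at which point they are dropped. When
// the last reference to the surface itself goes away, |release_cb_| runs and
// returns its output record to the owner. The config_store value is derived
// from the input buffer index (input_record + 1, see the constructor below).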
V4L2SliceVideoDecodeAccelerator::V4L2DecodeSurface::V4L2DecodeSurface(
    int32 bitstream_id,
    int input_record,
    int output_record,
    const ReleaseCB& release_cb)
    : bitstream_id_(bitstream_id),
      input_record_(input_record),
      output_record_(output_record),
      config_store_(input_record + 1),
      decoded_(false),
      release_cb_(release_cb) {
}

V4L2SliceVideoDecodeAccelerator::V4L2DecodeSurface::~V4L2DecodeSurface() {
  DVLOGF(5) << "Releasing output record id=" << output_record_;
  release_cb_.Run(output_record_);
}

void V4L2SliceVideoDecodeAccelerator::V4L2DecodeSurface::SetReferenceSurfaces(
    const std::vector<scoped_refptr<V4L2DecodeSurface>>& ref_surfaces) {
  DCHECK(reference_surfaces_.empty());
  reference_surfaces_ = ref_surfaces;
}

void V4L2SliceVideoDecodeAccelerator::V4L2DecodeSurface::SetDecoded() {
  DCHECK(!decoded_);
  decoded_ = true;

  // We can now drop references to all reference surfaces for this surface
  // as we are done with decoding.
  reference_surfaces_.clear();
}

std::string V4L2SliceVideoDecodeAccelerator::V4L2DecodeSurface::ToString()
    const {
  std::string out;
  base::StringAppendF(&out, "Buffer %d -> %d. ", input_record_, output_record_);
  base::StringAppendF(&out, "Reference surfaces:");
  for (const auto& ref : reference_surfaces_) {
    DCHECK_NE(ref->output_record(), output_record_);
    base::StringAppendF(&out, " %d", ref->output_record());
  }
  return out;
}
V4L2SliceVideoDecodeAccelerator::InputRecord::InputRecord()
    : input_id(-1),
      address(nullptr),
      length(0),
      bytes_used(0),
      at_device(false) {
}

V4L2SliceVideoDecodeAccelerator::OutputRecord::OutputRecord()
    : at_device(false),
      at_client(false),
      picture_id(-1),
      egl_image(EGL_NO_IMAGE_KHR),
      egl_sync(EGL_NO_SYNC_KHR),
      cleared(false) {
}
struct V4L2SliceVideoDecodeAccelerator::BitstreamBufferRef {
  BitstreamBufferRef(
      base::WeakPtr<VideoDecodeAccelerator::Client>& client,
      const scoped_refptr<base::MessageLoopProxy>& client_message_loop_proxy,
      base::SharedMemory* shm,
      size_t size,
      int32 input_id);
  ~BitstreamBufferRef();
  const base::WeakPtr<VideoDecodeAccelerator::Client> client;
  const scoped_refptr<base::MessageLoopProxy> client_message_loop_proxy;
  const scoped_ptr<base::SharedMemory> shm;
  const size_t size;
  const int32 input_id;
};

V4L2SliceVideoDecodeAccelerator::BitstreamBufferRef::BitstreamBufferRef(
    base::WeakPtr<VideoDecodeAccelerator::Client>& client,
    const scoped_refptr<base::MessageLoopProxy>& client_message_loop_proxy,
    base::SharedMemory* shm,
    size_t size,
    int32 input_id)
    : client(client),
      client_message_loop_proxy(client_message_loop_proxy),
      shm(shm),
      size(size),
      input_id(input_id) {
}

V4L2SliceVideoDecodeAccelerator::BitstreamBufferRef::~BitstreamBufferRef() {
  if (input_id >= 0) {
    DVLOGF(5) << "returning input_id: " << input_id;
    client_message_loop_proxy->PostTask(
        FROM_HERE,
        base::Bind(&VideoDecodeAccelerator::Client::NotifyEndOfBitstreamBuffer,
                   client, input_id));
  }
}
struct V4L2SliceVideoDecodeAccelerator::EGLSyncKHRRef {
  EGLSyncKHRRef(EGLDisplay egl_display, EGLSyncKHR egl_sync);
  ~EGLSyncKHRRef();
  EGLDisplay const egl_display;
  EGLSyncKHR egl_sync;
};

V4L2SliceVideoDecodeAccelerator::EGLSyncKHRRef::EGLSyncKHRRef(
    EGLDisplay egl_display,
    EGLSyncKHR egl_sync)
    : egl_display(egl_display), egl_sync(egl_sync) {
}

V4L2SliceVideoDecodeAccelerator::EGLSyncKHRRef::~EGLSyncKHRRef() {
  // We don't check for eglDestroySyncKHR failures, because if we get here
  // with a valid sync object, something went wrong and we are getting
  // destroyed anyway.
  if (egl_sync != EGL_NO_SYNC_KHR)
    eglDestroySyncKHR(egl_display, egl_sync);
}

struct V4L2SliceVideoDecodeAccelerator::PictureRecord {
  PictureRecord(bool cleared, const media::Picture& picture);
  ~PictureRecord();
  bool cleared;  // Whether the texture is cleared and safe to render from.
  media::Picture picture;  // The decoded picture.
};

V4L2SliceVideoDecodeAccelerator::PictureRecord::PictureRecord(
    bool cleared,
    const media::Picture& picture)
    : cleared(cleared), picture(picture) {
}

V4L2SliceVideoDecodeAccelerator::PictureRecord::~PictureRecord() {
}
class V4L2SliceVideoDecodeAccelerator::V4L2H264Accelerator
    : public H264Decoder::H264Accelerator {
 public:
  V4L2H264Accelerator(V4L2SliceVideoDecodeAccelerator* v4l2_dec);
  ~V4L2H264Accelerator() override;

  // H264Decoder::H264Accelerator implementation.
  scoped_refptr<H264Picture> CreateH264Picture() override;

  bool SubmitFrameMetadata(const media::H264SPS* sps,
                           const media::H264PPS* pps,
                           const H264DPB& dpb,
                           const H264Picture::Vector& ref_pic_listp0,
                           const H264Picture::Vector& ref_pic_listb0,
                           const H264Picture::Vector& ref_pic_listb1,
                           const scoped_refptr<H264Picture>& pic) override;

  bool SubmitSlice(const media::H264PPS* pps,
                   const media::H264SliceHeader* slice_hdr,
                   const H264Picture::Vector& ref_pic_list0,
                   const H264Picture::Vector& ref_pic_list1,
                   const scoped_refptr<H264Picture>& pic,
                   const uint8_t* data,
                   size_t size) override;

  bool SubmitDecode(const scoped_refptr<H264Picture>& pic) override;
  bool OutputPicture(const scoped_refptr<H264Picture>& pic) override;

  void Reset() override;

 private:
  // Max size of reference list.
  static const size_t kDPBIndicesListSize = 32;
  void H264PictureListToDPBIndicesList(const H264Picture::Vector& src_pic_list,
                                       uint8_t dst_list[kDPBIndicesListSize]);

  void H264DPBToV4L2DPB(
      const H264DPB& dpb,
      std::vector<scoped_refptr<V4L2DecodeSurface>>* ref_surfaces);

  scoped_refptr<V4L2DecodeSurface> H264PictureToV4L2DecodeSurface(
      const scoped_refptr<H264Picture>& pic);

  size_t num_slices_;
  V4L2SliceVideoDecodeAccelerator* v4l2_dec_;

  // TODO(posciak): This should be queried from hardware once supported.
  static const size_t kMaxSlices = 16;
  struct v4l2_ctrl_h264_slice_param v4l2_slice_params_[kMaxSlices];
  struct v4l2_ctrl_h264_decode_param v4l2_decode_param_;

  DISALLOW_COPY_AND_ASSIGN(V4L2H264Accelerator);
};
class V4L2SliceVideoDecodeAccelerator::V4L2VP8Accelerator
    : public VP8Decoder::VP8Accelerator {
 public:
  V4L2VP8Accelerator(V4L2SliceVideoDecodeAccelerator* v4l2_dec);
  ~V4L2VP8Accelerator() override;

  // VP8Decoder::VP8Accelerator implementation.
  scoped_refptr<VP8Picture> CreateVP8Picture() override;

  bool SubmitDecode(const scoped_refptr<VP8Picture>& pic,
                    const media::Vp8FrameHeader* frame_hdr,
                    const scoped_refptr<VP8Picture>& last_frame,
                    const scoped_refptr<VP8Picture>& golden_frame,
                    const scoped_refptr<VP8Picture>& alt_frame) override;

  bool OutputPicture(const scoped_refptr<VP8Picture>& pic) override;

 private:
  scoped_refptr<V4L2DecodeSurface> VP8PictureToV4L2DecodeSurface(
      const scoped_refptr<VP8Picture>& pic);

  V4L2SliceVideoDecodeAccelerator* v4l2_dec_;

  DISALLOW_COPY_AND_ASSIGN(V4L2VP8Accelerator);
};
// Codec-specific subclasses of software decoder picture classes.
// This allows us to keep decoders oblivious of our implementation details.
class V4L2H264Picture : public H264Picture {
 public:
  V4L2H264Picture(const scoped_refptr<
      V4L2SliceVideoDecodeAccelerator::V4L2DecodeSurface>& dec_surface);
  ~V4L2H264Picture() override;

  V4L2H264Picture* AsV4L2H264Picture() override { return this; }
  scoped_refptr<V4L2SliceVideoDecodeAccelerator::V4L2DecodeSurface>
  dec_surface() { return dec_surface_; }

 private:
  scoped_refptr<V4L2SliceVideoDecodeAccelerator::V4L2DecodeSurface>
      dec_surface_;

  DISALLOW_COPY_AND_ASSIGN(V4L2H264Picture);
};

V4L2H264Picture::V4L2H264Picture(const scoped_refptr<
    V4L2SliceVideoDecodeAccelerator::V4L2DecodeSurface>& dec_surface)
    : dec_surface_(dec_surface) {
}

V4L2H264Picture::~V4L2H264Picture() {
}

class V4L2VP8Picture : public VP8Picture {
 public:
  V4L2VP8Picture(const scoped_refptr<
      V4L2SliceVideoDecodeAccelerator::V4L2DecodeSurface>& dec_surface);
  ~V4L2VP8Picture() override;

  V4L2VP8Picture* AsV4L2VP8Picture() override { return this; }
  scoped_refptr<V4L2SliceVideoDecodeAccelerator::V4L2DecodeSurface>
  dec_surface() { return dec_surface_; }

 private:
  scoped_refptr<V4L2SliceVideoDecodeAccelerator::V4L2DecodeSurface>
      dec_surface_;

  DISALLOW_COPY_AND_ASSIGN(V4L2VP8Picture);
};

V4L2VP8Picture::V4L2VP8Picture(const scoped_refptr<
    V4L2SliceVideoDecodeAccelerator::V4L2DecodeSurface>& dec_surface)
    : dec_surface_(dec_surface) {
}

V4L2VP8Picture::~V4L2VP8Picture() {
}
V4L2SliceVideoDecodeAccelerator::V4L2SliceVideoDecodeAccelerator(
    const scoped_refptr<V4L2Device>& device,
    EGLDisplay egl_display,
    EGLContext egl_context,
    const base::WeakPtr<Client>& io_client,
    const base::Callback<bool(void)>& make_context_current,
    const scoped_refptr<base::MessageLoopProxy>& io_message_loop_proxy)
    : input_planes_count_(0),
      output_planes_count_(0),
      child_message_loop_proxy_(base::MessageLoopProxy::current()),
      io_message_loop_proxy_(io_message_loop_proxy),
      io_client_(io_client),
      device_(device),
      decoder_thread_("V4L2SliceVideoDecodeAcceleratorThread"),
      device_poll_thread_("V4L2SliceVideoDecodeAcceleratorDevicePollThread"),
      input_streamon_(false),
      input_buffer_queued_count_(0),
      output_streamon_(false),
      output_buffer_queued_count_(0),
      video_profile_(media::VIDEO_CODEC_PROFILE_UNKNOWN),
      output_format_fourcc_(0),
      state_(kUninitialized),
      decoder_flushing_(false),
      decoder_resetting_(false),
      surface_set_change_pending_(false),
      picture_clearing_count_(0),
      pictures_assigned_(false, false),
      make_context_current_(make_context_current),
      egl_display_(egl_display),
      egl_context_(egl_context),
      weak_this_factory_(this) {
  weak_this_ = weak_this_factory_.GetWeakPtr();
}
V4L2SliceVideoDecodeAccelerator::~V4L2SliceVideoDecodeAccelerator() {
  DCHECK(child_message_loop_proxy_->BelongsToCurrentThread());
  DCHECK(!decoder_thread_.IsRunning());
  DCHECK(!device_poll_thread_.IsRunning());

  DCHECK(input_buffer_map_.empty());
  DCHECK(output_buffer_map_.empty());
}
void V4L2SliceVideoDecodeAccelerator::NotifyError(Error error) {
  if (!child_message_loop_proxy_->BelongsToCurrentThread()) {
    child_message_loop_proxy_->PostTask(
        FROM_HERE, base::Bind(&V4L2SliceVideoDecodeAccelerator::NotifyError,
                              weak_this_, error));
    return;
  }

  if (client_) {
    client_->NotifyError(error);
    client_ptr_factory_.reset();
  }
}
bool V4L2SliceVideoDecodeAccelerator::Initialize(
    media::VideoCodecProfile profile,
    VideoDecodeAccelerator::Client* client) {
  DVLOGF(3) << "profile: " << profile;
  DCHECK(child_message_loop_proxy_->BelongsToCurrentThread());
  DCHECK_EQ(state_, kUninitialized);

  client_ptr_factory_.reset(
      new base::WeakPtrFactory<VideoDecodeAccelerator::Client>(client));
  client_ = client_ptr_factory_->GetWeakPtr();

  video_profile_ = profile;

  if (video_profile_ >= media::H264PROFILE_MIN &&
      video_profile_ <= media::H264PROFILE_MAX) {
    h264_accelerator_.reset(new V4L2H264Accelerator(this));
    decoder_.reset(new H264Decoder(h264_accelerator_.get()));
  } else if (video_profile_ >= media::VP8PROFILE_MIN &&
             video_profile_ <= media::VP8PROFILE_MAX) {
    vp8_accelerator_.reset(new V4L2VP8Accelerator(this));
    decoder_.reset(new VP8Decoder(vp8_accelerator_.get()));
  } else {
    DLOG(ERROR) << "Unsupported profile " << video_profile_;
    return false;
  }

  // TODO(posciak): This needs to be queried once supported.
  input_planes_count_ = 1;
  output_planes_count_ = 1;

  if (egl_display_ == EGL_NO_DISPLAY) {
    LOG(ERROR) << "Initialize(): could not get EGLDisplay";
    return false;
  }

  // We need the context to be initialized to query extensions.
  if (!make_context_current_.Run()) {
    LOG(ERROR) << "Initialize(): could not make context current";
    return false;
  }

  if (!gfx::g_driver_egl.ext.b_EGL_KHR_fence_sync) {
    LOG(ERROR) << "Initialize(): context does not have EGL_KHR_fence_sync";
    return false;
  }

  // Capabilities check.
  struct v4l2_capability caps;
  const __u32 kCapsRequired =
      V4L2_CAP_VIDEO_CAPTURE_MPLANE |
      V4L2_CAP_VIDEO_OUTPUT_MPLANE |
      V4L2_CAP_STREAMING;
  IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_QUERYCAP, &caps);
  if ((caps.capabilities & kCapsRequired) != kCapsRequired) {
    DLOG(ERROR) << "Initialize(): ioctl() failed: VIDIOC_QUERYCAP"
                   ", caps check failed: 0x" << std::hex << caps.capabilities;
    return false;
  }

  if (!SetupFormats())
    return false;

  if (!decoder_thread_.Start()) {
    DLOG(ERROR) << "Initialize(): device thread failed to start";
    return false;
  }
  decoder_thread_proxy_ = decoder_thread_.message_loop_proxy();

  state_ = kInitialized;

  // InitializeTask will NOTIFY_ERROR on failure.
  decoder_thread_proxy_->PostTask(
      FROM_HERE, base::Bind(&V4L2SliceVideoDecodeAccelerator::InitializeTask,
                            base::Unretained(this)));

  DVLOGF(1) << "V4L2SliceVideoDecodeAccelerator initialized";
  return true;
}
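
// Rough call-order sketch implied by the checks above and the
// VideoDecodeAccelerator interface (client-side wiring is illustrative):
//
//   if (!vda->Initialize(profile, client))   // child thread
//     return;
//   vda->Decode(bitstream_buffer);           // IO thread, per input buffer
//   // The client answers ProvidePictureBuffers() with AssignPictureBuffers()
//   // on the child thread; Flush()/Reset()/Destroy() also arrive there.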
void V4L2SliceVideoDecodeAccelerator::InitializeTask() {
  DCHECK(decoder_thread_proxy_->BelongsToCurrentThread());
  DCHECK_EQ(state_, kInitialized);

  if (!CreateInputBuffers())
    NOTIFY_ERROR(PLATFORM_FAILURE);

  // Output buffers will be created once decoder gives us information
  // about their size and required count.
  state_ = kDecoding;
}
void V4L2SliceVideoDecodeAccelerator::Destroy() {
  DCHECK(child_message_loop_proxy_->BelongsToCurrentThread());

  if (decoder_thread_.IsRunning()) {
    decoder_thread_proxy_->PostTask(
        FROM_HERE, base::Bind(&V4L2SliceVideoDecodeAccelerator::DestroyTask,
                              base::Unretained(this)));

    // Wait for tasks to finish/early-exit.
    decoder_thread_.Stop();
  }

  delete this;
  DVLOGF(3) << "Destroyed";
}
void V4L2SliceVideoDecodeAccelerator::DestroyTask() {
  DCHECK(decoder_thread_proxy_->BelongsToCurrentThread());

  decoder_current_bitstream_buffer_.reset();
  while (!decoder_input_queue_.empty())
    decoder_input_queue_.pop();

  // Stop streaming and the device_poll_thread_.
  StopDevicePoll(false);

  DestroyInputBuffers();
  DestroyOutputs(false);

  DCHECK(surfaces_at_device_.empty());
  DCHECK(surfaces_at_display_.empty());
  DCHECK(decoder_display_queue_.empty());
}
bool V4L2SliceVideoDecodeAccelerator::SetupFormats() {
  DCHECK_EQ(state_, kUninitialized);

  __u32 input_format_fourcc =
      V4L2Device::VideoCodecProfileToV4L2PixFmt(video_profile_, true);
  if (!input_format_fourcc)
    return false;

  size_t input_size;
  if (base::CommandLine::ForCurrentProcess()->HasSwitch(
          switches::kIgnoreResolutionLimitsForAcceleratedVideoDecode))
    input_size = kInputBufferMaxSizeFor4k;
  else
    input_size = kInputBufferMaxSizeFor1080p;

  struct v4l2_format format;
  memset(&format, 0, sizeof(format));
  format.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
  format.fmt.pix_mp.pixelformat = input_format_fourcc;
  format.fmt.pix_mp.plane_fmt[0].sizeimage = input_size;
  format.fmt.pix_mp.num_planes = input_planes_count_;
  IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_S_FMT, &format);

  // We have to set up the format for output, because the driver may not allow
  // changing it once we start streaming; whether it can support our chosen
  // output format or not may depend on the input format.
  struct v4l2_fmtdesc fmtdesc;
  memset(&fmtdesc, 0, sizeof(fmtdesc));
  fmtdesc.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
  output_format_fourcc_ = 0;
  while (device_->Ioctl(VIDIOC_ENUM_FMT, &fmtdesc) == 0) {
    if (device_->CanCreateEGLImageFrom(fmtdesc.pixelformat)) {
      output_format_fourcc_ = fmtdesc.pixelformat;
      break;
    }
    ++fmtdesc.index;
  }

  if (output_format_fourcc_ == 0) {
    LOG(ERROR) << "Could not find a usable output format";
    return false;
  }

  // Only set fourcc for output; resolution, etc., will come from the
  // driver once it extracts it from the stream.
  memset(&format, 0, sizeof(format));
  format.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
  format.fmt.pix_mp.pixelformat = output_format_fourcc_;
  format.fmt.pix_mp.num_planes = output_planes_count_;
  IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_S_FMT, &format);

  return true;
}
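
// Format negotiation note: for the OUTPUT (bitstream input) queue only the
// fourcc and a maximum sizeimage are set here, while the CAPTURE (decoded
// frame) fourcc is chosen by enumerating driver formats and picking the first
// one the V4L2Device reports it can wrap in an EGLImage. The CAPTURE
// resolution is left to the driver and is picked up later in
// CreateOutputBuffers().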
bool V4L2SliceVideoDecodeAccelerator::CreateInputBuffers() {
  DCHECK(decoder_thread_proxy_->BelongsToCurrentThread());
  DCHECK(!input_streamon_);
  DCHECK(input_buffer_map_.empty());

  struct v4l2_requestbuffers reqbufs;
  memset(&reqbufs, 0, sizeof(reqbufs));
  reqbufs.count = kNumInputBuffers;
  reqbufs.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
  reqbufs.memory = V4L2_MEMORY_MMAP;
  IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_REQBUFS, &reqbufs);
  if (reqbufs.count < kNumInputBuffers) {
    PLOG(ERROR) << "Could not allocate enough input buffers";
    return false;
  }
  input_buffer_map_.resize(reqbufs.count);
  for (size_t i = 0; i < input_buffer_map_.size(); ++i) {
    free_input_buffers_.push_back(i);

    // Query for the MEMORY_MMAP pointer.
    struct v4l2_plane planes[VIDEO_MAX_PLANES];
    struct v4l2_buffer buffer;
    memset(&buffer, 0, sizeof(buffer));
    memset(planes, 0, sizeof(planes));
    buffer.index = i;
    buffer.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
    buffer.memory = V4L2_MEMORY_MMAP;
    buffer.m.planes = planes;
    buffer.length = input_planes_count_;
    IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_QUERYBUF, &buffer);
    void* address = device_->Mmap(nullptr,
                                  buffer.m.planes[0].length,
                                  PROT_READ | PROT_WRITE,
                                  MAP_SHARED,
                                  buffer.m.planes[0].m.mem_offset);
    if (address == MAP_FAILED) {
      PLOG(ERROR) << "CreateInputBuffers(): mmap() failed";
      return false;
    }
    input_buffer_map_[i].address = address;
    input_buffer_map_[i].length = buffer.m.planes[0].length;
  }

  return true;
}
bool V4L2SliceVideoDecodeAccelerator::CreateOutputBuffers() {
  DCHECK(decoder_thread_proxy_->BelongsToCurrentThread());
  DCHECK(!output_streamon_);
  DCHECK(output_buffer_map_.empty());
  DCHECK(surfaces_at_display_.empty());
  DCHECK(surfaces_at_device_.empty());

  visible_size_ = decoder_->GetPicSize();
  size_t num_pictures = decoder_->GetRequiredNumOfPictures();

  DCHECK_GT(num_pictures, 0u);
  DCHECK(!visible_size_.IsEmpty());

  struct v4l2_format format;
  memset(&format, 0, sizeof(format));
  format.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
  format.fmt.pix_mp.pixelformat = output_format_fourcc_;
  format.fmt.pix_mp.width = visible_size_.width();
  format.fmt.pix_mp.height = visible_size_.height();
  format.fmt.pix_mp.num_planes = output_planes_count_;

  if (device_->Ioctl(VIDIOC_S_FMT, &format) != 0) {
    PLOG(ERROR) << "Failed setting format to: " << output_format_fourcc_;
    NOTIFY_ERROR(PLATFORM_FAILURE);
    return false;
  }

  coded_size_.SetSize(base::checked_cast<int>(format.fmt.pix_mp.width),
                      base::checked_cast<int>(format.fmt.pix_mp.height));
  DCHECK_EQ(coded_size_.width() % 16, 0);
  DCHECK_EQ(coded_size_.height() % 16, 0);

  if (!gfx::Rect(coded_size_).Contains(gfx::Rect(visible_size_))) {
    LOG(ERROR) << "Got invalid adjusted coded size: " << coded_size_.ToString();
    return false;
  }

  struct v4l2_requestbuffers reqbufs;
  memset(&reqbufs, 0, sizeof(reqbufs));
  reqbufs.count = num_pictures;
  reqbufs.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
  reqbufs.memory = V4L2_MEMORY_MMAP;
  IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_REQBUFS, &reqbufs);

  if (reqbufs.count < num_pictures) {
    PLOG(ERROR) << "Could not allocate enough output buffers";
    return false;
  }

  output_buffer_map_.resize(reqbufs.count);

  DVLOGF(3) << "buffer_count=" << output_buffer_map_.size()
            << ", visible size=" << visible_size_.ToString()
            << ", coded size=" << coded_size_.ToString();

  child_message_loop_proxy_->PostTask(
      FROM_HERE,
      base::Bind(&VideoDecodeAccelerator::Client::ProvidePictureBuffers,
                 client_, output_buffer_map_.size(), coded_size_,
                 device_->GetTextureTarget()));

  // Wait for the client to call AssignPictureBuffers() on the Child thread.
  // We do this, because if we continue decoding without finishing buffer
  // allocation, we may end up Resetting before AssignPictureBuffers arrives,
  // resulting in unnecessary complications and subtle bugs.
  pictures_assigned_.Wait();

  return true;
}
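
// Note: CreateOutputBuffers() runs on the decoder thread but blocks on
// pictures_assigned_ until AssignPictureBuffers() signals it from the child
// thread, so picture buffer allocation is effectively synchronous from the
// decoder thread's point of view.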
void V4L2SliceVideoDecodeAccelerator::DestroyInputBuffers() {
  DCHECK(decoder_thread_proxy_->BelongsToCurrentThread() ||
         !decoder_thread_.IsRunning());
  DCHECK(!input_streamon_);

  for (auto& input_record : input_buffer_map_) {
    if (input_record.address != nullptr)
      device_->Munmap(input_record.address, input_record.length);
  }

  struct v4l2_requestbuffers reqbufs;
  memset(&reqbufs, 0, sizeof(reqbufs));
  reqbufs.count = 0;
  reqbufs.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
  reqbufs.memory = V4L2_MEMORY_MMAP;
  IOCTL_OR_LOG_ERROR(VIDIOC_REQBUFS, &reqbufs);

  input_buffer_map_.clear();
  free_input_buffers_.clear();
}
void V4L2SliceVideoDecodeAccelerator::DismissPictures(
    std::vector<int32> picture_buffer_ids,
    base::WaitableEvent* done) {
  DCHECK(child_message_loop_proxy_->BelongsToCurrentThread());

  for (auto picture_buffer_id : picture_buffer_ids) {
    DVLOGF(1) << "dismissing PictureBuffer id=" << picture_buffer_id;
    client_->DismissPictureBuffer(picture_buffer_id);
  }

  done->Signal();
}
void V4L2SliceVideoDecodeAccelerator::DevicePollTask(bool poll_device) {
  DCHECK_EQ(device_poll_thread_.message_loop(), base::MessageLoop::current());

  bool event_pending;
  if (!device_->Poll(poll_device, &event_pending)) {
    NOTIFY_ERROR(PLATFORM_FAILURE);
    return;
  }

  // All processing should happen on ServiceDeviceTask(), since we shouldn't
  // touch decoder state from this thread.
  decoder_thread_proxy_->PostTask(
      FROM_HERE, base::Bind(&V4L2SliceVideoDecodeAccelerator::ServiceDeviceTask,
                            base::Unretained(this)));
}
void V4L2SliceVideoDecodeAccelerator::ServiceDeviceTask() {
  DCHECK(decoder_thread_proxy_->BelongsToCurrentThread());

  // ServiceDeviceTask() should only ever be scheduled from DevicePollTask().
  Dequeue();
  SchedulePollIfNeeded();
}
void V4L2SliceVideoDecodeAccelerator::SchedulePollIfNeeded() {
  DCHECK(decoder_thread_proxy_->BelongsToCurrentThread());

  if (!device_poll_thread_.IsRunning()) {
    DVLOGF(2) << "Device poll thread stopped, will not schedule poll";
    return;
  }

  DCHECK(input_streamon_ || output_streamon_);

  if (input_buffer_queued_count_ + output_buffer_queued_count_ == 0) {
    DVLOGF(4) << "No buffers queued, will not schedule poll";
    return;
  }

  DVLOGF(4) << "Scheduling device poll task";

  device_poll_thread_.message_loop()->PostTask(
      FROM_HERE, base::Bind(&V4L2SliceVideoDecodeAccelerator::DevicePollTask,
                            base::Unretained(this), true));

  DVLOGF(2) << "buffer counts: "
            << "INPUT[" << decoder_input_queue_.size() << "]"
            << " => DEVICE["
            << free_input_buffers_.size() << "+"
            << input_buffer_queued_count_ << "/"
            << input_buffer_map_.size() << "]->["
            << free_output_buffers_.size() << "+"
            << output_buffer_queued_count_ << "/"
            << output_buffer_map_.size() << "]"
            << " => DISPLAYQ[" << decoder_display_queue_.size() << "]"
            << " => CLIENT[" << surfaces_at_display_.size() << "]";
}
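
// Threading sketch: DevicePollTask() blocks in V4L2Device::Poll() on the
// device poll thread; when the device becomes ready it posts
// ServiceDeviceTask() back to the decoder thread, which dequeues buffers and
// calls SchedulePollIfNeeded() to re-arm the poll while any buffers remain
// queued. All V4L2 queue state is therefore only touched from the decoder
// thread.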
void V4L2SliceVideoDecodeAccelerator::Enqueue(
    const scoped_refptr<V4L2DecodeSurface>& dec_surface) {
  DCHECK(decoder_thread_proxy_->BelongsToCurrentThread());

  const int old_inputs_queued = input_buffer_queued_count_;
  const int old_outputs_queued = output_buffer_queued_count_;

  if (!EnqueueInputRecord(dec_surface->input_record(),
                          dec_surface->config_store())) {
    DVLOGF(1) << "Failed queueing an input buffer";
    NOTIFY_ERROR(PLATFORM_FAILURE);
    return;
  }

  if (!EnqueueOutputRecord(dec_surface->output_record())) {
    DVLOGF(1) << "Failed queueing an output buffer";
    NOTIFY_ERROR(PLATFORM_FAILURE);
    return;
  }

  bool inserted =
      surfaces_at_device_.insert(std::make_pair(dec_surface->output_record(),
                                                dec_surface)).second;
  DCHECK(inserted);

  if (old_inputs_queued == 0 && old_outputs_queued == 0)
    SchedulePollIfNeeded();
}
void V4L2SliceVideoDecodeAccelerator::Dequeue() {
  DCHECK(decoder_thread_proxy_->BelongsToCurrentThread());

  struct v4l2_buffer dqbuf;
  struct v4l2_plane planes[VIDEO_MAX_PLANES];
  while (input_buffer_queued_count_ > 0) {
    DCHECK(input_streamon_);
    memset(&dqbuf, 0, sizeof(dqbuf));
    memset(&planes, 0, sizeof(planes));
    dqbuf.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
    dqbuf.memory = V4L2_MEMORY_MMAP;
    dqbuf.m.planes = planes;
    dqbuf.length = input_planes_count_;
    if (device_->Ioctl(VIDIOC_DQBUF, &dqbuf) != 0) {
      if (errno == EAGAIN) {
        // EAGAIN if we're just out of buffers to dequeue.
        break;
      }
      PLOG(ERROR) << "ioctl() failed: VIDIOC_DQBUF";
      NOTIFY_ERROR(PLATFORM_FAILURE);
      return;
    }
    InputRecord& input_record = input_buffer_map_[dqbuf.index];
    DCHECK(input_record.at_device);
    ReuseInputBuffer(dqbuf.index);
    input_buffer_queued_count_--;
    DVLOGF(4) << "Dequeued input=" << dqbuf.index
              << " count: " << input_buffer_queued_count_;
  }

  while (output_buffer_queued_count_ > 0) {
    DCHECK(output_streamon_);
    memset(&dqbuf, 0, sizeof(dqbuf));
    memset(&planes, 0, sizeof(planes));
    dqbuf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
    dqbuf.memory = V4L2_MEMORY_MMAP;
    dqbuf.m.planes = planes;
    dqbuf.length = output_planes_count_;
    if (device_->Ioctl(VIDIOC_DQBUF, &dqbuf) != 0) {
      if (errno == EAGAIN) {
        // EAGAIN if we're just out of buffers to dequeue.
        break;
      }
      PLOG(ERROR) << "ioctl() failed: VIDIOC_DQBUF";
      NOTIFY_ERROR(PLATFORM_FAILURE);
      return;
    }
    OutputRecord& output_record = output_buffer_map_[dqbuf.index];
    DCHECK(output_record.at_device);
    output_record.at_device = false;
    DCHECK_NE(output_record.picture_id, -1);
    output_buffer_queued_count_--;
    DVLOGF(3) << "Dequeued output=" << dqbuf.index
              << " count " << output_buffer_queued_count_;

    V4L2DecodeSurfaceByOutputId::iterator it =
        surfaces_at_device_.find(dqbuf.index);
    if (it == surfaces_at_device_.end()) {
      DLOG(ERROR) << "Got invalid surface from device.";
      NOTIFY_ERROR(PLATFORM_FAILURE);
      return;
    }

    it->second->SetDecoded();
    surfaces_at_device_.erase(it);
  }

  // A frame was decoded, see if we can output it.

  ProcessPendingEventsIfNeeded();
}
void V4L2SliceVideoDecodeAccelerator::ProcessPendingEventsIfNeeded() {
  // Process pending events, if any, in the correct order.
  // We always first process the surface set change, as it is an internal
  // event from the decoder and interleaving it with external requests would
  // put the decoder in an undefined state.
  FinishSurfaceSetChangeIfNeeded();

  // Process external (client) requests.
  FinishFlushIfNeeded();
  FinishResetIfNeeded();
}
void V4L2SliceVideoDecodeAccelerator::ReuseInputBuffer(int index) {
  DCHECK_LT(index, static_cast<int>(input_buffer_map_.size()));
  DVLOGF(4) << "Reusing input buffer, index=" << index;
  DCHECK(decoder_thread_proxy_->BelongsToCurrentThread());

  InputRecord& input_record = input_buffer_map_[index];
  input_record.at_device = false;
  input_record.input_id = -1;
  input_record.bytes_used = 0;
  free_input_buffers_.push_back(index);
}
void V4L2SliceVideoDecodeAccelerator::ReuseOutputBuffer(int index) {
  DCHECK_LT(index, static_cast<int>(output_buffer_map_.size()));
  DVLOGF(4) << "Reusing output buffer, index=" << index;
  DCHECK(decoder_thread_proxy_->BelongsToCurrentThread());

  OutputRecord& output_record = output_buffer_map_[index];
  DCHECK(!output_record.at_device);
  output_record.at_client = false;

  free_output_buffers_.push_back(index);

  ScheduleDecodeBufferTaskIfNeeded();
}
bool V4L2SliceVideoDecodeAccelerator::EnqueueInputRecord(
    int index,
    uint32_t config_store) {
  DCHECK_LT(index, static_cast<int>(input_buffer_map_.size()));
  DCHECK_GT(config_store, 0u);

  // Enqueue an input (VIDEO_OUTPUT) buffer for an input video frame.
  InputRecord& input_record = input_buffer_map_[index];
  DCHECK(!input_record.at_device);
  struct v4l2_buffer qbuf;
  struct v4l2_plane qbuf_planes[VIDEO_MAX_PLANES];
  memset(&qbuf, 0, sizeof(qbuf));
  memset(qbuf_planes, 0, sizeof(qbuf_planes));
  qbuf.index = index;
  qbuf.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
  qbuf.memory = V4L2_MEMORY_MMAP;
  qbuf.m.planes = qbuf_planes;
  qbuf.m.planes[0].bytesused = input_record.bytes_used;
  qbuf.length = input_planes_count_;
  qbuf.config_store = config_store;
  IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_QBUF, &qbuf);
  input_record.at_device = true;
  input_buffer_queued_count_++;
  DVLOGF(4) << "Enqueued input=" << qbuf.index
            << " count: " << input_buffer_queued_count_;

  return true;
}
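
// Note on config_store: the value queued here is the per-surface one created
// in V4L2DecodeSurface (input_record + 1). Pairing that value with the codec
// controls submitted for the same frame is assumed to happen in the control
// submission path of the accelerators; this relies on a config-store-aware
// V4L2 driver, which is a platform assumption rather than something this
// function checks.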
bool V4L2SliceVideoDecodeAccelerator::EnqueueOutputRecord(int index) {
  DCHECK_LT(index, static_cast<int>(output_buffer_map_.size()));

  // Enqueue an output (VIDEO_CAPTURE) buffer.
  OutputRecord& output_record = output_buffer_map_[index];
  DCHECK(!output_record.at_device);
  DCHECK(!output_record.at_client);
  DCHECK_NE(output_record.egl_image, EGL_NO_IMAGE_KHR);
  DCHECK_NE(output_record.picture_id, -1);
  if (output_record.egl_sync != EGL_NO_SYNC_KHR) {
    // If we have to wait for completion, wait. Note that
    // free_output_buffers_ is a FIFO queue, so we always wait on the
    // buffer that has been in the queue the longest.
    if (eglClientWaitSyncKHR(egl_display_, output_record.egl_sync, 0,
                             EGL_FOREVER_KHR) == EGL_FALSE) {
      // This will cause tearing, but is safe otherwise.
      DVLOGF(1) << "eglClientWaitSyncKHR failed!";
    }
    if (eglDestroySyncKHR(egl_display_, output_record.egl_sync) != EGL_TRUE) {
      LOGF(ERROR) << "eglDestroySyncKHR failed!";
      NOTIFY_ERROR(PLATFORM_FAILURE);
      return false;
    }
    output_record.egl_sync = EGL_NO_SYNC_KHR;
  }

  struct v4l2_buffer qbuf;
  struct v4l2_plane qbuf_planes[VIDEO_MAX_PLANES];
  memset(&qbuf, 0, sizeof(qbuf));
  memset(qbuf_planes, 0, sizeof(qbuf_planes));
  qbuf.index = index;
  qbuf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
  qbuf.memory = V4L2_MEMORY_MMAP;
  qbuf.m.planes = qbuf_planes;
  qbuf.length = output_planes_count_;
  IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_QBUF, &qbuf);
  output_record.at_device = true;
  output_buffer_queued_count_++;
  DVLOGF(4) << "Enqueued output=" << qbuf.index
            << " count: " << output_buffer_queued_count_;

  return true;
}
bool V4L2SliceVideoDecodeAccelerator::StartDevicePoll() {
  DVLOGF(3) << "Starting device poll";
  DCHECK(decoder_thread_proxy_->BelongsToCurrentThread());
  DCHECK(!device_poll_thread_.IsRunning());

  // Start up the device poll thread and schedule its first DevicePollTask().
  if (!device_poll_thread_.Start()) {
    DLOG(ERROR) << "StartDevicePoll(): Device thread failed to start";
    NOTIFY_ERROR(PLATFORM_FAILURE);
    return false;
  }

  if (!input_streamon_) {
    __u32 type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
    IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_STREAMON, &type);
    input_streamon_ = true;
  }

  if (!output_streamon_) {
    __u32 type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
    IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_STREAMON, &type);
    output_streamon_ = true;
  }

  device_poll_thread_.message_loop()->PostTask(
      FROM_HERE, base::Bind(&V4L2SliceVideoDecodeAccelerator::DevicePollTask,
                            base::Unretained(this), true));

  return true;
}
bool V4L2SliceVideoDecodeAccelerator::StopDevicePoll(bool keep_input_state) {
  DVLOGF(3) << "Stopping device poll";
  if (decoder_thread_.IsRunning())
    DCHECK(decoder_thread_proxy_->BelongsToCurrentThread());

  // Signal the DevicePollTask() to stop, and stop the device poll thread.
  if (!device_->SetDevicePollInterrupt()) {
    PLOG(ERROR) << "SetDevicePollInterrupt(): failed";
    NOTIFY_ERROR(PLATFORM_FAILURE);
    return false;
  }
  device_poll_thread_.Stop();
  DVLOGF(3) << "Device poll thread stopped";

  // Clear the interrupt now, to be sure.
  if (!device_->ClearDevicePollInterrupt()) {
    NOTIFY_ERROR(PLATFORM_FAILURE);
    return false;
  }

  if (!keep_input_state) {
    if (input_streamon_) {
      __u32 type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
      IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_STREAMOFF, &type);
    }
    input_streamon_ = false;
  }

  if (output_streamon_) {
    __u32 type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
    IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_STREAMOFF, &type);
  }
  output_streamon_ = false;

  if (!keep_input_state) {
    free_input_buffers_.clear();
    // We don't care about the buffer state (at_device) here, because
    // STREAMOFF tells the driver to drop all buffers without DQBUFing them.
    for (size_t i = 0; i < input_buffer_map_.size(); ++i)
      ReuseInputBuffer(i);
    input_buffer_queued_count_ = 0;
  }

  surfaces_at_device_.clear();

  free_output_buffers_.clear();
  for (size_t i = 0; i < output_buffer_map_.size(); ++i) {
    OutputRecord& output_record = output_buffer_map_[i];
    DCHECK(!(output_record.at_client && output_record.at_device));
    output_record.at_device = false;
    if (!output_record.at_client)
      free_output_buffers_.push_back(i);
  }
  output_buffer_queued_count_ = 0;

  DVLOGF(3) << "Device poll stopped";
  return true;
}
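
// Note: StopDevicePoll(true) (keep_input_state) is used while switching the
// output buffer set, so the OUTPUT (bitstream) queue keeps streaming and its
// buffers remain queued, while the CAPTURE queue is always stopped and its
// non-client buffers returned to the free list. StopDevicePoll(false), as
// called from DestroyTask(), resets both queues.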
void V4L2SliceVideoDecodeAccelerator::Decode(
    const media::BitstreamBuffer& bitstream_buffer) {
  DVLOGF(3) << "input_id=" << bitstream_buffer.id()
            << ", size=" << bitstream_buffer.size();
  DCHECK(io_message_loop_proxy_->BelongsToCurrentThread());

  decoder_thread_proxy_->PostTask(
      FROM_HERE, base::Bind(&V4L2SliceVideoDecodeAccelerator::DecodeTask,
                            base::Unretained(this), bitstream_buffer));
}
void V4L2SliceVideoDecodeAccelerator::DecodeTask(
    const media::BitstreamBuffer& bitstream_buffer) {
  DVLOGF(3) << "input_id=" << bitstream_buffer.id()
            << " size=" << bitstream_buffer.size();
  DCHECK(decoder_thread_proxy_->BelongsToCurrentThread());

  scoped_ptr<BitstreamBufferRef> bitstream_record(new BitstreamBufferRef(
      io_client_, io_message_loop_proxy_,
      new base::SharedMemory(bitstream_buffer.handle(), true),
      bitstream_buffer.size(), bitstream_buffer.id()));
  if (!bitstream_record->shm->Map(bitstream_buffer.size())) {
    LOGF(ERROR) << "Could not map bitstream_buffer";
    NOTIFY_ERROR(UNREADABLE_INPUT);
    return;
  }
  DVLOGF(3) << "mapped at=" << bitstream_record->shm->memory();

  decoder_input_queue_.push(
      linked_ptr<BitstreamBufferRef>(bitstream_record.release()));

  ScheduleDecodeBufferTaskIfNeeded();
}
bool V4L2SliceVideoDecodeAccelerator::TrySetNewBistreamBuffer() {
  DCHECK(decoder_thread_proxy_->BelongsToCurrentThread());
  DCHECK(!decoder_current_bitstream_buffer_);

  if (decoder_input_queue_.empty())
    return false;

  decoder_current_bitstream_buffer_.reset(
      decoder_input_queue_.front().release());
  decoder_input_queue_.pop();

  if (decoder_current_bitstream_buffer_->input_id == kFlushBufferId) {
    // This is a buffer we queued for ourselves to trigger flush at this time.
    InitiateFlush();
    return false;
  }

  const uint8_t* const data = reinterpret_cast<const uint8_t*>(
      decoder_current_bitstream_buffer_->shm->memory());
  const size_t data_size = decoder_current_bitstream_buffer_->size;
  decoder_->SetStream(data, data_size);

  return true;
}
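
// Note: kFlushBufferId is a sentinel input_id (a constant defined elsewhere in
// this component) used for the empty BitstreamBufferRef that FlushTask()
// appends to the input queue; when it is reached here, the flush sequence is
// initiated instead of feeding more data to the decoder.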
void V4L2SliceVideoDecodeAccelerator::ScheduleDecodeBufferTaskIfNeeded() {
  DCHECK(decoder_thread_proxy_->BelongsToCurrentThread());
  if (state_ == kDecoding) {
    decoder_thread_proxy_->PostTask(
        FROM_HERE,
        base::Bind(&V4L2SliceVideoDecodeAccelerator::DecodeBufferTask,
                   base::Unretained(this)));
  }
}
void V4L2SliceVideoDecodeAccelerator::DecodeBufferTask() {
  DCHECK(decoder_thread_proxy_->BelongsToCurrentThread());

  if (state_ != kDecoding) {
    DVLOGF(3) << "Early exit, not in kDecoding";
    return;
  }

  while (true) {
    AcceleratedVideoDecoder::DecodeResult res;
    res = decoder_->Decode();
    switch (res) {
      case AcceleratedVideoDecoder::kAllocateNewSurfaces:
        DVLOGF(2) << "Decoder requesting a new set of surfaces";
        InitiateSurfaceSetChange();
        return;

      case AcceleratedVideoDecoder::kRanOutOfStreamData:
        decoder_current_bitstream_buffer_.reset();
        if (!TrySetNewBistreamBuffer())
          return;
        break;

      case AcceleratedVideoDecoder::kRanOutOfSurfaces:
        // No more surfaces for the decoder, we'll come back once we have more.
        DVLOGF(4) << "Ran out of surfaces";
        return;

      case AcceleratedVideoDecoder::kDecodeError:
        DVLOGF(1) << "Error decoding stream";
        NOTIFY_ERROR(PLATFORM_FAILURE);
        return;
    }
  }
}
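
// Decode state machine, as used above: kDecoding is the only state in which
// DecodeBufferTask() makes progress. InitiateSurfaceSetChange(),
// InitiateFlush() and ResetTask() move the state to kIdle, and the matching
// Finish*IfNeeded() handlers return it to kDecoding once all surfaces have
// been returned by the device, re-scheduling decode via
// ScheduleDecodeBufferTaskIfNeeded().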
void V4L2SliceVideoDecodeAccelerator::InitiateSurfaceSetChange() {
  DCHECK(decoder_thread_proxy_->BelongsToCurrentThread());

  DCHECK_EQ(state_, kDecoding);
  state_ = kIdle;

  DCHECK(!surface_set_change_pending_);
  surface_set_change_pending_ = true;

  FinishSurfaceSetChangeIfNeeded();
}
void V4L2SliceVideoDecodeAccelerator::FinishSurfaceSetChangeIfNeeded() {
  DCHECK(decoder_thread_proxy_->BelongsToCurrentThread());

  if (!surface_set_change_pending_ || !surfaces_at_device_.empty())
    return;

  DCHECK_EQ(state_, kIdle);
  DCHECK(decoder_display_queue_.empty());
  // All output buffers should've been returned from decoder and device by now.
  // The only remaining owner of surfaces may be display (client), and we will
  // dismiss them when destroying output buffers below.
  DCHECK_EQ(free_output_buffers_.size() + surfaces_at_display_.size(),
            output_buffer_map_.size());

  // Keep input queue running while we switch outputs.
  if (!StopDevicePoll(true)) {
    NOTIFY_ERROR(PLATFORM_FAILURE);
    return;
  }

  // This will return only once all buffers are dismissed and destroyed.
  // This does not wait until they are displayed however, as display retains
  // references to the buffers bound to textures and will release them
  // after displaying.
  if (!DestroyOutputs(true)) {
    NOTIFY_ERROR(PLATFORM_FAILURE);
    return;
  }

  if (!CreateOutputBuffers()) {
    NOTIFY_ERROR(PLATFORM_FAILURE);
    return;
  }

  if (!StartDevicePoll()) {
    NOTIFY_ERROR(PLATFORM_FAILURE);
    return;
  }

  DVLOGF(3) << "Surface set change finished";

  surface_set_change_pending_ = false;
  state_ = kDecoding;
  ScheduleDecodeBufferTaskIfNeeded();
}
bool V4L2SliceVideoDecodeAccelerator::DestroyOutputs(bool dismiss) {
  DCHECK(decoder_thread_proxy_->BelongsToCurrentThread());
  std::vector<EGLImageKHR> egl_images_to_destroy;
  std::vector<int32> picture_buffers_to_dismiss;

  if (output_buffer_map_.empty())
    return true;

  for (auto& output_record : output_buffer_map_) {
    DCHECK(!output_record.at_device);
    output_record.at_client = false;

    if (output_record.egl_sync != EGL_NO_SYNC_KHR) {
      if (eglDestroySyncKHR(egl_display_, output_record.egl_sync) != EGL_TRUE)
        DVLOGF(1) << "eglDestroySyncKHR failed.";
    }

    if (output_record.egl_image != EGL_NO_IMAGE_KHR) {
      child_message_loop_proxy_->PostTask(
          FROM_HERE,
          base::Bind(base::IgnoreResult(&V4L2Device::DestroyEGLImage), device_,
                     egl_display_, output_record.egl_image));
    }

    picture_buffers_to_dismiss.push_back(output_record.picture_id);
  }

  if (dismiss) {
    DVLOGF(2) << "Scheduling picture dismissal";
    base::WaitableEvent done(false, false);
    child_message_loop_proxy_->PostTask(
        FROM_HERE, base::Bind(&V4L2SliceVideoDecodeAccelerator::DismissPictures,
                              weak_this_, picture_buffers_to_dismiss, &done));
    done.Wait();
  }

  // At this point client can't call ReusePictureBuffer on any of the pictures
  // anymore, so it's safe to destroy.
  return DestroyOutputBuffers();
}
bool V4L2SliceVideoDecodeAccelerator::DestroyOutputBuffers() {
  DCHECK(decoder_thread_proxy_->BelongsToCurrentThread() ||
         !decoder_thread_.IsRunning());
  DCHECK(!output_streamon_);
  DCHECK(surfaces_at_device_.empty());
  DCHECK(decoder_display_queue_.empty());
  DCHECK_EQ(surfaces_at_display_.size() + free_output_buffers_.size(),
            output_buffer_map_.size());

  if (output_buffer_map_.empty())
    return true;

  // It's ok to do this, client will retain references to textures, but we are
  // not interested in reusing the surfaces anymore.
  // This will prevent us from reusing old surfaces in case we have some
  // ReusePictureBuffer() pending on ChildThread already. It's ok to ignore
  // them, because we have already dismissed them (in DestroyOutputs()).
  surfaces_at_display_.clear();
  DCHECK_EQ(free_output_buffers_.size(), output_buffer_map_.size());

  free_output_buffers_.clear();
  output_buffer_map_.clear();

  struct v4l2_requestbuffers reqbufs;
  memset(&reqbufs, 0, sizeof(reqbufs));
  reqbufs.count = 0;
  reqbufs.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
  reqbufs.memory = V4L2_MEMORY_MMAP;
  IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_REQBUFS, &reqbufs);

  return true;
}
void V4L2SliceVideoDecodeAccelerator::AssignPictureBuffers(
    const std::vector<media::PictureBuffer>& buffers) {
  DCHECK(child_message_loop_proxy_->BelongsToCurrentThread());

  if (buffers.size() != output_buffer_map_.size()) {
    DLOG(ERROR) << "Failed to provide requested picture buffers. "
                << "(Got " << buffers.size()
                << ", requested " << output_buffer_map_.size() << ")";
    NOTIFY_ERROR(INVALID_ARGUMENT);
    return;
  }

  if (!make_context_current_.Run()) {
    DLOG(ERROR) << "could not make context current";
    NOTIFY_ERROR(PLATFORM_FAILURE);
    return;
  }

  gfx::ScopedTextureBinder bind_restore(GL_TEXTURE_EXTERNAL_OES, 0);

  // It's safe to manipulate all the buffer state here, because the decoder
  // thread is waiting on pictures_assigned_.
  DCHECK(free_output_buffers_.empty());
  for (size_t i = 0; i < output_buffer_map_.size(); ++i) {
    DCHECK(buffers[i].size() == coded_size_);

    OutputRecord& output_record = output_buffer_map_[i];
    DCHECK(!output_record.at_device);
    DCHECK(!output_record.at_client);
    DCHECK_EQ(output_record.egl_image, EGL_NO_IMAGE_KHR);
    DCHECK_EQ(output_record.egl_sync, EGL_NO_SYNC_KHR);
    DCHECK_EQ(output_record.picture_id, -1);
    DCHECK_EQ(output_record.cleared, false);

    EGLImageKHR egl_image = device_->CreateEGLImage(egl_display_,
                                                    egl_context_,
                                                    buffers[i].texture_id(),
                                                    coded_size_,
                                                    i,
                                                    output_format_fourcc_,
                                                    output_planes_count_);
    if (egl_image == EGL_NO_IMAGE_KHR) {
      LOGF(ERROR) << "Could not create EGLImageKHR";
      // Ownership of EGLImages allocated in previous iterations of this loop
      // has been transferred to output_buffer_map_. After we error-out here
      // the destructor will handle their cleanup.
      NOTIFY_ERROR(PLATFORM_FAILURE);
      return;
    }

    output_record.egl_image = egl_image;
    output_record.picture_id = buffers[i].id();
    free_output_buffers_.push_back(i);
    DVLOGF(3) << "buffer[" << i << "]: picture_id=" << output_record.picture_id;
  }

  pictures_assigned_.Signal();
}
void V4L2SliceVideoDecodeAccelerator::ReusePictureBuffer(
    int32 picture_buffer_id) {
  DCHECK(child_message_loop_proxy_->BelongsToCurrentThread());
  DVLOGF(4) << "picture_buffer_id=" << picture_buffer_id;

  if (!make_context_current_.Run()) {
    LOGF(ERROR) << "could not make context current";
    NOTIFY_ERROR(PLATFORM_FAILURE);
    return;
  }

  EGLSyncKHR egl_sync =
      eglCreateSyncKHR(egl_display_, EGL_SYNC_FENCE_KHR, NULL);
  if (egl_sync == EGL_NO_SYNC_KHR) {
    LOGF(ERROR) << "eglCreateSyncKHR() failed";
    NOTIFY_ERROR(PLATFORM_FAILURE);
    return;
  }

  scoped_ptr<EGLSyncKHRRef> egl_sync_ref(
      new EGLSyncKHRRef(egl_display_, egl_sync));
  decoder_thread_proxy_->PostTask(
      FROM_HERE,
      base::Bind(&V4L2SliceVideoDecodeAccelerator::ReusePictureBufferTask,
                 base::Unretained(this), picture_buffer_id,
                 base::Passed(&egl_sync_ref)));
}
void V4L2SliceVideoDecodeAccelerator::ReusePictureBufferTask(
    int32 picture_buffer_id,
    scoped_ptr<EGLSyncKHRRef> egl_sync_ref) {
  DVLOGF(3) << "picture_buffer_id=" << picture_buffer_id;
  DCHECK(decoder_thread_proxy_->BelongsToCurrentThread());

  V4L2DecodeSurfaceByPictureBufferId::iterator it =
      surfaces_at_display_.find(picture_buffer_id);
  if (it == surfaces_at_display_.end()) {
    // It's possible that we've already posted a DismissPictureBuffer for this
    // picture, but it has not yet executed when this ReusePictureBuffer was
    // posted to us by the client. In that case just ignore this (we've already
    // dismissed it and accounted for that) and let the sync object get
    // destroyed.
    DVLOGF(3) << "got picture id=" << picture_buffer_id
              << " not in use (anymore?).";
    return;
  }

  OutputRecord& output_record =
      output_buffer_map_[it->second->output_record()];
  if (output_record.at_device || !output_record.at_client) {
    DVLOGF(1) << "picture_buffer_id not reusable";
    NOTIFY_ERROR(INVALID_ARGUMENT);
    return;
  }

  DCHECK_EQ(output_record.egl_sync, EGL_NO_SYNC_KHR);
  DCHECK(!output_record.at_device);
  output_record.at_client = false;
  output_record.egl_sync = egl_sync_ref->egl_sync;
  // Take ownership of the EGLSync.
  egl_sync_ref->egl_sync = EGL_NO_SYNC_KHR;
  surfaces_at_display_.erase(it);
}
void V4L2SliceVideoDecodeAccelerator::Flush() {
  DCHECK(child_message_loop_proxy_->BelongsToCurrentThread());

  decoder_thread_proxy_->PostTask(
      FROM_HERE, base::Bind(&V4L2SliceVideoDecodeAccelerator::FlushTask,
                            base::Unretained(this)));
}

void V4L2SliceVideoDecodeAccelerator::FlushTask() {
  DCHECK(decoder_thread_proxy_->BelongsToCurrentThread());

  if (!decoder_input_queue_.empty()) {
    // We are not done with pending inputs, so queue an empty buffer,
    // which - when reached - will trigger flush sequence.
    decoder_input_queue_.push(
        linked_ptr<BitstreamBufferRef>(new BitstreamBufferRef(
            io_client_, io_message_loop_proxy_, nullptr, 0, kFlushBufferId)));
    return;
  }

  // No more inputs pending, so just finish flushing here.
  InitiateFlush();
}
void V4L2SliceVideoDecodeAccelerator::InitiateFlush() {
  DCHECK(decoder_thread_proxy_->BelongsToCurrentThread());

  DCHECK(!decoder_flushing_);
  DCHECK_EQ(state_, kDecoding);
  state_ = kIdle;

  // This will trigger output for all remaining surfaces in the decoder.
  // However, not all of them may be decoded yet (they would be queued
  // in hardware then).
  if (!decoder_->Flush()) {
    DVLOGF(1) << "Failed flushing the decoder.";
    NOTIFY_ERROR(PLATFORM_FAILURE);
    return;
  }

  // Put the decoder in an idle state, ready to resume.
  decoder_->Reset();

  decoder_flushing_ = true;

  decoder_thread_proxy_->PostTask(
      FROM_HERE,
      base::Bind(&V4L2SliceVideoDecodeAccelerator::FinishFlushIfNeeded,
                 base::Unretained(this)));
}
void V4L2SliceVideoDecodeAccelerator::FinishFlushIfNeeded() {
  DCHECK(decoder_thread_proxy_->BelongsToCurrentThread());

  if (!decoder_flushing_ || !surfaces_at_device_.empty())
    return;

  DCHECK_EQ(state_, kIdle);

  // At this point, all remaining surfaces are decoded and dequeued, and since
  // we have already scheduled output for them in InitiateFlush(), their
  // respective PictureReady calls have been posted (or they have been queued on
  // pending_picture_ready_). So at this time, once we SendPictureReady(),
  // we will have all remaining PictureReady() posted to the client and we
  // can post NotifyFlushDone().
  DCHECK(decoder_display_queue_.empty());

  // Decoder should have already returned all surfaces and all surfaces are
  // out of hardware. There can be no other owners of input buffers.
  DCHECK_EQ(free_input_buffers_.size(), input_buffer_map_.size());

  SendPictureReady();

  child_message_loop_proxy_->PostTask(
      FROM_HERE, base::Bind(&Client::NotifyFlushDone, client_));

  decoder_flushing_ = false;

  DVLOGF(3) << "Flush finished";
  state_ = kDecoding;
  ScheduleDecodeBufferTaskIfNeeded();
}
void V4L2SliceVideoDecodeAccelerator::Reset() {
  DCHECK(child_message_loop_proxy_->BelongsToCurrentThread());

  decoder_thread_proxy_->PostTask(
      FROM_HERE, base::Bind(&V4L2SliceVideoDecodeAccelerator::ResetTask,
                            base::Unretained(this)));
}

void V4L2SliceVideoDecodeAccelerator::ResetTask() {
  DCHECK(decoder_thread_proxy_->BelongsToCurrentThread());

  if (decoder_resetting_) {
    // This is a bug in the client, multiple Reset()s before NotifyResetDone()
    // are not allowed.
    NOTREACHED() << "Client should not be requesting multiple Reset()s";
    return;
  }

  DCHECK_EQ(state_, kDecoding);
  state_ = kIdle;

  // Put the decoder in an idle state, ready to resume.
  decoder_->Reset();

  decoder_resetting_ = true;

  // Drop all remaining inputs.
  decoder_current_bitstream_buffer_.reset();
  while (!decoder_input_queue_.empty())
    decoder_input_queue_.pop();

  FinishResetIfNeeded();
}
void V4L2SliceVideoDecodeAccelerator::FinishResetIfNeeded() {
  DCHECK(decoder_thread_proxy_->BelongsToCurrentThread());

  if (!decoder_resetting_ || !surfaces_at_device_.empty())
    return;

  DCHECK_EQ(state_, kIdle);
  DCHECK(!decoder_flushing_);

  // Drop any pending outputs.
  while (!decoder_display_queue_.empty())
    decoder_display_queue_.pop();

  // At this point we can have no input buffers in the decoder, because we
  // Reset()ed it in ResetTask(), and have not scheduled any new Decode()s
  // having been in kIdle since. We don't have any surfaces in the HW either -
  // we just checked that surfaces_at_device_.empty(), and inputs are tied
  // to surfaces. Since there can be no other owners of input buffers, we can
  // simply mark them all as available.
  DCHECK_EQ(input_buffer_queued_count_, 0);
  free_input_buffers_.clear();
  for (size_t i = 0; i < input_buffer_map_.size(); ++i) {
    DCHECK(!input_buffer_map_[i].at_device);
    ReuseInputBuffer(i);
  }

  decoder_resetting_ = false;

  child_message_loop_proxy_->PostTask(
      FROM_HERE, base::Bind(&Client::NotifyResetDone, client_));

  DVLOGF(3) << "Reset finished";

  state_ = kDecoding;
  ScheduleDecodeBufferTaskIfNeeded();
}
void V4L2SliceVideoDecodeAccelerator::SetErrorState(Error error) {
  // We can touch state_ only if this is the decoder thread or the
  // decoder thread isn't running.
  if (decoder_thread_.IsRunning() &&
      !decoder_thread_proxy_->BelongsToCurrentThread()) {
    decoder_thread_proxy_->PostTask(
        FROM_HERE, base::Bind(&V4L2SliceVideoDecodeAccelerator::SetErrorState,
                              base::Unretained(this), error));
    return;
  }

  // Post NotifyError only if we are already initialized, as the API does
  // not allow doing so before that.
  if (state_ != kError && state_ != kUninitialized)
    NotifyError(error);

  state_ = kError;
}
V4L2SliceVideoDecodeAccelerator::V4L2H264Accelerator::V4L2H264Accelerator(
    V4L2SliceVideoDecodeAccelerator* v4l2_dec)
    : num_slices_(0), v4l2_dec_(v4l2_dec) {
}

V4L2SliceVideoDecodeAccelerator::V4L2H264Accelerator::~V4L2H264Accelerator() {
}

scoped_refptr<H264Picture>
V4L2SliceVideoDecodeAccelerator::V4L2H264Accelerator::CreateH264Picture() {
  scoped_refptr<V4L2DecodeSurface> dec_surface = v4l2_dec_->CreateSurface();
  if (!dec_surface)
    return nullptr;

  return new V4L2H264Picture(dec_surface);
}
void V4L2SliceVideoDecodeAccelerator::V4L2H264Accelerator::
    H264PictureListToDPBIndicesList(const H264Picture::Vector& src_pic_list,
                                    uint8_t dst_list[kDPBIndicesListSize]) {
  size_t i;
  for (i = 0; i < src_pic_list.size() && i < kDPBIndicesListSize; ++i) {
    const scoped_refptr<H264Picture>& pic = src_pic_list[i];
    dst_list[i] = pic ? pic->dpb_position : VIDEO_MAX_FRAME;
  }

  while (i < kDPBIndicesListSize)
    dst_list[i++] = VIDEO_MAX_FRAME;
}
void V4L2SliceVideoDecodeAccelerator::V4L2H264Accelerator::H264DPBToV4L2DPB(
    const H264DPB& dpb,
    std::vector<scoped_refptr<V4L2DecodeSurface>>* ref_surfaces) {
  memset(v4l2_decode_param_.dpb, 0, sizeof(v4l2_decode_param_.dpb));
  size_t i = 0;
  for (const auto& pic : dpb) {
    if (i >= arraysize(v4l2_decode_param_.dpb)) {
      DVLOG(1) << "Invalid DPB size";
      break;
    }
    struct v4l2_h264_dpb_entry& entry = v4l2_decode_param_.dpb[i++];
    scoped_refptr<V4L2DecodeSurface> dec_surface =
        H264PictureToV4L2DecodeSurface(pic);
    entry.buf_index = dec_surface->output_record();
    entry.frame_num = pic->frame_num;
    entry.pic_num = pic->pic_num;
    entry.top_field_order_cnt = pic->top_field_order_cnt;
    entry.bottom_field_order_cnt = pic->bottom_field_order_cnt;
    entry.flags = (pic->ref ? V4L2_H264_DPB_ENTRY_FLAG_ACTIVE : 0) |
                  (pic->long_term ? V4L2_H264_DPB_ENTRY_FLAG_LONG_TERM : 0);

    ref_surfaces->push_back(dec_surface);
  }
}
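
// Note: besides filling v4l2_decode_param_.dpb, the helper above collects the
// V4L2DecodeSurface of every reference picture into |ref_surfaces|. Keeping
// those surfaces referenced until the target surface is decoded (via
// V4L2DecodeSurface::SetReferenceSurfaces()) is what prevents their output
// buffers from being recycled while the hardware may still read from them.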
bool V4L2SliceVideoDecodeAccelerator::V4L2H264Accelerator::SubmitFrameMetadata(
    const media::H264SPS* sps,
    const media::H264PPS* pps,
    const H264DPB& dpb,
    const H264Picture::Vector& ref_pic_listp0,
    const H264Picture::Vector& ref_pic_listb0,
    const H264Picture::Vector& ref_pic_listb1,
    const scoped_refptr<H264Picture>& pic) {
  struct v4l2_ext_control ctrl;
  std::vector<struct v4l2_ext_control> ctrls;

  struct v4l2_ctrl_h264_sps v4l2_sps;
  memset(&v4l2_sps, 0, sizeof(v4l2_sps));
  v4l2_sps.constraint_set_flags =
      (sps->constraint_set0_flag ? V4L2_H264_SPS_CONSTRAINT_SET0_FLAG : 0) |
      (sps->constraint_set1_flag ? V4L2_H264_SPS_CONSTRAINT_SET1_FLAG : 0) |
      (sps->constraint_set2_flag ? V4L2_H264_SPS_CONSTRAINT_SET2_FLAG : 0) |
      (sps->constraint_set3_flag ? V4L2_H264_SPS_CONSTRAINT_SET3_FLAG : 0) |
      (sps->constraint_set4_flag ? V4L2_H264_SPS_CONSTRAINT_SET4_FLAG : 0) |
      (sps->constraint_set5_flag ? V4L2_H264_SPS_CONSTRAINT_SET5_FLAG : 0);
#define SPS_TO_V4L2SPS(a) v4l2_sps.a = sps->a
  SPS_TO_V4L2SPS(profile_idc);
  SPS_TO_V4L2SPS(level_idc);
  SPS_TO_V4L2SPS(seq_parameter_set_id);
  SPS_TO_V4L2SPS(chroma_format_idc);
  SPS_TO_V4L2SPS(bit_depth_luma_minus8);
  SPS_TO_V4L2SPS(bit_depth_chroma_minus8);
  SPS_TO_V4L2SPS(log2_max_frame_num_minus4);
  SPS_TO_V4L2SPS(pic_order_cnt_type);
  SPS_TO_V4L2SPS(log2_max_pic_order_cnt_lsb_minus4);
  SPS_TO_V4L2SPS(offset_for_non_ref_pic);
  SPS_TO_V4L2SPS(offset_for_top_to_bottom_field);
  SPS_TO_V4L2SPS(num_ref_frames_in_pic_order_cnt_cycle);

  static_assert(arraysize(v4l2_sps.offset_for_ref_frame) ==
                arraysize(sps->offset_for_ref_frame),
                "offset_for_ref_frame arrays must be same size");
  for (size_t i = 0; i < arraysize(v4l2_sps.offset_for_ref_frame); ++i)
    v4l2_sps.offset_for_ref_frame[i] = sps->offset_for_ref_frame[i];
  SPS_TO_V4L2SPS(max_num_ref_frames);
  SPS_TO_V4L2SPS(pic_width_in_mbs_minus1);
  SPS_TO_V4L2SPS(pic_height_in_map_units_minus1);
#undef SPS_TO_V4L2SPS

#define SET_V4L2_SPS_FLAG_IF(cond, flag) \
  v4l2_sps.flags |= ((sps->cond) ? (flag) : 0)
  SET_V4L2_SPS_FLAG_IF(separate_colour_plane_flag,
                       V4L2_H264_SPS_FLAG_SEPARATE_COLOUR_PLANE);
  SET_V4L2_SPS_FLAG_IF(qpprime_y_zero_transform_bypass_flag,
                       V4L2_H264_SPS_FLAG_QPPRIME_Y_ZERO_TRANSFORM_BYPASS);
  SET_V4L2_SPS_FLAG_IF(delta_pic_order_always_zero_flag,
                       V4L2_H264_SPS_FLAG_DELTA_PIC_ORDER_ALWAYS_ZERO);
  SET_V4L2_SPS_FLAG_IF(gaps_in_frame_num_value_allowed_flag,
                       V4L2_H264_SPS_FLAG_GAPS_IN_FRAME_NUM_VALUE_ALLOWED);
  SET_V4L2_SPS_FLAG_IF(frame_mbs_only_flag, V4L2_H264_SPS_FLAG_FRAME_MBS_ONLY);
  SET_V4L2_SPS_FLAG_IF(mb_adaptive_frame_field_flag,
                       V4L2_H264_SPS_FLAG_MB_ADAPTIVE_FRAME_FIELD);
  SET_V4L2_SPS_FLAG_IF(direct_8x8_inference_flag,
                       V4L2_H264_SPS_FLAG_DIRECT_8X8_INFERENCE);
#undef SET_V4L2_SPS_FLAG_IF
  memset(&ctrl, 0, sizeof(ctrl));
  ctrl.id = V4L2_CID_MPEG_VIDEO_H264_SPS;
  ctrl.size = sizeof(v4l2_sps);
  ctrl.p_h264_sps = &v4l2_sps;
  ctrls.push_back(ctrl);
1810 struct v4l2_ctrl_h264_pps v4l2_pps
;
1811 memset(&v4l2_pps
, 0, sizeof(v4l2_pps
));
1812 #define PPS_TO_V4L2PPS(a) v4l2_pps.a = pps->a
1813 PPS_TO_V4L2PPS(pic_parameter_set_id
);
1814 PPS_TO_V4L2PPS(seq_parameter_set_id
);
1815 PPS_TO_V4L2PPS(num_slice_groups_minus1
);
1816 PPS_TO_V4L2PPS(num_ref_idx_l0_default_active_minus1
);
1817 PPS_TO_V4L2PPS(num_ref_idx_l1_default_active_minus1
);
1818 PPS_TO_V4L2PPS(weighted_bipred_idc
);
1819 PPS_TO_V4L2PPS(pic_init_qp_minus26
);
1820 PPS_TO_V4L2PPS(pic_init_qs_minus26
);
1821 PPS_TO_V4L2PPS(chroma_qp_index_offset
);
1822 PPS_TO_V4L2PPS(second_chroma_qp_index_offset
);
1823 #undef PPS_TO_V4L2PPS
1825 #define SET_V4L2_PPS_FLAG_IF(cond, flag) \
1826 v4l2_pps.flags |= ((pps->cond) ? (flag) : 0)
1827 SET_V4L2_PPS_FLAG_IF(entropy_coding_mode_flag
,
1828 V4L2_H264_PPS_FLAG_ENTROPY_CODING_MODE
);
1829 SET_V4L2_PPS_FLAG_IF(
1830 bottom_field_pic_order_in_frame_present_flag
,
1831 V4L2_H264_PPS_FLAG_BOTTOM_FIELD_PIC_ORDER_IN_FRAME_PRESENT
);
1832 SET_V4L2_PPS_FLAG_IF(weighted_pred_flag
, V4L2_H264_PPS_FLAG_WEIGHTED_PRED
);
1833 SET_V4L2_PPS_FLAG_IF(deblocking_filter_control_present_flag
,
1834 V4L2_H264_PPS_FLAG_DEBLOCKING_FILTER_CONTROL_PRESENT
);
1835 SET_V4L2_PPS_FLAG_IF(constrained_intra_pred_flag
,
1836 V4L2_H264_PPS_FLAG_CONSTRAINED_INTRA_PRED
);
1837 SET_V4L2_PPS_FLAG_IF(redundant_pic_cnt_present_flag
,
1838 V4L2_H264_PPS_FLAG_REDUNDANT_PIC_CNT_PRESENT
);
1839 SET_V4L2_PPS_FLAG_IF(transform_8x8_mode_flag
,
1840 V4L2_H264_PPS_FLAG_TRANSFORM_8X8_MODE
);
1841 SET_V4L2_PPS_FLAG_IF(pic_scaling_matrix_present_flag
,
1842 V4L2_H264_PPS_FLAG_PIC_SCALING_MATRIX_PRESENT
);
1843 #undef SET_V4L2_PPS_FLAG_IF
1844 memset(&ctrl
, 0, sizeof(ctrl
));
1845 ctrl
.id
= V4L2_CID_MPEG_VIDEO_H264_PPS
;
1846 ctrl
.size
= sizeof(v4l2_pps
);
1847 ctrl
.p_h264_pps
= &v4l2_pps
;
1848 ctrls
.push_back(ctrl
);
1850 struct v4l2_ctrl_h264_scaling_matrix v4l2_scaling_matrix
;
1851 memset(&v4l2_scaling_matrix
, 0, sizeof(v4l2_scaling_matrix
));
1852 static_assert(arraysize(v4l2_scaling_matrix
.scaling_list_4x4
) <=
1853 arraysize(pps
->scaling_list4x4
) &&
1854 arraysize(v4l2_scaling_matrix
.scaling_list_4x4
[0]) <=
1855 arraysize(pps
->scaling_list4x4
[0]) &&
1856 arraysize(v4l2_scaling_matrix
.scaling_list_8x8
) <=
1857 arraysize(pps
->scaling_list8x8
) &&
1858 arraysize(v4l2_scaling_matrix
.scaling_list_8x8
[0]) <=
1859 arraysize(pps
->scaling_list8x8
[0]),
1860 "scaling_lists must be of correct size");
1861 for (size_t i
= 0; i
< arraysize(v4l2_scaling_matrix
.scaling_list_4x4
); ++i
) {
1862 for (size_t j
= 0; j
< arraysize(v4l2_scaling_matrix
.scaling_list_4x4
[i
]);
1864 v4l2_scaling_matrix
.scaling_list_4x4
[i
][j
] = pps
->scaling_list4x4
[i
][j
];
1867 for (size_t i
= 0; i
< arraysize(v4l2_scaling_matrix
.scaling_list_8x8
); ++i
) {
1868 for (size_t j
= 0; j
< arraysize(v4l2_scaling_matrix
.scaling_list_8x8
[i
]);
1870 v4l2_scaling_matrix
.scaling_list_8x8
[i
][j
] = pps
->scaling_list8x8
[i
][j
];
1873 memset(&ctrl
, 0, sizeof(ctrl
));
1874 ctrl
.id
= V4L2_CID_MPEG_VIDEO_H264_SCALING_MATRIX
;
1875 ctrl
.size
= sizeof(v4l2_scaling_matrix
);
1876 ctrl
.p_h264_scal_mtrx
= &v4l2_scaling_matrix
;
1877 ctrls
.push_back(ctrl
);
1879 scoped_refptr
<V4L2DecodeSurface
> dec_surface
=
1880 H264PictureToV4L2DecodeSurface(pic
);
1882 struct v4l2_ext_controls ext_ctrls
;
1883 memset(&ext_ctrls
, 0, sizeof(ext_ctrls
));
1884 ext_ctrls
.count
= ctrls
.size();
1885 ext_ctrls
.controls
= &ctrls
[0];
1886 ext_ctrls
.config_store
= dec_surface
->config_store();
1887 v4l2_dec_
->SubmitExtControls(&ext_ctrls
);
1889 H264PictureListToDPBIndicesList(ref_pic_listp0
,
1890 v4l2_decode_param_
.ref_pic_list_p0
);
1891 H264PictureListToDPBIndicesList(ref_pic_listb0
,
1892 v4l2_decode_param_
.ref_pic_list_b0
);
1893 H264PictureListToDPBIndicesList(ref_pic_listb1
,
1894 v4l2_decode_param_
.ref_pic_list_b1
);
1896 std::vector
<scoped_refptr
<V4L2DecodeSurface
>> ref_surfaces
;
1897 H264DPBToV4L2DPB(dpb
, &ref_surfaces
);
1898 dec_surface
->SetReferenceSurfaces(ref_surfaces
);
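// Fill one v4l2_ctrl_h264_slice_param entry from the parsed slice header
// (including prediction weight tables when in use) and append the slice data,
// with an Annex-B start code prepended, to the surface's input buffer.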
bool V4L2SliceVideoDecodeAccelerator::V4L2H264Accelerator::SubmitSlice(
    const media::H264PPS* pps,
    const media::H264SliceHeader* slice_hdr,
    const H264Picture::Vector& ref_pic_list0,
    const H264Picture::Vector& ref_pic_list1,
    const scoped_refptr<H264Picture>& pic,
    const uint8_t* data,
    size_t size) {
  if (num_slices_ == kMaxSlices) {
    LOGF(ERROR) << "Over limit of supported slices per frame";
    return false;
  }

  struct v4l2_ctrl_h264_slice_param& v4l2_slice_param =
      v4l2_slice_params_[num_slices_++];
  memset(&v4l2_slice_param, 0, sizeof(v4l2_slice_param));

  v4l2_slice_param.size = size;
#define SHDR_TO_V4L2SPARM(a) v4l2_slice_param.a = slice_hdr->a
  SHDR_TO_V4L2SPARM(header_bit_size);
  SHDR_TO_V4L2SPARM(first_mb_in_slice);
  SHDR_TO_V4L2SPARM(slice_type);
  SHDR_TO_V4L2SPARM(pic_parameter_set_id);
  SHDR_TO_V4L2SPARM(colour_plane_id);
  SHDR_TO_V4L2SPARM(frame_num);
  SHDR_TO_V4L2SPARM(idr_pic_id);
  SHDR_TO_V4L2SPARM(pic_order_cnt_lsb);
  SHDR_TO_V4L2SPARM(delta_pic_order_cnt_bottom);
  SHDR_TO_V4L2SPARM(delta_pic_order_cnt0);
  SHDR_TO_V4L2SPARM(delta_pic_order_cnt1);
  SHDR_TO_V4L2SPARM(redundant_pic_cnt);
  SHDR_TO_V4L2SPARM(dec_ref_pic_marking_bit_size);
  SHDR_TO_V4L2SPARM(cabac_init_idc);
  SHDR_TO_V4L2SPARM(slice_qp_delta);
  SHDR_TO_V4L2SPARM(slice_qs_delta);
  SHDR_TO_V4L2SPARM(disable_deblocking_filter_idc);
  SHDR_TO_V4L2SPARM(slice_alpha_c0_offset_div2);
  SHDR_TO_V4L2SPARM(slice_beta_offset_div2);
  SHDR_TO_V4L2SPARM(num_ref_idx_l0_active_minus1);
  SHDR_TO_V4L2SPARM(num_ref_idx_l1_active_minus1);
  SHDR_TO_V4L2SPARM(pic_order_cnt_bit_size);
#undef SHDR_TO_V4L2SPARM

#define SET_V4L2_SPARM_FLAG_IF(cond, flag) \
  v4l2_slice_param.flags |= ((slice_hdr->cond) ? (flag) : 0)
  SET_V4L2_SPARM_FLAG_IF(field_pic_flag, V4L2_SLICE_FLAG_FIELD_PIC);
  SET_V4L2_SPARM_FLAG_IF(bottom_field_flag, V4L2_SLICE_FLAG_BOTTOM_FIELD);
  SET_V4L2_SPARM_FLAG_IF(direct_spatial_mv_pred_flag,
                         V4L2_SLICE_FLAG_DIRECT_SPATIAL_MV_PRED);
  SET_V4L2_SPARM_FLAG_IF(sp_for_switch_flag, V4L2_SLICE_FLAG_SP_FOR_SWITCH);
#undef SET_V4L2_SPARM_FLAG_IF

  struct v4l2_h264_pred_weight_table* pred_weight_table =
      &v4l2_slice_param.pred_weight_table;

  if (((slice_hdr->IsPSlice() || slice_hdr->IsSPSlice()) &&
       pps->weighted_pred_flag) ||
      (slice_hdr->IsBSlice() && pps->weighted_bipred_idc == 1)) {
    pred_weight_table->luma_log2_weight_denom =
        slice_hdr->luma_log2_weight_denom;
    pred_weight_table->chroma_log2_weight_denom =
        slice_hdr->chroma_log2_weight_denom;

    struct v4l2_h264_weight_factors* factorsl0 =
        &pred_weight_table->weight_factors[0];

    for (int i = 0; i < 32; ++i) {
      factorsl0->luma_weight[i] =
          slice_hdr->pred_weight_table_l0.luma_weight[i];
      factorsl0->luma_offset[i] =
          slice_hdr->pred_weight_table_l0.luma_offset[i];

      for (int j = 0; j < 2; ++j) {
        factorsl0->chroma_weight[i][j] =
            slice_hdr->pred_weight_table_l0.chroma_weight[i][j];
        factorsl0->chroma_offset[i][j] =
            slice_hdr->pred_weight_table_l0.chroma_offset[i][j];
      }
    }

    if (slice_hdr->IsBSlice()) {
      struct v4l2_h264_weight_factors* factorsl1 =
          &pred_weight_table->weight_factors[1];

      for (int i = 0; i < 32; ++i) {
        factorsl1->luma_weight[i] =
            slice_hdr->pred_weight_table_l1.luma_weight[i];
        factorsl1->luma_offset[i] =
            slice_hdr->pred_weight_table_l1.luma_offset[i];

        for (int j = 0; j < 2; ++j) {
          factorsl1->chroma_weight[i][j] =
              slice_hdr->pred_weight_table_l1.chroma_weight[i][j];
          factorsl1->chroma_offset[i][j] =
              slice_hdr->pred_weight_table_l1.chroma_offset[i][j];
        }
      }
    }
  }

  H264PictureListToDPBIndicesList(ref_pic_list0,
                                  v4l2_slice_param.ref_pic_list0);
  H264PictureListToDPBIndicesList(ref_pic_list1,
                                  v4l2_slice_param.ref_pic_list1);

  scoped_refptr<V4L2DecodeSurface> dec_surface =
      H264PictureToV4L2DecodeSurface(pic);

  v4l2_decode_param_.nal_ref_idc = slice_hdr->nal_ref_idc;

  // TODO(posciak): Don't add start code back here, but have it passed from
  // the parser.
  size_t data_copy_size = size + 3;
  scoped_ptr<uint8_t[]> data_copy(new uint8_t[data_copy_size]);
  memset(data_copy.get(), 0, data_copy_size);
  data_copy[2] = 0x01;
  memcpy(data_copy.get() + 3, data, size);
  return v4l2_dec_->SubmitSlice(dec_surface->input_record(), data_copy.get(),
                                data_copy_size);
}

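// Append |size| bytes of slice data to the input buffer at |index|, failing
// if it would overflow the buffer.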
bool V4L2SliceVideoDecodeAccelerator::SubmitSlice(int index,
                                                  const uint8_t* data,
                                                  size_t size) {
  DCHECK(decoder_thread_proxy_->BelongsToCurrentThread());

  InputRecord& input_record = input_buffer_map_[index];

  if (input_record.bytes_used + size > input_record.length) {
    DVLOGF(1) << "Input buffer too small";
    return false;
  }

  memcpy(static_cast<uint8_t*>(input_record.address) + input_record.bytes_used,
         data, size);
  input_record.bytes_used += size;

  return true;
}

bool V4L2SliceVideoDecodeAccelerator::SubmitExtControls(
    struct v4l2_ext_controls* ext_ctrls) {
  DCHECK_GT(ext_ctrls->config_store, 0u);
  IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_S_EXT_CTRLS, ext_ctrls);
  return true;
}

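// Submit the accumulated slice parameters and the per-frame decode parameters
// as V4L2 extended controls for this surface's config store, then queue the
// surface for decoding.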
bool V4L2SliceVideoDecodeAccelerator::V4L2H264Accelerator::SubmitDecode(
    const scoped_refptr<H264Picture>& pic) {
  scoped_refptr<V4L2DecodeSurface> dec_surface =
      H264PictureToV4L2DecodeSurface(pic);

  v4l2_decode_param_.num_slices = num_slices_;
  v4l2_decode_param_.idr_pic_flag = pic->idr;
  v4l2_decode_param_.top_field_order_cnt = pic->top_field_order_cnt;
  v4l2_decode_param_.bottom_field_order_cnt = pic->bottom_field_order_cnt;

  struct v4l2_ext_control ctrl;
  std::vector<struct v4l2_ext_control> ctrls;

  memset(&ctrl, 0, sizeof(ctrl));
  ctrl.id = V4L2_CID_MPEG_VIDEO_H264_SLICE_PARAM;
  ctrl.size = sizeof(v4l2_slice_params_);
  ctrl.p_h264_slice_param = v4l2_slice_params_;
  ctrls.push_back(ctrl);

  memset(&ctrl, 0, sizeof(ctrl));
  ctrl.id = V4L2_CID_MPEG_VIDEO_H264_DECODE_PARAM;
  ctrl.size = sizeof(v4l2_decode_param_);
  ctrl.p_h264_decode_param = &v4l2_decode_param_;
  ctrls.push_back(ctrl);

  struct v4l2_ext_controls ext_ctrls;
  memset(&ext_ctrls, 0, sizeof(ext_ctrls));
  ext_ctrls.count = ctrls.size();
  ext_ctrls.controls = &ctrls[0];
  ext_ctrls.config_store = dec_surface->config_store();
  v4l2_dec_->SubmitExtControls(&ext_ctrls);

  Reset();

  v4l2_dec_->DecodeSurface(dec_surface);

  return true;
}

bool V4L2SliceVideoDecodeAccelerator::V4L2H264Accelerator::OutputPicture(
    const scoped_refptr<H264Picture>& pic) {
  scoped_refptr<V4L2DecodeSurface> dec_surface =
      H264PictureToV4L2DecodeSurface(pic);
  v4l2_dec_->SurfaceReady(dec_surface);

  return true;
}

void V4L2SliceVideoDecodeAccelerator::V4L2H264Accelerator::Reset() {
  num_slices_ = 0;
  memset(&v4l2_decode_param_, 0, sizeof(v4l2_decode_param_));
  memset(&v4l2_slice_params_, 0, sizeof(v4l2_slice_params_));
}

scoped_refptr<V4L2SliceVideoDecodeAccelerator::V4L2DecodeSurface>
V4L2SliceVideoDecodeAccelerator::V4L2H264Accelerator::
    H264PictureToV4L2DecodeSurface(const scoped_refptr<H264Picture>& pic) {
  V4L2H264Picture* v4l2_pic = pic->AsV4L2H264Picture();
  CHECK(v4l2_pic);
  return v4l2_pic->dec_surface();
}

V4L2SliceVideoDecodeAccelerator::V4L2VP8Accelerator::V4L2VP8Accelerator(
    V4L2SliceVideoDecodeAccelerator* v4l2_dec)
    : v4l2_dec_(v4l2_dec) {
  DCHECK(v4l2_dec_);
}

V4L2SliceVideoDecodeAccelerator::V4L2VP8Accelerator::~V4L2VP8Accelerator() {
}

scoped_refptr<VP8Picture>
V4L2SliceVideoDecodeAccelerator::V4L2VP8Accelerator::CreateVP8Picture() {
  scoped_refptr<V4L2DecodeSurface> dec_surface = v4l2_dec_->CreateSurface();
  if (!dec_surface)
    return nullptr;

  return new V4L2VP8Picture(dec_surface);
}

#define ARRAY_MEMCPY_CHECKED(to, from)                               \
  do {                                                               \
    static_assert(sizeof(to) == sizeof(from),                        \
                  #from " and " #to " arrays must be of same size"); \
    memcpy(to, from, sizeof(to));                                    \
  } while (0)

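// Helpers below translate the parsed VP8 frame header substructures
// (segmentation, loop filter, quantization, entropy) into their
// v4l2_ctrl_vp8_frame_hdr counterparts.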
static void FillV4L2SegmentationHeader(
    const media::Vp8SegmentationHeader& vp8_sgmnt_hdr,
    struct v4l2_vp8_sgmnt_hdr* v4l2_sgmnt_hdr) {
#define SET_V4L2_SGMNT_HDR_FLAG_IF(cond, flag) \
  v4l2_sgmnt_hdr->flags |= ((vp8_sgmnt_hdr.cond) ? (flag) : 0)
  SET_V4L2_SGMNT_HDR_FLAG_IF(segmentation_enabled,
                             V4L2_VP8_SEGMNT_HDR_FLAG_ENABLED);
  SET_V4L2_SGMNT_HDR_FLAG_IF(update_mb_segmentation_map,
                             V4L2_VP8_SEGMNT_HDR_FLAG_UPDATE_MAP);
  SET_V4L2_SGMNT_HDR_FLAG_IF(update_segment_feature_data,
                             V4L2_VP8_SEGMNT_HDR_FLAG_UPDATE_FEATURE_DATA);
#undef SET_V4L2_SGMNT_HDR_FLAG_IF
  v4l2_sgmnt_hdr->segment_feature_mode = vp8_sgmnt_hdr.segment_feature_mode;

  ARRAY_MEMCPY_CHECKED(v4l2_sgmnt_hdr->quant_update,
                       vp8_sgmnt_hdr.quantizer_update_value);
  ARRAY_MEMCPY_CHECKED(v4l2_sgmnt_hdr->lf_update,
                       vp8_sgmnt_hdr.lf_update_value);
  ARRAY_MEMCPY_CHECKED(v4l2_sgmnt_hdr->segment_probs,
                       vp8_sgmnt_hdr.segment_prob);
}

static void FillV4L2LoopfilterHeader(
    const media::Vp8LoopFilterHeader& vp8_loopfilter_hdr,
    struct v4l2_vp8_loopfilter_hdr* v4l2_lf_hdr) {
#define SET_V4L2_LF_HDR_FLAG_IF(cond, flag) \
  v4l2_lf_hdr->flags |= ((vp8_loopfilter_hdr.cond) ? (flag) : 0)
  SET_V4L2_LF_HDR_FLAG_IF(loop_filter_adj_enable, V4L2_VP8_LF_HDR_ADJ_ENABLE);
  SET_V4L2_LF_HDR_FLAG_IF(mode_ref_lf_delta_update,
                          V4L2_VP8_LF_HDR_DELTA_UPDATE);
#undef SET_V4L2_LF_HDR_FLAG_IF

#define LF_HDR_TO_V4L2_LF_HDR(a) v4l2_lf_hdr->a = vp8_loopfilter_hdr.a;
  LF_HDR_TO_V4L2_LF_HDR(type);
  LF_HDR_TO_V4L2_LF_HDR(level);
  LF_HDR_TO_V4L2_LF_HDR(sharpness_level);
#undef LF_HDR_TO_V4L2_LF_HDR

  ARRAY_MEMCPY_CHECKED(v4l2_lf_hdr->ref_frm_delta_magnitude,
                       vp8_loopfilter_hdr.ref_frame_delta);
  ARRAY_MEMCPY_CHECKED(v4l2_lf_hdr->mb_mode_delta_magnitude,
                       vp8_loopfilter_hdr.mb_mode_delta);
}

static void FillV4L2QuantizationHeader(
    const media::Vp8QuantizationHeader& vp8_quant_hdr,
    struct v4l2_vp8_quantization_hdr* v4l2_quant_hdr) {
  v4l2_quant_hdr->y_ac_qi = vp8_quant_hdr.y_ac_qi;
  v4l2_quant_hdr->y_dc_delta = vp8_quant_hdr.y_dc_delta;
  v4l2_quant_hdr->y2_dc_delta = vp8_quant_hdr.y2_dc_delta;
  v4l2_quant_hdr->y2_ac_delta = vp8_quant_hdr.y2_ac_delta;
  v4l2_quant_hdr->uv_dc_delta = vp8_quant_hdr.uv_dc_delta;
  v4l2_quant_hdr->uv_ac_delta = vp8_quant_hdr.uv_ac_delta;
}

static void FillV4L2EntropyHeader(
    const media::Vp8EntropyHeader& vp8_entropy_hdr,
    struct v4l2_vp8_entropy_hdr* v4l2_entropy_hdr) {
  ARRAY_MEMCPY_CHECKED(v4l2_entropy_hdr->coeff_probs,
                       vp8_entropy_hdr.coeff_probs);
  ARRAY_MEMCPY_CHECKED(v4l2_entropy_hdr->y_mode_probs,
                       vp8_entropy_hdr.y_mode_probs);
  ARRAY_MEMCPY_CHECKED(v4l2_entropy_hdr->uv_mode_probs,
                       vp8_entropy_hdr.uv_mode_probs);
  ARRAY_MEMCPY_CHECKED(v4l2_entropy_hdr->mv_probs,
                       vp8_entropy_hdr.mv_probs);
}

bool V4L2SliceVideoDecodeAccelerator::V4L2VP8Accelerator::SubmitDecode(
    const scoped_refptr<VP8Picture>& pic,
    const media::Vp8FrameHeader* frame_hdr,
    const scoped_refptr<VP8Picture>& last_frame,
    const scoped_refptr<VP8Picture>& golden_frame,
    const scoped_refptr<VP8Picture>& alt_frame) {
  struct v4l2_ctrl_vp8_frame_hdr v4l2_frame_hdr;
  memset(&v4l2_frame_hdr, 0, sizeof(v4l2_frame_hdr));

#define FHDR_TO_V4L2_FHDR(a) v4l2_frame_hdr.a = frame_hdr->a
  FHDR_TO_V4L2_FHDR(key_frame);
  FHDR_TO_V4L2_FHDR(version);
  FHDR_TO_V4L2_FHDR(width);
  FHDR_TO_V4L2_FHDR(horizontal_scale);
  FHDR_TO_V4L2_FHDR(height);
  FHDR_TO_V4L2_FHDR(vertical_scale);
  FHDR_TO_V4L2_FHDR(sign_bias_golden);
  FHDR_TO_V4L2_FHDR(sign_bias_alternate);
  FHDR_TO_V4L2_FHDR(prob_skip_false);
  FHDR_TO_V4L2_FHDR(prob_intra);
  FHDR_TO_V4L2_FHDR(prob_last);
  FHDR_TO_V4L2_FHDR(prob_gf);
  FHDR_TO_V4L2_FHDR(bool_dec_range);
  FHDR_TO_V4L2_FHDR(bool_dec_value);
  FHDR_TO_V4L2_FHDR(bool_dec_count);
#undef FHDR_TO_V4L2_FHDR

#define SET_V4L2_FRM_HDR_FLAG_IF(cond, flag) \
  v4l2_frame_hdr.flags |= ((frame_hdr->cond) ? (flag) : 0)
  SET_V4L2_FRM_HDR_FLAG_IF(is_experimental,
                           V4L2_VP8_FRAME_HDR_FLAG_EXPERIMENTAL);
  SET_V4L2_FRM_HDR_FLAG_IF(show_frame, V4L2_VP8_FRAME_HDR_FLAG_SHOW_FRAME);
  SET_V4L2_FRM_HDR_FLAG_IF(mb_no_skip_coeff,
                           V4L2_VP8_FRAME_HDR_FLAG_MB_NO_SKIP_COEFF);
#undef SET_V4L2_FRM_HDR_FLAG_IF

  FillV4L2SegmentationHeader(frame_hdr->segmentation_hdr,
                             &v4l2_frame_hdr.sgmnt_hdr);

  FillV4L2LoopfilterHeader(frame_hdr->loopfilter_hdr, &v4l2_frame_hdr.lf_hdr);

  FillV4L2QuantizationHeader(frame_hdr->quantization_hdr,
                             &v4l2_frame_hdr.quant_hdr);

  FillV4L2EntropyHeader(frame_hdr->entropy_hdr, &v4l2_frame_hdr.entropy_hdr);

  v4l2_frame_hdr.first_part_size =
      base::checked_cast<__u32>(frame_hdr->first_part_size);
  v4l2_frame_hdr.first_part_offset =
      base::checked_cast<__u32>(frame_hdr->first_part_offset);
  v4l2_frame_hdr.macroblock_bit_offset =
      base::checked_cast<__u32>(frame_hdr->macroblock_bit_offset);
  v4l2_frame_hdr.num_dct_parts = frame_hdr->num_of_dct_partitions;

  static_assert(arraysize(v4l2_frame_hdr.dct_part_sizes) ==
                    arraysize(frame_hdr->dct_partition_sizes),
                "DCT partition size arrays must have equal number of elements");
  for (size_t i = 0; i < frame_hdr->num_of_dct_partitions &&
                         i < arraysize(v4l2_frame_hdr.dct_part_sizes); ++i)
    v4l2_frame_hdr.dct_part_sizes[i] = frame_hdr->dct_partition_sizes[i];

  scoped_refptr<V4L2DecodeSurface> dec_surface =
      VP8PictureToV4L2DecodeSurface(pic);
  std::vector<scoped_refptr<V4L2DecodeSurface>> ref_surfaces;

  if (last_frame) {
    scoped_refptr<V4L2DecodeSurface> last_frame_surface =
        VP8PictureToV4L2DecodeSurface(last_frame);
    v4l2_frame_hdr.last_frame = last_frame_surface->output_record();
    ref_surfaces.push_back(last_frame_surface);
  } else {
    v4l2_frame_hdr.last_frame = VIDEO_MAX_FRAME;
  }

  if (golden_frame) {
    scoped_refptr<V4L2DecodeSurface> golden_frame_surface =
        VP8PictureToV4L2DecodeSurface(golden_frame);
    v4l2_frame_hdr.golden_frame = golden_frame_surface->output_record();
    ref_surfaces.push_back(golden_frame_surface);
  } else {
    v4l2_frame_hdr.golden_frame = VIDEO_MAX_FRAME;
  }

  if (alt_frame) {
    scoped_refptr<V4L2DecodeSurface> alt_frame_surface =
        VP8PictureToV4L2DecodeSurface(alt_frame);
    v4l2_frame_hdr.alt_frame = alt_frame_surface->output_record();
    ref_surfaces.push_back(alt_frame_surface);
  } else {
    v4l2_frame_hdr.alt_frame = VIDEO_MAX_FRAME;
  }

  struct v4l2_ext_control ctrl;
  memset(&ctrl, 0, sizeof(ctrl));
  ctrl.id = V4L2_CID_MPEG_VIDEO_VP8_FRAME_HDR;
  ctrl.size = sizeof(v4l2_frame_hdr);
  ctrl.p_vp8_frame_hdr = &v4l2_frame_hdr;

  struct v4l2_ext_controls ext_ctrls;
  memset(&ext_ctrls, 0, sizeof(ext_ctrls));
  ext_ctrls.count = 1;
  ext_ctrls.controls = &ctrl;
  ext_ctrls.config_store = dec_surface->config_store();

  if (!v4l2_dec_->SubmitExtControls(&ext_ctrls))
    return false;

  dec_surface->SetReferenceSurfaces(ref_surfaces);

  if (!v4l2_dec_->SubmitSlice(dec_surface->input_record(), frame_hdr->data,
                              frame_hdr->frame_size))
    return false;

  v4l2_dec_->DecodeSurface(dec_surface);

  return true;
}

bool V4L2SliceVideoDecodeAccelerator::V4L2VP8Accelerator::OutputPicture(
    const scoped_refptr<VP8Picture>& pic) {
  scoped_refptr<V4L2DecodeSurface> dec_surface =
      VP8PictureToV4L2DecodeSurface(pic);

  v4l2_dec_->SurfaceReady(dec_surface);

  return true;
}

scoped_refptr<V4L2SliceVideoDecodeAccelerator::V4L2DecodeSurface>
V4L2SliceVideoDecodeAccelerator::V4L2VP8Accelerator::
    VP8PictureToV4L2DecodeSurface(const scoped_refptr<VP8Picture>& pic) {
  V4L2VP8Picture* v4l2_pic = pic->AsV4L2VP8Picture();
  CHECK(v4l2_pic);
  return v4l2_pic->dec_surface();
}

void V4L2SliceVideoDecodeAccelerator::DecodeSurface(
    const scoped_refptr<V4L2DecodeSurface>& dec_surface) {
  DCHECK(decoder_thread_proxy_->BelongsToCurrentThread());

  DVLOGF(3) << "Submitting decode for surface: " << dec_surface->ToString();
  Enqueue(dec_surface);
}

void V4L2SliceVideoDecodeAccelerator::SurfaceReady(
    const scoped_refptr<V4L2DecodeSurface>& dec_surface) {
  DCHECK(decoder_thread_proxy_->BelongsToCurrentThread());

  decoder_display_queue_.push(dec_surface);
  TryOutputSurfaces();
}

void V4L2SliceVideoDecodeAccelerator::TryOutputSurfaces() {
  while (!decoder_display_queue_.empty()) {
    scoped_refptr<V4L2DecodeSurface> dec_surface =
        decoder_display_queue_.front();

    if (!dec_surface->decoded())
      break;

    decoder_display_queue_.pop();
    OutputSurface(dec_surface);
  }
}

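// Hand a decoded surface off to the client as a media::Picture, remembering
// it in surfaces_at_display_ keyed by picture buffer id.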
void V4L2SliceVideoDecodeAccelerator::OutputSurface(
    const scoped_refptr<V4L2DecodeSurface>& dec_surface) {
  DCHECK(decoder_thread_proxy_->BelongsToCurrentThread());

  OutputRecord& output_record =
      output_buffer_map_[dec_surface->output_record()];

  bool inserted =
      surfaces_at_display_.insert(std::make_pair(output_record.picture_id,
                                                 dec_surface)).second;
  DCHECK(inserted);

  DCHECK(!output_record.at_client);
  DCHECK(!output_record.at_device);
  DCHECK_NE(output_record.egl_image, EGL_NO_IMAGE_KHR);
  DCHECK_NE(output_record.picture_id, -1);
  output_record.at_client = true;

  media::Picture picture(output_record.picture_id, dec_surface->bitstream_id(),
                         gfx::Rect(visible_size_), false);
  DVLOGF(3) << dec_surface->ToString()
            << ", bitstream_id: " << picture.bitstream_buffer_id()
            << ", picture_id: " << picture.picture_buffer_id();
  pending_picture_ready_.push(PictureRecord(output_record.cleared, picture));
  SendPictureReady();
  output_record.cleared = true;
}

scoped_refptr<V4L2SliceVideoDecodeAccelerator::V4L2DecodeSurface>
V4L2SliceVideoDecodeAccelerator::CreateSurface() {
  DCHECK(decoder_thread_proxy_->BelongsToCurrentThread());
  DCHECK_EQ(state_, kDecoding);

  if (free_input_buffers_.empty() || free_output_buffers_.empty())
    return nullptr;

  int input = free_input_buffers_.front();
  free_input_buffers_.pop_front();
  int output = free_output_buffers_.front();
  free_output_buffers_.pop_front();

  InputRecord& input_record = input_buffer_map_[input];
  DCHECK_EQ(input_record.bytes_used, 0u);
  DCHECK_EQ(input_record.input_id, -1);
  DCHECK(decoder_current_bitstream_buffer_ != nullptr);
  input_record.input_id = decoder_current_bitstream_buffer_->input_id;

  scoped_refptr<V4L2DecodeSurface> dec_surface = new V4L2DecodeSurface(
      decoder_current_bitstream_buffer_->input_id, input, output,
      base::Bind(&V4L2SliceVideoDecodeAccelerator::ReuseOutputBuffer,
                 base::Unretained(this)));

  DVLOGF(4) << "Created surface " << input << " -> " << output;
  return dec_surface;
}

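// Deliver pending PictureReady notifications in order: cleared pictures go
// straight to the IO thread, while uncleared ones are posted to the child
// thread to be cleared first.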
void V4L2SliceVideoDecodeAccelerator::SendPictureReady() {
  DCHECK(decoder_thread_proxy_->BelongsToCurrentThread());
  bool resetting_or_flushing = (decoder_resetting_ || decoder_flushing_);
  while (!pending_picture_ready_.empty()) {
    bool cleared = pending_picture_ready_.front().cleared;
    const media::Picture& picture = pending_picture_ready_.front().picture;
    if (cleared && picture_clearing_count_ == 0) {
      DVLOGF(4) << "Posting picture ready to IO for: "
                << picture.picture_buffer_id();
      // This picture is cleared. Post it to IO thread to reduce latency. This
      // should be the case after all pictures are cleared at the beginning.
      io_message_loop_proxy_->PostTask(
          FROM_HERE, base::Bind(&Client::PictureReady, io_client_, picture));
      pending_picture_ready_.pop();
    } else if (!cleared || resetting_or_flushing) {
      DVLOGF(3) << "cleared=" << pending_picture_ready_.front().cleared
                << ", decoder_resetting_=" << decoder_resetting_
                << ", decoder_flushing_=" << decoder_flushing_
                << ", picture_clearing_count_=" << picture_clearing_count_;
      DVLOGF(4) << "Posting picture ready to GPU for: "
                << picture.picture_buffer_id();
      // If the picture is not cleared, post it to the child thread because it
      // has to be cleared in the child thread. A picture only needs to be
      // cleared once. If the decoder is resetting or flushing, send all
      // pictures to ensure PictureReady arrive before reset or flush done.
      child_message_loop_proxy_->PostTaskAndReply(
          FROM_HERE, base::Bind(&Client::PictureReady, client_, picture),
          // Unretained is safe. If Client::PictureReady gets to run, |this| is
          // alive. Destroy() will wait the decode thread to finish.
          base::Bind(&V4L2SliceVideoDecodeAccelerator::PictureCleared,
                     base::Unretained(this)));
      picture_clearing_count_++;
      pending_picture_ready_.pop();
    } else {
      // This picture is cleared. But some pictures are about to be cleared on
      // the child thread. To preserve the order, do not send this until those
      // pictures are cleared.
      break;
    }
  }
}

void V4L2SliceVideoDecodeAccelerator::PictureCleared() {
  DVLOGF(3) << "clearing count=" << picture_clearing_count_;
  DCHECK(decoder_thread_proxy_->BelongsToCurrentThread());
  DCHECK_GT(picture_clearing_count_, 0);
  picture_clearing_count_--;
  SendPictureReady();
}

bool V4L2SliceVideoDecodeAccelerator::CanDecodeOnIOThread() {
  return true;
}

}  // namespace content