content/common/gpu/media/gpu_video_encode_accelerator.cc
// Copyright 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "content/common/gpu/media/gpu_video_encode_accelerator.h"

#include "base/callback.h"
#include "base/command_line.h"
#include "base/logging.h"
#include "base/memory/shared_memory.h"
#include "base/message_loop/message_loop_proxy.h"
#include "base/numerics/safe_math.h"
#include "base/sys_info.h"
#include "build/build_config.h"
#include "content/common/gpu/gpu_channel.h"
#include "content/common/gpu/gpu_messages.h"
#include "content/public/common/content_switches.h"
#include "ipc/ipc_message_macros.h"
#include "media/base/limits.h"
#include "media/base/video_frame.h"

#if defined(OS_CHROMEOS)
#if defined(USE_V4L2_CODEC)
#include "content/common/gpu/media/v4l2_video_encode_accelerator.h"
#endif
#if defined(ARCH_CPU_X86_FAMILY)
#include "content/common/gpu/media/vaapi_video_encode_accelerator.h"
#endif
#elif defined(OS_ANDROID) && defined(ENABLE_WEBRTC)
#include "content/common/gpu/media/android_video_encode_accelerator.h"
#endif

namespace content {

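// Bound into |make_context_current_| in the constructor. Makes the GL context
// owned by the stub's command buffer (hence "decoder" in the name) current on
// this thread, failing if the stub has already been torn down.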
static bool MakeDecoderContextCurrent(
    const base::WeakPtr<GpuCommandBufferStub> stub) {
  if (!stub) {
    DLOG(ERROR) << "Stub is gone; won't MakeCurrent().";
    return false;
  }

  if (!stub->decoder()->MakeCurrent()) {
    DLOG(ERROR) << "Failed to MakeCurrent()";
    return false;
  }

  return true;
}

GpuVideoEncodeAccelerator::GpuVideoEncodeAccelerator(int32 host_route_id,
                                                     GpuCommandBufferStub* stub)
    : host_route_id_(host_route_id),
      stub_(stub),
      input_format_(media::VideoFrame::UNKNOWN),
      output_buffer_size_(0),
      weak_this_factory_(this) {
  stub_->AddDestructionObserver(this);
  make_context_current_ =
      base::Bind(&MakeDecoderContextCurrent, stub_->AsWeakPtr());
}

GpuVideoEncodeAccelerator::~GpuVideoEncodeAccelerator() {
  // This class can only be self-deleted from OnWillDestroyStub(), which means
  // the VEA has already been destroyed in there.
  DCHECK(!encoder_);
}

void GpuVideoEncodeAccelerator::Initialize(
    media::VideoFrame::Format input_format,
    const gfx::Size& input_visible_size,
    media::VideoCodecProfile output_profile,
    uint32 initial_bitrate,
    IPC::Message* init_done_msg) {
  DVLOG(2) << "GpuVideoEncodeAccelerator::Initialize(): "
              "input_format=" << input_format
           << ", input_visible_size=" << input_visible_size.ToString()
           << ", output_profile=" << output_profile
           << ", initial_bitrate=" << initial_bitrate;
  DCHECK(!encoder_);

  if (!stub_->channel()->AddRoute(host_route_id_, this)) {
    DLOG(ERROR) << "GpuVideoEncodeAccelerator::Initialize(): "
                   "failed to add route";
    SendCreateEncoderReply(init_done_msg, false);
    return;
  }

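  // |input_visible_size| arrives over IPC from the renderer, so sanity-check
  // it against media::limits before handing it to a hardware encoder.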
  if (input_visible_size.width() > media::limits::kMaxDimension ||
      input_visible_size.height() > media::limits::kMaxDimension ||
      input_visible_size.GetArea() > media::limits::kMaxCanvas) {
    DLOG(ERROR) << "GpuVideoEncodeAccelerator::Initialize(): "
                   "input_visible_size " << input_visible_size.ToString()
                << " too large";
    SendCreateEncoderReply(init_done_msg, false);
    return;
  }

  std::vector<GpuVideoEncodeAccelerator::CreateVEAFp>
      create_vea_fps = CreateVEAFps();
  // Try all possible encoders and use the first one that initializes
  // successfully.
  for (size_t i = 0; i < create_vea_fps.size(); ++i) {
    encoder_ = (*create_vea_fps[i])();
    if (encoder_ && encoder_->Initialize(input_format,
                                         input_visible_size,
                                         output_profile,
                                         initial_bitrate,
                                         this)) {
      input_format_ = input_format;
      input_visible_size_ = input_visible_size;
      SendCreateEncoderReply(init_done_msg, true);
      return;
    }
  }
  encoder_.reset();
  DLOG(ERROR)
      << "GpuVideoEncodeAccelerator::Initialize(): VEA initialization failed";
  SendCreateEncoderReply(init_done_msg, false);
}

bool GpuVideoEncodeAccelerator::OnMessageReceived(const IPC::Message& message) {
  bool handled = true;
  IPC_BEGIN_MESSAGE_MAP(GpuVideoEncodeAccelerator, message)
    IPC_MESSAGE_HANDLER(AcceleratedVideoEncoderMsg_Encode, OnEncode)
    IPC_MESSAGE_HANDLER(AcceleratedVideoEncoderMsg_UseOutputBitstreamBuffer,
                        OnUseOutputBitstreamBuffer)
    IPC_MESSAGE_HANDLER(
        AcceleratedVideoEncoderMsg_RequestEncodingParametersChange,
        OnRequestEncodingParametersChange)
    IPC_MESSAGE_HANDLER(AcceleratedVideoEncoderMsg_Destroy, OnDestroy)
    IPC_MESSAGE_UNHANDLED(handled = false)
  IPC_END_MESSAGE_MAP()
  return handled;
}

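// media::VideoEncodeAccelerator::Client implementation: the callbacks below
// simply relay encoder events to the host on the other end of the IPC channel.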
void GpuVideoEncodeAccelerator::RequireBitstreamBuffers(
    unsigned int input_count,
    const gfx::Size& input_coded_size,
    size_t output_buffer_size) {
  Send(new AcceleratedVideoEncoderHostMsg_RequireBitstreamBuffers(
      host_route_id_, input_count, input_coded_size, output_buffer_size));
  input_coded_size_ = input_coded_size;
  output_buffer_size_ = output_buffer_size;
}

void GpuVideoEncodeAccelerator::BitstreamBufferReady(int32 bitstream_buffer_id,
                                                     size_t payload_size,
                                                     bool key_frame) {
  Send(new AcceleratedVideoEncoderHostMsg_BitstreamBufferReady(
      host_route_id_, bitstream_buffer_id, payload_size, key_frame));
}

void GpuVideoEncodeAccelerator::NotifyError(
    media::VideoEncodeAccelerator::Error error) {
  Send(new AcceleratedVideoEncoderHostMsg_NotifyError(host_route_id_, error));
}

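// Called when the owning stub is about to be destroyed (and from OnDestroy()).
// Tears down the encoder and then self-deletes; the destructor DCHECKs that
// |encoder_| is already gone by then.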
void GpuVideoEncodeAccelerator::OnWillDestroyStub() {
  DCHECK(stub_);
  stub_->channel()->RemoveRoute(host_route_id_);
  stub_->RemoveDestructionObserver(this);
  encoder_.reset();
  delete this;
}

// static
std::vector<gpu::VideoEncodeAcceleratorSupportedProfile>
GpuVideoEncodeAccelerator::GetSupportedProfiles() {
  std::vector<media::VideoEncodeAccelerator::SupportedProfile> profiles;
  std::vector<GpuVideoEncodeAccelerator::CreateVEAFp>
      create_vea_fps = CreateVEAFps();

  for (size_t i = 0; i < create_vea_fps.size(); ++i) {
    scoped_ptr<media::VideoEncodeAccelerator>
        encoder = (*create_vea_fps[i])();
    if (!encoder)
      continue;
    std::vector<media::VideoEncodeAccelerator::SupportedProfile>
        vea_profiles = encoder->GetSupportedProfiles();
    profiles.insert(profiles.end(), vea_profiles.begin(), vea_profiles.end());
  }
  return ConvertMediaToGpuProfiles(profiles);
}

// static
std::vector<gpu::VideoEncodeAcceleratorSupportedProfile>
GpuVideoEncodeAccelerator::ConvertMediaToGpuProfiles(const std::vector<
    media::VideoEncodeAccelerator::SupportedProfile>& media_profiles) {
  std::vector<gpu::VideoEncodeAcceleratorSupportedProfile> profiles;
  for (size_t i = 0; i < media_profiles.size(); i++) {
    gpu::VideoEncodeAcceleratorSupportedProfile profile;
    profile.profile =
        static_cast<gpu::VideoCodecProfile>(media_profiles[i].profile);
    profile.max_resolution = media_profiles[i].max_resolution;
    profile.max_framerate_numerator = media_profiles[i].max_framerate_numerator;
    profile.max_framerate_denominator =
        media_profiles[i].max_framerate_denominator;
    profiles.push_back(profile);
  }
  return profiles;
}

// static
std::vector<GpuVideoEncodeAccelerator::CreateVEAFp>
GpuVideoEncodeAccelerator::CreateVEAFps() {
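  // The factories are tried in this order by Initialize(); each returns null
  // on platforms where the corresponding encoder is not compiled in or not
  // usable.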
  std::vector<GpuVideoEncodeAccelerator::CreateVEAFp> create_vea_fps;
  create_vea_fps.push_back(&GpuVideoEncodeAccelerator::CreateV4L2VEA);
  create_vea_fps.push_back(&GpuVideoEncodeAccelerator::CreateVaapiVEA);
  create_vea_fps.push_back(&GpuVideoEncodeAccelerator::CreateAndroidVEA);
  return create_vea_fps;
}

// static
scoped_ptr<media::VideoEncodeAccelerator>
GpuVideoEncodeAccelerator::CreateV4L2VEA() {
  scoped_ptr<media::VideoEncodeAccelerator> encoder;
#if defined(OS_CHROMEOS) && defined(USE_V4L2_CODEC)
  scoped_refptr<V4L2Device> device = V4L2Device::Create(V4L2Device::kEncoder);
  if (device)
    encoder.reset(new V4L2VideoEncodeAccelerator(device));
#endif
  return encoder.Pass();
}

// static
scoped_ptr<media::VideoEncodeAccelerator>
GpuVideoEncodeAccelerator::CreateVaapiVEA() {
  scoped_ptr<media::VideoEncodeAccelerator> encoder;
#if defined(OS_CHROMEOS) && defined(ARCH_CPU_X86_FAMILY)
  const base::CommandLine* cmd_line = base::CommandLine::ForCurrentProcess();
  if (!cmd_line->HasSwitch(switches::kDisableVaapiAcceleratedVideoEncode))
    encoder.reset(new VaapiVideoEncodeAccelerator());
#endif
  return encoder.Pass();
}

// static
scoped_ptr<media::VideoEncodeAccelerator>
GpuVideoEncodeAccelerator::CreateAndroidVEA() {
  scoped_ptr<media::VideoEncodeAccelerator> encoder;
#if defined(OS_ANDROID) && defined(ENABLE_WEBRTC)
  encoder.reset(new AndroidVideoEncodeAccelerator());
#endif
  return encoder.Pass();
}

void GpuVideoEncodeAccelerator::OnEncode(int32 frame_id,
                                         base::SharedMemoryHandle buffer_handle,
                                         uint32 buffer_offset,
                                         uint32 buffer_size,
                                         bool force_keyframe) {
  DVLOG(3) << "GpuVideoEncodeAccelerator::OnEncode(): frame_id=" << frame_id
           << ", buffer_size=" << buffer_size
           << ", force_keyframe=" << force_keyframe;
  if (!encoder_)
    return;
  if (frame_id < 0) {
    DLOG(ERROR) << "GpuVideoEncodeAccelerator::OnEncode(): invalid frame_id="
                << frame_id;
    NotifyError(media::VideoEncodeAccelerator::kPlatformFailureError);
    return;
  }

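  // base::SharedMemory::MapAt() needs an offset aligned to the VM allocation
  // granularity, so round |buffer_offset| down to the nearest boundary and
  // grow the mapped size by the same amount; CheckedNumeric guards against
  // overflow of these untrusted values.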
  uint32 aligned_offset =
      buffer_offset % base::SysInfo::VMAllocationGranularity();
  base::CheckedNumeric<off_t> map_offset = buffer_offset;
  map_offset -= aligned_offset;
  base::CheckedNumeric<size_t> map_size = buffer_size;
  map_size += aligned_offset;

  if (!map_offset.IsValid() || !map_size.IsValid()) {
    DLOG(ERROR) << "GpuVideoEncodeAccelerator::OnEncode():"
                << " invalid (buffer_offset,buffer_size)";
    NotifyError(media::VideoEncodeAccelerator::kPlatformFailureError);
    return;
  }

  scoped_ptr<base::SharedMemory> shm(
      new base::SharedMemory(buffer_handle, true));
  if (!shm->MapAt(map_offset.ValueOrDie(), map_size.ValueOrDie())) {
    DLOG(ERROR) << "GpuVideoEncodeAccelerator::OnEncode(): "
                   "could not map frame_id=" << frame_id;
    NotifyError(media::VideoEncodeAccelerator::kPlatformFailureError);
    return;
  }

  uint8* shm_memory = reinterpret_cast<uint8*>(shm->memory()) + aligned_offset;
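  // Wrap the mapped memory in a VideoFrame with a callback that runs when the
  // frame is no longer needed; it posts EncodeFrameFinished() back to this
  // thread, so |shm| stays mapped (and the host is not told the input is done)
  // until the encoder releases the frame.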
  scoped_refptr<media::VideoFrame> frame =
      media::VideoFrame::WrapExternalPackedMemory(
          input_format_,
          input_coded_size_,
          gfx::Rect(input_visible_size_),
          input_visible_size_,
          shm_memory,
          buffer_size,
          buffer_handle,
          buffer_offset,
          base::TimeDelta(),
          // It's turtles all the way down...
          base::Bind(base::IgnoreResult(&base::MessageLoopProxy::PostTask),
                     base::MessageLoopProxy::current(),
                     FROM_HERE,
                     base::Bind(&GpuVideoEncodeAccelerator::EncodeFrameFinished,
                                weak_this_factory_.GetWeakPtr(),
                                frame_id,
                                base::Passed(&shm))));

  if (!frame.get()) {
    DLOG(ERROR) << "GpuVideoEncodeAccelerator::OnEncode(): "
                   "could not create VideoFrame for frame_id=" << frame_id;
    NotifyError(media::VideoEncodeAccelerator::kPlatformFailureError);
    return;
  }

  encoder_->Encode(frame, force_keyframe);
}

void GpuVideoEncodeAccelerator::OnUseOutputBitstreamBuffer(
    int32 buffer_id,
    base::SharedMemoryHandle buffer_handle,
    uint32 buffer_size) {
  DVLOG(3) << "GpuVideoEncodeAccelerator::OnUseOutputBitstreamBuffer(): "
              "buffer_id=" << buffer_id
           << ", buffer_size=" << buffer_size;
  if (!encoder_)
    return;
  if (buffer_id < 0) {
    DLOG(ERROR) << "GpuVideoEncodeAccelerator::OnUseOutputBitstreamBuffer(): "
                   "invalid buffer_id=" << buffer_id;
    NotifyError(media::VideoEncodeAccelerator::kPlatformFailureError);
    return;
  }
  if (buffer_size < output_buffer_size_) {
    DLOG(ERROR) << "GpuVideoEncodeAccelerator::OnUseOutputBitstreamBuffer(): "
                   "buffer too small for buffer_id=" << buffer_id;
    NotifyError(media::VideoEncodeAccelerator::kPlatformFailureError);
    return;
  }
  encoder_->UseOutputBitstreamBuffer(
      media::BitstreamBuffer(buffer_id, buffer_handle, buffer_size));
}

void GpuVideoEncodeAccelerator::OnDestroy() {
  DVLOG(2) << "GpuVideoEncodeAccelerator::OnDestroy()";
  OnWillDestroyStub();
}

void GpuVideoEncodeAccelerator::OnRequestEncodingParametersChange(
    uint32 bitrate,
    uint32 framerate) {
  DVLOG(2) << "GpuVideoEncodeAccelerator::OnRequestEncodingParametersChange(): "
              "bitrate=" << bitrate
           << ", framerate=" << framerate;
  if (!encoder_)
    return;
  encoder_->RequestEncodingParametersChange(bitrate, framerate);
}

void GpuVideoEncodeAccelerator::EncodeFrameFinished(
    int32 frame_id,
    scoped_ptr<base::SharedMemory> shm) {
  Send(new AcceleratedVideoEncoderHostMsg_NotifyInputDone(host_route_id_,
                                                          frame_id));
  // Just let |shm| fall out of scope to unmap and release the shared memory.
}

void GpuVideoEncodeAccelerator::Send(IPC::Message* message) {
  stub_->channel()->Send(message);
}

void GpuVideoEncodeAccelerator::SendCreateEncoderReply(IPC::Message* message,
                                                       bool succeeded) {
  GpuCommandBufferMsg_CreateVideoEncoder::WriteReplyParams(message, succeeded);
  Send(message);
}

}  // namespace content