/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#ifndef __MSM_DRV_H__
#define __MSM_DRV_H__

#include <linux/kernel.h>
#include <linux/clk.h>
#include <linux/cpufreq.h>
#include <linux/module.h>
#include <linux/component.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/iommu.h>
#include <linux/types.h>
#include <linux/of_graph.h>
#include <linux/of_device.h>
#include <linux/sizes.h>
#include <linux/kthread.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/msm_drm.h>
#include <drm/drm_gem.h>

struct msm_kms;
struct msm_gpu;
struct msm_mmu;
struct msm_mdss;
struct msm_rd_state;
struct msm_perf_state;
struct msm_gem_submit;
struct msm_fence_context;
struct msm_gem_address_space;
struct msm_gem_vma;

#define MAX_CRTCS      8
#define MAX_PLANES     20
#define MAX_ENCODERS   8
#define MAX_BRIDGES    8
#define MAX_CONNECTORS 8

#define FRAC_16_16(mult, div)    (((mult) << 16) / (div))

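/*
 * Worked example (illustrative): in 16.16 fixed point, FRAC_16_16(1, 2)
 * evaluates to 0x8000 (0.5) and FRAC_16_16(3, 2) to 0x18000 (1.5).
 */
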
struct msm_file_private {
	rwlock_t queuelock;
	struct list_head submitqueues;
	int queueid;
	struct msm_gem_address_space *aspace;
};

enum msm_mdp_plane_property {
	PLANE_PROP_ZPOS,
	PLANE_PROP_ALPHA,
	PLANE_PROP_PREMULTIPLIED,
	PLANE_PROP_MAX_NUM
};

#define MSM_GPU_MAX_RINGS 4
#define MAX_H_TILES_PER_DISPLAY 2

/**
 * enum msm_display_caps - features/capabilities supported by displays
 * @MSM_DISPLAY_CAP_VID_MODE:           Video or "active" mode supported
 * @MSM_DISPLAY_CAP_CMD_MODE:           Command mode supported
 * @MSM_DISPLAY_CAP_HOT_PLUG:           Hot plug detection supported
 * @MSM_DISPLAY_CAP_EDID:               EDID supported
 */
enum msm_display_caps {
	MSM_DISPLAY_CAP_VID_MODE	= BIT(0),
	MSM_DISPLAY_CAP_CMD_MODE	= BIT(1),
	MSM_DISPLAY_CAP_HOT_PLUG	= BIT(2),
	MSM_DISPLAY_CAP_EDID		= BIT(3),
};

/**
 * enum msm_event_wait - type of HW events to wait for
 * @MSM_ENC_COMMIT_DONE - wait for the driver to flush the registers to HW
 * @MSM_ENC_TX_COMPLETE - wait for the HW to transfer the frame to panel
 * @MSM_ENC_VBLANK - wait for the HW VBLANK event (for driver-internal waiters)
 */
enum msm_event_wait {
	MSM_ENC_COMMIT_DONE = 0,
	MSM_ENC_TX_COMPLETE,
	MSM_ENC_VBLANK,
};

/**
 * struct msm_display_topology - defines a display topology pipeline
 * @num_lm:       number of layer mixers used
 * @num_enc:      number of compression encoder blocks used
 * @num_intf:     number of interfaces the panel is mounted on
 */
struct msm_display_topology {
	u32 num_lm;
	u32 num_enc;
	u32 num_intf;
};

/**
 * struct msm_display_info - defines display properties
 * @intf_type:          DRM_MODE_ENCODER_ type
 * @capabilities:       Bitmask of display flags
 * @num_of_h_tiles:     Number of horizontal tiles in case of split interface
 * @h_tile_instance:    Controller instance used per tile. Number of elements is
 *                      based on num_of_h_tiles
 * @is_te_using_watchdog_timer:  Boolean to indicate watchdog TE is
 *                               used instead of panel TE in cmd mode panels
 */
struct msm_display_info {
	int intf_type;
	uint32_t capabilities;
	uint32_t num_of_h_tiles;
	uint32_t h_tile_instance[MAX_H_TILES_PER_DISPLAY];
	bool is_te_using_watchdog_timer;
};

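/*
 * Illustrative configuration (values hypothetical): a dual-DSI "split"
 * panel driven by DSI controllers 0 and 1 would use
 * intf_type = DRM_MODE_ENCODER_DSI, num_of_h_tiles = 2 and
 * h_tile_instance = { 0, 1 }.
 */
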
/* Commit/Event thread specific structure */
struct msm_drm_thread {
	struct drm_device *dev;
	struct task_struct *thread;
	unsigned int crtc_id;
	struct kthread_worker worker;
};

struct msm_drm_private {

	struct drm_device *dev;

	struct msm_kms *kms;

	/* subordinate devices, if present: */
	struct platform_device *gpu_pdev;

	/* top level MDSS wrapper device (for MDP5/DPU only) */
	struct msm_mdss *mdss;

	/* possibly this should be in the kms component, but it is
	 * shared by both mdp4 and mdp5..
	 */
	struct hdmi *hdmi;

	/* eDP is for mdp5 only, but kms has not been created
	 * when edp_bind() and edp_init() are called. Here is the only
	 * place to keep the edp instance.
	 */
	struct msm_edp *edp;

	/* DSI is shared by mdp4 and mdp5 */
	struct msm_dsi *dsi[2];

	/* when we have more than one 'msm_gpu' these need to be an array: */
	struct msm_gpu *gpu;
	struct msm_file_private *lastctx;
	/* gpu is only set on open(), but we need this info earlier */
	bool is_a2xx;

	struct drm_fb_helper *fbdev;

	struct msm_rd_state *rd;       /* debugfs to dump all submits */
	struct msm_rd_state *hangrd;   /* debugfs to dump hanging submits */
	struct msm_perf_state *perf;

	/* list of GEM objects: */
	struct list_head inactive_list;

	/* worker for delayed free of objects: */
	struct work_struct free_work;
	struct llist_head free_list;

	struct workqueue_struct *wq;

	unsigned int num_planes;
	struct drm_plane *planes[MAX_PLANES];

	unsigned int num_crtcs;
	struct drm_crtc *crtcs[MAX_CRTCS];

	struct msm_drm_thread event_thread[MAX_CRTCS];

	unsigned int num_encoders;
	struct drm_encoder *encoders[MAX_ENCODERS];

	unsigned int num_bridges;
	struct drm_bridge *bridges[MAX_BRIDGES];

	unsigned int num_connectors;
	struct drm_connector *connectors[MAX_CONNECTORS];

	/* Properties */
	struct drm_property *plane_property[PLANE_PROP_MAX_NUM];

	/* VRAM carveout, used when no IOMMU: */
	struct {
		unsigned long size;
		dma_addr_t paddr;
		/* NOTE: mm managed at the page level, size is in # of pages
		 * and position mm_node->start is in # of pages:
		 */
		struct drm_mm mm;
		spinlock_t lock; /* Protects drm_mm node allocation/removal */
	} vram;

	struct notifier_block vmap_notifier;
	struct shrinker shrinker;

	struct drm_atomic_state *pm_state;
};

struct msm_format {
	uint32_t pixel_format;
};

struct msm_pending_timer;

int msm_atomic_prepare_fb(struct drm_plane *plane,
			  struct drm_plane_state *new_state);
void msm_atomic_init_pending_timer(struct msm_pending_timer *timer,
		struct msm_kms *kms, int crtc_idx);
void msm_atomic_commit_tail(struct drm_atomic_state *state);
struct drm_atomic_state *msm_atomic_state_alloc(struct drm_device *dev);
void msm_atomic_state_clear(struct drm_atomic_state *state);
void msm_atomic_state_free(struct drm_atomic_state *state);

int msm_gem_init_vma(struct msm_gem_address_space *aspace,
		struct msm_gem_vma *vma, int npages);
void msm_gem_purge_vma(struct msm_gem_address_space *aspace,
		struct msm_gem_vma *vma);
void msm_gem_unmap_vma(struct msm_gem_address_space *aspace,
		struct msm_gem_vma *vma);
int msm_gem_map_vma(struct msm_gem_address_space *aspace,
		struct msm_gem_vma *vma, int prot,
		struct sg_table *sgt, int npages);
void msm_gem_close_vma(struct msm_gem_address_space *aspace,
		struct msm_gem_vma *vma);

void msm_gem_address_space_put(struct msm_gem_address_space *aspace);

struct msm_gem_address_space *
msm_gem_address_space_create(struct device *dev, struct iommu_domain *domain,
		const char *name);

struct msm_gem_address_space *
msm_gem_address_space_create_a2xx(struct device *dev, struct msm_gpu *gpu,
		const char *name, uint64_t va_start, uint64_t va_end);

int msm_register_mmu(struct drm_device *dev, struct msm_mmu *mmu);
void msm_unregister_mmu(struct drm_device *dev, struct msm_mmu *mmu);

bool msm_use_mmu(struct drm_device *dev);

void msm_gem_submit_free(struct msm_gem_submit *submit);
int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
		struct drm_file *file);

void msm_gem_shrinker_init(struct drm_device *dev);
void msm_gem_shrinker_cleanup(struct drm_device *dev);

int msm_gem_mmap_obj(struct drm_gem_object *obj,
		struct vm_area_struct *vma);
int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
vm_fault_t msm_gem_fault(struct vm_fault *vmf);
uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj);
int msm_gem_get_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova);
int msm_gem_get_and_pin_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova);
uint64_t msm_gem_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace);
void msm_gem_unpin_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace);

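/*
 * Illustrative usage sketch: pin a BO's IOVA before handing it to the
 * hardware and drop the pin afterwards (error handling abbreviated):
 *
 *	uint64_t iova;
 *
 *	ret = msm_gem_get_and_pin_iova(obj, aspace, &iova);
 *	if (ret)
 *		return ret;
 *	...program the hardware with iova...
 *	msm_gem_unpin_iova(obj, aspace);
 */
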
struct page **msm_gem_get_pages(struct drm_gem_object *obj);
void msm_gem_put_pages(struct drm_gem_object *obj);
int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
		struct drm_mode_create_dumb *args);
int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
		uint32_t handle, uint64_t *offset);
struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj);
void *msm_gem_prime_vmap(struct drm_gem_object *obj);
void msm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
int msm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev,
		struct dma_buf_attachment *attach, struct sg_table *sg);
int msm_gem_prime_pin(struct drm_gem_object *obj);
void msm_gem_prime_unpin(struct drm_gem_object *obj);

void *msm_gem_get_vaddr(struct drm_gem_object *obj);
void *msm_gem_get_vaddr_active(struct drm_gem_object *obj);
void msm_gem_put_vaddr(struct drm_gem_object *obj);
int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv);
int msm_gem_sync_object(struct drm_gem_object *obj,
		struct msm_fence_context *fctx, bool exclusive);
void msm_gem_move_to_active(struct drm_gem_object *obj,
		struct msm_gpu *gpu, bool exclusive, struct dma_fence *fence);
void msm_gem_move_to_inactive(struct drm_gem_object *obj);
int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout);
int msm_gem_cpu_fini(struct drm_gem_object *obj);
void msm_gem_free_object(struct drm_gem_object *obj);

int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		uint32_t size, uint32_t flags, uint32_t *handle, char *name);
struct drm_gem_object *msm_gem_new(struct drm_device *dev,
		uint32_t size, uint32_t flags);
struct drm_gem_object *msm_gem_new_locked(struct drm_device *dev,
		uint32_t size, uint32_t flags);
void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova);
void *msm_gem_kernel_new_locked(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova);
void msm_gem_kernel_put(struct drm_gem_object *bo,
		struct msm_gem_address_space *aspace, bool locked);

struct drm_gem_object *msm_gem_import(struct drm_device *dev,
		struct dma_buf *dmabuf, struct sg_table *sgt);
void msm_gem_free_work(struct work_struct *work);

__printf(2, 3)
void msm_gem_object_set_name(struct drm_gem_object *bo, const char *fmt, ...);

int msm_framebuffer_prepare(struct drm_framebuffer *fb,
		struct msm_gem_address_space *aspace);
void msm_framebuffer_cleanup(struct drm_framebuffer *fb,
		struct msm_gem_address_space *aspace);
uint32_t msm_framebuffer_iova(struct drm_framebuffer *fb,
		struct msm_gem_address_space *aspace, int plane);
struct drm_gem_object *msm_framebuffer_bo(struct drm_framebuffer *fb, int plane);
const struct msm_format *msm_framebuffer_format(struct drm_framebuffer *fb);
struct drm_framebuffer *msm_framebuffer_create(struct drm_device *dev,
		struct drm_file *file, const struct drm_mode_fb_cmd2 *mode_cmd);
struct drm_framebuffer *msm_alloc_stolen_fb(struct drm_device *dev,
		int w, int h, int p, uint32_t format);

struct drm_fb_helper *msm_fbdev_init(struct drm_device *dev);
void msm_fbdev_free(struct drm_device *dev);

struct hdmi;
int msm_hdmi_modeset_init(struct hdmi *hdmi, struct drm_device *dev,
		struct drm_encoder *encoder);
void __init msm_hdmi_register(void);
void __exit msm_hdmi_unregister(void);

struct msm_edp;
void __init msm_edp_register(void);
void __exit msm_edp_unregister(void);
int msm_edp_modeset_init(struct msm_edp *edp, struct drm_device *dev,
		struct drm_encoder *encoder);

struct msm_dsi;
#ifdef CONFIG_DRM_MSM_DSI
void __init msm_dsi_register(void);
void __exit msm_dsi_unregister(void);
int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev,
			 struct drm_encoder *encoder);
#else
static inline void __init msm_dsi_register(void)
{
}
static inline void __exit msm_dsi_unregister(void)
{
}
static inline int msm_dsi_modeset_init(struct msm_dsi *msm_dsi,
				       struct drm_device *dev,
				       struct drm_encoder *encoder)
{
	return -EINVAL;
}
#endif

void __init msm_mdp_register(void);
void __exit msm_mdp_unregister(void);
void __init msm_dpu_register(void);
void __exit msm_dpu_unregister(void);

#ifdef CONFIG_DEBUG_FS
void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m);
void msm_gem_describe_objects(struct list_head *list, struct seq_file *m);
void msm_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m);
int msm_debugfs_late_init(struct drm_device *dev);
int msm_rd_debugfs_init(struct drm_minor *minor);
void msm_rd_debugfs_cleanup(struct msm_drm_private *priv);
__printf(3, 4)
void msm_rd_dump_submit(struct msm_rd_state *rd, struct msm_gem_submit *submit,
		const char *fmt, ...);
int msm_perf_debugfs_init(struct drm_minor *minor);
void msm_perf_debugfs_cleanup(struct msm_drm_private *priv);
#else
static inline int msm_debugfs_late_init(struct drm_device *dev) { return 0; }
__printf(3, 4)
static inline void msm_rd_dump_submit(struct msm_rd_state *rd,
			struct msm_gem_submit *submit,
			const char *fmt, ...) {}
static inline void msm_rd_debugfs_cleanup(struct msm_drm_private *priv) {}
static inline void msm_perf_debugfs_cleanup(struct msm_drm_private *priv) {}
#endif

struct clk *msm_clk_get(struct platform_device *pdev, const char *name);

struct clk *msm_clk_bulk_get_clock(struct clk_bulk_data *bulk, int count,
		const char *name);
void __iomem *msm_ioremap(struct platform_device *pdev, const char *name,
		const char *dbgname);
void msm_writel(u32 data, void __iomem *addr);
u32 msm_readl(const void __iomem *addr);

struct msm_gpu_submitqueue;
int msm_submitqueue_init(struct drm_device *drm, struct msm_file_private *ctx);
struct msm_gpu_submitqueue *msm_submitqueue_get(struct msm_file_private *ctx,
		u32 id);
int msm_submitqueue_create(struct drm_device *drm, struct msm_file_private *ctx,
		u32 prio, u32 flags, u32 *id);
int msm_submitqueue_query(struct drm_device *drm, struct msm_file_private *ctx,
		struct drm_msm_submitqueue_query *args);
int msm_submitqueue_remove(struct msm_file_private *ctx, u32 id);
void msm_submitqueue_close(struct msm_file_private *ctx);

void msm_submitqueue_destroy(struct kref *kref);

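/*
 * Illustrative call flow (sketch only): queues live on a per-file
 * context and are referenced by id:
 *
 *	u32 id;
 *
 *	ret = msm_submitqueue_create(drm, ctx, prio, 0, &id);
 *	...
 *	queue = msm_submitqueue_get(ctx, id);
 *	...
 *	msm_submitqueue_remove(ctx, id);
 */
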
#define DBG(fmt, ...) DRM_DEBUG_DRIVER(fmt"\n", ##__VA_ARGS__)
#define VERB(fmt, ...) if (0) DRM_DEBUG_DRIVER(fmt"\n", ##__VA_ARGS__)

static inline int align_pitch(int width, int bpp)
{
	int bytespp = (bpp + 7) / 8;
	/* adreno needs pitch aligned to 32 pixels: */
	return bytespp * ALIGN(width, 32);
}

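/*
 * Worked example (illustrative): align_pitch(100, 32) computes
 * bytespp = (32 + 7) / 8 = 4 and ALIGN(100, 32) = 128, returning a
 * pitch of 512 bytes.
 */
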
/* for the generated headers: */
#define INVALID_IDX(idx) ({BUG(); 0;})
#define fui(x)                ({BUG(); 0;})
#define util_float_to_half(x) ({BUG(); 0;})

#define FIELD(val, name) (((val) & name ## __MASK) >> name ## __SHIFT)

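/*
 * Worked example (REG_FOO is a hypothetical generated-header field):
 * with REG_FOO__MASK 0x00f0 and REG_FOO__SHIFT 4, FIELD(0x0130, REG_FOO)
 * expands to ((0x0130 & 0x00f0) >> 4) == 0x3.
 */
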
/* for conditionally setting boolean flag(s): */
#define COND(bool, val)	((bool) ? (val) : 0)

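/*
 * Worked example (illustrative): COND(caps & MSM_DISPLAY_CAP_CMD_MODE,
 * BIT(0)) evaluates to BIT(0) when the capability bit is set, else 0.
 */
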
static inline unsigned long timeout_to_jiffies(const ktime_t *timeout)
{
	ktime_t now = ktime_get();
	unsigned long remaining_jiffies;

	if (ktime_compare(*timeout, now) < 0) {
		remaining_jiffies = 0;
	} else {
		ktime_t rem = ktime_sub(*timeout, now);
		remaining_jiffies = ktime_divns(rem, NSEC_PER_SEC / HZ);
	}

	return remaining_jiffies;
}

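/*
 * Worked example (illustrative): with HZ = 100, a *timeout one second in
 * the future yields ktime_divns(NSEC_PER_SEC, NSEC_PER_SEC / 100) = 100
 * jiffies; an already-expired *timeout yields 0.
 */
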
#endif /* __MSM_DRV_H__ */