[linux/fpc-iii.git] drivers/gpu/drm/vc4/vc4_drv.h
/*
 * Copyright (C) 2015 Broadcom
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/reservation.h>
#include <drm/drmP.h>
#include <drm/drm_encoder.h>
#include <drm/drm_gem_cma_helper.h>
/* Don't forget to update vc4_bo.c: bo_type_names[] when adding to
 * this.
 */
enum vc4_kernel_bo_type {
	/* Any kernel allocation (gem_create_object hook) before it
	 * gets another type set.
	 */
	VC4_BO_TYPE_KERNEL,
	VC4_BO_TYPE_V3D,
	VC4_BO_TYPE_V3D_SHADER,
	VC4_BO_TYPE_DUMB,
	VC4_BO_TYPE_BIN,
	VC4_BO_TYPE_RCL,
	VC4_BO_TYPE_BCL,
	VC4_BO_TYPE_KERNEL_CACHE,
	VC4_BO_TYPE_COUNT
};
struct vc4_dev {
	struct drm_device *dev;

	struct vc4_hdmi *hdmi;
	struct vc4_hvs *hvs;
	struct vc4_v3d *v3d;
	struct vc4_dpi *dpi;
	struct vc4_dsi *dsi1;
	struct vc4_vec *vec;

	struct vc4_hang_state *hang_state;

	/* The kernel-space BO cache. Tracks buffers that have been
	 * unreferenced by all other users (refcounts of 0!) but not
	 * yet freed, so we can do cheap allocations.
	 */
	struct vc4_bo_cache {
		/* Array of list heads for entries in the BO cache,
		 * based on number of pages, so we can do O(1) lookups
		 * in the cache when allocating.
		 */
		struct list_head *size_list;
		uint32_t size_list_size;

		/* List of all BOs in the cache, ordered by age, so we
		 * can do O(1) lookups when trying to free old
		 * buffers.
		 */
		struct list_head time_list;
		struct work_struct time_work;
		struct timer_list time_timer;
	} bo_cache;

	u32 num_labels;
	struct vc4_label {
		const char *name;
		u32 num_allocated;
		u32 size_allocated;
	} *bo_labels;

	/* Protects bo_cache and bo_labels. */
	struct mutex bo_lock;

	/* Purgeable BO pool. All BOs in this pool can have their memory
	 * reclaimed if the driver is unable to allocate new BOs. We also
	 * keep stats related to the purge mechanism here.
	 */
	struct {
		struct list_head list;
		unsigned int num;
		size_t size;
		unsigned int purged_num;
		size_t purged_size;
		struct mutex lock;
	} purgeable;

	uint64_t dma_fence_context;

	/* Sequence number for the last job queued in bin_job_list.
	 * Starts at 0 (no jobs emitted).
	 */
	uint64_t emit_seqno;

	/* Sequence number for the last completed job on the GPU.
	 * Starts at 0 (no jobs completed).
	 */
	uint64_t finished_seqno;

	/* List of all struct vc4_exec_info for jobs to be executed in
	 * the binner. The first job in the list is the one currently
	 * programmed into ct0ca for execution.
	 */
	struct list_head bin_job_list;

	/* List of all struct vc4_exec_info for jobs that have
	 * completed binning and are ready for rendering. The first
	 * job in the list is the one currently programmed into ct1ca
	 * for execution.
	 */
	struct list_head render_job_list;

	/* List of the finished vc4_exec_infos waiting to be freed by
	 * job_done_work.
	 */
	struct list_head job_done_list;
	/* Spinlock used to synchronize the job_list and seqno
	 * accesses between the IRQ handler and GEM ioctls.
	 */
	spinlock_t job_lock;
	wait_queue_head_t job_wait_queue;
	struct work_struct job_done_work;

	/* List of struct vc4_seqno_cb for callbacks to be made from a
	 * workqueue when the given seqno is passed.
	 */
	struct list_head seqno_cb_list;

	/* The memory used for storing binner tile alloc, tile state,
	 * and overflow memory allocations. This is freed when V3D
	 * powers down.
	 */
	struct vc4_bo *bin_bo;

	/* Size of blocks allocated within bin_bo. */
	uint32_t bin_alloc_size;

	/* Bitmask of the bin_alloc_size chunks in bin_bo that are
	 * used.
	 */
	uint32_t bin_alloc_used;

	/* Bitmask of the current bin_alloc used for overflow memory. */
	uint32_t bin_alloc_overflow;

	struct work_struct overflow_mem_work;

	int power_refcount;

	/* Mutex controlling the power refcount. */
	struct mutex power_lock;

	struct {
		struct timer_list timer;
		struct work_struct reset_work;
	} hangcheck;

	struct semaphore async_modeset;
};
static inline struct vc4_dev *
to_vc4_dev(struct drm_device *dev)
{
	return (struct vc4_dev *)dev->dev_private;
}
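/* dev->dev_private points at the struct vc4_dev (set up when the driver
 * binds), so this cast is how the rest of the driver typically recovers its
 * state from a struct drm_device:
 *
 *	struct vc4_dev *vc4 = to_vc4_dev(dev);
 */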
struct vc4_bo {
	struct drm_gem_cma_object base;

	/* seqno of the last job to render using this BO. */
	uint64_t seqno;

	/* seqno of the last job to use the RCL to write to this BO.
	 *
	 * Note that this doesn't include binner overflow memory
	 * writes.
	 */
	uint64_t write_seqno;

	bool t_format;

	/* List entry for the BO's position in either
	 * vc4_exec_info->unref_list or vc4_dev->bo_cache.time_list
	 */
	struct list_head unref_head;

	/* Time in jiffies when the BO was put in vc4->bo_cache. */
	unsigned long free_time;

	/* List entry for the BO's position in vc4_dev->bo_cache.size_list */
	struct list_head size_head;

	/* Struct for shader validation state, if created by
	 * DRM_IOCTL_VC4_CREATE_SHADER_BO.
	 */
	struct vc4_validated_shader_info *validated_shader;

	/* normally (resv == &_resv) except for imported bo's */
	struct reservation_object *resv;
	struct reservation_object _resv;

	/* One of enum vc4_kernel_bo_type, or VC4_BO_TYPE_COUNT + i
	 * for user-allocated labels.
	 */
	int label;

	/* Count the number of active users. This is needed to determine
	 * whether we can move the BO to the purgeable list or not (when the BO
	 * is used by the GPU or the display engine we can't purge it).
	 */
	refcount_t usecnt;

	/* Store purgeable/purged state here */
	u32 madv;
	struct mutex madv_lock;
};
static inline struct vc4_bo *
to_vc4_bo(struct drm_gem_object *bo)
{
	return (struct vc4_bo *)bo;
}
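/* The cast above (and the to_vc4_fence()/to_vc4_plane() helpers below) is
 * only valid because the DRM base object is the first member of the wrapping
 * vc4 struct, so both pointers refer to the same address.
 */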
struct vc4_fence {
	struct dma_fence base;
	struct drm_device *dev;
	/* vc4 seqno for signaled() test */
	uint64_t seqno;
};

static inline struct vc4_fence *
to_vc4_fence(struct dma_fence *fence)
{
	return (struct vc4_fence *)fence;
}
struct vc4_seqno_cb {
	struct work_struct work;
	uint64_t seqno;
	void (*func)(struct vc4_seqno_cb *cb);
};

struct vc4_v3d {
	struct vc4_dev *vc4;
	struct platform_device *pdev;
	void __iomem *regs;
	struct clk *clk;
};
struct vc4_hvs {
	struct platform_device *pdev;
	void __iomem *regs;
	u32 __iomem *dlist;

	/* Memory manager for CRTCs to allocate space in the display
	 * list. Units are dwords.
	 */
	struct drm_mm dlist_mm;
	/* Memory manager for the LBM memory used by HVS scaling. */
	struct drm_mm lbm_mm;
	spinlock_t mm_lock;

	struct drm_mm_node mitchell_netravali_filter;
};
struct vc4_plane {
	struct drm_plane base;
};

static inline struct vc4_plane *
to_vc4_plane(struct drm_plane *plane)
{
	return (struct vc4_plane *)plane;
}
enum vc4_encoder_type {
	VC4_ENCODER_TYPE_NONE,
	VC4_ENCODER_TYPE_HDMI,
	VC4_ENCODER_TYPE_VEC,
	VC4_ENCODER_TYPE_DSI0,
	VC4_ENCODER_TYPE_DSI1,
	VC4_ENCODER_TYPE_SMI,
	VC4_ENCODER_TYPE_DPI,
};

struct vc4_encoder {
	struct drm_encoder base;
	enum vc4_encoder_type type;
	u32 clock_select;
};

static inline struct vc4_encoder *
to_vc4_encoder(struct drm_encoder *encoder)
{
	return container_of(encoder, struct vc4_encoder, base);
}
#define V3D_READ(offset) readl(vc4->v3d->regs + offset)
#define V3D_WRITE(offset, val) writel(val, vc4->v3d->regs + offset)
#define HVS_READ(offset) readl(vc4->hvs->regs + offset)
#define HVS_WRITE(offset, val) writel(val, vc4->hvs->regs + offset)
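/* These accessors assume a local `struct vc4_dev *vc4` in scope at the call
 * site and do 32-bit MMIO accesses to the V3D or HVS register block. A
 * minimal usage sketch (the register names are from vc4_regs.h and purely
 * illustrative here):
 *
 *	u32 ident = V3D_READ(V3D_IDENT0);
 *	u32 dispctrl = HVS_READ(SCALER_DISPCTRL);
 */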
struct vc4_exec_info {
	/* Sequence number for this bin/render job. */
	uint64_t seqno;

	/* Latest write_seqno of any BO that binning depends on. */
	uint64_t bin_dep_seqno;

	struct dma_fence *fence;

	/* Last current addresses the hardware was processing when the
	 * hangcheck timer checked on us.
	 */
	uint32_t last_ct0ca, last_ct1ca;

	/* Kernel-space copy of the ioctl arguments */
	struct drm_vc4_submit_cl *args;

	/* This is the array of BOs that were looked up at the start of exec.
	 * Command validation will use indices into this array.
	 */
	struct drm_gem_cma_object **bo;
	uint32_t bo_count;

	/* List of BOs that are being written by the RCL. Other than
	 * the binner temporary storage, this is all the BOs written
	 * by the job.
	 */
	struct drm_gem_cma_object *rcl_write_bo[4];
	uint32_t rcl_write_bo_count;

	/* Pointers for our position in vc4->job_list */
	struct list_head head;

	/* List of other BOs used in the job that need to be released
	 * once the job is complete.
	 */
	struct list_head unref_list;

	/* Current unvalidated indices into @bo loaded by the non-hardware
	 * VC4_PACKET_GEM_HANDLES.
	 */
	uint32_t bo_index[2];

	/* This is the BO where we store the validated command lists, shader
	 * records, and uniforms.
	 */
	struct drm_gem_cma_object *exec_bo;

	/**
	 * This tracks the per-shader-record state (packet 64) that
	 * determines the length of the shader record and the offset
	 * it's expected to be found at. It gets read in from the
	 * command lists.
	 */
	struct vc4_shader_state {
		uint32_t addr;
		/* Maximum vertex index referenced by any primitive using this
		 * shader state.
		 */
		uint32_t max_index;
	} *shader_state;

	/** How many shader states the user declared they were using. */
	uint32_t shader_state_size;
	/** How many shader state records the validator has seen. */
	uint32_t shader_state_count;

	bool found_tile_binning_mode_config_packet;
	bool found_start_tile_binning_packet;
	bool found_increment_semaphore_packet;
	bool found_flush;
	uint8_t bin_tiles_x, bin_tiles_y;
	/* Physical address of the start of the tile alloc array
	 * (where each tile's binned CL will start)
	 */
	uint32_t tile_alloc_offset;
	/* Bitmask of which binner slots are freed when this job completes. */
	uint32_t bin_slots;

	/**
	 * Computed addresses pointing into exec_bo where we start the
	 * bin thread (ct0) and render thread (ct1).
	 */
	uint32_t ct0ca, ct0ea;
	uint32_t ct1ca, ct1ea;

	/* Pointer to the unvalidated bin CL (if present). */
	void *bin_u;

	/* Pointers to the shader recs. The paddr (shader_rec_p) gets
	 * incremented as CL packets are relocated in
	 * validate_gl_shader_state, and the vaddrs (u and v) get incremented
	 * and the size decremented as the shader recs themselves are
	 * validated.
	 */
	void *shader_rec_u;
	void *shader_rec_v;
	uint32_t shader_rec_p;
	uint32_t shader_rec_size;

	/* Pointers to the uniform data. These pointers are incremented, and
	 * size decremented, as each batch of uniforms is uploaded.
	 */
	void *uniforms_u;
	void *uniforms_v;
	uint32_t uniforms_p;
	uint32_t uniforms_size;
};
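/* Rough lifecycle of an exec, as implied by the fields above: the submit
 * ioctl looks up the referenced BOs into @bo, the validators copy the bin
 * command list, shader recs and uniforms into @exec_bo, and the resulting
 * ct0ca/ct0ea (binner) and ct1ca/ct1ea (renderer) ranges are what the
 * hardware gets pointed at. The exec then moves through vc4->bin_job_list,
 * vc4->render_job_list and vc4->job_done_list as it progresses.
 */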
static inline struct vc4_exec_info *
vc4_first_bin_job(struct vc4_dev *vc4)
{
	return list_first_entry_or_null(&vc4->bin_job_list,
					struct vc4_exec_info, head);
}

static inline struct vc4_exec_info *
vc4_first_render_job(struct vc4_dev *vc4)
{
	return list_first_entry_or_null(&vc4->render_job_list,
					struct vc4_exec_info, head);
}

static inline struct vc4_exec_info *
vc4_last_render_job(struct vc4_dev *vc4)
{
	if (list_empty(&vc4->render_job_list))
		return NULL;
	return list_last_entry(&vc4->render_job_list,
			       struct vc4_exec_info, head);
}
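/* These helpers only peek at the job lists; callers are expected to hold
 * vc4->job_lock (see the comment above that field), since the IRQ handler
 * manipulates the same lists.
 */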
/**
 * struct vc4_texture_sample_info - saves the offsets into the UBO for texture
 * setup parameters.
 *
 * This will be used at draw time to relocate the reference to the texture
 * contents in p0, and validate that the offset combined with
 * width/height/stride/etc. from p1 and p2/p3 doesn't sample outside the BO.
 * Note that the hardware treats unprovided config parameters as 0, so not all
 * of them need to be set up for every texture sample, and we'll store ~0 as
 * the offset to mark the unused ones.
 *
 * See the VC4 3D architecture guide page 41 ("Texture and Memory Lookup Unit
 * Setup") for definitions of the texture parameters.
 */
struct vc4_texture_sample_info {
	bool is_direct;
	uint32_t p_offset[4];
};
/**
 * struct vc4_validated_shader_info - information about validated shaders that
 * needs to be used from command list validation.
 *
 * For a given shader, each time a shader state record references it, we need
 * to verify that the shader doesn't read more uniforms than the shader state
 * record's uniform BO pointer can provide, and we need to apply relocations
 * and validate the shader state record's uniforms that define the texture
 * samples.
 */
struct vc4_validated_shader_info {
	uint32_t uniforms_size;
	uint32_t uniforms_src_size;
	uint32_t num_texture_samples;
	struct vc4_texture_sample_info *texture_samples;

	uint32_t num_uniform_addr_offsets;
	uint32_t *uniform_addr_offsets;

	bool is_threaded;
};
/**
 * _wait_for - magic (register) wait macro
 *
 * Does the right thing for modeset paths when run under kgdb or similar atomic
 * contexts. Note that it's important that we check the condition again after
 * having timed out, since the timeout could be due to preemption or similar and
 * we've never had a chance to check the condition before the timeout.
 */
#define _wait_for(COND, MS, W) ({ \
	unsigned long timeout__ = jiffies + msecs_to_jiffies(MS) + 1; \
	int ret__ = 0; \
	while (!(COND)) { \
		if (time_after(jiffies, timeout__)) { \
			if (!(COND)) \
				ret__ = -ETIMEDOUT; \
			break; \
		} \
		if (W && drm_can_sleep()) { \
			msleep(W); \
		} else { \
			cpu_relax(); \
		} \
	} \
	ret__; \
})

#define wait_for(COND, MS) _wait_for(COND, MS, 1)
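/* A minimal usage sketch (the condition is purely illustrative): wait up to
 * 30ms for a register bit to clear, sleeping ~1ms between polls when
 * drm_can_sleep() allows it and busy-waiting otherwise. A zero return means
 * the condition became true; -ETIMEDOUT means it never did.
 *
 *	if (wait_for((V3D_READ(V3D_PCS) & V3D_PCS_BMBUSY) == 0, 30))
 *		DRM_ERROR("timed out waiting for the binner to go idle\n");
 */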
/* vc4_bo.c */
struct drm_gem_object *vc4_create_object(struct drm_device *dev, size_t size);
void vc4_free_object(struct drm_gem_object *gem_obj);
struct vc4_bo *vc4_bo_create(struct drm_device *dev, size_t size,
			     bool from_cache, enum vc4_kernel_bo_type type);
int vc4_dumb_create(struct drm_file *file_priv,
		    struct drm_device *dev,
		    struct drm_mode_create_dumb *args);
struct dma_buf *vc4_prime_export(struct drm_device *dev,
				 struct drm_gem_object *obj, int flags);
int vc4_create_bo_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
int vc4_create_shader_bo_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file_priv);
int vc4_mmap_bo_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv);
int vc4_set_tiling_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv);
int vc4_get_tiling_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv);
int vc4_get_hang_state_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv);
int vc4_label_bo_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv);
int vc4_fault(struct vm_fault *vmf);
int vc4_mmap(struct file *filp, struct vm_area_struct *vma);
struct reservation_object *vc4_prime_res_obj(struct drm_gem_object *obj);
int vc4_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
struct drm_gem_object *vc4_prime_import_sg_table(struct drm_device *dev,
						 struct dma_buf_attachment *attach,
						 struct sg_table *sgt);
void *vc4_prime_vmap(struct drm_gem_object *obj);
int vc4_bo_cache_init(struct drm_device *dev);
void vc4_bo_cache_destroy(struct drm_device *dev);
int vc4_bo_stats_debugfs(struct seq_file *m, void *arg);
int vc4_bo_inc_usecnt(struct vc4_bo *bo);
void vc4_bo_dec_usecnt(struct vc4_bo *bo);
void vc4_bo_add_to_purgeable_pool(struct vc4_bo *bo);
void vc4_bo_remove_from_purgeable_pool(struct vc4_bo *bo);
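/* Sketch of the usecnt/purgeable flow suggested by the struct vc4_bo
 * comments (illustrative only): take a use-count reference while the
 * hardware needs the BO's pages resident, and drop it afterwards:
 *
 *	ret = vc4_bo_inc_usecnt(bo);
 *	if (ret)
 *		return ret;
 *	... point the scanout/job at bo->base.paddr ...
 *	vc4_bo_dec_usecnt(bo);
 *
 * BOs that userspace has marked purgeable sit in vc4->purgeable while
 * unused, where the driver may reclaim their memory under allocation
 * pressure.
 */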
/* vc4_crtc.c */
extern struct platform_driver vc4_crtc_driver;
int vc4_crtc_debugfs_regs(struct seq_file *m, void *arg);
bool vc4_crtc_get_scanoutpos(struct drm_device *dev, unsigned int crtc_id,
			     bool in_vblank_irq, int *vpos, int *hpos,
			     ktime_t *stime, ktime_t *etime,
			     const struct drm_display_mode *mode);
/* vc4_debugfs.c */
int vc4_debugfs_init(struct drm_minor *minor);

/* vc4_drv.c */
void __iomem *vc4_ioremap_regs(struct platform_device *dev, int index);

/* vc4_dpi.c */
extern struct platform_driver vc4_dpi_driver;
int vc4_dpi_debugfs_regs(struct seq_file *m, void *unused);

/* vc4_dsi.c */
extern struct platform_driver vc4_dsi_driver;
int vc4_dsi_debugfs_regs(struct seq_file *m, void *unused);

/* vc4_fence.c */
extern const struct dma_fence_ops vc4_fence_ops;

/* vc4_gem.c */
void vc4_gem_init(struct drm_device *dev);
void vc4_gem_destroy(struct drm_device *dev);
int vc4_submit_cl_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
int vc4_wait_seqno_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv);
int vc4_wait_bo_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv);
void vc4_submit_next_bin_job(struct drm_device *dev);
void vc4_submit_next_render_job(struct drm_device *dev);
void vc4_move_job_to_render(struct drm_device *dev, struct vc4_exec_info *exec);
int vc4_wait_for_seqno(struct drm_device *dev, uint64_t seqno,
		       uint64_t timeout_ns, bool interruptible);
void vc4_job_handle_completed(struct vc4_dev *vc4);
int vc4_queue_seqno_cb(struct drm_device *dev,
		       struct vc4_seqno_cb *cb, uint64_t seqno,
		       void (*func)(struct vc4_seqno_cb *cb));
int vc4_gem_madvise_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file_priv);

/* vc4_hdmi.c */
extern struct platform_driver vc4_hdmi_driver;
int vc4_hdmi_debugfs_regs(struct seq_file *m, void *unused);

/* vc4_vec.c */
extern struct platform_driver vc4_vec_driver;
int vc4_vec_debugfs_regs(struct seq_file *m, void *unused);

/* vc4_irq.c */
irqreturn_t vc4_irq(int irq, void *arg);
void vc4_irq_preinstall(struct drm_device *dev);
int vc4_irq_postinstall(struct drm_device *dev);
void vc4_irq_uninstall(struct drm_device *dev);
void vc4_irq_reset(struct drm_device *dev);
/* vc4_hvs.c */
extern struct platform_driver vc4_hvs_driver;
void vc4_hvs_dump_state(struct drm_device *dev);
int vc4_hvs_debugfs_regs(struct seq_file *m, void *unused);

/* vc4_kms.c */
int vc4_kms_load(struct drm_device *dev);

/* vc4_plane.c */
struct drm_plane *vc4_plane_init(struct drm_device *dev,
				 enum drm_plane_type type);
u32 vc4_plane_write_dlist(struct drm_plane *plane, u32 __iomem *dlist);
u32 vc4_plane_dlist_size(const struct drm_plane_state *state);
void vc4_plane_async_set_fb(struct drm_plane *plane,
			    struct drm_framebuffer *fb);

/* vc4_v3d.c */
extern struct platform_driver vc4_v3d_driver;
int vc4_v3d_debugfs_ident(struct seq_file *m, void *unused);
int vc4_v3d_debugfs_regs(struct seq_file *m, void *unused);
int vc4_v3d_get_bin_slot(struct vc4_dev *vc4);
/* vc4_validate.c */
int
vc4_validate_bin_cl(struct drm_device *dev,
		    void *validated,
		    void *unvalidated,
		    struct vc4_exec_info *exec);

int
vc4_validate_shader_recs(struct drm_device *dev, struct vc4_exec_info *exec);

struct drm_gem_cma_object *vc4_use_bo(struct vc4_exec_info *exec,
				      uint32_t hindex);

int vc4_get_rcl(struct drm_device *dev, struct vc4_exec_info *exec);

bool vc4_check_tex_size(struct vc4_exec_info *exec,
			struct drm_gem_cma_object *fbo,
			uint32_t offset, uint8_t tiling_format,
			uint32_t width, uint32_t height, uint8_t cpp);

/* vc4_validate_shader.c */
struct vc4_validated_shader_info *
vc4_validate_shader(struct drm_gem_cma_object *shader_obj);