1 // SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013-2016 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */
#include <linux/dma-fence.h>

#include "msm_drv.h"
#include "msm_fence.h"
#include "msm_gpu.h"
13 static struct msm_gpu
*fctx2gpu(struct msm_fence_context
*fctx
)
15 struct msm_drm_private
*priv
= fctx
->dev
->dev_private
;
19 static enum hrtimer_restart
deadline_timer(struct hrtimer
*t
)
21 struct msm_fence_context
*fctx
= container_of(t
,
22 struct msm_fence_context
, deadline_timer
);
24 kthread_queue_work(fctx2gpu(fctx
)->worker
, &fctx
->deadline_work
);
26 return HRTIMER_NORESTART
;
29 static void deadline_work(struct kthread_work
*work
)
31 struct msm_fence_context
*fctx
= container_of(work
,
32 struct msm_fence_context
, deadline_work
);
34 /* If deadline fence has already passed, nothing to do: */
35 if (msm_fence_completed(fctx
, fctx
->next_deadline_fence
))
38 msm_devfreq_boost(fctx2gpu(fctx
), 2);
42 struct msm_fence_context
*
43 msm_fence_context_alloc(struct drm_device
*dev
, volatile uint32_t *fenceptr
,
46 struct msm_fence_context
*fctx
;
49 fctx
= kzalloc(sizeof(*fctx
), GFP_KERNEL
);
51 return ERR_PTR(-ENOMEM
);
54 strscpy(fctx
->name
, name
, sizeof(fctx
->name
));
55 fctx
->context
= dma_fence_context_alloc(1);
56 fctx
->index
= index
++;
57 fctx
->fenceptr
= fenceptr
;
58 spin_lock_init(&fctx
->spinlock
);
61 * Start out close to the 32b fence rollover point, so we can
62 * catch bugs with fence comparisons.
64 fctx
->last_fence
= 0xffffff00;
65 fctx
->completed_fence
= fctx
->last_fence
;
66 *fctx
->fenceptr
= fctx
->last_fence
;
68 hrtimer_init(&fctx
->deadline_timer
, CLOCK_MONOTONIC
, HRTIMER_MODE_ABS
);
69 fctx
->deadline_timer
.function
= deadline_timer
;
71 kthread_init_work(&fctx
->deadline_work
, deadline_work
);
73 fctx
->next_deadline
= ktime_get();
/* Release a fence context allocated with msm_fence_context_alloc(). */
void msm_fence_context_free(struct msm_fence_context *fctx)
{
	kfree(fctx);
}
83 bool msm_fence_completed(struct msm_fence_context
*fctx
, uint32_t fence
)
86 * Note: Check completed_fence first, as fenceptr is in a write-combine
87 * mapping, so it will be more expensive to read.
89 return (int32_t)(fctx
->completed_fence
- fence
) >= 0 ||
90 (int32_t)(*fctx
->fenceptr
- fence
) >= 0;
93 /* called from irq handler and workqueue (in recover path) */
94 void msm_update_fence(struct msm_fence_context
*fctx
, uint32_t fence
)
98 spin_lock_irqsave(&fctx
->spinlock
, flags
);
99 if (fence_after(fence
, fctx
->completed_fence
))
100 fctx
->completed_fence
= fence
;
101 if (msm_fence_completed(fctx
, fctx
->next_deadline_fence
))
102 hrtimer_cancel(&fctx
->deadline_timer
);
103 spin_unlock_irqrestore(&fctx
->spinlock
, flags
);
107 struct dma_fence base
;
108 struct msm_fence_context
*fctx
;
111 static inline struct msm_fence
*to_msm_fence(struct dma_fence
*fence
)
113 return container_of(fence
, struct msm_fence
, base
);
/* dma_fence_ops::get_driver_name */
static const char *msm_fence_get_driver_name(struct dma_fence *fence)
{
	return "msm";
}
121 static const char *msm_fence_get_timeline_name(struct dma_fence
*fence
)
123 struct msm_fence
*f
= to_msm_fence(fence
);
124 return f
->fctx
->name
;
127 static bool msm_fence_signaled(struct dma_fence
*fence
)
129 struct msm_fence
*f
= to_msm_fence(fence
);
130 return msm_fence_completed(f
->fctx
, f
->base
.seqno
);
133 static void msm_fence_set_deadline(struct dma_fence
*fence
, ktime_t deadline
)
135 struct msm_fence
*f
= to_msm_fence(fence
);
136 struct msm_fence_context
*fctx
= f
->fctx
;
140 spin_lock_irqsave(&fctx
->spinlock
, flags
);
143 if (ktime_after(now
, fctx
->next_deadline
) ||
144 ktime_before(deadline
, fctx
->next_deadline
)) {
145 fctx
->next_deadline
= deadline
;
146 fctx
->next_deadline_fence
=
147 max(fctx
->next_deadline_fence
, (uint32_t)fence
->seqno
);
150 * Set timer to trigger boost 3ms before deadline, or
151 * if we are already less than 3ms before the deadline
152 * schedule boost work immediately.
154 deadline
= ktime_sub(deadline
, ms_to_ktime(3));
156 if (ktime_after(now
, deadline
)) {
157 kthread_queue_work(fctx2gpu(fctx
)->worker
,
158 &fctx
->deadline_work
);
160 hrtimer_start(&fctx
->deadline_timer
, deadline
,
165 spin_unlock_irqrestore(&fctx
->spinlock
, flags
);
168 static const struct dma_fence_ops msm_fence_ops
= {
169 .get_driver_name
= msm_fence_get_driver_name
,
170 .get_timeline_name
= msm_fence_get_timeline_name
,
171 .signaled
= msm_fence_signaled
,
172 .set_deadline
= msm_fence_set_deadline
,
176 msm_fence_alloc(void)
180 f
= kzalloc(sizeof(*f
), GFP_KERNEL
);
182 return ERR_PTR(-ENOMEM
);
188 msm_fence_init(struct dma_fence
*fence
, struct msm_fence_context
*fctx
)
190 struct msm_fence
*f
= to_msm_fence(fence
);
195 * Until this point, the fence was just some pre-allocated memory,
196 * no-one should have taken a reference to it yet.
198 WARN_ON(kref_read(&fence
->refcount
));
200 dma_fence_init(&f
->base
, &msm_fence_ops
, &fctx
->spinlock
,
201 fctx
->context
, ++fctx
->last_fence
);