drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c
/*
 * Copyright (c) 2014, The Linux Foundation. All rights reserved.
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include "mdp5_kms.h"
#include "mdp5_smp.h"
/* SMP - Shared Memory Pool
 *
 * These are shared between all the clients, where each plane in a
 * scanout buffer is an SMP client.  Ie. scanout of 3 plane I420 on
 * pipe VIG0 => 3 clients: VIG0_Y, VIG0_CB, VIG0_CR.
 *
 * Based on the size of the attached scanout buffer, a certain # of
 * blocks must be allocated to that client out of the shared pool.
 *
 * In some hw, some blocks are statically allocated for certain pipes
 * and CANNOT be re-allocated (eg: MMB0 and MMB1 both tied to RGB0).
 *
 * For each block that can be dynamically allocated, it can be either
 * free, or pending/in-use by a client. The updates happen in three steps:
 *
 * 1) mdp5_smp_request():
 *    When plane scanout is setup, calculate required number of
 *    blocks needed per client, and request.  Blocks not inuse or
 *    pending by any other client are added to the client's pending
 *    set.
 *
 * 2) mdp5_smp_configure():
 *    As hw is programmed, before FLUSH, MDP5_MDP_SMP_ALLOC registers
 *    are configured for the union(pending, inuse).
 *
 * 3) mdp5_smp_commit():
 *    After next vblank, copy pending -> inuse.  Optionally update
 *    MDP5_SMP_ALLOC registers if there are newly unused blocks.
 *
 * On the next vblank after changes have been committed to hw, the
 * client's pending blocks become its in-use blocks (and no-longer
 * in-use blocks become available to other clients).
 *
 * btw, hurray for confusing overloaded acronyms!  :-/
 *
 * NOTE: for atomic modeset/pageflip NONBLOCK operations, step #1
 * should happen at (or before)? atomic->check().  And we'd need
 * an API to discard previous requests if the update is aborted or
 * test-only.
 *
 * TODO would perhaps be nice to have debugfs to dump out kernel
 * inuse and pending state of all clients..
 */
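/*
 * Illustrative call sequence for the three steps above (a sketch only; the
 * real callers are the plane/crtc code elsewhere in the driver, and the
 * pipe, format and width values are made up for the example):
 *
 *	ret = mdp5_smp_request(smp, SSPP_VIG0, DRM_FORMAT_YUV420, 1920);
 *	if (ret)
 *		return ret;			(not enough free MMBs)
 *	... program pipe registers ...
 *	mdp5_smp_configure(smp, SSPP_VIG0);	(before FLUSH)
 *	... FLUSH, wait for vblank ...
 *	mdp5_smp_commit(smp, SSPP_VIG0);	(pending -> inuse)
 */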
struct mdp5_smp {
	struct drm_device *dev;

	int blk_cnt;
	int blk_size;

	spinlock_t state_lock;
	mdp5_smp_state_t state; /* to track smp allocation amongst pipes: */

	struct mdp5_client_smp_state client_state[MAX_CLIENTS];
};
static inline
struct mdp5_kms *get_kms(struct mdp5_smp *smp)
{
	struct msm_drm_private *priv = smp->dev->dev_private;

	return to_mdp5_kms(to_mdp_kms(priv->kms));
}
static inline u32 pipe2client(enum mdp5_pipe pipe, int plane)
{
#define CID_UNUSED	0

	if (WARN_ON(plane >= pipe2nclients(pipe)))
		return CID_UNUSED;

	/*
	 * Note on SMP clients:
	 * For ViG pipes, fetch Y/Cr/Cb-components clients are always
	 * consecutive, and in that order.
	 *
	 * e.g.:
	 * if mdp5_cfg->smp.clients[SSPP_VIG0] = N,
	 *	Y  plane's client ID is N
	 *	Cr plane's client ID is N + 1
	 *	Cb plane's client ID is N + 2
	 */

	return mdp5_cfg->smp.clients[pipe] + plane;
}
/* step #1: update # of blocks pending for the client: */
static int smp_request_block(struct mdp5_smp *smp,
		u32 cid, int nblks)
{
	struct mdp5_kms *mdp5_kms = get_kms(smp);
	const struct mdp5_cfg_hw *hw_cfg;
	struct mdp5_client_smp_state *ps = &smp->client_state[cid];
	int i, ret, avail, cur_nblks, cnt = smp->blk_cnt;
	int reserved;
	unsigned long flags;

	hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);
	reserved = hw_cfg->smp.reserved[cid];

	spin_lock_irqsave(&smp->state_lock, flags);

	if (reserved) {
		nblks = max(0, nblks - reserved);
		DBG("%d MMBs allocated (%d reserved)", nblks, reserved);
	}

	avail = cnt - bitmap_weight(smp->state, cnt);
	if (nblks > avail) {
		dev_err(mdp5_kms->dev->dev, "out of blks (req=%d > avail=%d)\n",
				nblks, avail);
		ret = -ENOSPC;
		goto fail;
	}

	cur_nblks = bitmap_weight(ps->pending, cnt);
	if (nblks > cur_nblks) {
		/* grow the existing pending reservation: */
		for (i = cur_nblks; i < nblks; i++) {
			int blk = find_first_zero_bit(smp->state, cnt);
			set_bit(blk, ps->pending);
			set_bit(blk, smp->state);
		}
	} else {
		/* shrink the existing pending reservation: */
		for (i = cur_nblks; i > nblks; i--) {
			int blk = find_first_bit(ps->pending, cnt);
			clear_bit(blk, ps->pending);
			/* don't clear in global smp_state until _commit() */
		}
	}

	ret = 0;
fail:
	spin_unlock_irqrestore(&smp->state_lock, flags);
	return ret;
}
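/*
 * Example of the grow/shrink behaviour above (illustrative numbers): if a
 * client has 4 blocks pending and now needs 6, two free bits are claimed
 * from smp->state and added to ps->pending; if it later only needs 2, two
 * bits are dropped from ps->pending but remain set in smp->state, so they
 * only return to the free pool once mdp5_smp_commit() runs.
 */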
static void set_fifo_thresholds(struct mdp5_smp *smp,
		enum mdp5_pipe pipe, int nblks)
{
	struct mdp5_kms *mdp5_kms = get_kms(smp);
	u32 smp_entries_per_blk = smp->blk_size / (128 / BITS_PER_BYTE);
	u32 val;

	/* 1/4 of SMP pool that is being fetched */
	val = (nblks * smp_entries_per_blk) / 4;

	mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_0(pipe), val * 1);
	mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_1(pipe), val * 2);
	mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_2(pipe), val * 3);
}
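/*
 * Worked example for the watermark math above, assuming a 4 KiB MMB (the
 * real block size comes from the hw config, so treat these numbers as
 * illustrative): each block holds 4096 / 16 = 256 128-bit entries, so with
 * nblks = 3 the quarter-pool value is 3 * 256 / 4 = 192 and the WM_0/1/2
 * thresholds are programmed to 192, 384 and 576 respectively.
 */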
/*
 * NOTE: looks like if horizontal decimation is used (if we supported that)
 * then the width used to calculate SMP block requirements is the post-
 * decimated width.  Ie. SMP buffering sits downstream of decimation (which
 * presumably happens during the dma from scanout buffer).
 */
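/*
 * Worked example of the per-plane calculation below (illustrative values,
 * again assuming a 4 KiB MMB): a 1920-wide 3-plane YUV 4:2:0 scanout
 * fetches nlines = 2 lines per plane, so the Y plane needs
 * DIV_ROUND_UP(1920 * 1 * 2, 4096) = 1 block and each chroma plane
 * (1920 / hsub = 960 wide) needs 1 block as well, i.e. 3 blocks for the
 * whole pipe before any hw rev v1.00 power-of-two rounding.
 */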
int mdp5_smp_request(struct mdp5_smp *smp, enum mdp5_pipe pipe, u32 fmt, u32 width)
{
	struct mdp5_kms *mdp5_kms = get_kms(smp);
	struct drm_device *dev = mdp5_kms->dev;
	int rev = mdp5_cfg_get_hw_rev(mdp5_kms->cfg);
	int i, hsub, nplanes, nlines, nblks, ret;

	nplanes = drm_format_num_planes(fmt);
	hsub = drm_format_horz_chroma_subsampling(fmt);

	/* different if BWC (compressed framebuffer?) enabled: */
	nlines = 2;

	for (i = 0, nblks = 0; i < nplanes; i++) {
		int n, fetch_stride, cpp;

		cpp = drm_format_plane_cpp(fmt, i);
		fetch_stride = width * cpp / (i ? hsub : 1);

		n = DIV_ROUND_UP(fetch_stride * nlines, smp->blk_size);

		/* for hw rev v1.00 */
		if (rev == 0)
			n = roundup_pow_of_two(n);

		DBG("%s[%d]: request %d SMP blocks", pipe2name(pipe), i, n);
		ret = smp_request_block(smp, pipe2client(pipe, i), n);
		if (ret) {
			dev_err(dev->dev, "Cannot allocate %d SMP blocks: %d\n",
					n, ret);
			return ret;
		}

		nblks += n;
	}

	set_fifo_thresholds(smp, pipe, nblks);

	return 0;
}
/* Release SMP blocks for all clients of the pipe */
void mdp5_smp_release(struct mdp5_smp *smp, enum mdp5_pipe pipe)
{
	int i, nblks;

	for (i = 0, nblks = 0; i < pipe2nclients(pipe); i++)
		smp_request_block(smp, pipe2client(pipe, i), 0);
	set_fifo_thresholds(smp, pipe, 0);
}
static void update_smp_state(struct mdp5_smp *smp,
		u32 cid, mdp5_smp_state_t *assigned)
{
	struct mdp5_kms *mdp5_kms = get_kms(smp);
	int cnt = smp->blk_cnt;
	u32 blk, val;

	for_each_set_bit(blk, *assigned, cnt) {
		int idx = blk / 3;
		int fld = blk % 3;

		val = mdp5_read(mdp5_kms, REG_MDP5_MDP_SMP_ALLOC_W_REG(0, idx));

		switch (fld) {
		case 0:
			val &= ~MDP5_MDP_SMP_ALLOC_W_REG_CLIENT0__MASK;
			val |= MDP5_MDP_SMP_ALLOC_W_REG_CLIENT0(cid);
			break;
		case 1:
			val &= ~MDP5_MDP_SMP_ALLOC_W_REG_CLIENT1__MASK;
			val |= MDP5_MDP_SMP_ALLOC_W_REG_CLIENT1(cid);
			break;
		case 2:
			val &= ~MDP5_MDP_SMP_ALLOC_W_REG_CLIENT2__MASK;
			val |= MDP5_MDP_SMP_ALLOC_W_REG_CLIENT2(cid);
			break;
		}

		mdp5_write(mdp5_kms, REG_MDP5_MDP_SMP_ALLOC_W_REG(0, idx), val);
		mdp5_write(mdp5_kms, REG_MDP5_MDP_SMP_ALLOC_R_REG(0, idx), val);
	}
}
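/*
 * The ALLOC registers pack three client IDs per 32-bit word, hence the
 * blk / 3 and blk % 3 split above; e.g. (illustrative) MMB #7 lands in
 * the CLIENT1 field of ALLOC_W_REG[2] / ALLOC_R_REG[2].
 */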
/* step #2: configure hw for union(pending, inuse): */
void mdp5_smp_configure(struct mdp5_smp *smp, enum mdp5_pipe pipe)
{
	int cnt = smp->blk_cnt;
	mdp5_smp_state_t assigned;
	int i;

	for (i = 0; i < pipe2nclients(pipe); i++) {
		u32 cid = pipe2client(pipe, i);
		struct mdp5_client_smp_state *ps = &smp->client_state[cid];

		bitmap_or(assigned, ps->inuse, ps->pending, cnt);
		update_smp_state(smp, cid, &assigned);
	}
}
/* step #3: after vblank, copy pending -> inuse: */
void mdp5_smp_commit(struct mdp5_smp *smp, enum mdp5_pipe pipe)
{
	int cnt = smp->blk_cnt;
	mdp5_smp_state_t released;
	int i;

	for (i = 0; i < pipe2nclients(pipe); i++) {
		u32 cid = pipe2client(pipe, i);
		struct mdp5_client_smp_state *ps = &smp->client_state[cid];

		/*
		 * Figure out if there are any blocks we were previously
		 * using, which can be released and made available to other
		 * clients:
		 */
		if (bitmap_andnot(released, ps->inuse, ps->pending, cnt)) {
			unsigned long flags;

			spin_lock_irqsave(&smp->state_lock, flags);
			/* clear released blocks: */
			bitmap_andnot(smp->state, smp->state, released, cnt);
			spin_unlock_irqrestore(&smp->state_lock, flags);

			update_smp_state(smp, CID_UNUSED, &released);
		}

		bitmap_copy(ps->inuse, ps->pending, cnt);
	}
}
void mdp5_smp_destroy(struct mdp5_smp *smp)
{
	kfree(smp);
}
struct mdp5_smp *mdp5_smp_init(struct drm_device *dev, const struct mdp5_smp_block *cfg)
{
	struct mdp5_smp *smp = NULL;
	int ret;

	smp = kzalloc(sizeof(*smp), GFP_KERNEL);
	if (unlikely(!smp)) {
		ret = -ENOMEM;
		goto fail;
	}

	smp->dev = dev;
	smp->blk_cnt = cfg->mmb_count;
	smp->blk_size = cfg->mmb_size;

	/* statically tied MMBs cannot be re-allocated: */
	bitmap_copy(smp->state, cfg->reserved_state, smp->blk_cnt);
	spin_lock_init(&smp->state_lock);

	return smp;
fail:
	if (smp)
		mdp5_smp_destroy(smp);

	return ERR_PTR(ret);
}