/*
 * Copyright (c) 2014, The Linux Foundation. All rights reserved.
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "mdp5_kms.h"
#include "mdp5_smp.h"

/* SMP - Shared Memory Pool
 *
 * These are shared between all the clients, where each plane in a
 * scanout buffer is a SMP client.  Ie. scanout of 3 plane I420 on
 * pipe VIG0 => 3 clients: VIG0_Y, VIG0_CB, VIG0_CR.
 *
 * Based on the size of the attached scanout buffer, a certain # of
 * blocks must be allocated to that client out of the shared pool.
 *
 * In some hw, some blocks are statically allocated for certain pipes
 * and CANNOT be re-allocated (eg: MMB0 and MMB1 both tied to RGB0).
 *
 * For each block that can be dynamically allocated, it can be either:
 *
 *     free:
 *     The block is free.
 *
 *     pending:
 *     The block is allocated to some client and not free.
 *
 *     configured:
 *     The block is allocated to some client, and assigned to that
 *     client in MDP5_MDP_SMP_ALLOC registers.
 *
 *     inuse:
 *     The block is being actively used by a client.
 *
 * The updates happen in the following steps:
 *
 *  1) mdp5_smp_request():
 *     When plane scanout is setup, calculate the required number of
 *     blocks needed per client, and request.  Blocks neither inuse nor
 *     configured nor pending by any other client are added to the
 *     client's pending set.
 *     For shrinking, blocks in pending but not in configured can be freed
 *     directly, but those already in configured will be freed later by
 *     mdp5_smp_commit().
 *
 *  2) mdp5_smp_configure():
 *     As hw is programmed, before FLUSH, MDP5_MDP_SMP_ALLOC registers
 *     are configured for the union(pending, inuse).
 *     Current pending is copied to configured.
 *     It is assumed that mdp5_smp_request() and mdp5_smp_configure() do not
 *     run concurrently for the same pipe.
 *
 *  3) mdp5_smp_commit():
 *     After the next vblank, copy configured -> inuse.  Optionally update
 *     MDP5_SMP_ALLOC registers if there are newly unused blocks.
 *
 *  4) mdp5_smp_release():
 *     Must be called after the pipe is disabled and no longer uses any SMB.
 *
 * On the next vblank after changes have been committed to hw, the
 * client's pending blocks become its in-use blocks (and no-longer
 * in-use blocks become available to other clients).
 *
 * btw, hurray for confusing overloaded acronyms!  :-/
 *
 * NOTE: for atomic modeset/pageflip NONBLOCK operations, step #1
 * should happen at (or before)? atomic->check().  And we'd need
 * an API to discard previous requests if the update is aborted or
 * is test-only.
 *
 * TODO would perhaps be nice to have debugfs to dump out kernel
 * inuse and pending state of all clients..
 */
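
/*
 * A rough sketch (not lifted from the atomic code paths, which interleave
 * this with CRTC/encoder programming) of how a caller would drive the four
 * steps above for one plane update:
 *
 *	mdp5_smp_request(smp, pipe, format, width, hdecim);	// step 1, at plane setup
 *	... program pipe registers ...
 *	mdp5_smp_configure(smp, pipe);				// step 2, before FLUSH
 *	... FLUSH, then wait for vblank ...
 *	mdp5_smp_commit(smp, pipe);				// step 3, after vblank
 *	... later, once the pipe is disabled ...
 *	mdp5_smp_release(smp, pipe);				// step 4
 */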

struct mdp5_smp {
	struct drm_device *dev;

	uint8_t reserved[MAX_CLIENTS]; /* fixed MMBs allocation per client */

	int blk_cnt;
	int blk_size;

	spinlock_t state_lock;
	mdp5_smp_state_t state; /* to track smp allocation amongst pipes: */

	struct mdp5_client_smp_state client_state[MAX_CLIENTS];
};
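
/*
 * For context: mdp5_smp_state_t and struct mdp5_client_smp_state are declared
 * in mdp5_smp.h, not here.  Roughly (an assumption of this note, not repeated
 * from that header verbatim), the state type is a bitmap with one bit per MMB,
 * and each client tracks one such bitmap per lifecycle stage described above:
 *
 *	struct mdp5_client_smp_state {
 *		mdp5_smp_state_t inuse;
 *		mdp5_smp_state_t configured;
 *		mdp5_smp_state_t pending;
 *	};
 *
 * which is what the bitmap_*() calls below operate on.
 */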

static void update_smp_state(struct mdp5_smp *smp,
		u32 cid, mdp5_smp_state_t *assigned);

static inline
struct mdp5_kms *get_kms(struct mdp5_smp *smp)
{
	struct msm_drm_private *priv = smp->dev->dev_private;

	return to_mdp5_kms(to_mdp_kms(priv->kms));
}

static inline u32 pipe2client(enum mdp5_pipe pipe, int plane)
{
#define CID_UNUSED	0

	if (WARN_ON(plane >= pipe2nclients(pipe)))
		return CID_UNUSED;

	/*
	 * Note on SMP clients:
	 * For ViG pipes, fetch Y/Cr/Cb-components clients are always
	 * consecutive, and in that order.
	 *
	 * e.g.: if mdp5_cfg->smp.clients[SSPP_VIG0] = N, then:
	 *	Y  plane's client ID is N
	 *	Cr plane's client ID is N + 1
	 *	Cb plane's client ID is N + 2
	 */

	return mdp5_cfg->smp.clients[pipe] + plane;
}

/* step #1: update # of blocks pending for the client: */
static int smp_request_block(struct mdp5_smp *smp,
		u32 cid, int nblks)
{
	struct mdp5_kms *mdp5_kms = get_kms(smp);
	struct mdp5_client_smp_state *ps = &smp->client_state[cid];
	int i, ret, avail, cur_nblks, cnt = smp->blk_cnt;
	uint8_t reserved;
	unsigned long flags;

	reserved = smp->reserved[cid];

	spin_lock_irqsave(&smp->state_lock, flags);

	if (reserved) {
		nblks = max(0, nblks - reserved);
		DBG("%d MMBs allocated (%d reserved)", nblks, reserved);
	}

	avail = cnt - bitmap_weight(smp->state, cnt);
	if (nblks > avail) {
		dev_err(mdp5_kms->dev->dev, "out of blks (req=%d > avail=%d)\n",
				nblks, avail);
		ret = -ENOSPC;
		goto fail;
	}

	cur_nblks = bitmap_weight(ps->pending, cnt);
	if (nblks > cur_nblks) {
		/* grow the existing pending reservation: */
		for (i = cur_nblks; i < nblks; i++) {
			int blk = find_first_zero_bit(smp->state, cnt);
			set_bit(blk, ps->pending);
			set_bit(blk, smp->state);
		}
	} else {
		/* shrink the existing pending reservation: */
		for (i = cur_nblks; i > nblks; i--) {
			int blk = find_first_bit(ps->pending, cnt);
			clear_bit(blk, ps->pending);

			/* clear in global smp_state if not in configured,
			 * otherwise it is freed later by _commit()
			 */
			if (!test_bit(blk, ps->configured))
				clear_bit(blk, smp->state);
		}
	}

	ret = 0;
fail:
	spin_unlock_irqrestore(&smp->state_lock, flags);
	return ret;
}

static void set_fifo_thresholds(struct mdp5_smp *smp,
		enum mdp5_pipe pipe, int nblks)
{
	struct mdp5_kms *mdp5_kms = get_kms(smp);
	u32 smp_entries_per_blk = smp->blk_size / (128 / BITS_PER_BYTE);
	u32 val;

	/* 1/4 of SMP pool that is being fetched */
	val = (nblks * smp_entries_per_blk) / 4;

	mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_0(pipe), val * 1);
	mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_1(pipe), val * 2);
	mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_2(pipe), val * 3);
}
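
/*
 * Worked example with hypothetical numbers: for a blk_size of 4096 bytes,
 * each MMB holds 4096 / 16 = 256 128-bit entries.  A pipe that was granted
 * nblks = 4 gets val = (4 * 256) / 4 = 256, so the watermarks end up at
 * WM_0 = 256, WM_1 = 512, WM_2 = 768, i.e. 1/4, 2/4 and 3/4 of the entries
 * allocated to that pipe.
 */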

/*
 * NOTE: looks like if horizontal decimation is used (if we supported that)
 * then the width used to calculate SMP block requirements is the post-
 * decimated width.  Ie. SMP buffering sits downstream of decimation (which
 * presumably happens during the dma from scanout buffer).
 */
int mdp5_smp_request(struct mdp5_smp *smp, enum mdp5_pipe pipe,
		const struct mdp_format *format, u32 width, bool hdecim)
{
	struct mdp5_kms *mdp5_kms = get_kms(smp);
	struct drm_device *dev = mdp5_kms->dev;
	int rev = mdp5_cfg_get_hw_rev(mdp5_kms->cfg);
	int i, hsub, nplanes, nlines, nblks, ret;
	u32 fmt = format->base.pixel_format;

	nplanes = drm_format_num_planes(fmt);
	hsub = drm_format_horz_chroma_subsampling(fmt);

	/* different if BWC (compressed framebuffer?) enabled: */
	nlines = 2;

	/* Newer MDPs have split/packing logic, which fetches sub-sampled
	 * U and V components (splits them from Y if necessary) and packs
	 * them together, writes to SMP using a single client.
	 */
	if ((rev > 0) && (format->chroma_sample > CHROMA_FULL)) {
		fmt = DRM_FORMAT_NV24;
		nplanes = 2;

		/* if decimation is enabled, HW decimates less on the
		 * sub sampled chroma components
		 */
		if (hdecim && (hsub > 1))
			hsub = 1;
	}

	for (i = 0, nblks = 0; i < nplanes; i++) {
		int n, fetch_stride, cpp;

		cpp = drm_format_plane_cpp(fmt, i);
		fetch_stride = width * cpp / (i ? hsub : 1);

		n = DIV_ROUND_UP(fetch_stride * nlines, smp->blk_size);

		/* for hw rev v1.00 */
		if (rev == 0)
			n = roundup_pow_of_two(n);

		DBG("%s[%d]: request %d SMP blocks", pipe2name(pipe), i, n);
		ret = smp_request_block(smp, pipe2client(pipe, i), n);
		if (ret) {
			dev_err(dev->dev, "Cannot allocate %d SMP blocks: %d\n",
					n, ret);
			return ret;
		}

		nblks += n;
	}

	set_fifo_thresholds(smp, pipe, nblks);

	return 0;
}
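
/*
 * Worked example with hypothetical numbers: a 1920-wide ARGB8888 plane
 * (single plane, cpp = 4) with blk_size = 4096 gives fetch_stride =
 * 1920 * 4 = 7680 bytes; with the nlines = 2 above that is
 * n = DIV_ROUND_UP(7680 * 2, 4096) = 4 MMBs, and on hw rev v1.00
 * roundup_pow_of_two(4) leaves that unchanged.
 */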

/* Release SMP blocks for all clients of the pipe */
void mdp5_smp_release(struct mdp5_smp *smp, enum mdp5_pipe pipe)
{
	int i;
	unsigned long flags;
	int cnt = smp->blk_cnt;

	for (i = 0; i < pipe2nclients(pipe); i++) {
		mdp5_smp_state_t assigned;
		u32 cid = pipe2client(pipe, i);
		struct mdp5_client_smp_state *ps = &smp->client_state[cid];

		spin_lock_irqsave(&smp->state_lock, flags);

		/* clear hw assignment */
		bitmap_or(assigned, ps->inuse, ps->configured, cnt);
		update_smp_state(smp, CID_UNUSED, &assigned);

		/* free to global pool */
		bitmap_andnot(smp->state, smp->state, ps->pending, cnt);
		bitmap_andnot(smp->state, smp->state, assigned, cnt);

		/* clear client's info */
		bitmap_zero(ps->pending, cnt);
		bitmap_zero(ps->configured, cnt);
		bitmap_zero(ps->inuse, cnt);

		spin_unlock_irqrestore(&smp->state_lock, flags);
	}

	set_fifo_thresholds(smp, pipe, 0);
}

static void update_smp_state(struct mdp5_smp *smp,
		u32 cid, mdp5_smp_state_t *assigned)
{
	struct mdp5_kms *mdp5_kms = get_kms(smp);
	int cnt = smp->blk_cnt;
	u32 blk, val;

	for_each_set_bit(blk, *assigned, cnt) {
		int idx = blk / 3;
		int fld = blk % 3;

		val = mdp5_read(mdp5_kms, REG_MDP5_MDP_SMP_ALLOC_W_REG(0, idx));

		switch (fld) {
		case 0:
			val &= ~MDP5_MDP_SMP_ALLOC_W_REG_CLIENT0__MASK;
			val |= MDP5_MDP_SMP_ALLOC_W_REG_CLIENT0(cid);
			break;
		case 1:
			val &= ~MDP5_MDP_SMP_ALLOC_W_REG_CLIENT1__MASK;
			val |= MDP5_MDP_SMP_ALLOC_W_REG_CLIENT1(cid);
			break;
		case 2:
			val &= ~MDP5_MDP_SMP_ALLOC_W_REG_CLIENT2__MASK;
			val |= MDP5_MDP_SMP_ALLOC_W_REG_CLIENT2(cid);
			break;
		}

		mdp5_write(mdp5_kms, REG_MDP5_MDP_SMP_ALLOC_W_REG(0, idx), val);
		mdp5_write(mdp5_kms, REG_MDP5_MDP_SMP_ALLOC_R_REG(0, idx), val);
	}
}
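
/*
 * Layout note: each SMP_ALLOC register packs three client-ID fields
 * (CLIENT0/1/2), hence the blk / 3 register index and blk % 3 field above.
 * E.g. MMB 7 lands in ALLOC_W_REG(0, 2) / ALLOC_R_REG(0, 2), CLIENT1 field.
 */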

/* step #2: configure hw for union(pending, inuse): */
void mdp5_smp_configure(struct mdp5_smp *smp, enum mdp5_pipe pipe)
{
	int cnt = smp->blk_cnt;
	mdp5_smp_state_t assigned;
	int i;

	for (i = 0; i < pipe2nclients(pipe); i++) {
		u32 cid = pipe2client(pipe, i);
		struct mdp5_client_smp_state *ps = &smp->client_state[cid];

		/*
		 * if vblank has not happened since the last smp_configure,
		 * skip the configure for now
		 */
		if (!bitmap_equal(ps->inuse, ps->configured, cnt))
			continue;

		bitmap_copy(ps->configured, ps->pending, cnt);
		bitmap_or(assigned, ps->inuse, ps->configured, cnt);
		update_smp_state(smp, cid, &assigned);
	}
}

/* step #3: after vblank, copy configured -> inuse: */
void mdp5_smp_commit(struct mdp5_smp *smp, enum mdp5_pipe pipe)
{
	int cnt = smp->blk_cnt;
	mdp5_smp_state_t released;
	int i;

	for (i = 0; i < pipe2nclients(pipe); i++) {
		u32 cid = pipe2client(pipe, i);
		struct mdp5_client_smp_state *ps = &smp->client_state[cid];

		/*
		 * Figure out if there are any blocks we were previously
		 * using, which can be released and made available to other
		 * clients:
		 */
		if (bitmap_andnot(released, ps->inuse, ps->configured, cnt)) {
			unsigned long flags;

			spin_lock_irqsave(&smp->state_lock, flags);
			/* clear released blocks: */
			bitmap_andnot(smp->state, smp->state, released, cnt);
			spin_unlock_irqrestore(&smp->state_lock, flags);

			update_smp_state(smp, CID_UNUSED, &released);
		}

		bitmap_copy(ps->inuse, ps->configured, cnt);
	}
}

void mdp5_smp_destroy(struct mdp5_smp *smp)
{
	kfree(smp);
}

struct mdp5_smp *mdp5_smp_init(struct drm_device *dev, const struct mdp5_smp_block *cfg)
{
	struct mdp5_smp *smp = NULL;
	int ret;

	smp = kzalloc(sizeof(*smp), GFP_KERNEL);
	if (unlikely(!smp)) {
		ret = -ENOMEM;
		goto fail;
	}

	smp->dev = dev;
	smp->blk_cnt = cfg->mmb_count;
	smp->blk_size = cfg->mmb_size;

	/* statically tied MMBs cannot be re-allocated: */
	bitmap_copy(smp->state, cfg->reserved_state, smp->blk_cnt);
	memcpy(smp->reserved, cfg->reserved, sizeof(smp->reserved));
	spin_lock_init(&smp->state_lock);

	return smp;
fail:
	if (smp)
		mdp5_smp_destroy(smp);

	return ERR_PTR(ret);
}
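
/*
 * A hypothetical caller sketch (the actual kms init code lives elsewhere):
 * since the constructor returns ERR_PTR() on failure, a caller would do
 * roughly:
 *
 *	struct mdp5_smp *smp = mdp5_smp_init(dev, &hw_cfg->smp);
 *	if (IS_ERR(smp))
 *		return PTR_ERR(smp);
 *
 * where hw_cfg is assumed to be the per-SoC mdp5 config providing the
 * struct mdp5_smp_block (mmb_count, mmb_size, reserved_state, reserved).
 */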