/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "mdp5_kms.h"
#include "mdp5_smp.h"

/* SMP - Shared Memory Pool
 *
 * These are shared between all the clients, where each plane in a
 * scanout buffer is an SMP client.  Ie. scanout of 3 plane I420 on
 * pipe VIG0 => 3 clients: VIG0_Y, VIG0_CB, VIG0_CR.
 *
 * Based on the size of the attached scanout buffer, a certain # of
 * blocks must be allocated to that client out of the shared pool.
 *
 * For each block, it can be either free, or pending/in-use by a
 * client.  The updates happen in three steps:
 *
 *  1) mdp5_smp_request():
 *     When plane scanout is setup, calculate required number of
 *     blocks needed per client, and request.  Blocks not inuse or
 *     pending by any other client are added to the client's pending
 *     set.
 *
 *  2) mdp5_smp_configure():
 *     As hw is programmed, before FLUSH, MDP5_SMP_ALLOC registers
 *     are configured for the union(pending, inuse).
 *
 *  3) mdp5_smp_commit():
 *     After next vblank, copy pending -> inuse.  Optionally update
 *     MDP5_SMP_ALLOC registers if there are newly unused blocks.
 *
 * On the next vblank after changes have been committed to hw, the
 * client's pending blocks become its in-use blocks (and no-longer
 * in-use blocks become available to other clients).
 *
 * btw, hurray for confusing overloaded acronyms!  :-/
 *
 * NOTE: for atomic modeset/pageflip NONBLOCK operations, step #1
 * should happen at (or before?) atomic->check().  And we'd need
 * an API to discard previous requests if the update is aborted or
 * test-only.
 *
 * TODO: would perhaps be nice to have debugfs to dump out kernel
 * inuse and pending state of all clients..
 */
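
/*
 * Usage sketch (illustrative only, not part of the driver): a plane
 * update that needs 'nblks' blocks for client 'cid' walks the three
 * steps above roughly as shown below.  The wrapper function is
 * hypothetical; only the mdp5_smp_* calls are real.
 */
#if 0	/* example, not compiled */
static void example_plane_smp_flow(struct mdp5_kms *mdp5_kms,
		enum mdp5_client_id cid, int nblks)
{
	/* step #1: at plane setup, reserve blocks into the pending set */
	int ret = mdp5_smp_request(mdp5_kms, cid, nblks);
	if (ret)
		return;		/* e.g. -ENOSPC: not enough free blocks */

	/* step #2: before FLUSH, program ALLOC regs for union(pending, inuse) */
	mdp5_smp_configure(mdp5_kms, cid);

	/* step #3: after the next vblank, promote pending -> inuse and
	 * release any blocks that are no longer needed:
	 */
	mdp5_smp_commit(mdp5_kms, cid);
}
#endif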

static DEFINE_SPINLOCK(smp_lock);

/* step #1: update # of blocks pending for the client: */
int mdp5_smp_request(struct mdp5_kms *mdp5_kms,
		enum mdp5_client_id cid, int nblks)
{
	struct mdp5_client_smp_state *ps = &mdp5_kms->smp_client_state[cid];
	int i, ret, avail, cur_nblks, cnt = mdp5_kms->smp_blk_cnt;
	unsigned long flags;

	spin_lock_irqsave(&smp_lock, flags);

	avail = cnt - bitmap_weight(mdp5_kms->smp_state, cnt);
	if (nblks > avail) {
		ret = -ENOSPC;
		goto fail;
	}

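	/*
	 * Illustrative numbers (hypothetical, not from hw docs): with
	 * cnt = 22 blocks total and 20 already set in smp_state, avail = 2,
	 * so nblks = 3 fails with -ENOSPC while nblks <= 2 falls through to
	 * grow or shrink this client's pending reservation below.
	 */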
	cur_nblks = bitmap_weight(ps->pending, cnt);
	if (nblks > cur_nblks) {
		/* grow the existing pending reservation: */
		for (i = cur_nblks; i < nblks; i++) {
			int blk = find_first_zero_bit(mdp5_kms->smp_state, cnt);
			set_bit(blk, ps->pending);
			set_bit(blk, mdp5_kms->smp_state);
		}
	} else {
		/* shrink the existing pending reservation: */
		for (i = cur_nblks; i > nblks; i--) {
			int blk = find_first_bit(ps->pending, cnt);
			clear_bit(blk, ps->pending);
			/* don't clear in global smp_state until _commit() */
		}
	}

	ret = 0;
fail:
	spin_unlock_irqrestore(&smp_lock, flags);
	return ret;
}

static void update_smp_state(struct mdp5_kms *mdp5_kms,
		enum mdp5_client_id cid, mdp5_smp_state_t *assigned)
{
	int cnt = mdp5_kms->smp_blk_cnt;
	uint32_t blk, val;

	for_each_set_bit(blk, *assigned, cnt) {
		int idx = blk / 3;
		int fld = blk % 3;
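
		/*
		 * Each ALLOC register packs three client fields; e.g.
		 * (illustrative) blk = 7 gives idx = 2, fld = 1, i.e. the
		 * CLIENT1 field of MDP5_SMP_ALLOC_W_REG(2) (and the
		 * matching _R_ register).
		 */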

		val = mdp5_read(mdp5_kms, REG_MDP5_SMP_ALLOC_W_REG(idx));

		switch (fld) {
		case 0:
			val &= ~MDP5_SMP_ALLOC_W_REG_CLIENT0__MASK;
			val |= MDP5_SMP_ALLOC_W_REG_CLIENT0(cid);
			break;
		case 1:
			val &= ~MDP5_SMP_ALLOC_W_REG_CLIENT1__MASK;
			val |= MDP5_SMP_ALLOC_W_REG_CLIENT1(cid);
			break;
		case 2:
			val &= ~MDP5_SMP_ALLOC_W_REG_CLIENT2__MASK;
			val |= MDP5_SMP_ALLOC_W_REG_CLIENT2(cid);
			break;
		}

		mdp5_write(mdp5_kms, REG_MDP5_SMP_ALLOC_W_REG(idx), val);
		mdp5_write(mdp5_kms, REG_MDP5_SMP_ALLOC_R_REG(idx), val);
	}
}

/* step #2: configure hw for union(pending, inuse): */
void mdp5_smp_configure(struct mdp5_kms *mdp5_kms, enum mdp5_client_id cid)
{
	struct mdp5_client_smp_state *ps = &mdp5_kms->smp_client_state[cid];
	int cnt = mdp5_kms->smp_blk_cnt;
	mdp5_smp_state_t assigned;

	bitmap_or(assigned, ps->inuse, ps->pending, cnt);
	update_smp_state(mdp5_kms, cid, &assigned);
}

/* step #3: after vblank, copy pending -> inuse: */
void mdp5_smp_commit(struct mdp5_kms *mdp5_kms, enum mdp5_client_id cid)
{
	struct mdp5_client_smp_state *ps = &mdp5_kms->smp_client_state[cid];
	int cnt = mdp5_kms->smp_blk_cnt;
	mdp5_smp_state_t released;

	/*
	 * Figure out if there are any blocks we were previously
	 * using, which can be released and made available to other
	 * clients:
	 */
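	/*
	 * Illustrative example (hypothetical block numbers): if inuse is
	 * {3,4,5} and pending is {4,5,6}, then released = inuse & ~pending
	 * = {3}; block 3 is cleared from the global smp_state and its
	 * ALLOC fields are reprogrammed to CID_UNUSED below.
	 */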
	if (bitmap_andnot(released, ps->inuse, ps->pending, cnt)) {
		unsigned long flags;

		spin_lock_irqsave(&smp_lock, flags);
		/* clear released blocks: */
		bitmap_andnot(mdp5_kms->smp_state, mdp5_kms->smp_state,
				released, cnt);
		spin_unlock_irqrestore(&smp_lock, flags);

		update_smp_state(mdp5_kms, CID_UNUSED, &released);
	}

	bitmap_copy(ps->inuse, ps->pending, cnt);
}