/*
 * Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include "mdp5_kms.h"
#include "mdp5_ctl.h"
/*
 * CTL - MDP Control Pool Manager
 *
 * Controls are shared between all CRTCs.
 *
 * They are intended to be used for data path configuration.
 * The top level register programming describes the complete data path for
 * a specific data path ID - REG_MDP5_CTL_*(<id>, ...)
 *
 * Hardware capabilities determine the number of concurrent data paths.
 *
 * In certain use cases (high-resolution dual pipe), a single CTL can be
 * shared across multiple CRTCs.
 *
 * Because the number of CTLs can be less than the number of CRTCs,
 * CTLs are dynamically allocated from a pool of CTLs, only once a CRTC
 * is requested by the client (in mdp5_crtc_mode_set()).
 */
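/*
 * Illustrative sketch only (not called by the driver): the typical per-CRTC
 * flow built on top of this pool, using the functions declared in
 * mdp5_ctl.h. The helper name and the layer mixer id (0) are hypothetical
 * placeholders.
 */
static void __maybe_unused example_crtc_ctl_flow(struct mdp5_ctl_manager *ctl_mgr,
		struct drm_crtc *crtc, struct mdp5_interface *intf)
{
	struct mdp5_ctl *ctl;
	u32 flush_mask;

	/* grab a free CTL from the pool and bind it to this CRTC: */
	ctl = mdp5_ctlm_request(ctl_mgr, crtc);
	if (!ctl)
		return;

	/* describe the data path (physical interface + operation mode): */
	mdp5_ctl_set_intf(ctl, intf);

	/* after programming blend/layer registers, flush them to hardware: */
	flush_mask = mdp_ctl_flush_mask_lm(0) | mdp_ctl_flush_mask_encoder(intf);
	mdp5_ctl_commit(ctl, flush_mask);

	/* hand the CTL back to the pool when the CRTC is torn down: */
	mdp5_ctl_release(ctl);
}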
struct op_mode {
	struct mdp5_interface intf;

	bool encoder_enabled;
	uint32_t start_mask;
};

struct mdp5_ctl {
	struct mdp5_ctl_manager *ctlm;

	u32 id;
	int lm;

	/* whether this CTL has been allocated or not: */
	bool busy;

	/* Operation Mode Configuration for the Pipeline */
	struct op_mode pipeline;

	/* REG_MDP5_CTL_*(<id>) registers access info + lock: */
	spinlock_t hw_lock;
	u32 reg_offset;

	/* when do CTL registers need to be flushed? (mask of trigger bits) */
	u32 pending_ctl_trigger;

	bool cursor_on;

	struct drm_crtc *crtc;
};

struct mdp5_ctl_manager {
	struct drm_device *dev;

	/* number of CTL / Layer Mixers in this hw config: */
	u32 nlm;
	u32 nctl;

	/* to filter out non-present bits in the current hardware config */
	u32 flush_hw_mask;

	/* pool of CTLs + lock to protect resource allocation (ctls[i].busy) */
	spinlock_t pool_lock;
	struct mdp5_ctl ctls[MAX_CTL];
};
static inline
struct mdp5_kms *get_kms(struct mdp5_ctl_manager *ctl_mgr)
{
	struct msm_drm_private *priv = ctl_mgr->dev->dev_private;

	return to_mdp5_kms(to_mdp_kms(priv->kms));
}

static inline
void ctl_write(struct mdp5_ctl *ctl, u32 reg, u32 data)
{
	struct mdp5_kms *mdp5_kms = get_kms(ctl->ctlm);

	(void)ctl->reg_offset; /* TODO use this instead of mdp5_write */
	mdp5_write(mdp5_kms, reg, data);
}

static inline
u32 ctl_read(struct mdp5_ctl *ctl, u32 reg)
{
	struct mdp5_kms *mdp5_kms = get_kms(ctl->ctlm);

	(void)ctl->reg_offset; /* TODO use this instead of mdp5_read */
	return mdp5_read(mdp5_kms, reg);
}
static void set_display_intf(struct mdp5_kms *mdp5_kms,
		struct mdp5_interface *intf)
{
	unsigned long flags;
	u32 intf_sel;

	spin_lock_irqsave(&mdp5_kms->resource_lock, flags);
	intf_sel = mdp5_read(mdp5_kms, REG_MDP5_MDP_DISP_INTF_SEL(0));

	switch (intf->num) {
	case 0:
		intf_sel &= ~MDP5_MDP_DISP_INTF_SEL_INTF0__MASK;
		intf_sel |= MDP5_MDP_DISP_INTF_SEL_INTF0(intf->type);
		break;
	case 1:
		intf_sel &= ~MDP5_MDP_DISP_INTF_SEL_INTF1__MASK;
		intf_sel |= MDP5_MDP_DISP_INTF_SEL_INTF1(intf->type);
		break;
	case 2:
		intf_sel &= ~MDP5_MDP_DISP_INTF_SEL_INTF2__MASK;
		intf_sel |= MDP5_MDP_DISP_INTF_SEL_INTF2(intf->type);
		break;
	case 3:
		intf_sel &= ~MDP5_MDP_DISP_INTF_SEL_INTF3__MASK;
		intf_sel |= MDP5_MDP_DISP_INTF_SEL_INTF3(intf->type);
		break;
	default:
		BUG();
		break;
	}

	mdp5_write(mdp5_kms, REG_MDP5_MDP_DISP_INTF_SEL(0), intf_sel);
	spin_unlock_irqrestore(&mdp5_kms->resource_lock, flags);
}
static void set_ctl_op(struct mdp5_ctl *ctl, struct mdp5_interface *intf)
{
	unsigned long flags;
	u32 ctl_op = 0;

	if (!mdp5_cfg_intf_is_virtual(intf->type))
		ctl_op |= MDP5_CTL_OP_INTF_NUM(INTF0 + intf->num);

	switch (intf->type) {
	case INTF_DSI:
		if (intf->mode == MDP5_INTF_DSI_MODE_COMMAND)
			ctl_op |= MDP5_CTL_OP_CMD_MODE;
		break;

	case INTF_WB:
		if (intf->mode == MDP5_INTF_WB_MODE_LINE)
			ctl_op |= MDP5_CTL_OP_MODE(MODE_WB_2_LINE);
		break;

	default:
		break;
	}

	spin_lock_irqsave(&ctl->hw_lock, flags);
	ctl_write(ctl, REG_MDP5_CTL_OP(ctl->id), ctl_op);
	spin_unlock_irqrestore(&ctl->hw_lock, flags);
}
int mdp5_ctl_set_intf(struct mdp5_ctl *ctl, struct mdp5_interface *intf)
{
	struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
	struct mdp5_kms *mdp5_kms = get_kms(ctl_mgr);

	memcpy(&ctl->pipeline.intf, intf, sizeof(*intf));

	ctl->pipeline.start_mask = mdp_ctl_flush_mask_lm(ctl->lm) |
				   mdp_ctl_flush_mask_encoder(intf);

	/* Virtual interfaces need not set a display intf (e.g.: Writeback) */
	if (!mdp5_cfg_intf_is_virtual(intf->type))
		set_display_intf(mdp5_kms, intf);

	set_ctl_op(ctl, intf);

	return 0;
}
static bool start_signal_needed(struct mdp5_ctl *ctl)
{
	struct op_mode *pipeline = &ctl->pipeline;

	if (!pipeline->encoder_enabled || pipeline->start_mask != 0)
		return false;

	switch (pipeline->intf.type) {
	case INTF_WB:
		return true;
	case INTF_DSI:
		return pipeline->intf.mode == MDP5_INTF_DSI_MODE_COMMAND;
	default:
		return false;
	}
}
/*
 * send_start_signal() - Overlay Processor Start Signal
 *
 * For a given control operation (display pipeline), a START signal needs to
 * be issued in order to kick off operation and activate all layers.
 * e.g.: DSI command mode, Writeback
 */
static void send_start_signal(struct mdp5_ctl *ctl)
{
	unsigned long flags;

	spin_lock_irqsave(&ctl->hw_lock, flags);
	ctl_write(ctl, REG_MDP5_CTL_START(ctl->id), 1);
	spin_unlock_irqrestore(&ctl->hw_lock, flags);
}

static void refill_start_mask(struct mdp5_ctl *ctl)
{
	struct op_mode *pipeline = &ctl->pipeline;
	struct mdp5_interface *intf = &ctl->pipeline.intf;

	pipeline->start_mask = mdp_ctl_flush_mask_lm(ctl->lm);

	/*
	 * The writeback encoder needs to program & flush its
	 * address registers for each page flip.
	 */
	if (intf->type == INTF_WB)
		pipeline->start_mask |= mdp_ctl_flush_mask_encoder(intf);
}
/**
 * mdp5_ctl_set_encoder_state() - set the encoder state
 *
 * @enabled: true, when encoder is ready for data streaming; false, otherwise.
 *
 * Note:
 * This encoder state is needed to trigger the START signal (data path kickoff).
 */
int mdp5_ctl_set_encoder_state(struct mdp5_ctl *ctl, bool enabled)
{
	if (WARN_ON(!ctl))
		return -EINVAL;

	ctl->pipeline.encoder_enabled = enabled;
	DBG("intf_%d: %s", ctl->pipeline.intf.num, enabled ? "on" : "off");

	if (start_signal_needed(ctl)) {
		send_start_signal(ctl);
		refill_start_mask(ctl);
	}

	return 0;
}
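/*
 * Illustrative sketch only (not part of the driver): an encoder flagging its
 * data path as ready. For interfaces that need a START kick (DSI command
 * mode, writeback), the START signal is sent by this call - or by a later
 * mdp5_ctl_commit() - once every bit in pipeline->start_mask has been
 * flushed. The helper name is hypothetical.
 */
static void __maybe_unused example_encoder_ready(struct mdp5_ctl *ctl)
{
	/* may issue START right away if all start_mask bits were committed: */
	mdp5_ctl_set_encoder_state(ctl, true);
}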
/*
 * Note:
 * CTL registers need to be flushed after calling this function
 * (call mdp5_ctl_commit() with mdp_ctl_flush_mask_ctl() mask)
 */
int mdp5_ctl_set_cursor(struct mdp5_ctl *ctl, int cursor_id, bool enable)
{
	struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
	unsigned long flags;
	u32 blend_cfg;
	int lm = ctl->lm;

	if (unlikely(WARN_ON(lm < 0))) {
		dev_err(ctl_mgr->dev->dev, "CTL %d cannot find LM: %d",
				ctl->id, lm);
		return -EINVAL;
	}

	spin_lock_irqsave(&ctl->hw_lock, flags);

	blend_cfg = ctl_read(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, lm));

	if (enable)
		blend_cfg |= MDP5_CTL_LAYER_REG_CURSOR_OUT;
	else
		blend_cfg &= ~MDP5_CTL_LAYER_REG_CURSOR_OUT;

	ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, lm), blend_cfg);

	spin_unlock_irqrestore(&ctl->hw_lock, flags);

	ctl->pending_ctl_trigger = mdp_ctl_flush_mask_cursor(cursor_id);
	ctl->cursor_on = enable;

	return 0;
}
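/*
 * Illustrative sketch only (not part of the driver): enabling the cursor and
 * then flushing, as the note above requires. Because the cursor flush bit is
 * recorded in pending_ctl_trigger, mdp5_ctl_commit() adds the CTL flush bit
 * by itself. The helper name and cursor id are hypothetical.
 */
static void __maybe_unused example_cursor_enable(struct mdp5_ctl *ctl)
{
	const int cursor_id = 0;

	if (mdp5_ctl_set_cursor(ctl, cursor_id, true))
		return;

	/* flush the cursor (and, implicitly, the CTL registers): */
	mdp5_ctl_commit(ctl, mdp_ctl_flush_mask_cursor(cursor_id));
}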
int mdp5_ctl_blend(struct mdp5_ctl *ctl, u32 lm, u32 blend_cfg)
{
	unsigned long flags;

	if (ctl->cursor_on)
		blend_cfg |= MDP5_CTL_LAYER_REG_CURSOR_OUT;
	else
		blend_cfg &= ~MDP5_CTL_LAYER_REG_CURSOR_OUT;

	spin_lock_irqsave(&ctl->hw_lock, flags);
	ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, lm), blend_cfg);
	spin_unlock_irqrestore(&ctl->hw_lock, flags);

	ctl->pending_ctl_trigger = mdp_ctl_flush_mask_lm(lm);

	return 0;
}
u32 mdp_ctl_flush_mask_encoder(struct mdp5_interface *intf)
{
	if (intf->type == INTF_WB)
		return MDP5_CTL_FLUSH_WB;

	switch (intf->num) {
	case 0: return MDP5_CTL_FLUSH_TIMING_0;
	case 1: return MDP5_CTL_FLUSH_TIMING_1;
	case 2: return MDP5_CTL_FLUSH_TIMING_2;
	case 3: return MDP5_CTL_FLUSH_TIMING_3;
	default: return 0;
	}
}

u32 mdp_ctl_flush_mask_cursor(int cursor_id)
{
	switch (cursor_id) {
	case 0: return MDP5_CTL_FLUSH_CURSOR_0;
	case 1: return MDP5_CTL_FLUSH_CURSOR_1;
	default: return 0;
	}
}

u32 mdp_ctl_flush_mask_pipe(enum mdp5_pipe pipe)
{
	switch (pipe) {
	case SSPP_VIG0: return MDP5_CTL_FLUSH_VIG0;
	case SSPP_VIG1: return MDP5_CTL_FLUSH_VIG1;
	case SSPP_VIG2: return MDP5_CTL_FLUSH_VIG2;
	case SSPP_RGB0: return MDP5_CTL_FLUSH_RGB0;
	case SSPP_RGB1: return MDP5_CTL_FLUSH_RGB1;
	case SSPP_RGB2: return MDP5_CTL_FLUSH_RGB2;
	case SSPP_DMA0: return MDP5_CTL_FLUSH_DMA0;
	case SSPP_DMA1: return MDP5_CTL_FLUSH_DMA1;
	case SSPP_VIG3: return MDP5_CTL_FLUSH_VIG3;
	case SSPP_RGB3: return MDP5_CTL_FLUSH_RGB3;
	default: return 0;
	}
}

u32 mdp_ctl_flush_mask_lm(int lm)
{
	switch (lm) {
	case 0: return MDP5_CTL_FLUSH_LM0;
	case 1: return MDP5_CTL_FLUSH_LM1;
	case 2: return MDP5_CTL_FLUSH_LM2;
	case 5: return MDP5_CTL_FLUSH_LM5;
	default: return 0;
	}
}
static u32 fix_sw_flush(struct mdp5_ctl *ctl, u32 flush_mask)
{
	struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
	u32 sw_mask = 0;
#define BIT_NEEDS_SW_FIX(bit) \
	(!(ctl_mgr->flush_hw_mask & bit) && (flush_mask & bit))

	/* for some targets, cursor bit is the same as LM bit */
	if (BIT_NEEDS_SW_FIX(MDP5_CTL_FLUSH_CURSOR_0))
		sw_mask |= mdp_ctl_flush_mask_lm(ctl->lm);

	return sw_mask;
}
/**
 * mdp5_ctl_commit() - Register Flush
 *
 * The flush register is used to indicate that several registers are all
 * programmed, and are safe to update to the back copy of the double
 * buffered registers.
 *
 * Some registers' FLUSH bits are shared when the hardware does not have
 * dedicated bits for them; handling these is the job of fix_sw_flush().
 *
 * CTL registers need to be flushed in some circumstances; if that is the
 * case, some trigger bits will be present in both the flush mask and
 * ctl->pending_ctl_trigger.
 */
int mdp5_ctl_commit(struct mdp5_ctl *ctl, u32 flush_mask)
{
	struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
	struct op_mode *pipeline = &ctl->pipeline;
	unsigned long flags;

	pipeline->start_mask &= ~flush_mask;

	VERB("flush_mask=%x, start_mask=%x, trigger=%x", flush_mask,
			pipeline->start_mask, ctl->pending_ctl_trigger);

	if (ctl->pending_ctl_trigger & flush_mask) {
		flush_mask |= MDP5_CTL_FLUSH_CTL;
		ctl->pending_ctl_trigger = 0;
	}

	flush_mask |= fix_sw_flush(ctl, flush_mask);

	flush_mask &= ctl_mgr->flush_hw_mask;

	if (flush_mask) {
		spin_lock_irqsave(&ctl->hw_lock, flags);
		ctl_write(ctl, REG_MDP5_CTL_FLUSH(ctl->id), flush_mask);
		spin_unlock_irqrestore(&ctl->hw_lock, flags);
	}

	if (start_signal_needed(ctl)) {
		send_start_signal(ctl);
		refill_start_mask(ctl);
	}

	return 0;
}
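/*
 * Illustrative sketch only (not part of the driver): a page-flip style flush.
 * The caller ORs together the flush bits of everything it reprogrammed
 * (source pipe, layer mixer, encoder) and commits them in a single write.
 * The helper name and the pipe/LM choices below are hypothetical.
 */
static void __maybe_unused example_flush_plane_update(struct mdp5_ctl *ctl,
		struct mdp5_interface *intf)
{
	u32 flush_mask = 0;

	flush_mask |= mdp_ctl_flush_mask_pipe(SSPP_RGB0);	/* plane source */
	flush_mask |= mdp_ctl_flush_mask_lm(0);			/* layer mixer */
	flush_mask |= mdp_ctl_flush_mask_encoder(intf);		/* timing gen / WB */

	mdp5_ctl_commit(ctl, flush_mask);
}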
void mdp5_ctl_release(struct mdp5_ctl *ctl)
{
	struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
	unsigned long flags;

	if (unlikely(WARN_ON(ctl->id >= MAX_CTL) || !ctl->busy)) {
		dev_err(ctl_mgr->dev->dev, "CTL %d in bad state (%d)",
				ctl->id, ctl->busy);
		return;
	}

	spin_lock_irqsave(&ctl_mgr->pool_lock, flags);
	ctl->busy = false;
	spin_unlock_irqrestore(&ctl_mgr->pool_lock, flags);

	DBG("CTL %d released", ctl->id);
}

int mdp5_ctl_get_ctl_id(struct mdp5_ctl *ctl)
{
	return WARN_ON(!ctl) ? -EINVAL : ctl->id;
}
/*
 * mdp5_ctlm_request() - CTL dynamic allocation
 *
 * Note: the current implementation supports only one CRTC per CTL.
 *
 * @return the first free CTL, or NULL if the pool is exhausted
 */
struct mdp5_ctl *mdp5_ctlm_request(struct mdp5_ctl_manager *ctl_mgr,
		struct drm_crtc *crtc)
{
	struct mdp5_ctl *ctl = NULL;
	unsigned long flags;
	int c;

	spin_lock_irqsave(&ctl_mgr->pool_lock, flags);

	for (c = 0; c < ctl_mgr->nctl; c++)
		if (!ctl_mgr->ctls[c].busy)
			break;

	if (unlikely(c >= ctl_mgr->nctl)) {
		dev_err(ctl_mgr->dev->dev, "No more CTL available!");
		goto unlock;
	}

	ctl = &ctl_mgr->ctls[c];

	ctl->lm = mdp5_crtc_get_lm(crtc);
	ctl->crtc = crtc;
	ctl->busy = true;
	ctl->pending_ctl_trigger = 0;
	DBG("CTL %d allocated", ctl->id);

unlock:
	spin_unlock_irqrestore(&ctl_mgr->pool_lock, flags);
	return ctl;
}
void mdp5_ctlm_hw_reset(struct mdp5_ctl_manager *ctl_mgr)
{
	unsigned long flags;
	int c;

	for (c = 0; c < ctl_mgr->nctl; c++) {
		struct mdp5_ctl *ctl = &ctl_mgr->ctls[c];

		spin_lock_irqsave(&ctl->hw_lock, flags);
		ctl_write(ctl, REG_MDP5_CTL_OP(ctl->id), 0);
		spin_unlock_irqrestore(&ctl->hw_lock, flags);
	}
}

void mdp5_ctlm_destroy(struct mdp5_ctl_manager *ctl_mgr)
{
	kfree(ctl_mgr);
}
struct mdp5_ctl_manager *mdp5_ctlm_init(struct drm_device *dev,
		void __iomem *mmio_base, const struct mdp5_cfg_hw *hw_cfg)
{
	struct mdp5_ctl_manager *ctl_mgr;
	const struct mdp5_ctl_block *ctl_cfg = &hw_cfg->ctl;
	unsigned long flags;
	int c, ret;

	ctl_mgr = kzalloc(sizeof(*ctl_mgr), GFP_KERNEL);
	if (!ctl_mgr) {
		dev_err(dev->dev, "failed to allocate CTL manager\n");
		ret = -ENOMEM;
		goto fail;
	}

	if (unlikely(WARN_ON(ctl_cfg->count > MAX_CTL))) {
		dev_err(dev->dev, "Increase static pool size to at least %d\n",
				ctl_cfg->count);
		ret = -ENOSPC;
		goto fail;
	}

	/* initialize the CTL manager: */
	ctl_mgr->dev = dev;
	ctl_mgr->nlm = hw_cfg->lm.count;
	ctl_mgr->nctl = ctl_cfg->count;
	ctl_mgr->flush_hw_mask = ctl_cfg->flush_hw_mask;
	spin_lock_init(&ctl_mgr->pool_lock);

	/* initialize each CTL of the pool: */
	spin_lock_irqsave(&ctl_mgr->pool_lock, flags);
	for (c = 0; c < ctl_mgr->nctl; c++) {
		struct mdp5_ctl *ctl = &ctl_mgr->ctls[c];

		if (WARN_ON(!ctl_cfg->base[c])) {
			dev_err(dev->dev, "CTL_%d: base is null!\n", c);
			ret = -EINVAL;
			/* don't leave the pool lock held on the error path */
			spin_unlock_irqrestore(&ctl_mgr->pool_lock, flags);
			goto fail;
		}
		ctl->ctlm = ctl_mgr;
		ctl->id = c;
		ctl->reg_offset = ctl_cfg->base[c];
		ctl->busy = false;
		spin_lock_init(&ctl->hw_lock);
	}
	spin_unlock_irqrestore(&ctl_mgr->pool_lock, flags);
	DBG("Pool of %d CTLs created.", ctl_mgr->nctl);

	return ctl_mgr;

fail:
	if (ctl_mgr)
		mdp5_ctlm_destroy(ctl_mgr);

	return ERR_PTR(ret);
}
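/*
 * Illustrative sketch only (not part of the driver): the manager lifecycle
 * as seen from KMS init code, assuming @dev, @mmio and @hw_cfg are already
 * set up. The helper name is hypothetical.
 */
static int __maybe_unused example_ctlm_lifecycle(struct drm_device *dev,
		void __iomem *mmio, const struct mdp5_cfg_hw *hw_cfg)
{
	struct mdp5_ctl_manager *ctl_mgr;

	ctl_mgr = mdp5_ctlm_init(dev, mmio, hw_cfg);
	if (IS_ERR(ctl_mgr))
		return PTR_ERR(ctl_mgr);

	/* put every CTL_OP register into a known state before first use: */
	mdp5_ctlm_hw_reset(ctl_mgr);

	/* ... CRTCs now allocate CTLs via mdp5_ctlm_request() ... */

	mdp5_ctlm_destroy(ctl_mgr);
	return 0;
}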