/*
 * Copyright © 2006-2011 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Authors:
 *	Eric Anholt <eric@anholt.net>
 *	Patrik Jakobsson <patrik.r.jakobsson@gmail.com>
 */

#include <drm/drmP.h>
#include "gma_display.h"
#include "psb_intel_drv.h"
#include "psb_intel_reg.h"
#include "psb_drv.h"
#include "framebuffer.h"

/**
 * Returns whether any output on the specified pipe is of the specified type
 */
bool gma_pipe_has_type(struct drm_crtc *crtc, int type)
{
	struct drm_device *dev = crtc->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct drm_connector *l_entry;

	list_for_each_entry(l_entry, &mode_config->connector_list, head) {
		if (l_entry->encoder && l_entry->encoder->crtc == crtc) {
			struct gma_encoder *gma_encoder =
						gma_attached_encoder(l_entry);
			if (gma_encoder->type == type)
				return true;
		}
	}

	return false;
}

void gma_wait_for_vblank(struct drm_device *dev)
{
	/* Wait for 20ms, i.e. one cycle at 50hz. */
	mdelay(20);
}
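
/*
 * Program the display plane for the framebuffer bound to the CRTC: pin it
 * into the GTT, write the stride, pixel format and base/offset registers,
 * and unpin the previously displayed framebuffer.
 */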
int gma_pipe_set_base(struct drm_crtc *crtc, int x, int y,
		      struct drm_framebuffer *old_fb)
{
	struct drm_device *dev = crtc->dev;
	struct drm_psb_private *dev_priv = dev->dev_private;
	struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
	struct drm_framebuffer *fb = crtc->primary->fb;
	struct psb_framebuffer *psbfb = to_psb_fb(fb);
	int pipe = gma_crtc->pipe;
	const struct psb_offset *map = &dev_priv->regmap[pipe];
	unsigned long start, offset;
	u32 dspcntr;
	int ret = 0;

	if (!gma_power_begin(dev, true))
		return 0;

	/* no fb bound */
	if (!fb) {
		dev_err(dev->dev, "No FB bound\n");
		goto gma_pipe_cleaner;
	}

	/* We are displaying this buffer, make sure it is actually loaded
	   into the GTT */
	ret = psb_gtt_pin(psbfb->gtt);
	if (ret < 0)
		goto gma_pipe_set_base_exit;
	start = psbfb->gtt->offset;
	offset = y * fb->pitches[0] + x * fb->format->cpp[0];

	REG_WRITE(map->stride, fb->pitches[0]);

	dspcntr = REG_READ(map->cntr);
	dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;

	switch (fb->format->cpp[0] * 8) {
	case 8:
		dspcntr |= DISPPLANE_8BPP;
		break;
	case 16:
		if (fb->format->depth == 15)
			dspcntr |= DISPPLANE_15_16BPP;
		else
			dspcntr |= DISPPLANE_16BPP;
		break;
	case 24:
	case 32:
		dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
		break;
	default:
		dev_err(dev->dev, "Unknown color depth\n");
		ret = -EINVAL;
		goto gma_pipe_set_base_exit;
	}
	REG_WRITE(map->cntr, dspcntr);

	dev_dbg(dev->dev,
		"Writing base %08lX %08lX %d %d\n", start, offset, x, y);

	/* FIXME: Investigate whether this really is the base for psb and why
	   the linear offset is named base for the other chips. map->surf
	   should be the base and map->linoff the offset for all chips */
	if (IS_PSB(dev)) {
		REG_WRITE(map->base, offset + start);
		REG_READ(map->base);
	} else {
		REG_WRITE(map->base, offset);
		REG_READ(map->base);
		REG_WRITE(map->surf, start);
		REG_READ(map->surf);
	}

gma_pipe_cleaner:
	/* If there was a previous display we can now unpin it */
	if (old_fb)
		psb_gtt_unpin(to_psb_fb(old_fb)->gtt);

gma_pipe_set_base_exit:
	gma_power_end(dev);
	return ret;
}

/* Loads the palette/gamma unit for the CRTC with the prepared values */
void gma_crtc_load_lut(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_psb_private *dev_priv = dev->dev_private;
	struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
	const struct psb_offset *map = &dev_priv->regmap[gma_crtc->pipe];
	int palreg = map->palette;
	u16 *r, *g, *b;
	int i;

	/* The clocks have to be on to load the palette. */
	if (!crtc->enabled)
		return;

	r = crtc->gamma_store;
	g = r + crtc->gamma_size;
	b = g + crtc->gamma_size;

	if (gma_power_begin(dev, false)) {
		for (i = 0; i < 256; i++) {
			REG_WRITE(palreg + 4 * i,
				  (((*r++ >> 8) + gma_crtc->lut_adj[i]) << 16) |
				  (((*g++ >> 8) + gma_crtc->lut_adj[i]) << 8) |
				  ((*b++ >> 8) + gma_crtc->lut_adj[i]));
		}
		gma_power_end(dev);
	} else {
		for (i = 0; i < 256; i++) {
			/* FIXME: Why pipe[0] and not pipe[..._crtc->pipe]? */
			dev_priv->regs.pipe[0].palette[i] =
				(((*r++ >> 8) + gma_crtc->lut_adj[i]) << 16) |
				(((*g++ >> 8) + gma_crtc->lut_adj[i]) << 8) |
				((*b++ >> 8) + gma_crtc->lut_adj[i]);
		}
	}
}
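
/*
 * Gamma set hook; the DRM core copies the new table into crtc->gamma_store
 * before calling this, so reloading the hardware LUT is all that is needed.
 */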
int gma_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, u16 *blue,
		       u32 size,
		       struct drm_modeset_acquire_ctx *ctx)
{
	gma_crtc_load_lut(crtc);

	return 0;
}

/**
 * Sets the power management mode of the pipe and plane.
 *
 * This code should probably grow support for turning the cursor off and back
 * on appropriately at the same time as we're turning the pipe off/on.
 */
void gma_crtc_dpms(struct drm_crtc *crtc, int mode)
{
	struct drm_device *dev = crtc->dev;
	struct drm_psb_private *dev_priv = dev->dev_private;
	struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
	int pipe = gma_crtc->pipe;
	const struct psb_offset *map = &dev_priv->regmap[pipe];
	u32 temp;

	/* XXX: When our outputs are all unaware of DPMS modes other than off
	 * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
	 */

	if (IS_CDV(dev))
		dev_priv->ops->disable_sr(dev);

	switch (mode) {
	case DRM_MODE_DPMS_ON:
	case DRM_MODE_DPMS_STANDBY:
	case DRM_MODE_DPMS_SUSPEND:
		if (gma_crtc->active)
			break;

		gma_crtc->active = true;

		/* Enable the DPLL */
		temp = REG_READ(map->dpll);
		if ((temp & DPLL_VCO_ENABLE) == 0) {
			REG_WRITE(map->dpll, temp);
			REG_READ(map->dpll);
			/* Wait for the clocks to stabilize. */
			udelay(150);
			REG_WRITE(map->dpll, temp | DPLL_VCO_ENABLE);
			REG_READ(map->dpll);
			/* Wait for the clocks to stabilize. */
			udelay(150);
			REG_WRITE(map->dpll, temp | DPLL_VCO_ENABLE);
			REG_READ(map->dpll);
			/* Wait for the clocks to stabilize. */
			udelay(150);
		}

		/* Enable the plane */
		temp = REG_READ(map->cntr);
		if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
			REG_WRITE(map->cntr,
				  temp | DISPLAY_PLANE_ENABLE);
			/* Flush the plane changes */
			REG_WRITE(map->base, REG_READ(map->base));
		}

		udelay(150);

		/* Enable the pipe */
		temp = REG_READ(map->conf);
		if ((temp & PIPEACONF_ENABLE) == 0)
			REG_WRITE(map->conf, temp | PIPEACONF_ENABLE);

		temp = REG_READ(map->status);
		temp &= ~(0xFFFF);
		temp |= PIPE_FIFO_UNDERRUN;
		REG_WRITE(map->status, temp);
		REG_READ(map->status);

		gma_crtc_load_lut(crtc);

		/* Give the overlay scaler a chance to enable
		 * if it's on this pipe */
		/* psb_intel_crtc_dpms_video(crtc, true); TODO */
		break;
	case DRM_MODE_DPMS_OFF:
		if (!gma_crtc->active)
			break;

		gma_crtc->active = false;

		/* Give the overlay scaler a chance to disable
		 * if it's on this pipe */
		/* psb_intel_crtc_dpms_video(crtc, FALSE); TODO */

		/* Disable the VGA plane that we never use */
		REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);

		/* Turn off vblank interrupts */
		drm_crtc_vblank_off(crtc);

		/* Wait for vblank for the disable to take effect */
		gma_wait_for_vblank(dev);

		/* Disable plane */
		temp = REG_READ(map->cntr);
		if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
			REG_WRITE(map->cntr,
				  temp & ~DISPLAY_PLANE_ENABLE);
			/* Flush the plane changes */
			REG_WRITE(map->base, REG_READ(map->base));
			REG_READ(map->base);
		}

		/* Disable pipe */
		temp = REG_READ(map->conf);
		if ((temp & PIPEACONF_ENABLE) != 0) {
			REG_WRITE(map->conf, temp & ~PIPEACONF_ENABLE);
			REG_READ(map->conf);
		}

		/* Wait for vblank for the disable to take effect. */
		gma_wait_for_vblank(dev);

		udelay(150);

		/* Disable DPLL */
		temp = REG_READ(map->dpll);
		if ((temp & DPLL_VCO_ENABLE) != 0) {
			REG_WRITE(map->dpll, temp & ~DPLL_VCO_ENABLE);
			REG_READ(map->dpll);
		}

		/* Wait for the clocks to turn off. */
		udelay(150);
		break;
	}

	if (IS_CDV(dev))
		dev_priv->ops->update_wm(dev, crtc);

	/* Set FIFO watermarks */
	REG_WRITE(DSPARB, 0x3F3E);
}
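
/*
 * Set the hardware cursor from a GEM object. The buffer is pinned into the
 * GTT (or copied into the driver's reserved cursor memory on chips that need
 * a physical address) before the cursor control and base registers are
 * written. A zero handle disables the cursor and unpins the previous object.
 */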
int gma_crtc_cursor_set(struct drm_crtc *crtc,
			struct drm_file *file_priv,
			uint32_t handle,
			uint32_t width, uint32_t height)
{
	struct drm_device *dev = crtc->dev;
	struct drm_psb_private *dev_priv = dev->dev_private;
	struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
	int pipe = gma_crtc->pipe;
	uint32_t control = (pipe == 0) ? CURACNTR : CURBCNTR;
	uint32_t base = (pipe == 0) ? CURABASE : CURBBASE;
	uint32_t temp;
	size_t addr = 0;
	struct gtt_range *gt;
	struct gtt_range *cursor_gt = gma_crtc->cursor_gt;
	struct drm_gem_object *obj;
	void *tmp_dst, *tmp_src;
	int ret = 0, i, cursor_pages;

	/* If we didn't get a handle then turn the cursor off */
	if (!handle) {
		temp = CURSOR_MODE_DISABLE;
		if (gma_power_begin(dev, false)) {
			REG_WRITE(control, temp);
			REG_WRITE(base, 0);
			gma_power_end(dev);
		}

		/* Unpin the old GEM object */
		if (gma_crtc->cursor_obj) {
			gt = container_of(gma_crtc->cursor_obj,
					  struct gtt_range, gem);
			psb_gtt_unpin(gt);
			drm_gem_object_unreference_unlocked(gma_crtc->cursor_obj);
			gma_crtc->cursor_obj = NULL;
		}
		return 0;
	}

	/* Currently we only support 64x64 cursors */
	if (width != 64 || height != 64) {
		dev_dbg(dev->dev, "We currently only support 64x64 cursors\n");
		return -EINVAL;
	}

	obj = drm_gem_object_lookup(file_priv, handle);
	if (!obj) {
		ret = -ENOENT;
		goto unlock;
	}

	if (obj->size < width * height * 4) {
		dev_dbg(dev->dev, "Buffer is too small\n");
		ret = -ENOMEM;
		goto unref_cursor;
	}

	gt = container_of(obj, struct gtt_range, gem);

	/* Pin the memory into the GTT */
	ret = psb_gtt_pin(gt);
	if (ret) {
		dev_err(dev->dev, "Can not pin down handle 0x%x\n", handle);
		goto unref_cursor;
	}

	if (dev_priv->ops->cursor_needs_phys) {
		if (cursor_gt == NULL) {
			dev_err(dev->dev, "No hardware cursor mem available");
			ret = -ENOMEM;
			goto unref_cursor;
		}

		/* Prevent overflow */
		if (gt->npage > 4)
			cursor_pages = 4;
		else
			cursor_pages = gt->npage;

		/* Copy the cursor to cursor mem */
		tmp_dst = dev_priv->vram_addr + cursor_gt->offset;
		for (i = 0; i < cursor_pages; i++) {
			tmp_src = kmap(gt->pages[i]);
			memcpy(tmp_dst, tmp_src, PAGE_SIZE);
			kunmap(gt->pages[i]);
			tmp_dst += PAGE_SIZE;
		}

		addr = gma_crtc->cursor_addr;
	} else {
		addr = gt->offset;
		gma_crtc->cursor_addr = addr;
	}

	temp = 0;
	/* set the pipe for the cursor */
	temp |= (pipe << 28);
	temp |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;

	if (gma_power_begin(dev, false)) {
		REG_WRITE(control, temp);
		REG_WRITE(base, addr);
		gma_power_end(dev);
	}

	/* unpin the old bo */
	if (gma_crtc->cursor_obj) {
		gt = container_of(gma_crtc->cursor_obj, struct gtt_range, gem);
		psb_gtt_unpin(gt);
		drm_gem_object_unreference_unlocked(gma_crtc->cursor_obj);
	}

	gma_crtc->cursor_obj = obj;
unlock:
	return ret;

unref_cursor:
	drm_gem_object_unreference_unlocked(obj);
	return ret;
}
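
/*
 * Move the hardware cursor. Negative coordinates are encoded with the
 * per-axis sign bit expected by the cursor position register.
 */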
int gma_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
{
	struct drm_device *dev = crtc->dev;
	struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
	int pipe = gma_crtc->pipe;
	uint32_t temp = 0;
	uint32_t addr;

	if (x < 0) {
		temp |= (CURSOR_POS_SIGN << CURSOR_X_SHIFT);
		x = -x;
	}
	if (y < 0) {
		temp |= (CURSOR_POS_SIGN << CURSOR_Y_SHIFT);
		y = -y;
	}

	temp |= ((x & CURSOR_POS_MASK) << CURSOR_X_SHIFT);
	temp |= ((y & CURSOR_POS_MASK) << CURSOR_Y_SHIFT);

	addr = gma_crtc->cursor_addr;

	if (gma_power_begin(dev, false)) {
		REG_WRITE((pipe == 0) ? CURAPOS : CURBPOS, temp);
		REG_WRITE((pipe == 0) ? CURABASE : CURBBASE, addr);
		gma_power_end(dev);
	}
	return 0;
}
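
/*
 * CRTC helpers used around a mode set: the pipe is switched off before and
 * back on after it has been reprogrammed.
 */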
void gma_crtc_prepare(struct drm_crtc *crtc)
{
	const struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
	crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
}

void gma_crtc_commit(struct drm_crtc *crtc)
{
	const struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
	crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
}

void gma_crtc_disable(struct drm_crtc *crtc)
{
	struct gtt_range *gt;
	const struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;

	crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);

	if (crtc->primary->fb) {
		gt = to_psb_fb(crtc->primary->fb)->gtt;
		psb_gtt_unpin(gt);
	}
}

void gma_crtc_destroy(struct drm_crtc *crtc)
{
	struct gma_crtc *gma_crtc = to_gma_crtc(crtc);

	kfree(gma_crtc->crtc_state);
	drm_crtc_cleanup(crtc);
	kfree(gma_crtc);
}
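
/*
 * Perform a mode set through the CRTC helper while keeping runtime PM from
 * powering the device down in the middle of it.
 */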
int gma_crtc_set_config(struct drm_mode_set *set,
			struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_device *dev = set->crtc->dev;
	struct drm_psb_private *dev_priv = dev->dev_private;
	int ret;

	if (!dev_priv->rpm_enabled)
		return drm_crtc_helper_set_config(set, ctx);

	pm_runtime_forbid(&dev->pdev->dev);
	ret = drm_crtc_helper_set_config(set, ctx);
	pm_runtime_allow(&dev->pdev->dev);

	return ret;
}

/**
 * Save HW states of given crtc
 */
void gma_crtc_save(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_psb_private *dev_priv = dev->dev_private;
	struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
	struct psb_intel_crtc_state *crtc_state = gma_crtc->crtc_state;
	const struct psb_offset *map = &dev_priv->regmap[gma_crtc->pipe];
	uint32_t palette_reg;
	int i;

	if (!crtc_state) {
		dev_err(dev->dev, "No CRTC state found\n");
		return;
	}

	crtc_state->saveDSPCNTR = REG_READ(map->cntr);
	crtc_state->savePIPECONF = REG_READ(map->conf);
	crtc_state->savePIPESRC = REG_READ(map->src);
	crtc_state->saveFP0 = REG_READ(map->fp0);
	crtc_state->saveFP1 = REG_READ(map->fp1);
	crtc_state->saveDPLL = REG_READ(map->dpll);
	crtc_state->saveHTOTAL = REG_READ(map->htotal);
	crtc_state->saveHBLANK = REG_READ(map->hblank);
	crtc_state->saveHSYNC = REG_READ(map->hsync);
	crtc_state->saveVTOTAL = REG_READ(map->vtotal);
	crtc_state->saveVBLANK = REG_READ(map->vblank);
	crtc_state->saveVSYNC = REG_READ(map->vsync);
	crtc_state->saveDSPSTRIDE = REG_READ(map->stride);

	/* NOTE: DSPSIZE DSPPOS only for psb */
	crtc_state->saveDSPSIZE = REG_READ(map->size);
	crtc_state->saveDSPPOS = REG_READ(map->pos);

	crtc_state->saveDSPBASE = REG_READ(map->base);

	palette_reg = map->palette;
	for (i = 0; i < 256; ++i)
		crtc_state->savePalette[i] = REG_READ(palette_reg + (i << 2));
}

/**
 * Restore HW states of given crtc
 */
void gma_crtc_restore(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_psb_private *dev_priv = dev->dev_private;
	struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
	struct psb_intel_crtc_state *crtc_state = gma_crtc->crtc_state;
	const struct psb_offset *map = &dev_priv->regmap[gma_crtc->pipe];
	uint32_t palette_reg;
	int i;

	if (!crtc_state) {
		dev_err(dev->dev, "No crtc state\n");
		return;
	}

	if (crtc_state->saveDPLL & DPLL_VCO_ENABLE) {
		REG_WRITE(map->dpll,
			crtc_state->saveDPLL & ~DPLL_VCO_ENABLE);
		REG_READ(map->dpll);
		udelay(150);
	}

	REG_WRITE(map->fp0, crtc_state->saveFP0);
	REG_READ(map->fp0);

	REG_WRITE(map->fp1, crtc_state->saveFP1);
	REG_READ(map->fp1);

	REG_WRITE(map->dpll, crtc_state->saveDPLL);
	REG_READ(map->dpll);
	udelay(150);

	REG_WRITE(map->htotal, crtc_state->saveHTOTAL);
	REG_WRITE(map->hblank, crtc_state->saveHBLANK);
	REG_WRITE(map->hsync, crtc_state->saveHSYNC);
	REG_WRITE(map->vtotal, crtc_state->saveVTOTAL);
	REG_WRITE(map->vblank, crtc_state->saveVBLANK);
	REG_WRITE(map->vsync, crtc_state->saveVSYNC);
	REG_WRITE(map->stride, crtc_state->saveDSPSTRIDE);

	REG_WRITE(map->size, crtc_state->saveDSPSIZE);
	REG_WRITE(map->pos, crtc_state->saveDSPPOS);

	REG_WRITE(map->src, crtc_state->savePIPESRC);
	REG_WRITE(map->base, crtc_state->saveDSPBASE);
	REG_WRITE(map->conf, crtc_state->savePIPECONF);

	gma_wait_for_vblank(dev);

	REG_WRITE(map->cntr, crtc_state->saveDSPCNTR);
	REG_WRITE(map->base, crtc_state->saveDSPBASE);

	gma_wait_for_vblank(dev);

	palette_reg = map->palette;
	for (i = 0; i < 256; ++i)
		REG_WRITE(palette_reg + (i << 2), crtc_state->savePalette[i]);
}
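
/*
 * Generic encoder prepare/commit helpers: turn the encoder off before a mode
 * set and back on afterwards through its DPMS hook.
 */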
void gma_encoder_prepare(struct drm_encoder *encoder)
{
	const struct drm_encoder_helper_funcs *encoder_funcs =
		encoder->helper_private;
	/* lvds has its own version of prepare see psb_intel_lvds_prepare */
	encoder_funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
}

void gma_encoder_commit(struct drm_encoder *encoder)
{
	const struct drm_encoder_helper_funcs *encoder_funcs =
		encoder->helper_private;
	/* lvds has its own version of commit see psb_intel_lvds_commit */
	encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
}

void gma_encoder_destroy(struct drm_encoder *encoder)
{
	struct gma_encoder *intel_encoder = to_gma_encoder(encoder);

	drm_encoder_cleanup(encoder);
	kfree(intel_encoder);
}

/* Currently there is only a 1:1 mapping of encoders and connectors */
struct drm_encoder *gma_best_encoder(struct drm_connector *connector)
{
	struct gma_encoder *gma_encoder = gma_attached_encoder(connector);

	return &gma_encoder->base;
}

void gma_connector_attach_encoder(struct gma_connector *connector,
				  struct gma_encoder *encoder)
{
	connector->encoder = encoder;
	drm_mode_connector_attach_encoder(&connector->base,
					  &encoder->base);
}
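
/*
 * Check a computed PLL configuration against the chip specific limits: each
 * divider and the resulting VCO and dot clock must fall within its range.
 */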
#define GMA_PLL_INVALID(s) { /* DRM_ERROR(s); */ return false; }

bool gma_pll_is_valid(struct drm_crtc *crtc,
		      const struct gma_limit_t *limit,
		      struct gma_clock_t *clock)
{
	if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
		GMA_PLL_INVALID("p1 out of range");
	if (clock->p < limit->p.min || limit->p.max < clock->p)
		GMA_PLL_INVALID("p out of range");
	if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2)
		GMA_PLL_INVALID("m2 out of range");
	if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
		GMA_PLL_INVALID("m1 out of range");
	/* On CDV m1 is always 0 */
	if (clock->m1 <= clock->m2 && clock->m1 != 0)
		GMA_PLL_INVALID("m1 <= m2 && m1 != 0");
	if (clock->m < limit->m.min || limit->m.max < clock->m)
		GMA_PLL_INVALID("m out of range");
	if (clock->n < limit->n.min || limit->n.max < clock->n)
		GMA_PLL_INVALID("n out of range");
	if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
		GMA_PLL_INVALID("vco out of range");
	/* XXX: We may need to be checking "Dot clock"
	 * depending on the multiplier, connector, etc.,
	 * rather than just a single range.
	 */
	if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
		GMA_PLL_INVALID("dot out of range");

	return true;
}
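
/*
 * Brute-force search of the m1/m2/n/p1 divider space for the valid PLL
 * configuration whose dot clock comes closest to the target frequency.
 * Returns true if a usable configuration was found.
 */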
bool gma_find_best_pll(const struct gma_limit_t *limit,
		       struct drm_crtc *crtc, int target, int refclk,
		       struct gma_clock_t *best_clock)
{
	struct drm_device *dev = crtc->dev;
	const struct gma_clock_funcs *clock_funcs =
						to_gma_crtc(crtc)->clock_funcs;
	struct gma_clock_t clock;
	int err = target;

	if (gma_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
	    (REG_READ(LVDS) & LVDS_PORT_EN) != 0) {
		/*
		 * For LVDS, if the panel is on, just rely on its current
		 * settings for dual-channel.  We haven't figured out how to
		 * reliably set up different single/dual channel state, if we
		 * even can.
		 */
		if ((REG_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
		    LVDS_CLKB_POWER_UP)
			clock.p2 = limit->p2.p2_fast;
		else
			clock.p2 = limit->p2.p2_slow;
	} else {
		if (target < limit->p2.dot_limit)
			clock.p2 = limit->p2.p2_slow;
		else
			clock.p2 = limit->p2.p2_fast;
	}

	memset(best_clock, 0, sizeof(*best_clock));

	/* m1 is always 0 on CDV so the outermost loop will run just once */
	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
		for (clock.m2 = limit->m2.min;
		     (clock.m2 < clock.m1 || clock.m1 == 0) &&
		      clock.m2 <= limit->m2.max; clock.m2++) {
			for (clock.n = limit->n.min;
			     clock.n <= limit->n.max; clock.n++) {
				for (clock.p1 = limit->p1.min;
				     clock.p1 <= limit->p1.max;
				     clock.p1++) {
					int this_err;

					clock_funcs->clock(refclk, &clock);

					if (!clock_funcs->pll_is_valid(crtc,
								limit, &clock))
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err) {
						*best_clock = clock;
						err = this_err;
					}
				}
			}
		}
	}

	return err != target;
}