/*
 * Copyright 1993-2003 NVIDIA, Corporation
 * Copyright 2007-2009 Stuart Bennett
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
 * OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "nouveau_drm.h"
#include "nouveau_reg.h"
/****************************************************************************\
*                                                                            *
* The video arbitration routines calculate some "magic" numbers.  Fixes     *
* the snow seen when accessing the framebuffer without it.                  *
* It just works (I hope).                                                    *
*                                                                            *
\****************************************************************************/

static void
nv04_calc_arb(struct nv_fifo_info *fifo, struct nv_sim_state *arb)
{
	int pagemiss, cas, width, bpp;
	int nvclks, mclks, pclks, crtpagemiss;
	int found, mclk_extra, mclk_loop, cbs, m1, p1;
	int mclk_freq, pclk_freq, nvclk_freq;
	int us_m, us_n, us_p, crtc_drain_rate;
	int cpm_us, us_crt, clwm;

	pclk_freq = arb->pclk_khz;
	mclk_freq = arb->mclk_khz;
	nvclk_freq = arb->nvclk_khz;
	pagemiss = arb->mem_page_miss;
	cas = arb->mem_latency;
	width = arb->memory_width >> 6;

	mclk_loop = mclks + mclk_extra;
	us_m = mclk_loop * 1000 * 1000 / mclk_freq;
	us_n = nvclks * 1000 * 1000 / nvclk_freq;
	us_p = nvclks * 1000 * 1000 / pclk_freq;

	crtc_drain_rate = pclk_freq * bpp / 8;
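
	/*
	 * us_crt below is the worst-case FIFO refill latency: CRTC page
	 * misses plus the delays accumulated in the mclk, nvclk and pclk
	 * domains.  clwm is how much data the CRTC drains in that time.
	 */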
	cpm_us = crtpagemiss * pagemiss * 1000 * 1000 / mclk_freq;
	us_crt = cpm_us + us_m + us_n + us_p;
	clwm = us_crt * crtc_drain_rate / (1000 * 1000);

	m1 = clwm + cbs - 512;
	p1 = m1 * pclk_freq / mclk_freq;

	if ((p1 < m1 && m1 > 0) || clwm > 519) {
		found = !mclk_extra;
		mclk_extra--;
	}

	fifo->lwm = clwm;
	fifo->burst = cbs;
}

static void
nv10_calc_arb(struct nv_fifo_info *fifo, struct nv_sim_state *arb)
{
	int fill_rate, drain_rate;
	int pclks, nvclks, mclks, xclks;
	int pclk_freq, nvclk_freq, mclk_freq;
	int fill_lat, extra_lat;
	int max_burst_o, max_burst_l;
	int fifo_len, min_lwm, max_lwm;
	const int burst_lat = 80; /* Maximum allowable latency due
				   * to the CRTC FIFO burst. (ns) */

	pclk_freq = arb->pclk_khz;
	nvclk_freq = arb->nvclk_khz;
	mclk_freq = arb->mclk_khz;
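
	/*
	 * fill_rate is how fast memory can refill the CRTC FIFO and
	 * drain_rate is how fast scanout empties it at this pixel clock
	 * and depth; both are in kB/s.
	 */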
	fill_rate = mclk_freq * arb->memory_width / 8; /* kB/s */
	drain_rate = pclk_freq * arb->bpp / 8; /* kB/s */

	fifo_len = arb->two_heads ? 1536 : 1024; /* B */

	/* Fixed FIFO refill latency. */

	pclks = 4;	/* lwm detect. */

	nvclks = 3	/* lwm -> sync. */
		+ 2	/* fbi bus cycles (1 req + 1 busy) */
		+ 1	/* 2 edge sync.  may be very close to edge so
			 * just put one. */
		+ 1	/* fbi_d_rdv_n */
		+ 1	/* Fbi_d_rdata */
		+ 1;	/* crtfifo load */

	mclks = 1	/* 2 edge sync.  may be very close to edge so
			 * just put one. */
		+ 5	/* tiling pipeline */
		+ 2	/* latency fifo */
		+ 2	/* memory request to fbio block */
		+ 7;	/* data returned from fbio block */

	/* Need to accumulate 256 bits for read */
	mclks += (arb->memory_type == 0 ? 2 : 1)
		* arb->memory_width / 32;
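
	/*
	 * "cycles * 1000 * 1000 / freq_in_khz" is the duration of that many
	 * clock cycles in nanoseconds, so the latencies below are in ns.
	 */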
	fill_lat = mclks * 1000 * 1000 / mclk_freq   /* minimum mclk latency */
		+ nvclks * 1000 * 1000 / nvclk_freq  /* nvclk latency */
		+ pclks * 1000 * 1000 / pclk_freq;   /* pclk latency */

	/* Conditional FIFO refill latency. */

	xclks = 2 * arb->mem_page_miss + mclks	/* Extra latency due to
						 * the overlay. */
		+ 2 * arb->mem_page_miss	/* Extra pagemiss latency. */
		+ (arb->bpp == 32 ? 8 : 4);	/* Margin of error. */

	extra_lat = xclks * 1000 * 1000 / mclk_freq;

	if (arb->two_heads)
		/* Account for another CRTC. */
		extra_lat += fill_lat + extra_lat + burst_lat;
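
	/*
	 * FIFO burst: pick the largest burst that neither overflows the
	 * FIFO (scanout keeps draining it while the burst is transferred)
	 * nor takes longer than burst_lat to fetch.
	 */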
	/* Max burst not leading to overflows. */
	max_burst_o = (1 + fifo_len - extra_lat * drain_rate / (1000 * 1000))
		* (fill_rate / 1000) / ((fill_rate - drain_rate) / 1000);
	fifo->burst = min(max_burst_o, 1024);

	/* Max burst value with an acceptable latency. */
	max_burst_l = burst_lat * fill_rate / (1000 * 1000);
	fifo->burst = min(max_burst_l, fifo->burst);
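
	/*
	 * nv04_update_arb() encodes the burst with ilog2(), so round it
	 * down to a power of two.
	 */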
	fifo->burst = rounddown_pow_of_two(fifo->burst);

	/* FIFO low watermark */
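	/*
	 * min_lwm: refill must be requested early enough that the FIFO does
	 * not run dry during the refill latency.  max_lwm: requesting any
	 * earlier would overflow the FIFO once the burst arrives.
	 */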
	min_lwm = (fill_lat + extra_lat) * drain_rate / (1000 * 1000) + 1;
	max_lwm = fifo_len - fifo->burst
		+ fill_lat * drain_rate / (1000 * 1000)
		+ fifo->burst * drain_rate / fill_rate;

	fifo->lwm = min_lwm + 10 * (max_lwm - min_lwm) / 100; /* Empirical. */
}

static void
nv04_update_arb(struct drm_device *dev, int VClk, int bpp,
		int *burst, int *lwm)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nvif_device *device = &nouveau_drm(dev)->device;
	struct nv_fifo_info fifo_data;
	struct nv_sim_state sim_data;
	int MClk = nouveau_hw_get_clock(dev, PLL_MEMORY);
	int NVClk = nouveau_hw_get_clock(dev, PLL_CORE);
	uint32_t cfg1 = nvif_rd32(device, NV04_PFB_CFG1);

	sim_data.pclk_khz = VClk;
	sim_data.mclk_khz = MClk;
	sim_data.nvclk_khz = NVClk;

	sim_data.two_heads = nv_two_heads(dev);
	if ((dev->pdev->device & 0xffff) == 0x01a0 /*CHIPSET_NFORCE*/ ||
	    (dev->pdev->device & 0xffff) == 0x01f0 /*CHIPSET_NFORCE2*/) {
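		/*
		 * The nForce IGPs have no dedicated VRAM: the framebuffer is
		 * carved out of system memory, so the memory characteristics
		 * are read from the chipset's PCI config space rather than
		 * from PFB.
		 */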
		uint32_t type;

		pci_read_config_dword(pci_get_bus_and_slot(0, 1), 0x7c, &type);

		sim_data.memory_type = (type >> 12) & 1;
		sim_data.memory_width = 64;
		sim_data.mem_latency = 3;
		sim_data.mem_page_miss = 10;
	} else {
		sim_data.memory_type = nvif_rd32(device, NV04_PFB_CFG0) & 0x1;
		sim_data.memory_width = (nvif_rd32(device, NV_PEXTDEV_BOOT_0) & 0x10) ? 128 : 64;
		sim_data.mem_latency = cfg1 & 0xf;
		sim_data.mem_page_miss = ((cfg1 >> 4) & 0xf) + ((cfg1 >> 31) & 0x1);
	}

	if (drm->device.info.family == NV_DEVICE_INFO_V0_TNT)
		nv04_calc_arb(&fifo_data, &sim_data);
	else
		nv10_calc_arb(&fifo_data, &sim_data);
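
	/*
	 * Return the burst as log2 of 16-byte units and the watermark in
	 * 8-byte units.
	 */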
	*burst = ilog2(fifo_data.burst >> 4);
	*lwm = fifo_data.lwm >> 3;
}

static void
nv20_update_arb(int *burst, int *lwm)
{
	unsigned int fifo_size, burst_size, graphics_lwm;

	graphics_lwm = fifo_size - burst_size;

	*burst = ilog2(burst_size >> 5);
	*lwm = graphics_lwm >> 3;
}

void
nouveau_calc_arb(struct drm_device *dev, int vclk, int bpp, int *burst, int *lwm)
{
	struct nouveau_drm *drm = nouveau_drm(dev);

	if (drm->device.info.family < NV_DEVICE_INFO_V0_KELVIN)
		nv04_update_arb(dev, vclk, bpp, burst, lwm);
	else if ((dev->pdev->device & 0xfff0) == 0x0240 /*CHIPSET_C51*/ ||
		 (dev->pdev->device & 0xfff0) == 0x03d0 /*CHIPSET_C512*/) {
		*burst = 128;
		*lwm = 0x0480;
	} else
		nv20_update_arb(burst, lwm);
}