/* savage_bci.c -- BCI support for Savage
 *
 * Copyright 2004  Felix Kuehling
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub license,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NON-INFRINGEMENT. IN NO EVENT SHALL FELIX KUEHLING BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
 * CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#include "drmP.h"
#include "savage_drm.h"
#include "savage_drv.h"
/* Need a long timeout: shadow status updates can take a while,
 * and so can waiting for events when the queue is full. */
#define SAVAGE_DEFAULT_USEC_TIMEOUT	1000000	/* 1s */
#define SAVAGE_EVENT_USEC_TIMEOUT	5000000	/* 5s */
#define SAVAGE_FREELIST_DEBUG		0
static int savage_do_cleanup_bci(drm_device_t *dev);
static int
savage_bci_wait_fifo_shadow(drm_savage_private_t * dev_priv, unsigned int n)
{
	uint32_t mask = dev_priv->status_used_mask;
	uint32_t threshold = dev_priv->bci_threshold_hi;
	uint32_t status;
	int i;

#if SAVAGE_BCI_DEBUG
	if (n > dev_priv->cob_size + SAVAGE_BCI_FIFO_SIZE - threshold)
		DRM_ERROR("Trying to emit %d words "
			  "(more than guaranteed space in COB)\n", n);
#endif

	for (i = 0; i < SAVAGE_DEFAULT_USEC_TIMEOUT; i++) {
		DRM_MEMORYBARRIER();
		status = dev_priv->status_ptr[0];
		if ((status & mask) < threshold)
			return 0;
		DRM_UDELAY(1);
	}

#if SAVAGE_BCI_DEBUG
	DRM_ERROR("failed!\n");
	DRM_INFO("   status=0x%08x, threshold=0x%08x\n", status, threshold);
#endif
	return DRM_ERR(EBUSY);
}
static int
savage_bci_wait_fifo_s3d(drm_savage_private_t * dev_priv, unsigned int n)
{
	uint32_t maxUsed = dev_priv->cob_size + SAVAGE_BCI_FIFO_SIZE - n;
	uint32_t status;
	int i;

	for (i = 0; i < SAVAGE_DEFAULT_USEC_TIMEOUT; i++) {
		status = SAVAGE_READ(SAVAGE_STATUS_WORD0);
		if ((status & SAVAGE_FIFO_USED_MASK_S3D) <= maxUsed)
			return 0;
		DRM_UDELAY(1);
	}

#if SAVAGE_BCI_DEBUG
	DRM_ERROR("failed!\n");
	DRM_INFO("   status=0x%08x\n", status);
#endif
	return DRM_ERR(EBUSY);
}
static int
savage_bci_wait_fifo_s4(drm_savage_private_t * dev_priv, unsigned int n)
{
	uint32_t maxUsed = dev_priv->cob_size + SAVAGE_BCI_FIFO_SIZE - n;
	uint32_t status;
	int i;

	for (i = 0; i < SAVAGE_DEFAULT_USEC_TIMEOUT; i++) {
		status = SAVAGE_READ(SAVAGE_ALT_STATUS_WORD0);
		if ((status & SAVAGE_FIFO_USED_MASK_S4) <= maxUsed)
			return 0;
		DRM_UDELAY(1);
	}

#if SAVAGE_BCI_DEBUG
	DRM_ERROR("failed!\n");
	DRM_INFO("   status=0x%08x\n", status);
#endif
	return DRM_ERR(EBUSY);
}
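/* A note on the FIFO waits above: the arithmetic suggests the command
 * queue is the COB (command overflow buffer) plus the on-chip BCI FIFO,
 * so n words fit once the used count drops to
 * cob_size + SAVAGE_BCI_FIFO_SIZE - n. The shadow-status variant is
 * coarser: it waits until the used count falls below bci_threshold_hi,
 * which only guarantees space for requests of up to
 * cob_size + SAVAGE_BCI_FIFO_SIZE - threshold words (hence the warning
 * in savage_bci_wait_fifo_shadow).
 */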
/*
 * Waiting for events.
 *
 * The BIOS resets the event tag to 0 on mode changes. Therefore we
 * never emit 0 to the event tag. If we find a 0 event tag we know the
 * BIOS stomped on it and return success assuming that the BIOS waited
 * for engine idle.
 *
 * Note: if the Xserver uses the event tag it has to follow the same
 * rule. Otherwise there may be glitches every 2^16 events.
 */
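/* Worked example of the wrap-safe comparison used below: with e = 0xfffe
 * and a hardware tag of 0x0002 the test computes
 * ((0x0002 - 0xfffe) & 0xffff) = 0x0004 <= 0x7fff, i.e. the tag already
 * passed the event despite the 16-bit wrap-around. Conversely a tag of
 * 0xfffd yields 0xffff > 0x7fff, so we keep waiting. Tags up to 2^15 - 1
 * events in the future are thus treated as passed.
 */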
static int
savage_bci_wait_event_shadow(drm_savage_private_t * dev_priv, uint16_t e)
{
	uint32_t status;
	int i;

	for (i = 0; i < SAVAGE_EVENT_USEC_TIMEOUT; i++) {
		DRM_MEMORYBARRIER();
		status = dev_priv->status_ptr[1];
		if ((((status & 0xffff) - e) & 0xffff) <= 0x7fff ||
		    (status & 0xffff) == 0)
			return 0;
		DRM_UDELAY(1);
	}

#if SAVAGE_BCI_DEBUG
	DRM_ERROR("failed!\n");
	DRM_INFO("   status=0x%08x, e=0x%04x\n", status, e);
#endif

	return DRM_ERR(EBUSY);
}
static int
savage_bci_wait_event_reg(drm_savage_private_t * dev_priv, uint16_t e)
{
	uint32_t status;
	int i;

	for (i = 0; i < SAVAGE_EVENT_USEC_TIMEOUT; i++) {
		status = SAVAGE_READ(SAVAGE_STATUS_WORD1);
		if ((((status & 0xffff) - e) & 0xffff) <= 0x7fff ||
		    (status & 0xffff) == 0)
			return 0;
		DRM_UDELAY(1);
	}

#if SAVAGE_BCI_DEBUG
	DRM_ERROR("failed!\n");
	DRM_INFO("   status=0x%08x, e=0x%04x\n", status, e);
#endif

	return DRM_ERR(EBUSY);
}
uint16_t savage_bci_emit_event(drm_savage_private_t * dev_priv,
			       unsigned int flags)
{
	uint16_t count;
	BCI_LOCALS;

	if (dev_priv->status_ptr) {
		/* coordinate with Xserver */
		count = dev_priv->status_ptr[1023];
		if (count < dev_priv->event_counter)
			dev_priv->event_wrap++;
	} else {
		count = dev_priv->event_counter;
	}
	count = (count + 1) & 0xffff;
	if (count == 0) {
		count++;	/* See the comment above savage_wait_event_*. */
		dev_priv->event_wrap++;
	}
	dev_priv->event_counter = count;
	if (dev_priv->status_ptr)
		dev_priv->status_ptr[1023] = (uint32_t) count;

	if ((flags & (SAVAGE_WAIT_2D | SAVAGE_WAIT_3D))) {
		unsigned int wait_cmd = BCI_CMD_WAIT;
		if ((flags & SAVAGE_WAIT_2D))
			wait_cmd |= BCI_CMD_WAIT_2D;
		if ((flags & SAVAGE_WAIT_3D))
			wait_cmd |= BCI_CMD_WAIT_3D;
		BEGIN_BCI(2);
		BCI_WRITE(wait_cmd);
	} else {
		BEGIN_BCI(1);
	}
	BCI_WRITE(BCI_CMD_UPDATE_EVENT_TAG | (uint32_t) count);

	return count;
}
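/* Typical usage pattern (as in savage_reclaim_buffers below): emit an
 * event that waits for outstanding 3D work, then stamp the object with
 * the returned tag and the current wrap count:
 *
 *	event = savage_bci_emit_event(dev_priv, SAVAGE_WAIT_3D);
 *	SET_AGE(&buf_priv->age, event, dev_priv->event_wrap);
 *
 * Later, TEST_AGE against the hardware's tag (plus the wrap bookkeeping
 * shown in savage_freelist_get) tells whether the object is idle.
 */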
/*
 * Freelist management
 */
static int savage_freelist_init(drm_device_t * dev)
{
	drm_savage_private_t *dev_priv = dev->dev_private;
	drm_device_dma_t *dma = dev->dma;
	drm_buf_t *buf;
	drm_savage_buf_priv_t *entry;
	int i;
	DRM_DEBUG("count=%d\n", dma->buf_count);

	dev_priv->head.next = &dev_priv->tail;
	dev_priv->head.prev = NULL;
	dev_priv->head.buf = NULL;

	dev_priv->tail.next = NULL;
	dev_priv->tail.prev = &dev_priv->head;
	dev_priv->tail.buf = NULL;

	for (i = 0; i < dma->buf_count; i++) {
		buf = dma->buflist[i];
		entry = buf->dev_private;

		SET_AGE(&entry->age, 0, 0);
		entry->buf = buf;

		entry->next = dev_priv->head.next;
		entry->prev = &dev_priv->head;
		dev_priv->head.next->prev = entry;
		dev_priv->head.next = entry;
	}

	return 0;
}
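/* The freelist is a doubly-linked list framed by two sentinel entries
 * (head and tail) that carry no buffer:
 *
 *	head <-> newest entry <-> ... <-> oldest entry <-> tail
 *
 * savage_freelist_put inserts at the head; savage_freelist_get takes the
 * entry just before the tail, so buffers are reused in the order they
 * were released, giving the hardware the longest possible time to finish
 * with them.
 */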
static drm_buf_t *savage_freelist_get(drm_device_t * dev)
{
	drm_savage_private_t *dev_priv = dev->dev_private;
	drm_savage_buf_priv_t *tail = dev_priv->tail.prev;
	unsigned int event;
	unsigned int wrap;

	UPDATE_EVENT_COUNTER();
	if (dev_priv->status_ptr)
		event = dev_priv->status_ptr[1] & 0xffff;
	else
		event = SAVAGE_READ(SAVAGE_STATUS_WORD1) & 0xffff;
	wrap = dev_priv->event_wrap;
	if (event > dev_priv->event_counter)
		wrap--;		/* hardware hasn't passed the last wrap yet */

	DRM_DEBUG("   tail=0x%04x %d\n", tail->age.event, tail->age.wrap);
	DRM_DEBUG("   head=0x%04x %d\n", event, wrap);

	if (tail->buf && (TEST_AGE(&tail->age, event, wrap) || event == 0)) {
		drm_savage_buf_priv_t *next = tail->next;
		drm_savage_buf_priv_t *prev = tail->prev;
		prev->next = next;
		next->prev = prev;
		tail->next = tail->prev = NULL;
		return tail->buf;
	}

	DRM_DEBUG("returning NULL, tail->buf=%p!\n", tail->buf);
	return NULL;
}
void savage_freelist_put(drm_device_t * dev, drm_buf_t * buf)
{
	drm_savage_private_t *dev_priv = dev->dev_private;
	drm_savage_buf_priv_t *entry = buf->dev_private, *prev, *next;

	DRM_DEBUG("age=0x%04x wrap=%d\n", entry->age.event, entry->age.wrap);

	if (entry->next != NULL || entry->prev != NULL) {
		DRM_ERROR("entry already on freelist.\n");
		return;
	}

	prev = &dev_priv->head;
	next = prev->next;
	prev->next = entry;
	next->prev = entry;
	entry->prev = prev;
	entry->next = next;
}
/*
 * Command DMA
 */
static int savage_dma_init(drm_savage_private_t * dev_priv)
{
	unsigned int i;

	dev_priv->nr_dma_pages = dev_priv->cmd_dma->size /
	    (SAVAGE_DMA_PAGE_SIZE * 4);
	dev_priv->dma_pages = drm_alloc(sizeof(drm_savage_dma_page_t) *
					dev_priv->nr_dma_pages, DRM_MEM_DRIVER);
	if (dev_priv->dma_pages == NULL)
		return DRM_ERR(ENOMEM);

	for (i = 0; i < dev_priv->nr_dma_pages; ++i) {
		SET_AGE(&dev_priv->dma_pages[i].age, 0, 0);
		dev_priv->dma_pages[i].used = 0;
		dev_priv->dma_pages[i].flushed = 0;
	}
	SET_AGE(&dev_priv->last_dma_age, 0, 0);

	dev_priv->first_dma_page = 0;
	dev_priv->current_dma_page = 0;

	return 0;
}
void savage_dma_reset(drm_savage_private_t * dev_priv)
{
	uint16_t event;
	unsigned int wrap, i;
	event = savage_bci_emit_event(dev_priv, 0);
	wrap = dev_priv->event_wrap;
	for (i = 0; i < dev_priv->nr_dma_pages; ++i) {
		SET_AGE(&dev_priv->dma_pages[i].age, event, wrap);
		dev_priv->dma_pages[i].used = 0;
		dev_priv->dma_pages[i].flushed = 0;
	}
	SET_AGE(&dev_priv->last_dma_age, event, wrap);
	dev_priv->first_dma_page = dev_priv->current_dma_page = 0;
}
void savage_dma_wait(drm_savage_private_t * dev_priv, unsigned int page)
{
	uint16_t event;
	unsigned int wrap;

	/* Faked DMA buffer pages don't age. */
	if (dev_priv->cmd_dma == &dev_priv->fake_dma)
		return;

	UPDATE_EVENT_COUNTER();
	if (dev_priv->status_ptr)
		event = dev_priv->status_ptr[1] & 0xffff;
	else
		event = SAVAGE_READ(SAVAGE_STATUS_WORD1) & 0xffff;
	wrap = dev_priv->event_wrap;
	if (event > dev_priv->event_counter)
		wrap--;		/* hardware hasn't passed the last wrap yet */

	if (dev_priv->dma_pages[page].age.wrap > wrap ||
	    (dev_priv->dma_pages[page].age.wrap == wrap &&
	     dev_priv->dma_pages[page].age.event > event)) {
		if (dev_priv->wait_evnt(dev_priv,
					dev_priv->dma_pages[page].age.event)
		    < 0)
			DRM_ERROR("wait_evnt failed!\n");
	}
}
uint32_t *savage_dma_alloc(drm_savage_private_t * dev_priv, unsigned int n)
{
	unsigned int cur = dev_priv->current_dma_page;
	unsigned int rest = SAVAGE_DMA_PAGE_SIZE -
	    dev_priv->dma_pages[cur].used;
	unsigned int nr_pages = (n - rest + SAVAGE_DMA_PAGE_SIZE - 1) /
	    SAVAGE_DMA_PAGE_SIZE;
	uint32_t *dma_ptr;
	unsigned int i;

	DRM_DEBUG("cur=%u, cur->used=%u, n=%u, rest=%u, nr_pages=%u\n",
		  cur, dev_priv->dma_pages[cur].used, n, rest, nr_pages);

	if (cur + nr_pages < dev_priv->nr_dma_pages) {
		dma_ptr = (uint32_t *) dev_priv->cmd_dma->handle +
		    cur * SAVAGE_DMA_PAGE_SIZE + dev_priv->dma_pages[cur].used;
		if (n < rest)
			rest = n;
		dev_priv->dma_pages[cur].used += rest;
		n -= rest;
		cur++;
	} else {
		dev_priv->dma_flush(dev_priv);
		nr_pages =
		    (n + SAVAGE_DMA_PAGE_SIZE - 1) / SAVAGE_DMA_PAGE_SIZE;
		for (i = cur; i < dev_priv->nr_dma_pages; ++i) {
			dev_priv->dma_pages[i].age = dev_priv->last_dma_age;
			dev_priv->dma_pages[i].used = 0;
			dev_priv->dma_pages[i].flushed = 0;
		}
		dma_ptr = (uint32_t *) dev_priv->cmd_dma->handle;
		dev_priv->first_dma_page = cur = 0;
	}
	for (i = cur; nr_pages > 0; ++i, --nr_pages) {
#if SAVAGE_DMA_DEBUG
		if (dev_priv->dma_pages[i].used) {
			DRM_ERROR("unflushed page %u: used=%u\n",
				  i, dev_priv->dma_pages[i].used);
		}
#endif
		if (n > SAVAGE_DMA_PAGE_SIZE)
			dev_priv->dma_pages[i].used = SAVAGE_DMA_PAGE_SIZE;
		else
			dev_priv->dma_pages[i].used = n;
		n -= SAVAGE_DMA_PAGE_SIZE;
	}
	dev_priv->current_dma_page = --i;

	DRM_DEBUG("cur=%u, cur->used=%u, n=%u\n",
		  i, dev_priv->dma_pages[i].used, n);

	savage_dma_wait(dev_priv, dev_priv->current_dma_page);

	return dma_ptr;
}
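/* Page accounting example: say the current page has rest = 100 entries
 * free and a request asks for n = 300. The request consumes those 100
 * entries, then nr_pages = ceil((300 - 100) / SAVAGE_DMA_PAGE_SIZE)
 * further pages are claimed, the last of which becomes the new
 * current_dma_page. If the run would pass the end of the buffer,
 * everything pending is flushed first and allocation restarts at page 0.
 * savage_dma_wait then blocks until the hardware has retired the new
 * current page's previous contents.
 */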
static void savage_dma_flush(drm_savage_private_t * dev_priv)
{
	unsigned int first = dev_priv->first_dma_page;
	unsigned int cur = dev_priv->current_dma_page;
	uint16_t event;
	unsigned int wrap, pad, align, len, i;
	unsigned long phys_addr;
	BCI_LOCALS;

	if (first == cur &&
	    dev_priv->dma_pages[cur].used == dev_priv->dma_pages[cur].flushed)
		return;

	/* pad length to multiples of 2 entries
	 * align start of next DMA block to multiples of 8 entries */
	pad = -dev_priv->dma_pages[cur].used & 1;
	align = -(dev_priv->dma_pages[cur].used + pad) & 7;

	DRM_DEBUG("first=%u, cur=%u, first->flushed=%u, cur->used=%u, "
		  "pad=%u, align=%u\n",
		  first, cur, dev_priv->dma_pages[first].flushed,
		  dev_priv->dma_pages[cur].used, pad, align);

	/* pad with noops */
	if (pad) {
		uint32_t *dma_ptr = (uint32_t *) dev_priv->cmd_dma->handle +
		    cur * SAVAGE_DMA_PAGE_SIZE + dev_priv->dma_pages[cur].used;
		dev_priv->dma_pages[cur].used += pad;
		while (pad != 0) {
			*dma_ptr++ = BCI_CMD_WAIT;
			pad--;
		}
	}

	DRM_MEMORYBARRIER();

	/* do flush */
	phys_addr = dev_priv->cmd_dma->offset +
	    (first * SAVAGE_DMA_PAGE_SIZE +
	     dev_priv->dma_pages[first].flushed) * 4;
	len = (cur - first) * SAVAGE_DMA_PAGE_SIZE +
	    dev_priv->dma_pages[cur].used - dev_priv->dma_pages[first].flushed;

	DRM_DEBUG("phys_addr=%lx, len=%u\n",
		  phys_addr | dev_priv->dma_type, len);

	BEGIN_BCI(3);
	BCI_SET_REGISTERS(SAVAGE_DMABUFADDR, 1);
	BCI_WRITE(phys_addr | dev_priv->dma_type);
	BCI_DMA(len);

	/* fix alignment of the start of the next block */
	dev_priv->dma_pages[cur].used += align;

	/* age DMA pages */
	event = savage_bci_emit_event(dev_priv, 0);
	wrap = dev_priv->event_wrap;
	for (i = first; i < cur; ++i) {
		SET_AGE(&dev_priv->dma_pages[i].age, event, wrap);
		dev_priv->dma_pages[i].used = 0;
		dev_priv->dma_pages[i].flushed = 0;
	}
	/* age the current page only when it's full */
	if (dev_priv->dma_pages[cur].used == SAVAGE_DMA_PAGE_SIZE) {
		SET_AGE(&dev_priv->dma_pages[cur].age, event, wrap);
		dev_priv->dma_pages[cur].used = 0;
		dev_priv->dma_pages[cur].flushed = 0;
		/* advance to next page */
		cur++;
		if (cur == dev_priv->nr_dma_pages)
			cur = 0;
		dev_priv->first_dma_page = dev_priv->current_dma_page = cur;
	} else {
		dev_priv->first_dma_page = cur;
		dev_priv->dma_pages[cur].flushed = dev_priv->dma_pages[i].used;
	}
	SET_AGE(&dev_priv->last_dma_age, event, wrap);

	DRM_DEBUG("first=cur=%u, cur->used=%u, cur->flushed=%u\n", cur,
		  dev_priv->dma_pages[cur].used,
		  dev_priv->dma_pages[cur].flushed);
}
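/* The pad/align arithmetic above relies on two's complement: for a used
 * count u, (-u & 1) is the number of entries (0 or 1) needed to round u
 * up to a multiple of 2, and (-(u + pad) & 7) rounds the padded length
 * up to a multiple of 8. E.g. u = 13: pad = (-13 & 1) = 1, then
 * align = (-14 & 7) = 2, so the DMA block is 14 entries long and the
 * next block starts at entry 16.
 */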
static void savage_fake_dma_flush(drm_savage_private_t * dev_priv)
{
	unsigned int i, j;
	BCI_LOCALS;

	if (dev_priv->first_dma_page == dev_priv->current_dma_page &&
	    dev_priv->dma_pages[dev_priv->current_dma_page].used == 0)
		return;

	DRM_DEBUG("first=%u, cur=%u, cur->used=%u\n",
		  dev_priv->first_dma_page, dev_priv->current_dma_page,
		  dev_priv->dma_pages[dev_priv->current_dma_page].used);

	for (i = dev_priv->first_dma_page;
	     i <= dev_priv->current_dma_page && dev_priv->dma_pages[i].used;
	     ++i) {
		uint32_t *dma_ptr = (uint32_t *) dev_priv->cmd_dma->handle +
		    i * SAVAGE_DMA_PAGE_SIZE;
#if SAVAGE_DMA_DEBUG
		/* Sanity check: all pages except the last one must be full. */
		if (i < dev_priv->current_dma_page &&
		    dev_priv->dma_pages[i].used != SAVAGE_DMA_PAGE_SIZE) {
			DRM_ERROR("partial DMA page %u: used=%u",
				  i, dev_priv->dma_pages[i].used);
		}
#endif
		BEGIN_BCI(dev_priv->dma_pages[i].used);
		for (j = 0; j < dev_priv->dma_pages[i].used; ++j) {
			BCI_WRITE(dma_ptr[j]);
		}
		dev_priv->dma_pages[i].used = 0;
	}

	/* reset to first page */
	dev_priv->first_dma_page = dev_priv->current_dma_page = 0;
}
int savage_driver_load(drm_device_t *dev, unsigned long chipset)
{
	drm_savage_private_t *dev_priv;

	dev_priv = drm_alloc(sizeof(drm_savage_private_t), DRM_MEM_DRIVER);
	if (dev_priv == NULL)
		return DRM_ERR(ENOMEM);

	memset(dev_priv, 0, sizeof(drm_savage_private_t));
	dev->dev_private = (void *)dev_priv;

	dev_priv->chipset = (enum savage_family)chipset;

	return 0;
}
/*
 * Initialize mappings. On Savage4 and SavageIX the alignment
 * and size of the aperture is not suitable for automatic MTRR setup
 * in drm_addmap. Therefore we add them manually before the maps are
 * initialized, and tear them down on last close.
 */
int savage_driver_firstopen(drm_device_t *dev)
{
	drm_savage_private_t *dev_priv = dev->dev_private;
	unsigned long mmio_base, fb_base, fb_size, aperture_base;
	/* fb_rsrc and aper_rsrc aren't really used currently, but still exist
	 * in case we decide we need information on the BAR for BSD in the
	 * future.
	 */
	unsigned int fb_rsrc, aper_rsrc;
	int ret = 0;

	dev_priv->mtrr[0].handle = -1;
	dev_priv->mtrr[1].handle = -1;
	dev_priv->mtrr[2].handle = -1;
	if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
		fb_rsrc = 0;
		fb_base = drm_get_resource_start(dev, 0);
		fb_size = SAVAGE_FB_SIZE_S3;
		mmio_base = fb_base + SAVAGE_FB_SIZE_S3;
		aper_rsrc = 0;
		aperture_base = fb_base + SAVAGE_APERTURE_OFFSET;
		/* this should always be true */
		if (drm_get_resource_len(dev, 0) == 0x08000000) {
			/* Don't make MMIO write-combining! We need 3
			 * MTRRs. */
			dev_priv->mtrr[0].base = fb_base;
			dev_priv->mtrr[0].size = 0x01000000;
			dev_priv->mtrr[0].handle =
			    drm_mtrr_add(dev_priv->mtrr[0].base,
					 dev_priv->mtrr[0].size, DRM_MTRR_WC);
			dev_priv->mtrr[1].base = fb_base + 0x02000000;
			dev_priv->mtrr[1].size = 0x02000000;
			dev_priv->mtrr[1].handle =
			    drm_mtrr_add(dev_priv->mtrr[1].base,
					 dev_priv->mtrr[1].size, DRM_MTRR_WC);
			dev_priv->mtrr[2].base = fb_base + 0x04000000;
			dev_priv->mtrr[2].size = 0x04000000;
			dev_priv->mtrr[2].handle =
			    drm_mtrr_add(dev_priv->mtrr[2].base,
					 dev_priv->mtrr[2].size, DRM_MTRR_WC);
		} else {
			DRM_ERROR("strange pci_resource_len %08lx\n",
				  drm_get_resource_len(dev, 0));
		}
	} else if (dev_priv->chipset != S3_SUPERSAVAGE &&
		   dev_priv->chipset != S3_SAVAGE2000) {
		mmio_base = drm_get_resource_start(dev, 0);
		fb_rsrc = 1;
		fb_base = drm_get_resource_start(dev, 1);
		fb_size = SAVAGE_FB_SIZE_S4;
		aper_rsrc = 1;
		aperture_base = fb_base + SAVAGE_APERTURE_OFFSET;
		/* this should always be true */
		if (drm_get_resource_len(dev, 1) == 0x08000000) {
			/* Can use one MTRR to cover both fb and
			 * aperture. */
			dev_priv->mtrr[0].base = fb_base;
			dev_priv->mtrr[0].size = 0x08000000;
			dev_priv->mtrr[0].handle =
			    drm_mtrr_add(dev_priv->mtrr[0].base,
					 dev_priv->mtrr[0].size, DRM_MTRR_WC);
		} else {
			DRM_ERROR("strange pci_resource_len %08lx\n",
				  drm_get_resource_len(dev, 1));
		}
	} else {
		mmio_base = drm_get_resource_start(dev, 0);
		fb_rsrc = 1;
		fb_base = drm_get_resource_start(dev, 1);
		fb_size = drm_get_resource_len(dev, 1);
		aper_rsrc = 2;
		aperture_base = drm_get_resource_start(dev, 2);
		/* Automatic MTRR setup will do the right thing. */
	}

	ret = drm_addmap(dev, mmio_base, SAVAGE_MMIO_SIZE, _DRM_REGISTERS,
			 _DRM_READ_ONLY, &dev_priv->mmio);
	if (ret)
		return ret;

	ret = drm_addmap(dev, fb_base, fb_size, _DRM_FRAME_BUFFER,
			 _DRM_WRITE_COMBINING, &dev_priv->fb);
	if (ret)
		return ret;

	ret = drm_addmap(dev, aperture_base, SAVAGE_APERTURE_SIZE,
			 _DRM_FRAME_BUFFER, _DRM_WRITE_COMBINING,
			 &dev_priv->aperture);

	return ret;
}
/*
 * Delete MTRRs and free device-private data.
 */
void savage_driver_lastclose(drm_device_t *dev)
{
	drm_savage_private_t *dev_priv = dev->dev_private;
	int i;

	for (i = 0; i < 3; ++i)
		if (dev_priv->mtrr[i].handle >= 0)
			drm_mtrr_del(dev_priv->mtrr[i].handle,
				     dev_priv->mtrr[i].base,
				     dev_priv->mtrr[i].size, DRM_MTRR_WC);
}
int savage_driver_unload(drm_device_t *dev)
{
	drm_savage_private_t *dev_priv = dev->dev_private;

	drm_free(dev_priv, sizeof(drm_savage_private_t), DRM_MEM_DRIVER);

	return 0;
}
static int savage_do_init_bci(drm_device_t * dev, drm_savage_init_t * init)
{
	drm_savage_private_t *dev_priv = dev->dev_private;

	if (init->fb_bpp != 16 && init->fb_bpp != 32) {
		DRM_ERROR("invalid frame buffer bpp %d!\n", init->fb_bpp);
		return DRM_ERR(EINVAL);
	}
	if (init->depth_bpp != 16 && init->depth_bpp != 32) {
		DRM_ERROR("invalid depth buffer bpp %d!\n", init->depth_bpp);
		return DRM_ERR(EINVAL);
	}
	if (init->dma_type != SAVAGE_DMA_AGP &&
	    init->dma_type != SAVAGE_DMA_PCI) {
		DRM_ERROR("invalid dma memory type %d!\n", init->dma_type);
		return DRM_ERR(EINVAL);
	}
	dev_priv->cob_size = init->cob_size;
	dev_priv->bci_threshold_lo = init->bci_threshold_lo;
	dev_priv->bci_threshold_hi = init->bci_threshold_hi;
	dev_priv->dma_type = init->dma_type;

	dev_priv->fb_bpp = init->fb_bpp;
	dev_priv->front_offset = init->front_offset;
	dev_priv->front_pitch = init->front_pitch;
	dev_priv->back_offset = init->back_offset;
	dev_priv->back_pitch = init->back_pitch;
	dev_priv->depth_bpp = init->depth_bpp;
	dev_priv->depth_offset = init->depth_offset;
	dev_priv->depth_pitch = init->depth_pitch;

	dev_priv->texture_offset = init->texture_offset;
	dev_priv->texture_size = init->texture_size;
	dev_priv->sarea = drm_getsarea(dev);
	if (!dev_priv->sarea) {
		DRM_ERROR("could not find sarea!\n");
		savage_do_cleanup_bci(dev);
		return DRM_ERR(EINVAL);
	}
	if (init->status_offset != 0) {
		dev_priv->status = drm_core_findmap(dev, init->status_offset);
		if (!dev_priv->status) {
			DRM_ERROR("could not find shadow status region!\n");
			savage_do_cleanup_bci(dev);
			return DRM_ERR(EINVAL);
		}
	} else {
		dev_priv->status = NULL;
	}
	if (dev_priv->dma_type == SAVAGE_DMA_AGP && init->buffers_offset) {
		dev->agp_buffer_token = init->buffers_offset;
		dev->agp_buffer_map = drm_core_findmap(dev,
						       init->buffers_offset);
		if (!dev->agp_buffer_map) {
			DRM_ERROR("could not find DMA buffer region!\n");
			savage_do_cleanup_bci(dev);
			return DRM_ERR(EINVAL);
		}
		drm_core_ioremap(dev->agp_buffer_map, dev);
		if (!dev->agp_buffer_map->handle) {
			DRM_ERROR("failed to ioremap DMA buffer region!\n");
			savage_do_cleanup_bci(dev);
			return DRM_ERR(ENOMEM);
		}
	}
	if (init->agp_textures_offset) {
		dev_priv->agp_textures =
		    drm_core_findmap(dev, init->agp_textures_offset);
		if (!dev_priv->agp_textures) {
			DRM_ERROR("could not find agp texture region!\n");
			savage_do_cleanup_bci(dev);
			return DRM_ERR(EINVAL);
		}
	} else {
		dev_priv->agp_textures = NULL;
	}
	if (init->cmd_dma_offset) {
		if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
			DRM_ERROR("command DMA not supported on "
				  "Savage3D/MX/IX.\n");
			savage_do_cleanup_bci(dev);
			return DRM_ERR(EINVAL);
		}
		if (dev->dma && dev->dma->buflist) {
			DRM_ERROR("command and vertex DMA not supported "
				  "at the same time.\n");
			savage_do_cleanup_bci(dev);
			return DRM_ERR(EINVAL);
		}
		dev_priv->cmd_dma = drm_core_findmap(dev, init->cmd_dma_offset);
		if (!dev_priv->cmd_dma) {
			DRM_ERROR("could not find command DMA region!\n");
			savage_do_cleanup_bci(dev);
			return DRM_ERR(EINVAL);
		}
		if (dev_priv->dma_type == SAVAGE_DMA_AGP) {
			if (dev_priv->cmd_dma->type != _DRM_AGP) {
				DRM_ERROR("AGP command DMA region is not a "
					  "_DRM_AGP map!\n");
				savage_do_cleanup_bci(dev);
				return DRM_ERR(EINVAL);
			}
			drm_core_ioremap(dev_priv->cmd_dma, dev);
			if (!dev_priv->cmd_dma->handle) {
				DRM_ERROR("failed to ioremap command "
					  "DMA region!\n");
				savage_do_cleanup_bci(dev);
				return DRM_ERR(ENOMEM);
			}
		} else if (dev_priv->cmd_dma->type != _DRM_CONSISTENT) {
			DRM_ERROR("PCI command DMA region is not a "
				  "_DRM_CONSISTENT map!\n");
			savage_do_cleanup_bci(dev);
			return DRM_ERR(EINVAL);
		}
	} else {
		dev_priv->cmd_dma = NULL;
	}
	dev_priv->dma_flush = savage_dma_flush;
	if (!dev_priv->cmd_dma) {
		DRM_DEBUG("falling back to faked command DMA.\n");
		dev_priv->fake_dma.offset = 0;
		dev_priv->fake_dma.size = SAVAGE_FAKE_DMA_SIZE;
		dev_priv->fake_dma.type = _DRM_SHM;
		dev_priv->fake_dma.handle = drm_alloc(SAVAGE_FAKE_DMA_SIZE,
						      DRM_MEM_DRIVER);
		if (!dev_priv->fake_dma.handle) {
			DRM_ERROR("could not allocate faked DMA buffer!\n");
			savage_do_cleanup_bci(dev);
			return DRM_ERR(ENOMEM);
		}
		dev_priv->cmd_dma = &dev_priv->fake_dma;
		dev_priv->dma_flush = savage_fake_dma_flush;
	}

	dev_priv->sarea_priv =
	    (drm_savage_sarea_t *) ((uint8_t *) dev_priv->sarea->handle +
				    init->sarea_priv_offset);
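	/* The bitmap descriptors (BDs) built next each pack a surface's
	 * layout into one word: the stride in pixels in the low bits, the
	 * color depth at SAVAGE_BD_BPP_SHIFT, the tiling format at
	 * SAVAGE_BD_TILE_SHIFT, and SAVAGE_BD_BW_DISABLE to disable
	 * block-write for the surface. Worked example: a 16bpp front
	 * buffer with a 2048-byte pitch yields
	 * front_stride = 2048 / (16/8) = 1024 pixels.
	 */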
	/* setup bitmap descriptors */
	{
		unsigned int color_tile_format;
		unsigned int depth_tile_format;
		unsigned int front_stride, back_stride, depth_stride;
		if (dev_priv->chipset <= S3_SAVAGE4) {
			color_tile_format = dev_priv->fb_bpp == 16 ?
			    SAVAGE_BD_TILE_16BPP : SAVAGE_BD_TILE_32BPP;
			depth_tile_format = dev_priv->depth_bpp == 16 ?
			    SAVAGE_BD_TILE_16BPP : SAVAGE_BD_TILE_32BPP;
		} else {
			color_tile_format = SAVAGE_BD_TILE_DEST;
			depth_tile_format = SAVAGE_BD_TILE_DEST;
		}
		front_stride = dev_priv->front_pitch / (dev_priv->fb_bpp / 8);
		back_stride = dev_priv->back_pitch / (dev_priv->fb_bpp / 8);
		depth_stride =
		    dev_priv->depth_pitch / (dev_priv->depth_bpp / 8);

		dev_priv->front_bd = front_stride | SAVAGE_BD_BW_DISABLE |
		    (dev_priv->fb_bpp << SAVAGE_BD_BPP_SHIFT) |
		    (color_tile_format << SAVAGE_BD_TILE_SHIFT);

		dev_priv->back_bd = back_stride | SAVAGE_BD_BW_DISABLE |
		    (dev_priv->fb_bpp << SAVAGE_BD_BPP_SHIFT) |
		    (color_tile_format << SAVAGE_BD_TILE_SHIFT);

		dev_priv->depth_bd = depth_stride | SAVAGE_BD_BW_DISABLE |
		    (dev_priv->depth_bpp << SAVAGE_BD_BPP_SHIFT) |
		    (depth_tile_format << SAVAGE_BD_TILE_SHIFT);
	}
	/* setup status and bci ptr */
	dev_priv->event_counter = 0;
	dev_priv->event_wrap = 0;
	dev_priv->bci_ptr = (volatile uint32_t *)
	    ((uint8_t *) dev_priv->mmio->handle + SAVAGE_BCI_OFFSET);
	if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
		dev_priv->status_used_mask = SAVAGE_FIFO_USED_MASK_S3D;
	} else {
		dev_priv->status_used_mask = SAVAGE_FIFO_USED_MASK_S4;
	}
	if (dev_priv->status != NULL) {
		dev_priv->status_ptr =
		    (volatile uint32_t *)dev_priv->status->handle;
		dev_priv->wait_fifo = savage_bci_wait_fifo_shadow;
		dev_priv->wait_evnt = savage_bci_wait_event_shadow;
		dev_priv->status_ptr[1023] = dev_priv->event_counter;
	} else {
		dev_priv->status_ptr = NULL;
		if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
			dev_priv->wait_fifo = savage_bci_wait_fifo_s3d;
		} else {
			dev_priv->wait_fifo = savage_bci_wait_fifo_s4;
		}
		dev_priv->wait_evnt = savage_bci_wait_event_reg;
	}

	/* cliprect functions */
	if (S3_SAVAGE3D_SERIES(dev_priv->chipset))
		dev_priv->emit_clip_rect = savage_emit_clip_rect_s3d;
	else
		dev_priv->emit_clip_rect = savage_emit_clip_rect_s4;

	if (savage_freelist_init(dev) < 0) {
		DRM_ERROR("could not initialize freelist\n");
		savage_do_cleanup_bci(dev);
		return DRM_ERR(ENOMEM);
	}

	if (savage_dma_init(dev_priv) < 0) {
		DRM_ERROR("could not initialize command DMA\n");
		savage_do_cleanup_bci(dev);
		return DRM_ERR(ENOMEM);
	}

	return 0;
}
static int savage_do_cleanup_bci(drm_device_t * dev)
{
	drm_savage_private_t *dev_priv = dev->dev_private;

	if (dev_priv->cmd_dma == &dev_priv->fake_dma) {
		if (dev_priv->fake_dma.handle)
			drm_free(dev_priv->fake_dma.handle,
				 SAVAGE_FAKE_DMA_SIZE, DRM_MEM_DRIVER);
	} else if (dev_priv->cmd_dma && dev_priv->cmd_dma->handle &&
		   dev_priv->cmd_dma->type == _DRM_AGP &&
		   dev_priv->dma_type == SAVAGE_DMA_AGP)
		drm_core_ioremapfree(dev_priv->cmd_dma, dev);

	if (dev_priv->dma_type == SAVAGE_DMA_AGP &&
	    dev->agp_buffer_map && dev->agp_buffer_map->handle) {
		drm_core_ioremapfree(dev->agp_buffer_map, dev);
		/* make sure the next instance (which may be running
		 * in PCI mode) doesn't try to use an old
		 * agp_buffer_map. */
		dev->agp_buffer_map = NULL;
	}

	if (dev_priv->dma_pages)
		drm_free(dev_priv->dma_pages,
			 sizeof(drm_savage_dma_page_t) * dev_priv->nr_dma_pages,
			 DRM_MEM_DRIVER);

	return 0;
}
static int savage_bci_init(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_savage_init_t init;

	LOCK_TEST_WITH_RETURN(dev, filp);

	DRM_COPY_FROM_USER_IOCTL(init, (drm_savage_init_t __user *) data,
				 sizeof(init));

	switch (init.func) {
	case SAVAGE_INIT_BCI:
		return savage_do_init_bci(dev, &init);
	case SAVAGE_CLEANUP_BCI:
		return savage_do_cleanup_bci(dev);
	}

	return DRM_ERR(EINVAL);
}
static int savage_bci_event_emit(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_savage_private_t *dev_priv = dev->dev_private;
	drm_savage_event_emit_t event;

	DRM_DEBUG("\n");

	LOCK_TEST_WITH_RETURN(dev, filp);

	DRM_COPY_FROM_USER_IOCTL(event, (drm_savage_event_emit_t __user *) data,
				 sizeof(event));

	event.count = savage_bci_emit_event(dev_priv, event.flags);
	event.count |= dev_priv->event_wrap << 16;
	DRM_COPY_TO_USER_IOCTL((drm_savage_event_emit_t __user *) data,
			       event, sizeof(event));

	return 0;
}
static int savage_bci_event_wait(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_savage_private_t *dev_priv = dev->dev_private;
	drm_savage_event_wait_t event;
	unsigned int event_e, hw_e;
	unsigned int event_w, hw_w;

	DRM_DEBUG("\n");

	DRM_COPY_FROM_USER_IOCTL(event, (drm_savage_event_wait_t __user *) data,
				 sizeof(event));

	UPDATE_EVENT_COUNTER();
	if (dev_priv->status_ptr)
		hw_e = dev_priv->status_ptr[1] & 0xffff;
	else
		hw_e = SAVAGE_READ(SAVAGE_STATUS_WORD1) & 0xffff;
	hw_w = dev_priv->event_wrap;
	if (hw_e > dev_priv->event_counter)
		hw_w--;		/* hardware hasn't passed the last wrap yet */

	event_e = event.count & 0xffff;
	event_w = event.count >> 16;

	/* Don't need to wait if
	 * - event counter wrapped since the event was emitted or
	 * - the hardware has advanced up to or over the event to wait for.
	 */
	if (event_w < hw_w || (event_w == hw_w && event_e <= hw_e))
		return 0;
	else
		return dev_priv->wait_evnt(dev_priv, event_e);
}
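/* The 32-bit event.count returned by the emit ioctl packs the 16-bit
 * hardware tag in the low half and the wrap count in the high half.
 * Example: count = 0x0003fffe means tag 0xfffe in wrap 3; once the
 * counter wraps to wrap 4, event_w (3) < hw_w (4) and the wait returns
 * immediately without touching the hardware.
 */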
/*
 * DMA buffer management
 */

static int savage_bci_get_buffers(DRMFILE filp, drm_device_t *dev, drm_dma_t *d)
{
	drm_buf_t *buf;
	int i;

	for (i = d->granted_count; i < d->request_count; i++) {
		buf = savage_freelist_get(dev);
		if (!buf)
			return DRM_ERR(EAGAIN);

		buf->filp = filp;

		if (DRM_COPY_TO_USER(&d->request_indices[i],
				     &buf->idx, sizeof(buf->idx)))
			return DRM_ERR(EFAULT);
		if (DRM_COPY_TO_USER(&d->request_sizes[i],
				     &buf->total, sizeof(buf->total)))
			return DRM_ERR(EFAULT);

		d->granted_count++;
	}
	return 0;
}
int savage_bci_buffers(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_device_dma_t *dma = dev->dma;
	drm_dma_t d;
	int ret = 0;

	LOCK_TEST_WITH_RETURN(dev, filp);

	DRM_COPY_FROM_USER_IOCTL(d, (drm_dma_t __user *) data, sizeof(d));

	/* Please don't send us buffers.
	 */
	if (d.send_count != 0) {
		DRM_ERROR("Process %d trying to send %d buffers via drmDMA\n",
			  DRM_CURRENTPID, d.send_count);
		return DRM_ERR(EINVAL);
	}

	/* We'll send you buffers.
	 */
	if (d.request_count < 0 || d.request_count > dma->buf_count) {
		DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n",
			  DRM_CURRENTPID, d.request_count, dma->buf_count);
		return DRM_ERR(EINVAL);
	}

	d.granted_count = 0;

	if (d.request_count) {
		ret = savage_bci_get_buffers(filp, dev, &d);
	}

	DRM_COPY_TO_USER_IOCTL((drm_dma_t __user *) data, d, sizeof(d));

	return ret;
}
void savage_reclaim_buffers(drm_device_t *dev, DRMFILE filp)
{
	drm_device_dma_t *dma = dev->dma;
	drm_savage_private_t *dev_priv = dev->dev_private;
	int i;

	if (!dma)
		return;
	if (!dev_priv)
		return;
	if (!dma->buflist)
		return;

	/*i830_flush_queue(dev); */

	for (i = 0; i < dma->buf_count; i++) {
		drm_buf_t *buf = dma->buflist[i];
		drm_savage_buf_priv_t *buf_priv = buf->dev_private;

		if (buf->filp == filp && buf_priv &&
		    buf_priv->next == NULL && buf_priv->prev == NULL) {
			uint16_t event;
			DRM_DEBUG("reclaimed from client\n");
			event = savage_bci_emit_event(dev_priv, SAVAGE_WAIT_3D);
			SET_AGE(&buf_priv->age, event, dev_priv->event_wrap);
			savage_freelist_put(dev, buf);
		}
	}

	drm_core_reclaim_buffers(dev, filp);
}
drm_ioctl_desc_t savage_ioctls[] = {
	[DRM_IOCTL_NR(DRM_SAVAGE_BCI_INIT)] = {savage_bci_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
	[DRM_IOCTL_NR(DRM_SAVAGE_BCI_CMDBUF)] = {savage_bci_cmdbuf, DRM_AUTH},
	[DRM_IOCTL_NR(DRM_SAVAGE_BCI_EVENT_EMIT)] = {savage_bci_event_emit, DRM_AUTH},
	[DRM_IOCTL_NR(DRM_SAVAGE_BCI_EVENT_WAIT)] = {savage_bci_event_wait, DRM_AUTH},
};

int savage_max_ioctl = DRM_ARRAY_SIZE(savage_ioctls);