/* savage_bci.c -- BCI support for Savage
 *
 * Copyright 2004  Felix Kuehling
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub license,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NON-INFRINGEMENT. IN NO EVENT SHALL FELIX KUEHLING BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
 * CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/uaccess.h>

#include <drm/drm_device.h>
#include <drm/drm_file.h>
#include <drm/drm_print.h>
#include <drm/savage_drm.h>

#include "savage_drv.h"

/* Need a long timeout: shadow status updates can take a while,
 * and so can waiting for events when the queue is full. */
#define SAVAGE_DEFAULT_USEC_TIMEOUT	1000000	/* 1s */
#define SAVAGE_EVENT_USEC_TIMEOUT	5000000	/* 5s */
#define SAVAGE_FREELIST_DEBUG		0

static int savage_do_cleanup_bci(struct drm_device *dev);
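
/*
 * FIFO wait functions.
 *
 * Before emitting commands, the driver must guarantee enough free
 * space in the BCI FIFO and command overflow buffer (COB). Three
 * variants follow: one polls the shadow status page, the other two
 * poll the status register directly (Savage3D vs. Savage4 register
 * layouts). savage_do_init_bci() below picks one and stores it in
 * dev_priv->wait_fifo.
 */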

static int
savage_bci_wait_fifo_shadow(drm_savage_private_t * dev_priv, unsigned int n)
{
	uint32_t mask = dev_priv->status_used_mask;
	uint32_t threshold = dev_priv->bci_threshold_hi;
	uint32_t status;
	int i;

#if SAVAGE_BCI_DEBUG
	if (n > dev_priv->cob_size + SAVAGE_BCI_FIFO_SIZE - threshold)
		DRM_ERROR("Trying to emit %d words "
			  "(more than guaranteed space in COB)\n", n);
#endif

	for (i = 0; i < SAVAGE_DEFAULT_USEC_TIMEOUT; i++) {
		mb();
		status = dev_priv->status_ptr[0];
		if ((status & mask) < threshold)
			return 0;
		udelay(1);
	}

#if SAVAGE_BCI_DEBUG
	DRM_ERROR("failed!\n");
	DRM_INFO("   status=0x%08x, threshold=0x%08x\n", status, threshold);
#endif
	return -EBUSY;
}

static int
savage_bci_wait_fifo_s3d(drm_savage_private_t * dev_priv, unsigned int n)
{
	uint32_t maxUsed = dev_priv->cob_size + SAVAGE_BCI_FIFO_SIZE - n;
	uint32_t status;
	int i;

	for (i = 0; i < SAVAGE_DEFAULT_USEC_TIMEOUT; i++) {
		status = SAVAGE_READ(SAVAGE_STATUS_WORD0);
		if ((status & SAVAGE_FIFO_USED_MASK_S3D) <= maxUsed)
			return 0;
		udelay(1);
	}

#if SAVAGE_BCI_DEBUG
	DRM_ERROR("failed!\n");
	DRM_INFO("   status=0x%08x\n", status);
#endif
	return -EBUSY;
}

static int
savage_bci_wait_fifo_s4(drm_savage_private_t * dev_priv, unsigned int n)
{
	uint32_t maxUsed = dev_priv->cob_size + SAVAGE_BCI_FIFO_SIZE - n;
	uint32_t status;
	int i;

	for (i = 0; i < SAVAGE_DEFAULT_USEC_TIMEOUT; i++) {
		status = SAVAGE_READ(SAVAGE_ALT_STATUS_WORD0);
		if ((status & SAVAGE_FIFO_USED_MASK_S4) <= maxUsed)
			return 0;
		udelay(1);
	}

#if SAVAGE_BCI_DEBUG
	DRM_ERROR("failed!\n");
	DRM_INFO("   status=0x%08x\n", status);
#endif
	return -EBUSY;
}

/*
 * Waiting for events.
 *
 * The BIOS resets the event tag to 0 on mode changes. Therefore we
 * never emit 0 to the event tag. If we find a 0 event tag we know the
 * BIOS stomped on it and return success assuming that the BIOS waited
 * for engine idle.
 *
 * Note: if the Xserver uses the event tag it has to follow the same
 * rule. Otherwise there may be glitches every 2^16 events.
 */
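
/*
 * The comparison (((status & 0xffff) - e) & 0xffff) <= 0x7fff below
 * treats the 16-bit difference as a signed value: the event has
 * passed iff the current tag is at most 2^15 - 1 events "ahead" of e.
 * Worked example: tag = 0x0002, e = 0xfffe gives
 * (0x0002 - 0xfffe) & 0xffff = 0x0004 <= 0x7fff, so the wait
 * succeeds even though the tag wrapped past e numerically.
 */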

static int
savage_bci_wait_event_shadow(drm_savage_private_t * dev_priv, uint16_t e)
{
	uint32_t status;
	int i;

	for (i = 0; i < SAVAGE_EVENT_USEC_TIMEOUT; i++) {
		mb();
		status = dev_priv->status_ptr[1];
		if ((((status & 0xffff) - e) & 0xffff) <= 0x7fff ||
		    (status & 0xffff) == 0)
			return 0;
		udelay(1);
	}

#if SAVAGE_BCI_DEBUG
	DRM_ERROR("failed!\n");
	DRM_INFO("   status=0x%08x, e=0x%04x\n", status, e);
#endif
	return -EBUSY;
}

static int
savage_bci_wait_event_reg(drm_savage_private_t * dev_priv, uint16_t e)
{
	uint32_t status;
	int i;

	for (i = 0; i < SAVAGE_EVENT_USEC_TIMEOUT; i++) {
		status = SAVAGE_READ(SAVAGE_STATUS_WORD1);
		if ((((status & 0xffff) - e) & 0xffff) <= 0x7fff ||
		    (status & 0xffff) == 0)
			return 0;
		udelay(1);
	}

#if SAVAGE_BCI_DEBUG
	DRM_ERROR("failed!\n");
	DRM_INFO("   status=0x%08x, e=0x%04x\n", status, e);
#endif
	return -EBUSY;
}

uint16_t savage_bci_emit_event(drm_savage_private_t * dev_priv,
			       unsigned int flags)
{
	uint16_t count;
	BCI_LOCALS;

	if (dev_priv->status_ptr) {
		/* coordinate with Xserver */
		count = dev_priv->status_ptr[1023];
		if (count < dev_priv->event_counter)
			dev_priv->event_wrap++;
	} else {
		count = dev_priv->event_counter;
	}
	count = (count + 1) & 0xffff;
	if (count == 0) {
		count++;	/* See the comment above savage_wait_event_*. */
		dev_priv->event_wrap++;
	}
	dev_priv->event_counter = count;
	if (dev_priv->status_ptr)
		dev_priv->status_ptr[1023] = (uint32_t) count;

	if ((flags & (SAVAGE_WAIT_2D | SAVAGE_WAIT_3D))) {
		unsigned int wait_cmd = BCI_CMD_WAIT;
		if ((flags & SAVAGE_WAIT_2D))
			wait_cmd |= BCI_CMD_WAIT_2D;
		if ((flags & SAVAGE_WAIT_3D))
			wait_cmd |= BCI_CMD_WAIT_3D;
		BEGIN_BCI(2);
		BCI_WRITE(wait_cmd);
	} else {
		BEGIN_BCI(1);
	}
	BCI_WRITE(BCI_CMD_UPDATE_EVENT_TAG | (uint32_t) count);

	return count;
}
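
/*
 * Usage note: the 16-bit tag returned by savage_bci_emit_event() is
 * combined with dev_priv->event_wrap to form a 32-bit event cookie,
 * as the SAVAGE_BCI_EVENT_EMIT ioctl handler further down does:
 *
 *	event->count = savage_bci_emit_event(dev_priv, event->flags);
 *	event->count |= dev_priv->event_wrap << 16;
 */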

/*
 * Freelist management
 */
static int savage_freelist_init(struct drm_device * dev)
{
	drm_savage_private_t *dev_priv = dev->dev_private;
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf *buf;
	drm_savage_buf_priv_t *entry;
	int i;
	DRM_DEBUG("count=%d\n", dma->buf_count);

	dev_priv->head.next = &dev_priv->tail;
	dev_priv->head.prev = NULL;
	dev_priv->head.buf = NULL;

	dev_priv->tail.next = NULL;
	dev_priv->tail.prev = &dev_priv->head;
	dev_priv->tail.buf = NULL;

	for (i = 0; i < dma->buf_count; i++) {
		buf = dma->buflist[i];
		entry = buf->dev_private;

		SET_AGE(&entry->age, 0, 0);
		entry->buf = buf;

		entry->next = dev_priv->head.next;
		entry->prev = &dev_priv->head;
		dev_priv->head.next->prev = entry;
		dev_priv->head.next = entry;
	}

	return 0;
}
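
/*
 * The freelist is a doubly-linked list with head and tail sentinels.
 * Fresh buffers are inserted at the head; savage_freelist_get() takes
 * the oldest buffer from the tail, but only once TEST_AGE() confirms
 * the hardware has passed the event the buffer was aged with (or the
 * event tag reads 0, meaning the BIOS reset it, see above).
 */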

static struct drm_buf *savage_freelist_get(struct drm_device * dev)
{
	drm_savage_private_t *dev_priv = dev->dev_private;
	drm_savage_buf_priv_t *tail = dev_priv->tail.prev;
	uint16_t event;
	unsigned int wrap;
	DRM_DEBUG("\n");

	UPDATE_EVENT_COUNTER();
	if (dev_priv->status_ptr)
		event = dev_priv->status_ptr[1] & 0xffff;
	else
		event = SAVAGE_READ(SAVAGE_STATUS_WORD1) & 0xffff;
	wrap = dev_priv->event_wrap;
	if (event > dev_priv->event_counter)
		wrap--;		/* hardware hasn't passed the last wrap yet */

	DRM_DEBUG("   tail=0x%04x %d\n", tail->age.event, tail->age.wrap);
	DRM_DEBUG("   head=0x%04x %d\n", event, wrap);

	if (tail->buf && (TEST_AGE(&tail->age, event, wrap) || event == 0)) {
		drm_savage_buf_priv_t *next = tail->next;
		drm_savage_buf_priv_t *prev = tail->prev;
		prev->next = next;
		next->prev = prev;
		tail->next = tail->prev = NULL;
		return tail->buf;
	}

	DRM_DEBUG("returning NULL, tail->buf=%p!\n", tail->buf);
	return NULL;
}

void savage_freelist_put(struct drm_device * dev, struct drm_buf * buf)
{
	drm_savage_private_t *dev_priv = dev->dev_private;
	drm_savage_buf_priv_t *entry = buf->dev_private, *prev, *next;

	DRM_DEBUG("age=0x%04x wrap=%d\n", entry->age.event, entry->age.wrap);

	if (entry->next != NULL || entry->prev != NULL) {
		DRM_ERROR("entry already on freelist.\n");
		return;
	}

	prev = &dev_priv->head;
	next = prev->next;
	prev->next = entry;
	next->prev = entry;
	entry->prev = prev;
	entry->next = next;
}

/*
 * Command DMA
 */
static int savage_dma_init(drm_savage_private_t * dev_priv)
{
	unsigned int i;

	dev_priv->nr_dma_pages = dev_priv->cmd_dma->size /
	    (SAVAGE_DMA_PAGE_SIZE * 4);
	dev_priv->dma_pages = kmalloc_array(dev_priv->nr_dma_pages,
					    sizeof(drm_savage_dma_page_t),
					    GFP_KERNEL);
	if (dev_priv->dma_pages == NULL)
		return -ENOMEM;

	for (i = 0; i < dev_priv->nr_dma_pages; ++i) {
		SET_AGE(&dev_priv->dma_pages[i].age, 0, 0);
		dev_priv->dma_pages[i].used = 0;
		dev_priv->dma_pages[i].flushed = 0;
	}
	SET_AGE(&dev_priv->last_dma_age, 0, 0);

	dev_priv->first_dma_page = 0;
	dev_priv->current_dma_page = 0;

	return 0;
}
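
/*
 * Each DMA page carries an age (event tag + wrap count). A page may
 * only be reused once the hardware has passed that event; see
 * savage_dma_wait() below. savage_dma_reset() stamps all pages with a
 * freshly emitted event, so they become available as soon as the
 * engine drains.
 */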

void savage_dma_reset(drm_savage_private_t * dev_priv)
{
	uint16_t event;
	unsigned int wrap, i;
	event = savage_bci_emit_event(dev_priv, 0);
	wrap = dev_priv->event_wrap;
	for (i = 0; i < dev_priv->nr_dma_pages; ++i) {
		SET_AGE(&dev_priv->dma_pages[i].age, event, wrap);
		dev_priv->dma_pages[i].used = 0;
		dev_priv->dma_pages[i].flushed = 0;
	}
	SET_AGE(&dev_priv->last_dma_age, event, wrap);
	dev_priv->first_dma_page = dev_priv->current_dma_page = 0;
}

void savage_dma_wait(drm_savage_private_t * dev_priv, unsigned int page)
{
	uint16_t event;
	unsigned int wrap;

	/* Faked DMA buffer pages don't age. */
	if (dev_priv->cmd_dma == &dev_priv->fake_dma)
		return;

	UPDATE_EVENT_COUNTER();
	if (dev_priv->status_ptr)
		event = dev_priv->status_ptr[1] & 0xffff;
	else
		event = SAVAGE_READ(SAVAGE_STATUS_WORD1) & 0xffff;
	wrap = dev_priv->event_wrap;
	if (event > dev_priv->event_counter)
		wrap--;		/* hardware hasn't passed the last wrap yet */

	if (dev_priv->dma_pages[page].age.wrap > wrap ||
	    (dev_priv->dma_pages[page].age.wrap == wrap &&
	     dev_priv->dma_pages[page].age.event > event)) {
		if (dev_priv->wait_evnt(dev_priv,
					dev_priv->dma_pages[page].age.event)
		    < 0)
			DRM_ERROR("wait_evnt failed!\n");
	}
}
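
/*
 * savage_dma_alloc() reserves room for n dwords in the command DMA
 * buffer and returns a pointer to it. If the request does not fit in
 * the remaining pages, pending commands are flushed first and the
 * allocation restarts at page 0, waiting on page ages where needed.
 */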

uint32_t *savage_dma_alloc(drm_savage_private_t * dev_priv, unsigned int n)
{
	unsigned int cur = dev_priv->current_dma_page;
	unsigned int rest = SAVAGE_DMA_PAGE_SIZE -
	    dev_priv->dma_pages[cur].used;
	unsigned int nr_pages = (n - rest + SAVAGE_DMA_PAGE_SIZE - 1) /
	    SAVAGE_DMA_PAGE_SIZE;
	uint32_t *dma_ptr;
	unsigned int i;

	DRM_DEBUG("cur=%u, cur->used=%u, n=%u, rest=%u, nr_pages=%u\n",
		  cur, dev_priv->dma_pages[cur].used, n, rest, nr_pages);

	if (cur + nr_pages < dev_priv->nr_dma_pages) {
		dma_ptr = (uint32_t *) dev_priv->cmd_dma->handle +
		    cur * SAVAGE_DMA_PAGE_SIZE + dev_priv->dma_pages[cur].used;
		if (n < rest)
			rest = n;
		dev_priv->dma_pages[cur].used += rest;
		n -= rest;
		cur++;
	} else {
		dev_priv->dma_flush(dev_priv);
		nr_pages =
		    (n + SAVAGE_DMA_PAGE_SIZE - 1) / SAVAGE_DMA_PAGE_SIZE;
		for (i = cur; i < dev_priv->nr_dma_pages; ++i) {
			dev_priv->dma_pages[i].age = dev_priv->last_dma_age;
			dev_priv->dma_pages[i].used = 0;
			dev_priv->dma_pages[i].flushed = 0;
		}
		dma_ptr = (uint32_t *) dev_priv->cmd_dma->handle;
		dev_priv->first_dma_page = cur = 0;
	}

	for (i = cur; nr_pages > 0; ++i, --nr_pages) {
#if SAVAGE_DMA_DEBUG
		if (dev_priv->dma_pages[i].used) {
			DRM_ERROR("unflushed page %u: used=%u\n",
				  i, dev_priv->dma_pages[i].used);
		}
#endif
		if (n > SAVAGE_DMA_PAGE_SIZE)
			dev_priv->dma_pages[i].used = SAVAGE_DMA_PAGE_SIZE;
		else
			dev_priv->dma_pages[i].used = n;
		n -= SAVAGE_DMA_PAGE_SIZE;
	}
	dev_priv->current_dma_page = --i;

	DRM_DEBUG("cur=%u, cur->used=%u, n=%u\n",
		  i, dev_priv->dma_pages[i].used, n);

	savage_dma_wait(dev_priv, dev_priv->current_dma_page);

	return dma_ptr;
}
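
/*
 * savage_dma_flush() hands the accumulated command words to the BCI
 * via one DMA transfer. As the comment inside notes, the transfer
 * length is padded to a multiple of 2 entries and the next block is
 * aligned to 8 entries, using two's-complement arithmetic. Worked
 * example for used = 5: pad = -5 & 1 = 1 (length becomes 6), and
 * align = -(5 + 1) & 7 = 2, so the next block starts at entry 8.
 */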

static void savage_dma_flush(drm_savage_private_t * dev_priv)
{
	unsigned int first = dev_priv->first_dma_page;
	unsigned int cur = dev_priv->current_dma_page;
	uint16_t event;
	unsigned int wrap, pad, align, len, i;
	unsigned long phys_addr;
	BCI_LOCALS;

	if (first == cur &&
	    dev_priv->dma_pages[cur].used == dev_priv->dma_pages[cur].flushed)
		return;

	/* pad length to multiples of 2 entries
	 * align start of next DMA block to multiples of 8 entries */
	pad = -dev_priv->dma_pages[cur].used & 1;
	align = -(dev_priv->dma_pages[cur].used + pad) & 7;

	DRM_DEBUG("first=%u, cur=%u, first->flushed=%u, cur->used=%u, "
		  "pad=%u, align=%u\n",
		  first, cur, dev_priv->dma_pages[first].flushed,
		  dev_priv->dma_pages[cur].used, pad, align);

	/* pad with noops */
	if (pad) {
		uint32_t *dma_ptr = (uint32_t *) dev_priv->cmd_dma->handle +
		    cur * SAVAGE_DMA_PAGE_SIZE + dev_priv->dma_pages[cur].used;
		dev_priv->dma_pages[cur].used += pad;
		while (pad != 0) {
			*dma_ptr++ = BCI_CMD_WAIT;
			pad--;
		}
	}

	mb();

	/* do flush ... */
	phys_addr = dev_priv->cmd_dma->offset +
	    (first * SAVAGE_DMA_PAGE_SIZE +
	     dev_priv->dma_pages[first].flushed) * 4;
	len = (cur - first) * SAVAGE_DMA_PAGE_SIZE +
	    dev_priv->dma_pages[cur].used - dev_priv->dma_pages[first].flushed;

	DRM_DEBUG("phys_addr=%lx, len=%u\n",
		  phys_addr | dev_priv->dma_type, len);

	BEGIN_BCI(3);
	BCI_SET_REGISTERS(SAVAGE_DMABUFADDR, 1);
	BCI_WRITE(phys_addr | dev_priv->dma_type);
	BCI_DMA(len);

	/* fix alignment of the start of the next block */
	dev_priv->dma_pages[cur].used += align;

	/* age DMA pages */
	event = savage_bci_emit_event(dev_priv, 0);
	wrap = dev_priv->event_wrap;
	for (i = first; i < cur; ++i) {
		SET_AGE(&dev_priv->dma_pages[i].age, event, wrap);
		dev_priv->dma_pages[i].used = 0;
		dev_priv->dma_pages[i].flushed = 0;
	}
	/* age the current page only when it's full */
	if (dev_priv->dma_pages[cur].used == SAVAGE_DMA_PAGE_SIZE) {
		SET_AGE(&dev_priv->dma_pages[cur].age, event, wrap);
		dev_priv->dma_pages[cur].used = 0;
		dev_priv->dma_pages[cur].flushed = 0;
		/* advance to next page */
		cur++;
		if (cur == dev_priv->nr_dma_pages)
			cur = 0;
		dev_priv->first_dma_page = dev_priv->current_dma_page = cur;
	} else {
		dev_priv->first_dma_page = cur;
		dev_priv->dma_pages[cur].flushed = dev_priv->dma_pages[i].used;
	}
	SET_AGE(&dev_priv->last_dma_age, event, wrap);

	DRM_DEBUG("first=cur=%u, cur->used=%u, cur->flushed=%u\n", cur,
		  dev_priv->dma_pages[cur].used,
		  dev_priv->dma_pages[cur].flushed);
}
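
/*
 * Fallback when no real command DMA region is available (selected in
 * savage_do_init_bci()): the "faked" flush replays the buffered words
 * one by one through the memory-mapped BCI port instead of issuing a
 * DMA transfer.
 */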

static void savage_fake_dma_flush(drm_savage_private_t * dev_priv)
{
	unsigned int i, j;
	BCI_LOCALS;

	if (dev_priv->first_dma_page == dev_priv->current_dma_page &&
	    dev_priv->dma_pages[dev_priv->current_dma_page].used == 0)
		return;

	DRM_DEBUG("first=%u, cur=%u, cur->used=%u\n",
		  dev_priv->first_dma_page, dev_priv->current_dma_page,
		  dev_priv->dma_pages[dev_priv->current_dma_page].used);

	for (i = dev_priv->first_dma_page;
	     i <= dev_priv->current_dma_page && dev_priv->dma_pages[i].used;
	     ++i) {
		uint32_t *dma_ptr = (uint32_t *) dev_priv->cmd_dma->handle +
		    i * SAVAGE_DMA_PAGE_SIZE;
#if SAVAGE_DMA_DEBUG
		/* Sanity check: all pages except the last one must be full. */
		if (i < dev_priv->current_dma_page &&
		    dev_priv->dma_pages[i].used != SAVAGE_DMA_PAGE_SIZE) {
			DRM_ERROR("partial DMA page %u: used=%u",
				  i, dev_priv->dma_pages[i].used);
		}
#endif
		BEGIN_BCI(dev_priv->dma_pages[i].used);
		for (j = 0; j < dev_priv->dma_pages[i].used; ++j) {
			BCI_WRITE(dma_ptr[j]);
		}
		dev_priv->dma_pages[i].used = 0;
	}

	/* reset to first page */
	dev_priv->first_dma_page = dev_priv->current_dma_page = 0;
}

int savage_driver_load(struct drm_device *dev, unsigned long chipset)
{
	drm_savage_private_t *dev_priv;

	dev_priv = kzalloc(sizeof(drm_savage_private_t), GFP_KERNEL);
	if (dev_priv == NULL)
		return -ENOMEM;

	dev->dev_private = (void *)dev_priv;

	dev_priv->chipset = (enum savage_family)chipset;

	pci_set_master(dev->pdev);

	return 0;
}

/*
 * Initialize mappings. On Savage4 and SavageIX the alignment
 * and size of the aperture is not suitable for automatic MTRR setup
 * in drm_legacy_addmap. Therefore we add them manually before the maps are
 * initialized, and tear them down on last close.
 */
int savage_driver_firstopen(struct drm_device *dev)
{
	drm_savage_private_t *dev_priv = dev->dev_private;
	unsigned long mmio_base, fb_base, fb_size, aperture_base;
	int ret = 0;

	if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
		fb_base = pci_resource_start(dev->pdev, 0);
		fb_size = SAVAGE_FB_SIZE_S3;
		mmio_base = fb_base + SAVAGE_FB_SIZE_S3;
		aperture_base = fb_base + SAVAGE_APERTURE_OFFSET;
		/* this should always be true */
		if (pci_resource_len(dev->pdev, 0) == 0x08000000) {
			/* Don't make MMIO write-combining! We need 3
			 * MTRRs. */
			dev_priv->mtrr_handles[0] =
				arch_phys_wc_add(fb_base, 0x01000000);
			dev_priv->mtrr_handles[1] =
				arch_phys_wc_add(fb_base + 0x02000000,
						 0x02000000);
			dev_priv->mtrr_handles[2] =
				arch_phys_wc_add(fb_base + 0x04000000,
						 0x04000000);
		} else {
			DRM_ERROR("strange pci_resource_len %08llx\n",
				  (unsigned long long)
				  pci_resource_len(dev->pdev, 0));
		}
	} else if (dev_priv->chipset != S3_SUPERSAVAGE &&
		   dev_priv->chipset != S3_SAVAGE2000) {
		mmio_base = pci_resource_start(dev->pdev, 0);
		fb_base = pci_resource_start(dev->pdev, 1);
		fb_size = SAVAGE_FB_SIZE_S4;
		aperture_base = fb_base + SAVAGE_APERTURE_OFFSET;
		/* this should always be true */
		if (pci_resource_len(dev->pdev, 1) == 0x08000000) {
			/* Can use one MTRR to cover both fb and
			 * aperture. */
			dev_priv->mtrr_handles[0] =
				arch_phys_wc_add(fb_base,
						 0x08000000);
		} else {
			DRM_ERROR("strange pci_resource_len %08llx\n",
				  (unsigned long long)
				  pci_resource_len(dev->pdev, 1));
		}
	} else {
		mmio_base = pci_resource_start(dev->pdev, 0);
		fb_base = pci_resource_start(dev->pdev, 1);
		fb_size = pci_resource_len(dev->pdev, 1);
		aperture_base = pci_resource_start(dev->pdev, 2);
		/* Automatic MTRR setup will do the right thing. */
	}

	ret = drm_legacy_addmap(dev, mmio_base, SAVAGE_MMIO_SIZE,
				_DRM_REGISTERS, _DRM_READ_ONLY,
				&dev_priv->mmio);
	if (ret)
		return ret;

	ret = drm_legacy_addmap(dev, fb_base, fb_size, _DRM_FRAME_BUFFER,
				_DRM_WRITE_COMBINING, &dev_priv->fb);
	if (ret)
		return ret;

	ret = drm_legacy_addmap(dev, aperture_base, SAVAGE_APERTURE_SIZE,
				_DRM_FRAME_BUFFER, _DRM_WRITE_COMBINING,
				&dev_priv->aperture);
	return ret;
}

/*
 * Delete MTRRs and free device-private data.
 */
void savage_driver_lastclose(struct drm_device *dev)
{
	drm_savage_private_t *dev_priv = dev->dev_private;
	int i;

	for (i = 0; i < 3; ++i) {
		arch_phys_wc_del(dev_priv->mtrr_handles[i]);
		dev_priv->mtrr_handles[i] = 0;
	}
}

void savage_driver_unload(struct drm_device *dev)
{
	drm_savage_private_t *dev_priv = dev->dev_private;

	kfree(dev_priv);
}

static int savage_do_init_bci(struct drm_device * dev, drm_savage_init_t * init)
{
	drm_savage_private_t *dev_priv = dev->dev_private;

	if (init->fb_bpp != 16 && init->fb_bpp != 32) {
		DRM_ERROR("invalid frame buffer bpp %d!\n", init->fb_bpp);
		return -EINVAL;
	}
	if (init->depth_bpp != 16 && init->depth_bpp != 32) {
		DRM_ERROR("invalid depth buffer bpp %d!\n", init->depth_bpp);
		return -EINVAL;
	}
	if (init->dma_type != SAVAGE_DMA_AGP &&
	    init->dma_type != SAVAGE_DMA_PCI) {
		DRM_ERROR("invalid dma memory type %d!\n", init->dma_type);
		return -EINVAL;
	}

	dev_priv->cob_size = init->cob_size;
	dev_priv->bci_threshold_lo = init->bci_threshold_lo;
	dev_priv->bci_threshold_hi = init->bci_threshold_hi;
	dev_priv->dma_type = init->dma_type;

	dev_priv->fb_bpp = init->fb_bpp;
	dev_priv->front_offset = init->front_offset;
	dev_priv->front_pitch = init->front_pitch;
	dev_priv->back_offset = init->back_offset;
	dev_priv->back_pitch = init->back_pitch;
	dev_priv->depth_bpp = init->depth_bpp;
	dev_priv->depth_offset = init->depth_offset;
	dev_priv->depth_pitch = init->depth_pitch;

	dev_priv->texture_offset = init->texture_offset;
	dev_priv->texture_size = init->texture_size;

	dev_priv->sarea = drm_legacy_getsarea(dev);
	if (!dev_priv->sarea) {
		DRM_ERROR("could not find sarea!\n");
		savage_do_cleanup_bci(dev);
		return -EINVAL;
	}
	if (init->status_offset != 0) {
		dev_priv->status = drm_legacy_findmap(dev, init->status_offset);
		if (!dev_priv->status) {
			DRM_ERROR("could not find shadow status region!\n");
			savage_do_cleanup_bci(dev);
			return -EINVAL;
		}
	} else {
		dev_priv->status = NULL;
	}
	if (dev_priv->dma_type == SAVAGE_DMA_AGP && init->buffers_offset) {
		dev->agp_buffer_token = init->buffers_offset;
		dev->agp_buffer_map = drm_legacy_findmap(dev,
							 init->buffers_offset);
		if (!dev->agp_buffer_map) {
			DRM_ERROR("could not find DMA buffer region!\n");
			savage_do_cleanup_bci(dev);
			return -EINVAL;
		}
		drm_legacy_ioremap(dev->agp_buffer_map, dev);
		if (!dev->agp_buffer_map->handle) {
			DRM_ERROR("failed to ioremap DMA buffer region!\n");
			savage_do_cleanup_bci(dev);
			return -ENOMEM;
		}
	}
	if (init->agp_textures_offset) {
		dev_priv->agp_textures =
		    drm_legacy_findmap(dev, init->agp_textures_offset);
		if (!dev_priv->agp_textures) {
			DRM_ERROR("could not find agp texture region!\n");
			savage_do_cleanup_bci(dev);
			return -EINVAL;
		}
	} else {
		dev_priv->agp_textures = NULL;
	}

	if (init->cmd_dma_offset) {
		if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
			DRM_ERROR("command DMA not supported on "
				  "Savage3D/MX/IX.\n");
			savage_do_cleanup_bci(dev);
			return -EINVAL;
		}
		if (dev->dma && dev->dma->buflist) {
			DRM_ERROR("command and vertex DMA not supported "
				  "at the same time.\n");
			savage_do_cleanup_bci(dev);
			return -EINVAL;
		}
		dev_priv->cmd_dma = drm_legacy_findmap(dev, init->cmd_dma_offset);
		if (!dev_priv->cmd_dma) {
			DRM_ERROR("could not find command DMA region!\n");
			savage_do_cleanup_bci(dev);
			return -EINVAL;
		}
		if (dev_priv->dma_type == SAVAGE_DMA_AGP) {
			if (dev_priv->cmd_dma->type != _DRM_AGP) {
				DRM_ERROR("AGP command DMA region is not a "
					  "_DRM_AGP map!\n");
				savage_do_cleanup_bci(dev);
				return -EINVAL;
			}
			drm_legacy_ioremap(dev_priv->cmd_dma, dev);
			if (!dev_priv->cmd_dma->handle) {
				DRM_ERROR("failed to ioremap command "
					  "DMA region!\n");
				savage_do_cleanup_bci(dev);
				return -ENOMEM;
			}
		} else if (dev_priv->cmd_dma->type != _DRM_CONSISTENT) {
			DRM_ERROR("PCI command DMA region is not a "
				  "_DRM_CONSISTENT map!\n");
			savage_do_cleanup_bci(dev);
			return -EINVAL;
		}
	} else {
		dev_priv->cmd_dma = NULL;
	}

	dev_priv->dma_flush = savage_dma_flush;
	if (!dev_priv->cmd_dma) {
		DRM_DEBUG("falling back to faked command DMA.\n");
		dev_priv->fake_dma.offset = 0;
		dev_priv->fake_dma.size = SAVAGE_FAKE_DMA_SIZE;
		dev_priv->fake_dma.type = _DRM_SHM;
		dev_priv->fake_dma.handle = kmalloc(SAVAGE_FAKE_DMA_SIZE,
						    GFP_KERNEL);
		if (!dev_priv->fake_dma.handle) {
			DRM_ERROR("could not allocate faked DMA buffer!\n");
			savage_do_cleanup_bci(dev);
			return -ENOMEM;
		}
		dev_priv->cmd_dma = &dev_priv->fake_dma;
		dev_priv->dma_flush = savage_fake_dma_flush;
	}

	dev_priv->sarea_priv =
	    (drm_savage_sarea_t *) ((uint8_t *) dev_priv->sarea->handle +
				    init->sarea_priv_offset);

	/* setup bitmap descriptors */
	{
		unsigned int color_tile_format;
		unsigned int depth_tile_format;
		unsigned int front_stride, back_stride, depth_stride;
		if (dev_priv->chipset <= S3_SAVAGE4) {
			color_tile_format = dev_priv->fb_bpp == 16 ?
			    SAVAGE_BD_TILE_16BPP : SAVAGE_BD_TILE_32BPP;
			depth_tile_format = dev_priv->depth_bpp == 16 ?
			    SAVAGE_BD_TILE_16BPP : SAVAGE_BD_TILE_32BPP;
		} else {
			color_tile_format = SAVAGE_BD_TILE_DEST;
			depth_tile_format = SAVAGE_BD_TILE_DEST;
		}
		front_stride = dev_priv->front_pitch / (dev_priv->fb_bpp / 8);
		back_stride = dev_priv->back_pitch / (dev_priv->fb_bpp / 8);
		depth_stride =
		    dev_priv->depth_pitch / (dev_priv->depth_bpp / 8);

		dev_priv->front_bd = front_stride | SAVAGE_BD_BW_DISABLE |
		    (dev_priv->fb_bpp << SAVAGE_BD_BPP_SHIFT) |
		    (color_tile_format << SAVAGE_BD_TILE_SHIFT);

		dev_priv->back_bd = back_stride | SAVAGE_BD_BW_DISABLE |
		    (dev_priv->fb_bpp << SAVAGE_BD_BPP_SHIFT) |
		    (color_tile_format << SAVAGE_BD_TILE_SHIFT);

		dev_priv->depth_bd = depth_stride | SAVAGE_BD_BW_DISABLE |
		    (dev_priv->depth_bpp << SAVAGE_BD_BPP_SHIFT) |
		    (depth_tile_format << SAVAGE_BD_TILE_SHIFT);
	}

	/* setup status and bci ptr */
	dev_priv->event_counter = 0;
	dev_priv->event_wrap = 0;
	dev_priv->bci_ptr = (volatile uint32_t *)
	    ((uint8_t *) dev_priv->mmio->handle + SAVAGE_BCI_OFFSET);
	if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
		dev_priv->status_used_mask = SAVAGE_FIFO_USED_MASK_S3D;
	} else {
		dev_priv->status_used_mask = SAVAGE_FIFO_USED_MASK_S4;
	}
	if (dev_priv->status != NULL) {
		dev_priv->status_ptr =
		    (volatile uint32_t *)dev_priv->status->handle;
		dev_priv->wait_fifo = savage_bci_wait_fifo_shadow;
		dev_priv->wait_evnt = savage_bci_wait_event_shadow;
		dev_priv->status_ptr[1023] = dev_priv->event_counter;
	} else {
		dev_priv->status_ptr = NULL;
		if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
			dev_priv->wait_fifo = savage_bci_wait_fifo_s3d;
		} else {
			dev_priv->wait_fifo = savage_bci_wait_fifo_s4;
		}
		dev_priv->wait_evnt = savage_bci_wait_event_reg;
	}

	/* cliprect functions */
	if (S3_SAVAGE3D_SERIES(dev_priv->chipset))
		dev_priv->emit_clip_rect = savage_emit_clip_rect_s3d;
	else
		dev_priv->emit_clip_rect = savage_emit_clip_rect_s4;

	if (savage_freelist_init(dev) < 0) {
		DRM_ERROR("could not initialize freelist\n");
		savage_do_cleanup_bci(dev);
		return -ENOMEM;
	}

	if (savage_dma_init(dev_priv) < 0) {
		DRM_ERROR("could not initialize command DMA\n");
		savage_do_cleanup_bci(dev);
		return -ENOMEM;
	}

	return 0;
}
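
/*
 * savage_do_cleanup_bci() is called from every error path in
 * savage_do_init_bci() above, so it must tolerate a partially
 * initialized dev_priv (hence the NULL and map-type checks below).
 */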

static int savage_do_cleanup_bci(struct drm_device * dev)
{
	drm_savage_private_t *dev_priv = dev->dev_private;

	if (dev_priv->cmd_dma == &dev_priv->fake_dma) {
		kfree(dev_priv->fake_dma.handle);
	} else if (dev_priv->cmd_dma && dev_priv->cmd_dma->handle &&
		   dev_priv->cmd_dma->type == _DRM_AGP &&
		   dev_priv->dma_type == SAVAGE_DMA_AGP)
		drm_legacy_ioremapfree(dev_priv->cmd_dma, dev);

	if (dev_priv->dma_type == SAVAGE_DMA_AGP &&
	    dev->agp_buffer_map && dev->agp_buffer_map->handle) {
		drm_legacy_ioremapfree(dev->agp_buffer_map, dev);
		/* make sure the next instance (which may be running
		 * in PCI mode) doesn't try to use an old
		 * agp_buffer_map. */
		dev->agp_buffer_map = NULL;
	}

	kfree(dev_priv->dma_pages);

	return 0;
}

static int savage_bci_init(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	drm_savage_init_t *init = data;

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	switch (init->func) {
	case SAVAGE_INIT_BCI:
		return savage_do_init_bci(dev, init);
	case SAVAGE_CLEANUP_BCI:
		return savage_do_cleanup_bci(dev);
	}

	return -EINVAL;
}

static int savage_bci_event_emit(struct drm_device *dev, void *data,
				 struct drm_file *file_priv)
{
	drm_savage_private_t *dev_priv = dev->dev_private;
	drm_savage_event_emit_t *event = data;

	DRM_DEBUG("\n");

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	event->count = savage_bci_emit_event(dev_priv, event->flags);
	event->count |= dev_priv->event_wrap << 16;

	return 0;
}

static int savage_bci_event_wait(struct drm_device *dev, void *data,
				 struct drm_file *file_priv)
{
	drm_savage_private_t *dev_priv = dev->dev_private;
	drm_savage_event_wait_t *event = data;
	unsigned int event_e, hw_e;
	unsigned int event_w, hw_w;

	DRM_DEBUG("\n");

	UPDATE_EVENT_COUNTER();
	if (dev_priv->status_ptr)
		hw_e = dev_priv->status_ptr[1] & 0xffff;
	else
		hw_e = SAVAGE_READ(SAVAGE_STATUS_WORD1) & 0xffff;
	hw_w = dev_priv->event_wrap;
	if (hw_e > dev_priv->event_counter)
		hw_w--;		/* hardware hasn't passed the last wrap yet */

	event_e = event->count & 0xffff;
	event_w = event->count >> 16;

	/* Don't need to wait if
	 * - event counter wrapped since the event was emitted or
	 * - the hardware has advanced up to or over the event to wait for.
	 */
	if (event_w < hw_w || (event_w == hw_w && event_e <= hw_e))
		return 0;
	else
		return dev_priv->wait_evnt(dev_priv, event_e);
}

/*
 * DMA buffer management
 */

static int savage_bci_get_buffers(struct drm_device *dev,
				  struct drm_file *file_priv,
				  struct drm_dma *d)
{
	struct drm_buf *buf;
	int i;

	for (i = d->granted_count; i < d->request_count; i++) {
		buf = savage_freelist_get(dev);
		if (!buf)
			return -EAGAIN;

		buf->file_priv = file_priv;

		if (copy_to_user(&d->request_indices[i],
				 &buf->idx, sizeof(buf->idx)))
			return -EFAULT;
		if (copy_to_user(&d->request_sizes[i],
				 &buf->total, sizeof(buf->total)))
			return -EFAULT;

		d->granted_count++;
	}
	return 0;
}

int savage_bci_buffers(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct drm_device_dma *dma = dev->dma;
	struct drm_dma *d = data;
	int ret = 0;

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	/* Please don't send us buffers.
	 */
	if (d->send_count != 0) {
		DRM_ERROR("Process %d trying to send %d buffers via drmDMA\n",
			  task_pid_nr(current), d->send_count);
		return -EINVAL;
	}

	/* We'll send you buffers.
	 */
	if (d->request_count < 0 || d->request_count > dma->buf_count) {
		DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n",
			  task_pid_nr(current), d->request_count,
			  dma->buf_count);
		return -EINVAL;
	}

	d->granted_count = 0;

	if (d->request_count) {
		ret = savage_bci_get_buffers(dev, file_priv, d);
	}

	return ret;
}

void savage_reclaim_buffers(struct drm_device *dev, struct drm_file *file_priv)
{
	struct drm_device_dma *dma = dev->dma;
	drm_savage_private_t *dev_priv = dev->dev_private;
	int release_idlelock = 0;
	int i;

	if (!dma)
		return;
	if (!dev_priv)
		return;
	if (!dma->buflist)
		return;

	if (file_priv->master && file_priv->master->lock.hw_lock) {
		drm_legacy_idlelock_take(&file_priv->master->lock);
		release_idlelock = 1;
	}

	for (i = 0; i < dma->buf_count; i++) {
		struct drm_buf *buf = dma->buflist[i];
		drm_savage_buf_priv_t *buf_priv = buf->dev_private;

		if (buf->file_priv == file_priv && buf_priv &&
		    buf_priv->next == NULL && buf_priv->prev == NULL) {
			uint16_t event;
			DRM_DEBUG("reclaimed from client\n");
			event = savage_bci_emit_event(dev_priv, SAVAGE_WAIT_3D);
			SET_AGE(&buf_priv->age, event, dev_priv->event_wrap);
			savage_freelist_put(dev, buf);
		}
	}

	if (release_idlelock)
		drm_legacy_idlelock_release(&file_priv->master->lock);
}

const struct drm_ioctl_desc savage_ioctls[] = {
	DRM_IOCTL_DEF_DRV(SAVAGE_BCI_INIT, savage_bci_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(SAVAGE_BCI_CMDBUF, savage_bci_cmdbuf, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(SAVAGE_BCI_EVENT_EMIT, savage_bci_event_emit, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(SAVAGE_BCI_EVENT_WAIT, savage_bci_event_wait, DRM_AUTH),
};

int savage_max_ioctl = ARRAY_SIZE(savage_ioctls);