/* xorg-server-1.4 / hw / xfree86 / common / xf86Bus.c */
1 /*
2 * Copyright (c) 1997-2003 by The XFree86 Project, Inc.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
22 * Except as contained in this notice, the name of the copyright holder(s)
23 * and author(s) shall not be used in advertising or otherwise to promote
24 * the sale, use or other dealings in this Software without prior written
25 * authorization from the copyright holder(s) and author(s).
28 #define REDUCER
30 * This file contains the interfaces to the bus-specific code
33 #ifdef HAVE_XORG_CONFIG_H
34 #include <xorg-config.h>
35 #endif
37 #include <ctype.h>
38 #include <stdlib.h>
39 #include <unistd.h>
40 #include <X11/X.h>
41 #include "os.h"
42 #include "xf86.h"
43 #include "xf86Priv.h"
44 #include "xf86Resources.h"
46 /* Bus-specific headers */
48 #include "xf86Bus.h"
50 #define XF86_OS_PRIVS
51 #define NEED_OS_RAC_PROTOS
52 #include "xf86_OSproc.h"
54 #include "xf86RAC.h"
/* Entity data */
EntityPtr *xf86Entities = NULL;	/* Bus slots claimed by drivers */
int xf86NumEntities = 0;		/* number of valid entries in xf86Entities */
static int xf86EntityPrivateCount = 0;	/* slots reserved in each entityPrivates */
BusAccPtr xf86BusAccInfo = NULL;

/* Access record with all hooks NULL: used where no control is required */
xf86AccessRec AccessNULL = {NULL, NULL, NULL};

/* Which entity access chains are currently routed for IO and MEM */
xf86CurrentAccessRec xf86CurrentAccess = {NULL, NULL};

/* Bus location of the primary (POST) device; BUS_NONE until detected */
BusRec primaryBus = { BUS_NONE, {{0}}};

/* TRUE while the server owns the access/VT state (see xf86Access*()) */
static Bool xf86ResAccessEnter = FALSE;

#ifdef REDUCER
/* Resources that temporarily conflict with estimated resources */
static resPtr AccReducers = NULL;
#endif

/* resource lists */
resPtr Acc = NULL;	/* all registered (accounted) resources */
resPtr osRes = NULL;	/* resources claimed by the OS */

/* allocatable ranges */
resPtr ResRange = NULL;

/* predefined special resources */
_X_EXPORT resRange resVgaExclusive[] = {_VGA_EXCLUSIVE, _END};
_X_EXPORT resRange resVgaShared[] = {_VGA_SHARED, _END};
_X_EXPORT resRange resVgaMemShared[] = {_VGA_SHARED_MEM,_END};
_X_EXPORT resRange resVgaIoShared[] = {_VGA_SHARED_IO,_END};
_X_EXPORT resRange resVgaUnusedExclusive[] = {_VGA_EXCLUSIVE_UNUSED, _END};
_X_EXPORT resRange resVgaUnusedShared[] = {_VGA_SHARED_UNUSED, _END};
_X_EXPORT resRange resVgaSparseExclusive[] = {_VGA_EXCLUSIVE_SPARSE, _END};
_X_EXPORT resRange resVgaSparseShared[] = {_VGA_SHARED_SPARSE, _END};
_X_EXPORT resRange res8514Exclusive[] = {_8514_EXCLUSIVE, _END};
_X_EXPORT resRange res8514Shared[] = {_8514_SHARED, _END};

/* Flag: do we need RAC ? */
static Bool needRAC = FALSE;
static Bool doFramebufferMode = FALSE;

/* state change notification callback list */
static StateChangeNotificationPtr StateChangeNotificationList;
static void notifyStateChange(xf86NotifyState state);
#undef MIN
/*
 * Fully parenthesized: the previous expansion ((x<y)?x:y) mis-parsed
 * arguments containing operators of lower precedence than '<'
 * (e.g. MIN(a|b, c) evaluated a|(b<c)).  NOTE: each argument may still
 * be evaluated twice — do not pass expressions with side effects.
 */
#define MIN(x,y) (((x) < (y)) ? (x) : (y))
/*
 * Call the bus probes relevant to the architecture.
 *
 * The only ones available so far are for PCI and (on sparc, except
 * OpenBSD) SBUS.
 */
void
xf86BusProbe(void)
{
    xf86PciProbe();
#if (defined(__sparc__) || defined(__sparc)) && !defined(__OpenBSD__)
    xf86SbusProbe();
#endif
}
122 * Determine what bus type the busID string represents. The start of the
123 * bus-dependent part of the string is returned as retID.
126 BusType
127 StringToBusType(const char* busID, const char **retID)
129 char *p, *s;
130 BusType ret = BUS_NONE;
132 /* If no type field, Default to PCI */
133 if (isdigit(busID[0])) {
134 if (retID)
135 *retID = busID;
136 return BUS_PCI;
139 s = xstrdup(busID);
140 p = strtok(s, ":");
141 if (p == NULL || *p == 0) {
142 xfree(s);
143 return BUS_NONE;
145 if (!xf86NameCmp(p, "pci") || !xf86NameCmp(p, "agp"))
146 ret = BUS_PCI;
147 if (!xf86NameCmp(p, "isa"))
148 ret = BUS_ISA;
149 if (!xf86NameCmp(p, "sbus"))
150 ret = BUS_SBUS;
151 if (ret != BUS_NONE)
152 if (retID)
153 *retID = busID + strlen(p) + 1;
154 xfree(s);
155 return ret;
159 * Entity related code.
/*
 * Run each entity's init callback once, with the bus routed to the
 * entity and its fallback access hooks enabled around the call.
 * Afterwards, resources flagged ResInit for that entity are unlinked
 * from the global Acc list and freed — they are only needed during init.
 */
void
xf86EntityInit(void)
{
    int i;
    resPtr *pprev_next;
    resPtr res;
    xf86AccessPtr pacc;

    for (i = 0; i < xf86NumEntities; i++)
	if (xf86Entities[i]->entityInit) {
	    /* route the bus to this entity before touching it */
	    if (xf86Entities[i]->access->busAcc)
		((BusAccPtr)xf86Entities[i]->access->busAcc)->set_f
		    (xf86Entities[i]->access->busAcc);
	    pacc = xf86Entities[i]->access->fallback;
	    if (pacc->AccessEnable)
		pacc->AccessEnable(pacc->arg);
	    xf86Entities[i]->entityInit(i,xf86Entities[i]->private);
	    if (pacc->AccessDisable)
		pacc->AccessDisable(pacc->arg);
	    /* remove init resources after init is processed */
	    pprev_next = &Acc;
	    res = Acc;
	    while (res) {
		if (res->res_type & ResInit && (res->entityIndex == i)) {
		    (*pprev_next) = res->next;	/* unlink, then free */
		    xfree(res);
		} else
		    pprev_next = &(res->next);
		res = (*pprev_next);
	    }
	}
}
196 xf86AllocateEntity(void)
198 xf86NumEntities++;
199 xf86Entities = xnfrealloc(xf86Entities,
200 sizeof(EntityPtr) * xf86NumEntities);
201 xf86Entities[xf86NumEntities - 1] = xnfcalloc(1,sizeof(EntityRec));
202 xf86Entities[xf86NumEntities - 1]->entityPrivates =
203 xnfcalloc(sizeof(DevUnion) * xf86EntityPrivateCount, 1);
204 return (xf86NumEntities - 1);
/*
 * Invoke every entity's entityEnter() callback (VT re-enter), with the
 * bus routed to the entity and its fallback access hooks enabled
 * around the call.
 */
static void
EntityEnter(void)
{
    int i;
    xf86AccessPtr pacc;

    for (i = 0; i < xf86NumEntities; i++)
	if (xf86Entities[i]->entityEnter) {
	    if (xf86Entities[i]->access->busAcc)
		((BusAccPtr)xf86Entities[i]->access->busAcc)->set_f
		    (xf86Entities[i]->access->busAcc);
	    pacc = xf86Entities[i]->access->fallback;
	    if (pacc->AccessEnable)
		pacc->AccessEnable(pacc->arg);
	    xf86Entities[i]->entityEnter(i,xf86Entities[i]->private);
	    if (pacc->AccessDisable)
		pacc->AccessDisable(pacc->arg);
	}
}
/*
 * Invoke every entity's entityLeave() callback (VT leave / shutdown),
 * with the bus routed to the entity and its fallback access hooks
 * enabled around the call.
 */
static void
EntityLeave(void)
{
    int i;
    xf86AccessPtr pacc;

    for (i = 0; i < xf86NumEntities; i++)
	if (xf86Entities[i]->entityLeave) {
	    if (xf86Entities[i]->access->busAcc)
		((BusAccPtr)xf86Entities[i]->access->busAcc)->set_f
		    (xf86Entities[i]->access->busAcc);
	    pacc = xf86Entities[i]->access->fallback;
	    if (pacc->AccessEnable)
		pacc->AccessEnable(pacc->arg);
	    xf86Entities[i]->entityLeave(i,xf86Entities[i]->private);
	    if (pacc->AccessDisable)
		pacc->AccessDisable(pacc->arg);
	}
}
247 _X_EXPORT Bool
248 xf86IsEntityPrimary(int entityIndex)
250 EntityPtr pEnt = xf86Entities[entityIndex];
252 if (primaryBus.type != pEnt->busType) return FALSE;
254 switch (pEnt->busType) {
255 case BUS_PCI:
256 return (pEnt->pciBusId.bus == primaryBus.id.pci.bus &&
257 pEnt->pciBusId.device == primaryBus.id.pci.device &&
258 pEnt->pciBusId.func == primaryBus.id.pci.func);
259 case BUS_ISA:
260 return TRUE;
261 case BUS_SBUS:
262 return (pEnt->sbusBusId.fbNum == primaryBus.id.sbus.fbNum);
263 default:
264 return FALSE;
268 _X_EXPORT Bool
269 xf86SetEntityFuncs(int entityIndex, EntityProc init, EntityProc enter,
270 EntityProc leave, pointer private)
272 if (entityIndex >= xf86NumEntities)
273 return FALSE;
274 xf86Entities[entityIndex]->entityInit = init;
275 xf86Entities[entityIndex]->entityEnter = enter;
276 xf86Entities[entityIndex]->entityLeave = leave;
277 xf86Entities[entityIndex]->private = private;
278 return TRUE;
281 Bool
282 xf86DriverHasEntities(DriverPtr drvp)
284 int i;
285 for (i = 0; i < xf86NumEntities; i++) {
286 if (xf86Entities[i]->driver == drvp)
287 return TRUE;
289 return FALSE;
292 _X_EXPORT void
293 xf86AddEntityToScreen(ScrnInfoPtr pScrn, int entityIndex)
295 if (entityIndex == -1)
296 return;
297 if (xf86Entities[entityIndex]->inUse &&
298 !(xf86Entities[entityIndex]->entityProp & IS_SHARED_ACCEL))
299 FatalError("Requested Entity already in use!\n");
301 pScrn->numEntities++;
302 pScrn->entityList = xnfrealloc(pScrn->entityList,
303 pScrn->numEntities * sizeof(int));
304 pScrn->entityList[pScrn->numEntities - 1] = entityIndex;
305 xf86Entities[entityIndex]->access->next = pScrn->access;
306 pScrn->access = xf86Entities[entityIndex]->access;
307 xf86Entities[entityIndex]->inUse = TRUE;
308 pScrn->entityInstanceList = xnfrealloc(pScrn->entityInstanceList,
309 pScrn->numEntities * sizeof(int));
310 pScrn->entityInstanceList[pScrn->numEntities - 1] = 0;
311 pScrn->domainIOBase = xf86Entities[entityIndex]->domainIO;
314 _X_EXPORT void
315 xf86SetEntityInstanceForScreen(ScrnInfoPtr pScrn, int entityIndex, int instance)
317 int i;
319 if (entityIndex == -1 || entityIndex >= xf86NumEntities)
320 return;
322 for (i = 0; i < pScrn->numEntities; i++) {
323 if (pScrn->entityList[i] == entityIndex) {
324 pScrn->entityInstanceList[i] = instance;
325 break;
331 * XXX This needs to be updated for the case where a single entity may have
332 * instances associated with more than one screen.
334 _X_EXPORT ScrnInfoPtr
335 xf86FindScreenForEntity(int entityIndex)
337 int i,j;
339 if (entityIndex == -1) return NULL;
341 if (xf86Screens) {
342 for (i = 0; i < xf86NumScreens; i++) {
343 for (j = 0; j < xf86Screens[i]->numEntities; j++) {
344 if ( xf86Screens[i]->entityList[j] == entityIndex )
345 return (xf86Screens[i]);
349 return NULL;
/*
 * Detach an entity from a screen: unlink its access record from the
 * screen's access chain (ptr tracks the link slot pointing at the
 * current entry), call its disable hooks, compact the screen's entity
 * list and mark the entity unused.
 */
_X_EXPORT void
xf86RemoveEntityFromScreen(ScrnInfoPtr pScrn, int entityIndex)
{
    int i;
    EntityAccessPtr *ptr = (EntityAccessPtr *)&pScrn->access;
    EntityAccessPtr peacc;

    for (i = 0; i < pScrn->numEntities; i++) {
	if (pScrn->entityList[i] == entityIndex) {
	    peacc = xf86Entities[pScrn->entityList[i]]->access;
	    (*ptr) = peacc->next;	/* unlink from the access chain */
	    /* disable entity: call disable func */
	    if (peacc->pAccess && peacc->pAccess->AccessDisable)
		peacc->pAccess->AccessDisable(peacc->pAccess->arg);
	    /* also disable fallback - just in case */
	    if (peacc->fallback && peacc->fallback->AccessDisable)
		peacc->fallback->AccessDisable(peacc->fallback->arg);
	    /* shift the remaining entries down over the removed slot */
	    for (i++; i < pScrn->numEntities; i++)
		pScrn->entityList[i-1] = pScrn->entityList[i];
	    pScrn->numEntities--;
	    xf86Entities[entityIndex]->inUse = FALSE;
	    break;
	}
	ptr = &(xf86Entities[pScrn->entityList[i]]->access->next);
    }
}
/*
 * xf86ClearEntityListForScreen() - called when a screen is deleted
 * to mark its entities unused. Called by xf86DeleteScreen().
 */
void
xf86ClearEntityListForScreen(int scrnIndex)
{
    ScrnInfoPtr pScrn = xf86Screens[scrnIndex];
    EntityAccessPtr peacc;
    int i, entityIndex;

    if (pScrn->entityList == NULL || pScrn->numEntities == 0) return;

    for (i = 0; i < pScrn->numEntities; i++) {
	entityIndex = pScrn->entityList[i];
	xf86Entities[entityIndex]->inUse = FALSE;
	/* disable resource: call the disable function */
	peacc = xf86Entities[entityIndex]->access;
	if (peacc->pAccess && peacc->pAccess->AccessDisable)
	    peacc->pAccess->AccessDisable(peacc->pAccess->arg);
	/* and the fallback function */
	if (peacc->fallback && peacc->fallback->AccessDisable)
	    peacc->fallback->AccessDisable(peacc->fallback->arg);
	/* shared resources are only needed when entity is active: remove */
	xf86DeallocateResourcesForEntity(entityIndex, ResShared);
    }
    xfree(pScrn->entityList);
    xfree(pScrn->entityInstanceList);
    /* drop any stale current-access pointers into the freed chain */
    if (pScrn->CurrentAccess->pIoAccess == (EntityAccessPtr)pScrn->access)
	pScrn->CurrentAccess->pIoAccess = NULL;
    if (pScrn->CurrentAccess->pMemAccess == (EntityAccessPtr)pScrn->access)
	pScrn->CurrentAccess->pMemAccess = NULL;
    pScrn->entityList = NULL;
    pScrn->entityInstanceList = NULL;
}
415 _X_EXPORT void
416 xf86DeallocateResourcesForEntity(int entityIndex, unsigned long type)
418 resPtr *pprev_next = &Acc;
419 resPtr res = Acc;
421 while (res) {
422 if (res->entityIndex == entityIndex &&
423 (type & ResAccMask & res->res_type))
425 (*pprev_next) = res->next;
426 xfree(res);
427 } else
428 pprev_next = &(res->next);
429 res = (*pprev_next);
434 * Add an extra device section (GDevPtr) to an entity.
437 void
438 xf86AddDevToEntity(int entityIndex, GDevPtr dev)
440 EntityPtr pEnt;
442 if (entityIndex >= xf86NumEntities)
443 return;
445 pEnt = xf86Entities[entityIndex];
446 pEnt->numInstances++;
447 pEnt->devices = xnfrealloc(pEnt->devices,
448 pEnt->numInstances * sizeof(GDevPtr));
449 pEnt->devices[pEnt->numInstances - 1] = dev;
450 dev->claimed = TRUE;
454 * xf86GetEntityInfo() -- This function hands information from the
455 * EntityRec struct to the drivers. The EntityRec structure itself
456 * remains invisible to the driver.
458 _X_EXPORT EntityInfoPtr
459 xf86GetEntityInfo(int entityIndex)
461 EntityInfoPtr pEnt;
462 int i;
464 if (entityIndex >= xf86NumEntities)
465 return NULL;
467 pEnt = xnfcalloc(1,sizeof(EntityInfoRec));
468 pEnt->index = entityIndex;
469 pEnt->location = xf86Entities[entityIndex]->bus;
470 pEnt->active = xf86Entities[entityIndex]->active;
471 pEnt->chipset = xf86Entities[entityIndex]->chipset;
472 pEnt->resources = xf86Entities[entityIndex]->resources;
473 pEnt->driver = xf86Entities[entityIndex]->driver;
474 if ( (xf86Entities[entityIndex]->devices) &&
475 (xf86Entities[entityIndex]->devices[0]) ) {
476 for (i = 0; i < xf86Entities[entityIndex]->numInstances; i++)
477 if (xf86Entities[entityIndex]->devices[i]->screen == 0)
478 break;
479 pEnt->device = xf86Entities[entityIndex]->devices[i];
480 } else
481 pEnt->device = NULL;
483 return pEnt;
486 _X_EXPORT int
487 xf86GetNumEntityInstances(int entityIndex)
489 if (entityIndex >= xf86NumEntities)
490 return -1;
492 return xf86Entities[entityIndex]->numInstances;
495 _X_EXPORT GDevPtr
496 xf86GetDevFromEntity(int entityIndex, int instance)
498 int i;
500 /* We might not use AddDevtoEntity */
501 if ( (!xf86Entities[entityIndex]->devices) ||
502 (!xf86Entities[entityIndex]->devices[0]) )
503 return NULL;
505 if (entityIndex >= xf86NumEntities ||
506 instance >= xf86Entities[entityIndex]->numInstances)
507 return NULL;
509 for (i = 0; i < xf86Entities[entityIndex]->numInstances; i++)
510 if (xf86Entities[entityIndex]->devices[i]->screen == instance)
511 break;
512 return xf86Entities[entityIndex]->devices[i];
/*
 * general generic disable function: walk the IO and MEM current-access
 * chains of every screen calling each disable hook, clear the chain
 * pointers, then call every entity's generic fallback disable hook.
 */
static void
disableAccess(void)
{
    int i;
    xf86AccessPtr pacc;
    EntityAccessPtr peacc;

    /* call disable funcs and reset current access pointer */
    /* the entity specific access funcs are in an enabled */
    /* state - driver must restore their state explicitly */
    for (i = 0; i < xf86NumScreens; i++) {
	peacc = xf86Screens[i]->CurrentAccess->pIoAccess;
	while (peacc) {
	    if (peacc->pAccess && peacc->pAccess->AccessDisable)
		peacc->pAccess->AccessDisable(peacc->pAccess->arg);
	    peacc = peacc->next;
	}
	xf86Screens[i]->CurrentAccess->pIoAccess = NULL;
	peacc = xf86Screens[i]->CurrentAccess->pMemAccess;
	while (peacc) {
	    if (peacc->pAccess && peacc->pAccess->AccessDisable)
		peacc->pAccess->AccessDisable(peacc->pAccess->arg);
	    peacc = peacc->next;
	}
	xf86Screens[i]->CurrentAccess->pMemAccess = NULL;
    }
    /* then call the generic entity disable funcs */
    for (i = 0; i < xf86NumEntities; i++) {
	pacc = xf86Entities[i]->access->fallback;
	if (pacc->AccessDisable)
	    pacc->AccessDisable(pacc->arg);
    }
}
552 static void
553 clearAccess(void)
555 int i;
557 /* call disable funcs and reset current access pointer */
558 /* the entity specific access funcs are in an enabled */
559 /* state - driver must restore their state explicitely */
560 for (i = 0; i < xf86NumScreens; i++) {
561 xf86Screens[i]->CurrentAccess->pIoAccess = NULL;
562 xf86Screens[i]->CurrentAccess->pMemAccess = NULL;
568 * Generic interface to bus specific code - add other buses here
/*
 * xf86AccessInit() - set up everything needed for access control
 * called only once on first server generation.
 */
void
xf86AccessInit(void)
{
    initPciState();
    initPciBusState();
    DisablePciBusAccess();
    DisablePciAccess();

    /* the server now owns the access state */
    xf86ResAccessEnter = TRUE;
}
/*
 * xf86AccessEnter() -- gets called to save the text mode VGA IO
 * resources when reentering the server after a VT switch.
 */
void
xf86AccessEnter(void)
{
    if (xf86ResAccessEnter)	/* already entered */
	return;
    /*
     * on enter we simply disable routing of special resources
     * to any bus and let the RAC code "open" the right bridges.
     */
    PciBusStateEnter();
    DisablePciBusAccess();
    PciStateEnter();
    disableAccess();
    EntityEnter();
    notifyStateChange(NOTIFY_ENTER);
    xf86EnterServerState(SETUP);
    xf86ResAccessEnter = TRUE;
}
/*
 * xf86AccessLeave() -- prepares access for and calls the
 * entityLeave() functions.
 * xf86AccessLeaveState() --- gets called to restore the
 * access to the VGA IO resources when switching VT or on
 * server exit.
 * This was split to call xf86AccessLeaveState() from
 * ddxGiveUp().
 */
void
xf86AccessLeave(void)
{
    if (!xf86ResAccessEnter)	/* nothing to undo */
	return;
    notifyStateChange(NOTIFY_LEAVE);
    disableAccess();
    DisablePciBusAccess();
    EntityLeave();
}
/*
 * Restore the saved PCI device and bridge state and give up ownership
 * of the access state (see comment above xf86AccessLeave()).
 */
void
xf86AccessLeaveState(void)
{
    if (!xf86ResAccessEnter)
	return;
    xf86ResAccessEnter = FALSE;
    PciStateLeave();
    PciBusStateLeave();
}
/*
 * xf86AccessRestoreState() - Restore the access registers to the
 * state before X was started. This is handy for framebuffers.
 * Unlike xf86AccessLeaveState() it does NOT clear xf86ResAccessEnter.
 */
static void
xf86AccessRestoreState(void)
{
    if (!xf86ResAccessEnter)
	return;
    PciStateLeave();
    PciBusStateLeave();
}
/*
 * xf86EnableAccess() -- enable access to controlled resources.
 * To reduce latency when switching access the ScrnInfoRec has
 * a linked list of the EntityAccPtr of all screen entities.
 *
 * Switching access needs to be done in the following order:
 *   1. disable old entity
 *   2. reroute bus
 *   3. enable new entity
 * Otherwise resources needed for access control might be shadowed
 * by other resources!
 */
_X_EXPORT void
xf86EnableAccess(ScrnInfoPtr pScrn)
{
    register EntityAccessPtr peAcc = (EntityAccessPtr) pScrn->access;
    register EntityAccessPtr pceAcc;
    register xf86AccessPtr pAcc;
    EntityAccessPtr tmp;

#ifdef DEBUG
    ErrorF("Enable access %i\n",pScrn->scrnIndex);
#endif

    /* Entity is not under access control or currently enabled */
    if (!pScrn->access) {
	if (pScrn->busAccess) {
	    ((BusAccPtr)pScrn->busAccess)->set_f(pScrn->busAccess);
	}
	return;
    }

    switch (pScrn->resourceType) {
    case IO:
	pceAcc = pScrn->CurrentAccess->pIoAccess;
	if (peAcc == pceAcc) {	/* already current: nothing to do */
	    return;
	}
	if (pScrn->CurrentAccess->pMemAccess == pceAcc)
	    pScrn->CurrentAccess->pMemAccess = NULL;
	/* 1. disable the old chain */
	while (pceAcc) {
	    pAcc = pceAcc->pAccess;
	    if ( pAcc && pAcc->AccessDisable)
		(*pAcc->AccessDisable)(pAcc->arg);
	    pceAcc = pceAcc->next;
	}
	/* 2. reroute the bus */
	if (pScrn->busAccess)
	    ((BusAccPtr)pScrn->busAccess)->set_f(pScrn->busAccess);
	/* 3. enable the new chain */
	while (peAcc) {
	    pAcc = peAcc->pAccess;
	    if (pAcc && pAcc->AccessEnable)
		(*pAcc->AccessEnable)(pAcc->arg);
	    peAcc = peAcc->next;
	}
	pScrn->CurrentAccess->pIoAccess = (EntityAccessPtr) pScrn->access;
	return;

    case MEM_IO:
	pceAcc = pScrn->CurrentAccess->pIoAccess;
	if (peAcc != pceAcc) { /* current Io != pAccess */
	    tmp = pceAcc;
	    while (pceAcc) {
		pAcc = pceAcc->pAccess;
		if (pAcc && pAcc->AccessDisable)
		    (*pAcc->AccessDisable)(pAcc->arg);
		pceAcc = pceAcc->next;
	    }
	    pceAcc = pScrn->CurrentAccess->pMemAccess;
	    if (peAcc != pceAcc /* current Mem != pAccess */
		&& tmp !=pceAcc) {	/* and not the chain just disabled */
		while (pceAcc) {
		    pAcc = pceAcc->pAccess;
		    if (pAcc && pAcc->AccessDisable)
			(*pAcc->AccessDisable)(pAcc->arg);
		    pceAcc = pceAcc->next;
		}
	    }
	} else { /* current Io == pAccess */
	    pceAcc = pScrn->CurrentAccess->pMemAccess;
	    if (pceAcc == peAcc) { /* current Mem == pAccess */
		return;
	    }
	    while (pceAcc) {  /* current Mem != pAccess */
		pAcc = pceAcc->pAccess;
		if (pAcc && pAcc->AccessDisable)
		    (*pAcc->AccessDisable)(pAcc->arg);
		pceAcc = pceAcc->next;
	    }
	}
	if (pScrn->busAccess)
	    ((BusAccPtr)pScrn->busAccess)->set_f(pScrn->busAccess);
	while (peAcc) {
	    pAcc = peAcc->pAccess;
	    if (pAcc && pAcc->AccessEnable)
		(*pAcc->AccessEnable)(pAcc->arg);
	    peAcc = peAcc->next;
	}
	pScrn->CurrentAccess->pMemAccess =
	    pScrn->CurrentAccess->pIoAccess = (EntityAccessPtr) pScrn->access;
	return;

    case MEM:
	pceAcc = pScrn->CurrentAccess->pMemAccess;
	if (peAcc == pceAcc) {	/* already current: nothing to do */
	    return;
	}
	if (pScrn->CurrentAccess->pIoAccess == pceAcc)
	    pScrn->CurrentAccess->pIoAccess = NULL;
	while (pceAcc) {
	    pAcc = pceAcc->pAccess;
	    if ( pAcc && pAcc->AccessDisable)
		(*pAcc->AccessDisable)(pAcc->arg);
	    pceAcc = pceAcc->next;
	}
	if (pScrn->busAccess)
	    ((BusAccPtr)pScrn->busAccess)->set_f(pScrn->busAccess);
	while (peAcc) {
	    pAcc = peAcc->pAccess;
	    if (pAcc && pAcc->AccessEnable)
		(*pAcc->AccessEnable)(pAcc->arg);
	    peAcc = peAcc->next;
	}
	pScrn->CurrentAccess->pMemAccess = (EntityAccessPtr) pScrn->access;
	return;

    case NONE:
	if (pScrn->busAccess) {
	    ((BusAccPtr)pScrn->busAccess)->set_f(pScrn->busAccess);
	}
	return;
    }
}
789 _X_EXPORT void
790 xf86SetCurrentAccess(Bool Enable, ScrnInfoPtr pScrn)
792 EntityAccessPtr pceAcc2 = NULL;
793 register EntityAccessPtr pceAcc = NULL;
794 register xf86AccessPtr pAcc;
797 switch(pScrn->resourceType) {
798 case IO:
799 pceAcc = pScrn->CurrentAccess->pIoAccess;
800 break;
801 case MEM:
802 pceAcc = pScrn->CurrentAccess->pMemAccess;
803 break;
804 case MEM_IO:
805 pceAcc = pScrn->CurrentAccess->pMemAccess;
806 pceAcc2 = pScrn->CurrentAccess->pIoAccess;
807 break;
808 default:
809 break;
812 while (pceAcc) {
813 pAcc = pceAcc->pAccess;
814 if ( pAcc) {
815 if (!Enable) {
816 if (pAcc->AccessDisable)
817 (*pAcc->AccessDisable)(pAcc->arg);
818 } else {
819 if (pAcc->AccessEnable)
820 (*pAcc->AccessEnable)(pAcc->arg);
823 pceAcc = pceAcc->next;
824 if (!pceAcc) {
825 pceAcc = pceAcc2;
826 pceAcc2 = NULL;
831 _X_EXPORT void
832 xf86SetAccessFuncs(EntityInfoPtr pEnt, xf86SetAccessFuncPtr funcs,
833 xf86SetAccessFuncPtr oldFuncs)
835 AccessFuncPtr rac;
837 if (!xf86Entities[pEnt->index]->rac)
838 xf86Entities[pEnt->index]->rac = xnfcalloc(1,sizeof(AccessFuncRec));
840 rac = xf86Entities[pEnt->index]->rac;
842 if (funcs->mem == funcs->io_mem && funcs->mem && funcs->io)
843 xf86Entities[pEnt->index]->entityProp |= NO_SEPARATE_MEM_FROM_IO;
844 if (funcs->io == funcs->io_mem && funcs->mem && funcs->io)
845 xf86Entities[pEnt->index]->entityProp |= NO_SEPARATE_IO_FROM_MEM;
847 rac->mem_new = funcs->mem;
848 rac->io_new = funcs->io;
849 rac->io_mem_new = funcs->io_mem;
851 rac->old = oldFuncs;
855 * Conflict checking
858 static memType
859 getMask(memType val)
861 memType mask = 0;
862 memType tmp = 0;
864 mask=~mask;
865 tmp = ~((~tmp) >> 1);
867 while (!(val & tmp)) {
868 mask = mask >> 1;
869 val = val << 1;
871 return mask;
/*
 * checkConflictBlock() -- check for conflicts of a block resource range.
 * If conflict is found return end of conflicting range. Else return 0.
 */
static memType
checkConflictBlock(resRange *range, resPtr pRes)
{
    memType val,tmp,prev;
    int i;

    switch (pRes->res_type & ResExtMask) {
    case ResBlock:
	/* simple interval overlap test */
	if (range->rBegin < pRes->block_end &&
	    range->rEnd > pRes->block_begin) {
#ifdef DEBUG
	    ErrorF("b-b conflict w: %lx %lx\n",
		   pRes->block_begin,pRes->block_end);
#endif
	    return pRes->block_end < range->rEnd ?
		pRes->block_end : range->rEnd;
	}
	return 0;
    case ResSparse:
	if (pRes->sparse_base > range->rEnd) return 0;

	/* candidate bits the sparse range can reach inside the block */
	val = (~pRes->sparse_mask | pRes->sparse_base) & getMask(range->rEnd);
#ifdef DEBUG
	ErrorF("base = 0x%lx, mask = 0x%lx, begin = 0x%lx, end = 0x%lx ,"
	       "val = 0x%lx\n",
	       pRes->sparse_base, pRes->sparse_mask, range->rBegin,
	       range->rEnd, val);
#endif
	i = sizeof(memType) * 8;
	tmp = prev = pRes->sparse_base;

	/* greedily build the largest decoded address <= rEnd */
	while (i) {
	    tmp |= 1<< (--i) & val;
	    if (tmp > range->rEnd)
		tmp = prev;
	    else
		prev = tmp;
	}
	if (tmp >= range->rBegin) {	/* it lands inside the block */
#ifdef DEBUG
	    ErrorF("conflict found at: 0x%lx\n",tmp);
	    ErrorF("b-d conflict w: %lx %lx\n",
		   pRes->sparse_base,pRes->sparse_mask);
#endif
	    return tmp;
	}
	else
	    return 0;
    }
    return 0;
}
/*
 * checkConflictSparse() -- check for conflicts of a sparse resource range.
 * If conflict is found return base of conflicting region. Else return 0.
 */
#define mt_max ~(memType)0
#define length sizeof(memType) * 8
static memType
checkConflictSparse(resRange *range, resPtr pRes)
{
    memType val, tmp, prev;
    int i;

    switch (pRes->res_type & ResExtMask) {
    case ResSparse:
	/* two sparse decoders conflict iff they agree on the bits
	 * both of them decode */
	tmp = pRes->sparse_mask & range->rMask;
	if ((tmp & pRes->sparse_base) == (tmp & range->rBase)) {
#ifdef DEBUG
	    ErrorF("s-b conflict w: %lx %lx\n",
		   pRes->sparse_base,pRes->sparse_mask);
#endif
	    return pRes->sparse_mask;
	}
	return 0;

    case ResBlock:
	if (pRes->block_end < range->rBase) return 0;

	/* candidate bits the sparse range can reach inside the block */
	val = (~range->rMask | range->rBase) & getMask(pRes->block_end);
	i = length;
	tmp = prev = range->rBase;

	/* greedily build the largest decoded address <= block_end */
	while (i) {
#ifdef DEBUG
	    ErrorF("tmp = 0x%lx\n",tmp);
#endif
	    tmp |= 1<< (--i) & val;
	    if (tmp > pRes->block_end)
		tmp = prev;
	    else
		prev = tmp;
	}
	if (tmp < pRes->block_begin)
	    return 0;
	else {
	    /*
	     * now we subdivide the block region in sparse regions
	     * with base values = 2^n and find the smallest mask.
	     * This might be done in a simpler way....
	     */
	    memType mask, m_mask = 0, base = pRes->block_begin;
	    int i;
	    while (base < pRes->block_end) {
		for (i = 1; i < length; i++)
		    if ( base != (base & (mt_max << i))) break;
		mask = mt_max >> (length - i);
		do mask >>= 1;
		while ((mask + base + 1) > pRes->block_end);
		/* m_mask is an _inverted_ sparse mask */
		m_mask = mask > m_mask ? mask : m_mask;
		base = base + mask + 1;
	    }
#ifdef DEBUG
	    ErrorF("conflict found at: 0x%lx\n",tmp);
	    ErrorF("b-b conflict w: %lx %lx\n",
		   pRes->block_begin,pRes->block_end);
#endif
	    return ~m_mask;
	}
    }
    return 0;
}
#undef mt_max
#undef length
/*
 * needCheck() -- this function decides whether to check for conflicts
 * depending on the types of the resource ranges and their locations.
 * Returns FALSE whenever the pair cannot conflict by construction.
 */
static Bool
needCheck(resPtr pRes, unsigned long type, int entityIndex, xf86State state)
{
    /* the same entity shouldn't conflict with itself */
    ScrnInfoPtr pScrn;
    int i;
    BusType loc = BUS_NONE;
    BusType r_loc = BUS_NONE;

    /* Ignore overlapped ranges that have been nullified */
    if ((pRes->res_type & ResOverlap) && (pRes->block_begin > pRes->block_end))
	return FALSE;

    /* different physical resource types (mem vs io) never conflict */
    if ((pRes->res_type & ResTypeMask) != (type & ResTypeMask))
	return FALSE;

    /*
     * Resources set by BIOS (ResBios) are allowed to conflict
     * with resources marked (ResBios).
     */
    if (pRes->res_type & type & ResBios)
	return FALSE;

    /* If requested, skip over estimated resources */
    if (pRes->res_type & type & ResEstimated)
	return FALSE;

    if (type & pRes->res_type & ResUnused)
	return FALSE;

    if (state == OPERATING) {
	if (type & ResDisableOpr || pRes->res_type & ResDisableOpr)
	    return FALSE;
	if (type & pRes->res_type & ResUnusedOpr) return FALSE;
	/*
	 * Maybe ResUnused should also set the ResUnusedOpr bit;
	 * this way we could avoid this confusion.
	 */
	if ((type & ResUnusedOpr && pRes->res_type & ResUnused) ||
	    (type & ResUnused && pRes->res_type & ResUnusedOpr))
	    return FALSE;
    }

    if (entityIndex > -1)
	loc = xf86Entities[entityIndex]->busType;
    if (pRes->entityIndex > -1)
	r_loc = xf86Entities[pRes->entityIndex]->busType;

    switch (type & ResAccMask) {
    case ResExclusive:
	switch (pRes->res_type & ResAccMask) {
	case ResExclusive:
	    break;
	case ResShared:
	    /* ISA buses are only locally exclusive on a PCI system */
	    if (loc == BUS_ISA && r_loc == BUS_PCI)
		return FALSE;
	    break;
	}
	break;
    case ResShared:
	switch (pRes->res_type & ResAccMask) {
	case ResExclusive:
	    /* ISA buses are only locally exclusive on a PCI system */
	    if (loc == BUS_PCI && r_loc == BUS_ISA)
		return FALSE;
	    break;
	case ResShared:
	    return FALSE;
	}
	break;
    case ResAny:
	break;
    }

    if (pRes->entityIndex == entityIndex) return FALSE;

    /* entities sharing a screen don't conflict with each other */
    if (pRes->entityIndex > -1 &&
	(pScrn = xf86FindScreenForEntity(entityIndex))) {
	for (i = 0; i < pScrn->numEntities; i++)
	    if (pScrn->entityList[i] == pRes->entityIndex) return FALSE;
    }
    return TRUE;
}
/*
 * checkConflict() - main conflict checking function which all other
 * functions call.  Walks the resource list, skipping pairs that
 * needCheck() rules out, and returns the first conflicting address
 * (or 0 for no conflict / malformed range).
 */
static memType
checkConflict(resRange *rgp, resPtr pRes, int entityIndex,
	      xf86State state, Bool ignoreIdentical)
{
    memType ret;

    while(pRes) {
	if (!needCheck(pRes,rgp->type, entityIndex ,state)) {
	    pRes = pRes->next;
	    continue;
	}
	switch (rgp->type & ResExtMask) {
	case ResBlock:
	    if (rgp->rEnd < rgp->rBegin) {
		xf86Msg(X_ERROR,"end of block range 0x%lx < begin 0x%lx\n",
			rgp->rEnd,rgp->rBegin);
		return 0;
	    }
	    if ((ret = checkConflictBlock(rgp, pRes))) {
		/* optionally tolerate an exactly identical range */
		if (!ignoreIdentical || (rgp->rBegin != pRes->block_begin)
		    || (rgp->rEnd != pRes->block_end))
		    return ret;
	    }
	    break;
	case ResSparse:
	    if ((rgp->rBase & rgp->rMask) != rgp->rBase) {
		xf86Msg(X_ERROR,"sparse io range (base: 0x%lx mask: 0x%lx)"
			"doesn't satisfy (base & mask = mask)\n",
			rgp->rBase, rgp->rMask);
		return 0;
	    }
	    if ((ret = checkConflictSparse(rgp, pRes))) {
		/* optionally tolerate an exactly identical range */
		if (!ignoreIdentical || (rgp->rBase != pRes->sparse_base)
		    || (rgp->rMask != pRes->sparse_mask))
		    return ret;
	    }
	    break;
	}
	pRes = pRes->next;
    }
    return 0;
}
/*
 * ChkConflict() -- used within xxxBus; find conflict with any location
 * (entity index -2 means "no entity": nothing is exempted).
 */
memType
ChkConflict(resRange *rgp, resPtr res, xf86State state)
{
    return checkConflict(rgp, res, -2, state,FALSE);
}
/*
 * xf86ChkConflict() - This function is the low level interface to
 * the resource broker that gets exported. Tests all resources ie.
 * performs test with SETUP flag.
 */
_X_EXPORT memType
xf86ChkConflict(resRange *rgp, int entityIndex)
{
    return checkConflict(rgp, Acc, entityIndex, SETUP,FALSE);
}
1161 * Resources List handling
/*
 * Append rlist2 to the tail of rlist1 and return the combined list.
 * Either argument may be NULL, in which case the other is returned.
 */
_X_EXPORT resPtr
xf86JoinResLists(resPtr rlist1, resPtr rlist2)
{
    resPtr pRes;

    if (!rlist1)
	return rlist2;

    if (!rlist2)
	return rlist1;

    /* walk to the tail of the first list */
    for (pRes = rlist1; pRes->next; pRes = pRes->next)
	;
    pRes->next = rlist2;
    return rlist1;
}
1181 _X_EXPORT resPtr
1182 xf86AddResToList(resPtr rlist, resRange *range, int entityIndex)
1184 resPtr new;
1186 switch (range->type & ResExtMask) {
1187 case ResBlock:
1188 if (range->rEnd < range->rBegin) {
1189 xf86Msg(X_ERROR,"end of block range 0x%lx < begin 0x%lx\n",
1190 range->rEnd,range->rBegin);
1191 return rlist;
1193 break;
1194 case ResSparse:
1195 if ((range->rBase & range->rMask) != range->rBase) {
1196 xf86Msg(X_ERROR,"sparse io range (base: 0x%lx mask: 0x%lx)"
1197 "doesn't satisfy (base & mask = mask)\n",
1198 range->rBase, range->rMask);
1199 return rlist;
1201 break;
1204 new = xnfalloc(sizeof(resRec));
1206 * Only background resources may be registered with ResBios
1207 * and ResEstimated set. Other resources only set it for
1208 * testing.
1210 if (entityIndex != (-1))
1211 range->type &= ~(ResBios | ResEstimated);
1212 new->val = *range;
1213 new->entityIndex = entityIndex;
1214 new->next = rlist;
1215 return new;
1218 _X_EXPORT void
1219 xf86FreeResList(resPtr rlist)
1221 resPtr pRes;
1223 if (!rlist)
1224 return;
1226 for (pRes = rlist->next; pRes; rlist = pRes, pRes = pRes->next)
1227 xfree(rlist);
1228 xfree(rlist);
1231 _X_EXPORT resPtr
1232 xf86DupResList(const resPtr rlist)
1234 resPtr pRes, ret, prev, new;
1236 if (!rlist)
1237 return NULL;
1239 ret = xnfalloc(sizeof(resRec));
1240 *ret = *rlist;
1241 prev = ret;
1242 for (pRes = rlist->next; pRes; pRes = pRes->next) {
1243 new = xnfalloc(sizeof(resRec));
1244 *new = *pRes;
1245 prev->next = new;
1246 prev = new;
1248 return ret;
/*
 * xf86PrintResList() -- pretty-print a resource list at the given
 * verbosity.  Makes two passes over the list: memory resources first
 * ("M"), then io resources ("I").  Each line shows the list index,
 * entity index, PCI domain, the range, and flag letters describing
 * the access/extent/attribute bits.
 */
_X_EXPORT void
xf86PrintResList(int verb, resPtr list)
{
    int i = 0;
    const char *s, *r;
    resPtr tmp = list;          /* remember head for the second pass */
    unsigned long type;

    if (!list)
        return;

    /* pass 1: memory; pass 2: io (see bottom of outer loop) */
    type = ResMem;
    r = "M";
    while (1) {
        while (list) {
            if ((list->res_type & ResPhysMask) == type) {
                switch (list->res_type & ResExtMask) {
                case ResBlock:
                    xf86ErrorFVerb(verb,
                        "\t[%d] %d\t%ld\t0x%08lx - 0x%08lx (0x%lx)",
                        i, list->entityIndex,
                        (list->res_type & ResDomain) >> 24,
                        list->block_begin, list->block_end,
                        list->block_end - list->block_begin + 1);
                    break;
                case ResSparse:
                    xf86ErrorFVerb(verb, "\t[%d] %d\t%ld\t0x%08lx - 0x%08lx ",
                        i, list->entityIndex,
                        (list->res_type & ResDomain) >> 24,
                        list->sparse_base,list->sparse_mask);
                    break;
                default:
                    /* neither block nor sparse: nothing printable */
                    list = list->next;
                    continue;
                }
                xf86ErrorFVerb(verb, " %s", r);
                /* access attribute: x/X exclusive, s/S shared
                   (lowercase when the resource is currently unused) */
                switch (list->res_type & ResAccMask) {
                case ResExclusive:
                    if (list->res_type & ResUnused)
                        s = "x";
                    else
                        s = "X";
                    break;
                case ResShared:
                    if (list->res_type & ResUnused)
                        s = "s";
                    else
                        s = "S";
                    break;
                default:
                    s = "?";
                }
                xf86ErrorFVerb(verb, "%s", s);
                /* extent marker: [B]lock or [S]parse */
                switch (list->res_type & ResExtMask) {
                case ResBlock:
                    s = "[B]";
                    break;
                case ResSparse:
                    s = "[S]";
                    break;
                default:
                    s = "[?]";
                }
                xf86ErrorFVerb(verb, "%s", s);
                /* attribute flags, one letter each */
                if (list->res_type & ResEstimated)
                    xf86ErrorFVerb(verb, "E");
                if (list->res_type & ResOverlap)
                    xf86ErrorFVerb(verb, "O");
                if (list->res_type & ResInit)
                    xf86ErrorFVerb(verb, "t");
                if (list->res_type & ResBios)
                    xf86ErrorFVerb(verb, "(B)");
                if (list->res_type & ResBus)
                    xf86ErrorFVerb(verb, "(b)");
                if (list->res_type & ResOprMask) {
                    switch (list->res_type & ResOprMask) {
                    case ResUnusedOpr:
                        s = "(OprU)";
                        break;
                    case ResDisableOpr:
                        s = "(OprD)";
                        break;
                    default:
                        s = "(Opr?)";
                        break;
                    }
                    xf86ErrorFVerb(verb, "%s", s);
                }
                xf86ErrorFVerb(verb, "\n");
                i++;
            }
            list = list->next;
        }
        if (type == ResIo) break;       /* both passes done */
        type = ResIo;
        r = "I";
        list = tmp;                     /* restart at the head for io */
    }
}
1351 resPtr
1352 xf86AddRangesToList(resPtr list, resRange *pRange, int entityIndex)
1354 while(pRange && pRange->type != ResEnd) {
1355 list = xf86AddResToList(list,pRange,entityIndex);
1356 pRange++;
1358 return list;
/*
 * xf86ResourceBrokerInit() -- initialize the resource broker: collect
 * the addressable windows and OS-claimed ranges, add the PCI-specific
 * resources, and build the global Acc list of accounted resources.
 */
void
xf86ResourceBrokerInit(void)
{
    resPtr resPci;

    osRes = NULL;

    /* Get the addressable ranges */
    ResRange = xf86BusAccWindowsFromOS();
    xf86MsgVerb(X_INFO, 3, "Addressable bus resource ranges are\n");
    xf86PrintResList(3, ResRange);

    /* Get the ranges used exclusively by the system */
    osRes = xf86AccResFromOS(osRes);
    xf86MsgVerb(X_INFO, 3, "OS-reported resource ranges:\n");
    xf86PrintResList(3, osRes);

    /* Bus dep initialization */
    resPci = ResourceBrokerInitPci(&osRes);
    /* Acc owns a private copy of osRes joined with the PCI resources */
    Acc = xf86JoinResLists(xf86DupResList(osRes), resPci);

    xf86MsgVerb(X_INFO, 3, "All system resource ranges:\n");
    xf86PrintResList(3, Acc);
}
1387 #define MEM_ALIGN (1024 * 1024)
1390 * RemoveOverlaps() -- remove overlaps between resources of the
1391 * same kind.
1392 * Beware: This function doesn't check for access attributes.
1393 * At resource broker initialization this is no problem as this
1394 * only deals with exclusive resources.
#if 0
/*
 * (disabled variant) RemoveOverlaps() -- trim `target` so it no longer
 * overlaps any same-type resource in `list`.  Kept for reference only;
 * the active variant below additionally requires the target to be
 * estimated or overlappable before touching it.
 */
void
RemoveOverlaps(resPtr target, resPtr list, Bool pow2Alignment, Bool useEstimated)
{
    resPtr pRes;
    memType size, newsize, adjust;

    if (!target)
        return;

    for (pRes = list; pRes; pRes = pRes->next) {
        if (pRes != target
            && ((pRes->res_type & ResTypeMask) ==
                (target->res_type & ResTypeMask))
            && pRes->block_begin <= target->block_end
            && pRes->block_end >= target->block_begin) {
            /* Possibly ignore estimated resources */
            if (!useEstimated && (pRes->res_type & ResEstimated)) continue;
            /*
             * Target should be a larger region than pRes. If pRes fully
             * contains target, don't do anything unless target can overlap.
             */
            if (pRes->block_begin <= target->block_begin &&
                pRes->block_end >= target->block_end) {
                if (target->res_type & ResOverlap) {
                    /* Nullify range but keep its ResOverlap bit on */
                    target->block_end = target->block_begin - 1;
                    return;
                }
                continue;
            }
            /*
             * In cases where the target and pRes have the same starting
             * address, reduce the size of the target (given it's an estimate).
             */
            if (pRes->block_begin == target->block_begin) {
                if (target->res_type & ResOverlap)
                    target->block_end = target->block_begin - 1;
                else
                    target->block_end = pRes->block_end;
            }
            /* Otherwise, trim target to remove the overlap */
            else if (pRes->block_begin <= target->block_end) {
                target->block_end = pRes->block_begin - 1;
            } else if (!pow2Alignment &&
                       pRes->block_end >= target->block_begin) {
                target->block_begin = pRes->block_end + 1;
            }
            if (pow2Alignment) {
                /*
                 * Align to a power of two.  This requires finding the
                 * largest power of two that is smaller than the adjusted
                 * size.
                 */
                size = target->block_end - target->block_begin + 1;
                newsize = 1UL << (sizeof(memType) * 8 - 1);
                while (!(newsize & size))
                    newsize >>= 1;
                target->block_end = target->block_begin + newsize - 1;
            } else if (target->block_end > MEM_ALIGN) {
                /* Align the end to MEM_ALIGN */
                if ((adjust = (target->block_end + 1) % MEM_ALIGN))
                    target->block_end -= adjust;
            }
        }
    }
}
#else
/*
 * RemoveOverlaps() -- remove overlaps between resources of the same
 * kind by shrinking `target`.  Only estimated or overlappable targets
 * are modified; "sure" resources are trusted as-is.
 * Beware: this function doesn't check access attributes.  At resource
 * broker initialization this is no problem as it only deals with
 * exclusive resources.
 */
void
RemoveOverlaps(resPtr target, resPtr list, Bool pow2Alignment, Bool useEstimated)
{
    resPtr pRes;
    memType size, newsize, adjust;

    if (!target)
        return;

    if (!(target->res_type & ResEstimated)  /* Don't touch sure resources */
        && !(target->res_type & ResOverlap)) /* Unless they may overlap */
        return;

    for (pRes = list; pRes; pRes = pRes->next) {
        /* skip self, different-type, and non-overlapping resources */
        if (pRes == target
            || ((pRes->res_type & ResTypeMask) !=
                (target->res_type & ResTypeMask))
            || pRes->block_begin > target->block_end
            || pRes->block_end < target->block_begin)
            continue;

        if (pRes->block_begin <= target->block_begin) {
            /* Possibly ignore estimated resources */
            if (!useEstimated && (pRes->res_type & ResEstimated))
                continue;

            /* Special cases */
            if (pRes->block_end >= target->block_end) {
                /*
                 * If pRes fully contains target, don't do anything
                 * unless target can overlap.
                 */
                if (target->res_type & ResOverlap) {
                    /* Nullify range but keep its ResOverlap bit on */
                    target->block_end = target->block_begin - 1;
                    return;
                } else
                    continue;
            } else {
#if 0 /* Don't trim start address - we trust what we got */
                /*
                 * If !pow2Alignment trim start address: !pow2Alingment
                 * is only set when estimated OS addresses are handled.
                 * In cases where the target and pRes have the same
                 * starting address, reduce the size of the target
                 * (given it's an estimate).
                 */
                if (!pow2Alignment)
                    target->block_begin = pRes->block_end + 1;
                else
#endif
                if (pRes->block_begin == target->block_begin)
                    target->block_end = pRes->block_end;
                else
                    continue;
            }
        } else {
            /* Trim target to remove the overlap */
            target->block_end = pRes->block_begin - 1;
        }
        if (pow2Alignment) {
            /*
             * Align to a power of two.  This requires finding the
             * largest power of two that is smaller than the adjusted
             * size.
             */
            size = target->block_end - target->block_begin + 1;
            newsize = 1UL << (sizeof(memType) * 8 - 1);
            while (!(newsize & size))
                newsize >>= 1;
            target->block_end = target->block_begin + newsize - 1;
        } else if (target->block_end > MEM_ALIGN) {
            /* Align the end to MEM_ALIGN */
            if ((adjust = (target->block_end + 1) % MEM_ALIGN))
                target->block_end -= adjust;
        }
    }
}
#endif
1547 * Resource request code
1550 #define ALIGN(x,a) ((x) + a) &~(a)
/*
 * xf86GetBlock() -- find a free block of `size` bytes of the given
 * resource type inside [window_start, window_end], aligned per
 * align_mask, conflicting with neither the global Acc list nor the
 * caller-supplied `avoid` list.  Returns a ResEnd-typed range on
 * failure.
 */
_X_EXPORT resRange
xf86GetBlock(unsigned long type, memType size,
             memType window_start, memType window_end,
             memType align_mask, resPtr avoid)
{
    memType min, max, tmp;
    resRange r = {ResEnd,0,0};
    resPtr res_range = ResRange;

    if (!size) return r;
    if (window_end < window_start || (window_end - window_start) < (size - 1)) {
        ErrorF("Requesting insufficient memory window!:"
               " start: 0x%lx end: 0x%lx size 0x%lx\n",
               window_start,window_end,size);
        return r;
    }
    /* force a block-typed request without BIOS/estimated attributes */
    type = (type & ~(ResExtMask | ResBios | ResEstimated)) | ResBlock;

    /* scan each addressable window of matching physical type */
    while (res_range) {
        if ((type & ResTypeMask) == (res_range->res_type & ResTypeMask)) {
            /* intersect the window with the requested range */
            if (res_range->block_begin > window_start)
                min = res_range->block_begin;
            else
                min = window_start;
            if (res_range->block_end < window_end)
                max = res_range->block_end;
            else
                max = window_end;
            min = ALIGN(min,align_mask);
            /* do not produce an overflow! */
            while (min < max && (max - min) >= (size - 1)) {
                RANGE(r,min,min + size - 1,type);
                tmp = ChkConflict(&r,Acc,SETUP);
                if (!tmp) {
                    tmp = ChkConflict(&r,avoid,SETUP);
                    if (!tmp) {
                        return r;
                    }
                }
                /* retry just past the conflicting resource */
                min = ALIGN(tmp,align_mask);
            }
        }
        res_range = res_range->next;
    }
    RANGE(r,0,0,ResEnd);
    return r;
}
1600 #define mt_max ~(memType)0
1601 #define length sizeof(memType) * 8
1603 * make_base() -- assign the lowest bits to the bits set in mask.
1604 * example: mask 011010 val 0000110 -> 011000
1606 static memType
1607 make_base(memType val, memType mask)
1609 int i,j = 0;
1610 memType ret = 0
1612 for (i = 0;i<length;i++) {
1613 if ((1 << i) & mask) {
1614 ret |= (((val >> j) & 1) << i);
1615 j++;
1618 return ret;
 * unmake_base() -- assign the bits set in mask to the lowest bits.
 * example: mask 011010 , val 010010 -> 000011
1625 static memType
1626 unmake_base(memType val, memType mask)
1628 int i,j = 0;
1629 memType ret = 0;
1631 for (i = 0;i<length;i++) {
1632 if ((1 << i) & mask) {
1633 ret |= (((val >> i) & 1) << j);
1634 j++;
1637 return ret;
1640 static memType
1641 fix_counter(memType val, memType old_mask, memType mask)
1643 mask = old_mask & mask;
1645 val = make_base(val,old_mask);
1646 return unmake_base(val,mask);
/*
 * xf86GetSparse() -- find a conflict-free sparse (base/mask) resource
 * matching the given fixed bits and decode/address masks, avoiding
 * both the global Acc list and the caller's `avoid` list.  Returns a
 * ResEnd-typed range on failure.
 */
_X_EXPORT resRange
xf86GetSparse(unsigned long type, memType fixed_bits,
              memType decode_mask, memType address_mask, resPtr avoid)
{
    resRange r = {ResEnd,0,0};
    memType new_mask;
    memType mask1;
    memType base;
    memType counter = 0;
    memType counter1;
    memType max_counter = ~(memType)0;
    memType max_counter1;
    memType conflict = 0;

    /* for sanity */
    type = (type & ~(ResExtMask | ResBios | ResEstimated)) | ResSparse;

    /*
     * a sparse address consists of 3 parts:
     * fixed_bits:   F bits which hard decoded by the hardware
     * decode_bits:  D bits which are used to decode address
     *               but which may be set by software
     * address_bits: A bits which are used to address the
     *               sparse range.
     * the decode_mask marks all decode bits while the address_mask
     * masks out all address_bits:
     *                F D A
     * decode_mask:   0 1 0
     * address_mask:  1 1 0
     */
    decode_mask &= address_mask;
    new_mask = decode_mask;

    /*
     * We start by setting the decode_mask bits to different values
     * when a conflict is found the address_mask of the conflicting
     * resource is returned. We remove those bits from decode_mask
     * that are also set in the returned address_mask as they always
     * conflict with resources which use them as address masks.
     * The resulting mask is stored in new_mask.
     * We continue until no conflict is found or until we have
     * tried all possible settings of new_mask.
     */
    while (1) {
        base = make_base(counter,new_mask) | fixed_bits;
        RANGE(r,base,address_mask,type);
        conflict = ChkConflict(&r,Acc,SETUP);
        if (!conflict) {
            conflict = ChkConflict(&r,avoid,SETUP);
            if (!conflict) {
                return r;
            }
        }
        /* re-express the counters in terms of the reduced mask */
        counter = fix_counter(counter,new_mask,conflict);
        max_counter = fix_counter(max_counter,new_mask,conflict);
        new_mask &= conflict;
        counter ++;
        if (counter > max_counter) break;
    }
    /* no decode bits left to vary: give up */
    if (!new_mask && (new_mask == decode_mask)) {
        RANGE(r,0,0,ResEnd);
        return r;
    }
    /*
     * if we haven't been successful we also try to modify those
     * bits in decode_mask that are not at the same time set in
     * new mask. These bits overlap with address_bits of some
     * resources. If a conflict with a resource of this kind is
     * found (ie. returned_mask & mask1 != mask1) with
     * mask1 = decode_mask & ~new_mask we cannot
     * use our choice of bits in the new_mask part. We try
     * another choice.
     */
    max_counter = fix_counter(mt_max,mt_max,new_mask);
    mask1 = decode_mask & ~new_mask;
    max_counter1 = fix_counter(mt_max,mt_max,mask1);
    counter = 0;

    while (1) {
        counter1 = 0;
        while (1) {
            base = make_base(counter1,mask1);
            RANGE(r,base,address_mask,type);
            conflict = ChkConflict(&r,Acc,SETUP);
            if (!conflict) {
                conflict = ChkConflict(&r,avoid,SETUP);
                if (!conflict) {
                    return r;
                }
            }
            counter1 ++;
            if ((mask1 & conflict) != mask1 || counter1 > max_counter1)
                break;
        }
        counter ++;
        if (counter > max_counter) break;
    }
    RANGE(r,0,0,ResEnd);
    return r;
}
1750 #undef length
1751 #undef mt_max
 * Resource registration
1757 static resList
1758 xf86GetResourcesImplicitly(int entityIndex)
1760 if (entityIndex >= xf86NumEntities) return NULL;
1762 switch (xf86Entities[entityIndex]->bus.type) {
1763 case BUS_ISA:
1764 case BUS_NONE:
1765 case BUS_SBUS:
1766 return NULL;
1767 case BUS_PCI:
1768 return GetImplicitPciResources(entityIndex);
1769 case BUS_last:
1770 return NULL;
1772 return NULL;
/*
 * convertRange2Host() -- translate a bus-relative range (ResBus set)
 * into host address space for the entity's bus type, then clear the
 * ResBus flag.  Ranges without ResBus are left untouched.
 */
static void
convertRange2Host(int entityIndex, resRange *pRange)
{
    if (pRange->type & ResBus) {
        switch (xf86Entities[entityIndex]->busType) {
        case BUS_PCI:
            pciConvertRange2Host(entityIndex,pRange);
            break;
        case BUS_ISA:
            isaConvertRange2Host(pRange);
            break;
        default:
            /* other bus types have no translation */
            break;
        }

        pRange->type &= ~ResBus;
    }
}
1794 static void
1795 xf86ConvertListToHost(int entityIndex, resPtr list)
1797 while (list) {
1798 convertRange2Host(entityIndex, &list->val);
1799 list = list->next;
1804 * xf86RegisterResources() -- attempts to register listed resources.
1805 * If list is NULL it tries to obtain resources implicitly. Function
1806 * returns a resPtr listing all resources not successfully registered.
/*
 * xf86RegisterResources() -- attempts to register listed resources.
 * If list is NULL it tries to obtain resources implicitly.  Returns a
 * resPtr listing all resources NOT successfully registered (i.e. the
 * conflicting ones); successfully registered ranges go onto Acc.
 */
_X_EXPORT resPtr
xf86RegisterResources(int entityIndex, resList list, unsigned long access)
{
    resPtr res = NULL;
    resRange range;
    resList list_f = NULL;      /* implicitly obtained list to free later */

    if (!list) {
        list = xf86GetResourcesImplicitly(entityIndex);
        /* these resources have to be in host address space already */
        if (!list) return NULL;
        list_f = list;
    }

    while(list->type != ResEnd) {
        range = *list;

        convertRange2Host(entityIndex,&range);

        /* caller-requested access attribute overrides the range's own */
        if ((access != ResNone) && (access & ResAccMask)) {
            range.type = (range.type & ~ResAccMask) | (access & ResAccMask);
        }
        range.type &= ~ResEstimated; /* Not allowed for drivers */
#if !((defined(__alpha__) || (defined(__ia64__))) && defined(linux))
        /* On Alpha Linux, do not check for conflicts, trust the kernel. */
        if (checkConflict(&range, Acc, entityIndex, SETUP,TRUE))
            res = xf86AddResToList(res,&range,entityIndex);
        else
#endif
            Acc = xf86AddResToList(Acc,&range,entityIndex);
        list++;
    }
    if (list_f)
        xfree(list_f);

#ifdef DEBUG
    xf86MsgVerb(X_INFO, 3,"Resources after driver initialization\n");
    xf86PrintResList(3, Acc);
    if (res) xf86MsgVerb(X_INFO, 3,
                         "Failed Resources after driver initialization "
                         "for Entity: %i\n",entityIndex);
    xf86PrintResList(3, res);
#endif
    return res;
}
1858 static void
1859 busTypeSpecific(EntityPtr pEnt, xf86State state, xf86AccessPtr *acc_mem,
1860 xf86AccessPtr *acc_io, xf86AccessPtr *acc_mem_io)
1862 pciAccPtr *ppaccp;
1864 switch (pEnt->bus.type) {
1865 case BUS_ISA:
1866 case BUS_SBUS:
1867 *acc_mem = *acc_io = *acc_mem_io = &AccessNULL;
1868 break;
1869 break;
1870 case BUS_PCI:
1871 ppaccp = xf86PciAccInfo;
1872 while (*ppaccp) {
1873 if ((*ppaccp)->busnum == pEnt->pciBusId.bus
1874 && (*ppaccp)->devnum == pEnt->pciBusId.device
1875 && (*ppaccp)->funcnum == pEnt->pciBusId.func) {
1876 *acc_io = &(*ppaccp)->ioAccess;
1877 *acc_mem = &(*ppaccp)->memAccess;
1878 *acc_mem_io = &(*ppaccp)->io_memAccess;
1879 break;
1881 ppaccp++;
1883 break;
1884 default:
1885 *acc_mem = *acc_io = *acc_mem_io = NULL;
1886 break;
1888 return;
/*
 * setAccess() -- configure an entity's RAC access functions for the
 * given server state: pick the bus-specific (or driver-replacement)
 * access records, decide which resource types must stay under RAC
 * control, enable/disable the others, and route VGA if required.
 */
static void
setAccess(EntityPtr pEnt, xf86State state)
{

    xf86AccessPtr acc_mem, acc_io, acc_mem_io;
    xf86AccessPtr org_mem = NULL, org_io = NULL, org_mem_io = NULL;
    int prop;

    busTypeSpecific(pEnt,state,&acc_mem,&acc_io,&acc_mem_io);

    /* The replacement function needs to handle _all_ shared resources */
    /* unless they are handled locally and disabled otherwise */
    if (pEnt->rac) {
        if (pEnt->rac->io_new) {
            org_io = acc_io;
            acc_io = pEnt->rac->io_new;
        }
        if (pEnt->rac->mem_new) {
            org_mem = acc_mem;
            acc_mem = pEnt->rac->mem_new;
        }
        if (pEnt->rac->io_mem_new) {
            org_mem_io = acc_mem_io;
            acc_mem_io = pEnt->rac->io_mem_new;
        }
    }

    if (state == OPERATING) {
        /* in OPERATING state only the shared types remain under RAC */
        prop = pEnt->entityProp;
        switch(pEnt->entityProp & NEED_SHARED) {
        case NEED_SHARED:
            pEnt->access->rt = MEM_IO;
            break;
        case NEED_IO_SHARED:
            pEnt->access->rt = IO;
            break;
        case NEED_MEM_SHARED:
            pEnt->access->rt = MEM;
            break;
        default:
            pEnt->access->rt = NONE;
        }
    } else {
        /* during SETUP everything is treated as shared */
        prop = NEED_SHARED | NEED_MEM | NEED_IO;
        pEnt->access->rt = MEM_IO;
    }

    switch(pEnt->access->rt) {
    case IO:
        pEnt->access->pAccess = acc_io;
        break;
    case MEM:
        pEnt->access->pAccess = acc_mem;
        break;
    case MEM_IO:
        pEnt->access->pAccess = acc_mem_io;
        break;
    default: /* no conflicts at all */
        pEnt->access->pAccess = NULL; /* remove from RAC */
        break;
    }

    if (org_io) {
        /* does the driver want the old access func? */
        if (pEnt->rac->old) {
            /* give it to the driver, leave state disabled */
            pEnt->rac->old->io = org_io;
        } else if (org_io->AccessEnable) {
            /* driver doesn't want it - enable generic access */
            org_io->AccessEnable(org_io->arg);
        }
    }

    if (org_mem_io) {
        /* does the driver want the old access func? */
        if (pEnt->rac->old) {
            /* give it to the driver, leave state disabled */
            pEnt->rac->old->io_mem = org_mem_io;
        } else if (org_mem_io->AccessEnable) {
            /* driver doesn't want it - enable generic access */
            org_mem_io->AccessEnable(org_mem_io->arg);
        }
    }

    if (org_mem) {
        /* does the driver want the old access func? */
        if (pEnt->rac->old) {
            /* give it to the driver, leave state disabled */
            pEnt->rac->old->mem = org_mem;
        } else if (org_mem->AccessEnable) {
            /* driver doesn't want it - enable generic access */
            org_mem->AccessEnable(org_mem->arg);
        }
    }

    /* non-shared resources are switched on/off once, here */
    if (!(prop & NEED_MEM_SHARED)){
        if (prop & NEED_MEM) {
            if (acc_mem && acc_mem->AccessEnable)
                acc_mem->AccessEnable(acc_mem->arg);
        } else {
            if (acc_mem && acc_mem->AccessDisable)
                acc_mem->AccessDisable(acc_mem->arg);
        }
    }

    if (!(prop & NEED_IO_SHARED)) {
        if (prop & NEED_IO) {
            if (acc_io && acc_io->AccessEnable)
                acc_io->AccessEnable(acc_io->arg);
        } else {
            if (acc_io && acc_io->AccessDisable)
                acc_io->AccessDisable(acc_io->arg);
        }
    }

    /* disable shared resources */
    if (pEnt->access->pAccess
        && pEnt->access->pAccess->AccessDisable)
        pEnt->access->pAccess->AccessDisable(pEnt->access->pAccess->arg);

    /*
     * If device is not under access control it is enabled.
     * If it needs bus routing do it here as it isn't bus
     * type specific. Any conflicts should be checked at this
     * stage
     */
    if (!pEnt->access->pAccess
        && (pEnt->entityProp & (state == SETUP ? NEED_VGA_ROUTED_SETUP :
                                NEED_VGA_ROUTED)))
        ((BusAccPtr)pEnt->busAcc)->set_f(pEnt->busAcc);
}
2025 * xf86EnterServerState() -- set state the server is in.
2028 typedef enum { TRI_UNSET, TRI_TRUE, TRI_FALSE } TriState;
2030 static void
2031 SetSIGIOForState(xf86State state)
2033 static int sigio_state;
2034 static TriState sigio_blocked = TRI_UNSET;
2036 if ((state == SETUP) && (sigio_blocked != TRI_TRUE)) {
2037 sigio_state = xf86BlockSIGIO();
2038 sigio_blocked = TRI_TRUE;
2039 } else if ((state == OPERATING) && (sigio_blocked != TRI_UNSET)) {
2040 xf86UnblockSIGIO(sigio_state);
2041 sigio_blocked = TRI_FALSE;
/*
 * xf86EnterServerState() -- set state the server is in (SETUP or
 * OPERATING): propagate per-entity access records to the screens,
 * count screens that need VGA routing, and configure RAC for every
 * entity, notifying state-change listeners along the way.
 */
_X_EXPORT void
xf86EnterServerState(xf86State state)
{
    EntityPtr pEnt;
    ScrnInfoPtr pScrn;
    int i,j;
    int needVGA = 0;
    resType rt;

    /*
     * This is a good place to block SIGIO during SETUP state.
     * SIGIO should be blocked in SETUP state otherwise (u)sleep()
     * might get interrupted early.
     * We take care not to call xf86BlockSIGIO() twice.
     */
    SetSIGIOForState(state);
#ifdef DEBUG
    if (state == SETUP)
        ErrorF("Entering SETUP state\n");
    else
        ErrorF("Entering OPERATING state\n");
#endif

    /* When servicing a dumb framebuffer we don't need to do anything */
    if (doFramebufferMode) return;

    for (i=0; i<xf86NumScreens; i++) {
        pScrn = xf86Screens[i];
        /* the screen inherits the access record of its last entity */
        j = pScrn->entityList[pScrn->numEntities - 1];
        pScrn->access = xf86Entities[j]->access;

        for (j = 0; j<xf86Screens[i]->numEntities; j++) {
            pEnt = xf86Entities[xf86Screens[i]->entityList[j]];
            if (pEnt->entityProp & (state == SETUP ? NEED_VGA_ROUTED_SETUP
                                    : NEED_VGA_ROUTED))
                xf86Screens[i]->busAccess = pEnt->busAcc;
        }
        if (xf86Screens[i]->busAccess)
            needVGA ++;
    }

    /*
     * if we just have one screen we don't have RAC.
     * Therefore just enable the screen and return.
     */
    if (!needRAC) {
        xf86EnableAccess(xf86Screens[0]);
        notifyStateChange(NOTIFY_ENABLE);
        return;
    }

    if (state == SETUP)
        notifyStateChange(NOTIFY_SETUP_TRANSITION);
    else
        notifyStateChange(NOTIFY_OPERATING_TRANSITION);

    clearAccess();
    for (i=0; i<xf86NumScreens;i++) {

        rt = NONE;

        /* combine the resource types shared by all of the screen's
           entities */
        for (j = 0; j<xf86Screens[i]->numEntities; j++) {
            pEnt = xf86Entities[xf86Screens[i]->entityList[j]];
            setAccess(pEnt,state);

            if (pEnt->access->rt != NONE) {
                if (rt != NONE && rt != pEnt->access->rt)
                    rt = MEM_IO;
                else
                    rt = pEnt->access->rt;
            }
        }
        xf86Screens[i]->resourceType = rt;
        if (rt == NONE) {
            xf86Screens[i]->access = NULL;
            if (needVGA < 2)
                xf86Screens[i]->busAccess = NULL;
        }

#ifdef DEBUG
        if (xf86Screens[i]->busAccess)
            ErrorF("Screen %i setting vga route\n",i);
#endif
        switch (rt) {
        case MEM_IO:
            xf86MsgVerb(X_INFO, 3, "Screen %i shares mem & io resources\n",i);
            break;
        case IO:
            xf86MsgVerb(X_INFO, 3, "Screen %i shares io resources\n",i);
            break;
        case MEM:
            xf86MsgVerb(X_INFO, 3, "Screen %i shares mem resources\n",i);
            break;
        default:
            xf86MsgVerb(X_INFO, 3, "Entity %i shares no resources\n",i);
            break;
        }
    }
    if (state == SETUP)
        notifyStateChange(NOTIFY_SETUP);
    else
        notifyStateChange(NOTIFY_OPERATING);
}
2149 * xf86SetOperatingState() -- Set ResOperMask for resources listed.
/*
 * xf86SetOperatingState() -- Set ResOperMask bits for the resources
 * listed.  Each range is matched against the entity's entries on the
 * global Acc list; ranges that cannot be found are returned on a list
 * of failures (NULL when everything matched).
 */
_X_EXPORT resPtr
xf86SetOperatingState(resList list, int entityIndex, int mask)
{
    resPtr acc;
    resPtr r_fail = NULL;
    resRange range;

    while (list->type != ResEnd) {
        range = *list;
        convertRange2Host(entityIndex,&range);

        /* find the matching resource registered for this entity */
        acc = Acc;
        while (acc) {
#define MASK (ResTypeMask | ResExtMask)
            if ((acc->entityIndex == entityIndex)
                && (acc->val.a == range.a) && (acc->val.b == range.b)
                && ((acc->val.type & MASK) == (range.type & MASK)))
                break;
#undef MASK
            acc = acc->next;
        }
        if (acc)
            /* replace only the operating-state bits */
            acc->val.type = (acc->val.type & ~ResOprMask)
                | (mask & ResOprMask);
        else {
            r_fail = xf86AddResToList(r_fail,&range,entityIndex);
        }
        list ++;
    }

    return r_fail;
}
2185 * Stage specific code
2188 * ProcessEstimatedConflicts() -- Do something about driver-registered
2189 * resources that conflict with estimated resources. For now, just register
2190 * them with a logged warning.
2192 #ifdef REDUCER
/*
 * ProcessEstimatedConflicts() -- Do something about driver-registered
 * resources that conflict with estimated resources.  For now, just
 * register them (append AccReducers to Acc) with a logged warning.
 */
static void
ProcessEstimatedConflicts(void)
{
    /* nothing deferred */
    if (!AccReducers)
        return;

    /* Temporary */
    xf86MsgVerb(X_WARNING, 3,
                "Registering the following despite conflicts with estimated"
                " resources:\n");
    xf86PrintResList(3, AccReducers);
    Acc = xf86JoinResLists(Acc, AccReducers);
    AccReducers = NULL;
}
2207 #endif
2210 * xf86ClaimFixedResources() -- This function gets called from the
2211 * driver Probe() function to claim fixed resources.
/*
 * resError() -- abort the server with a diagnostic when a driver tries
 * to claim a fixed resource that conflicts with an already-allocated
 * one.  Does not return (FatalError).
 */
static void
resError(resList list)
{
    FatalError("A driver tried to allocate the %s %sresource at \n"
               "0x%lx:0x%lx which conflicted with another resource. Send the\n"
               "output of the server to %s. Please \n"
               "specify your computer hardware as closely as possible.\n",
               ResIsBlock(list)?"Block":"Sparse",
               ResIsMem(list)?"Mem":"Io",
               ResIsBlock(list)?list->rBegin:list->rBase,
               ResIsBlock(list)?list->rEnd:list->rMask,BUILDERADDR);
}
2227 * xf86ClaimFixedResources() is used to allocate non-relocatable resources.
2228 * This should only be done by a driver's Probe() function.
/*
 * xf86ClaimFixedResources() is used to allocate non-relocatable
 * resources.  This should only be done by a driver's Probe() function.
 * Exclusive ranges are registered immediately (or fatally rejected on
 * conflict); shared ranges are parked on the entity for xf86PostProbe()
 * to process.
 */
_X_EXPORT void
xf86ClaimFixedResources(resList list, int entityIndex)
{
    resPtr ptr = NULL;
    resRange range;

    if (!list) return;

    while (list->type !=ResEnd) {
        range = *list;

        convertRange2Host(entityIndex,&range);

        range.type &= ~ResEstimated; /* Not allowed for drivers */
        switch (range.type & ResAccMask) {
        case ResExclusive:
            if (!xf86ChkConflict(&range, entityIndex)) {
                Acc = xf86AddResToList(Acc, &range, entityIndex);
#ifdef REDUCER
            } else {
                /* retry as "estimated": conflicts only with sure
                   resources then; defer via AccReducers if clean */
                range.type |= ResEstimated;
                if (!xf86ChkConflict(&range, entityIndex) &&
                    !checkConflict(&range, AccReducers, entityIndex,
                                   SETUP, FALSE)) {
                    range.type &= ~(ResEstimated | ResBios);
                    AccReducers =
                        xf86AddResToList(AccReducers, &range, entityIndex);
#endif
                } else resError(&range); /* no return */
#ifdef REDUCER
            }
#endif
            break;
        case ResShared:
            /* at this stage the resources are just added to the
             * EntityRec. After the Probe() phase this list is checked by
             * xf86PostProbe(). All resources which don't
             * conflict with already allocated ones are allocated
             * and removed from the EntityRec. Thus a non-empty resource
             * list in the EntityRec indicates resource conflicts the
             * driver should either handle or fail.
             */
            if (xf86Entities[entityIndex]->active)
                ptr = xf86AddResToList(ptr,&range,entityIndex);
            break;
        }
        list++;
    }
    xf86Entities[entityIndex]->resources =
        xf86JoinResLists(xf86Entities[entityIndex]->resources,ptr);
    xf86MsgVerb(X_INFO, 3,
                "resource ranges after xf86ClaimFixedResources() call:\n");
    xf86PrintResList(3,Acc);
#ifdef REDUCER
    ProcessEstimatedConflicts();
#endif
#ifdef DEBUG
    if (ptr) {
        xf86MsgVerb(X_INFO, 3, "to be registered later:\n");
        xf86PrintResList(3,ptr);
    }
#endif
}
/*
 * checkRoutingForScreens() -- mark every entity whose registered
 * resources clash with the exclusive VGA ranges as needing VGA routed
 * for the given state, and delete screens whose entities would need
 * VGA routed to different buses.
 */
static void
checkRoutingForScreens(xf86State state)
{
    resList list = resVgaUnusedExclusive;
    resPtr pResVGA = NULL;
    resPtr pResVGAHost;
    pointer vga = NULL;
    int i,j;
    int entityIndex;
    EntityPtr pEnt;
    resPtr pAcc;
    resRange range;

    /*
     * find devices that need VGA routed: ie the ones that have
     * registered VGA resources without ResUnused. ResUnused
     * doesn't conflict with itself therefore use it here.
     */
    while (list->type != ResEnd) { /* create resPtr from resList for VGA */
        range = *list;
        range.type &= ~(ResBios | ResEstimated); /* if set remove them */
        pResVGA = xf86AddResToList(pResVGA, &range, -1);
        list++;
    }

    for (i = 0; i < xf86NumScreens; i++) {
        for (j = 0; j < xf86Screens[i]->numEntities; j++) {
            entityIndex = xf86Screens[i]->entityList[j];
            pEnt = xf86Entities[entityIndex];
            pAcc = Acc;
            vga = NULL;
            /* translate the VGA list into this entity's host space */
            pResVGAHost = xf86DupResList(pResVGA);
            xf86ConvertListToHost(entityIndex,pResVGAHost);
            while (pAcc) {
                if (pAcc->entityIndex == entityIndex)
                    if (checkConflict(&pAcc->val, pResVGAHost,
                                      entityIndex, state, FALSE)) {
                        /* two conflicting entities on different buses:
                           the screen cannot be served */
                        if (vga && vga != pEnt->busAcc) {
                            xf86Msg(X_ERROR, "Screen %i needs vga routed to"
                                    "different buses - deleting\n",i);
                            xf86DeleteScreen(i--,0);
                        }
#ifdef DEBUG
                        {
                            resPtr rlist = xf86AddResToList(NULL,&pAcc->val,
                                                            pAcc->entityIndex);
                            xf86MsgVerb(X_INFO,3,"====== %s\n",
                                        state == OPERATING ? "OPERATING"
                                        : "SETUP");
                            xf86MsgVerb(X_INFO,3,"%s Resource:\n",
                                        (pAcc->val.type) & ResMem ? "Mem" :"Io");
                            xf86PrintResList(3,rlist);
                            xf86FreeResList(rlist);
                            xf86MsgVerb(X_INFO,3,"Conflicts with:\n");
                            xf86PrintResList(3,pResVGAHost);
                            xf86MsgVerb(X_INFO,3,"=====\n");
                        }
#endif
                        vga = pEnt->busAcc;
                        pEnt->entityProp |= (state == SETUP
                            ? NEED_VGA_ROUTED_SETUP : NEED_VGA_ROUTED);
                        if (state == OPERATING) {
                            if (pAcc->val.type & ResMem)
                                pEnt->entityProp |= NEED_VGA_MEM;
                            else
                                pEnt->entityProp |= NEED_VGA_IO;
                        }
                    }
                pAcc = pAcc->next;
            }
            if (vga)
                xf86MsgVerb(X_INFO, 3,"Setting vga for screen %i.\n",i);
            xf86FreeResList(pResVGAHost);
        }
    }
    xf86FreeResList(pResVGA);
}
2373 * xf86PostProbe() -- Allocate all non conflicting resources
2374 * This function gets called by xf86Init().
/*
 * xf86PostProbe() -- Allocate all non conflicting resources parked on
 * the entities during Probe(); conflicting ones stay on the entity for
 * the driver to handle.  Also handles the framebuffer-only fast path
 * and triggers PCI validation and VGA routing checks.
 * This function gets called by xf86Init().
 */
void
xf86PostProbe(void)
{
    memType val;
    int i,j;
    resPtr resp, acc, tmp, resp_x, *pprev_next;

    if (fbSlotClaimed) {
        /* a framebuffer cannot coexist with bus-probed devices */
        if (pciSlotClaimed || isaSlotClaimed
#if (defined(__sparc__) || defined(__sparc)) && !defined(__OpenBSD__)
            || sbusSlotClaimed
#endif
            ) {
            FatalError("Cannot run in framebuffer mode. Please specify busIDs "
                       " for all framebuffer devices\n");
            return;
        } else {
            xf86Msg(X_INFO,"Running in FRAMEBUFFER Mode\n");
            xf86AccessRestoreState();
            notifyStateChange(NOTIFY_ENABLE);
            doFramebufferMode = TRUE;

            return;
        }
    }
    /* don't compare against ResInit - remove it from clone.*/
    acc = tmp = xf86DupResList(Acc);
    pprev_next = &acc;
    while (tmp) {
        if (tmp->res_type & ResInit) {
            (*pprev_next) = tmp->next;
            xfree(tmp);
        } else
            pprev_next = &(tmp->next);
        tmp = (*pprev_next);
    }

    for (i=0; i<xf86NumEntities; i++) {
        resp = xf86Entities[i]->resources;
        xf86Entities[i]->resources = NULL;
        resp_x = NULL;          /* collects nodes to move onto Acc */
        while (resp) {
            if (! (val = checkConflict(&resp->val,acc,i,SETUP,FALSE))) {
                resp->res_type &= ~(ResBios); /* just used for chkConflict() */
                tmp = resp_x;
                resp_x = resp;
                resp = resp->next;
                resp_x->next = tmp;
#ifdef REDUCER
            } else {
                /* retry as estimated: ignores estimated-vs-estimated
                   clashes; defer via AccReducers when clean */
                resp->res_type |= ResEstimated;
                if (!checkConflict(&resp->val, acc, i, SETUP, FALSE)) {
                    resp->res_type &= ~(ResEstimated | ResBios);
                    tmp = AccReducers;
                    AccReducers = resp;
                    resp = resp->next;
                    AccReducers->next = tmp;
#endif
                } else {
                    /* real conflict: give the resource back to the
                       entity for the driver to sort out */
                    xf86MsgVerb(X_INFO, 3, "Found conflict at: 0x%lx\n",val);
                    resp->res_type &= ~ResEstimated;
                    tmp = xf86Entities[i]->resources;
                    xf86Entities[i]->resources = resp;
                    resp = resp->next;
                    xf86Entities[i]->resources->next = tmp;
                }
#ifdef REDUCER
            }
#endif
        }
        xf86JoinResLists(Acc,resp_x);
#ifdef REDUCER
        ProcessEstimatedConflicts();
#endif
    }
    xf86FreeResList(acc);
#if !(defined(__alpha__) && defined(linux)) && \
    !(defined(__ia64__) && defined(linux)) && \
    !(defined(__sparc64__) && defined(__OpenBSD__))
    /*
     * No need to validate on Alpha Linux or OpenBSD/sparc64,
     * trust the kernel.
     */
    ValidatePci();
#endif

    xf86MsgVerb(X_INFO, 3, "resource ranges after probing:\n");
    xf86PrintResList(3, Acc);
    checkRoutingForScreens(SETUP);

    /* remember the bus access record of the first entity per screen
       that needs VGA routed during SETUP */
    for (i = 0; i < xf86NumScreens; i++) {
        for (j = 0; j<xf86Screens[i]->numEntities; j++) {
            EntityPtr pEnt = xf86Entities[xf86Screens[i]->entityList[j]];
            if ((pEnt->entityProp & NEED_VGA_ROUTED_SETUP) &&
                ((xf86Screens[i]->busAccess = pEnt->busAcc)))
                break;
        }
    }
}
/*
 * NOTE(review): extraction artifact — original line numbers are fused into
 * each line and brace-only lines are missing; code kept byte-identical.
 *
 * checkRequiredResources() scans Acc for resources owned by entityIndex
 * and sets NEED_MEM/NEED_IO (resource is used outside OPERATING) and
 * NEED_MEM_SHARED/NEED_IO_SHARED (resource conflicts with another
 * entity) on the entity, then prunes the flags that turn out unneeded.
 */
2476 static void
2477 checkRequiredResources(int entityIndex)
2479 resRange range;
2480 resPtr pAcc = Acc;
2481 const EntityPtr pEnt = xf86Entities[entityIndex];
2482 while (pAcc) {
2483 if (pAcc->entityIndex == entityIndex) {
2484 range = pAcc->val;
2485 /* ResAny to find conflicts with anything. */
2486 range.type = (range.type & ~ResAccMask) | ResAny | ResBios;
2487 if (checkConflict(&range,Acc,entityIndex,OPERATING,FALSE))
2488 switch (pAcc->res_type & ResPhysMask) {
2489 case ResMem:
2490 pEnt->entityProp |= NEED_MEM_SHARED;
2491 break;
2492 case ResIo:
2493 pEnt->entityProp |= NEED_IO_SHARED;
2494 break;
/* Resources not flagged ResOprMask are needed outside OPERATING, too. */
2496 if (!(pAcc->res_type & ResOprMask)) {
2497 switch (pAcc->res_type & ResPhysMask) {
2498 case ResMem:
2499 pEnt->entityProp |= NEED_MEM;
2500 break;
2501 case ResIo:
2502 pEnt->entityProp |= NEED_IO;
2503 break;
2507 pAcc = pAcc->next;
2510 /* check if we can separately enable mem/io resources */
2511 /* XXX we still need to find out how to set this yet */
2512 if ( ((pEnt->entityProp & NO_SEPARATE_MEM_FROM_IO)
2513 && (pEnt->entityProp & NEED_MEM_SHARED))
2514 || ((pEnt->entityProp & NO_SEPARATE_IO_FROM_MEM)
2515 && (pEnt->entityProp & NEED_IO_SHARED)) )
2516 pEnt->entityProp |= NEED_SHARED;
2518 * After we have checked all resources of an entity agains any
2519 * other resource we know if the entity need this resource type
2520 * (ie. mem/io) at all. if not we can disable this type completely,
2521 * so no need to share it either.
2523 if ((pEnt->entityProp & NEED_MEM_SHARED)
2524 && (!(pEnt->entityProp & NEED_MEM))
2525 && (!(pEnt->entityProp & NO_SEPARATE_MEM_FROM_IO)))
2526 pEnt->entityProp &= ~(unsigned long)NEED_MEM_SHARED;
2528 if ((pEnt->entityProp & NEED_IO_SHARED)
2529 && (!(pEnt->entityProp & NEED_IO))
2530 && (!(pEnt->entityProp & NO_SEPARATE_IO_FROM_MEM)))
2531 pEnt->entityProp &= ~(unsigned long)NEED_IO_SHARED;
2534 void
2535 xf86PostPreInit()
2537 if (doFramebufferMode) return;
2539 if (xf86NumScreens > 1)
2540 needRAC = TRUE;
2542 xf86MsgVerb(X_INFO, 3, "do I need RAC?");
2544 if (needRAC) {
2545 xf86ErrorFVerb(3, " Yes, I do.\n");
2546 } else {
2547 xf86ErrorFVerb(3, " No, I don't.\n");
2550 xf86MsgVerb(X_INFO, 3, "resource ranges after preInit:\n");
2551 xf86PrintResList(3, Acc);
/*
 * NOTE(review): extraction artifact — original line numbers are fused into
 * each line and brace-only lines are missing; code kept byte-identical.
 *
 * xf86PostScreenInit() finalizes resource sharing decisions after all
 * ScreenInits: it prunes NEED_*_SHARED flags that only a single entity
 * carries, computes per-screen RAC flags (for shared mem/io and for VGA
 * routing when more than one screen needs VGA), registers RAC on each
 * screen, and enters the OPERATING server state.
 */
2554 void
2555 xf86PostScreenInit(void)
2557 int i,j;
2558 ScreenPtr pScreen;
2559 unsigned int flags;
2560 int nummem = 0, numio = 0;
2562 if (doFramebufferMode) {
2563 SetSIGIOForState(OPERATING);
2564 return;
2567 #ifdef DEBUG
2568 ErrorF("PostScreenInit generation: %i\n",serverGeneration);
2569 #endif
/* Only on the first generation: compute the NEED_* flags per entity. */
2570 if (serverGeneration == 1) {
2571 checkRoutingForScreens(OPERATING);
2572 for (i=0; i<xf86NumEntities; i++) {
2573 checkRequiredResources(i);
2577 * after removing NEED_XXX_SHARED from entities that
2578 * don't need need XXX resources at all we might have
2579 * a single entity left that has NEED_XXX_SHARED set.
2580 * In this case we can delete that, too.
2582 for (i = 0; i < xf86NumEntities; i++) {
2583 if (xf86Entities[i]->entityProp & NEED_MEM_SHARED)
2584 nummem++;
2585 if (xf86Entities[i]->entityProp & NEED_IO_SHARED)
2586 numio++;
/* Sharing needs at least two parties; with fewer, drop the flags. */
2588 for (i = 0; i < xf86NumEntities; i++) {
2589 if (nummem < 2)
2590 xf86Entities[i]->entityProp &= ~NEED_MEM_SHARED;
2591 if (numio < 2)
2592 xf86Entities[i]->entityProp &= ~NEED_IO_SHARED;
2596 if (xf86Screens && needRAC) {
2597 int needRACforVga = 0;
/* Count screens (once each) that need VGA routed while OPERATING. */
2599 for (i = 0; i < xf86NumScreens; i++) {
2600 for (j = 0; j < xf86Screens[i]->numEntities; j++) {
2601 if (xf86Entities[xf86Screens[i]->entityList[j]]->entityProp
2602 & NEED_VGA_ROUTED) {
2603 needRACforVga ++;
2604 break; /* only count each screen once */
2609 for (i = 0; i < xf86NumScreens; i++) {
2610 Bool needRACforMem = FALSE, needRACforIo = FALSE;
2612 for (j = 0; j < xf86Screens[i]->numEntities; j++) {
2613 if (xf86Entities[xf86Screens[i]->entityList[j]]->entityProp
2614 & NEED_MEM_SHARED)
2615 needRACforMem = TRUE;
2616 if (xf86Entities[xf86Screens[i]->entityList[j]]->entityProp
2617 & NEED_IO_SHARED)
2618 needRACforIo = TRUE;
2620 * We may need RAC although we don't share any resources
2621 * as we need to route VGA to the correct bus. This can
2622 * only be done simultaniously for MEM and IO.
2624 if (needRACforVga > 1) {
2625 if (xf86Entities[xf86Screens[i]->entityList[j]]->entityProp
2626 & NEED_VGA_MEM)
2627 needRACforMem = TRUE;
2628 if (xf86Entities[xf86Screens[i]->entityList[j]]->entityProp
2629 & NEED_VGA_IO)
2630 needRACforIo = TRUE;
2634 pScreen = xf86Screens[i]->pScreen;
2635 flags = 0;
2636 if (needRACforMem) {
2637 flags |= xf86Screens[i]->racMemFlags;
2638 xf86ErrorFVerb(3, "Screen %d is using RAC for mem\n", i);
2640 if (needRACforIo) {
2641 flags |= xf86Screens[i]->racIoFlags;
2642 xf86ErrorFVerb(3, "Screen %d is using RAC for io\n", i);
2645 xf86RACInit(pScreen,flags);
2649 xf86EnterServerState(OPERATING);
2654 * Sets
2658 static resPtr
2659 decomposeSparse(resRange range)
2661 resRange new;
2662 resPtr ret = NULL;
2663 memType val = range.rBegin;
2664 int i = 0;
2666 new.type = (range.type & ~ResExtMask) | ResSparse;
2668 while (1) {
2669 if (val & 0x01) {
2670 new.rBase = (val << i);
2671 new.rMask = ~((1 << i) - 1);
2672 ret = xf86AddResToList(ret,&new,-1);
2673 val ++;
2675 i++;
2676 val >>= 1;
2677 if ((((val + 1) << i) - 1) > range.rEnd)
2678 break;
2680 i--;
2681 val <<= 1;
2683 while (1) {
2684 if((((val + 1) << i) - 1)> range.rEnd) {
2685 if (--i < 0) break;
2686 val <<= 1;
2687 } else {
2688 new.rBase = (val << i);
2689 new.rMask = ~((1 << i) - 1);
2690 val++;
2691 ret = xf86AddResToList(ret,&new,-1);
2694 return ret;
2697 static Bool
2698 x_isSubsetOf(resRange range, resPtr list1, resPtr list2)
2700 resRange range1, range2;
2701 memType m1_A_m2;
2702 Bool ret;
2703 resPtr list;
2705 if (list1) {
2706 list = list1;
2707 if ((range.type & ResTypeMask) == (list->res_type & ResTypeMask)) {
2708 switch (range.type & ResExtMask) {
2709 case ResBlock:
2710 if ((list->res_type & ResExtMask) == ResBlock) {
2711 if (range.rBegin >= list->block_begin
2712 && range.rEnd <= list->block_end)
2713 return TRUE;
2714 else if (range.rBegin < list->block_begin
2715 && range.rEnd > list->block_end) {
2716 RANGE(range1, range.rBegin, list->block_begin - 1,
2717 range.type);
2718 RANGE(range2, list->block_end + 1, range.rEnd,
2719 range.type);
2720 return (x_isSubsetOf(range1,list->next,list2) &&
2721 x_isSubsetOf(range2,list->next,list2));
2723 else if (range.rBegin >= list->block_begin
2724 && range.rBegin <= list->block_end) {
2725 RANGE(range1, list->block_end + 1, range.rEnd,
2726 range.type);
2727 return (x_isSubsetOf(range1,list->next,list2));
2728 } else if (range.rEnd >= list->block_begin
2729 && range.rEnd <= list->block_end) {
2730 RANGE(range1,range.rBegin, list->block_begin - 1,
2731 range.type);
2732 return (x_isSubsetOf(range1,list->next,list2));
2735 break;
2736 case ResSparse:
2737 if ((list->res_type & ResExtMask) == ResSparse) {
2738 memType test;
2739 int i;
2741 m1_A_m2 = range.rMask & list->sparse_mask;
2742 if ((range.rBase ^ list->sparse_base) & m1_A_m2)
2743 break;
2745 * We use the following system:
2746 * let 0 ^= mask:1 base:0, 1 ^= mask:1 base:1,
2747 * X mask:0 ; S: set TSS: test set for subset
2748 * NTSS: new test set after test
2749 * S: 1 0 1 0 X X 0 1 X
2750 * TSS: 1 0 0 1 1 0 X X X
2751 * T: 0 0 1 1 0 0 0 0 0
2752 * NTSS: 1 0 0/X 1/X 1 0 1 0 X
2753 * R: 0 0 0 0 0 0 1 1 0
2754 * If R != 0 TSS and S are disjunct
2755 * If R == 0 TSS is subset of S
2756 * If R != 0 NTSS contains elements from TSS
2757 * which are not also members of S.
2758 * If a T is set one of the correspondig bits
2759 * in NTSS must be set to the specified value
2760 * all other are X
2762 test = list->sparse_mask & ~range.rMask;
2763 if (test == 0)
2764 return TRUE;
2765 for (i = 0; i < sizeof(memType); i++) {
2766 if ((test >> i) & 0x1) {
2767 RANGE(range1, ((range.rBase & list->sparse_base)
2768 | (range.rBase & ~list->sparse_mask)
2769 | ((~list->sparse_base & list->sparse_mask)
2770 & ~range.rMask)) & range1.rMask,
2771 ((range.rMask | list->sparse_mask) & ~test)
2772 | (1 << i), range.type);
2773 return (x_isSubsetOf(range1,list->next,list2));
2777 break;
2780 return (x_isSubsetOf(range,list->next,list2));
2781 } else if (list2) {
2782 resPtr tmpList = NULL;
2783 switch (range.type & ResExtMask) {
2784 case ResBlock:
2785 tmpList = decomposeSparse(range);
2786 while (tmpList) {
2787 if (!x_isSubsetOf(tmpList->val,list2,NULL)) {
2788 xf86FreeResList(tmpList);
2789 return FALSE;
2791 tmpList = tmpList->next;
2793 xf86FreeResList(tmpList);
2794 return TRUE;
2795 break;
2796 case ResSparse:
2797 while (list2) {
2798 tmpList = xf86JoinResLists(tmpList,decomposeSparse(list2->val));
2799 list2 = list2->next;
2801 ret = x_isSubsetOf(range,tmpList,NULL);
2802 xf86FreeResList(tmpList);
2803 return ret;
2804 break;
2806 } else
2807 return FALSE;
2809 return FALSE;
2812 Bool
2813 xf86IsSubsetOf(resRange range, resPtr list)
2815 resPtr dup = xf86DupResList(list);
2816 resPtr r_sp = NULL, r = NULL, tmp = NULL;
2817 Bool ret = FALSE;
2819 while (dup) {
2820 tmp = dup;
2821 dup = dup->next;
2822 switch (tmp->res_type & ResExtMask) {
2823 case ResBlock:
2824 tmp->next = r;
2825 r = tmp;
2826 break;
2827 case ResSparse:
2828 tmp->next = r_sp;
2829 r_sp = tmp;
2830 break;
2834 switch (range.type & ResExtMask) {
2835 case ResBlock:
2836 ret = x_isSubsetOf(range,r,r_sp);
2837 break;
2838 case ResSparse:
2839 ret = x_isSubsetOf(range,r_sp,r);
2840 break;
2842 xf86FreeResList(r);
2843 xf86FreeResList(r_sp);
2845 return ret;
2848 static resPtr
2849 findIntersect(resRange Range, resPtr list)
2851 resRange range;
2852 resPtr new = NULL;
2854 while (list) {
2855 if ((Range.type & ResTypeMask) == (list->res_type & ResTypeMask)) {
2856 switch (Range.type & ResExtMask) {
2857 case ResBlock:
2858 switch (list->res_type & ResExtMask) {
2859 case ResBlock:
2860 if (Range.rBegin >= list->block_begin)
2861 range.rBegin = Range.rBegin;
2862 else
2863 range.rBegin = list->block_begin;
2864 if (Range.rEnd <= list->block_end)
2865 range.rEnd = Range.rEnd;
2866 else
2867 range.rEnd = list->block_end;
2868 if (range.rEnd > range.rBegin) {
2869 range.type = Range.type;
2870 new = xf86AddResToList(new,&range,-1);
2872 break;
2873 case ResSparse:
2874 new = xf86JoinResLists(new,xf86FindIntersectOfLists(new,decomposeSparse(list->val)));
2875 break;
2877 break;
2878 case ResSparse:
2879 switch (list->res_type & ResExtMask) {
2880 case ResSparse:
2881 if (!((~(range.rBase ^ list->sparse_base)
2882 & (range.rMask & list->sparse_mask)))) {
2883 RANGE(range, (range.rBase & list->sparse_base)
2884 | (~range.rMask & list->sparse_base)
2885 | (~list->sparse_mask & range.rBase),
2886 range.rMask | list->sparse_mask,
2887 Range.type);
2888 new = xf86AddResToList(new,&range,-1);
2890 break;
2891 case ResBlock:
2892 new = xf86JoinResLists(new,xf86FindIntersectOfLists(
2893 decomposeSparse(range),list));
2894 break;
2898 list = list->next;
2900 return new;
2903 resPtr
2904 xf86FindIntersectOfLists(resPtr l1, resPtr l2)
2906 resPtr ret = NULL;
2908 while (l1) {
2909 ret = xf86JoinResLists(ret,findIntersect(l1->val,l2));
2910 l1 = l1->next;
2912 return ret;
/*
 * NOTE(review): dead code (#if 0) kept byte-identical; original line
 * numbers are fused into each line by the extraction.  If ever revived:
 * the ResSparse loop iterates over sizeof(memType) bytes where bits look
 * intended, reads range.rMask before range is initialized, and shifts
 * the int constant 1 — all need fixing first (compare x_isSubsetOf()).
 */
2915 #if 0 /* Not used */
2916 static resPtr
2917 xf86FindComplement(resRange Range)
2919 resRange range;
2920 memType tmp;
2921 resPtr new = NULL;
2922 int i;
2924 switch (Range.type & ResExtMask) {
2925 case ResBlock:
/* Complement of a block is the (up to) two blocks around it. */
2926 if (Range.rBegin > 0) {
2927 RANGE(range, 0, Range.rBegin - 1, Range.type);
2928 new = xf86AddResToList(new,&range,-1);
2930 if (Range.rEnd < (memType)~0) {
2931 RANGE(range,Range.rEnd + 1, (memType)~0, Range.type);
2932 new = xf86AddResToList(new,&range,-1);
2934 break;
2935 case ResSparse:
2936 tmp = Range.rMask;
2937 for (i = 0; i < sizeof(memType); i++) {
2938 if (tmp & 0x1) {
2939 RANGE(range,(~Range.rMask & range.rMask),(1 << i), Range.type);
2940 new = xf86AddResToList(new,&range,-1);
2943 break;
2944 default:
2945 break;
2947 return new;
2949 #endif
2951 resPtr
2952 xf86ExtractTypeFromList(resPtr list, unsigned long type)
2954 resPtr ret = NULL;
2956 while (list) {
2957 if ((list->res_type & ResTypeMask) == type)
2958 ret = xf86AddResToList(ret,&(list->val),list->entityIndex);
2959 list = list->next;
2961 return ret;
2964 /*------------------------------------------------------------*/
2965 static void CheckGenericGA(void);
2968 * xf86FindPrimaryDevice() - Find the display device which
2969 * was active when the server was started.
2971 void
2972 xf86FindPrimaryDevice()
2974 /* if no VGA device is found check for primary PCI device */
2975 if (primaryBus.type == BUS_NONE && xorgHWAccess)
2976 CheckGenericGA();
2977 if (primaryBus.type != BUS_NONE) {
2978 char *bus;
2979 char *loc = xnfcalloc(1,9);
2980 if (loc == NULL) return;
2982 switch (primaryBus.type) {
2983 case BUS_PCI:
2984 bus = "PCI";
2985 sprintf(loc," %2.2x:%2.2x:%1.1x",primaryBus.id.pci.bus,
2986 primaryBus.id.pci.device,primaryBus.id.pci.func);
2987 break;
2988 case BUS_ISA:
2989 bus = "ISA";
2990 loc[0] = '\0';
2991 break;
2992 case BUS_SBUS:
2993 bus = "SBUS";
2994 sprintf(loc," %2.2x",primaryBus.id.sbus.fbNum);
2995 break;
2996 default:
2997 bus = "";
2998 loc[0] = '\0';
3001 xf86MsgVerb(X_INFO, 2, "Primary Device is: %s%s\n",bus,loc);
3002 xfree(loc);
3007 #if !defined(__sparc) && !defined(__sparc__) && !defined(__powerpc__) && !defined(__mips__) && !defined(__arm__)
3008 #include "vgaHW.h"
3009 #include "compiler.h"
3010 #endif
/*
 * NOTE(review): extraction artifact — original line numbers are fused
 * into each line and brace-only lines are missing; the raw VGA port I/O
 * below is order-sensitive, so the code is kept byte-identical.
 *
 * Probes for a VGA-compatible adapter by toggling attribute register
 * 0x14 (which EGA lacks) and reading it back; on success records the
 * primary bus as ISA.  Compiled out on non-x86-ish architectures.
 */
3013 * CheckGenericGA() - Check for presence of a VGA device.
3015 static void
3016 CheckGenericGA()
3018 /* This needs to be changed for multiple domains */
3019 #if !defined(__sparc__) && !defined(__sparc) && !defined(__powerpc__) && !defined(__mips__) && !defined(__ia64__) && !defined(__arm__) && !defined(__s390__)
3020 IOADDRESS GenericIOBase = VGAHW_GET_IOBASE();
3021 CARD8 CurrentValue, TestValue;
3023 /* VGA CRTC registers are not used here, so don't bother unlocking them */
3025 /* VGA has one more read/write attribute register than EGA */
3026 (void) inb(GenericIOBase + VGA_IN_STAT_1_OFFSET); /* Reset flip-flop */
3027 outb(VGA_ATTR_INDEX, 0x14 | 0x20);
3028 CurrentValue = inb(VGA_ATTR_DATA_R);
/* Write an inverted pattern, read it back, then restore the original. */
3029 outb(VGA_ATTR_DATA_W, CurrentValue ^ 0x0F);
3030 outb(VGA_ATTR_INDEX, 0x14 | 0x20);
3031 TestValue = inb(VGA_ATTR_DATA_R);
3032 outb(VGA_ATTR_DATA_W, CurrentValue);
3034 if ((CurrentValue ^ 0x0F) == TestValue) {
3035 primaryBus.type = BUS_ISA;
3037 #endif
3040 _X_EXPORT Bool
3041 xf86NoSharedResources(int screenIndex,resType res)
3043 int j;
3045 if (screenIndex > xf86NumScreens)
3046 return TRUE;
3048 for (j = 0; j < xf86Screens[screenIndex]->numEntities; j++) {
3049 switch (res) {
3050 case IO:
3051 if ( xf86Entities[xf86Screens[screenIndex]->entityList[j]]->entityProp
3052 & NEED_IO_SHARED)
3053 return FALSE;
3054 break;
3055 case MEM:
3056 if ( xf86Entities[xf86Screens[screenIndex]->entityList[j]]->entityProp
3057 & NEED_MEM_SHARED)
3058 return FALSE;
3059 break;
3060 case MEM_IO:
3061 if ( xf86Entities[xf86Screens[screenIndex]->entityList[j]]->entityProp
3062 & NEED_SHARED)
3063 return FALSE;
3064 break;
3065 case NONE:
3066 break;
3069 return TRUE;
3072 _X_EXPORT void
3073 xf86RegisterStateChangeNotificationCallback(xf86StateChangeNotificationCallbackFunc func, pointer arg)
3075 StateChangeNotificationPtr ptr =
3076 (StateChangeNotificationPtr)xnfalloc(sizeof(StateChangeNotificationRec));
3078 ptr->func = func;
3079 ptr->arg = arg;
3080 ptr->next = StateChangeNotificationList;
3081 StateChangeNotificationList = ptr;
3084 _X_EXPORT Bool
3085 xf86DeregisterStateChangeNotificationCallback(xf86StateChangeNotificationCallbackFunc func)
3087 StateChangeNotificationPtr *ptr = &StateChangeNotificationList;
3088 StateChangeNotificationPtr tmp;
3090 while (*ptr) {
3091 if ((*ptr)->func == func) {
3092 tmp = (*ptr);
3093 (*ptr) = (*ptr)->next;
3094 xfree(tmp);
3095 return TRUE;
3097 ptr = &((*ptr)->next);
3099 return FALSE;
3102 static void
3103 notifyStateChange(xf86NotifyState state)
3105 StateChangeNotificationPtr ptr = StateChangeNotificationList;
3106 while (ptr) {
3107 ptr->func(state,ptr->arg);
3108 ptr = ptr->next;
3112 /* Multihead accel sharing accessor functions and entity Private handling */
3114 _X_EXPORT int
3115 xf86GetLastScrnFlag(int entityIndex)
3117 if(entityIndex < xf86NumEntities) {
3118 return(xf86Entities[entityIndex]->lastScrnFlag);
3119 } else {
3120 return -1;
3124 _X_EXPORT void
3125 xf86SetLastScrnFlag(int entityIndex, int scrnIndex)
3127 if(entityIndex < xf86NumEntities) {
3128 xf86Entities[entityIndex]->lastScrnFlag = scrnIndex;
3132 _X_EXPORT Bool
3133 xf86IsEntityShared(int entityIndex)
3135 if(entityIndex < xf86NumEntities) {
3136 if(xf86Entities[entityIndex]->entityProp & IS_SHARED_ACCEL) {
3137 return TRUE;
3140 return FALSE;
3143 _X_EXPORT void
3144 xf86SetEntityShared(int entityIndex)
3146 if(entityIndex < xf86NumEntities) {
3147 xf86Entities[entityIndex]->entityProp |= IS_SHARED_ACCEL;
3151 _X_EXPORT Bool
3152 xf86IsEntitySharable(int entityIndex)
3154 if(entityIndex < xf86NumEntities) {
3155 if(xf86Entities[entityIndex]->entityProp & ACCEL_IS_SHARABLE) {
3156 return TRUE;
3159 return FALSE;
3162 _X_EXPORT void
3163 xf86SetEntitySharable(int entityIndex)
3165 if(entityIndex < xf86NumEntities) {
3166 xf86Entities[entityIndex]->entityProp |= ACCEL_IS_SHARABLE;
3170 _X_EXPORT Bool
3171 xf86IsPrimInitDone(int entityIndex)
3173 if(entityIndex < xf86NumEntities) {
3174 if(xf86Entities[entityIndex]->entityProp & SA_PRIM_INIT_DONE) {
3175 return TRUE;
3178 return FALSE;
3181 _X_EXPORT void
3182 xf86SetPrimInitDone(int entityIndex)
3184 if(entityIndex < xf86NumEntities) {
3185 xf86Entities[entityIndex]->entityProp |= SA_PRIM_INIT_DONE;
3189 _X_EXPORT void
3190 xf86ClearPrimInitDone(int entityIndex)
3192 if(entityIndex < xf86NumEntities) {
3193 xf86Entities[entityIndex]->entityProp &= ~SA_PRIM_INIT_DONE;
3199 * Allocate a private in the entities.
3202 _X_EXPORT int
3203 xf86AllocateEntityPrivateIndex(void)
3205 int idx, i;
3206 EntityPtr pEnt;
3207 DevUnion *nprivs;
3209 idx = xf86EntityPrivateCount++;
3210 for (i = 0; i < xf86NumEntities; i++) {
3211 pEnt = xf86Entities[i];
3212 nprivs = xnfrealloc(pEnt->entityPrivates,
3213 xf86EntityPrivateCount * sizeof(DevUnion));
3214 /* Zero the new private */
3215 bzero(&nprivs[idx], sizeof(DevUnion));
3216 pEnt->entityPrivates = nprivs;
3218 return idx;
3221 _X_EXPORT DevUnion *
3222 xf86GetEntityPrivate(int entityIndex, int privIndex)
3224 if (entityIndex >= xf86NumEntities || privIndex >= xf86EntityPrivateCount)
3225 return NULL;
3227 return &(xf86Entities[entityIndex]->entityPrivates[privIndex]);