// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (c) 1999 The Puffin Group (David Kennedy and Alex deVries)
 * Copyright (c) 2001 Matthew Wilcox for Hewlett-Packard
 *
 * These are the routines to discover what hardware exists in this box.
 * This task is complicated by there being 3 different ways of
 * performing an inventory, depending largely on the age of the box.
 * The recommended way to do this is to check to see whether the machine
 * is a `Snake' first, then try System Map, then try PAT.  We try System
 * Map before checking for a Snake -- this probably doesn't cause any
 * problems, but...
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/platform_device.h>
#include <asm/hardware.h>
#include <asm/io.h>
#include <asm/mmzone.h>
#include <asm/pdc.h>
#include <asm/pdcpat.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/parisc-device.h>
#include <asm/tlbflush.h>
/*
** Debug options
** DEBUG_PAT  Dump details which PDC PAT provides about ranges/devices.
*/
#undef DEBUG_PAT
int pdc_type __ro_after_init = PDC_TYPE_ILLEGAL;

/* cell number and location (PAT firmware only) */
unsigned long parisc_cell_num __ro_after_init;
unsigned long parisc_cell_loc __ro_after_init;
unsigned long parisc_pat_pdc_cap __ro_after_init;
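
/*
 * setup_pdc() probes the firmware interfaces in order (System Map first,
 * then 64-bit PAT, then the legacy bus-id check for Snake workstations)
 * and records which flavour of PDC this machine speaks in pdc_type.
 */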
void __init setup_pdc(void)
{
	long status;
	unsigned int bus_id;
	struct pdc_system_map_mod_info module_result;
	struct pdc_module_path module_path;
	struct pdc_model model;
#ifdef CONFIG_64BIT
	struct pdc_pat_cell_num cell_info;
#endif

	/* Determine the pdc "type" used on this machine */

	printk(KERN_INFO "Determining PDC firmware type: ");

	status = pdc_system_map_find_mods(&module_result, &module_path, 0);
	if (status == PDC_OK) {
		pdc_type = PDC_TYPE_SYSTEM_MAP;
		pr_cont("System Map.\n");
		return;
	}

	/*
	 * If the machine doesn't support PDC_SYSTEM_MAP then either it
	 * is a pdc pat box, or it is an older box. All 64 bit capable
	 * machines are either pdc pat boxes or they support PDC_SYSTEM_MAP.
	 */

	/*
	 * TODO: We should test for 64 bit capability and give a
	 * clearer message.
	 */

#ifdef CONFIG_64BIT
	status = pdc_pat_cell_get_number(&cell_info);
	if (status == PDC_OK) {
		unsigned long legacy_rev, pat_rev;

		pdc_type = PDC_TYPE_PAT;
		pr_cont("64 bit PAT.\n");
		parisc_cell_num = cell_info.cell_num;
		parisc_cell_loc = cell_info.cell_loc;
		pr_info("PAT: Running on cell %lu and location %lu.\n",
			parisc_cell_num, parisc_cell_loc);
		status = pdc_pat_pd_get_pdc_revisions(&legacy_rev,
			&pat_rev, &parisc_pat_pdc_cap);
		pr_info("PAT: legacy revision 0x%lx, pat_rev 0x%lx, pdc_cap 0x%lx, S-PTLB %d, HPMC_RENDEZ %d.\n",
			legacy_rev, pat_rev, parisc_pat_pdc_cap,
			parisc_pat_pdc_cap
				& PDC_PAT_CAPABILITY_BIT_SIMULTANEOUS_PTLB ? 1 : 0,
			parisc_pat_pdc_cap
				& PDC_PAT_CAPABILITY_BIT_PDC_HPMC_RENDEZ ? 1 : 0);
		return;
	}
#endif

	/* Check the CPU's bus ID.  There's probably a better test.  */

	status = pdc_model_info(&model);

	bus_id = (model.hversion >> (4 + 7)) & 0x1f;

	switch (bus_id) {
	case 0x4:	/* 720, 730, 750, 735, 755 */
	case 0x6:	/* 705, 710 */
	case 0x7:	/* 715, 725 */
	case 0x8:	/* 745, 747, 742 */
	case 0xA:	/* 712 and similar */
	case 0xC:	/* 715/64, at least */
		pdc_type = PDC_TYPE_SNAKE;
		pr_cont("Snake.\n");
		return;

	default:	/* Everything else */
		pr_cont("Unsupported.\n");
		panic("If this is a 64-bit machine, please try a 64-bit kernel.\n");
	}
}

#define PDC_PAGE_ADJ_SHIFT (PAGE_SHIFT - 12) /* pdc pages are always 4k */
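
/*
 * Convert a PDC-reported range (byte start address plus a count of 4k
 * pages) into a pfn/page-count entry of the firmware-independent
 * pmem_ranges[] table; used by all of the memory-configuration paths below.
 */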
static void __init set_pmem_entry(physmem_range_t *pmem_ptr, unsigned long start,
				  unsigned long pages4k)
{
	/* Rather than aligning and potentially throwing away
	 * memory, we'll assume that any ranges are already
	 * nicely aligned with any reasonable page size, and
	 * panic if they are not (it's more likely that the
	 * pdc info is bad in this case).
	 */

	if (unlikely( ((start & (PAGE_SIZE - 1)) != 0)
		|| ((pages4k & ((1UL << PDC_PAGE_ADJ_SHIFT) - 1)) != 0) )) {

		panic("Memory range doesn't align with page size!\n");
	}

	pmem_ptr->start_pfn = (start >> PAGE_SHIFT);
	pmem_ptr->pages = (pages4k >> PDC_PAGE_ADJ_SHIFT);
}

static void __init pagezero_memconfig(void)
{
	unsigned long npages;

	/* Use the 32 bit information from page zero to create a single
	 * entry in the pmem_ranges[] table.
	 *
	 * We currently don't support machines with contiguous memory
	 * >= 4 Gb, who report that memory using 64 bit only fields
	 * on page zero. It's not worth doing until it can be tested,
	 * and it is not clear we can support those machines for other
	 * reasons.
	 *
	 * If that support is done in the future, this is where it
	 * should be done.
	 */

	npages = (PAGE_ALIGN(PAGE0->imm_max_mem) >> PAGE_SHIFT);
	set_pmem_entry(pmem_ranges, 0UL, npages);
	npmem_ranges = 1;
}

#ifdef CONFIG_64BIT

/* All of the PDC PAT specific code is 64-bit only */

/*
**  The module object is filled via PDC_PAT_CELL[Return Cell Module].
**  If a module is found, register module will get the IODC bytes via
**  pdc_iodc_read() using the PA view of conf_base_addr for the hpa parameter.
**
**  The IO view can be used by PDC_PAT_CELL[Return Cell Module]
**  only for SBAs and LBAs.  This view will cause an invalid
**  argument error for all other cell module types.
*/
static int __init
pat_query_module(ulong pcell_loc, ulong mod_index)
{
	pdc_pat_cell_mod_maddr_block_t *pa_pdc_cell;
	unsigned long bytecnt;
	unsigned long temp;	/* 64-bit scratch value */
	long status;		/* PDC return value status */
	struct parisc_device *dev;

	pa_pdc_cell = kmalloc(sizeof(*pa_pdc_cell), GFP_KERNEL);
	if (!pa_pdc_cell)
		panic("couldn't allocate memory for PDC_PAT_CELL!");

	/* return cell module (PA or Processor view) */
	status = pdc_pat_cell_module(&bytecnt, pcell_loc, mod_index,
				     PA_VIEW, pa_pdc_cell);

	if (status != PDC_OK) {
		/* no more cell modules or error */
		kfree(pa_pdc_cell);
		return status;
	}

	temp = pa_pdc_cell->cba;
	dev = alloc_pa_dev(PAT_GET_CBA(temp), &(pa_pdc_cell->mod_path));
	if (!dev) {
		kfree(pa_pdc_cell);
		return PDC_OK;
	}

	/* alloc_pa_dev sets dev->hpa */

	/*
	** save parameters in the parisc_device
	** (The idea being the device driver will call pdc_pat_cell_module()
	** and store the results in its own data structure.)
	*/
	dev->pcell_loc = pcell_loc;
	dev->mod_index = mod_index;

	/* save generic info returned from the call */
	/* REVISIT: who is the consumer of this? not sure yet... */
	dev->mod_info = pa_pdc_cell->mod_info;	/* pass to PAT_GET_ENTITY() */
	dev->pmod_loc = pa_pdc_cell->mod_location;
	dev->mod0 = pa_pdc_cell->mod[0];

	register_parisc_device(dev);	/* advertise device */

#ifdef DEBUG_PAT
	/* dump what we see so far... */
	switch (PAT_GET_ENTITY(dev->mod_info)) {
		pdc_pat_cell_mod_maddr_block_t io_pdc_cell;
		unsigned long i;

	case PAT_ENTITY_PROC:
		printk(KERN_DEBUG "PAT_ENTITY_PROC: id_eid 0x%lx\n",
			pa_pdc_cell->mod[0]);
		break;

	case PAT_ENTITY_MEM:
		printk(KERN_DEBUG
			"PAT_ENTITY_MEM: amount 0x%lx min_gni_base 0x%lx min_gni_len 0x%lx\n",
			pa_pdc_cell->mod[0], pa_pdc_cell->mod[1],
			pa_pdc_cell->mod[2]);
		break;

	case PAT_ENTITY_CA:
		printk(KERN_DEBUG "PAT_ENTITY_CA: %ld\n", pcell_loc);
		break;

	case PAT_ENTITY_PBC:
		printk(KERN_DEBUG "PAT_ENTITY_PBC: ");
		goto print_ranges;

	case PAT_ENTITY_SBA:
		printk(KERN_DEBUG "PAT_ENTITY_SBA: ");
		goto print_ranges;

	case PAT_ENTITY_LBA:
		printk(KERN_DEBUG "PAT_ENTITY_LBA: ");

 print_ranges:
		pdc_pat_cell_module(&bytecnt, pcell_loc, mod_index,
				    IO_VIEW, &io_pdc_cell);
		printk(KERN_DEBUG "ranges %ld\n", pa_pdc_cell->mod[1]);
		for (i = 0; i < pa_pdc_cell->mod[1]; i++) {
			printk(KERN_DEBUG
				"  PA_VIEW %ld: 0x%016lx 0x%016lx 0x%016lx\n",
				i, pa_pdc_cell->mod[2 + i * 3],	/* type */
				pa_pdc_cell->mod[3 + i * 3],	/* start */
				pa_pdc_cell->mod[4 + i * 3]);	/* finish (ie end) */
			printk(KERN_DEBUG
				"  IO_VIEW %ld: 0x%016lx 0x%016lx 0x%016lx\n",
				i, io_pdc_cell.mod[2 + i * 3],	/* type */
				io_pdc_cell.mod[3 + i * 3],	/* start */
				io_pdc_cell.mod[4 + i * 3]);	/* finish (ie end) */
		}
		printk(KERN_DEBUG "\n");
		break;
	}
#endif /* DEBUG_PAT */

	kfree(pa_pdc_cell);

	return PDC_OK;
}

/* pat pdc can return information about a variety of different
 * types of memory (e.g. firmware,i/o, etc) but we only care about
 * the usable physical ram right now. Since the firmware specific
 * information is allocated on the stack, we'll be generous, in
 * case there is a lot of other information we don't care about.
 */

#define PAT_MAX_RANGES (4 * MAX_PHYSMEM_RANGES)
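
/*
 * Build pmem_ranges[] from the PAT PDC address map, keeping only the
 * entries that describe usable RAM (general, GI and GNI usage types).
 */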
static void __init pat_memconfig(void)
{
	unsigned long actual_len;
	struct pdc_pat_pd_addr_map_entry mem_table[PAT_MAX_RANGES+1];
	struct pdc_pat_pd_addr_map_entry *mtbl_ptr;
	physmem_range_t *pmem_ptr;
	long status;
	int entries;
	unsigned long length;
	int i;

	length = (PAT_MAX_RANGES + 1) * sizeof(struct pdc_pat_pd_addr_map_entry);

	status = pdc_pat_pd_get_addr_map(&actual_len, mem_table, length, 0L);

	if ((status != PDC_OK)
	    || ((actual_len % sizeof(struct pdc_pat_pd_addr_map_entry)) != 0)) {

		/* The above pdc call shouldn't fail, but, just in
		 * case, just use the PAGE0 info.
		 */

		printk(KERN_WARNING "WARNING! Could not get full memory configuration. "
			"All memory may not be used!\n\n\n");
		pagezero_memconfig();
		return;
	}

	entries = actual_len / sizeof(struct pdc_pat_pd_addr_map_entry);

	if (entries > PAT_MAX_RANGES) {
		printk(KERN_WARNING "This Machine has more memory ranges than we support!\n");
		printk(KERN_WARNING "Some memory may not be used!\n");
	}

	/* Copy information into the firmware independent pmem_ranges
	 * array, skipping types we don't care about. Notice we said
	 * "may" above. We'll use all the entries that were returned.
	 */

	npmem_ranges = 0;
	mtbl_ptr = mem_table;
	pmem_ptr = pmem_ranges;	/* Global firmware independent table */
	for (i = 0; i < entries; i++, mtbl_ptr++) {
		if (   (mtbl_ptr->entry_type != PAT_MEMORY_DESCRIPTOR)
		    || (mtbl_ptr->memory_type != PAT_MEMTYPE_MEMORY)
		    || (mtbl_ptr->pages == 0)
		    || (   (mtbl_ptr->memory_usage != PAT_MEMUSE_GENERAL)
			&& (mtbl_ptr->memory_usage != PAT_MEMUSE_GI)
			&& (mtbl_ptr->memory_usage != PAT_MEMUSE_GNI) ) ) {

			continue;
		}

		if (npmem_ranges == MAX_PHYSMEM_RANGES) {
			printk(KERN_WARNING "This Machine has more memory ranges than we support!\n");
			printk(KERN_WARNING "Some memory will not be used!\n");
			break;
		}

		set_pmem_entry(pmem_ptr++, mtbl_ptr->paddr, mtbl_ptr->pages);
		npmem_ranges++;
	}
}
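
/*
 * Walk every module of this cell via PDC_PAT_CELL and register each
 * device found, stopping when the firmware reports no more modules.
 */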
static int __init pat_inventory(void)
{
	int status;
	ulong mod_index = 0;
	struct pdc_pat_cell_num cell_info;

	/*
	** Note:  Prelude (and its successors: Lclass, A400/500) only
	**        implement PDC_PAT_CELL sub-options 0 and 2.
	*/
	status = pdc_pat_cell_get_number(&cell_info);
	if (status != PDC_OK) {
		return 0;
	}

#ifdef DEBUG_PAT
	printk(KERN_DEBUG "CELL_GET_NUMBER: 0x%lx 0x%lx\n", cell_info.cell_num,
	       cell_info.cell_loc);
#endif

	while (PDC_OK == pat_query_module(cell_info.cell_loc, mod_index)) {
		mod_index++;
	}

	return mod_index;
}

/* We only look for extended memory ranges on a 64 bit capable box */
static void __init sprockets_memconfig(void)
{
	struct pdc_memory_table_raddr r_addr;
	struct pdc_memory_table mem_table[MAX_PHYSMEM_RANGES];
	struct pdc_memory_table *mtbl_ptr;
	physmem_range_t *pmem_ptr;
	long status;
	int entries;
	int i;

	status = pdc_mem_mem_table(&r_addr, mem_table,
				(unsigned long)MAX_PHYSMEM_RANGES);

	if (status != PDC_OK) {

		/* The above pdc call only works on boxes with sprockets
		 * firmware (newer B,C,J class). Other non PAT PDC machines
		 * do support more than 3.75 Gb of memory, but we don't
		 * support them yet.
		 */

		pagezero_memconfig();
		return;
	}

	if (r_addr.entries_total > MAX_PHYSMEM_RANGES) {
		printk(KERN_WARNING "This Machine has more memory ranges than we support!\n");
		printk(KERN_WARNING "Some memory will not be used!\n");
	}

	entries = (int)r_addr.entries_returned;

	npmem_ranges = 0;
	mtbl_ptr = mem_table;
	pmem_ptr = pmem_ranges;	/* Global firmware independent table */
	for (i = 0; i < entries; i++, mtbl_ptr++) {
		set_pmem_entry(pmem_ptr++, mtbl_ptr->paddr, mtbl_ptr->pages);
		npmem_ranges++;
	}
}

#else   /* !CONFIG_64BIT */

#define pat_inventory() do { } while (0)
#define pat_memconfig() do { } while (0)
#define sprockets_memconfig() pagezero_memconfig()

#endif	/* !CONFIG_64BIT */

#ifndef CONFIG_PA20

/* Code to support Snake machines (7[2350], 7[235]5, 715/Scorpio) */

static struct parisc_device * __init
legacy_create_device(struct pdc_memory_map *r_addr,
		struct pdc_module_path *module_path)
{
	struct parisc_device *dev;
	int status = pdc_mem_map_hpa(r_addr, module_path);

	if (status != PDC_OK)
		return NULL;

	dev = alloc_pa_dev(r_addr->hpa, &module_path->path);
	if (dev == NULL)
		return NULL;

	register_parisc_device(dev);
	return dev;
}

/**
 * snake_inventory
 *
 * Before PDC_SYSTEM_MAP was invented, the PDC_MEM_MAP call was used.
 * To use it, we initialise the mod_path.bc to 0xff and try all values of
 * mod to get the HPA for the top-level devices.  Bus adapters may have
 * sub-devices which are discovered by setting bc[5] to 0 and bc[4] to the
 * module, then trying all possible functions.
 */
static void __init snake_inventory(void)
{
	int mod;

	for (mod = 0; mod < 16; mod++) {
		struct parisc_device *dev;
		struct pdc_module_path module_path;
		struct pdc_memory_map r_addr;
		unsigned int func;

		/* Top-level module: all bus-converter entries wildcarded */
		memset(module_path.path.bc, 0xff, 6);
		module_path.path.mod = mod;
		dev = legacy_create_device(&r_addr, &module_path);
		if ((!dev) || (dev->id.hw_type != HPHW_BA))
			continue;

		/* This module is a bus adapter: scan its sub-devices */
		memset(module_path.path.bc, 0xff, 4);
		module_path.path.bc[4] = mod;

		for (func = 0; func < 16; func++) {
			module_path.path.bc[5] = 0;
			module_path.path.mod = func;
			legacy_create_device(&r_addr, &module_path);
		}
	}
}

#else /* CONFIG_PA20 */
#define snake_inventory() do { } while (0)
#endif  /* CONFIG_PA20 */

/* Common 32/64 bit based code goes here */

/**
 * add_system_map_addresses - Add additional addresses to the parisc device.
 * @dev: The parisc device.
 * @num_addrs: The number of addresses to add.
 * @module_instance: The system_map module instance.
 *
 * This function adds any additional addresses reported by the system_map
 * firmware to the parisc device.
 */
static void __init
add_system_map_addresses(struct parisc_device *dev, int num_addrs,
			 int module_instance)
{
	int i;
	long status;
	struct pdc_system_map_addr_info addr_result;

	dev->addr = kmalloc_array(num_addrs, sizeof(*dev->addr), GFP_KERNEL);
	if (!dev->addr) {
		printk(KERN_ERR "%s %s(): memory allocation failure\n",
		       __FILE__, __func__);
		return;
	}

	for (i = 1; i <= num_addrs; ++i) {
		status = pdc_system_map_find_addrs(&addr_result,
						   module_instance, i);
		if (PDC_OK == status) {
			dev->addr[dev->num_addrs] = (unsigned long)addr_result.mod_addr;
			dev->num_addrs++;
		} else {
			printk(KERN_WARNING
			       "Bad PDC_FIND_ADDRESS status return (%ld) for index %d\n",
			       status, i);
		}
	}
}

/**
 * system_map_inventory - Retrieve firmware devices via SYSTEM_MAP.
 *
 * This function attempts to retrieve and register all the devices firmware
 * knows about via the SYSTEM_MAP PDC call.
 */
static void __init system_map_inventory(void)
{
	int i;
	long status = PDC_OK;

	for (i = 0; i < 256; i++) {
		struct parisc_device *dev;
		struct pdc_system_map_mod_info module_result;
		struct pdc_module_path module_path;

		status = pdc_system_map_find_mods(&module_result,
				&module_path, i);
		if ((status == PDC_BAD_PROC) || (status == PDC_NE_MOD))
			break;
		if (status != PDC_OK)
			continue;

		dev = alloc_pa_dev(module_result.mod_addr, &module_path.path);
		if (!dev)
			continue;

		register_parisc_device(dev);

		/* if available, get the additional addresses for a module */
		if (!module_result.add_addrs)
			continue;

		add_system_map_addresses(dev, module_result.add_addrs, i);
	}
}
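
/*
 * Top-level memory inventory: use the probing method that matches the
 * firmware type detected in setup_pdc(), then sanity-check the result
 * and fall back to the PAGE0 information if it looks wrong.
 */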
void __init do_memory_inventory(void)
{
	switch (pdc_type) {

	case PDC_TYPE_PAT:
		pat_memconfig();
		break;

	case PDC_TYPE_SYSTEM_MAP:
		sprockets_memconfig();
		break;

	case PDC_TYPE_SNAKE:
		pagezero_memconfig();
		return;

	default:
		panic("Unknown PDC type!\n");
	}

	if (npmem_ranges == 0 || pmem_ranges[0].start_pfn != 0) {
		printk(KERN_WARNING "Bad memory configuration returned!\n");
		printk(KERN_WARNING "Some memory may not be used!\n");
		pagezero_memconfig();
	}
}
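
/*
 * Top-level device inventory: walk the firmware's device list using the
 * interface selected in setup_pdc() and register everything that is found.
 */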
void __init do_device_inventory(void)
{
	printk(KERN_INFO "Searching for devices...\n");

	init_parisc_bus();

	switch (pdc_type) {

	case PDC_TYPE_PAT:
		pat_inventory();
		break;

	case PDC_TYPE_SYSTEM_MAP:
		system_map_inventory();
		break;

	case PDC_TYPE_SNAKE:
		snake_inventory();
		break;

	default:
		panic("Unknown PDC type!\n");
	}

	printk(KERN_INFO "Found devices:\n");
	print_parisc_devices();

#if defined(CONFIG_64BIT) && defined(CONFIG_SMP)
	pa_serialize_tlb_flushes = machine_has_merced_bus();
	if (pa_serialize_tlb_flushes)
		pr_info("Merced bus found: Enable PxTLB serialization.\n");
#endif

#if defined(CONFIG_FW_CFG_SYSFS)
	if (running_on_qemu) {
		struct resource res[3] = {0,};
		unsigned int base;

		base = ((unsigned long long) PAGE0->pad0[2] << 32)
			| PAGE0->pad0[3]; /* SeaBIOS stored it here */

		res[0].name = "fw_cfg";
		res[0].start = base;
		res[0].end = base + 8 - 1;
		res[0].flags = IORESOURCE_MEM;

		res[1].name = "ctrl";
		res[1].flags = IORESOURCE_REG;

		res[2].name = "data";
		res[2].flags = IORESOURCE_REG;

		pr_info("Found qemu fw_cfg interface at %#08x\n", base);
		platform_device_register_simple("fw_cfg",
			PLATFORM_DEVID_NONE, res, 3);
	}
#endif
}