arch/s390/boot/mem_detect.c

// SPDX-License-Identifier: GPL-2.0
#include <linux/errno.h>
#include <linux/init.h>
#include <asm/sclp.h>
#include <asm/sections.h>
#include <asm/mem_detect.h>
#include <asm/sparsemem.h>
#include "compressed/decompressor.h"
#include "boot.h"
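
/*
 * Both values are computed in this early boot stage and are declared with
 * __bootdata so that they end up in the boot data section shared with the
 * decompressed kernel, which picks them up during its own startup.
 */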
unsigned long __bootdata(max_physmem_end);
struct mem_detect_info __bootdata(mem_detect);

/* up to 256 storage elements, 1020 subincrements each */
#define ENTRIES_EXTENDED_MAX \
	(256 * (1020 / 2) * sizeof(struct mem_detect_block))
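
/*
 * Note: the 1020 / 2 above presumably accounts for the merging done by
 * add_mem_detect_block(): in the worst case only every other sub-increment
 * can start a new, non-mergeable block.
 */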

/*
 * To avoid corrupting old kernel memory during dump, find lowest memory
 * chunk possible either right after the kernel end (decompressed kernel) or
 * after initrd (if it is present and there is no hole between the kernel end
 * and initrd)
 */
static void *mem_detect_alloc_extended(void)
{
	unsigned long offset = ALIGN(mem_safe_offset(), sizeof(u64));

	if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && INITRD_START && INITRD_SIZE &&
	    INITRD_START < offset + ENTRIES_EXTENDED_MAX)
		offset = ALIGN(INITRD_START + INITRD_SIZE, sizeof(u64));

	return (void *)offset;
}
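
/*
 * Return a pointer to block number "n". The first MEM_INLINED_ENTRIES
 * blocks are stored directly in mem_detect; later entries spill into the
 * extended array, which is allocated on first use at the safe offset
 * computed above.
 */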
static struct mem_detect_block *__get_mem_detect_block_ptr(u32 n)
{
	if (n < MEM_INLINED_ENTRIES)
		return &mem_detect.entries[n];
	if (unlikely(!mem_detect.entries_extended))
		mem_detect.entries_extended = mem_detect_alloc_extended();
	return &mem_detect.entries_extended[n - MEM_INLINED_ENTRIES];
}

/*
 * sequential calls to add_mem_detect_block with adjacent memory areas
 * are merged together into a single memory block.
 */
void add_mem_detect_block(u64 start, u64 end)
{
	struct mem_detect_block *block;

	if (mem_detect.count) {
		block = __get_mem_detect_block_ptr(mem_detect.count - 1);
		if (block->end == start) {
			block->end = end;
			return;
		}
	}

	block = __get_mem_detect_block_ptr(mem_detect.count);
	block->start = start;
	block->end = end;
	mem_detect.count++;
}
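
/*
 * Example of the merging above (hypothetical values): calling
 * add_mem_detect_block(0, 0x100000) and then add_mem_detect_block(0x100000,
 * 0x200000) leaves a single block [0, 0x200000) rather than two, because
 * the second call only extends block->end of the last block.
 */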

static unsigned long get_mem_detect_end(void)
{
	if (mem_detect.count)
		return __get_mem_detect_block_ptr(mem_detect.count - 1)->end;
	return 0;
}
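
/*
 * Issue diagnose 0x260 with subcode 0x10 ("storage configuration") to
 * retrieve the storage extent list from the hypervisor. A temporary
 * program-check PSW pointing at label 1 is installed first, so if the
 * diagnose is not available the program check lands there and rc keeps its
 * initial -1. On success the value left in ry is returned; the caller uses
 * it as an upper bound on the number of valid extents.
 */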
static int __diag260(unsigned long rx1, unsigned long rx2)
{
	register unsigned long _rx1 asm("2") = rx1;
	register unsigned long _rx2 asm("3") = rx2;
	register unsigned long _ry asm("4") = 0x10; /* storage configuration */
	int rc = -1; /* fail */
	unsigned long reg1, reg2;
	psw_t old = S390_lowcore.program_new_psw;

	asm volatile(
		"	epsw	%0,%1\n"
		"	st	%0,%[psw_pgm]\n"
		"	st	%1,%[psw_pgm]+4\n"
		"	larl	%0,1f\n"
		"	stg	%0,%[psw_pgm]+8\n"
		"	diag	%[rx],%[ry],0x260\n"
		"	ipm	%[rc]\n"
		"	srl	%[rc],28\n"
		"1:\n"
		: "=&d" (reg1), "=&a" (reg2),
		  [psw_pgm] "=Q" (S390_lowcore.program_new_psw),
		  [rc] "+&d" (rc), [ry] "+d" (_ry)
		: [rx] "d" (_rx1), "d" (_rx2)
		: "cc", "memory");
	S390_lowcore.program_new_psw = old;
	return rc == 0 ? _ry : -1;
}
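
/*
 * Record each storage extent reported by diagnose 0x260 as a memory block.
 * The reported end address is treated as inclusive, hence the "+ 1" when
 * the block is added.
 */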
static int diag260(void)
{
	int rc, i;

	struct {
		unsigned long start;
		unsigned long end;
	} storage_extents[8] __aligned(16); /* VM supports up to 8 extents */

	memset(storage_extents, 0, sizeof(storage_extents));
	rc = __diag260((unsigned long)storage_extents, sizeof(storage_extents));
	if (rc == -1)
		return -1;

	for (i = 0; i < min_t(int, rc, ARRAY_SIZE(storage_extents)); i++)
		add_mem_detect_block(storage_extents[i].start, storage_extents[i].end + 1);
	return 0;
}
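
/*
 * Probe whether an address is backed by accessible storage by executing
 * TPROT on it. If the access raises a program check, execution resumes at
 * label 1 and the initial -EFAULT is returned; otherwise the instruction's
 * condition code (0-3) is returned.
 */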
static int tprot(unsigned long addr)
{
	unsigned long pgm_addr;
	int rc = -EFAULT;
	psw_t old = S390_lowcore.program_new_psw;

	S390_lowcore.program_new_psw.mask = __extract_psw();
	asm volatile(
		"	larl	%[pgm_addr],1f\n"
		"	stg	%[pgm_addr],%[psw_pgm_addr]\n"
		"	tprot	0(%[addr]),0\n"
		"	ipm	%[rc]\n"
		"	srl	%[rc],28\n"
		"1:\n"
		: [pgm_addr] "=&d"(pgm_addr),
		  [psw_pgm_addr] "=Q"(S390_lowcore.program_new_psw.addr),
		  [rc] "+&d"(rc)
		: [addr] "a"(addr)
		: "cc", "memory");
	S390_lowcore.program_new_psw = old;
	return rc;
}
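
/*
 * Binary search for the end of memory in 1 MB steps: starting from the
 * range covered by MAX_PHYSMEM_BITS, halve the range and move the offset up
 * whenever tprot() reports the block at the pivot as accessible. The result
 * is recorded as a single block starting at 0.
 */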
static void search_mem_end(void)
{
	unsigned long range = 1 << (MAX_PHYSMEM_BITS - 20); /* in 1MB blocks */
	unsigned long offset = 0;
	unsigned long pivot;

	while (range > 1) {
		range >>= 1;
		pivot = offset + range;
		if (!tprot(pivot << 20))
			offset = pivot;
	}

	add_mem_detect_block(0, (offset + 1) << 20);
}
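
/*
 * Detection sources in order of preference: SCLP storage info, diagnose
 * 0x260, the SCLP-reported maximum memory size alone, and finally the
 * tprot() binary search as a last resort. mem_detect.info_source records
 * which method succeeded.
 */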
void detect_memory(void)
{
	sclp_early_get_memsize(&max_physmem_end);

	if (!sclp_early_read_storage_info()) {
		mem_detect.info_source = MEM_DETECT_SCLP_STOR_INFO;
		return;
	}

	if (!diag260()) {
		mem_detect.info_source = MEM_DETECT_DIAG260;
		return;
	}

	if (max_physmem_end) {
		add_mem_detect_block(0, max_physmem_end);
		mem_detect.info_source = MEM_DETECT_SCLP_READ_INFO;
		return;
	}

	search_mem_end();
	mem_detect.info_source = MEM_DETECT_BIN_SEARCH;
	max_physmem_end = get_mem_detect_end();
}