/*
 * This file is included twice from vdso2c.c. It generates code for 32-bit
 * and 64-bit vDSOs. We need both for 64-bit builds, since 32-bit vDSOs
 * are built for 32-bit userspace.
 */
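
/*
 * GOFUNC() expects vdso2c.c to have defined, before including this file,
 * the Elf_* types, GET_LE()/PUT_LE(), fail(), NSYMS/required_syms,
 * special_pages and sym_end_mapping for the flavor being generated.
 * It parses the vDSO image at addr and either writes the raw image to
 * outfile (when name is NULL) or emits C source describing it as a
 * struct vdso_image called <name>.
 */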
static void GOFUNC(void *addr, size_t len, FILE *outfile, const char *name)
{
	int found_load = 0;
	unsigned long load_size = -1;  /* Work around bogus warning */
	unsigned long data_size;
	Elf_Ehdr *hdr = (Elf_Ehdr *)addr;
	int i;
	unsigned long j;
	Elf_Shdr *symtab_hdr = NULL, *strtab_hdr, *secstrings_hdr,
		*alt_sec = NULL;
	Elf_Dyn *dyn = 0, *dyn_end = 0;
	const char *secstrings;
	uint64_t syms[NSYMS] = {};

	uint64_t fake_sections_value = 0, fake_sections_size = 0;

	Elf_Phdr *pt = (Elf_Phdr *)(addr + GET_LE(&hdr->e_phoff));

	/* Walk the segment table. */
	for (i = 0; i < GET_LE(&hdr->e_phnum); i++) {
		if (GET_LE(&pt[i].p_type) == PT_LOAD) {
			if (found_load)
				fail("multiple PT_LOAD segs\n");

			if (GET_LE(&pt[i].p_offset) != 0 ||
			    GET_LE(&pt[i].p_vaddr) != 0)
				fail("PT_LOAD in wrong place\n");

			if (GET_LE(&pt[i].p_memsz) != GET_LE(&pt[i].p_filesz))
				fail("cannot handle memsz != filesz\n");

			load_size = GET_LE(&pt[i].p_memsz);
			found_load = 1;
		} else if (GET_LE(&pt[i].p_type) == PT_DYNAMIC) {
			dyn = addr + GET_LE(&pt[i].p_offset);
			dyn_end = addr + GET_LE(&pt[i].p_offset) +
				GET_LE(&pt[i].p_memsz);
		}
	}
	if (!found_load)
		fail("no PT_LOAD seg\n");
	data_size = (load_size + 4095) / 4096 * 4096;
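
	/*
	 * The vDSO is mapped into processes as-is; no dynamic relocations
	 * are processed at runtime, so refuse to build an image that
	 * contains any.
	 */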

	/* Walk the dynamic table */
	for (i = 0; dyn + i < dyn_end &&
		     GET_LE(&dyn[i].d_tag) != DT_NULL; i++) {
		typeof(dyn[i].d_tag) tag = GET_LE(&dyn[i].d_tag);
		if (tag == DT_REL || tag == DT_RELSZ ||
		    tag == DT_RELENT || tag == DT_TEXTREL)
			fail("vdso image contains dynamic relocations\n");
	}

	/* Walk the section table */
	secstrings_hdr = addr + GET_LE(&hdr->e_shoff) +
		GET_LE(&hdr->e_shentsize)*GET_LE(&hdr->e_shstrndx);
	secstrings = addr + GET_LE(&secstrings_hdr->sh_offset);
	for (i = 0; i < GET_LE(&hdr->e_shnum); i++) {
		Elf_Shdr *sh = addr + GET_LE(&hdr->e_shoff) +
			GET_LE(&hdr->e_shentsize) * i;
		if (GET_LE(&sh->sh_type) == SHT_SYMTAB)
			symtab_hdr = sh;

		if (!strcmp(secstrings + GET_LE(&sh->sh_name),
			    ".altinstructions"))
			alt_sec = sh;
	}

	if (!symtab_hdr)
		fail("no symbol table\n");

	strtab_hdr = addr + GET_LE(&hdr->e_shoff) +
		GET_LE(&hdr->e_shentsize) * GET_LE(&symtab_hdr->sh_link);
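
	/*
	 * The symbol walk records the value of each symbol listed in
	 * required_syms[] into syms[] and picks up the optional
	 * vdso_fake_sections marker (its value and size), failing on
	 * duplicates.
	 */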

	/* Walk the symbol table */
	for (i = 0;
	     i < GET_LE(&symtab_hdr->sh_size) / GET_LE(&symtab_hdr->sh_entsize);
	     i++) {
		int k;
		Elf_Sym *sym = addr + GET_LE(&symtab_hdr->sh_offset) +
			GET_LE(&symtab_hdr->sh_entsize) * i;
		const char *name = addr + GET_LE(&strtab_hdr->sh_offset) +
			GET_LE(&sym->st_name);

		for (k = 0; k < NSYMS; k++) {
			if (!strcmp(name, required_syms[k])) {
				if (syms[k]) {
					fail("duplicate symbol %s\n",
					     required_syms[k]);
				}
				syms[k] = GET_LE(&sym->st_value);
			}
		}

		if (!strcmp(name, "vdso_fake_sections")) {
			if (fake_sections_value)
				fail("duplicate vdso_fake_sections\n");
			fake_sections_value = GET_LE(&sym->st_value);
			fake_sections_size = GET_LE(&sym->st_size);
		}
	}

	/* Validate mapping addresses. */
	for (i = 0; i < sizeof(special_pages) / sizeof(special_pages[0]); i++) {
		if (!syms[i])
			continue;  /* The mapping isn't used; ignore it. */

		if (syms[i] % 4096)
			fail("%s must be a multiple of 4096\n",
			     required_syms[i]);
		if (syms[i] < data_size)
			fail("%s must be after the text mapping\n",
			     required_syms[i]);
		if (syms[sym_end_mapping] < syms[i] + 4096)
			fail("%s overruns end_mapping\n", required_syms[i]);
	}
	if (syms[sym_end_mapping] % 4096)
		fail("end_mapping must be a multiple of 4096\n");

	/* Remove sections or use fakes */
	if (fake_sections_size % sizeof(Elf_Shdr))
		fail("vdso_fake_sections size is not a multiple of %ld\n",
		     (long)sizeof(Elf_Shdr));
	PUT_LE(&hdr->e_shoff, fake_sections_value);
	PUT_LE(&hdr->e_shentsize, fake_sections_value ? sizeof(Elf_Shdr) : 0);
	PUT_LE(&hdr->e_shnum, fake_sections_size / sizeof(Elf_Shdr));
	PUT_LE(&hdr->e_shstrndx, SHN_UNDEF);

	if (!name) {
		fwrite(addr, load_size, 1, outfile);
		return;
	}
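
	/*
	 * Emit the C image description: the raw image bytes as raw_data[],
	 * a pages[] array covering it, and a const struct vdso_image with
	 * the mapping metadata and symbol offsets.
	 */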
139 fprintf(outfile, "/* AUTOMATICALLY GENERATED -- DO NOT EDIT */\n\n");
140 fprintf(outfile, "#include <linux/linkage.h>\n");
141 fprintf(outfile, "#include <asm/page_types.h>\n");
142 fprintf(outfile, "#include <asm/vdso.h>\n");
143 fprintf(outfile, "\n");
144 fprintf(outfile,
145 "static unsigned char raw_data[%lu] __page_aligned_data = {",
146 data_size);
147 for (j = 0; j < load_size; j++) {
148 if (j % 10 == 0)
149 fprintf(outfile, "\n\t");
150 fprintf(outfile, "0x%02X, ", (int)((unsigned char *)addr)[j]);
152 fprintf(outfile, "\n};\n\n");
154 fprintf(outfile, "static struct page *pages[%lu];\n\n",
155 data_size / 4096);
157 fprintf(outfile, "const struct vdso_image %s = {\n", name);
158 fprintf(outfile, "\t.data = raw_data,\n");
159 fprintf(outfile, "\t.size = %lu,\n", data_size);
160 fprintf(outfile, "\t.text_mapping = {\n");
161 fprintf(outfile, "\t\t.name = \"[vdso]\",\n");
162 fprintf(outfile, "\t\t.pages = pages,\n");
163 fprintf(outfile, "\t},\n");
164 if (alt_sec) {
165 fprintf(outfile, "\t.alt = %lu,\n",
166 (unsigned long)GET_LE(&alt_sec->sh_offset));
167 fprintf(outfile, "\t.alt_len = %lu,\n",
168 (unsigned long)GET_LE(&alt_sec->sh_size));
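
	/* Emit one .sym_<name> initializer for each symbol that was found. */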
	for (i = 0; i < NSYMS; i++) {
		if (syms[i])
			fprintf(outfile, "\t.sym_%s = 0x%" PRIx64 ",\n",
				required_syms[i], syms[i]);
	}
	fprintf(outfile, "};\n");
}