/*
 * This file is included twice from vdso2c.c.  It generates code for 32-bit
 * and 64-bit vDSOs.  We need both for 64-bit builds, since 32-bit vDSOs
 * are built for 32-bit userspace.
 */
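
/*
 * Rough sketch of how vdso2c.c is expected to pull this header in (the
 * authoritative macro definitions live in vdso2c.c itself):
 *
 *	#define ELF_BITS 64
 *	#include "vdso2c.h"	-> emits BITSFUNC(go) as go64()
 *	#undef ELF_BITS
 *
 *	#define ELF_BITS 32
 *	#include "vdso2c.h"	-> emits BITSFUNC(go) as go32()
 *	#undef ELF_BITS
 *
 * BITSFUNC(), ELF(), INT_BITS and GET_LE() are assumed to be provided by
 * the includer and to expand according to ELF_BITS.
 */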
static void BITSFUNC(go)(void *raw_addr, size_t raw_len,
			 void *stripped_addr, size_t stripped_len,
			 FILE *outfile, const char *name)
{
	int found_load = 0;
	unsigned long load_size = -1;  /* Work around bogus warning */
	unsigned long mapping_size;
	ELF(Ehdr) *hdr = (ELF(Ehdr) *)raw_addr;
	int i;
	unsigned long j;
	ELF(Shdr) *symtab_hdr = NULL, *strtab_hdr, *secstrings_hdr,
		*alt_sec = NULL;
	ELF(Dyn) *dyn = 0, *dyn_end = 0;
	const char *secstrings;
	INT_BITS syms[NSYMS] = {};

	ELF(Phdr) *pt = (ELF(Phdr) *)(raw_addr + GET_LE(&hdr->e_phoff));
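
	/*
	 * All ELF fields are read through GET_LE(), which vdso2c.c is
	 * assumed to provide as an endian-safe little-endian accessor, so
	 * the tool behaves the same regardless of the build host's
	 * endianness.
	 */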

	/* Walk the segment table. */
	for (i = 0; i < GET_LE(&hdr->e_phnum); i++) {
		if (GET_LE(&pt[i].p_type) == PT_LOAD) {
			if (found_load)
				fail("multiple PT_LOAD segs\n");

			if (GET_LE(&pt[i].p_offset) != 0 ||
			    GET_LE(&pt[i].p_vaddr) != 0)
				fail("PT_LOAD in wrong place\n");

			if (GET_LE(&pt[i].p_memsz) != GET_LE(&pt[i].p_filesz))
				fail("cannot handle memsz != filesz\n");

			load_size = GET_LE(&pt[i].p_memsz);
			found_load = 1;
		} else if (GET_LE(&pt[i].p_type) == PT_DYNAMIC) {
			dyn = raw_addr + GET_LE(&pt[i].p_offset);
			dyn_end = raw_addr + GET_LE(&pt[i].p_offset) +
				GET_LE(&pt[i].p_memsz);
		}
	}
	if (!found_load)
		fail("no PT_LOAD seg\n");

	if (stripped_len < load_size)
		fail("stripped input is too short\n");
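
	/*
	 * The vDSO gets mapped into processes without any runtime relocation
	 * pass, so an image that still needs dynamic relocations can never
	 * work; treat it as a build error.
	 */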
	/* Walk the dynamic table */
	for (i = 0; dyn + i < dyn_end &&
		     GET_LE(&dyn[i].d_tag) != DT_NULL; i++) {
		typeof(dyn[i].d_tag) tag = GET_LE(&dyn[i].d_tag);
		if (tag == DT_REL || tag == DT_RELSZ || tag == DT_RELA ||
		    tag == DT_RELENT || tag == DT_TEXTREL)
			fail("vdso image contains dynamic relocations\n");
	}
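
	/*
	 * The section walk below records the symbol table and, if present,
	 * the .altinstructions section; the latter's offset and size are
	 * emitted as .alt/.alt_len so the kernel can patch alternatives into
	 * the vDSO text.
	 */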
	/* Walk the section table */
	secstrings_hdr = raw_addr + GET_LE(&hdr->e_shoff) +
		GET_LE(&hdr->e_shentsize)*GET_LE(&hdr->e_shstrndx);
	secstrings = raw_addr + GET_LE(&secstrings_hdr->sh_offset);
	for (i = 0; i < GET_LE(&hdr->e_shnum); i++) {
		ELF(Shdr) *sh = raw_addr + GET_LE(&hdr->e_shoff) +
			GET_LE(&hdr->e_shentsize) * i;
		if (GET_LE(&sh->sh_type) == SHT_SYMTAB)
			symtab_hdr = sh;

		if (!strcmp(secstrings + GET_LE(&sh->sh_name),
			    ".altinstructions"))
			alt_sec = sh;
	}

	if (!symtab_hdr)
		fail("no symbol table\n");

	strtab_hdr = raw_addr + GET_LE(&hdr->e_shoff) +
		GET_LE(&hdr->e_shentsize) * GET_LE(&symtab_hdr->sh_link);
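
	/*
	 * required_syms[] and NSYMS come from vdso2c.c; the walk below
	 * records the value of each listed symbol that the image defines.
	 */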
	/* Walk the symbol table */
	for (i = 0;
	     i < GET_LE(&symtab_hdr->sh_size) / GET_LE(&symtab_hdr->sh_entsize);
	     i++) {
		int k;
		ELF(Sym) *sym = raw_addr + GET_LE(&symtab_hdr->sh_offset) +
			GET_LE(&symtab_hdr->sh_entsize) * i;
		const char *name = raw_addr + GET_LE(&strtab_hdr->sh_offset) +
			GET_LE(&sym->st_name);

		for (k = 0; k < NSYMS; k++) {
			if (!strcmp(name, required_syms[k].name)) {
				if (syms[k]) {
					fail("duplicate symbol %s\n",
					     required_syms[k].name);
				}

				/*
				 * Careful: we use negative addresses, but
				 * st_value is unsigned, so we rely
				 * on syms[k] being a signed type of the
				 * correct size.
				 */
				syms[k] = GET_LE(&sym->st_value);
			}
		}
	}
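
	/*
	 * Layout assumption behind the checks below: the vDSO text is linked
	 * to start at address 0, and the vvar/special pages sit at negative
	 * addresses in [vvar_start, 0), as arranged by the vDSO linker
	 * script.
	 */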
	/* Validate mapping addresses. */
	for (i = 0; i < sizeof(special_pages) / sizeof(special_pages[0]); i++) {
		INT_BITS symval = syms[special_pages[i]];

		if (!symval)
			continue;  /* The mapping isn't used; ignore it. */

		if (symval % 4096)
			fail("%s must be a multiple of 4096\n",
			     required_syms[i].name);
		if (symval + 4096 < syms[sym_vvar_start])
			fail("%s underruns vvar_start\n",
			     required_syms[i].name);
		if (symval + 4096 > 0)
			fail("%s is on the wrong side of the vdso text\n",
			     required_syms[i].name);
	}
	if (syms[sym_vvar_start] % 4096)
		fail("vvar_begin must be a multiple of 4096\n");
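
	/*
	 * Two output modes: with no image name, just write out the stripped
	 * vDSO image verbatim; otherwise emit a C file describing the image
	 * as a struct vdso_image.
	 */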
	if (!name) {
		fwrite(stripped_addr, stripped_len, 1, outfile);
		return;
	}

	mapping_size = (stripped_len + 4095) / 4096 * 4096;
	fprintf(outfile, "/* AUTOMATICALLY GENERATED -- DO NOT EDIT */\n\n");
	fprintf(outfile, "#include <linux/linkage.h>\n");
	fprintf(outfile, "#include <asm/page_types.h>\n");
	fprintf(outfile, "#include <asm/vdso.h>\n");
	fprintf(outfile, "\n");
	fprintf(outfile,
		"static unsigned char raw_data[%lu] __page_aligned_data = {",
		mapping_size);
	for (j = 0; j < stripped_len; j++) {
		if (j % 10 == 0)
			fprintf(outfile, "\n\t");
		fprintf(outfile, "0x%02X, ",
			(int)((unsigned char *)stripped_addr)[j]);
	}
	fprintf(outfile, "\n};\n\n");
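
	/*
	 * pages[] gets one struct page pointer per page of the mapping; it
	 * is emitted empty here and is expected to be filled in at runtime
	 * by the kernel's vDSO setup code.
	 */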
	fprintf(outfile, "static struct page *pages[%lu];\n\n",
		mapping_size / 4096);

	fprintf(outfile, "const struct vdso_image %s = {\n", name);
	fprintf(outfile, "\t.data = raw_data,\n");
	fprintf(outfile, "\t.size = %lu,\n", mapping_size);
	fprintf(outfile, "\t.text_mapping = {\n");
	fprintf(outfile, "\t\t.name = \"[vdso]\",\n");
	fprintf(outfile, "\t\t.pages = pages,\n");
	fprintf(outfile, "\t},\n");
	if (alt_sec) {
		fprintf(outfile, "\t.alt = %lu,\n",
			(unsigned long)GET_LE(&alt_sec->sh_offset));
		fprintf(outfile, "\t.alt_len = %lu,\n",
			(unsigned long)GET_LE(&alt_sec->sh_size));
	}
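	/*
	 * Only symbols flagged for export in required_syms[] become .sym_*
	 * members; the rest were only needed for the sanity checks above.
	 */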
	for (i = 0; i < NSYMS; i++) {
		if (required_syms[i].export && syms[i])
			fprintf(outfile, "\t.sym_%s = %" PRIi64 ",\n",
				required_syms[i].name, (int64_t)syms[i]);
	}
	fprintf(outfile, "};\n");
}