summary refs log tree commit diff
path: root/arch/x86/vdso/vdso2c.h
diff options
context:
space:
mode:
author: Andy Lutomirski <luto@amacapital.net> 2014-05-05 23:19:34 +0400
committer: H. Peter Anvin <hpa@linux.intel.com> 2014-05-06 00:18:51 +0400
commit 6f121e548f83674ab4920a4e60afb58d4f61b829 (patch)
tree 699aa67f4e5242d1e3cd46513faf27493debc680 /arch/x86/vdso/vdso2c.h
parent cfda7bb9ecbf9d96264bb5bade33a842966d1062 (diff)
download linux-6f121e548f83674ab4920a4e60afb58d4f61b829.tar.xz
x86, vdso: Reimplement vdso.so preparation in build-time C
Currently, vdso.so files are prepared and analyzed by a combination of objcopy, nm, some linker script tricks, and some simple ELF parsers in the kernel. Replace all of that with plain C code that runs at build time. All five vdso images now generate .c files that are compiled and linked in to the kernel image. This should cause only one userspace-visible change: the loaded vDSO images are stripped more heavily than they used to be. Everything outside the loadable segment is dropped. In particular, this causes the section table and section name strings to be missing. This should be fine: real dynamic loaders don't load or inspect these tables anyway. The result is roughly equivalent to eu-strip's --strip-sections option. The purpose of this change is to enable the vvar and hpet mappings to be moved to the page following the vDSO load segment. Currently, it is possible for the section table to extend into the page after the load segment, so, if we map it, it risks overlapping the vvar or hpet page. This happens whenever the load segment is just under a multiple of PAGE_SIZE. The only real subtlety here is that the old code had a C file with inline assembler that did 'call VDSO32_vsyscall' and a linker script that defined 'VDSO32_vsyscall = __kernel_vsyscall'. This most likely worked by accident: the linker script entry defines a symbol associated with an address as opposed to an alias for the real dynamic symbol __kernel_vsyscall. That caused ld to relocate the reference at link time instead of leaving an interposable dynamic relocation. Since the VDSO32_vsyscall hack is no longer needed, I now use 'call __kernel_vsyscall', and I added -Bsymbolic to make it work. vdso2c will generate an error and abort the build if the resulting image contains any dynamic relocations, so we won't silently generate bad vdso images. (Dynamic relocations are a problem because nothing will even attempt to relocate the vdso.) 
Signed-off-by: Andy Lutomirski <luto@amacapital.net> Link: http://lkml.kernel.org/r/2c4fcf45524162a34d87fdda1eb046b2a5cecee7.1399317206.git.luto@amacapital.net Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
Diffstat (limited to 'arch/x86/vdso/vdso2c.h')
-rw-r--r-- arch/x86/vdso/vdso2c.h 137
1 file changed, 137 insertions, 0 deletions
diff --git a/arch/x86/vdso/vdso2c.h b/arch/x86/vdso/vdso2c.h
new file mode 100644
index 000000000000..9276e5207620
--- /dev/null
+++ b/arch/x86/vdso/vdso2c.h
@@ -0,0 +1,137 @@
+/*
+ * This file is included twice from vdso2c.c. It generates code for 32-bit
+ * and 64-bit vDSOs. We need both for 64-bit builds, since 32-bit vDSOs
+ * are built for 32-bit userspace.
+ */
+
+/*
+ * This helper is included twice from vdso2c.c (see the header comment
+ * above) with GOFUNC and the Elf_* types defined appropriately for the
+ * 32-bit and 64-bit cases.  It validates the vDSO ELF image at 'addr',
+ * strips its section table in place, and then either dumps the raw
+ * stripped image to 'outfile' (when 'name' is NULL) or writes a
+ * generated C source file that embeds the image in a 'struct
+ * vdso_image' named 'name'.
+ *
+ * Returns 0 on success.  All validation failures go through fail(),
+ * which is defined by the includer -- presumably it aborts the build
+ * and does not return; TODO confirm (see the dead 'return 1' below).
+ *
+ * NOTE(review): the 'len' parameter is never used in this version.
+ */
+static int GOFUNC(void *addr, size_t len, FILE *outfile, const char *name)
+{
+ int found_load = 0;
+ unsigned long load_size = -1; /* Work around bogus warning */
+ unsigned long data_size;
+ Elf_Ehdr *hdr = (Elf_Ehdr *)addr;
+ int i;
+ unsigned long j;
+ Elf_Shdr *symtab_hdr = NULL, *strtab_hdr, *secstrings_hdr,
+ *alt_sec = NULL;
+ Elf_Dyn *dyn = 0, *dyn_end = 0;
+ const char *secstrings;
+ uint64_t syms[NSYMS] = {};
+
+ Elf_Phdr *pt = (Elf_Phdr *)(addr + hdr->e_phoff);
+
+ /*
+  * Walk the segment table.  Exactly one PT_LOAD segment is accepted,
+  * and it must map the start of the file (offset 0, vaddr 0) with
+  * filesz == memsz, so the whole image can be embedded verbatim.
+  * A PT_DYNAMIC segment, if present, is remembered for the
+  * relocation check below.
+  */
+ for (i = 0; i < hdr->e_phnum; i++) {
+ if (pt[i].p_type == PT_LOAD) {
+ if (found_load)
+ fail("multiple PT_LOAD segs\n");
+
+ if (pt[i].p_offset != 0 || pt[i].p_vaddr != 0)
+ fail("PT_LOAD in wrong place\n");
+
+ if (pt[i].p_memsz != pt[i].p_filesz)
+ fail("cannot handle memsz != filesz\n");
+
+ load_size = pt[i].p_memsz;
+ found_load = 1;
+ } else if (pt[i].p_type == PT_DYNAMIC) {
+ dyn = addr + pt[i].p_offset;
+ dyn_end = addr + pt[i].p_offset + pt[i].p_memsz;
+ }
+ }
+ if (!found_load)
+ fail("no PT_LOAD seg\n");
+ /* Round the load segment up to a whole number of 4096-byte pages. */
+ data_size = (load_size + 4095) / 4096 * 4096;
+
+ /*
+  * Walk the dynamic table.  Any REL-family tag means the image
+  * contains dynamic relocations, which nothing will ever apply at
+  * runtime, so refuse to build such an image.
+  */
+ for (i = 0; dyn + i < dyn_end && dyn[i].d_tag != DT_NULL; i++) {
+ if (dyn[i].d_tag == DT_REL || dyn[i].d_tag == DT_RELSZ ||
+ dyn[i].d_tag == DT_RELENT || dyn[i].d_tag == DT_TEXTREL)
+ fail("vdso image contains dynamic relocations\n");
+ }
+
+ /*
+  * Walk the section table, looking for the symbol table and the
+  * optional .altinstructions section (matched by name through the
+  * section-name string table indexed by e_shstrndx).
+  */
+ secstrings_hdr = addr + hdr->e_shoff + hdr->e_shentsize*hdr->e_shstrndx;
+ secstrings = addr + secstrings_hdr->sh_offset;
+ for (i = 0; i < hdr->e_shnum; i++) {
+ Elf_Shdr *sh = addr + hdr->e_shoff + hdr->e_shentsize * i;
+ if (sh->sh_type == SHT_SYMTAB)
+ symtab_hdr = sh;
+
+ if (!strcmp(secstrings + sh->sh_name, ".altinstructions"))
+ alt_sec = sh;
+ }
+
+ if (!symtab_hdr) {
+ fail("no symbol table\n");
+ return 1; /* NOTE(review): dead code if fail() does not return */
+ }
+
+ /* The symtab's sh_link names the section holding its symbol names. */
+ strtab_hdr = addr + hdr->e_shoff +
+ hdr->e_shentsize * symtab_hdr->sh_link;
+
+ /*
+  * Walk the symbol table and record the value of each symbol listed
+  * in required_syms[].  A nonzero recorded value doubles as the
+  * "already seen" flag, so a duplicate whose first occurrence had
+  * value 0 would go undetected -- presumably none of the required
+  * symbols can legitimately live at address 0; verify if that changes.
+  */
+ for (i = 0; i < symtab_hdr->sh_size / symtab_hdr->sh_entsize; i++) {
+ int k;
+ Elf_Sym *sym = addr + symtab_hdr->sh_offset +
+ symtab_hdr->sh_entsize * i;
+ const char *name = addr + strtab_hdr->sh_offset + sym->st_name;
+ for (k = 0; k < NSYMS; k++) {
+ if (!strcmp(name, required_syms[k])) {
+ if (syms[k]) {
+ fail("duplicate symbol %s\n",
+ required_syms[k]);
+ }
+ syms[k] = sym->st_value;
+ }
+ }
+ }
+
+ /*
+  * Remove sections: zero the section-table fields in the ELF header
+  * so the embedded image carries no section table.  This mutates the
+  * image in place and is safe only now that every section-table read
+  * above has been done.
+  */
+ hdr->e_shoff = 0;
+ hdr->e_shentsize = 0;
+ hdr->e_shnum = 0;
+ hdr->e_shstrndx = SHN_UNDEF;
+
+ /* Raw-output mode: no image name means just dump the stripped blob. */
+ if (!name) {
+ fwrite(addr, load_size, 1, outfile); /* NOTE(review): result unchecked */
+ return 0;
+ }
+
+ /*
+  * C-output mode: emit the image as a page-aligned byte array sized
+  * to the padded data_size (only load_size bytes are initialized;
+  * the remainder of the array is implicitly zero)...
+  */
+ fprintf(outfile, "/* AUTOMATICALLY GENERATED -- DO NOT EDIT */\n\n");
+ fprintf(outfile, "#include <linux/linkage.h>\n");
+ fprintf(outfile, "#include <asm/page_types.h>\n");
+ fprintf(outfile, "#include <asm/vdso.h>\n");
+ fprintf(outfile, "\n");
+ fprintf(outfile,
+ "static unsigned char raw_data[%lu] __page_aligned_data = {",
+ data_size);
+ for (j = 0; j < load_size; j++) {
+ if (j % 10 == 0)
+ fprintf(outfile, "\n\t");
+ fprintf(outfile, "0x%02X, ", (int)((unsigned char *)addr)[j]);
+ }
+ fprintf(outfile, "\n};\n\n");
+
+ /* One struct page pointer per 4096-byte page of the padded image. */
+ fprintf(outfile, "static struct page *pages[%lu];\n\n",
+ data_size / 4096);
+
+ /* ...and a vdso_image descriptor pointing at the embedded data. */
+ fprintf(outfile, "const struct vdso_image %s = {\n", name);
+ fprintf(outfile, "\t.data = raw_data,\n");
+ fprintf(outfile, "\t.size = %lu,\n", data_size);
+ fprintf(outfile, "\t.pages = pages,\n");
+ if (alt_sec) {
+ fprintf(outfile, "\t.alt = %lu,\n",
+ (unsigned long)alt_sec->sh_offset);
+ fprintf(outfile, "\t.alt_len = %lu,\n",
+ (unsigned long)alt_sec->sh_size);
+ }
+ /* Emit a .sym_<name> member for each required symbol that was found. */
+ for (i = 0; i < NSYMS; i++) {
+ if (syms[i])
+ fprintf(outfile, "\t.sym_%s = 0x%" PRIx64 ",\n",
+ required_syms[i], syms[i]);
+ }
+ fprintf(outfile, "};\n");
+
+ return 0;
+}